Commit 5b6b30ba authored by Vadim Pisarevsky's avatar Vadim Pisarevsky

added dedicated <modname>_init.cpp files with initModule_<modname>() functions…

added dedicated <modname>_init.cpp files with initModule_<modname>() functions and all the relevant structures; made BackgroundSubtractorMOG/MOG2 derive from Algorithm; cleaned up the MOG2 implementation and made it multi-threaded.
parent 3d108958
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
......@@ -2020,6 +2020,15 @@ public:
};
typedef void (*BinaryFunc)(const uchar* src1, size_t step1,
const uchar* src2, size_t step2,
uchar* dst, size_t step, Size sz,
void*);
CV_EXPORTS BinaryFunc getConvertFunc(int sdepth, int ddepth);
CV_EXPORTS BinaryFunc getConvertScaleFunc(int sdepth, int ddepth);
CV_EXPORTS BinaryFunc getCopyMaskFunc(size_t esz);
//! swaps two matrices
CV_EXPORTS void swap(Mat& a, Mat& b);
......
......@@ -176,15 +176,6 @@ struct NoVec
extern volatile bool USE_SSE2;
typedef void (*BinaryFunc)(const uchar* src1, size_t step1,
const uchar* src2, size_t step2,
uchar* dst, size_t step, Size sz,
void*);
BinaryFunc getConvertFunc(int sdepth, int ddepth);
BinaryFunc getConvertScaleFunc(int sdepth, int ddepth);
BinaryFunc getCopyMaskFunc(size_t esz);
enum { BLOCK_SIZE = 1024 };
#ifdef HAVE_IPP
......
......@@ -144,40 +144,6 @@ void GFTTDetector::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, co
}
}
// Factory for the Algorithm registry: default GFTT (good-features-to-track) detector.
static Algorithm* createGFTT() { return new GFTTDetector; }
// Factory for the Harris variant: a GFTTDetector with "useHarris" switched on.
static Algorithm* createHarris()
{
GFTTDetector* d = new GFTTDetector;
d->set("useHarris", true);
return d;
}
// Namespace-scope registry entries with dynamic initialization (unlike the
// construct-on-first-use accessors used for the other detectors in this file,
// their initialization order relative to other globals is unspecified).
static AlgorithmInfo gftt_info("Feature2D.GFTT", createGFTT);
static AlgorithmInfo harris_info("Feature2D.HARRIS", createHarris);
// Returns the AlgorithmInfo for GFTT; on the first call, registers the same
// parameter set on both the GFTT and HARRIS entries via a temporary instance.
// NOTE(review): objects built via createHarris() are GFTTDetector instances,
// so their info() also returns &gftt_info, never &harris_info -- confirm intended.
// NOTE(review): 'static volatile bool' is not a thread-safe one-time-init
// guard in C++ -- confirm first use is single-threaded.
AlgorithmInfo* GFTTDetector::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
GFTTDetector obj;
gftt_info.addParam(obj, "nfeatures", obj.nfeatures);
gftt_info.addParam(obj, "qualityLevel", obj.qualityLevel);
gftt_info.addParam(obj, "minDistance", obj.minDistance);
gftt_info.addParam(obj, "useHarrisDetector", obj.useHarrisDetector);
gftt_info.addParam(obj, "k", obj.k);
harris_info.addParam(obj, "nfeatures", obj.nfeatures);
harris_info.addParam(obj, "qualityLevel", obj.qualityLevel);
harris_info.addParam(obj, "minDistance", obj.minDistance);
harris_info.addParam(obj, "useHarrisDetector", obj.useHarrisDetector);
harris_info.addParam(obj, "k", obj.k);
initialized = true;
}
return &gftt_info;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
......@@ -215,29 +181,6 @@ void DenseFeatureDetector::detectImpl( const Mat& image, vector<KeyPoint>& keypo
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
// Factory for the Algorithm registry: default DenseFeatureDetector.
static Algorithm* createDense() { return new DenseFeatureDetector; }
// Namespace-scope registry entry, dynamically initialized at static-init time.
static AlgorithmInfo dense_info("Feature2D.Dense", createDense);
// Returns the AlgorithmInfo for the dense detector; the first call registers
// its tunable parameters by reflecting over a temporary instance.
// NOTE(review): 'static volatile bool' is not a thread-safe one-time-init
// guard in C++ -- confirm first use is single-threaded.
AlgorithmInfo* DenseFeatureDetector::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
DenseFeatureDetector obj;
dense_info.addParam(obj, "initFeatureScale", obj.initFeatureScale);
dense_info.addParam(obj, "featureScaleLevels", obj.featureScaleLevels);
dense_info.addParam(obj, "featureScaleMul", obj.featureScaleMul);
dense_info.addParam(obj, "initXyStep", obj.initXyStep);
dense_info.addParam(obj, "initImgBound", obj.initImgBound);
dense_info.addParam(obj, "varyXyStepWithScale", obj.varyXyStepWithScale);
dense_info.addParam(obj, "varyImgBoundWithScale", obj.varyImgBoundWithScale);
initialized = true;
}
return &dense_info;
}
/*
* GridAdaptedFeatureDetector
......@@ -359,161 +302,6 @@ void PyramidAdaptedFeatureDetector::detectImpl( const Mat& image, vector<KeyPoin
if( !mask.empty() )
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
/////////////////////// AlgorithmInfo for various detector & descriptors ////////////////////////////
/* NOTE!!!
All the AlgorithmInfo-related stuff should be in the same file as initModule_features2d().
Otherwise, linker may throw away some seemingly unused stuff.
*/
// Factory for the Algorithm registry: default BriefDescriptorExtractor.
static Algorithm* createBRIEF() { return new BriefDescriptorExtractor; }
// Construct-on-first-use accessor for the "Feature2D.BRIEF" registry entry.
static AlgorithmInfo& brief_info()
{
static AlgorithmInfo brief_info_var("Feature2D.BRIEF", createBRIEF);
return brief_info_var;
}
// Eagerly constructed so the entry exists even if info() is never called
// (keeps the linker from discarding it; see the NOTE above).
static AlgorithmInfo& brief_info_auto = brief_info();
// Returns the BRIEF AlgorithmInfo; the first call registers the "bytes"
// parameter by reflecting over a temporary instance.
// NOTE(review): 'static volatile bool' is not a thread-safe one-time-init
// guard in C++ -- confirm first use is single-threaded.
AlgorithmInfo* BriefDescriptorExtractor::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
BriefDescriptorExtractor brief;
brief_info().addParam(brief, "bytes", brief.bytes_);
initialized = true;
}
return &brief_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Factory for the Algorithm registry: default FAST detector.
static Algorithm* createFAST() { return new FastFeatureDetector; }
// Construct-on-first-use accessor for the "Feature2D.FAST" registry entry.
static AlgorithmInfo& fast_info()
{
static AlgorithmInfo fast_info_var("Feature2D.FAST", createFAST);
return fast_info_var;
}
// Eagerly constructed so the entry exists even if info() is never called
// (keeps the linker from discarding it; see the NOTE above).
static AlgorithmInfo& fast_info_auto = fast_info();
// Returns the FAST AlgorithmInfo; the first call registers the detector's
// tunable parameters ("threshold", "nonmaxSuppression").
// NOTE(review): 'static volatile bool' is not a thread-safe one-time-init
// guard in C++ -- confirm first use is single-threaded.
AlgorithmInfo* FastFeatureDetector::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
FastFeatureDetector obj;
fast_info().addParam(obj, "threshold", obj.threshold);
fast_info().addParam(obj, "nonmaxSuppression", obj.nonmaxSuppression);
initialized = true;
}
return &fast_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Factory for the Algorithm registry: default StarDetector.
static Algorithm* createStarDetector() { return new StarDetector; }
// Construct-on-first-use accessor for the "Feature2D.STAR" registry entry.
static AlgorithmInfo& star_info()
{
static AlgorithmInfo star_info_var("Feature2D.STAR", createStarDetector);
return star_info_var;
}
// Eagerly constructed so the entry exists even if info() is never called
// (keeps the linker from discarding it; see the NOTE above).
static AlgorithmInfo& star_info_auto = star_info();
// Returns the STAR AlgorithmInfo; the first call registers the detector's
// tunable parameters via a temporary instance.
// NOTE(review): 'static volatile bool' is not a thread-safe one-time-init
// guard in C++ -- confirm first use is single-threaded.
AlgorithmInfo* StarDetector::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
StarDetector obj;
star_info().addParam(obj, "maxSize", obj.maxSize);
star_info().addParam(obj, "responseThreshold", obj.responseThreshold);
star_info().addParam(obj, "lineThresholdProjected", obj.lineThresholdProjected);
star_info().addParam(obj, "lineThresholdBinarized", obj.lineThresholdBinarized);
star_info().addParam(obj, "suppressNonmaxSize", obj.suppressNonmaxSize);
initialized = true;
}
return &star_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Factory for the Algorithm registry: default MSER detector.
static Algorithm* createMSER() { return new MSER; }
// Construct-on-first-use accessor for the "Feature2D.MSER" registry entry.
static AlgorithmInfo& mser_info()
{
static AlgorithmInfo mser_info_var("Feature2D.MSER", createMSER);
return mser_info_var;
}
// Eagerly constructed so the entry exists even if info() is never called
// (keeps the linker from discarding it; see the NOTE above).
static AlgorithmInfo& mser_info_auto = mser_info();
// Returns the MSER AlgorithmInfo; the first call registers the detector's
// tunable parameters via a temporary instance.
// NOTE(review): 'static volatile bool' is not a thread-safe one-time-init
// guard in C++ -- confirm first use is single-threaded.
AlgorithmInfo* MSER::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
MSER obj;
mser_info().addParam(obj, "delta", obj.delta);
mser_info().addParam(obj, "minArea", obj.minArea);
mser_info().addParam(obj, "maxArea", obj.maxArea);
mser_info().addParam(obj, "maxVariation", obj.maxVariation);
mser_info().addParam(obj, "minDiversity", obj.minDiversity);
mser_info().addParam(obj, "maxEvolution", obj.maxEvolution);
mser_info().addParam(obj, "areaThreshold", obj.areaThreshold);
mser_info().addParam(obj, "minMargin", obj.minMargin);
mser_info().addParam(obj, "edgeBlurSize", obj.edgeBlurSize);
initialized = true;
}
return &mser_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Factory for the Algorithm registry: default ORB detector/descriptor.
static Algorithm* createORB() { return new ORB; }
// Construct-on-first-use accessor for the "Feature2D.ORB" registry entry.
static AlgorithmInfo& orb_info()
{
static AlgorithmInfo orb_info_var("Feature2D.ORB", createORB);
return orb_info_var;
}
// Eagerly constructed so the entry exists even if info() is never called
// (keeps the linker from discarding it; see the NOTE above).
static AlgorithmInfo& orb_info_auto = orb_info();
// Returns the ORB AlgorithmInfo; the first call registers the tunable
// parameters. Note the exposed names differ from the member names in case
// ("nFeatures" vs obj.nfeatures, "nLevels" vs obj.nlevels).
// NOTE(review): 'static volatile bool' is not a thread-safe one-time-init
// guard in C++ -- confirm first use is single-threaded.
AlgorithmInfo* ORB::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
ORB obj;
orb_info().addParam(obj, "nFeatures", obj.nfeatures);
orb_info().addParam(obj, "scaleFactor", obj.scaleFactor);
orb_info().addParam(obj, "nLevels", obj.nlevels);
orb_info().addParam(obj, "firstLevel", obj.firstLevel);
orb_info().addParam(obj, "edgeThreshold", obj.edgeThreshold);
orb_info().addParam(obj, "patchSize", obj.patchSize);
orb_info().addParam(obj, "WTA_K", obj.WTA_K);
orb_info().addParam(obj, "scoreType", obj.scoreType);
initialized = true;
}
return &orb_info();
}
bool initModule_features2d(void)
{
Ptr<Algorithm> brief = createBRIEF(), orb = createORB(),
star = createStarDetector(), fastd = createFAST(), mser = createMSER();
return brief->info() != 0 && orb->info() != 0 && star->info() != 0 &&
fastd->info() != 0 && mser->info() != 0;
}
}
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv
{
/////////////////////// AlgorithmInfo for various detector & descriptors ////////////////////////////
/* NOTE!!!
All the AlgorithmInfo-related stuff should be in the same file as initModule_features2d().
Otherwise, linker may throw away some seemingly unused stuff.
*/
// Factory for the Algorithm registry: default BriefDescriptorExtractor.
static Algorithm* createBRIEF() { return new BriefDescriptorExtractor; }
// Construct-on-first-use accessor for the "Feature2D.BRIEF" registry entry.
static AlgorithmInfo& brief_info()
{
static AlgorithmInfo brief_info_var("Feature2D.BRIEF", createBRIEF);
return brief_info_var;
}
// Eagerly constructed so the entry exists even if info() is never called
// (keeps the linker from discarding it; see the NOTE above).
static AlgorithmInfo& brief_info_auto = brief_info();
// Returns the BRIEF AlgorithmInfo; the first call registers the "bytes"
// parameter by reflecting over a temporary instance.
// NOTE(review): 'static volatile bool' is not a thread-safe one-time-init
// guard in C++ -- confirm first use is single-threaded.
AlgorithmInfo* BriefDescriptorExtractor::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
BriefDescriptorExtractor brief;
brief_info().addParam(brief, "bytes", brief.bytes_);
initialized = true;
}
return &brief_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Factory for the Algorithm registry: default FAST detector.
static Algorithm* createFAST() { return new FastFeatureDetector; }
// Construct-on-first-use accessor for the "Feature2D.FAST" registry entry.
static AlgorithmInfo& fast_info()
{
static AlgorithmInfo fast_info_var("Feature2D.FAST", createFAST);
return fast_info_var;
}
// Eagerly constructed so the entry exists even if info() is never called
// (keeps the linker from discarding it; see the NOTE above).
static AlgorithmInfo& fast_info_auto = fast_info();
// Returns the FAST AlgorithmInfo; the first call registers the detector's
// tunable parameters ("threshold", "nonmaxSuppression").
// NOTE(review): 'static volatile bool' is not a thread-safe one-time-init
// guard in C++ -- confirm first use is single-threaded.
AlgorithmInfo* FastFeatureDetector::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
FastFeatureDetector obj;
fast_info().addParam(obj, "threshold", obj.threshold);
fast_info().addParam(obj, "nonmaxSuppression", obj.nonmaxSuppression);
initialized = true;
}
return &fast_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Factory for the Algorithm registry: default StarDetector.
static Algorithm* createStarDetector() { return new StarDetector; }
// Construct-on-first-use accessor for the "Feature2D.STAR" registry entry.
static AlgorithmInfo& star_info()
{
static AlgorithmInfo star_info_var("Feature2D.STAR", createStarDetector);
return star_info_var;
}
// Eagerly constructed so the entry exists even if info() is never called
// (keeps the linker from discarding it; see the NOTE above).
static AlgorithmInfo& star_info_auto = star_info();
// Returns the STAR AlgorithmInfo; the first call registers the detector's
// tunable parameters via a temporary instance.
// NOTE(review): 'static volatile bool' is not a thread-safe one-time-init
// guard in C++ -- confirm first use is single-threaded.
AlgorithmInfo* StarDetector::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
StarDetector obj;
star_info().addParam(obj, "maxSize", obj.maxSize);
star_info().addParam(obj, "responseThreshold", obj.responseThreshold);
star_info().addParam(obj, "lineThresholdProjected", obj.lineThresholdProjected);
star_info().addParam(obj, "lineThresholdBinarized", obj.lineThresholdBinarized);
star_info().addParam(obj, "suppressNonmaxSize", obj.suppressNonmaxSize);
initialized = true;
}
return &star_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Factory for the Algorithm registry: default MSER detector.
static Algorithm* createMSER() { return new MSER; }
// Construct-on-first-use accessor for the "Feature2D.MSER" registry entry.
static AlgorithmInfo& mser_info()
{
static AlgorithmInfo mser_info_var("Feature2D.MSER", createMSER);
return mser_info_var;
}
// Eagerly constructed so the entry exists even if info() is never called
// (keeps the linker from discarding it; see the NOTE above).
static AlgorithmInfo& mser_info_auto = mser_info();
// Returns the MSER AlgorithmInfo; the first call registers the detector's
// tunable parameters via a temporary instance.
// NOTE(review): 'static volatile bool' is not a thread-safe one-time-init
// guard in C++ -- confirm first use is single-threaded.
AlgorithmInfo* MSER::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
MSER obj;
mser_info().addParam(obj, "delta", obj.delta);
mser_info().addParam(obj, "minArea", obj.minArea);
mser_info().addParam(obj, "maxArea", obj.maxArea);
mser_info().addParam(obj, "maxVariation", obj.maxVariation);
mser_info().addParam(obj, "minDiversity", obj.minDiversity);
mser_info().addParam(obj, "maxEvolution", obj.maxEvolution);
mser_info().addParam(obj, "areaThreshold", obj.areaThreshold);
mser_info().addParam(obj, "minMargin", obj.minMargin);
mser_info().addParam(obj, "edgeBlurSize", obj.edgeBlurSize);
initialized = true;
}
return &mser_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Factory for the Algorithm registry: default ORB detector/descriptor.
static Algorithm* createORB() { return new ORB; }
// Construct-on-first-use accessor for the "Feature2D.ORB" registry entry.
static AlgorithmInfo& orb_info()
{
static AlgorithmInfo orb_info_var("Feature2D.ORB", createORB);
return orb_info_var;
}
// Eagerly constructed so the entry exists even if info() is never called
// (keeps the linker from discarding it; see the NOTE above).
static AlgorithmInfo& orb_info_auto = orb_info();
// Returns the ORB AlgorithmInfo; the first call registers the tunable
// parameters. Note the exposed names differ from the member names in case
// ("nFeatures" vs obj.nfeatures, "nLevels" vs obj.nlevels).
// NOTE(review): 'static volatile bool' is not a thread-safe one-time-init
// guard in C++ -- confirm first use is single-threaded.
AlgorithmInfo* ORB::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
ORB obj;
orb_info().addParam(obj, "nFeatures", obj.nfeatures);
orb_info().addParam(obj, "scaleFactor", obj.scaleFactor);
orb_info().addParam(obj, "nLevels", obj.nlevels);
orb_info().addParam(obj, "firstLevel", obj.firstLevel);
orb_info().addParam(obj, "edgeThreshold", obj.edgeThreshold);
orb_info().addParam(obj, "patchSize", obj.patchSize);
orb_info().addParam(obj, "WTA_K", obj.WTA_K);
orb_info().addParam(obj, "scoreType", obj.scoreType);
initialized = true;
}
return &orb_info();
}
// Factory for the Algorithm registry: default GFTT (good-features-to-track) detector.
static Algorithm* createGFTT() { return new GFTTDetector; }
// Factory for the Harris variant: a GFTTDetector with "useHarris" switched on.
static Algorithm* createHarris()
{
GFTTDetector* d = new GFTTDetector;
d->set("useHarris", true);
return d;
}
// Namespace-scope registry entries with dynamic initialization (unlike the
// construct-on-first-use accessors used for the other detectors in this file,
// their initialization order relative to other globals is unspecified).
static AlgorithmInfo gftt_info("Feature2D.GFTT", createGFTT);
static AlgorithmInfo harris_info("Feature2D.HARRIS", createHarris);
// Returns the AlgorithmInfo for GFTT; on the first call, registers the same
// parameter set on both the GFTT and HARRIS entries via a temporary instance.
// NOTE(review): objects built via createHarris() are GFTTDetector instances,
// so their info() also returns &gftt_info, never &harris_info -- confirm intended.
// NOTE(review): 'static volatile bool' is not a thread-safe one-time-init
// guard in C++ -- confirm first use is single-threaded.
AlgorithmInfo* GFTTDetector::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
GFTTDetector obj;
gftt_info.addParam(obj, "nfeatures", obj.nfeatures);
gftt_info.addParam(obj, "qualityLevel", obj.qualityLevel);
gftt_info.addParam(obj, "minDistance", obj.minDistance);
gftt_info.addParam(obj, "useHarrisDetector", obj.useHarrisDetector);
gftt_info.addParam(obj, "k", obj.k);
harris_info.addParam(obj, "nfeatures", obj.nfeatures);
harris_info.addParam(obj, "qualityLevel", obj.qualityLevel);
harris_info.addParam(obj, "minDistance", obj.minDistance);
harris_info.addParam(obj, "useHarrisDetector", obj.useHarrisDetector);
harris_info.addParam(obj, "k", obj.k);
initialized = true;
}
return &gftt_info;
}
// Factory for the Algorithm registry: default DenseFeatureDetector.
static Algorithm* createDense() { return new DenseFeatureDetector; }
// Namespace-scope registry entry, dynamically initialized at static-init time.
static AlgorithmInfo dense_info("Feature2D.Dense", createDense);
// Returns the AlgorithmInfo for the dense detector; the first call registers
// its tunable parameters by reflecting over a temporary instance.
// NOTE(review): 'static volatile bool' is not a thread-safe one-time-init
// guard in C++ -- confirm first use is single-threaded.
AlgorithmInfo* DenseFeatureDetector::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
DenseFeatureDetector obj;
dense_info.addParam(obj, "initFeatureScale", obj.initFeatureScale);
dense_info.addParam(obj, "featureScaleLevels", obj.featureScaleLevels);
dense_info.addParam(obj, "featureScaleMul", obj.featureScaleMul);
dense_info.addParam(obj, "initXyStep", obj.initXyStep);
dense_info.addParam(obj, "initImgBound", obj.initImgBound);
dense_info.addParam(obj, "varyXyStepWithScale", obj.varyXyStepWithScale);
dense_info.addParam(obj, "varyImgBoundWithScale", obj.varyImgBoundWithScale);
initialized = true;
}
return &dense_info;
}
bool initModule_features2d(void)
{
Ptr<Algorithm> brief = createBRIEF(), orb = createORB(),
star = createStarDetector(), fastd = createFAST(), mser = createMSER(),
dense = createDense(), gftt = createGFTT(), harris = createHarris();
return brief->info() != 0 && orb->info() != 0 && star->info() != 0 &&
fastd->info() != 0 && mser->info() != 0 && dense->info() != 0 &&
gftt->info() != 0 && harris->info() != 0;
}
}
......@@ -3365,6 +3365,7 @@ typedef struct CvGaussBGModel
CvGaussBGStatModelParams params;
CvGaussBGPoint* g_point;
int countFrames;
void* mog;
} CvGaussBGModel;
......
......@@ -50,10 +50,9 @@ icvReleaseGaussianBGModel( CvGaussBGModel** bg_model )
if( *bg_model )
{
delete (cv::Mat*)((*bg_model)->g_point);
delete (cv::BackgroundSubtractorMOG*)((*bg_model)->mog);
cvReleaseImage( &(*bg_model)->background );
cvReleaseImage( &(*bg_model)->foreground );
cvReleaseMemStorage(&(*bg_model)->storage);
memset( *bg_model, 0, sizeof(**bg_model) );
delete *bg_model;
*bg_model = 0;
......@@ -64,70 +63,15 @@ icvReleaseGaussianBGModel( CvGaussBGModel** bg_model )
static int CV_CDECL
icvUpdateGaussianBGModel( IplImage* curr_frame, CvGaussBGModel* bg_model, double learningRate )
{
int region_count = 0;
cv::Mat image = cv::cvarrToMat(curr_frame), mask = cv::cvarrToMat(bg_model->foreground);
cv::BackgroundSubtractorMOG mog;
mog.bgmodel = *(cv::Mat*)bg_model->g_point;
mog.frameSize = mog.bgmodel.data ? cv::Size(cvGetSize(curr_frame)) : cv::Size();
mog.frameType = image.type();
mog.nframes = bg_model->countFrames;
mog.history = bg_model->params.win_size;
mog.nmixtures = bg_model->params.n_gauss;
mog.varThreshold = bg_model->params.std_threshold*bg_model->params.std_threshold;
mog.backgroundRatio = bg_model->params.bg_threshold;
mog(image, mask, learningRate);
bg_model->countFrames = mog.nframes;
if( ((cv::Mat*)bg_model->g_point)->data != mog.bgmodel.data )
*((cv::Mat*)bg_model->g_point) = mog.bgmodel;
//foreground filtering
//filter small regions
cvClearMemStorage(bg_model->storage);
//cvMorphologyEx( bg_model->foreground, bg_model->foreground, 0, 0, CV_MOP_OPEN, 1 );
//cvMorphologyEx( bg_model->foreground, bg_model->foreground, 0, 0, CV_MOP_CLOSE, 1 );
#if 0
CvSeq *first_seq = NULL, *prev_seq = NULL, *seq = NULL;
cvFindContours( bg_model->foreground, bg_model->storage, &first_seq, sizeof(CvContour), CV_RETR_LIST );
for( seq = first_seq; seq; seq = seq->h_next )
{
CvContour* cnt = (CvContour*)seq;
if( cnt->rect.width * cnt->rect.height < bg_model->params.minArea )
{
//delete small contour
prev_seq = seq->h_prev;
if( prev_seq )
{
prev_seq->h_next = seq->h_next;
if( seq->h_next ) seq->h_next->h_prev = prev_seq;
}
else
{
first_seq = seq->h_next;
if( seq->h_next ) seq->h_next->h_prev = NULL;
}
}
else
{
region_count++;
}
}
bg_model->foreground_regions = first_seq;
cvZero(bg_model->foreground);
cvDrawContours(bg_model->foreground, first_seq, CV_RGB(0, 0, 255), CV_RGB(0, 0, 255), 10, -1);
#endif
cv::BackgroundSubtractorMOG* mog = (cv::BackgroundSubtractorMOG*)(bg_model->mog);
CV_Assert(mog != 0);
CvMat _mask = mask;
cvCopy(&_mask, bg_model->foreground);
(*mog)(image, mask, learningRate);
bg_model->countFrames++;
return region_count;
return 0;
}
CV_IMPL CvBGStatModel*
......@@ -161,15 +105,17 @@ cvCreateGaussianBGModel( IplImage* first_frame, CvGaussBGStatModelParams* parame
bg_model->params = params;
//prepare storages
bg_model->g_point = (CvGaussBGPoint*)new cv::Mat();
cv::BackgroundSubtractorMOG* mog =
new cv::BackgroundSubtractorMOG(params.win_size,
params.n_gauss,
params.bg_threshold,
params.variance_init);
bg_model->background = cvCreateImage(cvSize(first_frame->width,
first_frame->height), IPL_DEPTH_8U, first_frame->nChannels);
bg_model->foreground = cvCreateImage(cvSize(first_frame->width,
first_frame->height), IPL_DEPTH_8U, 1);
bg_model->mog = mog;
bg_model->storage = cvCreateMemStorage();
CvSize sz = cvGetSize(first_frame);
bg_model->background = cvCreateImage(sz, IPL_DEPTH_8U, first_frame->nChannels);
bg_model->foreground = cvCreateImage(sz, IPL_DEPTH_8U, 1);
bg_model->countFrames = 0;
......
......@@ -671,29 +671,6 @@ void EM::read(const FileNode& fn)
computeLogWeightDivDet();
}
// Factory for the Algorithm registry: default EM (expectation-maximization) model.
static Algorithm* createEM()
{
return new EM;
}
// Namespace-scope registry entry for "StatModel.EM", dynamically initialized.
static AlgorithmInfo em_info("StatModel.EM", createEM);
// Returns the EM AlgorithmInfo; the first call registers the model's
// parameters. The trailing 'true' on each addParam call presumably marks the
// parameter read-only -- confirm against AlgorithmInfo::addParam.
// NOTE(review): 'static volatile bool' is not a thread-safe one-time-init
// guard in C++ -- confirm first use is single-threaded.
AlgorithmInfo* EM::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
EM obj;
em_info.addParam(obj, "nclusters", obj.nclusters, true);
em_info.addParam(obj, "covMatType", obj.covMatType, true);
em_info.addParam(obj, "weights", obj.weights, true);
em_info.addParam(obj, "means", obj.means, true);
em_info.addParam(obj, "covs", obj.covs, true);
initialized = true;
}
return &em_info;
}
} // namespace cv
/* End of file. */
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv
{
// Factory for the Algorithm registry: default EM (expectation-maximization) model.
static Algorithm* createEM()
{
return new EM;
}
// Namespace-scope registry entry for "StatModel.EM", dynamically initialized.
static AlgorithmInfo em_info("StatModel.EM", createEM);
// Returns the EM AlgorithmInfo; the first call registers the model's
// parameters. nclusters/covMatType/maxIters/epsilon are registered without
// the trailing flag, while weights/means/covs pass 'true' -- presumably a
// read-only marker; confirm against AlgorithmInfo::addParam.
// NOTE(review): 'static volatile bool' is not a thread-safe one-time-init
// guard in C++ -- confirm first use is single-threaded.
AlgorithmInfo* EM::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
EM obj;
em_info.addParam(obj, "nclusters", obj.nclusters);
em_info.addParam(obj, "covMatType", obj.covMatType);
em_info.addParam(obj, "maxIters", obj.maxIters);
em_info.addParam(obj, "epsilon", obj.epsilon);
em_info.addParam(obj, "weights", obj.weights, true);
em_info.addParam(obj, "means", obj.means, true);
em_info.addParam(obj, "covs", obj.covs, true);
initialized = true;
}
return &em_info;
}
bool initModule_ml(void)
{
Ptr<Algorithm> em = createEM();
return em->info() != 0;
}
}
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv
{
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Factory for the Algorithm registry: default SURF detector/descriptor.
static Algorithm* createSURF()
{
return new SURF;
}
// Construct-on-first-use accessor for the "Feature2D.SURF" registry entry.
static AlgorithmInfo& surf_info()
{
static AlgorithmInfo surf_info_var("Feature2D.SURF", createSURF);
return surf_info_var;
}
// Eagerly constructed so the entry exists even if info() is never called
// (keeps the linker from discarding it).
static AlgorithmInfo& surf_info_auto = surf_info();
// Returns the SURF AlgorithmInfo; the first call registers the tunable
// parameters via a temporary instance.
// NOTE(review): 'static volatile bool' is not a thread-safe one-time-init
// guard in C++ -- confirm first use is single-threaded.
AlgorithmInfo* SURF::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
SURF obj;
surf_info().addParam(obj, "hessianThreshold", obj.hessianThreshold);
surf_info().addParam(obj, "nOctaves", obj.nOctaves);
surf_info().addParam(obj, "nOctaveLayers", obj.nOctaveLayers);
surf_info().addParam(obj, "extended", obj.extended);
surf_info().addParam(obj, "upright", obj.upright);
initialized = true;
}
return &surf_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Factory for the Algorithm registry: default SIFT detector/descriptor.
static Algorithm* createSIFT() { return new SIFT; }
// Construct-on-first-use accessor for the "Feature2D.SIFT" registry entry.
static AlgorithmInfo& sift_info()
{
static AlgorithmInfo sift_info_var("Feature2D.SIFT", createSIFT);
return sift_info_var;
}
// Eagerly constructed so the entry exists even if info() is never called
// (keeps the linker from discarding it).
static AlgorithmInfo& sift_info_auto = sift_info();
// Returns the SIFT AlgorithmInfo; the first call registers the tunable
// parameters (exposed name "nFeatures" maps to member obj.nfeatures).
// NOTE(review): 'static volatile bool' is not a thread-safe one-time-init
// guard in C++ -- confirm first use is single-threaded.
AlgorithmInfo* SIFT::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
SIFT obj;
sift_info().addParam(obj, "nFeatures", obj.nfeatures);
sift_info().addParam(obj, "nOctaveLayers", obj.nOctaveLayers);
sift_info().addParam(obj, "contrastThreshold", obj.contrastThreshold);
sift_info().addParam(obj, "edgeThreshold", obj.edgeThreshold);
sift_info().addParam(obj, "sigma", obj.sigma);
initialized = true;
}
return &sift_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
bool initModule_nonfree(void)
{
Ptr<Algorithm> sift = createSIFT(), surf = createSURF();
return sift->info() != 0 && surf->info() != 0;
}
}
\ No newline at end of file
......@@ -7,10 +7,11 @@
// copy or use the software.
//
//
// Intel License Agreement
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
......@@ -23,7 +24,7 @@
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
......
......@@ -7,10 +7,11 @@
// copy or use the software.
//
//
// Intel License Agreement
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
......@@ -23,7 +24,7 @@
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
......
......@@ -938,76 +938,6 @@ void SURF::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat&
void SURF::computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors) const
{
(*this)(image, Mat(), keypoints, descriptors, true);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
static Algorithm* createSURF()
{
return new SURF;
}
static AlgorithmInfo& surf_info()
{
static AlgorithmInfo surf_info_var("Feature2D.SURF", createSURF);
return surf_info_var;
}
static AlgorithmInfo& surf_info_auto = surf_info();
AlgorithmInfo* SURF::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
SURF obj;
surf_info().addParam(obj, "hessianThreshold", obj.hessianThreshold);
surf_info().addParam(obj, "nOctaves", obj.nOctaves);
surf_info().addParam(obj, "nOctaveLayers", obj.nOctaveLayers);
surf_info().addParam(obj, "extended", obj.extended);
surf_info().addParam(obj, "upright", obj.upright);
initialized = true;
}
return &surf_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
static Algorithm* createSIFT() { return new SIFT; }
static AlgorithmInfo& sift_info()
{
static AlgorithmInfo sift_info_var("Feature2D.SIFT", createSIFT);
return sift_info_var;
}
static AlgorithmInfo& sift_info_auto = sift_info();
AlgorithmInfo* SIFT::info() const
{
static volatile bool initialized = false;
if( !initialized )
{
SIFT obj;
sift_info().addParam(obj, "nFeatures", obj.nfeatures);
sift_info().addParam(obj, "nOctaveLayers", obj.nOctaveLayers);
sift_info().addParam(obj, "contrastThreshold", obj.contrastThreshold);
sift_info().addParam(obj, "edgeThreshold", obj.edgeThreshold);
sift_info().addParam(obj, "sigma", obj.sigma);
initialized = true;
}
return &sift_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
bool initModule_nonfree(void)
{
Ptr<Algorithm> sift = createSIFT(), surf = createSURF();
return sift->info() != 0 && surf->info() != 0;
}
}
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
......@@ -54,7 +54,7 @@ namespace cv
The class is only used to define the common interface for
the whole family of background/foreground segmentation algorithms.
*/
class CV_EXPORTS_W BackgroundSubtractor
class CV_EXPORTS_W BackgroundSubtractor : public Algorithm
{
public:
//! the virtual destructor
......@@ -93,6 +93,9 @@ public:
//! re-initiaization method
virtual void initialize(Size frameSize, int frameType);
virtual AlgorithmInfo* info() const;
protected:
Size frameSize;
int frameType;
Mat bgmodel;
......@@ -130,6 +133,9 @@ public:
//! re-initiaization method
virtual void initialize(Size frameSize, int frameType);
virtual AlgorithmInfo* info() const;
protected:
Size frameSize;
int frameType;
Mat bgmodel;
......@@ -137,24 +143,24 @@ public:
int nframes;
int history;
int nmixtures;
//! here it is the maximum allowed number of mixture comonents.
//! here it is the maximum allowed number of mixture components.
//! Actual number is determined dynamically per pixel
float varThreshold;
// threshold on the squared Mahalan. dist. to decide if it is well described
//by the background model or not. Related to Cthr from the paper.
//This does not influence the update of the background. A typical value could be 4 sigma
//and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
double varThreshold;
// threshold on the squared Mahalanobis distance to decide if it is well described
// by the background model or not. Related to Cthr from the paper.
// This does not influence the update of the background. A typical value could be 4 sigma
// and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
/////////////////////////
//less important parameters - things you might change but be carefull
// less important parameters - things you might change but be carefull
////////////////////////
float backgroundRatio;
//corresponds to fTB=1-cf from the paper
//TB - threshold when the component becomes significant enough to be included into
//the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.
//For alpha=0.001 it means that the mode should exist for approximately 105 frames before
//it is considered foreground
//float noiseSigma;
// corresponds to fTB=1-cf from the paper
// TB - threshold when the component becomes significant enough to be included into
// the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.
// For alpha=0.001 it means that the mode should exist for approximately 105 frames before
// it is considered foreground
// float noiseSigma;
float varThresholdGen;
//correspondts to Tg - threshold on the squared Mahalan. dist. to decide
//when a sample is close to the existing components. If it is not close
......
......@@ -134,17 +134,19 @@ template<typename VT> struct MixData
};
static void process8uC1( BackgroundSubtractorMOG& obj, const Mat& image, Mat& fgmask, double learningRate )
static void process8uC1( const Mat& image, Mat& fgmask, double learningRate,
Mat& bgmodel, int nmixtures, double backgroundRatio,
double varThreshold, double noiseSigma )
{
int x, y, k, k1, rows = image.rows, cols = image.cols;
float alpha = (float)learningRate, T = (float)obj.backgroundRatio, vT = (float)obj.varThreshold;
int K = obj.nmixtures;
MixData<float>* mptr = (MixData<float>*)obj.bgmodel.data;
float alpha = (float)learningRate, T = (float)backgroundRatio, vT = (float)varThreshold;
int K = nmixtures;
MixData<float>* mptr = (MixData<float>*)bgmodel.data;
const float w0 = (float)defaultInitialWeight;
const float sk0 = (float)(w0/(defaultNoiseSigma*2));
const float var0 = (float)(defaultNoiseSigma*defaultNoiseSigma*4);
const float minVar = (float)(obj.noiseSigma*obj.noiseSigma);
const float minVar = (float)(noiseSigma*noiseSigma);
for( y = 0; y < rows; y++ )
{
......@@ -259,17 +261,20 @@ static void process8uC1( BackgroundSubtractorMOG& obj, const Mat& image, Mat& fg
}
}
static void process8uC3( BackgroundSubtractorMOG& obj, const Mat& image, Mat& fgmask, double learningRate )
static void process8uC3( const Mat& image, Mat& fgmask, double learningRate,
Mat& bgmodel, int nmixtures, double backgroundRatio,
double varThreshold, double noiseSigma )
{
int x, y, k, k1, rows = image.rows, cols = image.cols;
float alpha = (float)learningRate, T = (float)obj.backgroundRatio, vT = (float)obj.varThreshold;
int K = obj.nmixtures;
float alpha = (float)learningRate, T = (float)backgroundRatio, vT = (float)varThreshold;
int K = nmixtures;
const float w0 = (float)defaultInitialWeight;
const float sk0 = (float)(w0/(defaultNoiseSigma*2*sqrt(3.)));
const float var0 = (float)(defaultNoiseSigma*defaultNoiseSigma*4);
const float minVar = (float)(obj.noiseSigma*obj.noiseSigma);
MixData<Vec3f>* mptr = (MixData<Vec3f>*)obj.bgmodel.data;
const float minVar = (float)(noiseSigma*noiseSigma);
MixData<Vec3f>* mptr = (MixData<Vec3f>*)bgmodel.data;
for( y = 0; y < rows; y++ )
{
......@@ -403,9 +408,9 @@ void BackgroundSubtractorMOG::operator()(InputArray _image, OutputArray _fgmask,
CV_Assert(learningRate >= 0);
if( image.type() == CV_8UC1 )
process8uC1( *this, image, fgmask, learningRate );
process8uC1( image, fgmask, learningRate, bgmodel, nmixtures, backgroundRatio, varThreshold, noiseSigma );
else if( image.type() == CV_8UC3 )
process8uC3( *this, image, fgmask, learningRate );
process8uC3( image, fgmask, learningRate, bgmodel, nmixtures, backgroundRatio, varThreshold, noiseSigma );
else
CV_Error( CV_StsUnsupportedFormat, "Only 1- and 3-channel 8-bit images are supported in BackgroundSubtractorMOG" );
}
......
......@@ -82,6 +82,8 @@
#include "precomp.hpp"
namespace cv
{
/*
Interface of Gaussian mixture algorithm from:
......@@ -95,29 +97,24 @@
-fast - number of Gausssian components is constantly adapted per pixel.
-performs also shadow detection (see bgfg_segm_test.cpp example)
*/
#define CV_BG_MODEL_MOG2 3 /* "Mixture of Gaussians 2". */
/* default parameters of gaussian background detection algorithm */
#define CV_BGFG_MOG2_STD_THRESHOLD 4.0f /* lambda=2.5 is 99% */
#define CV_BGFG_MOG2_WINDOW_SIZE 500 /* Learning rate; alpha = 1/CV_GBG_WINDOW_SIZE */
#define CV_BGFG_MOG2_BACKGROUND_THRESHOLD 0.9f /* threshold sum of weights for background test */
#define CV_BGFG_MOG2_STD_THRESHOLD_GENERATE 3.0f /* lambda=2.5 is 99% */
#define CV_BGFG_MOG2_NGAUSSIANS 5 /* = K = number of Gaussians in mixture */
#define CV_BGFG_MOG2_VAR_INIT 15.0f /* initial variance for new components*/
#define CV_BGFG_MOG2_VAR_MIN 4.0f
#define CV_BGFG_MOG2_VAR_MAX 5*CV_BGFG_MOG2_VAR_INIT
#define CV_BGFG_MOG2_MINAREA 15.0f /* for postfiltering */
/* additional parameters */
#define CV_BGFG_MOG2_CT 0.05f /* complexity reduction prior constant 0 - no reduction of number of components*/
#define CV_BGFG_MOG2_SHADOW_VALUE 127 /* value to use in the segmentation mask for shadows, sot 0 not to do shadow detection*/
#define CV_BGFG_MOG2_SHADOW_TAU 0.5f /* Tau - shadow threshold, see the paper for explanation*/
typedef struct CvGaussBGStatModel2Params
*/
// default parameters of gaussian background detection algorithm
static const int defaultHistory2 = 500; // Learning rate; alpha = 1/defaultHistory2
static const float defaultVarThreshold2 = 4.0f*4.0f;
static const int defaultNMixtures2 = 5; // maximal number of Gaussians in mixture
static const float defaultBackgroundRatio2 = 0.9f; // threshold sum of weights for background test
static const float defaultVarThresholdGen2 = 3.0f*3.0f;
static const float defaultVarInit2 = 15.0f; // initial variance for new components
static const float defaultVarMax2 = 5*defaultVarInit2;
static const float defaultVarMin2 = 4.0f;
// additional parameters
static const float defaultfCT2 = 0.05f; // complexity reduction prior constant 0 - no reduction of number of components
static const unsigned char defaultnShadowDetection2 = (unsigned char)127; // value to use in the segmentation mask for shadows, set 0 not to do shadow detection
static const float defaultfTau = 0.5f; // Tau - shadow threshold, see the paper for explanation
struct GaussBGStatModel2Params
{
//image info
int nWidth;
......@@ -179,83 +176,62 @@ typedef struct CvGaussBGStatModel2Params
//version of the background. Tau is a threshold on how much darker the shadow can be.
//Tau= 0.5 means that if pixel is more than 2 times darker then it is not shadow
//See: Prati,Mikic,Trivedi,Cucchiarra,"Detecting Moving Shadows...",IEEE PAMI,2003.
} CvGaussBGStatModel2Params;
#define CV_BGFG_MOG2_NDMAX 3
};
typedef struct CvPBGMMGaussian
struct GMM
{
float weight;
float mean[CV_BGFG_MOG2_NDMAX];
float variance;
}CvPBGMMGaussian;
typedef struct CvGaussBGStatModel2Data
{
CvPBGMMGaussian* rGMM; //array for the mixture of Gaussians
unsigned char* rnUsedModes;//number of Gaussian components per pixel (maximum 255)
} CvGaussBGStatModel2Data;
};
//shadow detection performed per pixel
// shadow detection performed per pixel
// should work for rgb data, could be usefull for gray scale and depth data as well
// See: Prati,Mikic,Trivedi,Cucchiarra,"Detecting Moving Shadows...",IEEE PAMI,2003.
CV_INLINE int _icvRemoveShadowGMM(float* data, int nD,
unsigned char nModes,
CvPBGMMGaussian* pGMM,
float m_fTb,
float m_fTB,
float m_fTau)
// See: Prati,Mikic,Trivedi,Cucchiarra,"Detecting Moving Shadows...",IEEE PAMI,2003.
static CV_INLINE bool
detectShadowGMM(const float* data, int nchannels, int nmodes,
const GMM* gmm, const float* mean,
float Tb, float TB, float tau)
{
float tWeight = 0;
float numerator, denominator;
// check all the components marked as background:
for (int iModes=0;iModes<nModes;iModes++)
for( int mode = 0; mode < nmodes; mode++, mean += nchannels )
{
GMM g = gmm[mode];
CvPBGMMGaussian g=pGMM[iModes];
numerator = 0.0f;
denominator = 0.0f;
for (int iD=0;iD<nD;iD++)
float numerator = 0.0f;
float denominator = 0.0f;
for( int c = 0; c < nchannels; c++ )
{
numerator += data[iD] * g.mean[iD];
denominator += g.mean[iD]* g.mean[iD];
numerator += data[c] * mean[c];
denominator += mean[c] * mean[c];
}
// no division by zero allowed
if (denominator == 0)
{
return 0;
};
float a = numerator / denominator;
if( denominator == 0 )
return false;
// if tau < a < 1 then also check the color distortion
if ((a <= 1) && (a >= m_fTau))
if( numerator <= denominator && numerator >= tau*denominator )
{
float dist2a=0.0f;
float a = numerator / denominator;
float dist2a = 0.0f;
for (int iD=0;iD<nD;iD++)
for( int c = 0; c < nchannels; c++ )
{
float dD= a*g.mean[iD] - data[iD];
dist2a += (dD*dD);
float dD= a*mean[c] - data[c];
dist2a += dD*dD;
}
if (dist2a<m_fTb*g.variance*a*a)
{
return 2;
}
if (dist2a < Tb*g.variance*a*a)
return true;
};
tWeight += g.weight;
if (tWeight > m_fTB)
{
return 0;
};
if( tWeight > TB )
return false;
};
return 0;
return false;
}
//update GMM - the base update function performed per pixel
......@@ -272,680 +248,236 @@ CV_INLINE int _icvRemoveShadowGMM(float* data, int nD,
//IEEE Trans. on Pattern Analysis and Machine Intelligence, vol.26, no.5, pages 651-656, 2004
//http://www.zoranz.net/Publications/zivkovic2004PAMI.pdf
CV_INLINE int _icvUpdateGMM(float* data, int nD,
unsigned char* pModesUsed,
CvPBGMMGaussian* pGMM,
int m_nM,
float m_fAlphaT,
float m_fTb,
float m_fTB,
float m_fTg,
float m_fVarInit,
float m_fVarMax,
float m_fVarMin,
float m_fPrune)
struct MOG2Invoker
{
//calculate distances to the modes (+ sort)
//here we need to go in descending order!!!
bool bBackground=0;//return value -> true - the pixel classified as background
//internal:
bool bFitsPDF=0;//if it remains zero a new GMM mode will be added
float m_fOneMinAlpha=1-m_fAlphaT;
unsigned char nModes=*pModesUsed;//current number of modes in GMM
float totalWeight=0.0f;
//////
//go through all modes
int iMode=0;
CvPBGMMGaussian* pGauss=pGMM;
for (;iMode<nModes;iMode++,pGauss++)
MOG2Invoker(const Mat& _src, Mat& _dst,
GMM* _gmm, float* _mean,
uchar* _modesUsed,
int _nmixtures, float _alphaT,
float _Tb, float _TB, float _Tg,
float _varInit, float _varMin, float _varMax,
float _prune, float _tau, bool _detectShadows,
uchar _shadowVal)
{
float weight = pGauss->weight;//need only weight if fit is found
weight=m_fOneMinAlpha*weight+m_fPrune;
////
//fit not found yet
if (!bFitsPDF)
{
//check if it belongs to some of the remaining modes
float var=pGauss->variance;
//calculate difference and distance
float dist2=0.0f;
#if (CV_BGFG_MOG2_NDMAX==1)
float dData=pGauss->mean[0]-data[0];
dist2=dData*dData;
#else
float dData[CV_BGFG_MOG2_NDMAX];
for (int iD=0;iD<nD;iD++)
{
dData[iD]=pGauss->mean[iD]-data[iD];
dist2+=dData[iD]*dData[iD];
}
#endif
//background? - m_fTb - usually larger than m_fTg
if ((totalWeight<m_fTB)&&(dist2<m_fTb*var))
bBackground=1;
//check fit
if (dist2<m_fTg*var)
{
/////
//belongs to the mode - bFitsPDF becomes 1
bFitsPDF=1;
//update distribution
//update weight
weight+=m_fAlphaT;
float k = m_fAlphaT/weight;
//update mean
#if (CV_BGFG_MOG2_NDMAX==1)
pGauss->mean[0]-=k*dData;
#else
for (int iD=0;iD<nD;iD++)
{
pGauss->mean[iD]-=k*dData[iD];
}
#endif
//update variance
float varnew = var + k*(dist2-var);
//limit the variance
pGauss->variance = MIN(m_fVarMax,MAX(varnew,m_fVarMin));
//sort
//all other weights are at the same place and
//only the matched (iModes) is higher -> just find the new place for it
for (int iLocal = iMode;iLocal>0;iLocal--)
{
//check one up
if (weight < (pGMM[iLocal-1].weight))
{
break;
}
else
{
//swap one up
CvPBGMMGaussian temp = pGMM[iLocal];
pGMM[iLocal] = pGMM[iLocal-1];
pGMM[iLocal-1] = temp;
pGauss--;
}
}
//belongs to the mode - bFitsPDF becomes 1
/////
}
}//!bFitsPDF)
//check prune
if (weight<-m_fPrune)
{
weight=0.0;
nModes--;
}
pGauss->weight=weight;//update weight by the calculated value
totalWeight+=weight;
}
//go through all modes
//////
//renormalize weights
for (iMode = 0; iMode < nModes; iMode++)
{
pGMM[iMode].weight = pGMM[iMode].weight/totalWeight;
}
src = &_src;
dst = &_dst;
gmm0 = _gmm;
mean0 = _mean;
modesUsed0 = _modesUsed;
nmixtures = _nmixtures;
alphaT = _alphaT;
Tb = _Tb;
TB = _TB;
Tg = _Tg;
varInit = _varInit;
varMin = MIN(_varMin, _varMax);
varMax = MAX(_varMin, _varMax);
prune = _prune;
tau = _tau;
detectShadows = _detectShadows;
shadowVal = _shadowVal;
//make new mode if needed and exit
if (!bFitsPDF)
{
if (nModes==m_nM)
{
//replace the weakest
pGauss=pGMM+m_nM-1;
}
else
{
//add a new one
pGauss=pGMM+nModes;
nModes++;
}
if (nModes==1)
{
pGauss->weight=1;
}
else
{
pGauss->weight=m_fAlphaT;
//renormalize all weights
for (iMode = 0; iMode < nModes-1; iMode++)
{
pGMM[iMode].weight *=m_fOneMinAlpha;
}
}
//init
memcpy(pGauss->mean,data,nD*sizeof(float));
pGauss->variance=m_fVarInit;
//sort
//find the new place for it
for (int iLocal = nModes-1;iLocal>0;iLocal--)
{
//check one up
if (m_fAlphaT < (pGMM[iLocal-1].weight))
{
break;
}
else
{
//swap one up
CvPBGMMGaussian temp = pGMM[iLocal];
pGMM[iLocal] = pGMM[iLocal-1];
pGMM[iLocal-1] = temp;
}
}
cvtfunc = src->depth() != CV_32F ? getConvertFunc(src->depth(), CV_32F) : 0;
}
//set the number of modes
*pModesUsed=nModes;
return bBackground;
}
// a bit more efficient implementation for common case of 3 channel (rgb) images
CV_INLINE int _icvUpdateGMM_C3(float r,float g, float b,
unsigned char* pModesUsed,
CvPBGMMGaussian* pGMM,
int m_nM,
float m_fAlphaT,
float m_fTb,
float m_fTB,
float m_fTg,
float m_fVarInit,
float m_fVarMax,
float m_fVarMin,
float m_fPrune)
{
//calculate distances to the modes (+ sort)
//here we need to go in descending order!!!
bool bBackground=0;//return value -> true - the pixel classified as background
//internal:
bool bFitsPDF=0;//if it remains zero a new GMM mode will be added
float m_fOneMinAlpha=1-m_fAlphaT;
unsigned char nModes=*pModesUsed;//current number of modes in GMM
float totalWeight=0.0f;
//////
//go through all modes
int iMode=0;
CvPBGMMGaussian* pGauss=pGMM;
for (;iMode<nModes;iMode++,pGauss++)
void operator()(const BlockedRange& range) const
{
float weight = pGauss->weight;//need only weight if fit is found
weight=m_fOneMinAlpha*weight+m_fPrune;
////
//fit not found yet
if (!bFitsPDF)
{
//check if it belongs to some of the remaining modes
float var=pGauss->variance;
//calculate difference and distance
float muR = pGauss->mean[0];
float muG = pGauss->mean[1];
float muB = pGauss->mean[2];
int y0 = range.begin(), y1 = range.end();
int ncols = src->cols, nchannels = src->channels();
AutoBuffer<float> buf(src->cols*nchannels);
float alpha1 = 1.f - alphaT;
float dData[CV_CN_MAX];
float dR=muR - r;
float dG=muG - g;
float dB=muB - b;
float dist2=(dR*dR+dG*dG+dB*dB);
for( int y = y0; y < y1; y++ )
{
const float* data = buf;
if( cvtfunc )
cvtfunc( src->ptr(y), src->step, 0, 0, (uchar*)data, 0, Size(ncols*nchannels, 1), 0);
else
data = src->ptr<float>(y);
//background? - m_fTb - usually larger than m_fTg
if ((totalWeight<m_fTB)&&(dist2<m_fTb*var))
bBackground=1;
//check fit
if (dist2<m_fTg*var)
float* mean = mean0 + ncols*nmixtures*nchannels*y;
GMM* gmm = gmm0 + ncols*nmixtures*y;
uchar* modesUsed = modesUsed0 + ncols*y;
uchar* mask = dst->ptr(y);
for( int x = 0; x < ncols; x++, data += nchannels, gmm += nmixtures, mean += nmixtures*nchannels )
{
/////
//belongs to the mode - bFitsPDF becomes 1
bFitsPDF=1;
//update distribution
//calculate distances to the modes (+ sort)
//here we need to go in descending order!!!
bool background = false;//return value -> true - the pixel classified as background
//update weight
weight+=m_fAlphaT;
//internal:
bool fitsPDF = false;//if it remains zero a new GMM mode will be added
int nmodes = modesUsed[x], nNewModes = nmodes;//current number of modes in GMM
float totalWeight = 0.f;
float k = m_fAlphaT/weight;
//update mean
pGauss->mean[0] = muR - k*(dR);
pGauss->mean[1] = muG - k*(dG);
pGauss->mean[2] = muB - k*(dB);
//update variance
float varnew = var + k*(dist2-var);
//limit the variance
pGauss->variance = MIN(m_fVarMax,MAX(varnew,m_fVarMin));
//sort
//all other weights are at the same place and
//only the matched (iModes) is higher -> just find the new place for it
for (int iLocal = iMode;iLocal>0;iLocal--)
float* mean_m = mean;
//////
//go through all modes
for( int mode = 0; mode < nmodes; mode++, mean_m += nchannels )
{
//check one up
if (weight < (pGMM[iLocal-1].weight))
float weight = alpha1*gmm[mode].weight + prune;//need only weight if fit is found
////
//fit not found yet
if( !fitsPDF )
{
break;
}
else
//check if it belongs to some of the remaining modes
float var = gmm[mode].variance;
//calculate difference and distance
float dist2;
if( nchannels == 3 )
{
dData[0] = mean_m[0] - data[0];
dData[1] = mean_m[1] - data[1];
dData[2] = mean_m[2] - data[2];
dist2 = dData[0]*dData[0] + dData[1]*dData[1] + dData[2]*dData[2];
}
else
{
dist2 = 0.f;
for( int c = 0; c < nchannels; c++ )
{
dData[c] = mean_m[c] - data[c];
dist2 += dData[c]*dData[c];
}
}
//background? - Tb - usually larger than Tg
if( totalWeight < TB && dist2 < Tb*var )
background = true;
//check fit
if( dist2 < Tg*var )
{
/////
//belongs to the mode
fitsPDF = true;
//update distribution
//update weight
weight += alphaT;
float k = alphaT/weight;
//update mean
for( int c = 0; c < nchannels; c++ )
mean_m[c] -= k*dData[c];
//update variance
float varnew = var + k*(dist2-var);
//limit the variance
varnew = MAX(varnew, varMin);
varnew = MIN(varnew, varMax);
gmm[mode].variance = varnew;
//sort
//all other weights are at the same place and
//only the matched (iModes) is higher -> just find the new place for it
for( int i = mode; i > 0; i-- )
{
//check one up
if( weight < gmm[i-1].weight )
break;
//swap one up
std::swap(gmm[i], gmm[i-1]);
for( int c = 0; c < nchannels; c++ )
std::swap(mean[i*nchannels + c], mean[(i-1)*nchannels + c]);
}
//belongs to the mode - bFitsPDF becomes 1
/////
}
}//!bFitsPDF)
//check prune
if( weight < -prune )
{
//swap one up
CvPBGMMGaussian temp = pGMM[iLocal];
pGMM[iLocal] = pGMM[iLocal-1];
pGMM[iLocal-1] = temp;
pGauss--;
weight = 0.0;
nmodes--;
}
gmm[mode].weight = weight;//update weight by the calculated value
totalWeight += weight;
}
//belongs to the mode - bFitsPDF becomes 1
/////
}
}//!bFitsPDF)
//check prunning
if (weight<-m_fPrune)
{
weight=0.0;
nModes--;
}
pGauss->weight=weight;
totalWeight+=weight;
}
//go through all modes
//////
//renormalize weights
for (iMode = 0; iMode < nModes; iMode++)
{
pGMM[iMode].weight = pGMM[iMode].weight/totalWeight;
}
//make new mode if needed and exit
if (!bFitsPDF)
{
if (nModes==m_nM)
{
//replace the weakest
pGauss=pGMM+m_nM-1;
}
else
{
//add a new one
pGauss=pGMM+nModes;
nModes++;
}
if (nModes==1)
{
pGauss->weight=1;
}
else
{
pGauss->weight=m_fAlphaT;
//renormalize all weights
for (iMode = 0; iMode < nModes-1; iMode++)
{
pGMM[iMode].weight *=m_fOneMinAlpha;
}
}
//init
pGauss->mean[0]=r;
pGauss->mean[1]=g;
pGauss->mean[2]=b;
pGauss->variance=m_fVarInit;
//sort
//find the new place for it
for (int iLocal = nModes-1;iLocal>0;iLocal--)
{
//check one up
if (m_fAlphaT < (pGMM[iLocal-1].weight))
//go through all modes
//////
//renormalize weights
totalWeight = 1.f/totalWeight;
for( int mode = 0; mode < nmodes; mode++ )
{
gmm[mode].weight *= totalWeight;
}
nmodes = nNewModes;
//make new mode if needed and exit
if( !fitsPDF )
{
// replace the weakest or add a new one
int mode = nmodes == nmixtures ? nmixtures-1 : nmodes++;
if (nmodes==1)
gmm[mode].weight = 1.f;
else
{
break;
gmm[mode].weight = alphaT;
// renormalize all other weights
for( int i = 0; i < nmodes-1; i++ )
gmm[i].weight *= alpha1;
}
else
// init
for( int c = 0; c < nchannels; c++ )
mean[mode*nchannels + c] = data[c];
gmm[mode].variance = varInit;
//sort
//find the new place for it
for( int i = nmodes - 1; i > 0; i-- )
{
//swap one up
CvPBGMMGaussian temp = pGMM[iLocal];
pGMM[iLocal] = pGMM[iLocal-1];
pGMM[iLocal-1] = temp;
// check one up
if( alphaT < gmm[i-1].weight )
break;
// swap one up
std::swap(gmm[i], gmm[i-1]);
for( int c = 0; c < nchannels; c++ )
std::swap(mean[i*nchannels + c], mean[(i-1)*nchannels + c]);
}
}
}
//set the number of modes
*pModesUsed=nModes;
return bBackground;
}
//the main function to update the background model
static void icvUpdatePixelBackgroundGMM2( const CvArr* srcarr, CvArr* dstarr ,
CvPBGMMGaussian *pGMM,
unsigned char *pUsedModes,
//CvGaussBGStatModel2Params* pGMMPar,
int nM,
float fTb,
float fTB,
float fTg,
float fVarInit,
float fVarMax,
float fVarMin,
float fCT,
float fTau,
bool bShadowDetection,
unsigned char nShadowDetection,
float alpha)
{
CvMat sstub, *src = cvGetMat(srcarr, &sstub);
CvMat dstub, *dst = cvGetMat(dstarr, &dstub);
CvSize size = cvGetMatSize(src);
int nD=CV_MAT_CN(src->type);
//reshape if possible
if( CV_IS_MAT_CONT(src->type & dst->type) )
{
size.width *= size.height;
size.height = 1;
}
int x, y;
float data[CV_BGFG_MOG2_NDMAX];
float prune=-alpha*fCT;
//general nD
if (nD!=3)
{
switch (CV_MAT_DEPTH(src->type))
{
case CV_8U:
for( y = 0; y < size.height; y++ )
{
uchar* sptr = src->data.ptr + src->step*y;
uchar* pDataOutput = dst->data.ptr + dst->step*y;
for( x = 0; x < size.width; x++,
pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
{
//convert data
for (int iD=0;iD<nD;iD++) data[iD]=float(sptr[iD]);
//update GMM model
int result = _icvUpdateGMM(data,nD,pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
//detect shadows in the foreground
if (bShadowDetection)
if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
//generate output
(* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
}
}
break;
case CV_16S:
for( y = 0; y < size.height; y++ )
{
short* sptr = src->data.s + src->step*y;
uchar* pDataOutput = dst->data.ptr + dst->step*y;
for( x = 0; x < size.width; x++,
pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
{
//convert data
for (int iD=0;iD<nD;iD++) data[iD]=float(sptr[iD]);
//update GMM model
int result = _icvUpdateGMM(data,nD,pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
//detect shadows in the foreground
if (bShadowDetection)
if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
//generate output
(* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
}
}
break;
case CV_16U:
for( y = 0; y < size.height; y++ )
{
unsigned short* sptr = (unsigned short*) (src->data.s + src->step*y);
uchar* pDataOutput = dst->data.ptr + dst->step*y;
for( x = 0; x < size.width; x++,
pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
{
//convert data
for (int iD=0;iD<nD;iD++) data[iD]=float(sptr[iD]);
//update GMM model
int result = _icvUpdateGMM(data,nD,pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
//detect shadows in the foreground
if (bShadowDetection)
if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
//generate output
(* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
}
}
break;
case CV_32S:
for( y = 0; y < size.height; y++ )
{
int* sptr = src->data.i + src->step*y;
uchar* pDataOutput = dst->data.ptr + dst->step*y;
for( x = 0; x < size.width; x++,
pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
{
//convert data
for (int iD=0;iD<nD;iD++) data[iD]=float(sptr[iD]);
//update GMM model
int result = _icvUpdateGMM(data,nD,pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
//detect shadows in the foreground
if (bShadowDetection)
if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
//generate output
(* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
}
}
break;
case CV_32F:
for( y = 0; y < size.height; y++ )
{
float* sptr = src->data.fl + src->step*y;
uchar* pDataOutput = dst->data.ptr + dst->step*y;
for( x = 0; x < size.width; x++,
pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
{
//update GMM model
int result = _icvUpdateGMM(sptr,nD,pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
//detect shadows in the foreground
if (bShadowDetection)
if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
//generate output
(* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
}
}
break;
case CV_64F:
for( y = 0; y < size.height; y++ )
{
double* sptr = src->data.db + src->step*y;
uchar* pDataOutput = dst->data.ptr + dst->step*y;
for( x = 0; x < size.width; x++,
pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
{
//convert data
for (int iD=0;iD<nD;iD++) data[iD]=float(sptr[iD]);
//update GMM model
int result = _icvUpdateGMM(data,nD,pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
//detect shadows in the foreground
if (bShadowDetection)
if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
//generate output
(* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
}
}
break;
}
}else ///if (nD==3) - a bit faster
{
switch (CV_MAT_DEPTH(src->type))
{
case CV_8U:
for( y = 0; y < size.height; y++ )
{
uchar* sptr = src->data.ptr + src->step*y;
uchar* pDataOutput = dst->data.ptr + dst->step*y;
for( x = 0; x < size.width; x++,
pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
{
//convert data
data[0]=float(sptr[0]),data[1]=float(sptr[1]),data[2]=float(sptr[2]);
//update GMM model
int result = _icvUpdateGMM_C3(data[0],data[1],data[2],pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
//detect shadows in the foreground
if (bShadowDetection)
if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
//generate output
(* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
}
}
break;
case CV_16S:
for( y = 0; y < size.height; y++ )
{
short* sptr = src->data.s + src->step*y;
uchar* pDataOutput = dst->data.ptr + dst->step*y;
for( x = 0; x < size.width; x++,
pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
{
//convert data
data[0]=float(sptr[0]),data[1]=float(sptr[1]),data[2]=float(sptr[2]);
//update GMM model
int result = _icvUpdateGMM_C3(data[0],data[1],data[2],pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
//detect shadows in the foreground
if (bShadowDetection)
if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
//generate output
(* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
}
}
break;
case CV_16U:
for( y = 0; y < size.height; y++ )
{
unsigned short* sptr = (unsigned short*) src->data.s + src->step*y;
uchar* pDataOutput = dst->data.ptr + dst->step*y;
for( x = 0; x < size.width; x++,
pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
{
//convert data
data[0]=float(sptr[0]),data[1]=float(sptr[1]),data[2]=float(sptr[2]);
//update GMM model
int result = _icvUpdateGMM_C3(data[0],data[1],data[2],pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
//detect shadows in the foreground
if (bShadowDetection)
if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
//generate output
(* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
}
}
break;
case CV_32S:
for( y = 0; y < size.height; y++ )
{
int* sptr = src->data.i + src->step*y;
uchar* pDataOutput = dst->data.ptr + dst->step*y;
for( x = 0; x < size.width; x++,
pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
{
//convert data
data[0]=float(sptr[0]),data[1]=float(sptr[1]),data[2]=float(sptr[2]);
//update GMM model
int result = _icvUpdateGMM_C3(data[0],data[1],data[2],pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
//detect shadows in the foreground
if (bShadowDetection)
if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
//generate output
(* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
}
}
break;
case CV_32F:
for( y = 0; y < size.height; y++ )
{
float* sptr = src->data.fl + src->step*y;
uchar* pDataOutput = dst->data.ptr + dst->step*y;
for( x = 0; x < size.width; x++,
pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
{
//update GMM model
int result = _icvUpdateGMM_C3(sptr[0],sptr[1],sptr[2],pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
//detect shadows in the foreground
if (bShadowDetection)
if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
//generate output
(* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
}
}
break;
case CV_64F:
for( y = 0; y < size.height; y++ )
{
double* sptr = src->data.db + src->step*y;
uchar* pDataOutput = dst->data.ptr + dst->step*y;
for( x = 0; x < size.width; x++,
pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
{
//convert data
data[0]=float(sptr[0]),data[1]=float(sptr[1]),data[2]=float(sptr[2]);
//update GMM model
int result = _icvUpdateGMM_C3(data[0],data[1],data[2],pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
//detect shadows in the foreground
if (bShadowDetection)
if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
//generate output
(* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
}
//set the number of modes
modesUsed[x] = nmodes;
mask[x] = background ? 0 :
detectShadows && detectShadowGMM(data, nchannels, nmodes, gmm, mean, Tb, TB, tau) ?
shadowVal : 255;
}
}
break;
}
}//a bit faster for nD=3;
}
namespace cv
{
// Default MOG2 parameter values; every value is sourced from the
// corresponding CV_BGFG_MOG2_* compile-time constant. The '2' suffix
// distinguishes these from the plain-MOG defaults declared elsewhere
// in this module. The two *Threshold* values are squared standard
// deviations (the constants are std-dev thresholds).
static const int defaultHistory2 = CV_BGFG_MOG2_WINDOW_SIZE;
static const float defaultVarThreshold2 = CV_BGFG_MOG2_STD_THRESHOLD*CV_BGFG_MOG2_STD_THRESHOLD;
static const int defaultNMixtures2 = CV_BGFG_MOG2_NGAUSSIANS;
static const float defaultBackgroundRatio2 = CV_BGFG_MOG2_BACKGROUND_THRESHOLD;
static const float defaultVarThresholdGen2 = CV_BGFG_MOG2_STD_THRESHOLD_GENERATE*CV_BGFG_MOG2_STD_THRESHOLD_GENERATE;
static const float defaultVarInit2 = CV_BGFG_MOG2_VAR_INIT;
static const float defaultVarMax2 = CV_BGFG_MOG2_VAR_MAX;
static const float defaultVarMin2 = CV_BGFG_MOG2_VAR_MIN;
static const float defaultfCT2 = CV_BGFG_MOG2_CT;
// value written into the foreground mask for shadow pixels (default)
static const unsigned char defaultnShadowDetection2 = (unsigned char)CV_BGFG_MOG2_SHADOW_VALUE;
static const float defaultfTau = CV_BGFG_MOG2_SHADOW_TAU;
// NOTE(review): the declarations below (src, dst, gmm0, mean0, ...) look
// like the data members of a per-row parallel invoker struct whose
// enclosing 'struct ... {' header is missing from this span — presumably
// unresolved merge/diff residue. As written they declare namespace-scope
// globals. Confirm against the upstream file before relying on this span.
const Mat* src;
Mat* dst;
GMM* gmm0;
float* mean0;
uchar* modesUsed0;
int nmixtures;
float alphaT, Tb, TB, Tg;
float varInit, varMin, varMax, prune, tau;
bool detectShadows;
uchar shadowVal;
BinaryFunc cvtfunc;
};
BackgroundSubtractorMOG2::BackgroundSubtractorMOG2()
{
......@@ -1003,13 +535,13 @@ void BackgroundSubtractorMOG2::initialize(Size _frameSize, int _frameType)
nframes = 0;
int nchannels = CV_MAT_CN(frameType);
CV_Assert( nchannels <= CV_BGFG_MOG2_NDMAX );
CV_Assert( nchannels <= CV_CN_MAX );
// for each gaussian mixture of each pixel bg model we store ...
// the mixture weight (w),
// the mean (nchannels values) and
// the covariance
bgmodel.create( 1, frameSize.height*frameSize.width*nmixtures*(2 + CV_BGFG_MOG2_NDMAX), CV_32F );
bgmodel.create( 1, frameSize.height*frameSize.width*nmixtures*(2 + nchannels), CV_32F );
//make the array for keeping track of the used modes per pixel - all zeros at start
bgmodelUsedModes.create(frameSize,CV_8U);
bgmodelUsedModes = Scalar::all(0);
......@@ -1029,86 +561,71 @@ void BackgroundSubtractorMOG2::operator()(InputArray _image, OutputArray _fgmask
++nframes;
learningRate = learningRate >= 0 && nframes > 1 ? learningRate : 1./min( 2*nframes, history );
CV_Assert(learningRate >= 0);
CvMat _cimage = image, _cfgmask = fgmask;
if (learningRate > 0)
icvUpdatePixelBackgroundGMM2( &_cimage, &_cfgmask,
(CvPBGMMGaussian*) bgmodel.data,
bgmodelUsedModes.data,
nmixtures,//nM
varThreshold,//fTb
backgroundRatio,//fTB
varThresholdGen,//fTg,
fVarInit,
fVarMax,
fVarMin,
fCT,
fTau,
bShadowDetection,
nShadowDetection,
float(learningRate));
if (learningRate > 0)
{
parallel_for(BlockedRange(0, image.rows),
MOG2Invoker(image, fgmask,
(GMM*)bgmodel.data,
(float*)(bgmodel.data + sizeof(GMM)*nmixtures*image.rows*image.cols),
bgmodelUsedModes.data, nmixtures, (float)learningRate,
(float)varThreshold,
backgroundRatio, varThresholdGen,
fVarInit, fVarMin, fVarMax, fCT, fTau,
bShadowDetection, nShadowDetection));
}
}
// Computes the weight-averaged mean background image from the per-pixel
// Gaussian mixtures and writes it (1- or 3-channel) to backgroundImage.
//
// NOTE(review): this span appears to be unresolved merge/diff residue —
// the old implementation (CvPBGMMGaussian layout, per-channel meanVal[]
// accumulation) and the new one (GMM + separate Vec3f mean array) are
// interleaved without diff markers. It declares both pGMM and gmm from the
// same buffer, has two bodies for the accumulation loop, and the switch
// below contains duplicate 'case 1:', 'case 3:' and 'default:' labels,
// which will not compile. Reconcile against the upstream commit before use.
void BackgroundSubtractorMOG2::getBackgroundImage(OutputArray backgroundImage) const
{
#if _MSC_VER >= 1200
#pragma warning( push )
#pragma warning( disable : 4127 )
#endif
CV_Assert(CV_BGFG_MOG2_NDMAX == 3);
#if _MSC_VER >= 1200
#pragma warning( pop )
#endif
int nchannels = CV_MAT_CN(frameType);
CV_Assert( nchannels == 3 );
Mat meanBackground(frameSize, CV_8UC3, Scalar::all(0));
int firstGaussianIdx = 0;
// NOTE(review): old-version pointer and new-version pointers both present.
CvPBGMMGaussian* pGMM = (CvPBGMMGaussian*)bgmodel.data;
const GMM* gmm = (GMM*)bgmodel.data;
const Vec3f* mean = reinterpret_cast<const Vec3f*>(gmm + frameSize.width*frameSize.height*nmixtures);
for(int row=0; row<meanBackground.rows; row++)
{
for(int col=0; col<meanBackground.cols; col++)
{
int nModes = static_cast<int>(bgmodelUsedModes.at<uchar>(row, col));
double meanVal[CV_BGFG_MOG2_NDMAX] = {0.0, 0.0, 0.0};
double totalWeight = 0.0;
for(int gaussianIdx = firstGaussianIdx; gaussianIdx < firstGaussianIdx + nModes; gaussianIdx++)
// NOTE(review): new-version accumulator declarations duplicated below.
int nmodes = bgmodelUsedModes.at<uchar>(row, col);
Vec3f meanVal;
float totalWeight = 0.f;
for(int gaussianIdx = firstGaussianIdx; gaussianIdx < firstGaussianIdx + nmodes; gaussianIdx++)
{
CvPBGMMGaussian gaussian = pGMM[gaussianIdx];
GMM gaussian = gmm[gaussianIdx];
meanVal += gaussian.weight * mean[gaussianIdx];
totalWeight += gaussian.weight;
for(int chIdx = 0; chIdx < CV_BGFG_MOG2_NDMAX; chIdx++)
{
meanVal[chIdx] += gaussian.weight * gaussian.mean[chIdx];
}
// modes are ordered so we can stop once enough weight is accumulated
if(totalWeight > backgroundRatio)
break;
}
Vec3f val = Vec3f((float)meanVal[0], (float)meanVal[1], (float)meanVal[2]) * (float)(1.0 / totalWeight);
meanBackground.at<Vec3b>(row, col) = Vec3b(val);
meanVal *= (1.f / totalWeight);
meanBackground.at<Vec3b>(row, col) = Vec3b(meanVal);
firstGaussianIdx += nmixtures;
}
}
// Emit the requested channel count; duplicate labels below are residue.
switch(CV_MAT_CN(frameType))
{
case 1:
{
vector<Mat> channels;
split(meanBackground, channels);
channels[0].copyTo(backgroundImage);
break;
}
case 1:
{
vector<Mat> channels;
split(meanBackground, channels);
channels[0].copyTo(backgroundImage);
break;
}
case 3:
{
meanBackground.copyTo(backgroundImage);
break;
}
case 3:
{
meanBackground.copyTo(backgroundImage);
break;
}
default:
CV_Error(CV_StsUnsupportedFormat, "");
default:
CV_Error(CV_StsUnsupportedFormat, "");
}
}
......
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv
{
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Registry factory: builds a default-configured MOG background subtractor.
// Ownership of the returned Algorithm passes to the caller (normally a Ptr<>).
static Algorithm* createMOG()
{
    return new BackgroundSubtractorMOG();
}
// Lazily-constructed registry record for "BackgroundSubtractor.MOG",
// bound to the createMOG factory. The function-local static guarantees a
// single instance is shared by all callers.
static AlgorithmInfo& mog_info()
{
    static AlgorithmInfo info("BackgroundSubtractor.MOG", createMOG);
    return info;
}
// Forces mog_info() to run during static initialization so the MOG entry
// is registered before main() without an explicit initModule_video() call.
static AlgorithmInfo& mog_info_auto = mog_info();
// Returns the AlgorithmInfo record for this class, registering the tunable
// parameters (history, nmixtures, backgroundRatio, noiseSigma) on first use.
// NOTE(review): 'volatile' is not a synchronization primitive — concurrent
// first calls could run the registration twice; confirm info() is only
// reached after single-threaded module initialization.
AlgorithmInfo* BackgroundSubtractorMOG::info() const
{
    static volatile bool paramsRegistered = false;
    if( !paramsRegistered )
    {
        BackgroundSubtractorMOG tmp;
        AlgorithmInfo& ai = mog_info();
        ai.addParam(tmp, "history", tmp.history);
        ai.addParam(tmp, "nmixtures", tmp.nmixtures);
        ai.addParam(tmp, "backgroundRatio", tmp.backgroundRatio);
        ai.addParam(tmp, "noiseSigma", tmp.noiseSigma);
        paramsRegistered = true;
    }
    return &mog_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Registry factory: builds a default-configured MOG2 background subtractor.
// Ownership of the returned Algorithm passes to the caller (normally a Ptr<>).
static Algorithm* createMOG2()
{
    return new BackgroundSubtractorMOG2();
}
// Lazily-constructed registry record for "BackgroundSubtractor.MOG2",
// bound to the createMOG2 factory. The function-local static guarantees a
// single instance is shared by all callers.
static AlgorithmInfo& mog2_info()
{
    static AlgorithmInfo info("BackgroundSubtractor.MOG2", createMOG2);
    return info;
}
// Forces mog2_info() to run during static initialization so the MOG2 entry
// is registered before main() without an explicit initModule_video() call.
static AlgorithmInfo& mog2_info_auto = mog2_info();
// Returns the AlgorithmInfo record for this class, registering the tunable
// parameters (history, varThreshold, detectShadows) on first use.
// NOTE(review): 'volatile' is not a synchronization primitive — concurrent
// first calls could run the registration twice; confirm info() is only
// reached after single-threaded module initialization.
AlgorithmInfo* BackgroundSubtractorMOG2::info() const
{
    static volatile bool paramsRegistered = false;
    if( !paramsRegistered )
    {
        BackgroundSubtractorMOG2 tmp;
        AlgorithmInfo& ai = mog2_info();
        ai.addParam(tmp, "history", tmp.history);
        ai.addParam(tmp, "varThreshold", tmp.varThreshold);
        ai.addParam(tmp, "detectShadows", tmp.bShadowDetection);
        paramsRegistered = true;
    }
    return &mog2_info();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
bool initModule_video(void)
{
Ptr<Algorithm> mog = createMOG(), mog2 = createMOG2();
return mog->info() != 0 && mog2->info() != 0;
}
}
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/video/background_segm.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdio.h>
......@@ -17,7 +18,7 @@ void help()
const char* keys =
{
"{c |camera |false | use camera or not}"
"{c |camera |true | use camera or not}"
"{fn|file_name|tree.avi | movie file }"
};
......@@ -49,7 +50,8 @@ int main(int argc, const char** argv)
namedWindow("foreground image", CV_WINDOW_NORMAL);
namedWindow("mean background image", CV_WINDOW_NORMAL);
BackgroundSubtractorMOG2 bg_model;
BackgroundSubtractorMOG2 bg_model;//(100, 3, 0.3, 5);
Mat img, fgmask, fgimg;
for(;;)
......@@ -59,6 +61,8 @@ int main(int argc, const char** argv)
if( img.empty() )
break;
//cvtColor(_img, img, COLOR_BGR2GRAY);
if( fgimg.empty() )
fgimg.create(img.size(), img.type());
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment