Commit 07e0f7bf authored by Vadim Pisarevsky

refactored video module; use the new-style algorithms now

parent 14a0abbf
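Before the diff itself, a minimal before/after sketch of what "new-style algorithms" means for callers, based on the hunks below; the header path and the choice of MOG2 are illustrative assumptions rather than lines taken from this commit:

```cpp
#include <opencv2/video/background_segm.hpp>  // assumed header for the background subtractors

void beforeAfter(const cv::Mat& frame, cv::Mat& fgmask)
{
    // Old style (removed by this commit): concrete class constructed directly,
    // parameters set by string name, update through operator().
    //   cv::BackgroundSubtractorMOG2 mog2;
    //   mog2.set("detectShadows", false);
    //   mog2(frame, fgmask);

    // New style: factory function returning cv::Ptr to an abstract interface,
    // typed setters, update through apply().
    cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 = cv::createBackgroundSubtractorMOG2();
    mog2->setDetectShadows(false);
    mog2->apply(frame, fgmask);
}
```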
@@ -893,7 +893,7 @@ CV_INIT_ALGORITHM(LBPH, "FaceRecognizer.LBPH",
 bool initModule_contrib()
 {
-    Ptr<Algorithm> efaces = createEigenfaces(), ffaces = createFisherfaces(), lbph = createLBPH();
+    Ptr<Algorithm> efaces = createEigenfaces_hidden(), ffaces = createFisherfaces_hidden(), lbph = createLBPH_hidden();
     return efaces->info() != 0 && ffaces->info() != 0 && lbph->info() != 0;
 }
...
@@ -254,14 +254,14 @@ namespace cv
 } //namespace cv
 #define CV_INIT_ALGORITHM(classname, algname, memberinit) \
-    static ::cv::Algorithm* create##classname() \
+    static ::cv::Algorithm* create##classname##_hidden() \
     { \
         return new classname; \
     } \
     \
     static ::cv::AlgorithmInfo& classname##_info() \
     { \
-        static ::cv::AlgorithmInfo classname##_info_var(algname, create##classname); \
+        static ::cv::AlgorithmInfo classname##_info_var(algname, create##classname##_hidden); \
         return classname##_info_var; \
     } \
     \
...
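For orientation, here is a rough hand-expansion of the visible portion of the renamed macro for one concrete registration, the `CV_INIT_ALGORITHM(EM, "StatModel.EM", ...)` call that appears in the ml hunk further down; this is an illustration only, and the unshown tail of the macro (the memberinit body and the static `*_info_auto` registration object) is omitted:

```cpp
// Approximate preprocessor output for the part of CV_INIT_ALGORITHM shown above,
// instantiated with classname=EM, algname="StatModel.EM" (illustrative, not from the diff).
static ::cv::Algorithm* createEM_hidden()
{
    return new EM;
}

static ::cv::AlgorithmInfo& EM_info()
{
    static ::cv::AlgorithmInfo EM_info_var("StatModel.EM", createEM_hidden);
    return EM_info_var;
}
```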
@@ -614,10 +614,10 @@ PERF_TEST_P(Video_Cn_LearningRate, Video_MOG,
     }
     else
     {
-        cv::BackgroundSubtractorMOG mog;
+        cv::Ptr<cv::BackgroundSubtractor> mog = cv::createBackgroundSubtractorMOG();
         cv::Mat foreground;
-        mog(frame, foreground, learningRate);
+        mog->apply(frame, foreground, learningRate);
         for (int i = 0; i < 10; ++i)
         {
@@ -635,7 +635,7 @@ PERF_TEST_P(Video_Cn_LearningRate, Video_MOG,
             }
             startTimer(); next();
-            mog(frame, foreground, learningRate);
+            mog->apply(frame, foreground, learningRate);
             stopTimer();
         }
@@ -709,12 +709,12 @@ PERF_TEST_P(Video_Cn, Video_MOG2,
     }
     else
     {
-        cv::BackgroundSubtractorMOG2 mog2;
-        mog2.set("detectShadows", false);
+        cv::Ptr<cv::BackgroundSubtractor> mog2 = cv::createBackgroundSubtractorMOG2();
+        mog2->set("detectShadows", false);
         cv::Mat foreground;
-        mog2(frame, foreground);
+        mog2->apply(frame, foreground);
         for (int i = 0; i < 10; ++i)
         {
@@ -732,7 +732,7 @@ PERF_TEST_P(Video_Cn, Video_MOG2,
             }
             startTimer(); next();
-            mog2(frame, foreground);
+            mog2->apply(frame, foreground);
             stopTimer();
         }
@@ -789,7 +789,7 @@ PERF_TEST_P(Video_Cn, Video_MOG2GetBackgroundImage,
     }
     else
     {
-        cv::BackgroundSubtractorMOG2 mog2;
+        cv::Ptr<cv::BackgroundSubtractor> mog2 = cv::createBackgroundSubtractorMOG2();
         cv::Mat foreground;
         for (int i = 0; i < 10; ++i)
@@ -807,12 +807,12 @@ PERF_TEST_P(Video_Cn, Video_MOG2GetBackgroundImage,
                 cv::swap(temp, frame);
             }
-            mog2(frame, foreground);
+            mog2->apply(frame, foreground);
         }
         cv::Mat background;
-        TEST_CYCLE() mog2.getBackgroundImage(background);
+        TEST_CYCLE() mog2->getBackgroundImage(background);
         CPU_SANITY_CHECK(background);
     }
@@ -958,11 +958,11 @@ PERF_TEST_P(Video_Cn_MaxFeatures, Video_GMG,
         cv::Mat foreground;
         cv::Mat zeros(frame.size(), CV_8UC1, cv::Scalar::all(0));
-        cv::BackgroundSubtractorGMG gmg;
-        gmg.set("maxFeatures", maxFeatures);
-        gmg.initialize(frame.size(), 0.0, 255.0);
+        cv::Ptr<cv::BackgroundSubtractor> gmg = cv::createBackgroundSubtractorGMG();
+        gmg->set("maxFeatures", maxFeatures);
+        //gmg.initialize(frame.size(), 0.0, 255.0);
-        gmg(frame, foreground);
+        gmg->apply(frame, foreground);
         for (int i = 0; i < 150; ++i)
         {
@@ -985,7 +985,7 @@ PERF_TEST_P(Video_Cn_MaxFeatures, Video_GMG,
             }
             startTimer(); next();
-            gmg(frame, foreground);
+            gmg->apply(frame, foreground);
             stopTimer();
         }
...
@@ -245,8 +245,8 @@ GPU_TEST_P(MOG2, Update)
     mog2.bShadowDetection = detectShadow;
     cv::gpu::GpuMat foreground = createMat(frame.size(), CV_8UC1, useRoi);
-    cv::BackgroundSubtractorMOG2 mog2_gold;
-    mog2_gold.set("detectShadows", detectShadow);
+    cv::Ptr<cv::BackgroundSubtractorMOG2> mog2_gold = cv::createBackgroundSubtractorMOG2();
+    mog2_gold.setDetectShadows(detectShadow);
     cv::Mat foreground_gold;
     for (int i = 0; i < 10; ++i)
@@ -263,7 +263,7 @@ GPU_TEST_P(MOG2, Update)
         mog2(loadMat(frame, useRoi), foreground);
-        mog2_gold(frame, foreground_gold);
+        mog2_gold->apply(frame, foreground_gold);
         if (detectShadow)
         {
@@ -290,8 +290,8 @@ GPU_TEST_P(MOG2, getBackgroundImage)
     mog2.bShadowDetection = detectShadow;
     cv::gpu::GpuMat foreground;
-    cv::BackgroundSubtractorMOG2 mog2_gold;
-    mog2_gold.set("detectShadows", detectShadow);
+    cv::Ptr<cv::BackgroundSubtractorMOG2> mog2_gold = cv::createBackgroundSubtractorMOG2();
+    mog2_gold.setDetectShadows(detectShadow);
     cv::Mat foreground_gold;
     for (int i = 0; i < 10; ++i)
@@ -301,14 +301,14 @@ GPU_TEST_P(MOG2, getBackgroundImage)
         mog2(loadMat(frame, useRoi), foreground);
-        mog2_gold(frame, foreground_gold);
+        mog2_gold->apply(frame, foreground_gold);
     }
     cv::gpu::GpuMat background = createMat(frame.size(), frame.type(), useRoi);
     mog2.getBackgroundImage(background);
     cv::Mat background_gold;
-    mog2_gold.getBackgroundImage(background_gold);
+    mog2_gold->getBackgroundImage(background_gold);
     ASSERT_MAT_NEAR(background_gold, background, 0);
 }
...
@@ -50,7 +50,7 @@ icvReleaseGaussianBGModel( CvGaussBGModel** bg_model )
     if( *bg_model )
     {
-        delete (cv::BackgroundSubtractorMOG*)((*bg_model)->mog);
+        delete (cv::Ptr<cv::BackgroundSubtractor>*)((*bg_model)->mog);
         cvReleaseImage( &(*bg_model)->background );
         cvReleaseImage( &(*bg_model)->foreground );
         memset( *bg_model, 0, sizeof(**bg_model) );
@@ -65,10 +65,10 @@ icvUpdateGaussianBGModel( IplImage* curr_frame, CvGaussBGModel* bg_model, doubl
 {
     cv::Mat image = cv::cvarrToMat(curr_frame), mask = cv::cvarrToMat(bg_model->foreground);
-    cv::BackgroundSubtractorMOG* mog = (cv::BackgroundSubtractorMOG*)(bg_model->mog);
+    cv::Ptr<cv::BackgroundSubtractor>* mog = (cv::Ptr<cv::BackgroundSubtractor>*)(bg_model->mog);
     CV_Assert(mog != 0);
-    (*mog)(image, mask, learningRate);
+    (*mog)->apply(image, mask, learningRate);
     bg_model->countFrames++;
     return 0;
@@ -105,13 +105,11 @@ cvCreateGaussianBGModel( IplImage* first_frame, CvGaussBGStatModelParams* parame
     bg_model->params = params;
-    cv::BackgroundSubtractorMOG* mog =
-        new cv::BackgroundSubtractorMOG(params.win_size,
-                                        params.n_gauss,
-                                        params.bg_threshold,
-                                        params.variance_init);
-    bg_model->mog = mog;
+    cv::Ptr<cv::BackgroundSubtractor> mog = cv::createBackgroundSubtractorMOG(params.win_size, params.n_gauss,
+                                                                              params.bg_threshold);
+    cv::Ptr<cv::BackgroundSubtractor>* pmog = new cv::Ptr<cv::BackgroundSubtractor>;
+    *pmog = mog;
+    bg_model->mog = pmog;
     CvSize sz = cvGetSize(first_frame);
     bg_model->background = cvCreateImage(sz, IPL_DEPTH_8U, first_frame->nChannels);
...
@@ -56,7 +56,7 @@ CV_INIT_ALGORITHM(EM, "StatModel.EM",
 bool initModule_ml(void)
 {
-    Ptr<Algorithm> em = createEM();
+    Ptr<Algorithm> em = createEM_hidden();
     return em->info() != 0;
 }
...
@@ -67,7 +67,7 @@ CV_INIT_ALGORITHM(SIFT, "Feature2D.SIFT",
 bool initModule_nonfree(void)
 {
-    Ptr<Algorithm> sift = createSIFT(), surf = createSURF();
+    Ptr<Algorithm> sift = createSIFT_hidden(), surf = createSURF_hidden();
     return sift->info() != 0 && surf->info() != 0;
 }
...
@@ -125,6 +125,7 @@ typedef Ptr<FeatureDetector> Ptr_FeatureDetector;
 typedef Ptr<DescriptorExtractor> Ptr_DescriptorExtractor;
 typedef Ptr<Feature2D> Ptr_Feature2D;
 typedef Ptr<DescriptorMatcher> Ptr_DescriptorMatcher;
+typedef Ptr<BackgroundSubtractor> Ptr_BackgroundSubtractor;
 typedef Ptr<cv::softcascade::ChannelFeatureBuilder> Ptr_ChannelFeatureBuilder;
...
@@ -58,8 +58,8 @@ CV_INIT_ALGORITHM(SCascade, "CascadeDetector.SCascade",
 bool initModule_softcascade(void)
 {
-    Ptr<Algorithm> sc = createSCascade();
-    Ptr<Algorithm> sc1 = createDetector();
+    Ptr<Algorithm> sc = createSCascade_hidden();
+    Ptr<Algorithm> sc1 = createDetector_hidden();
     return (sc1->info() != 0) && (sc->info() != 0);
 }
...
@@ -219,23 +219,6 @@ CVAPI(const CvMat*) cvKalmanCorrect( CvKalman* kalman, const CvMat* measurement
 #define cvKalmanUpdateByMeasurement cvKalmanCorrect
-/****************************************************************************************\
-*                           Image Alignment (ECC algorithm)                              *
-\****************************************************************************************/
-enum
-{
-    MOTION_TRANSLATION,
-    MOTION_EUCLIDEAN,
-    MOTION_AFFINE,
-    MOTION_HOMOGRAPHY
-};
-/* Estimate the geometric transformation between 2 images (area-based alignment) */
-CVAPI(double) cvFindTransformECC (const CvArr* templateImage, const CvArr* inputImage,
-                                  CvMat* warpMatrix,
-                                  const int motionType,
-                                  const CvTermCriteria criteria);
 #ifdef __cplusplus
 }
@@ -341,6 +324,14 @@ CV_EXPORTS_W void calcOpticalFlowFarneback( InputArray prev, InputArray next,
 CV_EXPORTS_W Mat estimateRigidTransform( InputArray src, InputArray dst,
                                          bool fullAffine);
+enum
+{
+    MOTION_TRANSLATION=0,
+    MOTION_EUCLIDEAN=1,
+    MOTION_AFFINE=2,
+    MOTION_HOMOGRAPHY=3
+};
 //! estimates the best-fit Translation, Euclidean, Affine or Perspective Transformation
 // with respect to Enhanced Correlation Coefficient criterion that maps one image to
 // another (area-based alignment)
...
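The C wrapper for ECC alignment disappears here while the C++ entry point and the relocated MOTION_* constants remain, so a hedged usage sketch of that C++ API may help; the function and enum names come from the hunks above and below, while the header path, criteria values, and the helper's name are illustrative assumptions:

```cpp
#include <opencv2/video/tracking.hpp>  // assumed header for findTransformECC and MOTION_*

// Align 'input' to 'templ' (both single-channel) with an affine model and
// return the final correlation coefficient; 'warp' receives the 2x3 transform.
double alignAffineECC(const cv::Mat& templ, const cv::Mat& input, cv::Mat& warp)
{
    warp = cv::Mat::eye(2, 3, CV_32F);  // identity as the starting guess
    cv::TermCriteria criteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 50, 0.001);
    return cv::findTransformECC(templ, input, warp, cv::MOTION_AFFINE, criteria);
}
```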
@@ -7,9 +7,11 @@
 //  copy or use the software.
 //
 //
-//                        Intel License Agreement
+//                          License Agreement
+//                For Open Source Computer Vision Library
 //
 // Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
 // Third party copyrights are property of their respective owners.
 //
 // Redistribution and use in source and binary forms, with or without modification,
@@ -22,7 +24,7 @@
 //     this list of conditions and the following disclaimer in the documentation
 //     and/or other materials provided with the distribution.
 //
-//   * The name of Intel Corporation may not be used to endorse or promote products
+//   * The name of the copyright holders may not be used to endorse or promote products
 //     derived from this software without specific prior written permission.
 //
 // This software is provided by the copyright holders and contributors "as is" and
@@ -58,15 +60,6 @@
 namespace cv
 {
-BackgroundSubtractor::~BackgroundSubtractor() {}
-void BackgroundSubtractor::operator()(InputArray, OutputArray, double)
-{
-}
-
-void BackgroundSubtractor::getBackgroundImage(OutputArray) const
-{
-}
 static const int defaultNMixtures = 5;
 static const int defaultHistory = 200;
 static const double defaultBackgroundRatio = 0.7;
@@ -74,55 +67,88 @@ static const double defaultVarThreshold = 2.5*2.5;
 static const double defaultNoiseSigma = 30*0.5;
 static const double defaultInitialWeight = 0.05;
-BackgroundSubtractorMOG::BackgroundSubtractorMOG()
-{
-    frameSize = Size(0,0);
-    frameType = 0;
-    nframes = 0;
-    nmixtures = defaultNMixtures;
-    history = defaultHistory;
-    varThreshold = defaultVarThreshold;
-    backgroundRatio = defaultBackgroundRatio;
-    noiseSigma = defaultNoiseSigma;
-}
-
-BackgroundSubtractorMOG::BackgroundSubtractorMOG(int _history, int _nmixtures,
-                                                 double _backgroundRatio,
-                                                 double _noiseSigma)
-{
-    frameSize = Size(0,0);
-    frameType = 0;
-    nframes = 0;
-    nmixtures = std::min(_nmixtures > 0 ? _nmixtures : defaultNMixtures, 8);
-    history = _history > 0 ? _history : defaultHistory;
-    varThreshold = defaultVarThreshold;
-    backgroundRatio = std::min(_backgroundRatio > 0 ? _backgroundRatio : 0.95, 1.);
-    noiseSigma = _noiseSigma <= 0 ? defaultNoiseSigma : _noiseSigma;
-}
-
-BackgroundSubtractorMOG::~BackgroundSubtractorMOG()
-{
-}
-
-void BackgroundSubtractorMOG::initialize(Size _frameSize, int _frameType)
-{
-    frameSize = _frameSize;
-    frameType = _frameType;
-    nframes = 0;
-    int nchannels = CV_MAT_CN(frameType);
-    CV_Assert( CV_MAT_DEPTH(frameType) == CV_8U );
-    // for each gaussian mixture of each pixel bg model we store ...
-    // the mixture sort key (w/sum_of_variances), the mixture weight (w),
-    // the mean (nchannels values) and
-    // the diagonal covariance matrix (another nchannels values)
-    bgmodel.create( 1, frameSize.height*frameSize.width*nmixtures*(2 + 2*nchannels), CV_32F );
-    bgmodel = Scalar::all(0);
-}
+class BackgroundSubtractorMOGImpl : public BackgroundSubtractorMOG
+{
+public:
+    //! the default constructor
+    BackgroundSubtractorMOGImpl()
+    {
+        frameSize = Size(0,0);
+        frameType = 0;
+        nframes = 0;
+        nmixtures = defaultNMixtures;
+        history = defaultHistory;
+        varThreshold = defaultVarThreshold;
+        backgroundRatio = defaultBackgroundRatio;
+        noiseSigma = defaultNoiseSigma;
+    }
+    // the full constructor that takes the length of the history,
+    // the number of gaussian mixtures, the background ratio parameter and the noise strength
+    BackgroundSubtractorMOGImpl(int _history, int _nmixtures, double _backgroundRatio, double _noiseSigma=0)
+    {
+        frameSize = Size(0,0);
+        frameType = 0;
+        nframes = 0;
+        nmixtures = std::min(_nmixtures > 0 ? _nmixtures : defaultNMixtures, 8);
+        history = _history > 0 ? _history : defaultHistory;
+        varThreshold = defaultVarThreshold;
+        backgroundRatio = std::min(_backgroundRatio > 0 ? _backgroundRatio : 0.95, 1.);
+        noiseSigma = _noiseSigma <= 0 ? defaultNoiseSigma : _noiseSigma;
+    }
+    //! the update operator
+    virtual void apply(InputArray image, OutputArray fgmask, double learningRate=0);
+    //! re-initiaization method
+    virtual void initialize(Size _frameSize, int _frameType)
+    {
+        frameSize = _frameSize;
+        frameType = _frameType;
+        nframes = 0;
+        int nchannels = CV_MAT_CN(frameType);
+        CV_Assert( CV_MAT_DEPTH(frameType) == CV_8U );
+        // for each gaussian mixture of each pixel bg model we store ...
+        // the mixture sort key (w/sum_of_variances), the mixture weight (w),
+        // the mean (nchannels values) and
+        // the diagonal covariance matrix (another nchannels values)
+        bgmodel.create( 1, frameSize.height*frameSize.width*nmixtures*(2 + 2*nchannels), CV_32F );
+        bgmodel = Scalar::all(0);
+    }
+    virtual AlgorithmInfo* info() const { return 0; }
+    virtual void getBackgroundImage(OutputArray) const
+    {
+        CV_Error( CV_StsNotImplemented, "" );
+    }
+    virtual int getHistory() const { return history; }
+    virtual void setHistory(int _nframes) { history = _nframes; }
+    virtual int getNMixtures() const { return nmixtures; }
+    virtual void setNMixtures(int nmix) { nmixtures = nmix; }
+    virtual double getBackgroundRatio() const { return backgroundRatio; }
+    virtual void setBackgroundRatio(double _backgroundRatio) { backgroundRatio = _backgroundRatio; }
+    virtual double getNoiseSigma() const { return noiseSigma; }
+    virtual void setNoiseSigma(double _noiseSigma) { noiseSigma = _noiseSigma; }
+protected:
+    Size frameSize;
+    int frameType;
+    Mat bgmodel;
+    int nframes;
+    int history;
+    int nmixtures;
+    double varThreshold;
+    double backgroundRatio;
+    double noiseSigma;
+};
 template<typename VT> struct MixData
@@ -391,7 +417,7 @@ static void process8uC3( const Mat& image, Mat& fgmask, double learningRate,
     }
 }
-void BackgroundSubtractorMOG::operator()(InputArray _image, OutputArray _fgmask, double learningRate)
+void BackgroundSubtractorMOGImpl::apply(InputArray _image, OutputArray _fgmask, double learningRate)
 {
     Mat image = _image.getMat();
     bool needToInitialize = nframes == 0 || learningRate >= 1 || image.size() != frameSize || image.type() != frameType;
@@ -415,6 +441,12 @@ void BackgroundSubtractorMOG::operator()(InputArray _image, OutputArray _fgmask,
         CV_Error( CV_StsUnsupportedFormat, "Only 1- and 3-channel 8-bit images are supported in BackgroundSubtractorMOG" );
 }
+Ptr<BackgroundSubtractorMOG> createBackgroundSubtractorMOG(int history, int nmixtures,
+                                                           double backgroundRatio, double noiseSigma)
+{
+    return new BackgroundSubtractorMOGImpl(history, nmixtures, backgroundRatio, noiseSigma);
+}
 }
 /* End of file. */
...
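As a quick orientation to the refactored implementation above, here is a hedged sketch of how the new factory and the Impl setters are meant to work together; the header path, parameter values, and helper name are illustrative assumptions rather than lines from this diff:

```cpp
#include <opencv2/video/background_segm.hpp>  // assumed header for BackgroundSubtractorMOG

void mogSketch(const cv::Mat& frame, cv::Mat& fgmask)
{
    // history=200, nmixtures=5, backgroundRatio=0.7 mirror the defaults defined
    // near the top of the file above; noiseSigma is then adjusted via the setter.
    cv::Ptr<cv::BackgroundSubtractorMOG> mog = cv::createBackgroundSubtractorMOG(200, 5, 0.7);
    mog->setNoiseSigma(10);
    mog->apply(frame, fgmask, 0.01);  // learningRate = 0.01
}
```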
@@ -305,23 +305,8 @@ static void update_warping_matrix_ECC (Mat& map_matrix, const Mat& update, const
         mapPtr[3] = (float) sin(new_theta);
         mapPtr[1] = -mapPtr[3];
     }
 }
-CV_IMPL double cvFindTransformECC (const CvArr* _image1, const CvArr* _image2,
-                                   CvMat* _map_matrix,
-                                   const int motionType,
-                                   const CvTermCriteria _criteria)
-{
-    Mat image1 = cvarrToMat(_image1);
-    Mat image2 = cvarrToMat(_image2);
-    Mat map_matrix = cvarrToMat(_map_matrix);
-    double cc = cv::findTransformECC(image1, image2, map_matrix, motionType,
-            TermCriteria(TermCriteria::EPS+TermCriteria::COUNT, _criteria.max_iter, _criteria.epsilon));
-    return cc;
-}
 double cv::findTransformECC(InputArray templateImage,
                             InputArray inputImage,
...
@@ -40,176 +40,6 @@
 //M*/
 #include "precomp.hpp"
CV_IMPL CvKalman*
cvCreateKalman( int DP, int MP, int CP )
{
CvKalman *kalman = 0;
if( DP <= 0 || MP <= 0 )
CV_Error( CV_StsOutOfRange,
"state and measurement vectors must have positive number of dimensions" );
if( CP < 0 )
CP = DP;
/* allocating memory for the structure */
kalman = (CvKalman *)cvAlloc( sizeof( CvKalman ));
memset( kalman, 0, sizeof(*kalman));
kalman->DP = DP;
kalman->MP = MP;
kalman->CP = CP;
kalman->state_pre = cvCreateMat( DP, 1, CV_32FC1 );
cvZero( kalman->state_pre );
kalman->state_post = cvCreateMat( DP, 1, CV_32FC1 );
cvZero( kalman->state_post );
kalman->transition_matrix = cvCreateMat( DP, DP, CV_32FC1 );
cvSetIdentity( kalman->transition_matrix );
kalman->process_noise_cov = cvCreateMat( DP, DP, CV_32FC1 );
cvSetIdentity( kalman->process_noise_cov );
kalman->measurement_matrix = cvCreateMat( MP, DP, CV_32FC1 );
cvZero( kalman->measurement_matrix );
kalman->measurement_noise_cov = cvCreateMat( MP, MP, CV_32FC1 );
cvSetIdentity( kalman->measurement_noise_cov );
kalman->error_cov_pre = cvCreateMat( DP, DP, CV_32FC1 );
kalman->error_cov_post = cvCreateMat( DP, DP, CV_32FC1 );
cvZero( kalman->error_cov_post );
kalman->gain = cvCreateMat( DP, MP, CV_32FC1 );
if( CP > 0 )
{
kalman->control_matrix = cvCreateMat( DP, CP, CV_32FC1 );
cvZero( kalman->control_matrix );
}
kalman->temp1 = cvCreateMat( DP, DP, CV_32FC1 );
kalman->temp2 = cvCreateMat( MP, DP, CV_32FC1 );
kalman->temp3 = cvCreateMat( MP, MP, CV_32FC1 );
kalman->temp4 = cvCreateMat( MP, DP, CV_32FC1 );
kalman->temp5 = cvCreateMat( MP, 1, CV_32FC1 );
#if 1
kalman->PosterState = kalman->state_pre->data.fl;
kalman->PriorState = kalman->state_post->data.fl;
kalman->DynamMatr = kalman->transition_matrix->data.fl;
kalman->MeasurementMatr = kalman->measurement_matrix->data.fl;
kalman->MNCovariance = kalman->measurement_noise_cov->data.fl;
kalman->PNCovariance = kalman->process_noise_cov->data.fl;
kalman->KalmGainMatr = kalman->gain->data.fl;
kalman->PriorErrorCovariance = kalman->error_cov_pre->data.fl;
kalman->PosterErrorCovariance = kalman->error_cov_post->data.fl;
#endif
return kalman;
}
CV_IMPL void
cvReleaseKalman( CvKalman** _kalman )
{
CvKalman *kalman;
if( !_kalman )
CV_Error( CV_StsNullPtr, "" );
kalman = *_kalman;
if( !kalman )
return;
/* freeing the memory */
cvReleaseMat( &kalman->state_pre );
cvReleaseMat( &kalman->state_post );
cvReleaseMat( &kalman->transition_matrix );
cvReleaseMat( &kalman->control_matrix );
cvReleaseMat( &kalman->measurement_matrix );
cvReleaseMat( &kalman->process_noise_cov );
cvReleaseMat( &kalman->measurement_noise_cov );
cvReleaseMat( &kalman->error_cov_pre );
cvReleaseMat( &kalman->gain );
cvReleaseMat( &kalman->error_cov_post );
cvReleaseMat( &kalman->temp1 );
cvReleaseMat( &kalman->temp2 );
cvReleaseMat( &kalman->temp3 );
cvReleaseMat( &kalman->temp4 );
cvReleaseMat( &kalman->temp5 );
memset( kalman, 0, sizeof(*kalman));
/* deallocating the structure */
cvFree( _kalman );
}
CV_IMPL const CvMat*
cvKalmanPredict( CvKalman* kalman, const CvMat* control )
{
if( !kalman )
CV_Error( CV_StsNullPtr, "" );
/* update the state */
/* x'(k) = A*x(k) */
cvMatMulAdd( kalman->transition_matrix, kalman->state_post, 0, kalman->state_pre );
if( control && kalman->CP > 0 )
/* x'(k) = x'(k) + B*u(k) */
cvMatMulAdd( kalman->control_matrix, control, kalman->state_pre, kalman->state_pre );
/* update error covariance matrices */
/* temp1 = A*P(k) */
cvMatMulAdd( kalman->transition_matrix, kalman->error_cov_post, 0, kalman->temp1 );
/* P'(k) = temp1*At + Q */
cvGEMM( kalman->temp1, kalman->transition_matrix, 1, kalman->process_noise_cov, 1,
kalman->error_cov_pre, CV_GEMM_B_T );
/* handle the case when there will be measurement before the next predict */
cvCopy(kalman->state_pre, kalman->state_post);
return kalman->state_pre;
}
CV_IMPL const CvMat*
cvKalmanCorrect( CvKalman* kalman, const CvMat* measurement )
{
if( !kalman || !measurement )
CV_Error( CV_StsNullPtr, "" );
/* temp2 = H*P'(k) */
cvMatMulAdd( kalman->measurement_matrix, kalman->error_cov_pre, 0, kalman->temp2 );
/* temp3 = temp2*Ht + R */
cvGEMM( kalman->temp2, kalman->measurement_matrix, 1,
kalman->measurement_noise_cov, 1, kalman->temp3, CV_GEMM_B_T );
/* temp4 = inv(temp3)*temp2 = Kt(k) */
cvSolve( kalman->temp3, kalman->temp2, kalman->temp4, CV_SVD );
/* K(k) */
cvTranspose( kalman->temp4, kalman->gain );
/* temp5 = z(k) - H*x'(k) */
cvGEMM( kalman->measurement_matrix, kalman->state_pre, -1, measurement, 1, kalman->temp5 );
/* x(k) = x'(k) + K(k)*temp5 */
cvMatMulAdd( kalman->gain, kalman->temp5, kalman->state_pre, kalman->state_post );
/* P(k) = P'(k) - K(k)*temp2 */
cvGEMM( kalman->gain, kalman->temp2, -1, kalman->error_cov_pre, 1,
kalman->error_cov_post, 0 );
return kalman->state_post;
}
 namespace cv
 {
...
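The block of C functions removed above (cvCreateKalman, cvReleaseKalman, cvKalmanPredict, cvKalmanCorrect) leaves the C++ cv::KalmanFilter class as the way forward; a minimal constant-velocity sketch follows, where the header path, matrix values, and helper name are illustrative assumptions rather than anything taken from this diff:

```cpp
#include <opencv2/video/tracking.hpp>  // assumed header for cv::KalmanFilter

void kalmanSketch()
{
    // 2 state variables (position, velocity), 1 measurement (position), no control input
    cv::KalmanFilter kf(2, 1, 0);
    kf.transitionMatrix = (cv::Mat_<float>(2, 2) << 1, 1, 0, 1);
    cv::setIdentity(kf.measurementMatrix);
    cv::setIdentity(kf.processNoiseCov, cv::Scalar::all(1e-4));
    cv::setIdentity(kf.measurementNoiseCov, cv::Scalar::all(1e-1));
    cv::setIdentity(kf.errorCovPost, cv::Scalar::all(1));

    cv::Mat measurement = (cv::Mat_<float>(1, 1) << 0.5f);
    cv::Mat prediction = kf.predict();            // x'(k) = A*x(k), as in the removed cvKalmanPredict
    cv::Mat estimate   = kf.correct(measurement); // fold the measurement in, as in cvKalmanCorrect
    (void)prediction; (void)estimate;
}
```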
@@ -644,18 +644,3 @@ void cv::calcOpticalFlowFarneback( InputArray _prev0, InputArray _next0,
         prevFlow = flow;
     }
 }
-CV_IMPL void cvCalcOpticalFlowFarneback(
-    const CvArr* _prev, const CvArr* _next,
-    CvArr* _flow, double pyr_scale, int levels,
-    int winsize, int iterations, int poly_n,
-    double poly_sigma, int flags )
-{
-    cv::Mat prev = cv::cvarrToMat(_prev), next = cv::cvarrToMat(_next);
-    cv::Mat flow = cv::cvarrToMat(_flow);
-    CV_Assert( flow.size() == prev.size() && flow.type() == CV_32FC2 );
-    cv::calcOpticalFlowFarneback( prev, next, flow, pyr_scale, levels,
-                                  winsize, iterations, poly_n, poly_sigma, flags );
-}
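The removed wrapper simply forwarded to the C++ cv::calcOpticalFlowFarneback that stays in this file; a short hedged usage sketch, with the header path, parameter values, and helper name chosen purely for illustration:

```cpp
#include <opencv2/video/tracking.hpp>  // assumed header for calcOpticalFlowFarneback

void farnebackSketch(const cv::Mat& prevGray, const cv::Mat& nextGray)
{
    cv::Mat flow;  // filled with one (dx, dy) vector per pixel, type CV_32FC2
    cv::calcOpticalFlowFarneback(prevGray, nextGray, flow,
                                 0.5,   // pyr_scale: downscale factor between pyramid levels
                                 3,     // levels
                                 15,    // winsize
                                 3,     // iterations per level
                                 5,     // poly_n: neighborhood for polynomial expansion
                                 1.2,   // poly_sigma
                                 0);    // flags
}
```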
@@ -41,7 +41,6 @@
 //M*/
 #include "precomp.hpp"
-#include "simpleflow.hpp"
 //
 // 2D dense optical flow algorithm from the following paper:
@@ -54,6 +53,39 @@
 namespace cv
 {
+static const uchar MASK_TRUE_VALUE = (uchar)255;
+
+inline static float dist(const Vec3b& p1, const Vec3b& p2) {
+    return (float)((p1[0] - p2[0]) * (p1[0] - p2[0]) +
+                   (p1[1] - p2[1]) * (p1[1] - p2[1]) +
+                   (p1[2] - p2[2]) * (p1[2] - p2[2]));
+}
+
+inline static float dist(const Vec2f& p1, const Vec2f& p2) {
+    return (p1[0] - p2[0]) * (p1[0] - p2[0]) +
+           (p1[1] - p2[1]) * (p1[1] - p2[1]);
+}
+
+inline static float dist(const Point2f& p1, const Point2f& p2) {
+    return (p1.x - p2.x) * (p1.x - p2.x) +
+           (p1.y - p2.y) * (p1.y - p2.y);
+}
+
+inline static float dist(float x1, float y1, float x2, float y2) {
+    return (x1 - x2) * (x1 - x2) +
+           (y1 - y2) * (y1 - y2);
+}
+
+inline static int dist(int x1, int y1, int x2, int y2) {
+    return (x1 - x2) * (x1 - x2) +
+           (y1 - y2) * (y1 - y2);
+}
+
+template<class T>
+inline static T min(T t1, T t2, T t3) {
+    return (t1 <= t2 && t1 <= t3) ? t1 : min(t2, t3);
+}
+
 static void removeOcclusions(const Mat& flow,
                              const Mat& flow_inv,
                              float occ_thr,
...
@@ -46,50 +46,9 @@
 namespace cv
 {
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
-CV_INIT_ALGORITHM(BackgroundSubtractorMOG, "BackgroundSubtractor.MOG",
-                  obj.info()->addParam(obj, "history", obj.history);
-                  obj.info()->addParam(obj, "nmixtures", obj.nmixtures);
-                  obj.info()->addParam(obj, "backgroundRatio", obj.backgroundRatio);
-                  obj.info()->addParam(obj, "noiseSigma", obj.noiseSigma));
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
-CV_INIT_ALGORITHM(BackgroundSubtractorMOG2, "BackgroundSubtractor.MOG2",
-                  obj.info()->addParam(obj, "history", obj.history);
-                  obj.info()->addParam(obj, "nmixtures", obj.nmixtures);
-                  obj.info()->addParam(obj, "varThreshold", obj.varThreshold);
-                  obj.info()->addParam(obj, "detectShadows", obj.bShadowDetection));
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
-CV_INIT_ALGORITHM(BackgroundSubtractorGMG, "BackgroundSubtractor.GMG",
-                  obj.info()->addParam(obj, "maxFeatures", obj.maxFeatures,false,0,0,
-                                       "Maximum number of features to store in histogram. Harsh enforcement of sparsity constraint.");
-                  obj.info()->addParam(obj, "learningRate", obj.learningRate,false,0,0,
-                                       "Adaptation rate of histogram. Close to 1, slow adaptation. Close to 0, fast adaptation, features forgotten quickly.");
-                  obj.info()->addParam(obj, "initializationFrames", obj.numInitializationFrames,false,0,0,
-                                       "Number of frames to use to initialize histograms of pixels.");
-                  obj.info()->addParam(obj, "quantizationLevels", obj.quantizationLevels,false,0,0,
-                                       "Number of discrete colors to be used in histograms. Up-front quantization.");
-                  obj.info()->addParam(obj, "backgroundPrior", obj.backgroundPrior,false,0,0,
-                                       "Prior probability that each individual pixel is a background pixel.");
-                  obj.info()->addParam(obj, "smoothingRadius", obj.smoothingRadius,false,0,0,
-                                       "Radius of smoothing kernel to filter noise from FG mask image.");
-                  obj.info()->addParam(obj, "decisionThreshold", obj.decisionThreshold,false,0,0,
-                                       "Threshold for FG decision rule. Pixel is FG if posterior probability exceeds threshold.");
-                  obj.info()->addParam(obj, "updateBackgroundModel", obj.updateBackgroundModel,false,0,0,
-                                       "Perform background model update."));
-
 bool initModule_video(void)
 {
-    bool all = true;
-    all &= !BackgroundSubtractorMOG_info_auto.name().empty();
-    all &= !BackgroundSubtractorMOG2_info_auto.name().empty();
-    all &= !BackgroundSubtractorGMG_info_auto.name().empty();
-    return all;
+    return true;
 }
 }
...
@@ -37,8 +37,7 @@ void CV_BackgroundSubtractorTest::run(int)
     int width = 2 + ((unsigned int)rng)%98; //!< Mat will be 2 to 100 in width and height
     int height = 2 + ((unsigned int)rng)%98;
-    Ptr<BackgroundSubtractorGMG> fgbg =
-            Algorithm::create<BackgroundSubtractorGMG>("BackgroundSubtractor.GMG");
+    Ptr<BackgroundSubtractorGMG> fgbg = createBackgroundSubtractorGMG();
     Mat fgmask;
     if (fgbg.empty())
@@ -47,19 +46,13 @@ void CV_BackgroundSubtractorTest::run(int)
     /**
      * Set a few parameters
      */
-    fgbg->set("smoothingRadius",7);
-    fgbg->set("decisionThreshold",0.7);
-    fgbg->set("initializationFrames",120);
+    fgbg->setSmoothingRadius(7);
+    fgbg->setDecisionThreshold(0.7);
+    fgbg->setNumFrames(120);
     /**
      * Generate bounds for the values in the matrix for each type
      */
-    uchar maxuc = 0, minuc = 0;
-    char maxc = 0, minc = 0;
-    unsigned int maxui = 0, minui = 0;
-    int maxi=0, mini = 0;
-    long int maxli = 0, minli = 0;
-    float maxf = 0, minf = 0;
     double maxd = 0, mind = 0;
     /**
@@ -69,34 +62,34 @@ void CV_BackgroundSubtractorTest::run(int)
     if (type == CV_8U)
     {
         uchar half = UCHAR_MAX/2;
-        maxuc = (unsigned char)rng.uniform(half+32, UCHAR_MAX);
-        minuc = (unsigned char)rng.uniform(0, half-32);
+        maxd = (unsigned char)rng.uniform(half+32, UCHAR_MAX);
+        mind = (unsigned char)rng.uniform(0, half-32);
     }
     else if (type == CV_8S)
     {
-        maxc = (char)rng.uniform(32, CHAR_MAX);
-        minc = (char)rng.uniform(CHAR_MIN, -32);
+        maxd = (char)rng.uniform(32, CHAR_MAX);
+        mind = (char)rng.uniform(CHAR_MIN, -32);
     }
     else if (type == CV_16U)
     {
         ushort half = USHRT_MAX/2;
-        maxui = (unsigned int)rng.uniform(half+32, USHRT_MAX);
-        minui = (unsigned int)rng.uniform(0, half-32);
+        maxd = (unsigned int)rng.uniform(half+32, USHRT_MAX);
+        mind = (unsigned int)rng.uniform(0, half-32);
     }
     else if (type == CV_16S)
    {
-        maxi = rng.uniform(32, SHRT_MAX);
-        mini = rng.uniform(SHRT_MIN, -32);
+        maxd = rng.uniform(32, SHRT_MAX);
+        mind = rng.uniform(SHRT_MIN, -32);
     }
     else if (type == CV_32S)
     {
-        maxli = rng.uniform(32, INT_MAX);
-        minli = rng.uniform(INT_MIN, -32);
+        maxd = rng.uniform(32, INT_MAX);
+        mind = rng.uniform(INT_MIN, -32);
     }
     else if (type == CV_32F)
     {
-        maxf = rng.uniform(32.0f, FLT_MAX);
-        minf = rng.uniform(-FLT_MAX, -32.0f);
+        maxd = rng.uniform(32.0f, FLT_MAX);
+        mind = rng.uniform(-FLT_MAX, -32.0f);
     }
     else if (type == CV_64F)
     {
@@ -104,60 +97,22 @@ void CV_BackgroundSubtractorTest::run(int)
         mind = rng.uniform(-DBL_MAX, -32.0);
     }
+    fgbg->setMinVal(mind);
+    fgbg->setMaxVal(maxd);
     Mat simImage = Mat::zeros(height, width, channelsAndType);
-    const unsigned int numLearningFrames = 120;
-    for (unsigned int i = 0; i < numLearningFrames; ++i)
+    int numLearningFrames = 120;
+    for (int i = 0; i < numLearningFrames; ++i)
     {
         /**
          * Genrate simulated "image" for any type. Values always confined to upper half of range.
         */
-        if (type == CV_8U)
+        rng.fill(simImage, RNG::UNIFORM, (mind + maxd)*0.5, maxd);
{
rng.fill(simImage,RNG::UNIFORM,(unsigned char)(minuc/2+maxuc/2),maxuc);
if (i == 0)
fgbg->initialize(simImage.size(),minuc,maxuc);
}
else if (type == CV_8S)
{
rng.fill(simImage,RNG::UNIFORM,(char)(minc/2+maxc/2),maxc);
if (i==0)
fgbg->initialize(simImage.size(),minc,maxc);
}
else if (type == CV_16U)
{
rng.fill(simImage,RNG::UNIFORM,(unsigned int)(minui/2+maxui/2),maxui);
if (i==0)
fgbg->initialize(simImage.size(),minui,maxui);
}
else if (type == CV_16S)
{
rng.fill(simImage,RNG::UNIFORM,(int)(mini/2+maxi/2),maxi);
if (i==0)
fgbg->initialize(simImage.size(),mini,maxi);
}
else if (type == CV_32F)
{
rng.fill(simImage,RNG::UNIFORM,(float)(minf/2.0+maxf/2.0),maxf);
if (i==0)
fgbg->initialize(simImage.size(),minf,maxf);
}
else if (type == CV_32S)
{
rng.fill(simImage,RNG::UNIFORM,(long int)(minli/2+maxli/2),maxli);
if (i==0)
fgbg->initialize(simImage.size(),minli,maxli);
}
else if (type == CV_64F)
{
rng.fill(simImage,RNG::UNIFORM,(double)(mind/2.0+maxd/2.0),maxd);
if (i==0)
fgbg->initialize(simImage.size(),mind,maxd);
}
         /**
          * Feed simulated images into background subtractor
          */
-        (*fgbg)(simImage,fgmask);
+        fgbg->apply(simImage,fgmask);
         Mat fullbg = Mat::zeros(simImage.rows, simImage.cols, CV_8U);
         //! fgmask should be entirely background during training
@@ -166,22 +121,9 @@ void CV_BackgroundSubtractorTest::run(int)
             ts->set_failed_test_info( code );
     }
     //! generate last image, distinct from training images
-    if (type == CV_8U)
+    rng.fill(simImage, RNG::UNIFORM, mind, maxd);
rng.fill(simImage,RNG::UNIFORM,minuc,minuc);
else if (type == CV_8S)
rng.fill(simImage,RNG::UNIFORM,minc,minc);
else if (type == CV_16U)
rng.fill(simImage,RNG::UNIFORM,minui,minui);
else if (type == CV_16S)
rng.fill(simImage,RNG::UNIFORM,mini,mini);
else if (type == CV_32F)
rng.fill(simImage,RNG::UNIFORM,minf,minf);
else if (type == CV_32S)
rng.fill(simImage,RNG::UNIFORM,minli,minli);
else if (type == CV_64F)
rng.fill(simImage,RNG::UNIFORM,mind,mind);
-    (*fgbg)(simImage,fgmask);
+    fgbg->apply(simImage,fgmask);
     //! now fgmask should be entirely foreground
     Mat fullfg = 255*Mat::ones(simImage.rows, simImage.cols, CV_8U);
     code = cvtest::cmpEps2( ts, fgmask, fullfg, 255, false, "The final foreground mask" );
...
@@ -153,7 +153,7 @@ bool CV_RigidTransform_Test::testImage()
     Mat aff_est = estimateRigidTransform(img, rotated, true);
-    const double thres = 0.03;
+    const double thres = 0.033;
     if (norm(aff_est, aff, NORM_INF) > thres)
     {
         ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
...
@@ -32,16 +32,13 @@ int main(int argc, char** argv)
     setUseOptimized(true);
     setNumThreads(8);
-    Ptr<BackgroundSubtractorGMG> fgbg = Algorithm::create<BackgroundSubtractorGMG>("BackgroundSubtractor.GMG");
+    Ptr<BackgroundSubtractor> fgbg = createBackgroundSubtractorGMG(20, 0.7);
     if (fgbg.empty())
     {
         std::cerr << "Failed to create BackgroundSubtractor.GMG Algorithm." << std::endl;
         return -1;
     }
-    fgbg->set("initializationFrames", 20);
-    fgbg->set("decisionThreshold", 0.7);
     VideoCapture cap;
     if (argc > 1)
         cap.open(argv[1]);
@@ -65,9 +62,9 @@ int main(int argc, char** argv)
         if (frame.empty())
             break;
-        (*fgbg)(frame, fgmask);
-        frame.copyTo(segm);
+        fgbg->apply(frame, fgmask);
+        frame.convertTo(segm, CV_8U, 0.5);
         add(frame, Scalar(100, 100, 0), segm, fgmask);
         imshow("FG Segmentation", segm);
...
@@ -51,7 +51,7 @@ int main(int argc, const char** argv)
     namedWindow("foreground image", CV_WINDOW_NORMAL);
     namedWindow("mean background image", CV_WINDOW_NORMAL);
-    BackgroundSubtractorMOG2 bg_model;//(100, 3, 0.3, 5);
+    Ptr<BackgroundSubtractor> bg_model = createBackgroundSubtractorMOG2();
     Mat img, fgmask, fgimg;
@@ -68,13 +68,13 @@ int main(int argc, const char** argv)
             fgimg.create(img.size(), img.type());
         //update the model
-        bg_model(img, fgmask, update_bg_model ? -1 : 0);
+        bg_model->apply(img, fgmask, update_bg_model ? -1 : 0);
         fgimg = Scalar::all(0);
         img.copyTo(fgimg, fgmask);
         Mat bgimg;
-        bg_model.getBackgroundImage(bgimg);
+        bg_model->getBackgroundImage(bgimg);
         imshow("image", img);
         imshow("foreground mask", fgmask);
...
@@ -87,15 +87,15 @@ int main(int argc, char** argv)
     namedWindow("video", 1);
     namedWindow("segmented", 1);
-    BackgroundSubtractorMOG bgsubtractor;
-    bgsubtractor.set("noiseSigma", 10);
+    Ptr<BackgroundSubtractorMOG> bgsubtractor=createBackgroundSubtractorMOG();
+    bgsubtractor->setNoiseSigma(10);
     for(;;)
     {
         cap >> tmp_frame;
         if( !tmp_frame.data )
             break;
-        bgsubtractor(tmp_frame, bgmask, update_bg_model ? -1 : 0);
+        bgsubtractor->apply(tmp_frame, bgmask, update_bg_model ? -1 : 0);
         //CvMat _bgmask = bgmask;
         //cvSegmentFGMask(&_bgmask);
         refineSegments(tmp_frame, bgmask, out_frame);
...
@@ -1316,10 +1316,10 @@ TEST(MOG)
     cv::Mat frame;
     cap >> frame;
-    cv::BackgroundSubtractorMOG mog;
+    cv::Ptr<cv::BackgroundSubtractor> mog = cv::createBackgroundSubtractorMOG();
     cv::Mat foreground;
-    mog(frame, foreground, 0.01);
+    mog->apply(frame, foreground, 0.01);
     while (!TestSystem::instance().stop())
     {
@@ -1327,7 +1327,7 @@ TEST(MOG)
         TestSystem::instance().cpuOn();
-        mog(frame, foreground, 0.01);
+        mog->apply(frame, foreground, 0.01);
         TestSystem::instance().cpuOff();
     }
@@ -1367,12 +1367,12 @@ TEST(MOG2)
     cv::Mat frame;
     cap >> frame;
-    cv::BackgroundSubtractorMOG2 mog2;
+    cv::Ptr<cv::BackgroundSubtractor> mog2 = cv::createBackgroundSubtractorMOG2();
     cv::Mat foreground;
     cv::Mat background;
-    mog2(frame, foreground);
-    mog2.getBackgroundImage(background);
+    mog2->apply(frame, foreground);
+    mog2->getBackgroundImage(background);
     while (!TestSystem::instance().stop())
     {
@@ -1380,8 +1380,8 @@ TEST(MOG2)
         TestSystem::instance().cpuOn();
-        mog2(frame, foreground);
-        mog2.getBackgroundImage(background);
+        mog2->apply(frame, foreground);
+        mog2->getBackgroundImage(background);
         TestSystem::instance().cpuOff();
     }
...