Commit a60dc947 authored by Alexey Spizhevoy

Added first version of videostab module

parent 1d2c6ef4
# Short module description used by OpenCV's generated documentation.
set(the_description "Video stabilization")
# videostab requires imgproc, features2d and video; opencv_gpu is optional
# (enables the GPU dense optical flow path).
ocv_define_module(videostab opencv_imgproc opencv_features2d opencv_video OPTIONAL opencv_gpu)
#ifndef __OPENCV_VIDEOSTAB_DEBLURRING_HPP__
#define __OPENCV_VIDEOSTAB_DEBLURRING_HPP__
#include <vector>
#include "opencv2/core/core.hpp"
namespace cv
{
namespace videostab
{
float calcBlurriness(const Mat &frame);
// Base class for frame deblurring algorithms.
// The stabilizer shares its frame / motion / blurriness buffers with the
// deblurer via non-owning pointers installed through the setters below;
// deblur() is then invoked once per output frame.
class IDeblurer
{
public:
    // Initialize every observed pointer to null so accidental use before
    // the setters run fails fast. Fixes previously uninitialized
    // blurrinessRates_ (reading it through blurrinessRates() was UB).
    IDeblurer() : radius_(0), frames_(0), motions_(0), blurrinessRates_(0) {}

    virtual ~IDeblurer() {}

    virtual void setRadius(int val) { radius_ = val; }
    int radius() const { return radius_; }

    virtual void setFrames(const std::vector<Mat> &val) { frames_ = &val; }
    const std::vector<Mat>& frames() const { return *frames_; }

    virtual void setMotions(const std::vector<Mat> &val) { motions_ = &val; }
    const std::vector<Mat>& motions() const { return *motions_; }

    virtual void setBlurrinessRates(const std::vector<float> &val) { blurrinessRates_ = &val; }
    const std::vector<float>& blurrinessRates() const { return *blurrinessRates_; }

    // Deblurs the frame with index idx in place.
    virtual void deblur(int idx, Mat &frame) = 0;

protected:
    int radius_;
    const std::vector<Mat> *frames_;            // not owned
    const std::vector<Mat> *motions_;           // not owned
    const std::vector<float> *blurrinessRates_; // not owned
};
// No-op deblurer: leaves every frame untouched.
class NullDeblurer : public IDeblurer
{
public:
virtual void deblur(int idx, Mat &frame) {}
};
// Deblurs a frame by blending in pixels from sharper neighboring frames,
// weighted by color similarity and relative blurriness (see deblur() in
// the .cpp for the weighting formula).
class WeightingDeblurer : public IDeblurer
{
public:
WeightingDeblurer();
// Controls how quickly a neighbor pixel's weight decays with color
// difference; larger values tolerate bigger differences.
void setSensitivity(float val) { sensitivity_ = val; }
float sensitivity() const { return sensitivity_; }
virtual void deblur(int idx, Mat &frame);
private:
float sensitivity_;
// Per-channel weighted sums and total weight, reused across calls.
Mat_<float> bSum_, gSum_, rSum_, wSum_;
};
} // namespace videostab
} // namespace cv
#endif
#ifndef __OPENCV_VIDEOSTAB_FAST_MARCHING_HPP__
#define __OPENCV_VIDEOSTAB_FAST_MARCHING_HPP__
#include <cmath>
#include <queue>
#include <algorithm>
#include "opencv2/core/core.hpp"
namespace cv
{
namespace videostab
{
// See http://iwi.eldoc.ub.rug.nl/FILES/root/2004/JGraphToolsTelea/2004JGraphToolsTelea.pdf
// See http://iwi.eldoc.ub.rug.nl/FILES/root/2004/JGraphToolsTelea/2004JGraphToolsTelea.pdf
// Fast Marching Method: visits unknown pixels in order of increasing
// distance from the known region, maintaining a min-heap of band pixels.
class FastMarchingMethod
{
public:
FastMarchingMethod() : inf_(1e6f) {}
// Marches over the non-KNOWN pixels of mask, calling inpaint(x,y) for
// each pixel when it enters the narrow band. Defined in fast_marching_inl.hpp.
template <typename Inpaint>
void run(const Mat &mask, Inpaint inpaint);
Mat distanceMap() const { return dist_; }
private:
enum { INSIDE = 0, BAND = 1, KNOWN = 255 };
// Heap entry: a pixel and its current distance estimate.
struct DXY
{
float dist;
int x, y;
DXY() {}
DXY(float dist, int x, int y) : dist(dist), x(x), y(y) {}
bool operator <(const DXY &dxy) const { return dist < dxy.dist; }
};
// Solves the local eikonal update from two neighbor distances.
float solve(int x1, int y1, int x2, int y2) const;
// Maps a pixel to its slot in the narrow-band heap (stored in index_).
int& indexOf(const DXY &dxy) { return index_(dxy.y, dxy.x); }
void heapUp(int idx);
void heapDown(int idx);
void heapAdd(const DXY &dxy);
void heapRemoveMin();
float inf_;
cv::Mat_<uchar> flag_; // flag map
cv::Mat_<float> dist_; // distance map
cv::Mat_<int> index_; // index of point in the narrow band
std::vector<DXY> narrowBand_; // narrow band heap
int size_; // narrow band size
};
} // namespace videostab
} // namespace cv
#include "fast_marching_inl.hpp"
#endif
#ifndef __OPENCV_VIDEOSTAB_FAST_MARCHING_INL_HPP__
#define __OPENCV_VIDEOSTAB_FAST_MARCHING_INL_HPP__
#include "opencv2/videostab/fast_marching.hpp"
namespace cv
{
namespace videostab
{
// Runs the Fast Marching Method over an 8-bit mask. Pixels with value
// KNOWN (255) seed the march; every other pixel is processed in order of
// increasing distance to the known region, and inpaint(x, y) is invoked
// exactly once per processed pixel, when it joins the narrow band.
template <typename Inpaint>
void FastMarchingMethod::run(const cv::Mat &mask, Inpaint inpaint)
{
using namespace std;
using namespace cv;
CV_Assert(mask.type() == CV_8U);
// 4-connected neighbor offsets: left, up, right, down.
static const int lut[4][2] = {{-1,0}, {0,-1}, {1,0}, {0,1}};
mask.copyTo(flag_);
// NOTE(review): create() after copyTo() is a no-op (flag_ already has
// mask's size); the calls look like they were meant in the other order.
flag_.create(mask.size());
dist_.create(mask.size());
index_.create(mask.size());
narrowBand_.clear();
size_ = 0;
// init
for (int y = 0; y < flag_.rows; ++y)
{
for (int x = 0; x < flag_.cols; ++x)
{
if (flag_(y,x) == KNOWN)
dist_(y,x) = 0.f;
else
{
// Count in-bounds neighbors and how many of them are unknown.
int n = 0;
int nunknown = 0;
for (int i = 0; i < 4; ++i)
{
int xn = x + lut[i][0];
int yn = y + lut[i][1];
if (xn >= 0 && xn < flag_.cols && yn >= 0 && yn < flag_.rows)
{
n++;
if (flag_(yn,xn) != KNOWN)
nunknown++;
}
}
if (n>0 && nunknown == n)
{
// Interior pixel: no known neighbor yet.
dist_(y,x) = inf_;
flag_(y,x) = INSIDE;
}
else
{
// Borders the known region: seed the narrow band.
dist_(y,x) = 0.f;
flag_(y,x) = BAND;
inpaint(x, y);
narrowBand_.push_back(DXY(0.f,x,y));
index_(y,x) = size_++;
}
}
}
}
// make heap
for (int i = size_/2-1; i >= 0; --i)
heapDown(i);
// main cycle
while (size_ > 0)
{
// Pop the band pixel with the smallest distance and freeze it.
int x = narrowBand_[0].x;
int y = narrowBand_[0].y;
heapRemoveMin();
flag_(y,x) = KNOWN;
for (int n = 0; n < 4; ++n)
{
int xn = x + lut[n][0];
int yn = y + lut[n][1];
if (xn >= 0 && xn < flag_.cols && yn >= 0 && yn < flag_.rows && flag_(yn,xn) != KNOWN)
{
// Eikonal update from the four diagonal neighbor pairs.
dist_(yn,xn) = min(min(solve(xn-1, yn, xn, yn-1), solve(xn+1, yn, xn, yn-1)),
min(solve(xn-1, yn, xn, yn+1), solve(xn+1, yn, xn, yn+1)));
if (flag_(yn,xn) == INSIDE)
{
// First touch: move the pixel into the narrow band.
flag_(yn,xn) = BAND;
inpaint(xn, yn);
heapAdd(DXY(dist_(yn,xn),xn,yn));
}
else
{
// Already in the band: decrease its key if improved.
int i = index_(yn,xn);
if (dist_(yn,xn) < narrowBand_[i].dist)
{
narrowBand_[i].dist = dist_(yn,xn);
heapUp(i);
}
// works better if it's commented out
/*else if (dist(yn,xn) > narrowBand[i].dist)
{
narrowBand[i].dist = dist(yn,xn);
heapDown(i);
}*/
}
}
}
}
}
} // namespace videostab
} // namespace cv
#endif
#ifndef __OPENCV_VIDEOSTAB_FRAME_SOURCE_HPP__
#define __OPENCV_VIDEOSTAB_FRAME_SOURCE_HPP__
#include <vector>
#include <string>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
namespace cv
{
namespace videostab
{
// Abstract pull-style frame source: nextFrame() returns consecutive
// frames, an empty Mat signalling end of stream; reset() rewinds.
class IFrameSource
{
public:
virtual ~IFrameSource() {}
virtual void reset() = 0;
virtual Mat nextFrame() = 0;
};
// Frame source that produces no frames (always returns an empty Mat).
class NullFrameSource : public IFrameSource
{
public:
virtual void reset() {}
virtual Mat nextFrame() { return Mat(); }
};
// Frame source reading consecutive frames from a video file.
class VideoFileSource : public IFrameSource
{
public:
    // volatileFrame presumably controls whether nextFrame() may return a
    // decoder-owned buffer instead of a deep copy — TODO confirm in .cpp.
    VideoFileSource(const std::string &path, bool volatileFrame = false);

    virtual void reset();
    virtual Mat nextFrame();

    // VideoCapture::get() returns double; cast explicitly instead of
    // relying on an implicit narrowing conversion.
    int frameCount() { return static_cast<int>(reader_.get(CV_CAP_PROP_FRAME_COUNT)); }
    double fps() { return reader_.get(CV_CAP_PROP_FPS); }

private:
    std::string path_;
    bool volatileFrame_;
    VideoCapture reader_;
};
} // namespace videostab
} // namespace cv
#endif
#ifndef __OPENCV_VIDEOSTAB_GLOBAL_MOTION_HPP__
#define __OPENCV_VIDEOSTAB_GLOBAL_MOTION_HPP__
#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/videostab/optical_flow.hpp"
namespace cv
{
namespace videostab
{
// Parametric 2D motion models, ordered so the value doubles as an index
// into the least-squares implementation table (see global_motion.cpp).
enum MotionModel
{
TRANSLATION = 0,
TRANSLATION_AND_SCALE = 1,
AFFINE = 2
};
Mat estimateGlobalMotionLeastSquares(
const std::vector<Point2f> &points0, const std::vector<Point2f> &points1,
int model = AFFINE, float *rmse = 0);
// RANSAC configuration for robust global motion estimation.
struct RansacParams
{
int size; // subset size
float thresh; // max error to classify as inlier
float eps; // max outliers ratio
float prob; // probability of success
RansacParams(int size, float thresh, float eps, float prob)
: size(size), thresh(thresh), eps(eps), prob(prob) {}
// Presets: subset size matches each model's degrees of freedom / 2 points.
static RansacParams affine2dMotionStd() { return RansacParams(6, 0.5f, 0.5f, 0.99f); }
static RansacParams translationAndScale2dMotionStd() { return RansacParams(3, 0.5f, 0.5f, 0.99f); }
static RansacParams translationMotionStd() { return RansacParams(2, 0.5f, 0.5f, 0.99f); }
};
Mat estimateGlobalMotionRobust(
const std::vector<Point2f> &points0, const std::vector<Point2f> &points1,
int model = AFFINE, const RansacParams &params = RansacParams::affine2dMotionStd(),
float *rmse = 0, int *ninliers = 0);
// Estimates the global (camera) motion between two consecutive frames,
// returned as a 3x3 CV_32F homogeneous transform.
class IGlobalMotionEstimator
{
public:
virtual ~IGlobalMotionEstimator() {}
virtual Mat estimate(const Mat &frame0, const Mat &frame1) = 0;
};
// Global motion estimator: detects features in frame0, tracks them into
// frame1 with sparse pyramidal LK flow, then fits a motion model with
// RANSAC. Falls back to identity if the fit looks unreliable.
class PyrLkRobustMotionEstimator : public IGlobalMotionEstimator
{
public:
PyrLkRobustMotionEstimator();
void setDetector(Ptr<FeatureDetector> val) { detector_ = val; }
Ptr<FeatureDetector> detector() const { return detector_; }
void setOptFlowEstimator(Ptr<ISparseOptFlowEstimator> val) { optFlowEstimator_ = val; }
Ptr<ISparseOptFlowEstimator> optFlowEstimator() const { return optFlowEstimator_; }
void setMotionModel(MotionModel val) { motionModel_ = val; }
MotionModel motionModel() const { return motionModel_; }
void setRansacParams(const RansacParams &val) { ransacParams_ = val; }
RansacParams ransacParams() const { return ransacParams_; }
// Estimates above maxRmse are rejected (identity returned instead).
void setMaxRmse(float val) { maxRmse_ = val; }
float maxRmse() const { return maxRmse_; }
// Minimum fraction of tracked points that must be RANSAC inliers.
void setMinInlierRatio(float val) { minInlierRatio_ = val; }
float minInlierRatio() const { return minInlierRatio_; }
virtual Mat estimate(const Mat &frame0, const Mat &frame1);
private:
Ptr<FeatureDetector> detector_;
Ptr<ISparseOptFlowEstimator> optFlowEstimator_;
MotionModel motionModel_;
RansacParams ransacParams_;
// Scratch buffers reused between estimate() calls.
std::vector<uchar> status_;
std::vector<KeyPoint> keypointsPrev_;
std::vector<Point2f> pointsPrev_, points_;
std::vector<Point2f> pointsPrevGood_, pointsGood_;
float maxRmse_;
float minInlierRatio_;
};
Mat getMotion(int from, int to, const std::vector<Mat> &motions);
Mat ensureInclusionConstraint(const Mat &M, Size size, float trimRatio);
float estimateOptimalTrimRatio(const Mat &M, Size size);
// frame1 is non-transformed frame
float alignementError(const Mat &M, const Mat &frame0, const Mat &mask0, const Mat &frame1);
} // namespace videostab
} // namespace cv
#endif
#ifndef __OPENCV_VIDEOSTAB_LOG_HPP__
#define __OPENCV_VIDEOSTAB_LOG_HPP__
namespace cv
{
namespace videostab
{
// Minimal printf-style logging interface used by the stabilizer.
class ILog
{
public:
virtual ~ILog() {}
virtual void print(const char *format, ...) = 0;
};
// Logger that discards all messages.
class NullLog : public ILog
{
public:
virtual void print(const char *format, ...) {}
};
// Logger that forwards messages to stdout (implementation in log.cpp).
class LogToStdout : public ILog
{
public:
virtual void print(const char *format, ...);
};
} // namespace videostab
} // namespace cv
#endif
#ifndef __OPENCV_VIDEOSTAB_MOTION_FILTERING_HPP__
#define __OPENCV_VIDEOSTAB_MOTION_FILTERING_HPP__
#include <vector>
#include "opencv2/core/core.hpp"
namespace cv
{
namespace videostab
{
// Smooths a sequence of inter-frame motions: apply() returns the
// stabilizing transform for frame `index` given the motion chain Ms.
class IMotionFilter
{
public:
virtual ~IMotionFilter() {}
// Temporal window half-size the filter needs on each side of a frame.
virtual int radius() const = 0;
virtual Mat apply(int index, std::vector<Mat> &Ms) const = 0;
};
// Motion filter with precomputed Gaussian weights over a window of
// 2*radius+1 neighboring motions.
class GaussianMotionFilter : public IMotionFilter
{
public:
GaussianMotionFilter(int radius, float stdev);
virtual int radius() const { return radius_; }
virtual Mat apply(int idx, std::vector<Mat> &motions) const;
private:
int radius_;
std::vector<float> weight_; // Gaussian kernel, filled by the constructor
};
} // namespace videostab
} // namespace cv
#endif
#ifndef __OPENCV_VIDEOSTAB_OPTICAL_FLOW_HPP__
#define __OPENCV_VIDEOSTAB_OPTICAL_FLOW_HPP__
#include "opencv2/core/core.hpp"
#include "opencv2/gpu/gpu.hpp"
namespace cv
{
namespace videostab
{
// Sparse optical flow: tracks points0 from frame0 into frame1, writing
// tracked positions, per-point status and (optionally) errors.
class ISparseOptFlowEstimator
{
public:
virtual ~ISparseOptFlowEstimator() {}
virtual void run(
InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,
OutputArray status, OutputArray errors) = 0;
};
// Dense optical flow: computes per-pixel flow fields flowX/flowY from
// frame0 to frame1.
class IDenseOptFlowEstimator
{
public:
virtual ~IDenseOptFlowEstimator() {}
virtual void run(
InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY,
OutputArray errors) = 0;
};
// Shared configuration (window size, pyramid depth) for the pyramidal
// Lucas-Kanade estimators below.
class PyrLkOptFlowEstimatorBase
{
public:
PyrLkOptFlowEstimatorBase() { setWinSize(Size(21, 21)); setMaxLevel(3); }
void setWinSize(Size val) { winSize_ = val; }
Size winSize() const { return winSize_; }
void setMaxLevel(int val) { maxLevel_ = val; }
int maxLevel() const { return maxLevel_; }
protected:
Size winSize_;
int maxLevel_;
};
// CPU sparse pyramidal LK flow (wraps cv::calcOpticalFlowPyrLK).
class SparsePyrLkOptFlowEstimator
: public PyrLkOptFlowEstimatorBase, public ISparseOptFlowEstimator
{
public:
virtual void run(
InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,
OutputArray status, OutputArray errors);
};
// GPU dense pyramidal LK flow; keeps device buffers between calls to
// avoid reallocation.
class DensePyrLkOptFlowEstimatorGpu
: public PyrLkOptFlowEstimatorBase, public IDenseOptFlowEstimator
{
public:
DensePyrLkOptFlowEstimatorGpu();
virtual void run(
InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY,
OutputArray errors);
private:
gpu::PyrLKOpticalFlow optFlowEstimator_;
gpu::GpuMat frame0_, frame1_, flowX_, flowY_, errors_; // device-side scratch
};
} // namespace videostab
} // namespace cv
#endif
#ifndef __OPENCV_VIDEOSTAB_STABILIZER_HPP__
#define __OPENCV_VIDEOSTAB_STABILIZER_HPP__
#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/videostab/global_motion.hpp"
#include "opencv2/videostab/motion_filtering.hpp"
#include "opencv2/videostab/frame_source.hpp"
#include "opencv2/videostab/log.hpp"
#include "opencv2/videostab/inpainting.hpp"
#include "opencv2/videostab/deblurring.hpp"
namespace cv
{
namespace videostab
{
class Stabilizer : public IFrameSource
{
public:
Stabilizer();
void setLog(Ptr<ILog> log) { log_ = log; }
Ptr<ILog> log() const { return log_; }
void setFrameSource(Ptr<IFrameSource> val) { frameSource_ = val; reset(); }
Ptr<IFrameSource> frameSource() const { return frameSource_; }
void setMotionEstimator(Ptr<IGlobalMotionEstimator> val) { motionEstimator_ = val; }
Ptr<IGlobalMotionEstimator> motionEstimator() const { return motionEstimator_; }
void setMotionFilter(Ptr<IMotionFilter> val) { motionFilter_ = val; reset(); }
Ptr<IMotionFilter> motionFilter() const { return motionFilter_; }
void setDeblurer(Ptr<IDeblurer> val) { deblurer_ = val; reset(); }
Ptr<IDeblurer> deblurrer() const { return deblurer_; }
void setEstimateTrimRatio(bool val) { mustEstimateTrimRatio_ = val; reset(); }
bool mustEstimateTrimRatio() const { return mustEstimateTrimRatio_; }
void setTrimRatio(float val) { trimRatio_ = val; reset(); }
int trimRatio() const { return trimRatio_; }
void setInclusionConstraint(bool val) { inclusionConstraint_ = val; }
bool inclusionConstraint() const { return inclusionConstraint_; }
void setBorderMode(int val) { borderMode_ = val; }
int borderMode() const { return borderMode_; }
void setInpainter(Ptr<IInpainter> val) { inpainter_ = val; reset(); }
Ptr<IInpainter> inpainter() const { return inpainter_; }
virtual void reset();
virtual Mat nextFrame();
private:
void estimateMotionsAndTrimRatio();
void processFirstFrame(Mat &frame);
bool processNextFrame();
void stabilizeFrame(int idx);
Ptr<IFrameSource> frameSource_;
Ptr<IGlobalMotionEstimator> motionEstimator_;
Ptr<IMotionFilter> motionFilter_;
Ptr<IDeblurer> deblurer_;
Ptr<IInpainter> inpainter_;
bool mustEstimateTrimRatio_;
float trimRatio_;
bool inclusionConstraint_;
int borderMode_;
Ptr<ILog> log_;
Size frameSize_;
Mat frameMask_;
int radius_;
int curPos_;
int curStabilizedPos_;
bool auxPassWasDone_;
bool doDeblurring_;
Mat preProcessedFrame_;
bool doInpainting_;
Mat inpaintingMask_;
std::vector<Mat> frames_;
std::vector<Mat> motions_; // motions_[i] is the motion from i to i+1 frame
std::vector<float> blurrinessRates_;
std::vector<Mat> stabilizedFrames_;
std::vector<Mat> stabilizedMasks_;
std::vector<Mat> stabilizationMotions_;
};
} // namespace videostab
} // namespace cv
#endif
#ifndef __OPENCV_VIDEOSTAB_HPP__
#define __OPENCV_VIDEOSTAB_HPP__
#include "opencv2/videostab/stabilizer.hpp"
#endif
#include "precomp.hpp"
#include "opencv2/videostab/deblurring.hpp"
#include "opencv2/videostab/global_motion.hpp"
using namespace std;
namespace cv
{
namespace videostab
{
// Blurriness measure: inverse of the mean squared gradient magnitude.
// Sharper frames (stronger gradients) yield smaller values.
float calcBlurriness(const Mat &frame)
{
    Mat Gx, Gy;
    Sobel(frame, Gx, CV_32F, 1, 0);
    Sobel(frame, Gy, CV_32F, 0, 1);
    double normGx = norm(Gx);
    double normGy = norm(Gy); // was norm(Gx): the y-gradient was ignored
    double sumSq = normGx*normGx + normGy*normGy;
    // Epsilon guards against division by zero on constant frames.
    return static_cast<float>(1. / (sumSq / frame.size().area() + 1e-6));
}
// Default sensitivity; float literal avoids the implicit double->float
// narrowing of the original 0.1.
WeightingDeblurer::WeightingDeblurer()
{
    setSensitivity(0.1f);
}
// Sharpens frame idx by accumulating pixels warped in from sharper
// neighbors within [idx-radius_, idx+radius_], weighted by relative
// blurriness and color similarity, then normalizing. Relies on the
// project helpers at() (clamped indexing) and intensity().
void WeightingDeblurer::deblur(int idx, Mat &frame)
{
CV_Assert(frame.type() == CV_8UC3);
bSum_.create(frame.size());
gSum_.create(frame.size());
rSum_.create(frame.size());
wSum_.create(frame.size());
// Seed the accumulators with the frame's own pixels at weight 1.
for (int y = 0; y < frame.rows; ++y)
{
for (int x = 0; x < frame.cols; ++x)
{
Point3_<uchar> p = frame.at<Point3_<uchar> >(y,x);
bSum_(y,x) = p.x;
gSum_(y,x) = p.y;
rSum_(y,x) = p.z;
wSum_(y,x) = 1.f;
}
}
for (int k = idx - radius_; k <= idx + radius_; ++k)
{
const Mat &neighbor = at(k, *frames_);
// bRatio > 1 means the neighbor is sharper than the current frame.
float bRatio = at(idx, *blurrinessRates_) / at(k, *blurrinessRates_);
Mat_<float> M = getMotion(idx, k, *motions_);
if (bRatio > 1.f)
{
for (int y = 0; y < frame.rows; ++y)
{
for (int x = 0; x < frame.cols; ++x)
{
// Warp (x,y) into the neighbor frame's coordinates.
int x1 = M(0,0)*x + M(0,1)*y + M(0,2);
int y1 = M(1,0)*x + M(1,1)*y + M(1,2);
if (x1 >= 0 && x1 < neighbor.cols && y1 >= 0 && y1 < neighbor.rows)
{
const Point3_<uchar> &p = frame.at<Point3_<uchar> >(y,x);
const Point3_<uchar> &p1 = neighbor.at<Point3_<uchar> >(y1,x1);
// Weight decays with intensity mismatch; sensitivity_
// controls how fast.
float w = bRatio * sensitivity_ /
(sensitivity_ + std::abs(intensity(p1) - intensity(p)));
bSum_(y,x) += w * p1.x;
gSum_(y,x) += w * p1.y;
rSum_(y,x) += w * p1.z;
wSum_(y,x) += w;
}
}
}
}
}
// Normalize the accumulated sums back into the frame.
for (int y = 0; y < frame.rows; ++y)
{
for (int x = 0; x < frame.cols; ++x)
{
float wSumInv = 1.f / wSum_(y,x);
frame.at<Point3_<uchar> >(y,x) = Point3_<uchar>(
bSum_(y,x) * wSumInv, gSum_(y,x) * wSumInv, rSum_(y,x) * wSumInv);
}
}
}
} // namespace videostab
} // namespace cv
#include "precomp.hpp"
#include "opencv2/videostab/fast_marching.hpp"
using namespace std;
namespace cv
{
namespace videostab
{
// Local eikonal update: computes a distance estimate at a pixel from the
// known distances of one horizontal neighbor (x1,y1) and one vertical
// neighbor (x2,y2). Returns inf_ when neither neighbor is KNOWN.
float FastMarchingMethod::solve(int x1, int y1, int x2, int y2) const
{
    float sol = inf_;
    if (y1 >= 0 && y1 < flag_.rows && x1 >= 0 && x1 < flag_.cols && flag_(y1,x1) == KNOWN)
    {
        float t1 = dist_(y1,x1);
        if (y2 >= 0 && y2 < flag_.rows && x2 >= 0 && x2 < flag_.cols && flag_(y2,x2) == KNOWN)
        {
            float t2 = dist_(y2,x2);
            // Both neighbors known: solve (T-t1)^2 + (T-t2)^2 = 1 and
            // accept the root not smaller than either neighbor distance.
            float r = sqrt(2 - sqr(t1 - t2));
            float s = (t1 + t2 - r) / 2;
            if (s >= t1 && s >= t2)
                sol = s;
            else
            {
                s += r;
                if (s >= t1 && s >= t2)
                    sol = s;
            }
        }
        else
            sol = 1 + t1; // only the horizontal neighbor is known
    }
    else if (y2 >= 0 && y2 < flag_.rows && x2 >= 0 && x2 < flag_.cols && flag_(y2,x2) == KNOWN)
        sol = 1 + dist_(y2,x2); // was dist_(y2,x1): read the wrong pixel
    return sol;
}
// Sifts the element at idx up the min-heap until its parent is not
// larger. indexOf entries must be swapped BEFORE the elements themselves
// so the pixel->slot map stays consistent.
void FastMarchingMethod::heapUp(int idx)
{
int p = (idx-1)/2;
while (idx > 0 && narrowBand_[idx] < narrowBand_[p])
{
std::swap(indexOf(narrowBand_[p]), indexOf(narrowBand_[idx]));
std::swap(narrowBand_[p], narrowBand_[idx]);
idx = p;
p = (idx-1)/2;
}
}
// Sifts the element at idx down the min-heap, always descending into the
// smaller child. Only the first size_ slots are part of the heap.
void FastMarchingMethod::heapDown(int idx)
{
int l, r, smallest;
while (true)
{
l = 2*idx+1;
r = 2*idx+2;
smallest = idx;
if (l < size_ && narrowBand_[l] < narrowBand_[smallest]) smallest = l;
if (r < size_ && narrowBand_[r] < narrowBand_[smallest]) smallest = r;
if (smallest == idx) break;
else
{
// Keep the pixel->slot map in sync before moving the elements.
std::swap(indexOf(narrowBand_[idx]), indexOf(narrowBand_[smallest]));
std::swap(narrowBand_[idx], narrowBand_[smallest]);
idx = smallest;
}
}
}
// Appends dxy to the heap (growing the backing vector geometrically if
// needed), records its slot in the index map, and restores heap order.
void FastMarchingMethod::heapAdd(const DXY &dxy)
{
if (static_cast<int>(narrowBand_.size()) < size_ + 1)
narrowBand_.resize(size_*2 + 1);
narrowBand_[size_] = dxy;
indexOf(dxy) = size_++;
heapUp(size_-1);
}
// Removes the minimum (root) by swapping it with the last element,
// shrinking the heap, and sifting the new root down. Index map entries
// are swapped first to stay consistent.
void FastMarchingMethod::heapRemoveMin()
{
if (size_ > 0)
{
size_--;
std::swap(indexOf(narrowBand_[0]), indexOf(narrowBand_[size_]));
std::swap(narrowBand_[0], narrowBand_[size_]);
heapDown(0);
}
}
} // namespace videostab
} // namespace cv
#include "precomp.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/videostab/global_motion.hpp"
using namespace std;
namespace cv
{
namespace videostab
{
// Fits a pure translation as the mean point displacement. Assumes
// npoints > 0; optionally reports the RMS residual via rmse.
static Mat estimateGlobMotionLeastSquaresTranslation(
int npoints, const Point2f *points0, const Point2f *points1, float *rmse)
{
Mat_<float> M = Mat::eye(3, 3, CV_32F);
for (int i = 0; i < npoints; ++i)
{
M(0,2) += points1[i].x - points0[i].x;
M(1,2) += points1[i].y - points0[i].y;
}
M(0,2) /= npoints;
M(1,2) /= npoints;
if (rmse)
{
*rmse = 0;
for (int i = 0; i < npoints; ++i)
*rmse += sqr(points1[i].x - points0[i].x - M(0,2)) +
sqr(points1[i].y - points0[i].y - M(1,2));
*rmse = sqrt(*rmse / npoints);
}
return M;
}
// Fits uniform scale + translation (3 unknowns: s, tx, ty) by solving
// the stacked 2N x 3 linear system via SVD. Each point contributes two
// rows: s*x + tx = x' and s*y + ty = y'.
static Mat estimateGlobMotionLeastSquaresTranslationAndScale(
int npoints, const Point2f *points0, const Point2f *points1, float *rmse)
{
Mat_<float> A(2*npoints, 3), b(2*npoints, 1);
float *a0, *a1;
Point2f p0, p1;
for (int i = 0; i < npoints; ++i)
{
a0 = A[2*i];
a1 = A[2*i+1];
p0 = points0[i];
p1 = points1[i];
a0[0] = p0.x; a0[1] = 1; a0[2] = 0;
a1[0] = p0.y; a1[1] = 0; a1[2] = 1;
b(2*i,0) = p1.x;
b(2*i+1,0) = p1.y;
}
Mat_<float> sol;
solve(A, b, sol, DECOMP_SVD);
if (rmse)
*rmse = norm(A*sol, b, NORM_L2) / sqrt(npoints);
// Assemble the homogeneous 3x3 motion: sol = (scale, tx, ty).
Mat_<float> M = Mat::eye(3, 3, CV_32F);
M(0,0) = M(1,1) = sol(0,0);
M(0,2) = sol(1,0);
M(1,2) = sol(2,0);
return M;
}
// Fits a full 2D affine motion (6 unknowns) by solving the stacked
// 2N x 6 linear system via SVD.
static Mat estimateGlobMotionLeastSquaresAffine(
int npoints, const Point2f *points0, const Point2f *points1, float *rmse)
{
Mat_<float> A(2*npoints, 6), b(2*npoints, 1);
float *a0, *a1;
Point2f p0, p1;
for (int i = 0; i < npoints; ++i)
{
a0 = A[2*i];
a1 = A[2*i+1];
p0 = points0[i];
p1 = points1[i];
a0[0] = p0.x; a0[1] = p0.y; a0[2] = 1; a0[3] = a0[4] = a0[5] = 0;
a1[0] = a1[1] = a1[2] = 0; a1[3] = p0.x; a1[4] = p0.y; a1[5] = 1;
b(2*i,0) = p1.x;
b(2*i+1,0) = p1.y;
}
Mat_<float> sol;
solve(A, b, sol, DECOMP_SVD);
if (rmse)
*rmse = norm(A*sol, b, NORM_L2) / sqrt(npoints);
// Copy the six solved coefficients into the top two rows of M.
Mat_<float> M = Mat::eye(3, 3, CV_32F);
for (int i = 0, k = 0; i < 2; ++i)
for (int j = 0; j < 3; ++j, ++k)
M(i,j) = sol(k,0);
return M;
}
// Least-squares global motion estimation over point correspondences.
// model selects TRANSLATION, TRANSLATION_AND_SCALE or AFFINE; rmse, if
// non-null, receives the root-mean-square residual of the fit.
Mat estimateGlobalMotionLeastSquares(
        const vector<Point2f> &points0, const vector<Point2f> &points1, int model, float *rmse)
{
    // model indexes impls[]; an out-of-range value was previously UB.
    CV_Assert(model >= TRANSLATION && model <= AFFINE);
    // &points0[0] below requires a non-empty vector.
    CV_Assert(!points0.empty() && points0.size() == points1.size());

    typedef Mat (*Impl)(int, const Point2f*, const Point2f*, float*);
    static Impl impls[] = { estimateGlobMotionLeastSquaresTranslation,
                            estimateGlobMotionLeastSquaresTranslationAndScale,
                            estimateGlobMotionLeastSquaresAffine };

    int npoints = static_cast<int>(points0.size());
    return impls[model](npoints, &points0[0], &points1[0], rmse);
}
// RANSAC-robust global motion estimation: repeatedly fits the model on
// random minimal subsets, keeps the hypothesis with the most inliers,
// then refits on all inliers of the best hypothesis.
Mat estimateGlobalMotionRobust(
const vector<Point2f> &points0, const vector<Point2f> &points1, int model, const RansacParams &params,
float *rmse, int *ninliers)
{
CV_Assert(points0.size() == points1.size());
typedef Mat (*Impl)(int, const Point2f*, const Point2f*, float*);
static Impl impls[] = { estimateGlobMotionLeastSquaresTranslation,
estimateGlobMotionLeastSquaresTranslationAndScale,
estimateGlobMotionLeastSquaresAffine };
const int npoints = static_cast<int>(points0.size());
// Standard RANSAC iteration count for the requested success probability.
const int niters = static_cast<int>(ceil(log(1 - params.prob) / log(1 - pow(1 - params.eps, params.size))));
// Fixed seed keeps results reproducible across runs.
RNG rng(0);
vector<int> indices(params.size);
vector<Point2f> subset0(params.size), subset1(params.size);
vector<Point2f> subset0best(params.size), subset1best(params.size);
Mat_<float> bestM;
int ninliersMax = -1;
Point2f p0, p1;
float x, y;
for (int iter = 0; iter < niters; ++iter)
{
// Draw params.size distinct point indices.
// NOTE(review): this loop never terminates if npoints < params.size.
for (int i = 0; i < params.size; ++i)
{
bool ok = false;
while (!ok)
{
ok = true;
indices[i] = static_cast<unsigned>(rng) % npoints;
for (int j = 0; j < i; ++j)
if (indices[i] == indices[j])
{ ok = false; break; }
}
}
for (int i = 0; i < params.size; ++i)
{
subset0[i] = points0[indices[i]];
subset1[i] = points1[indices[i]];
}
Mat_<float> M = impls[model](params.size, &subset0[0], &subset1[0], 0);
// Count inliers of this hypothesis. This local shadows the
// out-parameter of the same name until the end of the loop body.
int ninliers = 0;
for (int i = 0; i < npoints; ++i)
{
p0 = points0[i]; p1 = points1[i];
x = M(0,0)*p0.x + M(0,1)*p0.y + M(0,2);
y = M(1,0)*p0.x + M(1,1)*p0.y + M(1,2);
if (sqr(x - p1.x) + sqr(y - p1.y) < params.thresh * params.thresh)
ninliers++;
}
if (ninliers >= ninliersMax)
{
bestM = M;
ninliersMax = ninliers;
subset0best.swap(subset0);
subset1best.swap(subset1);
}
}
if (ninliersMax < params.size)
// compute rmse
bestM = impls[model](params.size, &subset0best[0], &subset1best[0], rmse);
else
{
// Refit on all inliers of the best hypothesis.
subset0.resize(ninliersMax);
subset1.resize(ninliersMax);
for (int i = 0, j = 0; i < npoints; ++i)
{
p0 = points0[i]; p1 = points1[i];
x = bestM(0,0)*p0.x + bestM(0,1)*p0.y + bestM(0,2);
y = bestM(1,0)*p0.x + bestM(1,1)*p0.y + bestM(1,2);
if (sqr(x - p1.x) + sqr(y - p1.y) < params.thresh * params.thresh)
{
subset0[j] = p0;
subset1[j] = p1;
j++;
}
}
bestM = impls[model](ninliersMax, &subset0[0], &subset1[0], rmse);
}
// Here `ninliers` is the out-parameter again (the loop-local is gone).
if (ninliers)
*ninliers = ninliersMax;
return bestM;
}
// Defaults: GFTT feature detector, sparse pyramidal LK tracking, affine
// motion model with the standard affine RANSAC preset.
PyrLkRobustMotionEstimator::PyrLkRobustMotionEstimator() : ransacParams_(RansacParams::affine2dMotionStd())
{
setDetector(new GoodFeaturesToTrackDetector());
setOptFlowEstimator(new SparsePyrLkOptFlowEstimator());
setMotionModel(AFFINE);
setMaxRmse(0.5f);
setMinInlierRatio(0.1f);
}
// Estimates the global motion frame0 -> frame1: detect features, track
// them with sparse LK flow, fit the motion model robustly. Returns the
// identity when tracking or the fit is unreliable.
Mat PyrLkRobustMotionEstimator::estimate(const Mat &frame0, const Mat &frame1)
{
    detector_->detect(frame0, keypointsPrev_);

    pointsPrev_.resize(keypointsPrev_.size());
    for (size_t i = 0; i < keypointsPrev_.size(); ++i)
        pointsPrev_[i] = keypointsPrev_[i].pt;

    optFlowEstimator_->run(frame0, frame1, pointsPrev_, points_, status_, noArray());

    // Keep only correspondences that were tracked successfully.
    size_t npoints = points_.size();
    pointsPrevGood_.clear();
    pointsPrevGood_.reserve(npoints);
    pointsGood_.clear();
    pointsGood_.reserve(npoints);
    for (size_t i = 0; i < npoints; ++i)
    {
        if (status_[i])
        {
            pointsPrevGood_.push_back(pointsPrev_[i]);
            pointsGood_.push_back(points_[i]);
        }
    }

    // Guard: with fewer points than the RANSAC subset size, the distinct
    // index sampling inside estimateGlobalMotionRobust never terminates
    // (and an empty set would divide by zero below). Fall back to identity.
    if (static_cast<int>(pointsGood_.size()) < ransacParams_.size)
        return Mat::eye(3, 3, CV_32F);

    float rmse;
    int ninliers;
    Mat M = estimateGlobalMotionRobust(
            pointsPrevGood_, pointsGood_, motionModel_, ransacParams_, &rmse, &ninliers);

    // Reject estimates with a large residual or too few inliers.
    if (rmse > maxRmse_ || static_cast<float>(ninliers) / pointsGood_.size() < minInlierRatio_)
        M = Mat::eye(3, 3, CV_32F);

    return M;
}
// Composes the chain of inter-frame motions to obtain the transform from
// frame `from` to frame `to`; motions[i] maps frame i to frame i+1.
// Composing backwards is done by building the forward chain and inverting.
Mat getMotion(int from, int to, const vector<Mat> &motions)
{
    Mat M = Mat::eye(3, 3, CV_32F);
    if (to == from)
        return M;
    const int lo = std::min(from, to);
    const int hi = std::max(from, to);
    for (int i = lo; i < hi; ++i)
        M = at(i, motions) * M;
    if (from < to)
        return M;
    return M.inv();
}
// Sign of the (doubled) signed area of triangle (a,b,c); zero within a
// small tolerance for near-collinear points.
static inline int areaSign(Point2f a, Point2f b, Point2f c)
{
    const float doubledArea = (b-a).cross(c-a);
    return doubledArea > 1e-5f ? 1 : (doubledArea < -1e-5f ? -1 : 0);
}
// Proper (strict) segment intersection test: the endpoints of each
// segment must lie on strictly opposite sides of the other segment.
static inline bool segmentsIntersect(Point2f a, Point2f b, Point2f c, Point2f d)
{
    const bool cdStraddlesAb = areaSign(a,b,c) * areaSign(a,b,d) < 0;
    const bool abStraddlesCd = areaSign(c,d,a) * areaSign(c,d,b) < 0;
    return cdStraddlesAb && abStraddlesCd;
}
// Checks if rect a (with sides parallel to axis) is inside rect b (arbitrary).
// Rects must be passed in the [(0,0), (w,0), (w,h), (0,h)] order.
// Implementation: a is inside b iff no vertex of b lies strictly inside a
// and no edge of a properly crosses an edge of b.
// NOTE(review): assumes the rects are positioned so that a cannot lie
// entirely outside b — true for how the callers below construct them.
static inline bool isRectInside(const Point2f a[4], const Point2f b[4])
{
for (int i = 0; i < 4; ++i)
if (b[i].x > a[0].x && b[i].x < a[2].x && b[i].y > a[0].y && b[i].y < a[2].y)
return false;
for (int i = 0; i < 4; ++i)
for (int j = 0; j < 4; ++j)
if (segmentsIntersect(a[i], a[(i+1)%4], b[j], b[(j+1)%4]))
return false;
return true;
}
// Checks whether motion M (top two rows of the 3x3 transform, row-major
// in 6 floats) keeps the trimmed rectangle [dx, w-dx] x [dy, h-dy]
// inside the transformed frame boundary.
static inline bool isGoodMotion(const float M[], float w, float h, float dx, float dy)
{
    Point2f pt[4] = {Point2f(0,0), Point2f(w,0), Point2f(w,h), Point2f(0,h)};
    Point2f Mpt[4];
    for (int i = 0; i < 4; ++i)
    {
        Mpt[i].x = M[0]*pt[i].x + M[1]*pt[i].y + M[2];
        // Was assigned to Mpt[i].x a second time, leaving the y
        // coordinate uninitialized.
        Mpt[i].y = M[3]*pt[i].x + M[4]*pt[i].y + M[5];
    }
    pt[0] = Point2f(dx, dy);
    pt[1] = Point2f(w - dx, dy);
    pt[2] = Point2f(w - dx, h - dy);
    pt[3] = Point2f(dx, h - dy);
    return isRectInside(pt, Mpt);
}
// Linearly blends motion M toward the identity: res = (1-t)*M + t*I.
// M and res hold the top two rows of a 3x3 transform, row-major.
static inline void relaxMotion(const float M[], float t, float res[])
{
    const float keep = 1.f - t;
    for (int i = 0; i < 6; ++i)
        res[i] = M[i] * keep;
    // Diagonal entries additionally pull toward the identity.
    res[0] += t;
    res[4] += t;
}
// Relaxes motion M toward the identity just enough that the trimmed
// frame rectangle stays inside the transformed frame. Uses binary search
// on the blend parameter t in [0,1].
Mat ensureInclusionConstraint(const Mat &M, Size size, float trimRatio)
{
CV_Assert(M.size() == Size(3,3) && M.type() == CV_32F);
const float w = size.width;
const float h = size.height;
const float dx = floor(w * trimRatio);
const float dy = floor(h * trimRatio);
const float srcM[6] =
{M.at<float>(0,0), M.at<float>(0,1), M.at<float>(0,2),
M.at<float>(1,0), M.at<float>(1,1), M.at<float>(1,2)};
float curM[6];
float t = 0;
relaxMotion(srcM, t, curM);
// The original motion already satisfies the constraint: keep it.
if (isGoodMotion(curM, w, h, dx, dy))
return M;
// Binary search for the smallest relaxation that works; r converges to it.
float l = 0, r = 1;
while (r - l > 1e-3f)
{
t = (l + r) * 0.5f;
relaxMotion(srcM, t, curM);
if (isGoodMotion(curM, w, h, dx, dy))
r = t;
else
l = t;
t = r;
relaxMotion(srcM, r, curM);
}
return (1 - r) * M + r * Mat::eye(3, 3, CV_32F);
}
// TODO can be estimated for O(1) time
// Finds (by binary search over [0, 0.5]) the smallest trim ratio whose
// trimmed rectangle remains inside the frame after applying motion M.
float estimateOptimalTrimRatio(const Mat &M, Size size)
{
CV_Assert(M.size() == Size(3,3) && M.type() == CV_32F);
const float w = size.width;
const float h = size.height;
Mat_<float> M_(M);
// Transform the frame corners once; only the trim rect changes below.
Point2f pt[4] = {Point2f(0,0), Point2f(w,0), Point2f(w,h), Point2f(0,h)};
Point2f Mpt[4];
for (int i = 0; i < 4; ++i)
{
Mpt[i].x = M_(0,0)*pt[i].x + M_(0,1)*pt[i].y + M_(0,2);
Mpt[i].y = M_(1,0)*pt[i].x + M_(1,1)*pt[i].y + M_(1,2);
}
float l = 0, r = 0.5f;
while (r - l > 1e-3f)
{
float t = (l + r) * 0.5f;
float dx = floor(w * t);
float dy = floor(h * t);
pt[0] = Point2f(dx, dy);
pt[1] = Point2f(w - dx, dy);
pt[2] = Point2f(w - dx, h - dy);
pt[3] = Point2f(dx, h - dy);
if (isRectInside(pt, Mpt))
r = t;
else
l = t;
}
return r;
}
// Sum of absolute intensity differences between frame0's masked pixels
// and their positions in frame1 under motion M (lower = better aligned).
// NOTE(review): name spelling ("alignement") kept — it is the public API.
float alignementError(const Mat &M, const Mat &frame0, const Mat &mask0, const Mat &frame1)
{
CV_Assert(frame0.type() == CV_8UC3 && frame1.type() == CV_8UC3);
CV_Assert(mask0.type() == CV_8U && mask0.size() == frame0.size());
CV_Assert(frame0.size() == frame1.size());
CV_Assert(M.size() == Size(3,3) && M.type() == CV_32F);
Mat_<uchar> mask0_(mask0);
Mat_<float> M_(M);
float err = 0;
for (int y0 = 0; y0 < frame0.rows; ++y0)
{
for (int x0 = 0; x0 < frame0.cols; ++x0)
{
if (mask0_(y0,x0))
{
// Map (x0,y0) into frame1; out-of-bounds pixels contribute nothing.
int x1 = cvRound(M_(0,0)*x0 + M_(0,1)*y0 + M_(0,2));
int y1 = cvRound(M_(1,0)*x0 + M_(1,1)*y0 + M_(1,2));
if (y1 >= 0 && y1 < frame1.rows && x1 >= 0 && x1 < frame1.cols)
err += std::abs(intensity(frame1.at<Point3_<uchar> >(y1,x1)) -
intensity(frame0.at<Point3_<uchar> >(y0,x0)));
}
}
}
return err;
}
} // namespace videostab
} // namespace cv
#include "precomp.hpp"
#include <queue>
#include "opencv2/videostab/inpainting.hpp"
#include "opencv2/videostab/global_motion.hpp"
#include "opencv2/videostab/fast_marching.hpp"
using namespace std;
namespace cv
{
namespace videostab
{
void InpaintingPipeline::setRadius(int val)
{
for (size_t i = 0; i < inpainters_.size(); ++i)
inpainters_[i]->setRadius(val);
IInpainter::setRadius(val);
}
void InpaintingPipeline::setFrames(const vector<Mat> &val)
{
for (size_t i = 0; i < inpainters_.size(); ++i)
inpainters_[i]->setFrames(val);
IInpainter::setFrames(val);
}
void InpaintingPipeline::setMotions(const vector<Mat> &val)
{
for (size_t i = 0; i < inpainters_.size(); ++i)
inpainters_[i]->setMotions(val);
IInpainter::setMotions(val);
}
void InpaintingPipeline::setStabilizedFrames(const vector<Mat> &val)
{
for (size_t i = 0; i < inpainters_.size(); ++i)
inpainters_[i]->setStabilizedFrames(val);
IInpainter::setStabilizedFrames(val);
}
void InpaintingPipeline::setStabilizationMotions(const vector<Mat> &val)
{
for (size_t i = 0; i < inpainters_.size(); ++i)
inpainters_[i]->setStabilizationMotions(val);
IInpainter::setStabilizationMotions(val);
}
void InpaintingPipeline::inpaint(int idx, Mat &frame, Mat &mask)
{
for (size_t i = 0; i < inpainters_.size(); ++i)
inpainters_[i]->inpaint(idx, frame, mask);
}
// Candidate color sample gathered from a neighboring frame, ordered by
// grayscale intensity so a median can be selected after sorting.
struct Pixel3
{
    float intens;
    Point3_<uchar> color;
    bool operator <(const Pixel3 &rhs) const { return intens < rhs.intens; }
};
// Default stdev threshold of 10 intensity levels for accepting a pixel's
// neighbor samples as consistent.
ConsistentMosaicInpainter::ConsistentMosaicInpainter()
{
    setStdevThresh(10.f);
}
// Fills masked-out pixels of the stabilized frame with the median of the
// corresponding pixels from neighboring frames, but only when those
// samples are mutually consistent (low intensity variance).
void ConsistentMosaicInpainter::inpaint(int idx, Mat &frame, Mat &mask)
{
CV_Assert(frame.type() == CV_8UC3);
CV_Assert(mask.size() == frame.size() && mask.type() == CV_8U);
// Precompute, for each neighbor, the motion from stabilized frame idx
// into that neighbor's original coordinates.
Mat invS = at(idx, *stabilizationMotions_).inv();
vector<Mat_<float> > motions(2*radius_ + 1);
for (int i = -radius_; i <= radius_; ++i)
motions[radius_ + i] = getMotion(idx, idx + i, *motions_) * invS;
int n;
float mean, var;
vector<Pixel3> pixels(2*radius_ + 1);
Mat_<Point3_<uchar> > frame_(frame);
Mat_<uchar> mask_(mask);
for (int y = 0; y < mask.rows; ++y)
{
for (int x = 0; x < mask.cols; ++x)
{
if (!mask_(y, x))
{
// Gather color samples for this pixel from all neighbors that
// see it, tracking the running intensity mean.
n = 0;
mean = 0;
var = 0;
for (int i = -radius_; i <= radius_; ++i)
{
const Mat_<Point3_<uchar> > &framei = at(idx + i, *frames_);
const Mat_<float> &Mi = motions[radius_ + i];
int xi = cvRound(Mi(0,0)*x + Mi(0,1)*y + Mi(0,2));
int yi = cvRound(Mi(1,0)*x + Mi(1,1)*y + Mi(1,2));
if (xi >= 0 && xi < framei.cols && yi >= 0 && yi < framei.rows)
{
pixels[n].color = framei(yi, xi);
mean += pixels[n].intens = intensity(pixels[n].color);
n++;
}
}
if (n > 0)
{
// Unbiased variance of the sample intensities.
mean /= n;
for (int i = 0; i < n; ++i)
var += sqr(pixels[i].intens - mean);
var /= std::max(n - 1, 1);
if (var < stdevThresh_ * stdevThresh_)
{
// Median by intensity.
sort(pixels.begin(), pixels.begin() + n);
int nh = (n-1)/2;
int c1 = pixels[nh].color.x;
int c2 = pixels[nh].color.y;
int c3 = pixels[nh].color.z;
// NOTE(review): this branch is always taken (n-2*nh is 1 or
// 2) and averages pixels[nh] with itself — a no-op. For even
// n it presumably meant to average pixels[nh] and pixels[nh+1].
if (n-2*nh)
{
c1 = (c1 + pixels[nh].color.x) / 2;
c2 = (c2 + pixels[nh].color.y) / 2;
c3 = (c3 + pixels[nh].color.z) / 2;
}
frame_(y, x) = Point3_<uchar>(c1, c2, c3);
mask_(y, x) = 255; // mark the pixel as filled
}
}
}
}
}
}
// Functor invoked by the Fast Marching Method for each pixel entering
// the narrow band: extrapolates the optical flow at (x,y) as a weighted
// average of the flow of nearby already-known pixels, linearized with
// the flow's local gradient, and weighted by color similarity in frame1.
class MotionInpaintBody
{
public:
void operator ()(int x, int y)
{
float uEst = 0.f, vEst = 0.f;
float wSum = 0.f;
for (int dy = -rad; dy <= rad; ++dy)
{
for (int dx = -rad; dx <= rad; ++dx)
{
int qx0 = x + dx;
int qy0 = y + dy;
// Use neighbor q0 only if its flow and that of its 4-neighbors
// is known (needed for the central-difference gradient below).
if (qy0 > 0 && qy0+1 < mask0.rows && qx0 > 0 && qx0+1 < mask0.cols && mask0(qy0,qx0) &&
mask0(qy0-1,qx0) && mask0(qy0+1,qx0) && mask0(qy0,qx0-1) && mask0(qy0,qx0+1))
{
// Local flow Jacobian via central differences.
float dudx = 0.5f * (flowX(qy0,qx0+1) - flowX(qy0,qx0-1));
float dvdx = 0.5f * (flowY(qy0,qx0+1) - flowY(qy0,qx0-1));
float dudy = 0.5f * (flowX(qy0+1,qx0) - flowX(qy0-1,qx0));
float dvdy = 0.5f * (flowY(qy0+1,qx0) - flowY(qy0-1,qx0));
// Follow q0's flow into frame1; p1 is where (x,y) would land
// if it moved like q0.
int qx1 = cvRound(qx0 + flowX(qy0,qx0));
int qy1 = cvRound(qy0 + flowY(qy0,qx0));
int px1 = qx1 - dx;
int py1 = qy1 - dy;
if (qx1 >= 0 && qx1 < mask1.cols && qy1 >= 0 && qy1 < mask1.rows && mask1(qy1,qx1) &&
px1 >= 0 && px1 < mask1.cols && py1 >= 0 && py1 < mask1.rows && mask1(py1,px1))
{
// Weight: similar colors and small spatial offset count more.
Point3_<uchar> cp = frame1(py1,px1), cq = frame1(qy1,qx1);
float distColor = sqr(cp.x-cq.x) + sqr(cp.y-cq.y) + sqr(cp.z-cq.z);
float w = 1.f / (sqrt(distColor * (dx*dx + dy*dy)) + eps);
// First-order extrapolation of q0's flow back to (x,y).
uEst += w * (flowX(qy0,qx0) - dudx*dx - dudy*dy);
vEst += w * (flowY(qy0,qx0) - dvdx*dx - dvdy*dy);
wSum += w;
}
//else return;
}
}
}
if (wSum > 0.f)
{
flowX(y,x) = uEst / wSum;
flowY(y,x) = vEst / wSum;
mask0(y,x) = 255; // flow at (x,y) is now known
}
}
// Shared views set up by MotionInpainter::inpaint before each run.
Mat_<Point3_<uchar> > frame1;
Mat_<uchar> mask0, mask1;
Mat_<float> flowX, flowY;
float eps;
int rad;
};
// Default configuration: dense pyramidal-LK flow on the GPU, a conservative
// flow error threshold, and replicate-border warping.
MotionInpainter::MotionInpainter()
{
    setBorderMode(BORDER_REPLICATE);
    setFlowErrorThreshold(1e-4f);
    setOptFlowEstimator(new DensePyrLkOptFlowEstimatorGpu());
}
// Inpaints the masked region of stabilized frame idx from its temporal
// neighbors, best-aligned first. For each neighbor: warp it (and an all-255
// mask) into frame idx's coordinates, estimate dense optical flow between
// the two, propagate flow into the unknown region with the fast marching
// method, then copy colors along the completed flow.
void MotionInpainter::inpaint(int idx, Mat &frame, Mat &mask)
{
    priority_queue<pair<float,int> > neighbors;
    vector<Mat> motions(2*radius_ + 1);

    for (int i = -radius_; i <= radius_; ++i)
    {
        // Motion from frame idx's stabilized coordinates into frame idx+i.
        Mat motion0to1 = getMotion(idx, idx + i, *motions_) * at(idx, *stabilizationMotions_).inv();
        motions[radius_ + i] = motion0to1;
        if (i != 0)
        {
            float err = alignementError(motion0to1, frame, mask, at(idx + i, *frames_));
            // Error is negated so the max-heap pops the best-aligned first.
            neighbors.push(make_pair(-err, idx + i));
        }
    }

    // Lazily (re)create the all-known mask used for neighbor frames.
    if (mask1_.size() != mask.size())
    {
        mask1_.create(mask.size());
        mask1_.setTo(255);
    }

    cvtColor(frame, grayFrame_, CV_BGR2GRAY);

    MotionInpaintBody body;
    body.rad = 2;
    body.eps = 1e-4f;

    while (!neighbors.empty())
    {
        int neighbor = neighbors.top().second;
        neighbors.pop();

        // Warp the neighbor frame and its mask into frame idx's coordinates
        // (top 2x3 of the 3x3 motion is the affine part).
        Mat motion1to0 = motions[radius_ + neighbor - idx].inv();
        frame1_ = at(neighbor, *frames_);
        warpAffine(
                frame1_, transformedFrame1_, motion1to0(Rect(0,0,3,2)), frame1_.size(),
                INTER_LINEAR, borderMode_);
        cvtColor(transformedFrame1_, transformedGrayFrame1_, CV_BGR2GRAY);
        warpAffine(
                mask1_, transformedMask1_, motion1to0(Rect(0,0,3,2)), mask1_.size(),
                INTER_NEAREST);
        // Shrink the valid area to drop border interpolation artifacts.
        erode(transformedMask1_, transformedMask1_, Mat());

        optFlowEstimator_->run(grayFrame_, transformedGrayFrame1_, flowX_, flowY_, flowErrors_);

        // Keep only flow vectors that are reliable and land on valid pixels.
        calcFlowMask(
                flowX_, flowY_, flowErrors_, flowErrorThreshold_, mask, transformedMask1_,
                flowMask_);

        body.flowX = flowX_;
        body.flowY = flowY_;
        body.mask0 = flowMask_;
        body.mask1 = transformedMask1_;
        body.frame1 = transformedFrame1_;
        fmm_.run(flowMask_, body);

        // Fill frame/mask from the (now completed) flow field.
        completeFrameAccordingToFlow(
                flowMask_, flowX_, flowY_, transformedFrame1_, transformedMask1_, frame, mask);
    }
}
// Functor for the fast marching method: fills a missing pixel with the mean
// color of its known 8-neighbors and marks it as known.
class ColorAverageInpaintBody
{
public:
    void operator ()(int x, int y)
    {
        float c1 = 0, c2 = 0, c3 = 0;
        float wSum = 0;

        static const int lut[8][2] = {{-1,-1}, {-1,0}, {-1,1}, {0,-1}, {0,1}, {1,-1}, {1,0}, {1,1}};

        for (int i = 0; i < 8; ++i)
        {
            int qx = x + lut[i][0];
            int qy = y + lut[i][1];
            if (qy >= 0 && qy < mask.rows && qx >= 0 && qx < mask.cols && mask(qy,qx))
            {
                // frame is CV_8UC3; address individual channels through a
                // uchar view (3 bytes per pixel).
                c1 += frame.at<uchar>(qy,3*qx);
                c2 += frame.at<uchar>(qy,3*qx+1);
                c3 += frame.at<uchar>(qy,3*qx+2);
                wSum += 1;
            }
        }

        // BUGFIX: guard the division. The fast marching front normally
        // guarantees at least one known neighbor, but if none exists the
        // original divided by zero and wrote garbage.
        if (wSum > 0)
        {
            float wSumInv = 1.f / wSum;
            frame(y,x) = Point3_<uchar>(c1*wSumInv, c2*wSumInv, c3*wSumInv);
        }
        mask(y,x) = 255; // always mark processed so the front keeps moving
    }

    cv::Mat_<uchar> mask;
    cv::Mat_<cv::Point3_<uchar> > frame;
};
void ColorAverageInpainter::inpaint(int /*idx*/, Mat &frame, Mat &mask)
{
ColorAverageInpaintBody body;
body.mask = mask;
body.frame = frame;
fmm_.run(mask, body);
}
void calcFlowMask(
const Mat &flowX, const Mat &flowY, const Mat &errors, float maxError,
const Mat &mask0, const Mat &mask1, Mat &flowMask)
{
CV_Assert(flowX.type() == CV_32F && flowX.size() == mask0.size());
CV_Assert(flowY.type() == CV_32F && flowY.size() == mask0.size());
CV_Assert(errors.type() == CV_32F && errors.size() == mask0.size());
CV_Assert(mask0.type() == CV_8U);
CV_Assert(mask1.type() == CV_8U && mask1.size() == mask0.size());
Mat_<float> flowX_(flowX), flowY_(flowY), errors_(errors);
Mat_<uchar> mask0_(mask0), mask1_(mask1);
flowMask.create(mask0.size(), CV_8U);
flowMask.setTo(0);
Mat_<uchar> flowMask_(flowMask);
for (int y0 = 0; y0 < flowMask_.rows; ++y0)
{
for (int x0 = 0; x0 < flowMask_.cols; ++x0)
{
if (mask0_(y0,x0) && errors_(y0,x0) < maxError)
{
int x1 = cvRound(x0 + flowX_(y0,x0));
int y1 = cvRound(y0 + flowY_(y0,x0));
if (x1 >= 0 && x1 < mask1_.cols && y1 >= 0 && y1 < mask1_.rows && mask1_(y1,x1))
flowMask_(y0,x0) = 255;
}
}
}
}
void completeFrameAccordingToFlow(
const Mat &flowMask, const Mat &flowX, const Mat &flowY, const Mat &frame1, const Mat &mask1,
Mat &frame0, Mat &mask0)
{
CV_Assert(flowMask.type() == CV_8U);
CV_Assert(flowX.type() == CV_32F && flowX.size() == flowMask.size());
CV_Assert(flowY.type() == CV_32F && flowY.size() == flowMask.size());
CV_Assert(frame1.type() == CV_8UC3 && frame1.size() == flowMask.size());
CV_Assert(mask1.type() == CV_8U && mask1.size() == flowMask.size());
CV_Assert(frame0.type() == CV_8UC3 && frame0.size() == flowMask.size());
CV_Assert(mask0.type() == CV_8U && mask0.size() == flowMask.size());
Mat_<uchar> flowMask_(flowMask), mask1_(mask1), mask0_(mask0);
Mat_<float> flowX_(flowX), flowY_(flowY);
for (int y0 = 0; y0 < frame0.rows; ++y0)
{
for (int x0 = 0; x0 < frame0.cols; ++x0)
{
if (!mask0_(y0,x0) && flowMask_(y0,x0))
{
int x1 = cvRound(x0 + flowX_(y0,x0));
int y1 = cvRound(y0 + flowY_(y0,x0));
if (x1 >= 0 && x1 < frame1.cols && y1 >= 0 && y1 < frame1.rows && mask1_(y1,x1))
{
frame0.at<Point3_<uchar> >(y0,x0) = frame1.at<Point3_<uchar> >(y1,x1);
mask0_(y0,x0) = 255;
}
}
}
}
}
} // namespace videostab
} // namespace cv
#include "precomp.hpp"
#include <cstdio>
#include <cstdarg>
#include "opencv2/videostab/log.hpp"
using namespace std;
namespace cv
{
namespace videostab
{
// printf-style logging to stdout; flushed immediately so progress dots
// appear as they are produced.
void LogToStdout::print(const char *format, ...)
{
    va_list args;
    va_start(args, format);
    vfprintf(stdout, format, args);
    va_end(args);
    fflush(stdout);
}
} // namespace videostab
} // namespace cv
#include "precomp.hpp"
#include "opencv2/videostab/motion_filtering.hpp"
#include "opencv2/videostab/global_motion.hpp"
using namespace std;
namespace cv
{
namespace videostab
{
// Precomputes a normalized Gaussian-like smoothing kernel over
// [-radius, radius].
// NOTE(review): the exponent is -i^2/stdev^2 rather than the conventional
// -i^2/(2*stdev^2); kept as-is since callers pass stdev tuned for this form.
GaussianMotionFilter::GaussianMotionFilter(int radius, float stdev) : radius_(radius)
{
    weight_.resize(2*radius_ + 1);

    float total = 0;
    for (int i = -radius_; i <= radius_; ++i)
    {
        weight_[radius_ + i] = std::exp(-i*i/(stdev*stdev));
        total += weight_[radius_ + i];
    }

    // Normalize so the weights sum to one.
    for (int i = -radius_; i <= radius_; ++i)
        weight_[radius_ + i] /= total;
}
// Returns the smoothed (stabilizing) motion for frame idx: the weighted sum
// of motions from idx to each neighbor in [-radius, radius].
Mat GaussianMotionFilter::apply(int idx, vector<Mat> &motions) const
{
    const Mat &cur = at(idx, motions);
    Mat smoothed = Mat::zeros(cur.size(), cur.type());
    for (int i = -radius_; i <= radius_; ++i)
        smoothed += weight_[radius_ + i] * getMotion(idx, idx + i, motions);
    return smoothed;
}
} // namespace videostab
} // namespace cv
#include "precomp.hpp"
#include "opencv2/gpu/gpu.hpp"
#include "opencv2/video/video.hpp"
#include "opencv2/videostab/optical_flow.hpp"
using namespace std;
namespace cv
{
namespace videostab
{
// Sparse pyramidal Lucas-Kanade tracking: tracks points0 from frame0 into
// frame1, writing tracked positions into points1, per-point success flags
// into status and per-point errors into errors, using the configured window
// size and pyramid depth.
void SparsePyrLkOptFlowEstimator::run(
        InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,
        OutputArray status, OutputArray errors)
{
    calcOpticalFlowPyrLK(frame0, frame1, points0, points1, status, errors, winSize_, maxLevel_);
}
// This estimator runs entirely on the GPU, so construction fails (assert)
// when no CUDA-capable device is available.
DensePyrLkOptFlowEstimatorGpu::DensePyrLkOptFlowEstimatorGpu()
{
    CV_Assert(gpu::getCudaEnabledDeviceCount() > 0);
}
// Dense pyramidal Lucas-Kanade flow on the GPU: uploads both frames, runs
// the GPU LK estimator and downloads the per-pixel flow components (and,
// when requested, per-pixel errors) back into the host output arrays.
void DensePyrLkOptFlowEstimatorGpu::run(
        InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY,
        OutputArray errors)
{
    frame0_.upload(frame0.getMat());
    frame1_.upload(frame1.getMat());

    optFlowEstimator_.winSize = winSize_;
    optFlowEstimator_.maxLevel = maxLevel_;

    if (errors.needed())
    {
        // Caller asked for errors: compute and download them too.
        optFlowEstimator_.dense(frame0_, frame1_, flowX_, flowY_, &errors_);
        errors_.download(errors.getMatRef());
    }
    else
        optFlowEstimator_.dense(frame0_, frame1_, flowX_, flowY_);

    flowX_.download(flowX.getMatRef());
    flowY_.download(flowY.getMatRef());
}
} // namespace videostab
} // namespace cv
#ifndef __OPENCV_PRECOMP_HPP__
#define __OPENCV_PRECOMP_HPP__
#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
#include <stdexcept>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/video/video.hpp"
#include "opencv2/features2d/features2d.hpp"
// Returns the square of its argument.
inline float sqr(float value) { return value * value; }
// Approximate luma of a BGR pixel using Rec. 601-style weights
// (0.3*R + 0.59*G + 0.11*B). Point3_ here stores channels in BGR order:
// x = blue, y = green, z = red.
// BUGFIX: the original applied the red weight (0.3) to the blue channel and
// the blue weight (0.11) to the red channel.
inline float intensity(const cv::Point3_<uchar> &bgr)
{
    return 0.11f*bgr.x + 0.59f*bgr.y + 0.3f*bgr.z;
}
// Circular-buffer indexing: wraps an arbitrary (possibly negative) index
// into [0, items.size()) via cv::BORDER_WRAP, so ring buffers of frames and
// motions can be addressed by absolute frame position.
template <typename T> inline T& at(int index, std::vector<T> &items)
{
    return items[cv::borderInterpolate(index, items.size(), cv::BORDER_WRAP)];
}
// Const overload of the circular-buffer accessor above: wraps the index into
// [0, items.size()) via cv::BORDER_WRAP.
template <typename T> inline const T& at(int index, const std::vector<T> &items)
{
    return items[cv::borderInterpolate(index, items.size(), cv::BORDER_WRAP)];
}
#endif
#include "precomp.hpp"
#include "opencv2/videostab/stabilizer.hpp"
using namespace std;
namespace cv
{
namespace videostab
{
// Wires up the default pipeline: robust pyramidal-LK global motion
// estimation, Gaussian motion smoothing (radius 15, stdev = sqrt(15)),
// no deblurring, no inpainting, automatic trim ratio estimation, no
// inclusion constraint, replicate-border warping and silent logging.
Stabilizer::Stabilizer()
{
    setFrameSource(new NullFrameSource());
    setMotionEstimator(new PyrLkRobustMotionEstimator());
    setMotionFilter(new GaussianMotionFilter(15, sqrt(15)));
    setDeblurer(new NullDeblurer());
    setInpainter(new NullInpainter());
    setEstimateTrimRatio(true);
    setTrimRatio(0);
    setInclusionConstraint(false);
    setBorderMode(BORDER_REPLICATE);
    setLog(new NullLog());
}
// Returns the stabilizer to its pre-first-frame state, dropping all cached
// frames, motions and derived data.
void Stabilizer::reset()
{
    curPos_ = -1;
    curStabilizedPos_ = -1;
    radius_ = 0;
    auxPassWasDone_ = false;
    doDeblurring_ = false;
    doInpainting_ = false;
    frames_.clear();
    motions_.clear();
    stabilizedFrames_.clear();
    stabilizationMotions_.clear();
}
// Returns the next stabilized (and trimmed) frame, or an empty Mat when the
// source is exhausted. On the first call, optionally performs an auxiliary
// pass over the whole video to estimate motions and the trim ratio, then
// rewinds the source.
Mat Stabilizer::nextFrame()
{
    if (mustEstimateTrimRatio_ && !auxPassWasDone_)
    {
        estimateMotionsAndTrimRatio();
        auxPassWasDone_ = true;
        frameSource_->reset();
    }

    if (curStabilizedPos_ == curPos_ && curStabilizedPos_ != -1)
        return Mat(); // we've processed all frames already

    bool processed;
    do {
        // Keep consuming input until the first stabilized frame appears
        // (the output lags the input by the smoothing radius).
        processed = processNextFrame();
    } while (processed && curStabilizedPos_ == -1);

    if (curStabilizedPos_ == -1)
        return Mat(); // frame source is empty

    // Cut trimRatio_ of the width/height off each side.
    const Mat &stabilizedFrame = at(curStabilizedPos_, stabilizedFrames_);
    int dx = floor(trimRatio_ * stabilizedFrame.cols);
    int dy = floor(trimRatio_ * stabilizedFrame.rows);
    return stabilizedFrame(Rect(dx, dy, stabilizedFrame.cols - 2*dx, stabilizedFrame.rows - 2*dy));
}
// Auxiliary pass: reads the whole video once, estimating all inter-frame
// motions and the smallest trim ratio that keeps every stabilized frame
// inside the original frame rectangle. Fills motions_ and
// stabilizationMotions_ for the main pass.
void Stabilizer::estimateMotionsAndTrimRatio()
{
    log_->print("estimating motions and trim ratio");

    Size size;
    Mat prevFrame, frame;
    int frameCount = 0;

    while (!(frame = frameSource_->nextFrame()).empty())
    {
        if (frameCount > 0)
            motions_.push_back(motionEstimator_->estimate(prevFrame, frame));
        else
            size = frame.size(); // remember frame size from the first frame
        prevFrame = frame;
        frameCount++;
        log_->print(".");
    }

    // Pad the tail with identity motions so smoothing near the end of the
    // sequence stays well-defined.
    radius_ = motionFilter_->radius();
    for (int i = 0; i < radius_; ++i)
        motions_.push_back(Mat::eye(3, 3, CV_32F));
    log_->print("\n");

    trimRatio_ = 0;
    for (int i = 0; i < frameCount; ++i)
    {
        // Stabilizing motion for frame i and the trim it would require.
        Mat S = motionFilter_->apply(i, motions_);
        trimRatio_ = std::max(trimRatio_, estimateOptimalTrimRatio(S, size));
        stabilizationMotions_.push_back(S);
    }

    log_->print("estimated trim ratio: %f\n", static_cast<double>(trimRatio_));
}
// One-time initialization on the first frame: allocates the ring buffers
// (2*radius + 1 slots), seeds the "past" slots with the first frame and
// identity motions, and enables deblurring/inpainting only when the
// configured objects are not the Null variants.
void Stabilizer::processFirstFrame(Mat &frame)
{
    log_->print("processing frames");

    frameSize_ = frame.size();
    frameMask_.create(frameSize_, CV_8U);
    frameMask_.setTo(255); // every source pixel is initially valid

    radius_ = motionFilter_->radius();
    int cacheSize = 2*radius_ + 1;

    frames_.resize(cacheSize);
    stabilizedFrames_.resize(cacheSize);
    stabilizedMasks_.resize(cacheSize);

    if (!auxPassWasDone_)
    {
        // Without the auxiliary pass, motions are estimated on the fly, so
        // only a ring buffer's worth is stored.
        motions_.resize(cacheSize);
        stabilizationMotions_.resize(cacheSize);
    }

    // Pretend the video started radius_ frames earlier with no motion.
    for (int i = -radius_; i < 0; ++i)
    {
        at(i, motions_) = Mat::eye(3, 3, CV_32F);
        at(i, frames_) = frame;
    }
    at(0, frames_) = frame;

    // Inpainting is active only if the user supplied a real inpainter.
    IInpainter *inpainter = static_cast<IInpainter*>(inpainter_);
    doInpainting_ = dynamic_cast<NullInpainter*>(inpainter) == 0;
    if (doInpainting_)
    {
        inpainter_->setRadius(radius_);
        inpainter_->setFrames(frames_);
        inpainter_->setMotions(motions_);
        inpainter_->setStabilizedFrames(stabilizedFrames_);
        inpainter_->setStabilizationMotions(stabilizationMotions_);
    }

    // Likewise, deblurring is active only for a real deblurer.
    IDeblurer *deblurer = static_cast<IDeblurer*>(deblurer_);
    doDeblurring_ = dynamic_cast<NullDeblurer*>(deblurer) == 0;
    if (doDeblurring_)
    {
        blurrinessRates_.resize(cacheSize);
        float blurriness = calcBlurriness(frame);
        for (int i = -radius_; i <= 0; ++i)
            at(i, blurrinessRates_) = blurriness;
        deblurer_->setRadius(radius_);
        deblurer_->setFrames(frames_);
        deblurer_->setMotions(motions_);
        deblurer_->setBlurrinessRates(blurrinessRates_);
    }
}
// Advances the pipeline by one step; returns true while progress was made.
// While input remains: caches the new frame (and its blurriness), estimates
// the previous->current motion and, once the look-ahead window is full
// (curPos_ >= radius_), stabilizes the frame radius_ positions behind.
// After the input ends: flushes the remaining tail, padding the ring
// buffers with the last seen frame and motion.
bool Stabilizer::processNextFrame()
{
    Mat frame = frameSource_->nextFrame();
    if (!frame.empty())
    {
        curPos_++;

        if (curPos_ > 0)
        {
            at(curPos_, frames_) = frame;

            if (doDeblurring_)
                at(curPos_, blurrinessRates_) = calcBlurriness(frame);

            if (!auxPassWasDone_)
            {
                // Estimate motion from the previous frame to this one.
                Mat motionPrevToCur = motionEstimator_->estimate(
                        at(curPos_ - 1, frames_), at(curPos_, frames_));
                at(curPos_ - 1, motions_) = motionPrevToCur;
            }

            if (curPos_ >= radius_)
            {
                // Enough look-ahead collected: emit the delayed frame.
                curStabilizedPos_ = curPos_ - radius_;
                stabilizeFrame(curStabilizedPos_);
            }
        }
        else
            processFirstFrame(frame);

        log_->print(".");
        return true;
    }

    // Input exhausted: keep stabilizing the tail of the look-ahead window.
    if (curStabilizedPos_ < curPos_)
    {
        curStabilizedPos_++;
        at(curStabilizedPos_ + radius_, frames_) = at(curPos_, frames_);
        at(curStabilizedPos_ + radius_ - 1, motions_) = at(curPos_ - 1, motions_);
        stabilizeFrame(curStabilizedPos_);
        log_->print(".");
        return true;
    }

    return false;
}
// Stabilizes frame idx: obtains its stabilizing motion (smoothing on the
// fly, or reusing the auxiliary-pass result), optionally clamps it with the
// inclusion constraint, deblurs the source if enabled, warps it by the
// stabilizing motion and finally inpaints areas the warp exposed.
void Stabilizer::stabilizeFrame(int idx)
{
    Mat stabMotion;
    if (!auxPassWasDone_)
        stabMotion = motionFilter_->apply(idx, motions_);
    else
        stabMotion = at(idx, stabilizationMotions_);

    // Clamp the motion so the trimmed rectangle stays inside the source
    // frame; skipped when the trim ratio is being auto-estimated.
    if (inclusionConstraint_ && !mustEstimateTrimRatio_)
        stabMotion = ensureInclusionConstraint(stabMotion, frameSize_, trimRatio_);

    at(idx, stabilizationMotions_) = stabMotion;

    if (doDeblurring_)
    {
        // Deblur a copy so the cached original frame stays untouched.
        at(idx, frames_).copyTo(preProcessedFrame_);
        deblurer_->deblur(idx, preProcessedFrame_);
    }
    else
        preProcessedFrame_ = at(idx, frames_);

    // apply stabilization transformation
    warpAffine(
            preProcessedFrame_, at(idx, stabilizedFrames_), stabMotion(Rect(0,0,3,2)),
            frameSize_, INTER_LINEAR, borderMode_);

    if (doInpainting_)
    {
        // Mask of pixels that came from inside the source frame, eroded to
        // drop border interpolation artifacts.
        warpAffine(
                frameMask_, at(idx, stabilizedMasks_), stabMotion(Rect(0,0,3,2)), frameSize_,
                INTER_NEAREST);
        erode(at(idx, stabilizedMasks_), at(idx, stabilizedMasks_), Mat());
        at(idx, stabilizedMasks_).copyTo(inpaintingMask_);
        inpainter_->inpaint(idx, at(idx, stabilizedFrames_), inpaintingMask_);
    }
}
} // namespace videostab
} // namespace cv
SET(OPENCV_CPP_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc
    opencv_highgui opencv_ml opencv_video opencv_objdetect opencv_photo opencv_nonfree
    opencv_features2d opencv_calib3d opencv_legacy opencv_contrib opencv_stitching opencv_videostab)
ocv_check_dependencies(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})
#include <string>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include "opencv2/core/core.hpp"
#include "opencv2/video/video.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/videostab/videostab.hpp"
using namespace std;
using namespace cv;
using namespace cv::videostab;
// Shared between main() (configuration) and run() (the processing loop).
Ptr<Stabilizer> stabilizer;  // fully configured stabilization pipeline
double outputFps;            // FPS for the output video (source FPS by default)
string outputPath;           // output file path; empty means "don't write"
void run();
void printHelp();
// Pulls stabilized frames from the pipeline until it is exhausted, showing
// each one and (if an output path is set) appending it to an XVID-encoded
// video file. Esc aborts early.
void run()
{
    VideoWriter writer;
    Mat stabilizedFrame;

    for (;;)
    {
        stabilizedFrame = stabilizer->nextFrame();
        if (stabilizedFrame.empty())
            break;

        if (!outputPath.empty())
        {
            // Lazily open the writer once the (trimmed) frame size is known.
            if (!writer.isOpened())
                writer.open(outputPath, CV_FOURCC('X','V','I','D'), outputFps, stabilizedFrame.size());
            writer << stabilizedFrame;
        }

        imshow("stabilizedFrame", stabilizedFrame);
        char key = static_cast<char>(waitKey(3));
        if (key == 27) // Esc
            break;
    }

    cout << "\nfinished\n";
}
// Prints usage information for the videostab sample to stdout.
// Fixes several typos in the user-facing text ("estiamted", "defailt",
// "explicitely", "but may want").
void printHelp()
{
    std::cout << "OpenCV video stabilizer.\n"
            "Usage: videostab <file_path> [arguments]\n\n"
            "Arguments:\n"
            " -m, --model=(transl|transl_and_scale|affine)\n"
            " Set motion model. The default is affine.\n"
            " --outlier-ratio=<float_number>\n"
            " Outliers ratio in motion estimation. The default is 0.5.\n"
            " --min-inlier-ratio=<float_number>\n"
            " Minimum inlier ratio to decide if estimated motion is OK. The default is 0.1,\n"
            " but you may want to increase it.\n"
            " -r, --radius=<int_number>\n"
            " Set smoothing radius. The default is 15.\n"
            " --stdev=<float_number>\n"
            " Set smoothing weights standard deviation. The default is sqrt(radius).\n"
            " --deblur=(yes|no)\n"
            " Do deblurring.\n"
            " --deblur-sens=<float_number>\n"
            " Set deblurring sensitivity (from 0 to +inf). The default is 0.1.\n"
            " -t, --trim-ratio=<float_number>\n"
            " Set trimming ratio (from 0 to 0.5). The default is 0.\n"
            " --est-trim=(yes|no)\n"
            " Estimate trim ratio automatically. The default is yes.\n"
            " --incl-constr=(yes|no)\n"
            " Ensure the inclusion constraint is always satisfied. The default is no.\n"
            " --border-mode=(replicate|const)\n"
            " Set border extrapolation mode. The default is replicate.\n"
            " --mosaic=(yes|no)\n"
            " Do consistent mosaicing. The default is no.\n"
            " --mosaic-stdev=<float_number>\n"
            " Consistent mosaicing stdev threshold. The default is 10.\n"
            " --motion-inpaint=(yes|no)\n"
            " Do motion inpainting (requires GPU support). The default is no.\n"
            " --color-inpaint=(yes|no)\n"
            " Do color inpainting. The default is no.\n"
            " -o, --output=<file_path>\n"
            " Set output file path explicitly. The default is stabilized.avi.\n"
            " --fps=<int_number>\n"
            " Set output video FPS explicitly. By default the source FPS is used.\n"
            " -h, --help\n"
            " Print help.\n"
            "\n";
}
int main(int argc, const char **argv)
{
try
{
const char *keys =
"{ 1 | | | | }"
"{ m | model | | }"
"{ | min-inlier-ratio | | }"
"{ | outlier-ratio | | }"
"{ r | radius | | }"
"{ | stdev | | }"
"{ | deblur | | }"
"{ | deblur-sens | | }"
"{ | est-trim | | }"
"{ t | trim-ratio | | }"
"{ | incl-constr | | }"
"{ | border-mode | | }"
"{ | mosaic | | }"
"{ | mosaic-stdev | | }"
"{ | motion-inpaint | | }"
"{ | color-inpaint | | }"
"{ o | output | stabilized.avi | }"
"{ | fps | | }"
"{ h | help | false | }";
CommandLineParser cmd(argc, argv, keys);
// parse command arguments
if (cmd.get<bool>("help"))
{
printHelp();
return 0;
}
stabilizer = new Stabilizer();
string inputPath = cmd.get<string>("1");
if (inputPath.empty())
throw runtime_error("specify video file path");
VideoFileSource *frameSource = new VideoFileSource(inputPath);
outputFps = frameSource->fps();
stabilizer->setFrameSource(frameSource);
cout << "frame count: " << frameSource->frameCount() << endl;
PyrLkRobustMotionEstimator *motionEstimator = new PyrLkRobustMotionEstimator();
if (cmd.get<string>("model") == "transl")
motionEstimator->setMotionModel(TRANSLATION);
else if (cmd.get<string>("model") == "transl_and_scale")
motionEstimator->setMotionModel(TRANSLATION_AND_SCALE);
else if (cmd.get<string>("model") == "affine")
motionEstimator->setMotionModel(AFFINE);
else if (!cmd.get<string>("model").empty())
throw runtime_error("unknow motion mode: " + cmd.get<string>("model"));
if (!cmd.get<string>("outlier-ratio").empty())
{
RansacParams ransacParams = motionEstimator->ransacParams();
ransacParams.eps = atof(cmd.get<string>("outlier-ratio").c_str());
motionEstimator->setRansacParams(ransacParams);
}
if (!cmd.get<string>("min-inlier-ratio").empty())
motionEstimator->setMinInlierRatio(atof(cmd.get<string>("min-inlier-ratio").c_str()));
stabilizer->setMotionEstimator(motionEstimator);
int smoothRadius = -1;
float smoothStdev = -1;
if (!cmd.get<string>("radius").empty())
smoothRadius = atoi(cmd.get<string>("radius").c_str());
if (!cmd.get<string>("stdev").empty())
smoothStdev = atof(cmd.get<string>("stdev").c_str());
if (smoothRadius > 0 && smoothStdev > 0)
stabilizer->setMotionFilter(new GaussianMotionFilter(smoothRadius, smoothStdev));
else if (smoothRadius > 0 && smoothStdev < 0)
stabilizer->setMotionFilter(new GaussianMotionFilter(smoothRadius, sqrt(smoothRadius)));
if (cmd.get<string>("deblur") == "yes")
{
WeightingDeblurer *deblurer = new WeightingDeblurer();
if (!cmd.get<string>("deblur-sens").empty())
deblurer->setSensitivity(atof(cmd.get<string>("deblur-sens").c_str()));
stabilizer->setDeblurer(deblurer);
}
if (!cmd.get<string>("est-trim").empty())
stabilizer->setEstimateTrimRatio(cmd.get<string>("est-trim") == "yes");
if (!cmd.get<string>("trim-ratio").empty())
stabilizer->setTrimRatio(atof(cmd.get<string>("trim-ratio").c_str()));
stabilizer->setInclusionConstraint(cmd.get<string>("incl-constr") == "yes");
if (cmd.get<string>("border-mode") == "replicate")
stabilizer->setBorderMode(BORDER_REPLICATE);
else if (cmd.get<string>("border-mode") == "const")
stabilizer->setBorderMode(BORDER_CONSTANT);
else if (!cmd.get<string>("border-mode").empty())
throw runtime_error("unknown border extrapolation mode: " + cmd.get<string>("border-mode"));
InpaintingPipeline *inpainters = new InpaintingPipeline();
if (cmd.get<string>("mosaic") == "yes")
{
ConsistentMosaicInpainter *inpainter = new ConsistentMosaicInpainter();
if (!cmd.get<string>("mosaic-stdev").empty())
inpainter->setStdevThresh(atof(cmd.get<string>("mosaic-stdev").c_str()));
inpainters->pushBack(inpainter);
}
if (cmd.get<string>("motion-inpaint") == "yes")
inpainters->pushBack(new MotionInpainter());
if (cmd.get<string>("color-inpaint") == "yes")
inpainters->pushBack(new ColorAverageInpainter());
if (!inpainters->empty())
stabilizer->setInpainter(inpainters);
stabilizer->setLog(new LogToStdout());
outputPath = cmd.get<string>("output");
if (!cmd.get<string>("fps").empty())
outputFps = atoi(cmd.get<string>("fps").c_str());
// run video processing
run();
}
catch (const exception &e)
{
cout << e.what() << endl;
stabilizer.release();
return -1;
}
stabilizer.release();
return 0;
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment