Commit 7ac648c2 authored by Tobias Senst, committed by Alexander Alekhin

Merge pull request #2097 from tsenst:robust_optical_flow_fix

optflow: RLOF fixes (python-binding, status flag, flat textures) (#2097)

* Bugfix for the Python bindings related to issue #2094: copying prevPtsMat to prevPoints was broken when called through the Python bindings.

* Connect the blurred image correctly to the CROSS support-region segmentation. This allows more robust support-region shapes to be computed. Refers to issue #2094.

* Bugfix to avoid the unintended assertion on flat textures discussed in issue #2094.

* Bugfix: the SparseRLOFOpticalFlow status flag was wrongly set to 0 when the forward-backward error was below the threshold. The inequality has been corrected so that status is cleared only when the error exceeds the threshold (see the usage sketch below). Refers to issue #2087.

* Small documentation changes

* Bugfix on assertions and small test changes
parent 644f4289
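The status-flag correction (last rlofflow.cpp hunk below) changes what callers can rely on: after this fix, a point's status is cleared only when its forward-backward error exceeds the threshold. A minimal C++ usage sketch, assuming a standard opencv_contrib build, two CV_8UC3 frames, and illustrative parameter values:

#include <opencv2/imgproc.hpp>
#include <opencv2/optflow/rlofflow.hpp>
#include <vector>

// Track sparse points with RLOF; with this fix, status[i] == 0 only for
// points whose forward-backward error exceeds the given threshold.
void trackSparseRLOF(const cv::Mat& prevRgb, const cv::Mat& currRgb)
{
    cv::Mat prevGray;
    cv::cvtColor(prevRgb, prevGray, cv::COLOR_BGR2GRAY);

    std::vector<cv::Point2f> prevPts, nextPts;
    cv::goodFeaturesToTrack(prevGray, prevPts, 500, 0.01, 7);

    std::vector<uchar> status;
    std::vector<float> err;
    cv::Ptr<cv::optflow::RLOFOpticalFlowParameter> param =
        cv::optflow::RLOFOpticalFlowParameter::create();

    // forward-backward threshold of 1 px (illustrative)
    cv::optflow::calcOpticalFlowSparseRLOF(prevRgb, currRgb, prevPts, nextPts,
                                           status, err, param, 1.f);

    for (size_t i = 0; i < prevPts.size(); ++i)
        if (status[i])
        {
            // keep track i: prevPts[i] -> nextPts[i]
        }
}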
......@@ -35,7 +35,8 @@ enum InterpolationType
/** @brief This is used store and set up the parameters of the robust local optical flow (RLOF) algoritm.
*
* The RLOF is a fast local optical flow approach similar to the pyramidal iterative Lucas-Kanade method as
* The RLOF is a fast local optical flow approach described in @cite Senst2012 @cite Senst2013 @cite Senst2014
* and @cite Senst2016 similar to the pyramidal iterative Lucas-Kanade method as
* proposed by @cite Bouguet00. The implementation is derived from optflow::calcOpticalFlowPyrLK().
* This RLOF implementation can be seen as an improved pyramidal iterative Lucas-Kanade and includes
* a set of improving modules. The main improvements in respect to the pyramidal iterative Lucas-Kanade
......@@ -197,7 +198,8 @@ public:
/** @brief Fast dense optical flow computation based on robust local optical flow (RLOF) algorithms and sparse-to-dense interpolation
* scheme.
*
* The RLOF is a fast local optical flow approach similar to the pyramidal iterative Lucas-Kanade method as
* The RLOF is a fast local optical flow approach described in @cite Senst2012 @cite Senst2013 @cite Senst2014
* and @cite Senst2016 similar to the pyramidal iterative Lucas-Kanade method as
* proposed by @cite Bouguet00. The implementation is derived from optflow::calcOpticalFlowPyrLK().
*
* The sparse-to-dense interpolation scheme allows for fast computation of dense optical flow using RLOF (see @cite Geistert2016).
......@@ -350,7 +352,8 @@ public:
/** @brief Class used for calculation sparse optical flow and feature tracking with robust local optical flow (RLOF) algorithms.
*
* The RLOF is a fast local optical flow approach similar to the pyramidal iterative Lucas-Kanade method as
* The RLOF is a fast local optical flow approach described in @cite Senst2012 @cite Senst2013 @cite Senst2014
* and @cite Senst2016 similar to the pyramidal iterative Lucas-Kanade method as
* proposed by @cite Bouguet00. The implementation is derived from optflow::calcOpticalFlowPyrLK().
*
* For the RLOF configuration see optflow::RLOFOpticalFlowParameter for further details.
......@@ -396,7 +399,8 @@ public:
/** @brief Fast dense optical flow computation based on robust local optical flow (RLOF) algorithms and sparse-to-dense interpolation scheme.
*
* The RLOF is a fast local optical flow approach similar to the pyramidal iterative Lucas-Kanade method as
* The RLOF is a fast local optical flow approach described in @cite Senst2012 @cite Senst2013 @cite Senst2014
* and @cite Senst2016 similar to the pyramidal iterative Lucas-Kanade method as
* proposed by @cite Bouguet00. The implementation is derived from optflow::calcOpticalFlowPyrLK().
*
* The sparse-to-dense interpolation scheme allows for fast computation of dense optical flow using RLOF (see @cite Geistert2016).
......@@ -452,7 +456,8 @@ CV_EXPORTS_W void calcOpticalFlowDenseRLOF(InputArray I0, InputArray I1, InputOu
/** @brief Calculates fast optical flow for a sparse feature set using the robust local optical flow (RLOF) similar
* to optflow::calcOpticalFlowPyrLK().
*
* The RLOF is a fast local optical flow approach similar to the pyramidal iterative Lucas-Kanade method as
* The RLOF is a fast local optical flow approach described in @cite Senst2012 @cite Senst2013 @cite Senst2014
* and @cite Senst2016 similar to the pyramidal iterative Lucas-Kanade method as
* proposed by @cite Bouguet00. The implementation is derived from optflow::calcOpticalFlowPyrLK().
*
* @param prevImg first 8-bit input image. If The cross-based RLOF is used (by selecting optflow::RLOFOpticalFlowParameter::supportRegionType
......
......@@ -102,13 +102,13 @@ void getLocalPatch(
bounds = src.at<cv::Vec4i>(_r,c);
roi.x = bounds.val[0] - border_left;
roi.width = bounds.val[1] - bounds.val[0];
cv::Mat(winPointMask, roi).setTo(1);
}
else
{
bounds.val[0] = border_left;
bounds.val[1] = border_left + roi.width;
}
cv::Mat(winPointMask, roi).setTo(1);
min_c = MIN(min_c, bounds.val[0]);
max_c = MAX(max_c, bounds.val[1]);
noPoints += roi.width;
......
......@@ -354,10 +354,14 @@ int buildOpticalFlowPyramidScale(InputArray _img, OutputArrayOfArrays pyramid, S
return maxLevel;
}
int CImageBuffer::buildPyramid(cv::Size winSize, int maxLevel, float levelScale[2])
int CImageBuffer::buildPyramid(cv::Size winSize, int maxLevel, float levelScale[2],bool withBlurredImage )
{
if (m_Overwrite == false)
if (! m_Overwrite)
return m_maxLevel;
if (withBlurredImage)
m_maxLevel = buildOpticalFlowPyramidScale(m_BlurredImage, m_ImagePyramid, winSize, maxLevel, false, 4, 0, true, levelScale);
else
m_maxLevel = buildOpticalFlowPyramidScale(m_Image, m_ImagePyramid, winSize, maxLevel, false, 4, 0, true, levelScale);
return m_maxLevel;
}
......@@ -407,12 +411,12 @@ void calcLocalOpticalFlowCore(
float levelScale[2] = { 2.f,2.f };
int maxLevel = prevPyramids[0]->buildPyramid(cv::Size(iWinSize, iWinSize), param.maxLevel, levelScale);
maxLevel = currPyramids[0]->buildPyramid(cv::Size(iWinSize, iWinSize), maxLevel, levelScale);
if (useAdditionalRGB)
{
prevPyramids[1]->buildPyramid(cv::Size(iWinSize, iWinSize), maxLevel, levelScale);
currPyramids[1]->buildPyramid(cv::Size(iWinSize, iWinSize), maxLevel, levelScale);
prevPyramids[1]->buildPyramid(cv::Size(iWinSize, iWinSize), maxLevel, levelScale, true);
currPyramids[1]->buildPyramid(cv::Size(iWinSize, iWinSize), maxLevel, levelScale, true);
}
if ((criteria.type & TermCriteria::COUNT) == 0)
......@@ -661,7 +665,8 @@ void calcLocalOpticalFlow(
prevPyramids[0]->m_Overwrite = true;
currPyramids[0]->m_Overwrite = true;
prevPyramids[1]->m_Overwrite = true;
currPyramids[1]->m_Overwrite = true;
// perform blurring and build blur pyramid only for the prev image
currPyramids[1]->m_Overwrite = false;
if (prevImage.type() == CV_8UC3)
{
prevPyramids[0]->setGrayFromRGB(prevImage);
......
......@@ -93,7 +93,7 @@ public:
cv::GaussianBlur(inp, m_BlurredImage, cv::Size(7,7), -1);
}
int buildPyramid(cv::Size winSize, int maxLevel, float levelScale[2]);
int buildPyramid(cv::Size winSize, int maxLevel, float levelScale[2], bool withBlurredImage = false);
cv::Mat & getImage(int level) {return m_ImagePyramid[level];}
std::vector<cv::Mat> m_ImagePyramid;
......
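The new withBlurredImage pyramid only matters when the cross-based support region is enabled. A hedged configuration sketch using the public setters from rlofflow.hpp (the helper name and threshold value are illustrative):

#include <opencv2/optflow/rlofflow.hpp>

// Build an RLOF parameter set that uses the cross-based support region,
// which is the path fed by the blurred-image pyramid above.
cv::Ptr<cv::optflow::RLOFOpticalFlowParameter> makeCrossRLOFParam()
{
    cv::Ptr<cv::optflow::RLOFOpticalFlowParameter> p =
        cv::optflow::RLOFOpticalFlowParameter::create();
    p->setSupportRegionType(cv::optflow::SR_CROSS);  // enable CROSS segmentation
    p->setCrossSegmentationThreshold(25);            // color similarity threshold
    return p;
}

The returned parameter object can be handed to calcOpticalFlowSparseRLOF or to the dense RLOF classes in the same way as the default parameters.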
......@@ -303,7 +303,6 @@ class SparseRLOFOpticalFlowImpl : public SparseRLOFOpticalFlow
int npoints = 0;
CV_Assert((npoints = prevPtsMat.checkVector(2, CV_32F, true)) >= 0);
if (npoints == 0)
{
nextPts.release();
......@@ -311,14 +310,22 @@ class SparseRLOFOpticalFlowImpl : public SparseRLOFOpticalFlow
err.release();
return;
}
Mat nextPtsMat = nextPts.getMat();
CV_Assert(nextPtsMat.checkVector(2, CV_32F, true) == npoints);
std::vector<cv::Point2f> prevPoints(npoints), nextPoints(npoints), refPoints;
prevPtsMat.copyTo(cv::Mat(1, npoints, CV_32FC2, &prevPoints[0]));
if (param->useInitialFlow )
nextPtsMat.copyTo(cv::Mat(1, nextPtsMat.cols, CV_32FC2, &nextPoints[0]));
if (prevPtsMat.channels() != 2)
prevPtsMat = prevPtsMat.reshape(2, npoints);
prevPtsMat.copyTo(prevPoints);
if (param->useInitialFlow )
{
if (nextPtsMat.channels() != 2)
nextPtsMat = nextPtsMat.reshape(2, npoints);
nextPtsMat.copyTo(nextPoints);
}
cv::Mat statusMat;
cv::Mat errorMat;
if (status.needed() || forwardBackwardThreshold > 0)
......@@ -346,7 +353,7 @@ class SparseRLOFOpticalFlowImpl : public SparseRLOFOpticalFlow
{
Point2f diff = refPoints[r] - prevPoints[r];
errorMat.at<float>(r) = sqrt(diff.x * diff.x + diff.y * diff.y);
if (errorMat.at<float>(r) <= forwardBackwardThreshold)
if (errorMat.at<float>(r) > forwardBackwardThreshold)
statusMat.at<uchar>(r) = 0;
}
......
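The binding fix above replaces the raw-buffer copy with reshape()/copyTo() into std::vector<cv::Point2f>, which also accepts the Nx2 single-channel matrices produced by the Python bindings. A standalone sketch of that conversion (the helper name is illustrative):

#include <opencv2/core.hpp>
#include <vector>

// Convert an Nx2 CV_32F or 1xN CV_32FC2 point matrix into a point vector,
// mirroring the pattern used in the fix above.
std::vector<cv::Point2f> toPointVector(cv::Mat pts)
{
    int npoints = pts.checkVector(2, CV_32F, true);
    CV_Assert(npoints >= 0);
    std::vector<cv::Point2f> out(npoints);
    if (npoints == 0)
        return out;
    if (pts.channels() != 2)          // Nx2 single channel -> Nx1 two channel
        pts = pts.reshape(2, npoints);
    pts.copyTo(out);
    return out;
}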
......@@ -260,7 +260,7 @@ TEST(DenseOpticalFlow_RLOF, ReferenceAccuracy)
ASSERT_EQ(GT.rows, flow.rows);
ASSERT_EQ(GT.cols, flow.cols);
EXPECT_LE(calcRMSE(GT, flow), 0.44f);
EXPECT_LE(calcRMSE(GT, flow), 0.46f);
algo->setInterpolation(INTERP_GEO);
algo->calc(frame1, frame2, flow);
......
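For reference, the dense path exercised by the adjusted test can be driven as follows (a minimal sketch; frames are assumed to be CV_8UC3 and of equal size):

#include <opencv2/optflow/rlofflow.hpp>

// Dense RLOF with sparse-to-dense interpolation, as in the test above.
void denseRLOF(const cv::Mat& frame1, const cv::Mat& frame2, cv::Mat& flow)
{
    cv::Ptr<cv::optflow::DenseRLOFOpticalFlow> algo =
        cv::optflow::DenseRLOFOpticalFlow::create();
    algo->setInterpolation(cv::optflow::INTERP_EPIC);  // the test also runs INTERP_GEO
    algo->calc(frame1, frame2, flow);                  // flow: CV_32FC2, same size as input
}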