Commit aee6a617 authored by Vadim Pisarevsky, committed by OpenCV Buildbot

Merge pull request #897 from bitwangyaoyao:2.4_TVL1

parents 528db000 4162ebfa
@@ -407,6 +407,9 @@ namespace cv
//! computes element-wise product of the two arrays (c = a * b)
// supports all types except CV_8SC1, CV_8SC2, CV_8SC3 and CV_8SC4
CV_EXPORTS void multiply(const oclMat &a, const oclMat &b, oclMat &c, double scale = 1);
//! multiplies a matrix by a scalar (dst = scalar * src)
// supports CV_32FC1 only
CV_EXPORTS void multiply(double scalar, const oclMat &src, oclMat &dst);
//! computes element-wise quotient of the two arrays (c = a / b)
// supports all types except CV_8SC1, CV_8SC2, CV_8SC3 and CV_8SC4
CV_EXPORTS void divide(const oclMat &a, const oclMat &b, oclMat &c, double scale = 1);
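For illustration, a minimal sketch of how the new scalar overload above might be used (hedged: the matrix size and scale factor are made up, and per the comment only CV_32FC1 input is supported):

```cpp
#include "opencv2/ocl/ocl.hpp"

// Sketch only: illustrative values, assumes an OpenCL device/context is available.
void scale_by_constant_example()
{
    cv::Mat host = cv::Mat::ones(4, 4, CV_32FC1);   // CV_32FC1 only, per the header comment
    cv::ocl::oclMat src(host), dst;

    cv::ocl::multiply(2.5, src, dst);               // dst = 2.5 * src, the overload added above

    cv::Mat result;
    dst.download(result);                           // every element of result is now 2.5f
}
```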
@@ -1372,6 +1375,7 @@ namespace cv
private:
oclMat minSSD, leBuf, riBuf;
};
class CV_EXPORTS StereoBeliefPropagation
{
public:
@@ -1402,6 +1406,7 @@ namespace cv
std::vector<oclMat> datas;
oclMat out;
};
class CV_EXPORTS StereoConstantSpaceBP
{
public:
@@ -1440,6 +1445,94 @@ namespace cv
oclMat temp;
oclMat out;
};
// Implementation of the Zach, Pock and Bischof Dual TV-L1 Optical Flow method
//
// see reference:
// [1] C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow".
// [2] Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
class CV_EXPORTS OpticalFlowDual_TVL1_OCL
{
public:
OpticalFlowDual_TVL1_OCL();
void operator ()(const oclMat& I0, const oclMat& I1, oclMat& flowx, oclMat& flowy);
void collectGarbage();
/**
* Time step of the numerical scheme.
*/
double tau;
/**
* Weight parameter for the data term, attachment parameter.
* This is the most relevant parameter, which determines the smoothness of the output.
* The smaller this parameter is, the smoother the solutions we obtain.
* It depends on the range of motions of the images, so its value should be adapted to each image sequence.
*/
double lambda;
/**
* Weight parameter for (u - v)^2, tightness parameter.
* It serves as a link between the attachment and the regularization terms.
* In theory, it should have a small value in order to maintain both parts in correspondence.
* The method is stable for a large range of values of this parameter.
*/
double theta;
/**
* Number of scales used to create the pyramid of images.
*/
int nscales;
/**
* Number of warpings per scale.
* Represents the number of times that I1(x+u0) and grad( I1(x+u0) ) are computed per scale.
* This is a parameter that assures the stability of the method.
* It also affects the running time, so it is a compromise between speed and accuracy.
*/
int warps;
/**
* Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time.
* A small value will yield more accurate solutions at the expense of a slower convergence.
*/
double epsilon;
/**
* Stopping criterion: number of iterations used in the numerical scheme.
*/
int iterations;
bool useInitialFlow;
private:
void procOneScale(const oclMat& I0, const oclMat& I1, oclMat& u1, oclMat& u2);
std::vector<oclMat> I0s;
std::vector<oclMat> I1s;
std::vector<oclMat> u1s;
std::vector<oclMat> u2s;
oclMat I1x_buf;
oclMat I1y_buf;
oclMat I1w_buf;
oclMat I1wx_buf;
oclMat I1wy_buf;
oclMat grad_buf;
oclMat rho_c_buf;
oclMat p11_buf;
oclMat p12_buf;
oclMat p21_buf;
oclMat p22_buf;
oclMat diff_buf;
oclMat norm_buf;
};
}
}
#if defined _MSC_VER && _MSC_VER >= 1200
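For readers unfamiliar with the method, the references cited in the class comment ([1] Zach, Pock and Bischof; [2] Sanchez et al.) minimize a relaxed TV-L1 energy in which the documented parameters appear directly: lambda weights the data (attachment) term, theta couples the flow u to the auxiliary field v, tau is the time step of the dual (projection) updates, and epsilon/iterations bound the inner loop. A sketch of the relaxed functional, following those papers:

```latex
% Relaxed TV-L1 energy from [1],[2]; u = (u_1, u_2) is the flow, v an auxiliary field.
E(u, v) = \int_{\Omega} \left( |\nabla u_1| + |\nabla u_2|
        + \frac{1}{2\theta}\,\lVert u - v \rVert^2
        + \lambda\,|\rho(v)| \right) dx,
\qquad
\rho(v) = I_1(x + u_0) + \nabla I_1(x + u_0)\cdot(v - u_0) - I_0(x)
```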
@@ -22,6 +22,7 @@
// Jiang Liyuan, jlyuan001.good@163.com
// Rock Li, Rock.Li@amd.com
// Zailong Wu, bullet@yeah.net
// Peng Xiao, pengxiao@outlook.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
@@ -286,6 +287,7 @@ void cv::ocl::multiply(const oclMat &src1, const oclMat &src2, oclMat &dst, doub
else
arithmetic_run<float>(src1, src2, dst, "arithm_mul", &arithm_mul, (void *)(&scalar));
}
void cv::ocl::divide(const oclMat &src1, const oclMat &src2, oclMat &dst, double scalar)
{
@@ -468,6 +470,11 @@ void cv::ocl::subtract(const Scalar &src2, const oclMat &src1, oclMat &dst, cons
const char **kernelString = mask.data ? &arithm_add_scalar_mask : &arithm_add_scalar;
arithmetic_scalar( src1, src2, dst, mask, kernelName, kernelString, -1);
}
void cv::ocl::multiply(double scalar, const oclMat &src, oclMat &dst)
{
string kernelName = "arithm_muls";
arithmetic_scalar_run( src, dst, kernelName, &arithm_mul, scalar);
}
void cv::ocl::divide(double scalar, const oclMat &src, oclMat &dst)
{
if(!src.clCxt->supportsFeature(Context::CL_DOUBLE))
/*M///////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
@@ -7,12 +7,16 @@
// copy or use the software.
//
//
// Intel License Agreement
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Institute Of Software Chinese Academy Of Science, all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
//
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
@@ -21,9 +25,9 @@
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// and/or other oclMaterials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
@@ -51,6 +55,47 @@ using namespace testing;
using namespace std;
extern string workdir;
//////////////////////////////////////////////////////////////////////////
PARAM_TEST_CASE(TVL1, bool)
{
bool useRoi;
virtual void SetUp()
{
useRoi = GET_PARAM(0);
}
};
TEST_P(TVL1, Accuracy)
{
cv::Mat frame0 = readImage(workdir + "../gpu/rubberwhale1.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty());
cv::Mat frame1 = readImage(workdir + "../gpu/rubberwhale2.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
cv::ocl::OpticalFlowDual_TVL1_OCL d_alg;
cv::RNG &rng = TS::ptr()->get_rng();
cv::Mat flowx = randomMat(rng, frame0.size(), CV_32FC1, 0, 0, useRoi);
cv::Mat flowy = randomMat(rng, frame0.size(), CV_32FC1, 0, 0, useRoi);
cv::ocl::oclMat d_flowx(flowx), d_flowy(flowy);
d_alg(oclMat(frame0), oclMat(frame1), d_flowx, d_flowy);
cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
cv::Mat flow;
alg->calc(frame0, frame1, flow);
cv::Mat gold[2];
cv::split(flow, gold);
EXPECT_MAT_SIMILAR(gold[0], d_flowx, 3e-3);
EXPECT_MAT_SIMILAR(gold[1], d_flowy, 3e-3);
}
INSTANTIATE_TEST_CASE_P(OCL_Video, TVL1, Values(true, false));
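Outside the test harness above, a minimal call sequence for the new class might look like the sketch below (hedged: the image paths and parameter values are illustrative placeholders, not part of this change):

```cpp
#include "opencv2/ocl/ocl.hpp"
#include "opencv2/highgui/highgui.hpp"

// Sketch only: paths and parameter values are placeholders.
void tvl1_flow_example()
{
    cv::Mat frame0 = cv::imread("frame0.png", cv::IMREAD_GRAYSCALE);
    cv::Mat frame1 = cv::imread("frame1.png", cv::IMREAD_GRAYSCALE);

    cv::ocl::OpticalFlowDual_TVL1_OCL tvl1;
    tvl1.lambda  = 0.15;                 // attachment weight: smaller => smoother flow
    tvl1.nscales = 5;                    // pyramid levels (illustrative)

    cv::ocl::oclMat d_flowx, d_flowy;
    tvl1(cv::ocl::oclMat(frame0), cv::ocl::oclMat(frame1), d_flowx, d_flowy);

    cv::Mat flowx, flowy;
    d_flowx.download(flowx);             // per-pixel horizontal displacement, CV_32FC1
    d_flowy.download(flowy);

    tvl1.collectGarbage();               // release the intermediate pyramid/dual buffers
}
```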
/////////////////////////////////////////////////////////////////////////////////////////////////
// PyrLKOpticalFlow
PARAM_TEST_CASE(Sparse, bool, bool)
{
@@ -60,7 +105,7 @@ PARAM_TEST_CASE(Sparse, bool, bool)
virtual void SetUp()
{
UseSmart = GET_PARAM(0);
useGray = GET_PARAM(0);
useGray = GET_PARAM(1);
}
};
@@ -147,9 +192,9 @@ TEST_P(Sparse, Mat)
}
INSTANTIATE_TEST_CASE_P(Video, Sparse, Combine(
Values(false, true),
Values(false)));
INSTANTIATE_TEST_CASE_P(OCL_Video, Sparse, Combine(
Values(false, true),
Values(false, true)));
#endif // HAVE_OPENCL