Commit 8e3f1c09 authored by Vladislav Vinogradov

added support for 3-channel output to gpu::reprojectImageTo3D

minor refactoring of gpu tests
parent 07ec83cd
@@ -605,10 +605,10 @@ CV_EXPORTS void drawColorDisp(const GpuMat& src_disp, GpuMat& dst_disp, int ndis
//! Reprojects disparity image to 3D space.
//! Supports CV_8U and CV_16S types of input disparity.
//! The output is a 4-channel floating-point (CV_32FC4) matrix.
//! The output is a 3- or 4-channel floating-point matrix.
//! Each element of this matrix will contain the 3D coordinates of the point (x,y,z,1), computed from the disparity map.
//! Q is the 4x4 perspective transformation matrix that can be obtained with cvStereoRectify.
CV_EXPORTS void reprojectImageTo3D(const GpuMat& disp, GpuMat& xyzw, const Mat& Q, Stream& stream = Stream::Null());
CV_EXPORTS void reprojectImageTo3D(const GpuMat& disp, GpuMat& xyzw, const Mat& Q, int dst_cn = 4, Stream& stream = Stream::Null());
//! converts image from one color space to another
CV_EXPORTS void cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn = 0, Stream& stream = Stream::Null());
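For reference, a minimal usage sketch of the new overload; the function name and toy inputs below are illustrative and not part of this diff:

#include <opencv2/gpu/gpu.hpp>

static void reprojectExample()
{
    cv::Mat disp(480, 640, CV_8U, cv::Scalar(16));     // toy disparity map (CV_8U or CV_16S)
    cv::Mat Q = cv::Mat::eye(4, 4, CV_32F);            // stands in for the Q matrix from stereo rectification
    cv::gpu::GpuMat d_disp(disp), d_xyz;
    cv::gpu::reprojectImageTo3D(d_disp, d_xyz, Q, 3);  // dst_cn == 3 -> CV_32FC3 output
    cv::Mat xyz(d_xyz);                                // download; each pixel is a cv::Vec3f (X, Y, Z)
}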
......
@@ -316,62 +316,51 @@ namespace cv { namespace gpu { namespace device
__constant__ float cq[16];
template <typename T>
__global__ void reprojectImageTo3D(const T* disp, size_t disp_step, float* xyzw, size_t xyzw_step, int rows, int cols)
template <typename T, typename D>
__global__ void reprojectImageTo3D(const DevMem2D_<T> disp, PtrStep<D> xyz)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < rows && x < cols)
{
if (y >= disp.rows || x >= disp.cols)
return;
float qx = cq[1] * y + cq[3], qy = cq[5] * y + cq[7];
float qz = cq[9] * y + cq[11], qw = cq[13] * y + cq[15];
const float qx = x * cq[ 0] + y * cq[ 1] + cq[ 3];
const float qy = x * cq[ 4] + y * cq[ 5] + cq[ 7];
const float qz = x * cq[ 8] + y * cq[ 9] + cq[11];
const float qw = x * cq[12] + y * cq[13] + cq[15];
qx += x * cq[0];
qy += x * cq[4];
qz += x * cq[8];
qw += x * cq[12];
const T d = disp(y, x);
T d = *(disp + disp_step * y + x);
const float iW = 1.f / (qw + cq[14] * d);
float iW = 1.f / (qw + cq[14] * d);
float4 v;
v.x = (qx + cq[2] * d) * iW;
v.y = (qy + cq[6] * d) * iW;
v.z = (qz + cq[10] * d) * iW;
v.w = 1.f;
D v = VecTraits<D>::all(1.0f);
v.x = (qx + cq[2] * d) * iW;
v.y = (qy + cq[6] * d) * iW;
v.z = (qz + cq[10] * d) * iW;
*(float4*)(xyzw + xyzw_step * y + (x * 4)) = v;
}
xyz(y, x) = v;
}
template <typename T>
inline void reprojectImageTo3D_caller(const DevMem2D_<T>& disp, const DevMem2Df& xyzw, const float* q, const cudaStream_t& stream)
template <typename T, typename D>
void reprojectImageTo3D_gpu(const DevMem2Db disp, DevMem2Db xyz, const float* q, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(disp.cols, threads.x);
grid.y = divUp(disp.rows, threads.y);
dim3 block(32, 8);
dim3 grid(divUp(disp.cols, block.x), divUp(disp.rows, block.y));
cudaSafeCall( cudaMemcpyToSymbol(cq, q, 16 * sizeof(float)) );
reprojectImageTo3D<<<grid, threads, 0, stream>>>(disp.data, disp.step / sizeof(T), xyzw.data, xyzw.step / sizeof(float), disp.rows, disp.cols);
reprojectImageTo3D<T, D><<<grid, block, 0, stream>>>((DevMem2D_<T>)disp, (DevMem2D_<D>)xyz);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
void reprojectImageTo3D_gpu(const DevMem2Db& disp, const DevMem2Df& xyzw, const float* q, const cudaStream_t& stream)
{
reprojectImageTo3D_caller(disp, xyzw, q, stream);
}
void reprojectImageTo3D_gpu(const DevMem2D_<short>& disp, const DevMem2Df& xyzw, const float* q, const cudaStream_t& stream)
{
reprojectImageTo3D_caller(disp, xyzw, q, stream);
}
template void reprojectImageTo3D_gpu<uchar, float3>(const DevMem2Db disp, DevMem2Db xyz, const float* q, cudaStream_t stream);
template void reprojectImageTo3D_gpu<uchar, float4>(const DevMem2Db disp, DevMem2Db xyz, const float* q, cudaStream_t stream);
template void reprojectImageTo3D_gpu<short, float3>(const DevMem2Db disp, DevMem2Db xyz, const float* q, cudaStream_t stream);
template void reprojectImageTo3D_gpu<short, float4>(const DevMem2Db disp, DevMem2Db xyz, const float* q, cudaStream_t stream);
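For clarity, a CPU sketch (not part of the commit, assuming <opencv2/core/core.hpp>) of the per-pixel math the kernel performs: it forms (X, Y, Z, W) = Q * (x, y, disparity, 1) and stores the result after dividing by W:

static void reprojectImageTo3D_reference(const cv::Mat& disp, const cv::Mat& Q, cv::Mat& xyz)
{
    CV_Assert(disp.type() == CV_8U || disp.type() == CV_16S);
    CV_Assert(Q.type() == CV_32F && Q.size() == cv::Size(4, 4));

    xyz.create(disp.size(), CV_32FC3);

    for (int y = 0; y < disp.rows; ++y)
    {
        for (int x = 0; x < disp.cols; ++x)
        {
            const float d = static_cast<float>(disp.depth() == CV_8U ? disp.at<uchar>(y, x) : disp.at<short>(y, x));
            const float X = Q.at<float>(0, 0) * x + Q.at<float>(0, 1) * y + Q.at<float>(0, 2) * d + Q.at<float>(0, 3);
            const float Y = Q.at<float>(1, 0) * x + Q.at<float>(1, 1) * y + Q.at<float>(1, 2) * d + Q.at<float>(1, 3);
            const float Z = Q.at<float>(2, 0) * x + Q.at<float>(2, 1) * y + Q.at<float>(2, 2) * d + Q.at<float>(2, 3);
            const float W = Q.at<float>(3, 0) * x + Q.at<float>(3, 1) * y + Q.at<float>(3, 2) * d + Q.at<float>(3, 3);
            xyz.at<cv::Vec3f>(y, x) = cv::Vec3f(X / W, Y / W, Z / W); // homogeneous divide, as in the kernel
        }
    }
}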
/////////////////////////////////////////// Corner Harris /////////////////////////////////////////////////
......
@@ -50,7 +50,7 @@ using namespace cv::gpu;
void cv::gpu::meanShiftFiltering(const GpuMat&, GpuMat&, int, int, TermCriteria, Stream&) { throw_nogpu(); }
void cv::gpu::meanShiftProc(const GpuMat&, GpuMat&, GpuMat&, int, int, TermCriteria, Stream&) { throw_nogpu(); }
void cv::gpu::drawColorDisp(const GpuMat&, GpuMat&, int, Stream&) { throw_nogpu(); }
void cv::gpu::reprojectImageTo3D(const GpuMat&, GpuMat&, const Mat&, Stream&) { throw_nogpu(); }
void cv::gpu::reprojectImageTo3D(const GpuMat&, GpuMat&, const Mat&, int, Stream&) { throw_nogpu(); }
void cv::gpu::copyMakeBorder(const GpuMat&, GpuMat&, int, int, int, int, int, const Scalar&, Stream&) { throw_nogpu(); }
void cv::gpu::buildWarpPlaneMaps(Size, Rect, const Mat&, const Mat&, const Mat&, float, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::buildWarpCylindricalMaps(Size, Rect, const Mat&, const Mat&, float, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
@@ -213,33 +213,29 @@ namespace cv { namespace gpu { namespace device
{
namespace imgproc
{
void reprojectImageTo3D_gpu(const DevMem2Db& disp, const DevMem2Df& xyzw, const float* q, const cudaStream_t& stream);
void reprojectImageTo3D_gpu(const DevMem2D_<short>& disp, const DevMem2Df& xyzw, const float* q, const cudaStream_t& stream);
template <typename T, typename D>
void reprojectImageTo3D_gpu(const DevMem2Db disp, DevMem2Db xyz, const float* q, cudaStream_t stream);
}
}}}
namespace
void cv::gpu::reprojectImageTo3D(const GpuMat& disp, GpuMat& xyz, const Mat& Q, int dst_cn, Stream& stream)
{
template <typename T>
void reprojectImageTo3D_caller(const GpuMat& disp, GpuMat& xyzw, const Mat& Q, const cudaStream_t& stream)
{
using namespace ::cv::gpu::device::imgproc;
xyzw.create(disp.rows, disp.cols, CV_32FC4);
reprojectImageTo3D_gpu((DevMem2D_<T>)disp, xyzw, Q.ptr<float>(), stream);
}
using namespace cv::gpu::device::imgproc;
typedef void (*reprojectImageTo3D_caller_t)(const GpuMat& disp, GpuMat& xyzw, const Mat& Q, const cudaStream_t& stream);
typedef void (*func_t)(const DevMem2Db disp, DevMem2Db xyz, const float* q, cudaStream_t stream);
static const func_t funcs[2][4] =
{
{reprojectImageTo3D_gpu<uchar, float3>, 0, 0, reprojectImageTo3D_gpu<short, float3>},
{reprojectImageTo3D_gpu<uchar, float4>, 0, 0, reprojectImageTo3D_gpu<short, float4>}
};
const reprojectImageTo3D_caller_t reprojectImageTo3D_callers[] = {reprojectImageTo3D_caller<unsigned char>, 0, 0, reprojectImageTo3D_caller<short>, 0, 0, 0, 0};
}
CV_Assert(disp.type() == CV_8U || disp.type() == CV_16S);
CV_Assert(Q.type() == CV_32F && Q.rows == 4 && Q.cols == 4 && Q.isContinuous());
CV_Assert(dst_cn == 3 || dst_cn == 4);
void cv::gpu::reprojectImageTo3D(const GpuMat& disp, GpuMat& xyzw, const Mat& Q, Stream& stream)
{
CV_Assert((disp.type() == CV_8U || disp.type() == CV_16S) && Q.type() == CV_32F && Q.rows == 4 && Q.cols == 4);
xyz.create(disp.size(), CV_MAKE_TYPE(CV_32F, dst_cn));
reprojectImageTo3D_callers[disp.type()](disp, xyzw, Q, StreamAccessor::getStream(stream));
funcs[dst_cn == 4][disp.type()](disp, xyz, Q.ptr<float>(), StreamAccessor::getStream(stream));
}
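The dispatch table is indexed by [dst_cn == 4][disp.type()], so only CV_8U (0) and CV_16S (3) inputs resolve to non-null entries. For reference, a hedged sketch of asynchronous use of the updated function; the surrounding names are illustrative, not part of this commit:

static void reprojectAsyncExample(const cv::Mat& disp, const cv::Mat& Q)
{
    cv::gpu::Stream stream;
    cv::gpu::GpuMat d_disp, d_xyz;

    stream.enqueueUpload(disp, d_disp);                        // disp: CV_8U or CV_16S host matrix
    cv::gpu::reprojectImageTo3D(d_disp, d_xyz, Q, 4, stream);  // CV_32FC4 result, queued on the stream
    stream.waitForCompletion();

    cv::Mat xyz(d_xyz);                                        // download the finished result
}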
////////////////////////////////////////////////////////////////////////
@@ -1513,9 +1509,11 @@ void cv::gpu::Canny(const GpuMat& src, CannyBuf& buf, GpuMat& dst, double low_th
{
using namespace ::cv::gpu::device::canny;
CV_Assert(TargetArchs::builtWith(SHARED_ATOMICS) && DeviceInfo().supports(SHARED_ATOMICS));
CV_Assert(src.type() == CV_8UC1);
if (!TargetArchs::builtWith(SHARED_ATOMICS) || !DeviceInfo().supports(SHARED_ATOMICS))
CV_Error(CV_StsNotImplemented, "The device doesn't support shared atomics");
if( low_thresh > high_thresh )
std::swap( low_thresh, high_thresh);
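With this change an unsupported device raises CV_StsNotImplemented at run time instead of failing an assertion, so callers can fall back to the CPU path; a minimal sketch (names are illustrative, not part of this commit):

static void cannyWithFallback(const cv::Mat& img, cv::Mat& edges)   // img: CV_8UC1
{
    try
    {
        cv::gpu::GpuMat d_img(img), d_edges;
        cv::gpu::Canny(d_img, d_edges, 50.0, 100.0);
        d_edges.download(edges);
    }
    catch (const cv::Exception& e)
    {
        if (e.code != CV_StsNotImplemented)
            throw;
        cv::Canny(img, edges, 50.0, 100.0);   // CPU fallback when shared atomics are unavailable
    }
}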
......
@@ -299,6 +299,47 @@ TEST_P(SolvePnPRansac, Accuracy)
ASSERT_LE(cv::norm(tvec - tvec_gold), 1e-3);
}
INSTANTIATE_TEST_CASE_P(GPU_Calib3D, SolvePnPRansac, ALL_DEVICES);
////////////////////////////////////////////////////////////////////////////////
// reprojectImageTo3D
PARAM_TEST_CASE(ReprojectImageTo3D, cv::gpu::DeviceInfo, cv::Size, MatDepth, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int depth;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(ReprojectImageTo3D, Accuracy)
{
cv::Mat disp = randomMat(size, depth, 5.0, 30.0);
cv::Mat Q = randomMat(cv::Size(4, 4), CV_32FC1, 0.1, 1.0);
cv::gpu::GpuMat dst;
cv::gpu::reprojectImageTo3D(loadMat(disp, useRoi), dst, Q, 3);
cv::Mat dst_gold;
cv::reprojectImageTo3D(disp, dst_gold, Q, false);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
INSTANTIATE_TEST_CASE_P(GPU_Calib3D, ReprojectImageTo3D, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_8U), MatDepth(CV_16S)),
WHOLE_SUBMAT));
} // namespace
@@ -41,6 +41,8 @@
#include "precomp.hpp"
namespace {
///////////////////////////////////////////////////////////////////////////////////////////////////////
// cvtColor
@@ -1652,3 +1654,5 @@ INSTANTIATE_TEST_CASE_P(GPU_ImgProc, SwapChannels, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
WHOLE_SUBMAT));
} // namespace
@@ -41,15 +41,9 @@
#include "precomp.hpp"
struct KSize : cv::Size
{
KSize() {}
KSize(int width, int height) : cv::Size(width, height) {}
};
void PrintTo(KSize ksize, std::ostream* os)
{
*os << "kernel size " << ksize.width << "x" << ksize.height;
}
namespace {
IMPLEMENT_PARAM_CLASS(KSize, cv::Size)
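KSize is now generated by IMPLEMENT_PARAM_CLASS instead of being a hand-written cv::Size subclass, which is why the instantiations below pass KSize(cv::Size(3, 3)) rather than KSize(3, 3). Roughly, the macro is assumed to expand to a wrapper like the following (its PrintTo half appears near the end of this diff):

class KSize
{
public:
    KSize(cv::Size arg = cv::Size()) : val_(arg) {}
    operator cv::Size() const { return val_; }
private:
    cv::Size val_;
};

inline void PrintTo(KSize param, std::ostream* os)
{
    *os << "KSize(" << testing::PrintToString(static_cast<cv::Size>(param)) << ")";
}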
cv::Mat getInnerROI(cv::InputArray m_, cv::Size ksize)
{
@@ -107,7 +101,7 @@ INSTANTIATE_TEST_CASE_P(GPU_Filter, Blur, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC4)),
testing::Values(KSize(3, 3), KSize(5, 5), KSize(7, 7)),
testing::Values(KSize(cv::Size(3, 3)), KSize(cv::Size(5, 5)), KSize(cv::Size(7, 7))),
testing::Values(Anchor(cv::Point(-1, -1)), Anchor(cv::Point(0, 0)), Anchor(cv::Point(2, 2))),
WHOLE_SUBMAT));
@@ -163,7 +157,7 @@ INSTANTIATE_TEST_CASE_P(GPU_Filter, Sobel, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC4)),
testing::Values(KSize(3, 3), KSize(5, 5), KSize(7, 7)),
testing::Values(KSize(cv::Size(3, 3)), KSize(cv::Size(5, 5)), KSize(cv::Size(7, 7))),
testing::Values(Deriv_X(0), Deriv_X(1), Deriv_X(2)),
testing::Values(Deriv_Y(0), Deriv_Y(1), Deriv_Y(2)),
testing::Values(BorderType(cv::BORDER_REFLECT101),
@@ -286,21 +280,21 @@ INSTANTIATE_TEST_CASE_P(GPU_Filter, GaussianBlur, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC4)),
testing::Values(KSize(3, 3),
KSize(5, 5),
KSize(7, 7),
KSize(9, 9),
KSize(11, 11),
KSize(13, 13),
KSize(15, 15),
KSize(17, 17),
KSize(19, 19),
KSize(21, 21),
KSize(23, 23),
KSize(25, 25),
KSize(27, 27),
KSize(29, 29),
KSize(31, 31)),
testing::Values(KSize(cv::Size(3, 3)),
KSize(cv::Size(5, 5)),
KSize(cv::Size(7, 7)),
KSize(cv::Size(9, 9)),
KSize(cv::Size(11, 11)),
KSize(cv::Size(13, 13)),
KSize(cv::Size(15, 15)),
KSize(cv::Size(17, 17)),
KSize(cv::Size(19, 19)),
KSize(cv::Size(21, 21)),
KSize(cv::Size(23, 23)),
KSize(cv::Size(25, 25)),
KSize(cv::Size(27, 27)),
KSize(cv::Size(29, 29)),
KSize(cv::Size(31, 31))),
testing::Values(BorderType(cv::BORDER_REFLECT101),
BorderType(cv::BORDER_REPLICATE),
BorderType(cv::BORDER_CONSTANT),
@@ -350,7 +344,7 @@ INSTANTIATE_TEST_CASE_P(GPU_Filter, Laplacian, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC4), MatType(CV_32FC1)),
testing::Values(KSize(1, 1), KSize(3, 3)),
testing::Values(KSize(cv::Size(1, 1)), KSize(cv::Size(3, 3))),
WHOLE_SUBMAT));
/////////////////////////////////////////////////////////////////////////////////////////////////
@@ -557,6 +551,8 @@ INSTANTIATE_TEST_CASE_P(GPU_Filter, Filter2D, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC4), MatType(CV_32FC1)),
testing::Values(KSize(3, 3), KSize(5, 5), KSize(7, 7), KSize(11, 11), KSize(13, 13), KSize(15, 15)),
testing::Values(KSize(cv::Size(3, 3)), KSize(cv::Size(5, 5)), KSize(cv::Size(7, 7)), KSize(cv::Size(11, 11)), KSize(cv::Size(13, 13)), KSize(cv::Size(15, 15))),
testing::Values(Anchor(cv::Point(-1, -1)), Anchor(cv::Point(0, 0)), Anchor(cv::Point(2, 2))),
WHOLE_SUBMAT));
} // namespace
@@ -41,360 +41,211 @@
#include "precomp.hpp"
#ifdef HAVE_CUDA
using namespace cvtest;
using namespace testing;
namespace {
///////////////////////////////////////////////////////////////////////////////////////////////////////
// integral
// Integral
PARAM_TEST_CASE(Integral, cv::gpu::DeviceInfo, UseRoi)
PARAM_TEST_CASE(Integral, cv::gpu::DeviceInfo, cv::Size, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
bool useRoi;
cv::Size size;
cv::Mat src;
cv::Mat dst_gold;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
useRoi = GET_PARAM(1);
size = GET_PARAM(1);
useRoi = GET_PARAM(2);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(rng.uniform(20, 150), rng.uniform(20, 150));
src = randomMat(rng, size, CV_8UC1, 0.0, 255.0, false);
cv::integral(src, dst_gold, CV_32S);
}
};
TEST_P(Integral, Accuracy)
{
cv::Mat dst;
cv::gpu::GpuMat gpuRes;
cv::Mat src = randomMat(size, CV_8UC1);
cv::gpu::integral(loadMat(src, useRoi), gpuRes);
cv::gpu::GpuMat dst = createMat(cv::Size(src.cols + 1, src.rows + 1), CV_32SC1, useRoi);
cv::gpu::integral(loadMat(src, useRoi), dst);
gpuRes.download(dst);
cv::Mat dst_gold;
cv::integral(src, dst_gold, CV_32S);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
INSTANTIATE_TEST_CASE_P(ImgProc, Integral, Combine(
ALL_DEVICES,
WHOLE_SUBMAT));
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Integral, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
WHOLE_SUBMAT));
///////////////////////////////////////////////////////////////////////////////////////////////////////
// histograms
// HistEven
struct HistEven : TestWithParam<cv::gpu::DeviceInfo>
struct HistEven : testing::TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
cv::Mat hsv;
int hbins;
float hranges[2];
cv::Mat hist_gold;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
cv::Mat img = readImage("stereobm/aloe-L.png");
ASSERT_FALSE(img.empty());
cv::cvtColor(img, hsv, CV_BGR2HSV);
hbins = 30;
hranges[0] = 0;
hranges[1] = 180;
int histSize[] = {hbins};
const float* ranges[] = {hranges};
cv::MatND histnd;
int channels[] = {0};
cv::calcHist(&hsv, 1, channels, cv::Mat(), histnd, 1, histSize, ranges);
hist_gold = histnd;
hist_gold = hist_gold.t();
hist_gold.convertTo(hist_gold, CV_32S);
}
};
TEST_P(HistEven, Accuracy)
{
cv::Mat hist;
cv::Mat img = readImage("stereobm/aloe-L.png");
ASSERT_FALSE(img.empty());
cv::Mat hsv;
cv::cvtColor(img, hsv, CV_BGR2HSV);
int hbins = 30;
float hranges[] = {0.0f, 180.0f};
std::vector<cv::gpu::GpuMat> srcs;
cv::gpu::split(loadMat(hsv), srcs);
cv::gpu::GpuMat gpuHist;
cv::gpu::GpuMat hist;
cv::gpu::histEven(srcs[0], hist, hbins, (int)hranges[0], (int)hranges[1]);
cv::gpu::histEven(srcs[0], gpuHist, hbins, (int)hranges[0], (int)hranges[1]);
cv::MatND histnd;
int histSize[] = {hbins};
const float* ranges[] = {hranges};
int channels[] = {0};
cv::calcHist(&hsv, 1, channels, cv::Mat(), histnd, 1, histSize, ranges);
gpuHist.download(hist);
cv::Mat hist_gold = histnd;
hist_gold = hist_gold.t();
hist_gold.convertTo(hist_gold, CV_32S);
EXPECT_MAT_NEAR(hist_gold, hist, 0.0);
}
INSTANTIATE_TEST_CASE_P(ImgProc, HistEven, ALL_DEVICES);
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, HistEven, ALL_DEVICES);
struct CalcHist : TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
cv::Mat src;
cv::Mat hist_gold;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
src = randomMat(rng, size, CV_8UC1, 0, 255, false);
hist_gold.create(1, 256, CV_32SC1);
hist_gold.setTo(cv::Scalar::all(0));
int* hist = hist_gold.ptr<int>();
for (int y = 0; y < src.rows; ++y)
{
const uchar* src_row = src.ptr(y);
for (int x = 0; x < src.cols; ++x)
++hist[src_row[x]];
}
}
};
TEST_P(CalcHist, Accuracy)
{
cv::Mat hist;
cv::gpu::GpuMat gpuHist;
cv::gpu::calcHist(loadMat(src), gpuHist);
gpuHist.download(hist);
EXPECT_MAT_NEAR(hist_gold, hist, 0.0);
}
INSTANTIATE_TEST_CASE_P(ImgProc, CalcHist, ALL_DEVICES);
///////////////////////////////////////////////////////////////////////////////////////////////////////
// CalcHist
struct EqualizeHist : TestWithParam<cv::gpu::DeviceInfo>
void calcHistGold(const cv::Mat& src, cv::Mat& hist)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
cv::Mat src;
cv::Mat dst_gold;
hist.create(1, 256, CV_32SC1);
hist.setTo(cv::Scalar::all(0));
virtual void SetUp()
int* hist_row = hist.ptr<int>();
for (int y = 0; y < src.rows; ++y)
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
const uchar* src_row = src.ptr(y);
size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
src = randomMat(rng, size, CV_8UC1, 0, 255, false);
cv::equalizeHist(src, dst_gold);
for (int x = 0; x < src.cols; ++x)
++hist_row[src_row[x]];
}
};
TEST_P(EqualizeHist, Accuracy)
{
cv::Mat dst;
cv::gpu::GpuMat gpuDst;
cv::gpu::equalizeHist(loadMat(src), gpuDst);
gpuDst.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 3.0);
}
INSTANTIATE_TEST_CASE_P(ImgProc, EqualizeHist, ALL_DEVICES);
///////////////////////////////////////////////////////////////////////////////////////////////////////
// cornerHarris
PARAM_TEST_CASE(CornerHarris, cv::gpu::DeviceInfo, MatType, BorderType, int, int)
PARAM_TEST_CASE(CalcHist, cv::gpu::DeviceInfo, cv::Size)
{
cv::gpu::DeviceInfo devInfo;
int type;
int borderType;
int blockSize;
int apertureSize;
cv::Size size;
cv::Mat src;
double k;
cv::Mat dst_gold;
cv::Mat hist_gold;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
borderType = GET_PARAM(2);
blockSize = GET_PARAM(3);
apertureSize = GET_PARAM(4);
size = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
cv::Mat img = readImage("stereobm/aloe-L.png", CV_LOAD_IMAGE_GRAYSCALE);
ASSERT_FALSE(img.empty());
img.convertTo(src, type, type == CV_32F ? 1.0 / 255.0 : 1.0);
k = rng.uniform(0.1, 0.9);
cv::cornerHarris(src, dst_gold, blockSize, apertureSize, k, borderType);
}
};
TEST_P(CornerHarris, Accuracy)
TEST_P(CalcHist, Accuracy)
{
cv::Mat dst;
cv::gpu::GpuMat dev_dst;
cv::Mat src = randomMat(size, CV_8UC1);
cv::gpu::cornerHarris(loadMat(src), dev_dst, blockSize, apertureSize, k, borderType);
cv::gpu::GpuMat hist;
cv::gpu::calcHist(loadMat(src), hist);
dev_dst.download(dst);
cv::Mat hist_gold;
calcHistGold(src, hist_gold);
EXPECT_MAT_NEAR(dst_gold, dst, 0.02);
EXPECT_MAT_NEAR(hist_gold, hist, 0.0);
}
INSTANTIATE_TEST_CASE_P(ImgProc, CornerHarris, Combine(
ALL_DEVICES,
Values(CV_8UC1, CV_32FC1),
Values((int) cv::BORDER_REFLECT101, (int) cv::BORDER_REPLICATE, (int) cv::BORDER_REFLECT),
Values(3, 5, 7),
Values(0, 3, 5, 7)));
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CalcHist, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES));
///////////////////////////////////////////////////////////////////////////////////////////////////////
// cornerMinEigen
// EqualizeHist
PARAM_TEST_CASE(CornerMinEigen, cv::gpu::DeviceInfo, MatType, BorderType, int, int)
PARAM_TEST_CASE(EqualizeHist, cv::gpu::DeviceInfo, cv::Size)
{
cv::gpu::DeviceInfo devInfo;
int type;
int borderType;
int blockSize;
int apertureSize;
cv::Mat src;
cv::Mat dst_gold;
cv::Size size;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
borderType = GET_PARAM(2);
blockSize = GET_PARAM(3);
apertureSize = GET_PARAM(4);
size = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
cv::Mat img = readImage("stereobm/aloe-L.png", CV_LOAD_IMAGE_GRAYSCALE);
ASSERT_FALSE(img.empty());
img.convertTo(src, type, type == CV_32F ? 1.0 / 255.0 : 1.0);
cv::cornerMinEigenVal(src, dst_gold, blockSize, apertureSize, borderType);
}
};
TEST_P(CornerMinEigen, Accuracy)
TEST_P(EqualizeHist, Accuracy)
{
cv::Mat dst;
cv::gpu::GpuMat dev_dst;
cv::Mat src = randomMat(size, CV_8UC1);
cv::gpu::cornerMinEigenVal(loadMat(src), dev_dst, blockSize, apertureSize, borderType);
dev_dst.download(dst);
cv::gpu::GpuMat dst;
cv::gpu::equalizeHist(loadMat(src), dst);
cv::Mat dst_gold;
cv::equalizeHist(src, dst_gold);
EXPECT_MAT_NEAR(dst_gold, dst, 0.02);
EXPECT_MAT_NEAR(dst_gold, dst, 3.0);
}
INSTANTIATE_TEST_CASE_P(ImgProc, CornerMinEigen, Combine(
ALL_DEVICES,
Values(CV_8UC1, CV_32FC1),
Values((int) cv::BORDER_REFLECT101, (int) cv::BORDER_REPLICATE, (int) cv::BORDER_REFLECT),
Values(3, 5, 7),
Values(0, 3, 5, 7)));
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, EqualizeHist, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES));
////////////////////////////////////////////////////////////////////////
// ColumnSum
struct ColumnSum : TestWithParam<cv::gpu::DeviceInfo>
PARAM_TEST_CASE(ColumnSum, cv::gpu::DeviceInfo, cv::Size)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
cv::Mat src;
virtual void SetUp()
{
devInfo = GetParam();
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(rng.uniform(100, 400), rng.uniform(100, 400));
src = randomMat(rng, size, CV_32F, 0.0, 1.0, false);
}
};
TEST_P(ColumnSum, Accuracy)
{
cv::Mat dst;
cv::Mat src = randomMat(size, CV_32FC1);
cv::gpu::GpuMat dev_dst;
cv::gpu::columnSum(loadMat(src), dev_dst);
cv::gpu::GpuMat d_dst;
cv::gpu::columnSum(loadMat(src), d_dst);
dev_dst.download(dst);
cv::Mat dst(d_dst);
for (int j = 0; j < src.cols; ++j)
{
float gold = src.at<float>(0, j);
float res = dst.at<float>(0, j);
ASSERT_NEAR(res, gold, 0.5);
ASSERT_NEAR(res, gold, 1e-5);
}
for (int i = 1; i < src.rows; ++i)
@@ -403,85 +254,87 @@ TEST_P(ColumnSum, Accuracy)
{
float gold = src.at<float>(i, j) += src.at<float>(i - 1, j);
float res = dst.at<float>(i, j);
ASSERT_NEAR(res, gold, 0.5);
ASSERT_NEAR(res, gold, 1e-5);
}
}
}
INSTANTIATE_TEST_CASE_P(ImgProc, ColumnSum, ALL_DEVICES);
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, ColumnSum, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES));
////////////////////////////////////////////////////////////////////////////////
// reprojectImageTo3D
////////////////////////////////////////////////////////
// Canny
IMPLEMENT_PARAM_CLASS(AppertureSize, int);
IMPLEMENT_PARAM_CLASS(L2gradient, bool);
PARAM_TEST_CASE(ReprojectImageTo3D, cv::gpu::DeviceInfo, UseRoi)
PARAM_TEST_CASE(Canny, cv::gpu::DeviceInfo, AppertureSize, L2gradient, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
int apperture_size;
bool useL2gradient;
bool useRoi;
cv::Size size;
cv::Mat disp;
cv::Mat Q;
cv::Mat dst_gold;
cv::Mat edges_gold;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
useRoi = GET_PARAM(1);
apperture_size = GET_PARAM(1);
useL2gradient = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(rng.uniform(100, 500), rng.uniform(100, 500));
disp = randomMat(rng, size, CV_8UC1, 5.0, 30.0, false);
Q = randomMat(rng, cv::Size(4, 4), CV_32FC1, 0.1, 1.0, false);
cv::reprojectImageTo3D(disp, dst_gold, Q, false);
}
};
TEST_P(ReprojectImageTo3D, Accuracy)
TEST_P(Canny, Accuracy)
{
cv::Mat dst;
cv::gpu::GpuMat gpures;
cv::gpu::reprojectImageTo3D(loadMat(disp, useRoi), gpures, Q);
gpures.download(dst);
ASSERT_EQ(dst_gold.size(), dst.size());
for (int y = 0; y < dst_gold.rows; ++y)
cv::Mat img = readImage("stereobm/aloe-L.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
double low_thresh = 50.0;
double high_thresh = 100.0;
if (!supportFeature(devInfo, cv::gpu::SHARED_ATOMICS))
{
try
{
cv::gpu::GpuMat edges;
cv::gpu::Canny(loadMat(img), edges, low_thresh, high_thresh, apperture_size, useL2gradient);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsNotImplemented, e.code);
}
}
else
{
const cv::Vec3f* cpu_row = dst_gold.ptr<cv::Vec3f>(y);
const cv::Vec4f* gpu_row = dst.ptr<cv::Vec4f>(y);
cv::gpu::GpuMat edges;
cv::gpu::Canny(loadMat(img, useRoi), edges, low_thresh, high_thresh, apperture_size, useL2gradient);
for (int x = 0; x < dst_gold.cols; ++x)
{
cv::Vec3f gold = cpu_row[x];
cv::Vec4f res = gpu_row[x];
cv::Mat edges_gold;
cv::Canny(img, edges_gold, low_thresh, high_thresh, apperture_size, useL2gradient);
ASSERT_NEAR(res[0], gold[0], 1e-5);
ASSERT_NEAR(res[1], gold[1], 1e-5);
ASSERT_NEAR(res[2], gold[2], 1e-5);
}
EXPECT_MAT_SIMILAR(edges_gold, edges, 1e-2);
}
}
INSTANTIATE_TEST_CASE_P(ImgProc, ReprojectImageTo3D, Combine(ALL_DEVICES, WHOLE_SUBMAT));
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Canny, testing::Combine(
ALL_DEVICES,
testing::Values(AppertureSize(3), AppertureSize(5)),
testing::Values(L2gradient(false), L2gradient(true)),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// meanShift
// MeanShift
struct MeanShift : TestWithParam<cv::gpu::DeviceInfo>
struct MeanShift : testing::TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
cv::Mat rgba;
cv::Mat img;
int spatialRad;
int colorRad;
@@ -492,11 +345,9 @@ struct MeanShift : TestWithParam<cv::gpu::DeviceInfo>
cv::gpu::setDevice(devInfo.deviceID());
cv::Mat img = readImage("meanshift/cones.png");
img = readImageType("meanshift/cones.png", CV_8UC4);
ASSERT_FALSE(img.empty());
cv::cvtColor(img, rgba, CV_BGR2BGRA);
spatialRad = 30;
colorRad = 30;
}
@@ -505,23 +356,18 @@ struct MeanShift : TestWithParam<cv::gpu::DeviceInfo>
TEST_P(MeanShift, Filtering)
{
cv::Mat img_template;
if (supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_20))
img_template = readImage("meanshift/con_result.png");
else
img_template = readImage("meanshift/con_result_CC1X.png");
ASSERT_FALSE(img_template.empty());
cv::gpu::GpuMat d_dst;
cv::gpu::meanShiftFiltering(loadMat(img), d_dst, spatialRad, colorRad);
cv::Mat dst;
cv::gpu::GpuMat dev_dst;
cv::gpu::meanShiftFiltering(loadMat(rgba), dev_dst, spatialRad, colorRad);
dev_dst.download(dst);
ASSERT_EQ(CV_8UC4, d_dst.type());
ASSERT_EQ(CV_8UC4, dst.type());
cv::Mat dst(d_dst);
cv::Mat result;
cv::cvtColor(dst, result, CV_BGRA2BGR);
@@ -531,81 +377,67 @@ TEST_P(MeanShift, Filtering)
TEST_P(MeanShift, Proc)
{
cv::Mat spmap_template;
cv::FileStorage fs;
if (supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_20))
fs.open(std::string(cvtest::TS::ptr()->get_data_path()) + "meanshift/spmap.yaml", cv::FileStorage::READ);
else
fs.open(std::string(cvtest::TS::ptr()->get_data_path()) + "meanshift/spmap_CC1X.yaml", cv::FileStorage::READ);
ASSERT_TRUE(fs.isOpened());
cv::Mat spmap_template;
fs["spmap"] >> spmap_template;
ASSERT_FALSE(spmap_template.empty());
ASSERT_TRUE(!rgba.empty() && !spmap_template.empty());
cv::Mat rmap_filtered;
cv::Mat rmap;
cv::Mat spmap;
cv::gpu::GpuMat rmap_filtered;
cv::gpu::meanShiftFiltering(loadMat(img), rmap_filtered, spatialRad, colorRad);
cv::gpu::GpuMat d_rmap_filtered;
cv::gpu::meanShiftFiltering(loadMat(rgba), d_rmap_filtered, spatialRad, colorRad);
cv::gpu::GpuMat d_rmap;
cv::gpu::GpuMat d_spmap;
cv::gpu::meanShiftProc(loadMat(rgba), d_rmap, d_spmap, spatialRad, colorRad);
d_rmap_filtered.download(rmap_filtered);
d_rmap.download(rmap);
d_spmap.download(spmap);
cv::gpu::GpuMat rmap;
cv::gpu::GpuMat spmap;
cv::gpu::meanShiftProc(loadMat(img), rmap, spmap, spatialRad, colorRad);
ASSERT_EQ(CV_8UC4, rmap.type());
EXPECT_MAT_NEAR(rmap_filtered, rmap, 0.0);
EXPECT_MAT_NEAR(spmap_template, spmap, 0.0);
}
INSTANTIATE_TEST_CASE_P(ImgProc, MeanShift, ALL_DEVICES);
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MeanShift, ALL_DEVICES);
////////////////////////////////////////////////////////////////////////////////
// MeanShiftSegmentation
IMPLEMENT_PARAM_CLASS(MinSize, int);
PARAM_TEST_CASE(MeanShiftSegmentation, cv::gpu::DeviceInfo, int)
PARAM_TEST_CASE(MeanShiftSegmentation, cv::gpu::DeviceInfo, MinSize)
{
cv::gpu::DeviceInfo devInfo;
int minsize;
cv::Mat rgba;
cv::Mat dst_gold;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
minsize = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
cv::Mat img = readImage("meanshift/cones.png");
ASSERT_FALSE(img.empty());
cv::cvtColor(img, rgba, CV_BGR2BGRA);
std::ostringstream path;
path << "meanshift/cones_segmented_sp10_sr10_minsize" << minsize;
if (supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_20))
path << ".png";
else
path << "_CC1X.png";
dst_gold = readImage(path.str());
ASSERT_FALSE(dst_gold.empty());
}
};
TEST_P(MeanShiftSegmentation, Regression)
{
cv::Mat dst;
cv::Mat img = readImageType("meanshift/cones.png", CV_8UC4);
ASSERT_FALSE(img.empty());
cv::gpu::meanShiftSegmentation(loadMat(rgba), dst, 10, 10, minsize);
std::ostringstream path;
path << "meanshift/cones_segmented_sp10_sr10_minsize" << minsize;
if (supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_20))
path << ".png";
else
path << "_CC1X.png";
cv::Mat dst_gold = readImage(path.str());
ASSERT_FALSE(dst_gold.empty());
cv::Mat dst;
cv::gpu::meanShiftSegmentation(loadMat(img), dst, 10, 10, minsize);
cv::Mat dst_rgb;
cv::cvtColor(dst, dst_rgb, CV_BGRA2BGR);
@@ -613,70 +445,227 @@ TEST_P(MeanShiftSegmentation, Regression)
EXPECT_MAT_SIMILAR(dst_gold, dst_rgb, 1e-3);
}
INSTANTIATE_TEST_CASE_P(ImgProc, MeanShiftSegmentation, Combine(
ALL_DEVICES,
Values(0, 4, 20, 84, 340, 1364)));
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MeanShiftSegmentation, testing::Combine(
ALL_DEVICES,
testing::Values(MinSize(0), MinSize(4), MinSize(20), MinSize(84), MinSize(340), MinSize(1364))));
////////////////////////////////////////////////////////////////////////////////
// matchTemplate
////////////////////////////////////////////////////////////////////////////
// Blend
CV_ENUM(TemplateMethod, cv::TM_SQDIFF, cv::TM_SQDIFF_NORMED, cv::TM_CCORR, cv::TM_CCORR_NORMED, cv::TM_CCOEFF, cv::TM_CCOEFF_NORMED)
template <typename T>
void blendLinearGold(const cv::Mat& img1, const cv::Mat& img2, const cv::Mat& weights1, const cv::Mat& weights2, cv::Mat& result_gold)
{
result_gold.create(img1.size(), img1.type());
PARAM_TEST_CASE(MatchTemplate8U, cv::gpu::DeviceInfo, int, TemplateMethod)
int cn = img1.channels();
for (int y = 0; y < img1.rows; ++y)
{
const float* weights1_row = weights1.ptr<float>(y);
const float* weights2_row = weights2.ptr<float>(y);
const T* img1_row = img1.ptr<T>(y);
const T* img2_row = img2.ptr<T>(y);
T* result_gold_row = result_gold.ptr<T>(y);
for (int x = 0; x < img1.cols * cn; ++x)
{
float w1 = weights1_row[x / cn];
float w2 = weights2_row[x / cn];
result_gold_row[x] = static_cast<T>((img1_row[x] * w1 + img2_row[x] * w2) / (w1 + w2 + 1e-5f));
}
}
}
PARAM_TEST_CASE(Blend, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
int cn;
int method;
cv::Size size;
int type;
bool useRoi;
int n, m, h, w;
cv::Mat image, templ;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(Blend, Accuracy)
{
int depth = CV_MAT_DEPTH(type);
cv::Mat img1 = randomMat(size, type, 0.0, depth == CV_8U ? 255.0 : 1.0);
cv::Mat img2 = randomMat(size, type, 0.0, depth == CV_8U ? 255.0 : 1.0);
cv::Mat weights1 = randomMat(size, CV_32F, 0, 1);
cv::Mat weights2 = randomMat(size, CV_32F, 0, 1);
cv::gpu::GpuMat result;
cv::gpu::blendLinear(loadMat(img1, useRoi), loadMat(img2, useRoi), loadMat(weights1, useRoi), loadMat(weights2, useRoi), result);
cv::Mat result_gold;
if (depth == CV_8U)
blendLinearGold<uchar>(img1, img2, weights1, weights2, result_gold);
else
blendLinearGold<float>(img1, img2, weights1, weights2, result_gold);
EXPECT_MAT_NEAR(result_gold, result, CV_MAT_DEPTH(type) == CV_8U ? 1.0 : 1e-5);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Blend, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////
// Convolve
void convolveDFT(const cv::Mat& A, const cv::Mat& B, cv::Mat& C, bool ccorr = false)
{
// reallocate the output array if needed
C.create(std::abs(A.rows - B.rows) + 1, std::abs(A.cols - B.cols) + 1, A.type());
cv::Size dftSize;
// compute the size of DFT transform
dftSize.width = cv::getOptimalDFTSize(A.cols + B.cols - 1);
dftSize.height = cv::getOptimalDFTSize(A.rows + B.rows - 1);
// allocate temporary buffers and initialize them with 0s
cv::Mat tempA(dftSize, A.type(), cv::Scalar::all(0));
cv::Mat tempB(dftSize, B.type(), cv::Scalar::all(0));
// copy A and B to the top-left corners of tempA and tempB, respectively
cv::Mat roiA(tempA, cv::Rect(0, 0, A.cols, A.rows));
A.copyTo(roiA);
cv::Mat roiB(tempB, cv::Rect(0, 0, B.cols, B.rows));
B.copyTo(roiB);
// now transform the padded A & B in-place;
// use "nonzeroRows" hint for faster processing
cv::dft(tempA, tempA, 0, A.rows);
cv::dft(tempB, tempB, 0, B.rows);
// multiply the spectrums;
// the function handles packed spectrum representations well
cv::mulSpectrums(tempA, tempB, tempA, 0, ccorr);
// transform the product back from the frequency domain.
// Even though all the result rows will be non-zero,
// you need only the first C.rows of them, and thus you
// pass nonzeroRows == C.rows
cv::dft(tempA, tempA, cv::DFT_INVERSE + cv::DFT_SCALE, C.rows);
// now copy the result back to C.
tempA(cv::Rect(0, 0, C.cols, C.rows)).copyTo(C);
}
IMPLEMENT_PARAM_CLASS(KSize, int);
IMPLEMENT_PARAM_CLASS(Ccorr, bool);
PARAM_TEST_CASE(Convolve, cv::gpu::DeviceInfo, cv::Size, KSize, Ccorr)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int ksize;
bool ccorr;
cv::Mat src;
cv::Mat kernel;
cv::Mat dst_gold;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
cn = GET_PARAM(1);
method = GET_PARAM(2);
size = GET_PARAM(1);
ksize = GET_PARAM(2);
ccorr = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(Convolve, Accuracy)
{
cv::Mat src = randomMat(size, CV_32FC1, 0.0, 100.0);
cv::Mat kernel = randomMat(cv::Size(ksize, ksize), CV_32FC1, 0.0, 1.0);
cv::gpu::GpuMat dst;
cv::gpu::convolve(loadMat(src), loadMat(kernel), dst, ccorr);
cv::Mat dst_gold;
convolveDFT(src, kernel, dst_gold, ccorr);
EXPECT_MAT_NEAR(dst, dst_gold, 1e-1);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Convolve, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(KSize(3), KSize(7), KSize(11), KSize(17), KSize(19), KSize(23), KSize(45)),
testing::Values(Ccorr(false), Ccorr(true))));
cv::RNG& rng = TS::ptr()->get_rng();
////////////////////////////////////////////////////////////////////////////////
// MatchTemplate8U
n = rng.uniform(30, 100);
m = rng.uniform(30, 100);
h = rng.uniform(5, n - 1);
w = rng.uniform(5, m - 1);
CV_ENUM(TemplateMethod, cv::TM_SQDIFF, cv::TM_SQDIFF_NORMED, cv::TM_CCORR, cv::TM_CCORR_NORMED, cv::TM_CCOEFF, cv::TM_CCOEFF_NORMED)
#define ALL_TEMPLATE_METHODS testing::Values(TemplateMethod(cv::TM_SQDIFF), TemplateMethod(cv::TM_SQDIFF_NORMED), TemplateMethod(cv::TM_CCORR), TemplateMethod(cv::TM_CCORR_NORMED), TemplateMethod(cv::TM_CCOEFF), TemplateMethod(cv::TM_CCOEFF_NORMED))
image = randomMat(rng, cv::Size(m, n), CV_MAKETYPE(CV_8U, cn), 1.0, 10.0, false);
templ = randomMat(rng, cv::Size(w, h), CV_MAKETYPE(CV_8U, cn), 1.0, 10.0, false);
IMPLEMENT_PARAM_CLASS(TemplateSize, cv::Size);
cv::matchTemplate(image, templ, dst_gold, method);
PARAM_TEST_CASE(MatchTemplate8U, cv::gpu::DeviceInfo, cv::Size, TemplateSize, Channels, TemplateMethod)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
cv::Size templ_size;
int cn;
int method;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
templ_size = GET_PARAM(2);
cn = GET_PARAM(3);
method = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(MatchTemplate8U, Regression)
TEST_P(MatchTemplate8U, Accuracy)
{
cv::Mat dst;
cv::Mat image = randomMat(size, CV_MAKETYPE(CV_8U, cn));
cv::Mat templ = randomMat(templ_size, CV_MAKETYPE(CV_8U, cn));
cv::gpu::GpuMat dev_dst;
cv::gpu::GpuMat dst;
cv::gpu::matchTemplate(loadMat(image), loadMat(templ), dst, method);
cv::gpu::matchTemplate(loadMat(image), loadMat(templ), dev_dst, method);
dev_dst.download(dst);
cv::Mat dst_gold;
cv::matchTemplate(image, templ, dst_gold, method);
EXPECT_MAT_NEAR(dst_gold, dst, 5 * h * w * 1e-4);
EXPECT_MAT_NEAR(dst_gold, dst, templ_size.area() * 1e-1);
}
INSTANTIATE_TEST_CASE_P(ImgProc, MatchTemplate8U, Combine(
ALL_DEVICES,
Range(1, 5),
Values((int)cv::TM_SQDIFF, (int) cv::TM_SQDIFF_NORMED, (int) cv::TM_CCORR, (int) cv::TM_CCORR_NORMED, (int) cv::TM_CCOEFF, (int) cv::TM_CCOEFF_NORMED)));
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate8U, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(TemplateSize(cv::Size(5, 5)), TemplateSize(cv::Size(16, 16)), TemplateSize(cv::Size(30, 30))),
testing::Values(Channels(1), Channels(3), Channels(4)),
ALL_TEMPLATE_METHODS));
////////////////////////////////////////////////////////////////////////////////
// MatchTemplate32F
PARAM_TEST_CASE(MatchTemplate32F, cv::gpu::DeviceInfo, int, TemplateMethod)
PARAM_TEST_CASE(MatchTemplate32F, cv::gpu::DeviceInfo, cv::Size, TemplateSize, Channels, TemplateMethod)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
cv::Size templ_size;
int cn;
int method;
@@ -688,43 +677,38 @@ PARAM_TEST_CASE(MatchTemplate32F, cv::gpu::DeviceInfo, int, TemplateMethod)
virtual void SetUp()
{
devInfo = GET_PARAM(0);
cn = GET_PARAM(1);
method = GET_PARAM(2);
size = GET_PARAM(1);
templ_size = GET_PARAM(2);
cn = GET_PARAM(3);
method = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
n = rng.uniform(30, 100);
m = rng.uniform(30, 100);
h = rng.uniform(5, n - 1);
w = rng.uniform(5, m - 1);
image = randomMat(rng, cv::Size(m, n), CV_MAKETYPE(CV_32F, cn), 0.001, 1.0, false);
templ = randomMat(rng, cv::Size(w, h), CV_MAKETYPE(CV_32F, cn), 0.001, 1.0, false);
cv::matchTemplate(image, templ, dst_gold, method);
}
};
TEST_P(MatchTemplate32F, Regression)
{
cv::Mat dst;
cv::Mat image = randomMat(size, CV_MAKETYPE(CV_32F, cn));
cv::Mat templ = randomMat(templ_size, CV_MAKETYPE(CV_32F, cn));
cv::gpu::GpuMat dev_dst;
cv::gpu::GpuMat dst;
cv::gpu::matchTemplate(loadMat(image), loadMat(templ), dst, method);
cv::gpu::matchTemplate(loadMat(image), loadMat(templ), dev_dst, method);
dev_dst.download(dst);
cv::Mat dst_gold;
cv::matchTemplate(image, templ, dst_gold, method);
EXPECT_MAT_NEAR(dst_gold, dst, 0.25 * h * w * 1e-4);
EXPECT_MAT_NEAR(dst_gold, dst, templ_size.area() * 1e-1);
}
INSTANTIATE_TEST_CASE_P(ImgProc, MatchTemplate32F, Combine(
ALL_DEVICES,
Range(1, 5),
Values((int) cv::TM_SQDIFF, (int) cv::TM_CCORR)));
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate32F, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(TemplateSize(cv::Size(5, 5)), TemplateSize(cv::Size(16, 16)), TemplateSize(cv::Size(30, 30))),
testing::Values(Channels(1), Channels(3), Channels(4)),
testing::Values(TemplateMethod(cv::TM_SQDIFF), TemplateMethod(cv::TM_CCORR))));
////////////////////////////////////////////////////////////////////////////////
// MatchTemplateBlackSource
PARAM_TEST_CASE(MatchTemplateBlackSource, cv::gpu::DeviceInfo, TemplateMethod)
{
@@ -748,27 +732,26 @@ TEST_P(MatchTemplateBlackSource, Accuracy)
cv::Mat pattern = readImage("matchtemplate/cat.png");
ASSERT_FALSE(pattern.empty());
cv::Point maxLocGold = cv::Point(284, 12);
cv::Mat dst;
cv::gpu::GpuMat dev_dst;
cv::gpu::matchTemplate(loadMat(image), loadMat(pattern), dev_dst, method);
cv::gpu::GpuMat d_dst;
cv::gpu::matchTemplate(loadMat(image), loadMat(pattern), d_dst, method);
dev_dst.download(dst);
cv::Mat dst(d_dst);
double maxValue;
cv::Point maxLoc;
cv::minMaxLoc(dst, NULL, &maxValue, NULL, &maxLoc);
cv::Point maxLocGold = cv::Point(284, 12);
ASSERT_EQ(maxLocGold, maxLoc);
}
INSTANTIATE_TEST_CASE_P(ImgProc, MatchTemplateBlackSource, Combine(
ALL_DEVICES,
Values((int) cv::TM_CCOEFF_NORMED, (int) cv::TM_CCORR_NORMED)));
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplateBlackSource, testing::Combine(
ALL_DEVICES,
testing::Values(TemplateMethod(cv::TM_CCOEFF_NORMED), TemplateMethod(cv::TM_CCORR_NORMED))));
////////////////////////////////////////////////////////////////////////////////
// MatchTemplate_CCOEF_NORMED
PARAM_TEST_CASE(MatchTemplate_CCOEF_NORMED, cv::gpu::DeviceInfo, std::pair<std::string, std::string>)
{
@@ -776,73 +759,81 @@ PARAM_TEST_CASE(MatchTemplate_CCOEF_NORMED, cv::gpu::DeviceInfo, std::pair<std::
std::string imageName;
std::string patternName;
cv::Mat image, pattern;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
imageName = GET_PARAM(1).first;
patternName = GET_PARAM(1).second;
image = readImage(imageName);
ASSERT_FALSE(image.empty());
pattern = readImage(patternName);
ASSERT_FALSE(pattern.empty());
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(MatchTemplate_CCOEF_NORMED, Accuracy)
{
cv::Mat dstGold;
cv::matchTemplate(image, pattern, dstGold, CV_TM_CCOEFF_NORMED);
double minValGold, maxValGold;
cv::Point minLocGold, maxLocGold;
cv::minMaxLoc(dstGold, &minValGold, &maxValGold, &minLocGold, &maxLocGold);
cv::Mat dst;
cv::Mat image = readImage(imageName);
ASSERT_FALSE(image.empty());
cv::gpu::GpuMat dev_dst;
cv::Mat pattern = readImage(patternName);
ASSERT_FALSE(pattern.empty());
cv::gpu::matchTemplate(loadMat(image), loadMat(pattern), dev_dst, CV_TM_CCOEFF_NORMED);
cv::gpu::GpuMat d_dst;
cv::gpu::matchTemplate(loadMat(image), loadMat(pattern), d_dst, CV_TM_CCOEFF_NORMED);
dev_dst.download(dst);
cv::Mat dst(d_dst);
cv::Point minLoc, maxLoc;
double minVal, maxVal;
cv::minMaxLoc(dst, &minVal, &maxVal, &minLoc, &maxLoc);
cv::Mat dstGold;
cv::matchTemplate(image, pattern, dstGold, CV_TM_CCOEFF_NORMED);
double minValGold, maxValGold;
cv::Point minLocGold, maxLocGold;
cv::minMaxLoc(dstGold, &minValGold, &maxValGold, &minLocGold, &maxLocGold);
ASSERT_EQ(minLocGold, minLoc);
ASSERT_EQ(maxLocGold, maxLoc);
ASSERT_LE(maxVal, 1.);
ASSERT_GE(minVal, -1.);
ASSERT_LE(maxVal, 1.0);
ASSERT_GE(minVal, -1.0);
}
INSTANTIATE_TEST_CASE_P(ImgProc, MatchTemplate_CCOEF_NORMED, Combine(
ALL_DEVICES,
Values(std::make_pair(std::string("matchtemplate/source-0.png"), std::string("matchtemplate/target-0.png")))));
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate_CCOEF_NORMED, testing::Combine(
ALL_DEVICES,
testing::Values(std::make_pair(std::string("matchtemplate/source-0.png"), std::string("matchtemplate/target-0.png")))));
////////////////////////////////////////////////////////////////////////////////
// MatchTemplate_CanFindBigTemplate
class MatchTemplate_CanFindBigTemplate : public TestWithParam<cv::gpu::DeviceInfo>
struct MatchTemplate_CanFindBigTemplate : testing::TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
virtual void SetUp()
{
cv::gpu::setDevice(GetParam().deviceID());
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF_NORMED)
{
cv::Mat scene = readImage("matchtemplate/scene.jpg");
ASSERT_FALSE(scene.empty());
cv::Mat templ = readImage("matchtemplate/template.jpg");
ASSERT_FALSE(templ.empty());
cv::gpu::GpuMat d_result;
cv::gpu::matchTemplate(loadMat(scene), loadMat(templ), d_result, CV_TM_SQDIFF_NORMED);
cv::gpu::GpuMat d_scene(scene), d_templ(templ), d_result;
cv::gpu::matchTemplate(d_scene, d_templ, d_result, CV_TM_SQDIFF_NORMED);
cv::Mat result(d_result);
double minVal;
cv::Point minLoc;
cv::gpu::minMaxLoc(d_result, &minVal, 0, &minLoc, 0);
cv::minMaxLoc(result, &minVal, 0, &minLoc, 0);
ASSERT_GE(minVal, 0);
ASSERT_LT(minVal, 1e-3);
@@ -853,30 +844,36 @@ TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF_NORMED)
TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF)
{
cv::Mat scene = readImage("matchtemplate/scene.jpg");
ASSERT_FALSE(scene.empty());
cv::Mat templ = readImage("matchtemplate/template.jpg");
ASSERT_FALSE(templ.empty());
cv::gpu::GpuMat d_result;
cv::gpu::matchTemplate(loadMat(scene), loadMat(templ), d_result, CV_TM_SQDIFF);
cv::gpu::GpuMat d_scene(scene), d_templ(templ), d_result;
cv::gpu::matchTemplate(d_scene, d_templ, d_result, CV_TM_SQDIFF);
cv::Mat result(d_result);
double minVal;
cv::Point minLoc;
cv::gpu::minMaxLoc(d_result, &minVal, 0, &minLoc, 0);
cv::minMaxLoc(result, &minVal, 0, &minLoc, 0);
ASSERT_GE(minVal, 0);
ASSERT_EQ(344, minLoc.x);
ASSERT_EQ(0, minLoc.y);
}
INSTANTIATE_TEST_CASE_P(ImgProc, MatchTemplate_CanFindBigTemplate, ALL_DEVICES);
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate_CanFindBigTemplate, ALL_DEVICES);
////////////////////////////////////////////////////////////////////////////
// MulSpectrums
CV_FLAGS(DftFlags, cv::DFT_INVERSE, cv::DFT_SCALE, cv::DFT_ROWS, cv::DFT_COMPLEX_OUTPUT, cv::DFT_REAL_OUTPUT)
CV_FLAGS(DftFlags, 0, cv::DFT_INVERSE, cv::DFT_SCALE, cv::DFT_ROWS, cv::DFT_COMPLEX_OUTPUT, cv::DFT_REAL_OUTPUT)
PARAM_TEST_CASE(MulSpectrums, cv::gpu::DeviceInfo, DftFlags)
PARAM_TEST_CASE(MulSpectrums, cv::gpu::DeviceInfo, cv::Size, DftFlags)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int flag;
cv::Mat a, b;
@@ -884,60 +881,50 @@ PARAM_TEST_CASE(MulSpectrums, cv::gpu::DeviceInfo, DftFlags)
virtual void SetUp()
{
devInfo = GET_PARAM(0);
flag = GET_PARAM(1);
size = GET_PARAM(1);
flag = GET_PARAM(2);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
a = randomMat(rng, cv::Size(rng.uniform(100, 200), rng.uniform(100, 200)), CV_32FC2, 0.0, 10.0, false);
b = randomMat(rng, a.size(), CV_32FC2, 0.0, 10.0, false);
a = randomMat(size, CV_32FC2);
b = randomMat(size, CV_32FC2);
}
};
TEST_P(MulSpectrums, Simple)
{
cv::gpu::GpuMat c;
cv::gpu::mulSpectrums(loadMat(a), loadMat(b), c, flag, false);
cv::Mat c_gold;
cv::mulSpectrums(a, b, c_gold, flag, false);
cv::Mat c;
cv::gpu::GpuMat d_c;
cv::gpu::mulSpectrums(loadMat(a), loadMat(b), d_c, flag, false);
d_c.download(c);
EXPECT_MAT_NEAR(c_gold, c, 1e-4);
EXPECT_MAT_NEAR(c_gold, c, 1e-2);
}
TEST_P(MulSpectrums, Scaled)
{
float scale = 1.f / a.size().area();
float scale = 1.f / size.area();
cv::gpu::GpuMat c;
cv::gpu::mulAndScaleSpectrums(loadMat(a), loadMat(b), c, flag, scale, false);
cv::Mat c_gold;
cv::mulSpectrums(a, b, c_gold, flag, false);
c_gold.convertTo(c_gold, c_gold.type(), scale);
cv::Mat c;
cv::gpu::GpuMat d_c;
cv::gpu::mulAndScaleSpectrums(loadMat(a), loadMat(b), d_c, flag, scale, false);
d_c.download(c);
EXPECT_MAT_NEAR(c_gold, c, 1e-4);
EXPECT_MAT_NEAR(c_gold, c, 1e-2);
}
INSTANTIATE_TEST_CASE_P(ImgProc, MulSpectrums, Combine(
ALL_DEVICES,
Values(0, (int) cv::DFT_ROWS)));
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MulSpectrums, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(DftFlags(0), DftFlags(cv::DFT_ROWS))));
////////////////////////////////////////////////////////////////////////////
// Dft
struct Dft : TestWithParam<cv::gpu::DeviceInfo>
struct Dft : testing::TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
@@ -949,14 +936,11 @@ struct Dft : TestWithParam<cv::gpu::DeviceInfo>
}
};
void testC2C(const std::string& hint, int cols, int rows, int flags, bool inplace)
{
SCOPED_TRACE(hint);
cv::RNG& rng = cvtest::TS::ptr()->get_rng();
cv::Mat a = randomMat(rng, cv::Size(cols, rows), CV_32FC2, 0.0, 10.0, false);
cv::Mat a = randomMat(cv::Size(cols, rows), CV_32FC2, 0.0, 10.0);
cv::Mat b_gold;
cv::dft(a, b_gold, flags);
@@ -978,9 +962,8 @@ void testC2C(const std::string& hint, int cols, int rows, int flags, bool inplac
TEST_P(Dft, C2C)
{
cv::RNG& rng = cvtest::TS::ptr()->get_rng();
int cols = 2 + rng.next() % 100, rows = 2 + rng.next() % 100;
int cols = randomInt(2, 100);
int rows = randomInt(2, 100);
for (int i = 0; i < 2; ++i)
{
@@ -1006,9 +989,7 @@ void testR2CThenC2R(const std::string& hint, int cols, int rows, bool inplace)
{
SCOPED_TRACE(hint);
cv::RNG& rng = TS::ptr()->get_rng();
cv::Mat a = randomMat(rng, cv::Size(cols, rows), CV_32FC1, 0.0, 10.0, false);
cv::Mat a = randomMat(cv::Size(cols, rows), CV_32FC1, 0.0, 10.0);
cv::gpu::GpuMat d_b, d_c;
cv::gpu::GpuMat d_b_data, d_c_data;
@@ -1042,9 +1023,8 @@ void testR2CThenC2R(const std::string& hint, int cols, int rows, bool inplace)
TEST_P(Dft, R2CThenC2R)
{
cv::RNG& rng = TS::ptr()->get_rng();
int cols = 2 + rng.next() % 100, rows = 2 + rng.next() % 100;
int cols = randomInt(2, 100);
int rows = randomInt(2, 100);
testR2CThenC2R("sanity", cols, rows, false);
testR2CThenC2R("sanity 0 1", cols, rows + 1, false);
@@ -1063,241 +1043,99 @@ TEST_P(Dft, R2CThenC2R)
testR2CThenC2R("single row 1", cols + 1, 1, true);
}
INSTANTIATE_TEST_CASE_P(ImgProc, Dft, ALL_DEVICES);
////////////////////////////////////////////////////////////////////////////
// blend
template <typename T>
void blendLinearGold(const cv::Mat& img1, const cv::Mat& img2, const cv::Mat& weights1, const cv::Mat& weights2, cv::Mat& result_gold)
{
result_gold.create(img1.size(), img1.type());
int cn = img1.channels();
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Dft, ALL_DEVICES);
for (int y = 0; y < img1.rows; ++y)
{
const float* weights1_row = weights1.ptr<float>(y);
const float* weights2_row = weights2.ptr<float>(y);
const T* img1_row = img1.ptr<T>(y);
const T* img2_row = img2.ptr<T>(y);
T* result_gold_row = result_gold.ptr<T>(y);
///////////////////////////////////////////////////////////////////////////////////////////////////////
// CornerHarris
for (int x = 0; x < img1.cols * cn; ++x)
{
float w1 = weights1_row[x / cn];
float w2 = weights2_row[x / cn];
result_gold_row[x] = static_cast<T>((img1_row[x] * w1 + img2_row[x] * w2) / (w1 + w2 + 1e-5f));
}
}
}
IMPLEMENT_PARAM_CLASS(BlockSize, int);
IMPLEMENT_PARAM_CLASS(ApertureSize, int);
PARAM_TEST_CASE(Blend, cv::gpu::DeviceInfo, MatType, UseRoi)
PARAM_TEST_CASE(CornerHarris, cv::gpu::DeviceInfo, MatType, BorderType, BlockSize, ApertureSize)
{
cv::gpu::DeviceInfo devInfo;
int type;
bool useRoi;
cv::Size size;
cv::Mat img1;
cv::Mat img2;
cv::Mat weights1;
cv::Mat weights2;
cv::Mat result_gold;
int borderType;
int blockSize;
int apertureSize;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
useRoi = GET_PARAM(2);
borderType = GET_PARAM(2);
blockSize = GET_PARAM(3);
apertureSize = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(200 + randInt(rng) % 1000, 200 + randInt(rng) % 1000);
int depth = CV_MAT_DEPTH(type);
img1 = randomMat(rng, size, type, 0.0, depth == CV_8U ? 255.0 : 1.0, false);
img2 = randomMat(rng, size, type, 0.0, depth == CV_8U ? 255.0 : 1.0, false);
weights1 = randomMat(rng, size, CV_32F, 0, 1, false);
weights2 = randomMat(rng, size, CV_32F, 0, 1, false);
if (depth == CV_8U)
blendLinearGold<uchar>(img1, img2, weights1, weights2, result_gold);
else
blendLinearGold<float>(img1, img2, weights1, weights2, result_gold);
}
};
TEST_P(Blend, Accuracy)
TEST_P(CornerHarris, Accuracy)
{
cv::Mat result;
cv::gpu::GpuMat d_result;
cv::Mat src = readImageType("stereobm/aloe-L.png", type);
ASSERT_FALSE(src.empty());
cv::gpu::blendLinear(loadMat(img1, useRoi), loadMat(img2, useRoi), loadMat(weights1, useRoi), loadMat(weights2, useRoi), d_result);
double k = randomDouble(0.1, 0.9);
d_result.download(result);
cv::gpu::GpuMat dst;
cv::gpu::cornerHarris(loadMat(src), dst, blockSize, apertureSize, k, borderType);
cv::Mat dst_gold;
cv::cornerHarris(src, dst_gold, blockSize, apertureSize, k, borderType);
EXPECT_MAT_NEAR(result_gold, result, CV_MAT_DEPTH(type) == CV_8U ? 1.0 : 1e-5);
EXPECT_MAT_NEAR(dst_gold, dst, 0.02);
}
INSTANTIATE_TEST_CASE_P(ImgProc, Blend, Combine(
ALL_DEVICES,
testing::Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32FC1, CV_32FC3, CV_32FC4),
WHOLE_SUBMAT));
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CornerHarris, testing::Combine(
ALL_DEVICES,
testing::Values(MatType(CV_8UC1), MatType(CV_32FC1)),
testing::Values(BorderType(cv::BORDER_REFLECT101), BorderType(cv::BORDER_REPLICATE), BorderType(cv::BORDER_REFLECT)),
testing::Values(BlockSize(3), BlockSize(5), BlockSize(7)),
testing::Values(ApertureSize(0), ApertureSize(3), ApertureSize(5), ApertureSize(7))));
////////////////////////////////////////////////////////
// Canny
///////////////////////////////////////////////////////////////////////////////////////////////////////
// cornerMinEigen
PARAM_TEST_CASE(Canny, cv::gpu::DeviceInfo, int, bool, UseRoi)
PARAM_TEST_CASE(CornerMinEigen, cv::gpu::DeviceInfo, MatType, BorderType, BlockSize, ApertureSize)
{
cv::gpu::DeviceInfo devInfo;
int apperture_size;
bool L2gradient;
bool useRoi;
cv::Mat img;
double low_thresh;
double high_thresh;
cv::Mat edges_gold;
int type;
int borderType;
int blockSize;
int apertureSize;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
apperture_size = GET_PARAM(1);
L2gradient = GET_PARAM(2);
useRoi = GET_PARAM(3);
type = GET_PARAM(1);
borderType = GET_PARAM(2);
blockSize = GET_PARAM(3);
apertureSize = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
img = readImage("stereobm/aloe-L.png", CV_LOAD_IMAGE_GRAYSCALE);
ASSERT_FALSE(img.empty());
low_thresh = 50.0;
high_thresh = 100.0;
cv::Canny(img, edges_gold, low_thresh, high_thresh, apperture_size, L2gradient);
}
};
TEST_P(Canny, Accuracy)
{
cv::Mat edges;
cv::gpu::GpuMat d_edges;
cv::gpu::Canny(loadMat(img, useRoi), d_edges, low_thresh, high_thresh, apperture_size, L2gradient);
d_edges.download(edges);
EXPECT_MAT_SIMILAR(edges_gold, edges, 1.0);
}
INSTANTIATE_TEST_CASE_P(ImgProc, Canny, testing::Combine(
DEVICES(cv::gpu::SHARED_ATOMICS),
Values(3, 5),
Values(false, true),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////
// convolve
namespace
{
void convolveDFT(const cv::Mat& A, const cv::Mat& B, cv::Mat& C, bool ccorr = false)
{
// reallocate the output array if needed
C.create(std::abs(A.rows - B.rows) + 1, std::abs(A.cols - B.cols) + 1, A.type());
Size dftSize;
// compute the size of DFT transform
dftSize.width = cv::getOptimalDFTSize(A.cols + B.cols - 1);
dftSize.height = cv::getOptimalDFTSize(A.rows + B.rows - 1);
// allocate temporary buffers and initialize them with 0s
cv::Mat tempA(dftSize, A.type(), cv::Scalar::all(0));
cv::Mat tempB(dftSize, B.type(), cv::Scalar::all(0));
// copy A and B to the top-left corners of tempA and tempB, respectively
cv::Mat roiA(tempA, cv::Rect(0, 0, A.cols, A.rows));
A.copyTo(roiA);
cv::Mat roiB(tempB, cv::Rect(0, 0, B.cols, B.rows));
B.copyTo(roiB);
// now transform the padded A & B in-place;
// use "nonzeroRows" hint for faster processing
cv::dft(tempA, tempA, 0, A.rows);
cv::dft(tempB, tempB, 0, B.rows);
// multiply the spectrums;
// the function handles packed spectrum representations well
cv::mulSpectrums(tempA, tempB, tempA, 0, ccorr);
// transform the product back from the frequency domain.
// Even though all the result rows will be non-zero,
// you need only the first C.rows of them, and thus you
// pass nonzeroRows == C.rows
cv::dft(tempA, tempA, cv::DFT_INVERSE + cv::DFT_SCALE, C.rows);
// now copy the result back to C.
tempA(cv::Rect(0, 0, C.cols, C.rows)).copyTo(C);
}
}
PARAM_TEST_CASE(Convolve, cv::gpu::DeviceInfo, int, bool)
TEST_P(CornerMinEigen, Accuracy)
{
cv::gpu::DeviceInfo devInfo;
int ksize;
bool ccorr;
cv::Mat src = readImageType("stereobm/aloe-L.png", type);
ASSERT_FALSE(src.empty());
cv::Mat src;
cv::Mat kernel;
cv::gpu::GpuMat dst;
cv::gpu::cornerMinEigenVal(loadMat(src), dst, blockSize, apertureSize, borderType);
cv::Mat dst_gold;
cv::cornerMinEigenVal(src, dst_gold, blockSize, apertureSize, borderType);
virtual void SetUp()
{
devInfo = GET_PARAM(0);
ksize = GET_PARAM(1);
ccorr = GET_PARAM(2);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
cv::Size size(rng.uniform(200, 400), rng.uniform(200, 400));
src = randomMat(rng, size, CV_32FC1, 0.0, 100.0, false);
kernel = randomMat(rng, cv::Size(ksize, ksize), CV_32FC1, 0.0, 1.0, false);
convolveDFT(src, kernel, dst_gold, ccorr);
}
};
TEST_P(Convolve, Accuracy)
{
cv::Mat dst;
cv::gpu::GpuMat d_dst;
cv::gpu::convolve(loadMat(src), loadMat(kernel), d_dst, ccorr);
d_dst.download(dst);
EXPECT_MAT_NEAR(dst, dst_gold, 1e-1);
EXPECT_MAT_NEAR(dst_gold, dst, 0.02);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CornerMinEigen, testing::Combine(
ALL_DEVICES,
testing::Values(MatType(CV_8UC1), MatType(CV_32FC1)),
testing::Values(BorderType(cv::BORDER_REFLECT101), BorderType(cv::BORDER_REPLICATE), BorderType(cv::BORDER_REFLECT)),
testing::Values(BlockSize(3), BlockSize(5), BlockSize(7)),
testing::Values(ApertureSize(0), ApertureSize(3), ApertureSize(5), ApertureSize(7))));
INSTANTIATE_TEST_CASE_P(ImgProc, Convolve, Combine(
ALL_DEVICES,
Values(3, 7, 11, 17, 19, 23, 45),
Bool()));
#endif // HAVE_CUDA
} // namespace
@@ -265,7 +265,7 @@ void PrintTo(const Inverse& useRoi, std::ostream* os);
}; \
inline void PrintTo( name param, std::ostream* os) \
{ \
*os << #name << "(" << static_cast< type >(param) << ")"; \
*os << #name << "(" << testing::PrintToString(static_cast< type >(param)) << ")"; \
}
IMPLEMENT_PARAM_CLASS(Channels, int)
......