Commit 509c9101 authored by Vladislav Vinogradov

refactored and fixed some gpu tests

fixed some bugs in gpu module
parent a659832d
@@ -91,6 +91,12 @@ void cv::gpu::gemm(const GpuMat& src1, const GpuMat& src2, double alpha, const G
    bool tr2 = (flags & GEMM_2_T) != 0;
    bool tr3 = (flags & GEMM_3_T) != 0;

+   if (src1.type() == CV_64FC2)
+   {
+       if (tr1 || tr2 || tr3)
+           CV_Error(CV_StsNotImplemented, "transpose operation doesn't implemented for CV_64FC2 type");
+   }
+
    Size src1Size = tr1 ? Size(src1.rows, src1.cols) : src1.size();
    Size src2Size = tr2 ? Size(src2.rows, src2.cols) : src2.size();
    Size src3Size = tr3 ? Size(src3.rows, src3.cols) : src3.size();
@@ -99,7 +105,7 @@ void cv::gpu::gemm(const GpuMat& src1, const GpuMat& src2, double alpha, const G
    CV_Assert(src1Size.width == src2Size.height);
    CV_Assert(src3.empty() || src3Size == dstSize);

-   dst.create(dstSize, CV_32FC1);
+   dst.create(dstSize, src1.type());

    if (beta != 0)
    {
...
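// Editor's note (illustrative sketch, not part of this commit): after the change above,
// cv::gpu::gemm creates dst with src1's type instead of forcing CV_32FC1, and CV_64FC2
// inputs reject the transpose flags. The argument order follows the GEMM test added later
// in this commit; treat the call as a hedged usage example, not authoritative documentation.
static void gemm_output_type_sketch()
{
    cv::Mat a = cv::Mat::ones(64, 64, CV_32FC2);
    cv::Mat b = cv::Mat::ones(64, 64, CV_32FC2);

    cv::gpu::GpuMat d_a(a), d_b(b), d_c;

    // C = 1.0 * A * B with no src3 term and no transpose flags
    cv::gpu::gemm(d_a, d_b, 1.0, cv::gpu::GpuMat(), 0.0, d_c, 0);

    CV_Assert(d_c.type() == d_a.type());   // dst now inherits src1.type()
}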
@@ -1672,40 +1672,53 @@ namespace cv { namespace gpu { namespace device
    template<typename T, bool Signed = device::numeric_limits<T>::is_signed> struct PowOp : unary_function<T, T>
    {
-       float power;
-       PowOp(float power_) : power(power_) {}
-       __device__ __forceinline__ T operator()(const T& e) const
+       const float power;
+
+       PowOp(double power_) : power(static_cast<float>(power_)) {}
+
+       __device__ __forceinline__ T operator()(T e) const
        {
            return saturate_cast<T>(__powf((float)e, power));
        }
    };
    template<typename T> struct PowOp<T, true> : unary_function<T, T>
    {
-       float power;
-       PowOp(float power_) : power(power_) {}
-       __device__ __forceinline__ float operator()(const T& e) const
+       const float power;
+
+       PowOp(double power_) : power(static_cast<float>(power_)) {}
+
+       __device__ __forceinline__ T operator()(T e) const
        {
            T res = saturate_cast<T>(__powf((float)e, power));

-           if ( (e < 0) && (1 & (int)power) )
+           if ((e < 0) && (1 & static_cast<int>(power)))
                res *= -1;
+
            return res;
        }
    };
    template<> struct PowOp<float> : unary_function<float, float>
    {
-       float power;
-       PowOp(float power_) : power(power_) {}
-       __device__ __forceinline__ float operator()(const float& e) const
+       const float power;
+
+       PowOp(double power_) : power(static_cast<float>(power_)) {}
+
+       __device__ __forceinline__ float operator()(float e) const
        {
            return __powf(::fabs(e), power);
        }
    };
+   template<> struct PowOp<double> : unary_function<double, double>
+   {
+       const double power;
+
+       PowOp(double power_) : power(power_) {}
+
+       __device__ __forceinline__ double operator()(double e) const
+       {
+           return ::pow(::fabs(e), power);
+       }
+   };

    namespace detail
    {
@@ -1733,17 +1746,18 @@ namespace cv { namespace gpu { namespace device
    };

    template<typename T>
-   void pow_caller(const DevMem2Db& src, float power, DevMem2Db dst, cudaStream_t stream)
+   void pow_caller(DevMem2Db src, double power, DevMem2Db dst, cudaStream_t stream)
    {
        cv::gpu::device::transform((DevMem2D_<T>)src, (DevMem2D_<T>)dst, PowOp<T>(power), WithOutMask(), stream);
    }

-   template void pow_caller<uchar>(const DevMem2Db& src, float power, DevMem2Db dst, cudaStream_t stream);
-   template void pow_caller<schar>(const DevMem2Db& src, float power, DevMem2Db dst, cudaStream_t stream);
-   template void pow_caller<short>(const DevMem2Db& src, float power, DevMem2Db dst, cudaStream_t stream);
-   template void pow_caller<ushort>(const DevMem2Db& src, float power, DevMem2Db dst, cudaStream_t stream);
-   template void pow_caller<int>(const DevMem2Db& src, float power, DevMem2Db dst, cudaStream_t stream);
-   template void pow_caller<float>(const DevMem2Db& src, float power, DevMem2Db dst, cudaStream_t stream);
+   template void pow_caller<uchar>(DevMem2Db src, double power, DevMem2Db dst, cudaStream_t stream);
+   template void pow_caller<schar>(DevMem2Db src, double power, DevMem2Db dst, cudaStream_t stream);
+   template void pow_caller<short>(DevMem2Db src, double power, DevMem2Db dst, cudaStream_t stream);
+   template void pow_caller<ushort>(DevMem2Db src, double power, DevMem2Db dst, cudaStream_t stream);
+   template void pow_caller<int>(DevMem2Db src, double power, DevMem2Db dst, cudaStream_t stream);
+   template void pow_caller<float>(DevMem2Db src, double power, DevMem2Db dst, cudaStream_t stream);
+   template void pow_caller<double>(DevMem2Db src, double power, DevMem2Db dst, cudaStream_t stream);

    //////////////////////////////////////////////////////////////////////////
    // addWeighted
...
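// Editor's note (illustrative host-side analogue, not part of this commit): the signed
// PowOp specialization above raises the value to the power in single precision and then
// flips the sign of the result when the input is negative and the integer exponent is odd.
// A plain C++ sketch of that intent, using std::pow instead of the __powf device intrinsic:
#include <cmath>
#include <cstdlib>

static int signed_pow_sketch(int e, int power)
{
    int res = static_cast<int>(std::pow(static_cast<double>(std::abs(e)), power) + 0.5);
    if (e < 0 && (power & 1))
        res = -res;
    return res;
}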
@@ -1301,50 +1301,26 @@ void cv::gpu::compare(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, int c
    };

    CV_Assert(src1.size() == src2.size() && src1.type() == src2.type());
+   CV_Assert(cmpop >= CMP_EQ && cmpop <= CMP_NE);

-   int code;
-   const GpuMat* psrc1;
-   const GpuMat* psrc2;
-
-   switch (cmpop)
-   {
-   case CMP_EQ:
-       code = 0;
-       psrc1 = &src1;
-       psrc2 = &src2;
-       break;
-   case CMP_GE:
-       code = 3;
-       psrc1 = &src2;
-       psrc2 = &src1;
-       break;
-   case CMP_GT:
-       code = 2;
-       psrc1 = &src2;
-       psrc2 = &src1;
-       break;
-   case CMP_LE:
-       code = 3;
-       psrc1 = &src1;
-       psrc2 = &src2;
-       break;
-   case CMP_LT:
-       code = 2;
-       psrc1 = &src1;
-       psrc2 = &src2;
-       break;
-   case CMP_NE:
-       code = 1;
-       psrc1 = &src1;
-       psrc2 = &src2;
-       break;
-   default:
-       CV_Error(CV_StsBadFlag, "Incorrect compare operation");
-   };
+   static const int codes[] =
+   {
+       0, 2, 3, 2, 3, 1
+   };
+
+   const GpuMat* psrc1[] =
+   {
+       &src1, &src2, &src2, &src1, &src1, &src1
+   };
+
+   const GpuMat* psrc2[] =
+   {
+       &src2, &src1, &src1, &src2, &src2, &src2
+   };

    dst.create(src1.size(), CV_MAKE_TYPE(CV_8U, src1.channels()));

-   funcs[src1.depth()][code](psrc1->reshape(1), psrc2->reshape(1), dst.reshape(1), StreamAccessor::getStream(stream));
+   funcs[src1.depth()][codes[cmpop]](psrc1[cmpop]->reshape(1), psrc2[cmpop]->reshape(1), dst.reshape(1), StreamAccessor::getStream(stream));
}
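// Editor's note (illustrative sketch with hypothetical names, not part of this commit): the
// switch above is replaced by table lookups indexed directly by cmpop (CMP_EQ = 0 .. CMP_NE = 5).
// Reading the tables, codes[] selects one of four device kernels (==, !=, <, <=) and the
// psrc1/psrc2 arrays swap the operands so that > and >= reuse the < and <= kernels.
static void compare_dispatch_sketch(int cmpop)
{
    static const int codes[] = { 0, 2, 3, 2, 3, 1 };               // kernel ids per cmpop
    static const bool swapOperands[] = { false, true, true, false, false, false };

    // e.g. CMP_LT -> kernel 2 ("<") with operands in order,
    //      CMP_GT -> kernel 2 ("<") with operands swapped.
    int kernel = codes[cmpop];
    bool swapped = swapOperands[cmpop];
    (void)kernel; (void)swapped;
}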
@@ -1944,26 +1920,25 @@ double cv::gpu::threshold(const GpuMat& src, GpuMat& dst, double thresh, double
namespace cv { namespace gpu { namespace device
{
    template<typename T>
-   void pow_caller(const DevMem2Db& src, float power, DevMem2Db dst, cudaStream_t stream);
+   void pow_caller(DevMem2Db src, double power, DevMem2Db dst, cudaStream_t stream);
}}}

void cv::gpu::pow(const GpuMat& src, double power, GpuMat& dst, Stream& stream)
{
-   using namespace ::cv::gpu::device;
+   using namespace cv::gpu::device;

-   CV_Assert(src.depth() != CV_64F);
-   dst.create(src.size(), src.type());
-
-   typedef void (*caller_t)(const DevMem2Db& src, float power, DevMem2Db dst, cudaStream_t stream);
+   typedef void (*func_t)(DevMem2Db src, double power, DevMem2Db dst, cudaStream_t stream);

-   static const caller_t callers[] =
+   static const func_t funcs[] =
    {
        pow_caller<unsigned char>,  pow_caller<signed char>,
        pow_caller<unsigned short>, pow_caller<short>,
-       pow_caller<int>, pow_caller<float>
+       pow_caller<int>, pow_caller<float>, pow_caller<double>
    };

-   callers[src.depth()](src.reshape(1), (float)power, dst.reshape(1), StreamAccessor::getStream(stream));
+   dst.create(src.size(), src.type());
+
+   funcs[src.depth()](src.reshape(1), power, dst.reshape(1), StreamAccessor::getStream(stream));
}
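// Editor's note (hedged usage sketch, not part of this commit): with pow_caller<double>
// added to the table, cv::gpu::pow no longer needs the CV_Assert(src.depth() != CV_64F)
// guard, so CV_64F matrices are handled directly:
static void pow_double_sketch()
{
    cv::Mat host(32, 32, CV_64FC1, cv::Scalar::all(3.0));
    cv::gpu::GpuMat src(host), dst;

    cv::gpu::pow(src, 2.0, dst);        // dispatched through funcs[CV_64F] = pow_caller<double>

    CV_Assert(dst.type() == CV_64FC1);  // dst keeps the source type
}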
////////////////////////////////////////////////////////////////////////

@@ -2052,27 +2027,11 @@ namespace cv { namespace gpu { namespace device
void cv::gpu::addWeighted(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst, int dtype, Stream& stream)
{
-   using namespace ::cv::gpu::device;
+   using namespace cv::gpu::device;

-   CV_Assert(src1.size() == src2.size());
-   CV_Assert(src1.type() == src2.type() || (dtype >= 0 && src1.channels() == src2.channels()));
-
-   dtype = dtype >= 0 ? CV_MAKETYPE(dtype, src1.channels()) : src1.type();
-
-   dst.create(src1.size(), dtype);
-
-   const GpuMat* psrc1 = &src1;
-   const GpuMat* psrc2 = &src2;
-
-   if (src1.depth() > src2.depth())
-   {
-       std::swap(psrc1, psrc2);
-       std::swap(alpha, beta);
-   }
-
-   typedef void (*caller_t)(const DevMem2Db& src1, double alpha, const DevMem2Db& src2, double beta, double gamma, const DevMem2Db& dst, cudaStream_t stream);
+   typedef void (*func_t)(const DevMem2Db& src1, double alpha, const DevMem2Db& src2, double beta, double gamma, const DevMem2Db& dst, cudaStream_t stream);

-   static const caller_t callers[7][7][7] =
+   static const func_t funcs[7][7][7] =
    {
        {
            {
@@ -2531,7 +2490,26 @@ void cv::gpu::addWeighted(const GpuMat& src1, double alpha, const GpuMat& src2,
        }
    };

-   callers[psrc1->depth()][psrc2->depth()][dst.depth()](psrc1->reshape(1), alpha, psrc2->reshape(1), beta, gamma, dst.reshape(1), StreamAccessor::getStream(stream));
+   CV_Assert(src1.size() == src2.size());
+   CV_Assert(src1.type() == src2.type() || (dtype >= 0 && src1.channels() == src2.channels()));
+
+   dtype = dtype >= 0 ? CV_MAKETYPE(dtype, src1.channels()) : src1.type();
+
+   dst.create(src1.size(), dtype);
+
+   const GpuMat* psrc1 = &src1;
+   const GpuMat* psrc2 = &src2;
+
+   if (src1.depth() > src2.depth())
+   {
+       std::swap(psrc1, psrc2);
+       std::swap(alpha, beta);
+   }
+
+   const func_t func = funcs[psrc1->depth()][psrc2->depth()][dst.depth()];
+   CV_Assert(func != 0);
+
+   func(psrc1->reshape(1), alpha, psrc2->reshape(1), beta, gamma, dst.reshape(1), StreamAccessor::getStream(stream));
}

#endif
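// Editor's note (illustrative sketch with hypothetical names, not part of this commit): the
// funcs[7][7][7] table above is indexed by the two source depths and the destination depth,
// and a null entry means the combination has no kernel; CV_Assert(func != 0) turns that into
// a clean error instead of a crash. A minimal standalone picture of the same pattern:
#include <cassert>

typedef void (*weighted_func_t)(const void* src1, const void* src2, void* dst);

static void dispatch_by_depth_sketch(const weighted_func_t table[7][7][7],
                                     int depth1, int depth2, int ddepth)
{
    const weighted_func_t func = table[depth1][depth2][ddepth];
    assert(func != 0 && "unsupported depth combination");
    (void)func;   // a real caller would invoke func(...) here
}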
@@ -41,8 +41,6 @@
#include "precomp.hpp"

-#ifdef HAVE_CUDA
-
////////////////////////////////////////////////////////////////////////////////
// Add_Array
@@ -74,12 +72,6 @@ PARAM_TEST_CASE(Add_Array, cv::gpu::DeviceInfo, cv::Size, std::pair<MatDepth, Ma
TEST_P(Add_Array, Accuracy)
{
-   if (depth.first == CV_64F || depth.second == CV_64F)
-   {
-       if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
-           return;
-   }
-
    cv::Mat mat1 = randomMat(size, stype);
    cv::Mat mat2 = randomMat(size, stype);
    cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
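// Editor's note (hypothetical helper, not part of this commit): the block removed above
// skipped 64-bit float cases on devices without native double support. Its check, factored
// into a standalone predicate using the same DeviceInfo::supports API:
static bool doubleDepthSupported(const cv::gpu::DeviceInfo& devInfo, int depth1, int depth2)
{
    if (depth1 != CV_64F && depth2 != CV_64F)
        return true;
    return devInfo.supports(cv::gpu::NATIVE_DOUBLE);
}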
@@ -124,12 +116,6 @@ PARAM_TEST_CASE(Add_Scalar, cv::gpu::DeviceInfo, cv::Size, std::pair<MatDepth, M
TEST_P(Add_Scalar, Accuracy)
{
-   if (depth.first == CV_64F || depth.second == CV_64F)
-   {
-       if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
-           return;
-   }
-
    cv::Mat mat = randomMat(size, depth.first);
    cv::Scalar val = randomScalar(0, 255);
    cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
@@ -181,12 +167,6 @@ PARAM_TEST_CASE(Subtract_Array, cv::gpu::DeviceInfo, cv::Size, std::pair<MatDept
TEST_P(Subtract_Array, Accuracy)
{
-   if (depth.first == CV_64F || depth.second == CV_64F)
-   {
-       if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
-           return;
-   }
-
    cv::Mat mat1 = randomMat(size, stype);
    cv::Mat mat2 = randomMat(size, stype);
    cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
@@ -231,12 +211,6 @@ PARAM_TEST_CASE(Subtract_Scalar, cv::gpu::DeviceInfo, cv::Size, std::pair<MatDep
TEST_P(Subtract_Scalar, Accuracy)
{
-   if (depth.first == CV_64F || depth.second == CV_64F)
-   {
-       if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
-           return;
-   }
-
    cv::Mat mat = randomMat(size, depth.first);
    cv::Scalar val = randomScalar(0, 255);
    cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
@@ -288,12 +262,6 @@ PARAM_TEST_CASE(Multiply_Array, cv::gpu::DeviceInfo, cv::Size, std::pair<MatDept
TEST_P(Multiply_Array, Accuracy)
{
-   if (depth.first == CV_64F || depth.second == CV_64F)
-   {
-       if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
-           return;
-   }
-
    cv::Mat mat1 = randomMat(size, stype);
    cv::Mat mat2 = randomMat(size, stype);
    double scale = randomDouble(0.0, 255.0);
@@ -315,9 +283,9 @@ INSTANTIATE_TEST_CASE_P(GPU_Core, Multiply_Array, testing::Combine(
    WHOLE_SUBMAT));

////////////////////////////////////////////////////////////////////////////////
-// Multiply_Array_Special_Case
+// Multiply_Array_Special

-PARAM_TEST_CASE(Multiply_Array_Special_Case, cv::gpu::DeviceInfo, cv::Size, UseRoi)
+PARAM_TEST_CASE(Multiply_Array_Special, cv::gpu::DeviceInfo, cv::Size, UseRoi)
{
    cv::gpu::DeviceInfo devInfo;
    cv::Size size;
@@ -333,7 +301,7 @@ PARAM_TEST_CASE(Multiply_Array_Special_Case, cv::gpu::DeviceInfo, cv::Size, UseR
    }
};

-TEST_P(Multiply_Array_Special_Case, _8UC4x_32FC1)
+TEST_P(Multiply_Array_Special, Case_8UC4x_32FC1)
{
    cv::Mat mat1 = randomMat(size, CV_8UC4);
    cv::Mat mat2 = randomMat(size, CV_32FC1);
@@ -370,7 +338,7 @@ TEST_P(Multiply_Array_Special_Case, _8UC4x_32FC1)
    }
}

-TEST_P(Multiply_Array_Special_Case, _16SC4x_32FC1)
+TEST_P(Multiply_Array_Special, Case_16SC4x_32FC1)
{
    cv::Mat mat1 = randomMat(size, CV_16SC4);
    cv::Mat mat2 = randomMat(size, CV_32FC1);
@@ -407,7 +375,7 @@ TEST_P(Multiply_Array_Special_Case, _16SC4x_32FC1)
    }
}

-INSTANTIATE_TEST_CASE_P(GPU_Core, Multiply_Array_Special_Case, testing::Combine(
+INSTANTIATE_TEST_CASE_P(GPU_Core, Multiply_Array_Special, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    WHOLE_SUBMAT));
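// Editor's note (hedged usage sketch, not part of this commit): the renamed
// Multiply_Array_Special cases exercise per-pixel scaling of a 4-channel image by a
// single-channel CV_32FC1 weight map, roughly like this:
static void multiply_special_case_sketch(const cv::Mat& rgba8u, const cv::Mat& weights32f)
{
    CV_Assert(rgba8u.type() == CV_8UC4 && weights32f.type() == CV_32FC1);
    CV_Assert(rgba8u.size() == weights32f.size());

    cv::gpu::GpuMat d_src(rgba8u), d_weights(weights32f), d_dst;

    // each of the four channels is multiplied by the corresponding weight
    cv::gpu::multiply(d_src, d_weights, d_dst);
}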
@@ -435,12 +403,6 @@ PARAM_TEST_CASE(Multiply_Scalar, cv::gpu::DeviceInfo, cv::Size, std::pair<MatDep
TEST_P(Multiply_Scalar, Accuracy)
{
-   if (depth.first == CV_64F || depth.second == CV_64F)
-   {
-       if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
-           return;
-   }
-
    cv::Mat mat = randomMat(size, depth.first);
    cv::Scalar val = randomScalar(0, 255);
    double scale = randomDouble(0.0, 255.0);
@@ -491,12 +453,6 @@ PARAM_TEST_CASE(Divide_Array, cv::gpu::DeviceInfo, cv::Size, std::pair<MatDepth,
TEST_P(Divide_Array, Accuracy)
{
-   if (depth.first == CV_64F || depth.second == CV_64F)
-   {
-       if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
-           return;
-   }
-
    cv::Mat mat1 = randomMat(size, stype);
    cv::Mat mat2 = randomMat(size, stype, 1.0, 255.0);
    double scale = randomDouble(0.0, 255.0);
@@ -518,9 +474,9 @@ INSTANTIATE_TEST_CASE_P(GPU_Core, Divide_Array, testing::Combine(
    WHOLE_SUBMAT));

////////////////////////////////////////////////////////////////////////////////
-// Divide_Array_Special_Case
+// Divide_Array_Special

-PARAM_TEST_CASE(Divide_Array_Special_Case, cv::gpu::DeviceInfo, cv::Size, UseRoi)
+PARAM_TEST_CASE(Divide_Array_Special, cv::gpu::DeviceInfo, cv::Size, UseRoi)
{
    cv::gpu::DeviceInfo devInfo;
    cv::Size size;
@@ -536,7 +492,7 @@ PARAM_TEST_CASE(Divide_Array_Special_Case, cv::gpu::DeviceInfo, cv::Size, UseRoi
    }
};

-TEST_P(Divide_Array_Special_Case, _8UC4x_32FC1)
+TEST_P(Divide_Array_Special, Case_8UC4x_32FC1)
{
    cv::Mat mat1 = randomMat(size, CV_8UC4);
    cv::Mat mat2 = randomMat(size, CV_32FC1, 1.0, 255.0);
@@ -573,7 +529,7 @@ TEST_P(Divide_Array_Special_Case, _8UC4x_32FC1)
    }
}

-TEST_P(Divide_Array_Special_Case, _16SC4x_32FC1)
+TEST_P(Divide_Array_Special, Case_16SC4x_32FC1)
{
    cv::Mat mat1 = randomMat(size, CV_16SC4);
    cv::Mat mat2 = randomMat(size, CV_32FC1, 1.0, 255.0);
@@ -610,7 +566,7 @@ TEST_P(Divide_Array_Special_Case, _16SC4x_32FC1)
    }
}

-INSTANTIATE_TEST_CASE_P(GPU_Core, Divide_Array_Special_Case, testing::Combine(
+INSTANTIATE_TEST_CASE_P(GPU_Core, Divide_Array_Special, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    WHOLE_SUBMAT));
@@ -638,12 +594,6 @@ PARAM_TEST_CASE(Divide_Scalar, cv::gpu::DeviceInfo, cv::Size, std::pair<MatDepth
TEST_P(Divide_Scalar, Accuracy)
{
-   if (depth.first == CV_64F || depth.second == CV_64F)
-   {
-       if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
-           return;
-   }
-
    cv::Mat mat = randomMat(size, depth.first);
    cv::Scalar val = randomScalar(1.0, 255.0);
    double scale = randomDouble(0.0, 255.0);
@@ -686,12 +636,6 @@ PARAM_TEST_CASE(Divide_Scalar_Inv, cv::gpu::DeviceInfo, cv::Size, std::pair<MatD
TEST_P(Divide_Scalar_Inv, Accuracy)
{
-   if (depth.first == CV_64F || depth.second == CV_64F)
-   {
-       if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
-           return;
-   }
-
    double scale = randomDouble(0.0, 255.0);
    cv::Mat mat = randomMat(size, depth.first, 1.0, 255.0);
@@ -733,12 +677,6 @@ PARAM_TEST_CASE(AbsDiff, cv::gpu::DeviceInfo, cv::Size, MatDepth, UseRoi)
TEST_P(AbsDiff, Array)
{
-   if (depth == CV_64F)
-   {
-       if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
-           return;
-   }
-
    cv::Mat src1 = randomMat(size, depth);
    cv::Mat src2 = randomMat(size, depth);
@@ -753,12 +691,6 @@ TEST_P(AbsDiff, Array)
TEST_P(AbsDiff, Scalar)
{
-   if (depth == CV_64F)
-   {
-       if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
-           return;
-   }
-
    cv::Mat src = randomMat(size, depth);
    cv::Scalar val = randomScalar(0.0, 255.0);
@@ -853,7 +785,10 @@ TEST_P(Sqr, Accuracy)
INSTANTIATE_TEST_CASE_P(GPU_Core, Sqr, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
-   testing::Values(MatType(CV_8UC1), MatType(CV_16UC1), MatType(CV_16SC1), MatType(CV_32FC1)),
+   testing::Values(MatType(CV_8UC1),
+                   MatType(CV_16UC1),
+                   MatType(CV_16SC1),
+                   MatType(CV_32FC1)),
    WHOLE_SUBMAT));

////////////////////////////////////////////////////////////////////////////////
@@ -920,7 +855,10 @@ TEST_P(Sqrt, Accuracy)
INSTANTIATE_TEST_CASE_P(GPU_Core, Sqrt, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
-   testing::Values(MatType(CV_8UC1), MatType(CV_16UC1), MatType(CV_16SC1), MatType(CV_32FC1)),
+   testing::Values(MatType(CV_8UC1),
+                   MatType(CV_16UC1),
+                   MatType(CV_16SC1),
+                   MatType(CV_32FC1)),
    WHOLE_SUBMAT));

////////////////////////////////////////////////////////////////////////////////
@@ -987,7 +925,10 @@ TEST_P(Log, Accuracy)
INSTANTIATE_TEST_CASE_P(GPU_Core, Log, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
-   testing::Values(MatType(CV_8UC1), MatType(CV_16UC1), MatType(CV_16SC1), MatType(CV_32FC1)),
+   testing::Values(MatType(CV_8UC1),
+                   MatType(CV_16UC1),
+                   MatType(CV_16SC1),
+                   MatType(CV_32FC1)),
    WHOLE_SUBMAT));

////////////////////////////////////////////////////////////////////////////////
@@ -1283,7 +1224,11 @@ TEST_P(RShift, Accuracy)
INSTANTIATE_TEST_CASE_P(GPU_Core, RShift, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
-   testing::Values(MatDepth(CV_8U), MatDepth(CV_8S), MatDepth(CV_16U), MatDepth(CV_16S), MatDepth(CV_32S)),
+   testing::Values(MatDepth(CV_8U),
+                   MatDepth(CV_8S),
+                   MatDepth(CV_16U),
+                   MatDepth(CV_16S),
+                   MatDepth(CV_32S)),
    testing::Values(1, 3, 4),
    WHOLE_SUBMAT));
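// Editor's note (illustrative fixture with a hypothetical name, not part of this commit):
// INSTANTIATE_TEST_CASE_P with testing::Combine runs a parameterized fixture once for every
// element of the cartesian product of its generators, and GET_PARAM(n) reads the n-th factor
// back in SetUp(). Every instantiation in this file follows that pattern:
PARAM_TEST_CASE(CombineExample, cv::gpu::DeviceInfo, cv::Size, MatDepth, int, UseRoi)
{
    cv::gpu::DeviceInfo devInfo;
    cv::Size size;
    int depth;
    int channels;
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        channels = GET_PARAM(3);
        useRoi = GET_PARAM(4);

        cv::gpu::setDevice(devInfo.deviceID());
    }
};

TEST_P(CombineExample, EnumeratesAllCombinations)
{
    EXPECT_TRUE(channels == 1 || channels == 3 || channels == 4);
}

INSTANTIATE_TEST_CASE_P(GPU_Core, CombineExample, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    testing::Values(MatDepth(CV_8U), MatDepth(CV_32F)),
    testing::Values(1, 3, 4),
    WHOLE_SUBMAT));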
@@ -1443,1042 +1388,1104 @@ INSTANTIATE_TEST_CASE_P(GPU_Core, Max, testing::Combine(
    ALL_DEPTH,
    WHOLE_SUBMAT));
+////////////////////////////////////////////////////////////////////////////////
+// Pow
+
+PARAM_TEST_CASE(Pow, cv::gpu::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    cv::Size size;
+    int depth;
+    bool useRoi;
+
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        size = GET_PARAM(1);
+        depth = GET_PARAM(2);
+        useRoi = GET_PARAM(3);
+
+        cv::gpu::setDevice(devInfo.deviceID());
+    }
+};
+
+TEST_P(Pow, Accuracy)
+{
+    cv::Mat src = randomMat(size, depth, 0.0, 100.0);
+    double power = randomDouble(2.0, 4.0);
+
+    if (src.depth() < CV_32F)
+        power = static_cast<int>(power);
+
+    cv::gpu::GpuMat dst = createMat(size, depth, useRoi);
+    cv::gpu::pow(loadMat(src, useRoi), power, dst);
+
+    cv::Mat dst_gold;
+    cv::pow(src, power, dst_gold);
+
+    EXPECT_MAT_NEAR(dst_gold, dst, depth < CV_32F ? 0.0 : 1e-6);
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Core, Pow, testing::Combine(
+    ALL_DEVICES,
+    DIFFERENT_SIZES,
+    ALL_DEPTH,
+    WHOLE_SUBMAT));
-using namespace cvtest;
-using namespace testing;
-
-PARAM_TEST_CASE(ArithmTestBase, cv::gpu::DeviceInfo, MatType, UseRoi)
-{
-    cv::gpu::DeviceInfo devInfo;
-    int type;
-    bool useRoi;
-
-    cv::Size size;
-    cv::Mat mat1;
-    cv::Mat mat2;
-    cv::Scalar val;
-
-    virtual void SetUp()
-    {
-        devInfo = GET_PARAM(0);
-        type = GET_PARAM(1);
-        useRoi = GET_PARAM(2);
-
-        cv::gpu::setDevice(devInfo.deviceID());
-
-        cv::RNG& rng = TS::ptr()->get_rng();
-
-        size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
-
-        mat1 = randomMat(rng, size, type, 5, 16, false);
-        mat2 = randomMat(rng, size, type, 5, 16, false);
-
-        val = cv::Scalar(rng.uniform(1, 3), rng.uniform(1, 3), rng.uniform(1, 3), rng.uniform(1, 3));
-    }
-};
-
-////////////////////////////////////////////////////////////////////////////////
-// transpose
-
-struct Transpose : ArithmTestBase {};
-
-TEST_P(Transpose, Accuracy)
-{
-    cv::Mat dst_gold;
-    cv::transpose(mat1, dst_gold);
-
-    cv::Mat dst;
-
-    cv::gpu::GpuMat gpuRes;
-    cv::gpu::transpose(loadMat(mat1, useRoi), gpuRes);
-
-    gpuRes.download(dst);
-
-    EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
-}
-
-INSTANTIATE_TEST_CASE_P(Arithm, Transpose, Combine(
-    ALL_DEVICES,
-    Values(CV_8UC1, CV_8UC4, CV_8SC1, CV_8SC4, CV_16UC2, CV_16SC2, CV_32SC1, CV_32SC2, CV_32FC1, CV_32FC2, CV_64FC1),
-    WHOLE_SUBMAT));
+
+//////////////////////////////////////////////////////////////////////////////
+// AddWeighted
+
+PARAM_TEST_CASE(AddWeighted, cv::gpu::DeviceInfo, cv::Size, MatDepth, MatDepth, MatDepth, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    cv::Size size;
+    int depth1;
+    int depth2;
+    int dst_depth;
+    bool useRoi;
+
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        size = GET_PARAM(1);
+        depth1 = GET_PARAM(2);
+        depth2 = GET_PARAM(3);
+        dst_depth = GET_PARAM(4);
+        useRoi = GET_PARAM(5);
+
+        cv::gpu::setDevice(devInfo.deviceID());
+    }
+};
+
+TEST_P(AddWeighted, Accuracy)
+{
+    cv::Mat src1 = randomMat(size, depth1);
+    cv::Mat src2 = randomMat(size, depth2);
+    double alpha = randomDouble(-10.0, 10.0);
+    double beta = randomDouble(-10.0, 10.0);
+    double gamma = randomDouble(-10.0, 10.0);
+
+    cv::gpu::GpuMat dst = createMat(size, dst_depth, useRoi);
+    cv::gpu::addWeighted(loadMat(src1, useRoi), alpha, loadMat(src2, useRoi), beta, gamma, dst, dst_depth);
+
+    cv::Mat dst_gold;
+    cv::addWeighted(src1, alpha, src2, beta, gamma, dst_gold, dst_depth);
+
+    EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 1.0 : 1e-12);
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Core, AddWeighted, testing::Combine(
+    ALL_DEVICES,
+    DIFFERENT_SIZES,
+    ALL_DEPTH,
+    ALL_DEPTH,
+    ALL_DEPTH,
+    WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
-// meanStdDev
-
-PARAM_TEST_CASE(MeanStdDev, cv::gpu::DeviceInfo, UseRoi)
-{
-    cv::gpu::DeviceInfo devInfo;
-    bool useRoi;
-
-    cv::Size size;
-    cv::Mat mat;
-
-    cv::Scalar mean_gold;
-    cv::Scalar stddev_gold;
-
-    virtual void SetUp()
-    {
-        devInfo = GET_PARAM(0);
-        useRoi = GET_PARAM(1);
-
-        cv::gpu::setDevice(devInfo.deviceID());
-
-        cv::RNG& rng = TS::ptr()->get_rng();
-
-        size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
-
-        mat = randomMat(rng, size, CV_8UC1, 1, 255, false);
-
-        cv::meanStdDev(mat, mean_gold, stddev_gold);
-    }
-};
-
-TEST_P(MeanStdDev, Accuracy)
-{
-    cv::Scalar mean;
-    cv::Scalar stddev;
-
-    cv::gpu::meanStdDev(loadMat(mat, useRoi), mean, stddev);
-
-    EXPECT_NEAR(mean_gold[0], mean[0], 1e-5);
-    EXPECT_NEAR(mean_gold[1], mean[1], 1e-5);
-    EXPECT_NEAR(mean_gold[2], mean[2], 1e-5);
-    EXPECT_NEAR(mean_gold[3], mean[3], 1e-5);
-
-    EXPECT_NEAR(stddev_gold[0], stddev[0], 1e-5);
-    EXPECT_NEAR(stddev_gold[1], stddev[1], 1e-5);
-    EXPECT_NEAR(stddev_gold[2], stddev[2], 1e-5);
-    EXPECT_NEAR(stddev_gold[3], stddev[3], 1e-5);
-}
-
-INSTANTIATE_TEST_CASE_P(Arithm, MeanStdDev, Combine(
-    ALL_DEVICES,
-    WHOLE_SUBMAT));
+// GEMM
+
+PARAM_TEST_CASE(GEMM, cv::gpu::DeviceInfo, cv::Size, MatType, GemmFlags, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    cv::Size size;
+    int type;
+    int flags;
+    bool useRoi;
+
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        size = GET_PARAM(1);
+        type = GET_PARAM(2);
+        flags = GET_PARAM(3);
+        useRoi = GET_PARAM(4);
+
+        cv::gpu::setDevice(devInfo.deviceID());
+    }
+};
+
+TEST_P(GEMM, Accuracy)
+{
+    cv::Mat src1 = randomMat(size, type, -10.0, 10.0);
+    cv::Mat src2 = randomMat(size, type, -10.0, 10.0);
+    cv::Mat src3 = randomMat(size, type, -10.0, 10.0);
+    double alpha = randomDouble(-10.0, 10.0);
+    double beta = randomDouble(-10.0, 10.0);
+
+    cv::gpu::GpuMat dst = createMat(size, type, useRoi);
+    cv::gpu::gemm(loadMat(src1, useRoi), loadMat(src2, useRoi), alpha, loadMat(src3, useRoi), beta, dst, flags);
+
+    cv::Mat dst_gold;
+    cv::gemm(src1, src2, alpha, src3, beta, dst_gold, flags);
+
+    EXPECT_MAT_NEAR(dst_gold, dst, CV_MAT_DEPTH(type) == CV_32F ? 1e-1 : 1e-10);
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Core, GEMM, testing::Combine(
+    ALL_DEVICES,
+    DIFFERENT_SIZES,
+    testing::Values(MatType(CV_32FC1), MatType(CV_32FC2), MatType(CV_64FC1), MatType(CV_64FC2)),
+    ALL_GEMM_FLAGS,
+    WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
-// normDiff
-
-PARAM_TEST_CASE(NormDiff, cv::gpu::DeviceInfo, NormCode, UseRoi)
-{
-    cv::gpu::DeviceInfo devInfo;
-    int normCode;
-    bool useRoi;
-
-    cv::Size size;
-    cv::Mat mat1, mat2;
-
-    double norm_gold;
-
-    virtual void SetUp()
-    {
-        devInfo = GET_PARAM(0);
-        normCode = GET_PARAM(1);
-        useRoi = GET_PARAM(2);
-
-        cv::gpu::setDevice(devInfo.deviceID());
-
-        cv::RNG& rng = TS::ptr()->get_rng();
-
-        size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
-
-        mat1 = randomMat(rng, size, CV_8UC1, 1, 255, false);
-        mat2 = randomMat(rng, size, CV_8UC1, 1, 255, false);
-
-        norm_gold = cv::norm(mat1, mat2, normCode);
-    }
-};
-
-TEST_P(NormDiff, Accuracy)
-{
-    double norm = cv::gpu::norm(loadMat(mat1, useRoi), loadMat(mat2, useRoi), normCode);
-
-    EXPECT_NEAR(norm_gold, norm, 1e-6);
-}
-
-INSTANTIATE_TEST_CASE_P(Arithm, NormDiff, Combine(
-    ALL_DEVICES,
-    Values((int) cv::NORM_INF, (int) cv::NORM_L1, (int) cv::NORM_L2),
-    WHOLE_SUBMAT));
+// Transpose
+
+PARAM_TEST_CASE(Transpose, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    cv::Size size;
+    int type;
+    bool useRoi;
+
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        size = GET_PARAM(1);
+        type = GET_PARAM(2);
+        useRoi = GET_PARAM(3);
+
+        cv::gpu::setDevice(devInfo.deviceID());
+    }
+};
+
+TEST_P(Transpose, Accuracy)
+{
+    cv::Mat src = randomMat(size, type);
+
+    cv::gpu::GpuMat dst = createMat(cv::Size(size.height, size.width), type, useRoi);
+    cv::gpu::transpose(loadMat(src, useRoi), dst);
+
+    cv::Mat dst_gold;
+    cv::transpose(src, dst_gold);
+
+    EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Core, Transpose, testing::Combine(
+    ALL_DEVICES,
+    DIFFERENT_SIZES,
+    testing::Values(MatType(CV_8UC1),
+                    MatType(CV_8UC4),
+                    MatType(CV_16UC2),
+                    MatType(CV_16SC2),
+                    MatType(CV_32SC1),
+                    MatType(CV_32SC2),
+                    MatType(CV_64FC1)),
+    WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
-// flip
-
-PARAM_TEST_CASE(Flip, cv::gpu::DeviceInfo, MatType, FlipCode, UseRoi)
-{
-    cv::gpu::DeviceInfo devInfo;
-    int type;
-    int flip_code;
-    bool useRoi;
-
-    cv::Size size;
-    cv::Mat mat;
-
-    cv::Mat dst_gold;
-
-    virtual void SetUp()
-    {
-        devInfo = GET_PARAM(0);
-        type = GET_PARAM(1);
-        flip_code = GET_PARAM(2);
-        useRoi = GET_PARAM(3);
-
-        cv::gpu::setDevice(devInfo.deviceID());
-
-        cv::RNG& rng = TS::ptr()->get_rng();
-
-        size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
-
-        mat = randomMat(rng, size, type, 1, 255, false);
-
-        cv::flip(mat, dst_gold, flip_code);
-    }
-};
-
-TEST_P(Flip, Accuracy)
-{
-    cv::Mat dst;
-
-    cv::gpu::GpuMat gpu_res;
-    cv::gpu::flip(loadMat(mat, useRoi), gpu_res, flip_code);
-
-    gpu_res.download(dst);
-
-    EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
-}
-
-INSTANTIATE_TEST_CASE_P(Arithm, Flip, Combine(
-    ALL_DEVICES,
-    Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_16UC1, CV_16UC3, CV_16UC4, CV_32SC1, CV_32SC3, CV_32SC4, CV_32FC1, CV_32FC3, CV_32FC4),
-    Values((int)FLIP_BOTH, (int)FLIP_X, (int)FLIP_Y),
-    WHOLE_SUBMAT));
+// Flip
+
+PARAM_TEST_CASE(Flip, cv::gpu::DeviceInfo, cv::Size, MatType, FlipCode, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    cv::Size size;
+    int type;
+    int flip_code;
+    bool useRoi;
+
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        size = GET_PARAM(1);
+        type = GET_PARAM(2);
+        flip_code = GET_PARAM(3);
+        useRoi = GET_PARAM(4);
+
+        cv::gpu::setDevice(devInfo.deviceID());
+    }
+};
+
+TEST_P(Flip, Accuracy)
+{
+    cv::Mat src = randomMat(size, type);
+
+    cv::gpu::GpuMat dst = createMat(size, type, useRoi);
+    cv::gpu::flip(loadMat(src, useRoi), dst, flip_code);
+
+    cv::Mat dst_gold;
+    cv::flip(src, dst_gold, flip_code);
+
+    EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Core, Flip, testing::Combine(
+    ALL_DEVICES,
+    DIFFERENT_SIZES,
+    testing::Values(MatType(CV_8UC1),
+                    MatType(CV_8UC3),
+                    MatType(CV_8UC4),
+                    MatType(CV_16UC1),
+                    MatType(CV_16UC3),
+                    MatType(CV_16UC4),
+                    MatType(CV_32SC1),
+                    MatType(CV_32SC3),
+                    MatType(CV_32SC4),
+                    MatType(CV_32FC1),
+                    MatType(CV_32FC3),
+                    MatType(CV_32FC4)),
+    ALL_FLIP_CODES,
+    WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// LUT

-PARAM_TEST_CASE(LUT, cv::gpu::DeviceInfo, MatType, UseRoi)
-{
-    cv::gpu::DeviceInfo devInfo;
-    int type;
-    bool useRoi;
-
-    cv::Size size;
-    cv::Mat mat;
-    cv::Mat lut;
-
-    cv::Mat dst_gold;
-
-    virtual void SetUp()
-    {
-        devInfo = GET_PARAM(0);
-        type = GET_PARAM(1);
-        useRoi = GET_PARAM(2);
-
-        cv::gpu::setDevice(devInfo.deviceID());
-
-        cv::RNG& rng = TS::ptr()->get_rng();
-
-        size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
-
-        mat = randomMat(rng, size, type, 1, 255, false);
-        lut = randomMat(rng, cv::Size(256, 1), CV_8UC1, 100, 200, false);
-
-        cv::LUT(mat, lut, dst_gold);
-    }
-};
-
-TEST_P(LUT, Accuracy)
-{
-    cv::Mat dst;
-
-    cv::gpu::GpuMat gpu_res;
-    cv::gpu::LUT(loadMat(mat, useRoi), lut, gpu_res);
-
-    gpu_res.download(dst);
-
-    EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
-}
-
-INSTANTIATE_TEST_CASE_P(Arithm, LUT, Combine(
-    ALL_DEVICES,
-    Values(CV_8UC1, CV_8UC3),
-    WHOLE_SUBMAT));
+PARAM_TEST_CASE(LUT, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    cv::Size size;
+    int type;
+    bool useRoi;
+
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        size = GET_PARAM(1);
+        type = GET_PARAM(2);
+        useRoi = GET_PARAM(3);
+
+        cv::gpu::setDevice(devInfo.deviceID());
+    }
+};
+
+TEST_P(LUT, OneChannel)
+{
+    cv::Mat src = randomMat(size, type);
+    cv::Mat lut = randomMat(cv::Size(256, 1), CV_8UC1);
+
+    cv::gpu::GpuMat dst = createMat(size, CV_MAKE_TYPE(lut.depth(), src.channels()));
+    cv::gpu::LUT(loadMat(src, useRoi), lut, dst);
+
+    cv::Mat dst_gold;
+    cv::LUT(src, lut, dst_gold);
+
+    EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+TEST_P(LUT, MultiChannel)
+{
+    cv::Mat src = randomMat(size, type);
+    cv::Mat lut = randomMat(cv::Size(256, 1), CV_MAKE_TYPE(CV_8U, src.channels()));
+
+    cv::gpu::GpuMat dst = createMat(size, CV_MAKE_TYPE(lut.depth(), src.channels()), useRoi);
+    cv::gpu::LUT(loadMat(src, useRoi), lut, dst);
+
+    cv::Mat dst_gold;
+    cv::LUT(src, lut, dst_gold);
+
+    EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Core, LUT, testing::Combine(
+    ALL_DEVICES,
+    DIFFERENT_SIZES,
+    testing::Values(MatType(CV_8UC1), MatType(CV_8UC3)),
+    WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
-// pow
-
-PARAM_TEST_CASE(Pow, cv::gpu::DeviceInfo, MatType, UseRoi)
-{
-    cv::gpu::DeviceInfo devInfo;
-    int type;
-    bool useRoi;
-
-    double power;
-
-    cv::Size size;
-    cv::Mat mat;
-
-    cv::Mat dst_gold;
-
-    virtual void SetUp()
-    {
-        devInfo = GET_PARAM(0);
-        type = GET_PARAM(1);
-        useRoi = GET_PARAM(2);
-
-        cv::gpu::setDevice(devInfo.deviceID());
-
-        cv::RNG& rng = TS::ptr()->get_rng();
-
-        size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
-
-        mat = randomMat(rng, size, type, 0.0, 100.0, false);
-
-        if (mat.depth() == CV_32F)
-            power = rng.uniform(1.2f, 3.f);
-        else
-        {
-            int ipower = rng.uniform(2, 8);
-            power = (float)ipower;
-        }
-
-        cv::pow(mat, power, dst_gold);
-    }
-};
-
-TEST_P(Pow, Accuracy)
-{
-    cv::Mat dst;
-
-    cv::gpu::GpuMat gpu_res;
-
-    cv::gpu::pow(loadMat(mat, useRoi), power, gpu_res);
-
-    gpu_res.download(dst);
-
-    EXPECT_MAT_NEAR(dst_gold, dst, 2);
-}
-
-INSTANTIATE_TEST_CASE_P(Arithm, Pow, Combine(
-    ALL_DEVICES,
-    Values(CV_32F, CV_32FC3),
-    WHOLE_SUBMAT));
+// Magnitude
+
+PARAM_TEST_CASE(Magnitude, cv::gpu::DeviceInfo, cv::Size, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    cv::Size size;
+    bool useRoi;
+
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        size = GET_PARAM(1);
+        useRoi = GET_PARAM(2);
+
+        cv::gpu::setDevice(devInfo.deviceID());
+    }
+};
+
+TEST_P(Magnitude, NPP)
+{
+    cv::Mat src = randomMat(size, CV_32FC2);
+
+    cv::gpu::GpuMat dst = createMat(size, CV_32FC1, useRoi);
+    cv::gpu::magnitude(loadMat(src, useRoi), dst);
+
+    cv::Mat arr[2];
+    cv::split(src, arr);
+    cv::Mat dst_gold;
+    cv::magnitude(arr[0], arr[1], dst_gold);
+
+    EXPECT_MAT_NEAR(dst_gold, dst, 1e-4);
+}
+
+TEST_P(Magnitude, Sqr_NPP)
+{
+    cv::Mat src = randomMat(size, CV_32FC2);
+
+    cv::gpu::GpuMat dst = createMat(size, CV_32FC1, useRoi);
+    cv::gpu::magnitudeSqr(loadMat(src, useRoi), dst);
+
+    cv::Mat arr[2];
+    cv::split(src, arr);
+    cv::Mat dst_gold;
+    cv::magnitude(arr[0], arr[1], dst_gold);
+    cv::multiply(dst_gold, dst_gold, dst_gold);
+
+    EXPECT_MAT_NEAR(dst_gold, dst, 1e-1);
+}
+
+TEST_P(Magnitude, Accuracy)
+{
+    cv::Mat x = randomMat(size, CV_32FC1);
+    cv::Mat y = randomMat(size, CV_32FC1);
+
+    cv::gpu::GpuMat dst = createMat(size, CV_32FC1, useRoi);
+    cv::gpu::magnitude(loadMat(x, useRoi), loadMat(y, useRoi), dst);
+
+    cv::Mat dst_gold;
+    cv::magnitude(x, y, dst_gold);
+
+    EXPECT_MAT_NEAR(dst_gold, dst, 1e-4);
+}
+
+TEST_P(Magnitude, Sqr_Accuracy)
+{
+    cv::Mat x = randomMat(size, CV_32FC1);
+    cv::Mat y = randomMat(size, CV_32FC1);
+
+    cv::gpu::GpuMat dst = createMat(size, CV_32FC1, useRoi);
+    cv::gpu::magnitudeSqr(loadMat(x, useRoi), loadMat(y, useRoi), dst);
+
+    cv::Mat dst_gold;
+    cv::magnitude(x, y, dst_gold);
+    cv::multiply(dst_gold, dst_gold, dst_gold);
+
+    EXPECT_MAT_NEAR(dst_gold, dst, 1e-1);
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Core, Magnitude, testing::Combine(
+    ALL_DEVICES,
+    DIFFERENT_SIZES,
+    WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
-// magnitude
-
-PARAM_TEST_CASE(Magnitude, cv::gpu::DeviceInfo, UseRoi)
-{
-    cv::gpu::DeviceInfo devInfo;
-    bool useRoi;
-
-    cv::Size size;
-    cv::Mat mat1, mat2;
-
-    cv::Mat dst_gold;
-
-    virtual void SetUp()
-    {
-        devInfo = GET_PARAM(0);
-        useRoi = GET_PARAM(1);
-
-        cv::gpu::setDevice(devInfo.deviceID());
-
-        cv::RNG& rng = TS::ptr()->get_rng();
-
-        size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
-
-        mat1 = randomMat(rng, size, CV_32FC1, 0.0, 100.0, false);
-        mat2 = randomMat(rng, size, CV_32FC1, 0.0, 100.0, false);
-
-        cv::magnitude(mat1, mat2, dst_gold);
-    }
-};
-
-TEST_P(Magnitude, Accuracy)
-{
-    cv::Mat dst;
-
-    cv::gpu::GpuMat gpu_res;
-    cv::gpu::magnitude(loadMat(mat1, useRoi), loadMat(mat2, useRoi), gpu_res);
-
-    gpu_res.download(dst);
-
-    EXPECT_MAT_NEAR(dst_gold, dst, 1e-4);
-}
-
-INSTANTIATE_TEST_CASE_P(Arithm, Magnitude, Combine(
-    ALL_DEVICES,
-    WHOLE_SUBMAT));
+// Phase
+
+PARAM_TEST_CASE(Phase, cv::gpu::DeviceInfo, cv::Size, bool, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    cv::Size size;
+    bool angleInDegrees;
+    bool useRoi;
+
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        size = GET_PARAM(1);
+        angleInDegrees = GET_PARAM(2);
+        useRoi = GET_PARAM(3);
+
+        cv::gpu::setDevice(devInfo.deviceID());
+    }
+};
+
+TEST_P(Phase, Accuracy)
+{
+    cv::Mat x = randomMat(size, CV_32FC1);
+    cv::Mat y = randomMat(size, CV_32FC1);
+
+    cv::gpu::GpuMat dst = createMat(size, CV_32FC1, useRoi);
+    cv::gpu::phase(loadMat(x, useRoi), loadMat(y, useRoi), dst, angleInDegrees);
+
+    cv::Mat dst_gold;
+    cv::phase(x, y, dst_gold, angleInDegrees);
+
+    EXPECT_MAT_NEAR(dst_gold, dst, angleInDegrees ? 1e-2 : 1e-3);
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Core, Phase, testing::Combine(
+    ALL_DEVICES,
+    DIFFERENT_SIZES,
+    testing::Bool(),
+    WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
-// phase
-
-PARAM_TEST_CASE(Phase, cv::gpu::DeviceInfo, UseRoi)
-{
-    cv::gpu::DeviceInfo devInfo;
-    bool useRoi;
-
-    cv::Size size;
-    cv::Mat mat1, mat2;
-
-    cv::Mat dst_gold;
-
-    virtual void SetUp()
-    {
-        devInfo = GET_PARAM(0);
-        useRoi = GET_PARAM(1);
-
-        cv::gpu::setDevice(devInfo.deviceID());
-
-        cv::RNG& rng = TS::ptr()->get_rng();
-
-        size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
-
-        mat1 = randomMat(rng, size, CV_32FC1, 0.0, 100.0, false);
-        mat2 = randomMat(rng, size, CV_32FC1, 0.0, 100.0, false);
-
-        cv::phase(mat1, mat2, dst_gold);
-    }
-};
-
-TEST_P(Phase, Accuracy)
-{
-    cv::Mat dst;
-
-    cv::gpu::GpuMat gpu_res;
-    cv::gpu::phase(loadMat(mat1, useRoi), loadMat(mat2, useRoi), gpu_res);
-
-    gpu_res.download(dst);
-
-    EXPECT_MAT_NEAR(dst_gold, dst, 1e-3);
-}
-
-INSTANTIATE_TEST_CASE_P(Arithm, Phase, Combine(
-    ALL_DEVICES,
-    WHOLE_SUBMAT));
+// CartToPolar
+
+PARAM_TEST_CASE(CartToPolar, cv::gpu::DeviceInfo, cv::Size, bool, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    cv::Size size;
+    bool angleInDegrees;
+    bool useRoi;
+
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        size = GET_PARAM(1);
+        angleInDegrees = GET_PARAM(2);
+        useRoi = GET_PARAM(3);
+
+        cv::gpu::setDevice(devInfo.deviceID());
+    }
+};
+
+TEST_P(CartToPolar, Accuracy)
+{
+    cv::Mat x = randomMat(size, CV_32FC1);
+    cv::Mat y = randomMat(size, CV_32FC1);
+
+    cv::gpu::GpuMat mag = createMat(size, CV_32FC1, useRoi);
+    cv::gpu::GpuMat angle = createMat(size, CV_32FC1, useRoi);
+    cv::gpu::cartToPolar(loadMat(x, useRoi), loadMat(y, useRoi), mag, angle, angleInDegrees);
+
+    cv::Mat mag_gold;
+    cv::Mat angle_gold;
+    cv::cartToPolar(x, y, mag_gold, angle_gold, angleInDegrees);
+
+    EXPECT_MAT_NEAR(mag_gold, mag, 1e-4);
+    EXPECT_MAT_NEAR(angle_gold, angle, angleInDegrees ? 1e-2 : 1e-3);
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Core, CartToPolar, testing::Combine(
+    ALL_DEVICES,
+    DIFFERENT_SIZES,
+    testing::Bool(),
+    WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
-// cartToPolar
-
-PARAM_TEST_CASE(CartToPolar, cv::gpu::DeviceInfo, UseRoi)
-{
-    cv::gpu::DeviceInfo devInfo;
-    bool useRoi;
-
-    cv::Size size;
-    cv::Mat mat1, mat2;
-
-    cv::Mat mag_gold;
-    cv::Mat angle_gold;
-
-    virtual void SetUp()
-    {
-        devInfo = GET_PARAM(0);
-        useRoi = GET_PARAM(1);
-
-        cv::gpu::setDevice(devInfo.deviceID());
-
-        cv::RNG& rng = TS::ptr()->get_rng();
-
-        size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
-
-        mat1 = randomMat(rng, size, CV_32FC1, -100.0, 100.0, false);
-        mat2 = randomMat(rng, size, CV_32FC1, -100.0, 100.0, false);
-
-        cv::cartToPolar(mat1, mat2, mag_gold, angle_gold);
-    }
-};
-
-TEST_P(CartToPolar, Accuracy)
-{
-    cv::Mat mag, angle;
-
-    cv::gpu::GpuMat gpuMag;
-    cv::gpu::GpuMat gpuAngle;
-    cv::gpu::cartToPolar(loadMat(mat1, useRoi), loadMat(mat2, useRoi), gpuMag, gpuAngle);
-
-    gpuMag.download(mag);
-    gpuAngle.download(angle);
-
-    EXPECT_MAT_NEAR(mag_gold, mag, 1e-4);
-    EXPECT_MAT_NEAR(angle_gold, angle, 1e-3);
-}
-
-INSTANTIATE_TEST_CASE_P(Arithm, CartToPolar, Combine(
-    ALL_DEVICES,
-    WHOLE_SUBMAT));
+// polarToCart
+
+PARAM_TEST_CASE(PolarToCart, cv::gpu::DeviceInfo, cv::Size, bool, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    cv::Size size;
+    bool angleInDegrees;
+    bool useRoi;
+
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        size = GET_PARAM(1);
+        angleInDegrees = GET_PARAM(2);
+        useRoi = GET_PARAM(3);
+
+        cv::gpu::setDevice(devInfo.deviceID());
+    }
+};
+
+TEST_P(PolarToCart, Accuracy)
+{
+    cv::Mat magnitude = randomMat(size, CV_32FC1);
+    cv::Mat angle = randomMat(size, CV_32FC1);
+
+    cv::gpu::GpuMat x = createMat(size, CV_32FC1, useRoi);
+    cv::gpu::GpuMat y = createMat(size, CV_32FC1, useRoi);
+    cv::gpu::polarToCart(loadMat(magnitude, useRoi), loadMat(angle, useRoi), x, y, angleInDegrees);
+
+    cv::Mat x_gold;
+    cv::Mat y_gold;
+    cv::polarToCart(magnitude, angle, x_gold, y_gold, angleInDegrees);
+
+    EXPECT_MAT_NEAR(x_gold, x, 1e-4);
+    EXPECT_MAT_NEAR(y_gold, y, 1e-4);
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Core, PolarToCart, testing::Combine(
+    ALL_DEVICES,
+    DIFFERENT_SIZES,
+    testing::Bool(),
+    WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
-// polarToCart
-
-PARAM_TEST_CASE(PolarToCart, cv::gpu::DeviceInfo, UseRoi)
-{
-    cv::gpu::DeviceInfo devInfo;
-    bool useRoi;
-
-    cv::Size size;
-    cv::Mat mag;
-    cv::Mat angle;
-
-    cv::Mat x_gold;
-    cv::Mat y_gold;
-
-    virtual void SetUp()
-    {
-        devInfo = GET_PARAM(0);
-        useRoi = GET_PARAM(1);
-
-        cv::gpu::setDevice(devInfo.deviceID());
-
-        cv::RNG& rng = TS::ptr()->get_rng();
-
-        size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
-
-        mag = randomMat(rng, size, CV_32FC1, -100.0, 100.0, false);
-        angle = randomMat(rng, size, CV_32FC1, 0.0, 2.0 * CV_PI, false);
-
-        cv::polarToCart(mag, angle, x_gold, y_gold);
-    }
-};
-
-TEST_P(PolarToCart, Accuracy)
-{
-    cv::Mat x, y;
-
-    cv::gpu::GpuMat gpuX;
-    cv::gpu::GpuMat gpuY;
-    cv::gpu::polarToCart(loadMat(mag, useRoi), loadMat(angle, useRoi), gpuX, gpuY);
-
-    gpuX.download(x);
-    gpuY.download(y);
-
-    EXPECT_MAT_NEAR(x_gold, x, 1e-4);
-    EXPECT_MAT_NEAR(y_gold, y, 1e-4);
-}
-
-INSTANTIATE_TEST_CASE_P(Arithm, PolarToCart, Combine(
-    ALL_DEVICES,
-    WHOLE_SUBMAT));
+// MeanStdDev
+
+PARAM_TEST_CASE(MeanStdDev, cv::gpu::DeviceInfo, cv::Size, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    cv::Size size;
+    bool useRoi;
+
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        size = GET_PARAM(1);
+        useRoi = GET_PARAM(2);
+
+        cv::gpu::setDevice(devInfo.deviceID());
+    }
+};
+
+TEST_P(MeanStdDev, Accuracy)
+{
+    cv::Mat src = randomMat(size, CV_8UC1);
+
+    cv::Scalar mean;
+    cv::Scalar stddev;
+    cv::gpu::meanStdDev(loadMat(src, useRoi), mean, stddev);
+
+    cv::Scalar mean_gold;
+    cv::Scalar stddev_gold;
+    cv::meanStdDev(src, mean_gold, stddev_gold);
+
+    EXPECT_SCALAR_NEAR(mean_gold, mean, 1e-5);
+    EXPECT_SCALAR_NEAR(stddev_gold, stddev, 1e-5);
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Core, MeanStdDev, testing::Combine(
+    ALL_DEVICES,
+    DIFFERENT_SIZES,
+    WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
-// minMax
-
-PARAM_TEST_CASE(MinMax, cv::gpu::DeviceInfo, MatType, UseRoi)
-{
-    cv::gpu::DeviceInfo devInfo;
-    int type;
-    bool useRoi;
-
-    cv::Size size;
-    cv::Mat mat;
-    cv::Mat mask;
-
-    double minVal_gold;
-    double maxVal_gold;
-
-    virtual void SetUp()
-    {
-        devInfo = GET_PARAM(0);
-        type = GET_PARAM(1);
-        useRoi = GET_PARAM(2);
-
-        cv::gpu::setDevice(devInfo.deviceID());
-
-        cv::RNG& rng = TS::ptr()->get_rng();
-
-        size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
-
-        mat = randomMat(rng, size, type, 0.0, 127.0, false);
-        mask = randomMat(rng, size, CV_8UC1, 0, 2, false);
-
-        if (type != CV_8S)
-        {
-            cv::minMaxLoc(mat, &minVal_gold, &maxVal_gold, 0, 0, mask);
-        }
-        else
-        {
-            // OpenCV's minMaxLoc doesn't support CV_8S type
-            minVal_gold = std::numeric_limits<double>::max();
-            maxVal_gold = -std::numeric_limits<double>::max();
-            for (int i = 0; i < mat.rows; ++i)
-            {
-                const signed char* mat_row = mat.ptr<signed char>(i);
-                const unsigned char* mask_row = mask.ptr<unsigned char>(i);
-                for (int j = 0; j < mat.cols; ++j)
-                {
-                    if (mask_row[j])
-                    {
-                        signed char val = mat_row[j];
-                        if (val < minVal_gold) minVal_gold = val;
-                        if (val > maxVal_gold) maxVal_gold = val;
-                    }
-                }
-            }
-        }
-    }
-};
-
-TEST_P(MinMax, Accuracy)
-{
-    if (type == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
-        return;
-
-    double minVal, maxVal;
-    cv::gpu::minMax(loadMat(mat, useRoi), &minVal, &maxVal, loadMat(mask, useRoi));
-
-    EXPECT_DOUBLE_EQ(minVal_gold, minVal);
-    EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
-}
-
-INSTANTIATE_TEST_CASE_P(Arithm, MinMax, Combine(
-    ALL_DEVICES,
-    Values(CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F),
-    WHOLE_SUBMAT));
+// Norm
+
+PARAM_TEST_CASE(Norm, cv::gpu::DeviceInfo, cv::Size, MatDepth, NormCode, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    cv::Size size;
+    int depth;
+    int normCode;
+    bool useRoi;
+
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        size = GET_PARAM(1);
+        depth = GET_PARAM(2);
+        normCode = GET_PARAM(3);
+        useRoi = GET_PARAM(4);
+
+        cv::gpu::setDevice(devInfo.deviceID());
+    }
+};
+
+TEST_P(Norm, Accuracy)
+{
+    cv::Mat src = randomMat(size, depth);
+
+    double val = cv::gpu::norm(loadMat(src, useRoi), normCode);
+
+    double val_gold = cv::norm(src, normCode);
+
+    EXPECT_NEAR(val_gold, val, depth < CV_32F ? 0.0 : 1.0);
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Core, Norm, testing::Combine(
+    ALL_DEVICES,
+    DIFFERENT_SIZES,
+    testing::Values(MatDepth(CV_8U),
+                    MatDepth(CV_8S),
+                    MatDepth(CV_16U),
+                    MatDepth(CV_16S),
+                    MatDepth(CV_32S),
+                    MatDepth(CV_32F)),
+    testing::Values(NormCode(cv::NORM_L1), NormCode(cv::NORM_L2), NormCode(cv::NORM_INF)),
+    WHOLE_SUBMAT));
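// Editor's note (reference sketch of standard definitions, not part of this commit): the Norm
// test above compares cv::gpu::norm against cv::norm for NORM_L1 (sum of |x|), NORM_L2
// (sqrt of the sum of x^2) and NORM_INF (max |x|). For a single-channel CV_64FC1 matrix:
#include <algorithm>
#include <cmath>

static double norm_reference_sketch(const cv::Mat& src, int normCode)
{
    double l1 = 0.0, l2 = 0.0, linf = 0.0;

    for (int y = 0; y < src.rows; ++y)
    {
        for (int x = 0; x < src.cols; ++x)
        {
            const double v = std::abs(src.at<double>(y, x));
            l1 += v;
            l2 += v * v;
            linf = std::max(linf, v);
        }
    }

    return normCode == cv::NORM_L1 ? l1 : normCode == cv::NORM_L2 ? std::sqrt(l2) : linf;
}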
////////////////////////////////////////////////////////////////////////////////
-// minMaxLoc
-
-PARAM_TEST_CASE(MinMaxLoc, cv::gpu::DeviceInfo, MatType, UseRoi)
-{
-    cv::gpu::DeviceInfo devInfo;
-    int type;
-    bool useRoi;
-
-    cv::Size size;
-    cv::Mat mat;
-    cv::Mat mask;
-
-    double minVal_gold;
-    double maxVal_gold;
-    cv::Point minLoc_gold;
-    cv::Point maxLoc_gold;
-
-    virtual void SetUp()
-    {
-        devInfo = GET_PARAM(0);
-        type = GET_PARAM(1);
-        useRoi = GET_PARAM(2);
-
-        cv::gpu::setDevice(devInfo.deviceID());
-
-        cv::RNG& rng = TS::ptr()->get_rng();
-
-        size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
-
-        mat = randomMat(rng, size, type, 0.0, 127.0, false);
-        mask = randomMat(rng, size, CV_8UC1, 0, 2, false);
-
-        if (type != CV_8S)
-        {
-            cv::minMaxLoc(mat, &minVal_gold, &maxVal_gold, &minLoc_gold, &maxLoc_gold, mask);
-        }
-        else
-        {
-            // OpenCV's minMaxLoc doesn't support CV_8S type
-            minVal_gold = std::numeric_limits<double>::max();
-            maxVal_gold = -std::numeric_limits<double>::max();
-            for (int i = 0; i < mat.rows; ++i)
-            {
-                const signed char* mat_row = mat.ptr<signed char>(i);
-                const unsigned char* mask_row = mask.ptr<unsigned char>(i);
-                for (int j = 0; j < mat.cols; ++j)
-                {
-                    if (mask_row[j])
-                    {
-                        signed char val = mat_row[j];
-                        if (val < minVal_gold) { minVal_gold = val; minLoc_gold = cv::Point(j, i); }
-                        if (val > maxVal_gold) { maxVal_gold = val; maxLoc_gold = cv::Point(j, i); }
-                    }
-                }
-            }
-        }
-    }
-};
-
-TEST_P(MinMaxLoc, Accuracy)
-{
-    if (type == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
-        return;
-
-    double minVal, maxVal;
-    cv::Point minLoc, maxLoc;
-
-    cv::gpu::minMaxLoc(loadMat(mat, useRoi), &minVal, &maxVal, &minLoc, &maxLoc, loadMat(mask, useRoi));
-
-    EXPECT_DOUBLE_EQ(minVal_gold, minVal);
-    EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
-
-    int cmpMinVals = memcmp(mat.data + minLoc_gold.y * mat.step + minLoc_gold.x * mat.elemSize(),
-                            mat.data + minLoc.y * mat.step + minLoc.x * mat.elemSize(),
-                            mat.elemSize());
-    int cmpMaxVals = memcmp(mat.data + maxLoc_gold.y * mat.step + maxLoc_gold.x * mat.elemSize(),
-                            mat.data + maxLoc.y * mat.step + maxLoc.x * mat.elemSize(),
-                            mat.elemSize());
-
-    EXPECT_EQ(0, cmpMinVals);
-    EXPECT_EQ(0, cmpMaxVals);
-}
-
-INSTANTIATE_TEST_CASE_P(Arithm, MinMaxLoc, Combine(
-    ALL_DEVICES,
-    Values(CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F),
-    WHOLE_SUBMAT));
+// normDiff
+
+PARAM_TEST_CASE(NormDiff, cv::gpu::DeviceInfo, cv::Size, NormCode, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    cv::Size size;
+    int normCode;
+    bool useRoi;
+
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        size = GET_PARAM(1);
+        normCode = GET_PARAM(2);
+        useRoi = GET_PARAM(3);
+
+        cv::gpu::setDevice(devInfo.deviceID());
+    }
+};
+
+TEST_P(NormDiff, Accuracy)
+{
+    cv::Mat src1 = randomMat(size, CV_8UC1);
+    cv::Mat src2 = randomMat(size, CV_8UC1);
+
+    double val = cv::gpu::norm(loadMat(src1, useRoi), loadMat(src2, useRoi), normCode);
+
+    double val_gold = cv::norm(src1, src2, normCode);
+
+    EXPECT_NEAR(val_gold, val, 0.0);
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Core, NormDiff, testing::Combine(
+    ALL_DEVICES,
+    DIFFERENT_SIZES,
+    testing::Values(NormCode(cv::NORM_L1), NormCode(cv::NORM_L2), NormCode(cv::NORM_INF)),
+    WHOLE_SUBMAT));
+
+//////////////////////////////////////////////////////////////////////////////
+// Sum
+
+namespace
+{
+    template <typename T>
+    cv::Scalar absSumImpl(const cv::Mat& src)
+    {
+        const int cn = src.channels();
+
+        cv::Scalar sum = cv::Scalar::all(0);
+
+        for (int y = 0; y < src.rows; ++y)
+        {
+            for (int x = 0; x < src.cols; ++x)
+            {
+                for (int c = 0; c < cn; ++c)
+                    sum[c] += std::abs(src.at<T>(y, x * cn + c));
+            }
+        }
+
+        return sum;
+    }
+
+    cv::Scalar absSumGold(const cv::Mat& src)
+    {
+        typedef cv::Scalar (*func_t)(const cv::Mat& src);
+
+        static const func_t funcs[] =
+        {
+            absSumImpl<uchar>,
+            absSumImpl<schar>,
+            absSumImpl<ushort>,
+            absSumImpl<short>,
+            absSumImpl<int>,
+            absSumImpl<float>,
+            absSumImpl<double>
+        };
+
+        return funcs[src.depth()](src);
+    }
+
+    template <typename T>
+    cv::Scalar sqrSumImpl(const cv::Mat& src)
+    {
+        const int cn = src.channels();
+
+        cv::Scalar sum = cv::Scalar::all(0);
+
+        for (int y = 0; y < src.rows; ++y)
+        {
+            for (int x = 0; x < src.cols; ++x)
+            {
+                for (int c = 0; c < cn; ++c)
+                {
+                    const T val = src.at<T>(y, x * cn + c);
+                    sum[c] += val * val;
+                }
+            }
+        }
+
+        return sum;
+    }
+
+    cv::Scalar sqrSumGold(const cv::Mat& src)
+    {
+        typedef cv::Scalar (*func_t)(const cv::Mat& src);
+
+        static const func_t funcs[] =
+        {
+            sqrSumImpl<uchar>,
+            sqrSumImpl<schar>,
+            sqrSumImpl<ushort>,
+            sqrSumImpl<short>,
+            sqrSumImpl<int>,
+            sqrSumImpl<float>,
+            sqrSumImpl<double>
+        };
+
+        return funcs[src.depth()](src);
+    }
+}
-////////////////////////////////////////////////////////////////////////////
-// countNonZero
-
-PARAM_TEST_CASE(CountNonZero, cv::gpu::DeviceInfo, MatType, UseRoi)
-{
-    cv::gpu::DeviceInfo devInfo;
-    int type;
-    bool useRoi;
-
-    cv::Size size;
-    cv::Mat mat;
-
-    int n_gold;
-
-    virtual void SetUp()
-    {
-        devInfo = GET_PARAM(0);
-        type = GET_PARAM(1);
-        useRoi = GET_PARAM(2);
-
-        cv::gpu::setDevice(devInfo.deviceID());
-
-        cv::RNG& rng = TS::ptr()->get_rng();
-
-        size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
-
-        cv::Mat matBase = randomMat(rng, size, CV_8U, 0.0, 1.0, false);
-        matBase.convertTo(mat, type);
-
-        n_gold = cv::countNonZero(mat);
-    }
-};
-
-TEST_P(CountNonZero, Accuracy)
-{
-    if (type == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
-        return;
-
-    int n = cv::gpu::countNonZero(loadMat(mat, useRoi));
-
-    ASSERT_EQ(n_gold, n);
-}
-
-INSTANTIATE_TEST_CASE_P(Arithm, CountNonZero, Combine(
-    ALL_DEVICES,
-    Values(CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F),
-    WHOLE_SUBMAT));
+PARAM_TEST_CASE(Sum, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    cv::Size size;
+    int type;
+    bool useRoi;
+
+    cv::Mat src;
+
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        size = GET_PARAM(1);
+        type = GET_PARAM(2);
+        useRoi = GET_PARAM(3);
+
+        cv::gpu::setDevice(devInfo.deviceID());
+
+        src = randomMat(size, type, -128.0, 128.0);
+    }
+};
+
+TEST_P(Sum, Simple)
+{
+    cv::Scalar val = cv::gpu::sum(loadMat(src, useRoi));
+
+    cv::Scalar val_gold = cv::sum(src);
+
+    EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
+}
+
+TEST_P(Sum, Abs)
+{
+    cv::Scalar val = cv::gpu::absSum(loadMat(src, useRoi));
+
+    cv::Scalar val_gold = absSumGold(src);
+
+    EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
+}
+
+TEST_P(Sum, Sqr)
+{
+    cv::Scalar val = cv::gpu::sqrSum(loadMat(src, useRoi));
+
+    cv::Scalar val_gold = sqrSumGold(src);
+
+    EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 10);
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Core, Sum, testing::Combine(
+    ALL_DEVICES,
+    DIFFERENT_SIZES,
+    TYPES(CV_8U, CV_32F, 1, 4),
+    WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
// sum // MinMax
PARAM_TEST_CASE(Sum, cv::gpu::DeviceInfo, MatType, UseRoi) namespace
{ {
cv::gpu::DeviceInfo devInfo; void minMaxLocGold(const cv::Mat& src, double* minVal_, double* maxVal_ = 0, cv::Point* minLoc_ = 0, cv::Point* maxLoc_ = 0, const cv::Mat& mask = cv::Mat())
int type; {
bool useRoi; if (src.depth() != CV_8S)
{
cv::minMaxLoc(src, minVal_, maxVal_, minLoc_, maxLoc_, mask);
return;
}
// OpenCV's minMaxLoc doesn't support CV_8S type
double minVal = std::numeric_limits<double>::max();
cv::Point minLoc(-1, -1);
double maxVal = -std::numeric_limits<double>::max();
cv::Point maxLoc(-1, -1);
for (int y = 0; y < src.rows; ++y)
{
const schar* src_row = src.ptr<signed char>(y);
const uchar* mask_row = mask.empty() ? 0 : mask.ptr<unsigned char>(y);
for (int x = 0; x < src.cols; ++x)
{
if (!mask_row || mask_row[x])
{
schar val = src_row[x];
if (val < minVal)
{
minVal = val;
minLoc = cv::Point(x, y);
}
if (val > maxVal)
{
maxVal = val;
maxLoc = cv::Point(x, y);
}
}
}
}
if (minVal_) *minVal_ = minVal;
if (maxVal_) *maxVal_ = maxVal;
if (minLoc_) *minLoc_ = minLoc;
if (maxLoc_) *maxLoc_ = maxLoc;
}
}
-    cv::Size size;
-    cv::Mat mat;
-    virtual void SetUp()
-    {
-        devInfo = GET_PARAM(0);
-        type = GET_PARAM(1);
-        useRoi = GET_PARAM(2);
-        cv::gpu::setDevice(devInfo.deviceID());
-        cv::RNG& rng = TS::ptr()->get_rng();
-        size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
-        mat = randomMat(rng, size, CV_8U, 0.0, 10.0, false);
-    }
-};
-TEST_P(Sum, Simple)
-{
-    if (type == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
-        return;
-    cv::Scalar sum_gold = cv::sum(mat);
-    cv::Scalar sum = cv::gpu::sum(loadMat(mat, useRoi));
-    EXPECT_NEAR(sum[0], sum_gold[0], mat.size().area() * 1e-5);
-    EXPECT_NEAR(sum[1], sum_gold[1], mat.size().area() * 1e-5);
-    EXPECT_NEAR(sum[2], sum_gold[2], mat.size().area() * 1e-5);
-    EXPECT_NEAR(sum[3], sum_gold[3], mat.size().area() * 1e-5);
-}
-TEST_P(Sum, Abs)
-{
-    if (type == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
-        return;
-    cv::Scalar sum_gold = cv::norm(mat, cv::NORM_L1);
-    cv::Scalar sum = cv::gpu::absSum(loadMat(mat, useRoi));
-    EXPECT_NEAR(sum[0], sum_gold[0], mat.size().area() * 1e-5);
-    EXPECT_NEAR(sum[1], sum_gold[1], mat.size().area() * 1e-5);
-    EXPECT_NEAR(sum[2], sum_gold[2], mat.size().area() * 1e-5);
-    EXPECT_NEAR(sum[3], sum_gold[3], mat.size().area() * 1e-5);
-}
-TEST_P(Sum, Sqr)
-{
-    if (type == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
-        return;
-    cv::Mat sqrmat;
-    multiply(mat, mat, sqrmat);
-    cv::Scalar sum_gold = sum(sqrmat);
-    cv::Scalar sum = cv::gpu::sqrSum(loadMat(mat, useRoi));
-    EXPECT_NEAR(sum[0], sum_gold[0], mat.size().area() * 1e-5);
-    EXPECT_NEAR(sum[1], sum_gold[1], mat.size().area() * 1e-5);
-    EXPECT_NEAR(sum[2], sum_gold[2], mat.size().area() * 1e-5);
-    EXPECT_NEAR(sum[3], sum_gold[3], mat.size().area() * 1e-5);
-}
-INSTANTIATE_TEST_CASE_P(Arithm, Sum, Combine(
-    ALL_DEVICES,
-    Values(CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F),
-    WHOLE_SUBMAT));
+PARAM_TEST_CASE(MinMax, cv::gpu::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    cv::Size size;
+    int depth;
+    bool useRoi;
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        size = GET_PARAM(1);
+        depth = GET_PARAM(2);
+        useRoi = GET_PARAM(3);
+        cv::gpu::setDevice(devInfo.deviceID());
+    }
+};
+TEST_P(MinMax, WithoutMask)
+{
+    cv::Mat src = randomMat(size, depth);
+    double minVal, maxVal;
+    cv::gpu::minMax(loadMat(src, useRoi), &minVal, &maxVal);
+    double minVal_gold, maxVal_gold;
+    minMaxLocGold(src, &minVal_gold, &maxVal_gold);
+    EXPECT_DOUBLE_EQ(minVal_gold, minVal);
+    EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
+}
+TEST_P(MinMax, WithMask)
+{
+    cv::Mat src = randomMat(size, depth);
+    cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
+    double minVal, maxVal;
+    cv::gpu::minMax(loadMat(src, useRoi), &minVal, &maxVal, loadMat(mask, useRoi));
+    double minVal_gold, maxVal_gold;
+    minMaxLocGold(src, &minVal_gold, &maxVal_gold, 0, 0, mask);
+    EXPECT_DOUBLE_EQ(minVal_gold, minVal);
+    EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
+}
+TEST_P(MinMax, NullPtr)
+{
+    cv::Mat src = randomMat(size, depth);
+    cv::gpu::minMax(loadMat(src, useRoi), 0, 0);
+}
+INSTANTIATE_TEST_CASE_P(GPU_Core, MinMax, testing::Combine(
+    ALL_DEVICES,
+    DIFFERENT_SIZES,
+    ALL_DEPTH,
+    WHOLE_SUBMAT));
-//////////////////////////////////////////////////////////////////////////////
-// addWeighted
-PARAM_TEST_CASE(AddWeighted, cv::gpu::DeviceInfo, MatType, MatType, MatType, UseRoi)
-{
-    cv::gpu::DeviceInfo devInfo;
-    int type1;
-    int type2;
-    int dtype;
-    bool useRoi;
-    cv::Size size;
-    cv::Mat src1;
-    cv::Mat src2;
-    double alpha;
-    double beta;
-    double gamma;
-    cv::Mat dst_gold;
-    virtual void SetUp()
-    {
-        devInfo = GET_PARAM(0);
-        type1 = GET_PARAM(1);
-        type2 = GET_PARAM(2);
-        dtype = GET_PARAM(3);
-        useRoi = GET_PARAM(4);
-        cv::gpu::setDevice(devInfo.deviceID());
-        cv::RNG& rng = TS::ptr()->get_rng();
-        size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
-        src1 = randomMat(rng, size, type1, 0.0, 255.0, false);
-        src2 = randomMat(rng, size, type2, 0.0, 255.0, false);
-        alpha = rng.uniform(-10.0, 10.0);
-        beta = rng.uniform(-10.0, 10.0);
-        gamma = rng.uniform(-10.0, 10.0);
-        cv::addWeighted(src1, alpha, src2, beta, gamma, dst_gold, dtype);
-    }
-};
-TEST_P(AddWeighted, Accuracy)
-{
-    if ((src1.depth() == CV_64F || src2.depth() == CV_64F || dst_gold.depth() == CV_64F) && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
-        return;
-    cv::Mat dst;
-    cv::gpu::GpuMat dev_dst;
-    cv::gpu::addWeighted(loadMat(src1, useRoi), alpha, loadMat(src2, useRoi), beta, gamma, dev_dst, dtype);
-    dev_dst.download(dst);
-    EXPECT_MAT_NEAR(dst_gold, dst, dtype < CV_32F ? 1.0 : 1e-12);
-}
-INSTANTIATE_TEST_CASE_P(Arithm, AddWeighted, Combine(
-    ALL_DEVICES,
-    TYPES(CV_8U, CV_64F, 1, 1),
-    TYPES(CV_8U, CV_64F, 1, 1),
-    TYPES(CV_8U, CV_64F, 1, 1),
-    WHOLE_SUBMAT));
+////////////////////////////////////////////////////////////////////////////////
+// MinMaxLoc
+namespace
+{
+    template <typename T>
+    void expectEqualImpl(const cv::Mat& src, cv::Point loc_gold, cv::Point loc)
+    {
+        EXPECT_EQ(src.at<T>(loc_gold.y, loc_gold.x), src.at<T>(loc.y, loc.x));
+    }
+    void expectEqual(const cv::Mat& src, cv::Point loc_gold, cv::Point loc)
+    {
+        typedef void (*func_t)(const cv::Mat& src, cv::Point loc_gold, cv::Point loc);
+        static const func_t funcs[] =
+        {
+            expectEqualImpl<uchar>,
+            expectEqualImpl<schar>,
+            expectEqualImpl<ushort>,
+            expectEqualImpl<short>,
+            expectEqualImpl<int>,
+            expectEqualImpl<float>,
+            expectEqualImpl<double>
+        };
+        funcs[src.depth()](src, loc_gold, loc);
+    }
+}
+PARAM_TEST_CASE(MinMaxLoc, cv::gpu::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    cv::Size size;
+    int depth;
+    bool useRoi;
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        size = GET_PARAM(1);
+        depth = GET_PARAM(2);
+        useRoi = GET_PARAM(3);
+        cv::gpu::setDevice(devInfo.deviceID());
+    }
+};
+TEST_P(MinMaxLoc, WithoutMask)
+{
+    cv::Mat src = randomMat(size, depth);
+    double minVal, maxVal;
+    cv::Point minLoc, maxLoc;
+    cv::gpu::minMaxLoc(loadMat(src, useRoi), &minVal, &maxVal, &minLoc, &maxLoc);
+    double minVal_gold, maxVal_gold;
+    cv::Point minLoc_gold, maxLoc_gold;
+    minMaxLocGold(src, &minVal_gold, &maxVal_gold, &minLoc_gold, &maxLoc_gold);
+    EXPECT_DOUBLE_EQ(minVal_gold, minVal);
+    EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
+    expectEqual(src, minLoc_gold, minLoc);
+    expectEqual(src, maxLoc_gold, maxLoc);
+}
+TEST_P(MinMaxLoc, WithMask)
+{
+    cv::Mat src = randomMat(size, depth);
+    cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
+    double minVal, maxVal;
+    cv::Point minLoc, maxLoc;
+    cv::gpu::minMaxLoc(loadMat(src, useRoi), &minVal, &maxVal, &minLoc, &maxLoc, loadMat(mask, useRoi));
+    double minVal_gold, maxVal_gold;
+    cv::Point minLoc_gold, maxLoc_gold;
+    minMaxLocGold(src, &minVal_gold, &maxVal_gold, &minLoc_gold, &maxLoc_gold, mask);
+    EXPECT_DOUBLE_EQ(minVal_gold, minVal);
+    EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
+    expectEqual(src, minLoc_gold, minLoc);
+    expectEqual(src, maxLoc_gold, maxLoc);
+}
+TEST_P(MinMaxLoc, NullPtr)
+{
+    cv::Mat src = randomMat(size, depth);
+    cv::gpu::minMaxLoc(loadMat(src, useRoi), 0, 0, 0, 0);
+}
+INSTANTIATE_TEST_CASE_P(GPU_Core, MinMaxLoc, testing::Combine(
+    ALL_DEVICES,
+    DIFFERENT_SIZES,
+    ALL_DEPTH,
+    WHOLE_SUBMAT));
-//////////////////////////////////////////////////////////////////////////////
-// reduce
-PARAM_TEST_CASE(Reduce, cv::gpu::DeviceInfo, MatType, int, ReduceOp, UseRoi)
-{
-    cv::gpu::DeviceInfo devInfo;
-    int type;
-    int dim;
-    int reduceOp;
-    bool useRoi;
-    cv::Size size;
-    cv::Mat src;
-    cv::Mat dst_gold;
-    virtual void SetUp()
-    {
-        devInfo = GET_PARAM(0);
-        type = GET_PARAM(1);
-        dim = GET_PARAM(2);
-        reduceOp = GET_PARAM(3);
-        useRoi = GET_PARAM(4);
-        cv::gpu::setDevice(devInfo.deviceID());
-        cv::RNG& rng = TS::ptr()->get_rng();
-        size = cv::Size(rng.uniform(100, 400), rng.uniform(100, 400));
-        src = randomMat(rng, size, type, 0.0, 255.0, false);
-        cv::reduce(src, dst_gold, dim, reduceOp, reduceOp == CV_REDUCE_SUM || reduceOp == CV_REDUCE_AVG ? CV_32F : CV_MAT_DEPTH(type));
-        if (dim == 1)
-        {
-            dst_gold.cols = dst_gold.rows;
-            dst_gold.rows = 1;
-            dst_gold.step = dst_gold.cols * dst_gold.elemSize();
-        }
-    }
-};
-TEST_P(Reduce, Accuracy)
-{
-    cv::Mat dst;
-    cv::gpu::GpuMat dev_dst;
-    cv::gpu::reduce(loadMat(src, useRoi), dev_dst, dim, reduceOp, reduceOp == CV_REDUCE_SUM || reduceOp == CV_REDUCE_AVG ? CV_32F : CV_MAT_DEPTH(type));
-    dev_dst.download(dst);
-    double norm = reduceOp == CV_REDUCE_SUM || reduceOp == CV_REDUCE_AVG ? 1e-1 : 0.0;
-    EXPECT_MAT_NEAR(dst_gold, dst, norm);
-}
-INSTANTIATE_TEST_CASE_P(Arithm, Reduce, Combine(
-    ALL_DEVICES,
-    Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_16UC1, CV_16UC3, CV_16UC4, CV_32FC1, CV_32FC3, CV_32FC4),
-    Values(0, 1),
-    Values((int)CV_REDUCE_SUM, (int)CV_REDUCE_AVG, (int)CV_REDUCE_MAX, (int)CV_REDUCE_MIN),
-    WHOLE_SUBMAT));
+////////////////////////////////////////////////////////////////////////////
+// CountNonZero
+PARAM_TEST_CASE(CountNonZero, cv::gpu::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    cv::Size size;
+    int depth;
+    bool useRoi;
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        size = GET_PARAM(1);
+        depth = GET_PARAM(2);
+        useRoi = GET_PARAM(3);
+        cv::gpu::setDevice(devInfo.deviceID());
+    }
+};
+TEST_P(CountNonZero, Accuracy)
+{
+    cv::Mat srcBase = randomMat(size, CV_8U, 0.0, 1.5);
+    cv::Mat src;
+    srcBase.convertTo(src, depth);
+    int val = cv::gpu::countNonZero(loadMat(src, useRoi));
+    int val_gold = cv::countNonZero(src);
+    ASSERT_EQ(val_gold, val);
+}
+INSTANTIATE_TEST_CASE_P(GPU_Core, CountNonZero, testing::Combine(
+    ALL_DEVICES,
+    DIFFERENT_SIZES,
+    ALL_DEPTH,
+    WHOLE_SUBMAT));
-//////////////////////////////////////////////////////////////////////////////
-// gemm
-PARAM_TEST_CASE(GEMM, cv::gpu::DeviceInfo, MatType, GemmFlags, UseRoi)
-{
-    cv::gpu::DeviceInfo devInfo;
-    int type;
-    int flags;
-    bool useRoi;
-    int size;
-    cv::Mat src1;
-    cv::Mat src2;
-    cv::Mat src3;
-    double alpha;
-    double beta;
-    cv::Mat dst_gold;
-    virtual void SetUp()
-    {
-        devInfo = GET_PARAM(0);
-        type = GET_PARAM(1);
-        flags = GET_PARAM(2);
-        useRoi = GET_PARAM(3);
-        cv::gpu::setDevice(devInfo.deviceID());
-        cv::RNG& rng = TS::ptr()->get_rng();
-        size = rng.uniform(100, 200);
-        src1 = randomMat(rng, cv::Size(size, size), type, -10.0, 10.0, false);
-        src2 = randomMat(rng, cv::Size(size, size), type, -10.0, 10.0, false);
-        src3 = randomMat(rng, cv::Size(size, size), type, -10.0, 10.0, false);
-        alpha = rng.uniform(-10.0, 10.0);
-        beta = rng.uniform(-10.0, 10.0);
-        cv::gemm(src1, src2, alpha, src3, beta, dst_gold, flags);
-    }
-};
-TEST_P(GEMM, Accuracy)
-{
-    cv::Mat dst;
-    cv::gpu::GpuMat dev_dst;
-    cv::gpu::gemm(loadMat(src1, useRoi), loadMat(src2, useRoi), alpha, loadMat(src3, useRoi), beta, dev_dst, flags);
-    dev_dst.download(dst);
-    EXPECT_MAT_NEAR(dst_gold, dst, 1e-1);
-}
-INSTANTIATE_TEST_CASE_P(Arithm, GEMM, Combine(
-    ALL_DEVICES,
-    Values(CV_32FC1, CV_32FC2),
-    Values(0, (int) cv::GEMM_1_T, (int) cv::GEMM_2_T, (int) cv::GEMM_3_T),
-    WHOLE_SUBMAT));
+//////////////////////////////////////////////////////////////////////////////
+// Reduce
+PARAM_TEST_CASE(Reduce, cv::gpu::DeviceInfo, cv::Size, MatDepth, int, ReduceCode, UseRoi)
+{
+    cv::gpu::DeviceInfo devInfo;
+    cv::Size size;
+    int depth;
+    int channels;
+    int reduceOp;
+    bool useRoi;
+    int type;
+    int dst_depth;
+    int dst_type;
+    virtual void SetUp()
+    {
+        devInfo = GET_PARAM(0);
+        size = GET_PARAM(1);
+        depth = GET_PARAM(2);
+        channels = GET_PARAM(3);
+        reduceOp = GET_PARAM(4);
+        useRoi = GET_PARAM(5);
+        cv::gpu::setDevice(devInfo.deviceID());
+        type = CV_MAKE_TYPE(depth, channels);
+        dst_depth = (reduceOp == CV_REDUCE_MAX || reduceOp == CV_REDUCE_MIN) ? depth : CV_32F;
+        dst_type = CV_MAKE_TYPE(dst_depth, channels);
+    }
+};
+TEST_P(Reduce, Rows)
+{
+    cv::Mat src = randomMat(size, type);
+    cv::gpu::GpuMat dst = createMat(cv::Size(src.cols, 1), dst_type, useRoi);
+    cv::gpu::reduce(loadMat(src, useRoi), dst, 0, reduceOp, dst_depth);
+    cv::Mat dst_gold;
+    cv::reduce(src, dst_gold, 0, reduceOp, dst_depth);
+    EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 0.0 : 1e-2);
+}
+TEST_P(Reduce, Cols)
+{
+    cv::Mat src = randomMat(size, type);
+    cv::gpu::GpuMat dst = createMat(cv::Size(src.rows, 1), dst_type, useRoi);
+    cv::gpu::reduce(loadMat(src, useRoi), dst, 1, reduceOp, dst_depth);
+    cv::Mat dst_gold;
+    cv::reduce(src, dst_gold, 1, reduceOp, dst_depth);
+    dst_gold.cols = dst_gold.rows;
+    dst_gold.rows = 1;
+    dst_gold.step = dst_gold.cols * dst_gold.elemSize();
+    EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 0.0 : 1e-2);
+}
+INSTANTIATE_TEST_CASE_P(GPU_Core, Reduce, testing::Combine(
+    ALL_DEVICES,
+    DIFFERENT_SIZES,
+    testing::Values(MatDepth(CV_8U),
+                    MatDepth(CV_16U),
+                    MatDepth(CV_16S),
+                    MatDepth(CV_32F)),
+    testing::Values(1, 2, 3, 4),
+    ALL_REDUCE_CODES,
+    WHOLE_SUBMAT));
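The cols/rows/step juggling in TEST_P(Reduce, Cols) above converts the single-column matrix that cv::reduce produces along dim == 1 into the single-row layout returned by cv::gpu::reduce. A purely illustrative, equivalent formulation (not part of the commit; assumes the same headers as the test file) using cv::transpose:

// Illustrative only.
cv::Mat src = (cv::Mat_<float>(2, 3) << 1, 2, 3,
                                        4, 5, 6);
cv::Mat colSums;
cv::reduce(src, colSums, 1, CV_REDUCE_SUM, CV_32F);   // 2x1 column: [6; 15]
cv::Mat rowLayout;
cv::transpose(colSums, rowLayout);                    // 1x2 row: [6, 15], matching the GPU layout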
#endif // HAVE_CUDA
...@@ -2362,53 +2362,6 @@ TEST_P(ColumnSum, Accuracy)
INSTANTIATE_TEST_CASE_P(ImgProc, ColumnSum, ALL_DEVICES);
////////////////////////////////////////////////////////////////////////
// Norm
PARAM_TEST_CASE(Norm, cv::gpu::DeviceInfo, MatType, NormCode, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
int type;
int normType;
bool useRoi;
cv::Size size;
cv::Mat src;
double gold;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
normType = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(rng.uniform(100, 400), rng.uniform(100, 400));
src = randomMat(rng, size, type, 0.0, 10.0, false);
gold = cv::norm(src, normType);
}
};
TEST_P(Norm, Accuracy)
{
double res = cv::gpu::norm(loadMat(src, useRoi), normType);
ASSERT_NEAR(res, gold, 0.5);
}
INSTANTIATE_TEST_CASE_P(ImgProc, Norm, Combine(
ALL_DEVICES,
TYPES(CV_8U, CV_32F, 1, 1),
Values((int) cv::NORM_INF, (int) cv::NORM_L1, (int) cv::NORM_L2),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// reprojectImageTo3D
......
...@@ -82,7 +82,7 @@ INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Threshold, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    testing::Values(MatType(CV_8UC1), MatType(CV_16SC1), MatType(CV_32FC1)),
-    testing::Values(ThreshOp(cv::THRESH_BINARY), ThreshOp(cv::THRESH_BINARY_INV), ThreshOp(cv::THRESH_TRUNC), ThreshOp(cv::THRESH_TOZERO), ThreshOp(cv::THRESH_TOZERO_INV)),
+    ALL_THRESH_OPS,
    WHOLE_SUBMAT));
#endif // HAVE_CUDA
...@@ -45,6 +45,7 @@ using namespace std;
using namespace cv;
using namespace cv::gpu;
using namespace cvtest;
using namespace testing;
int randomInt(int minVal, int maxVal)
{
...@@ -98,35 +99,6 @@ GpuMat loadMat(const Mat& m, bool useRoi)
    return d_m;
}
void showDiff(InputArray gold_, InputArray actual_, double eps)
{
Mat gold;
if (gold_.kind() == _InputArray::MAT)
gold = gold_.getMat();
else
gold_.getGpuMat().download(gold);
Mat actual;
if (actual_.kind() == _InputArray::MAT)
actual = actual_.getMat();
else
actual_.getGpuMat().download(actual);
Mat diff;
absdiff(gold, actual, diff);
threshold(diff, diff, eps, 255.0, cv::THRESH_BINARY);
namedWindow("gold", WINDOW_NORMAL);
namedWindow("actual", WINDOW_NORMAL);
namedWindow("diff", WINDOW_NORMAL);
imshow("gold", gold);
imshow("actual", actual);
imshow("diff", diff);
waitKey();
}
bool supportFeature(const DeviceInfo& info, FeatureSet feature)
{
    return TargetArchs::builtWith(feature) && info.supports(feature);
...@@ -220,20 +192,50 @@ Mat readImageType(const string& fname, int type)
    return src;
}
-double checkNorm(const Mat& m)
-{
-    return norm(m, NORM_INF);
-}
-double checkNorm(const Mat& m1, const Mat& m2)
-{
-    return norm(m1, m2, NORM_INF);
-}
-double checkSimilarity(const Mat& m1, const Mat& m2)
-{
-    Mat diff;
-    matchTemplate(m1, m2, diff, CV_TM_CCORR_NORMED);
-    return std::abs(diff.at<float>(0, 0) - 1.f);
-}
+namespace
+{
+    Mat getMat(InputArray arr)
+    {
+        if (arr.kind() == _InputArray::GPU_MAT)
+        {
+            Mat m;
+            arr.getGpuMat().download(m);
+            return m;
+        }
+        return arr.getMat();
+    }
+}
+void showDiff(InputArray gold_, InputArray actual_, double eps)
+{
+    Mat gold = getMat(gold_);
+    Mat actual = getMat(actual_);
+    Mat diff;
+    absdiff(gold, actual, diff);
+    threshold(diff, diff, eps, 255.0, cv::THRESH_BINARY);
+    namedWindow("gold", WINDOW_NORMAL);
+    namedWindow("actual", WINDOW_NORMAL);
+    namedWindow("diff", WINDOW_NORMAL);
+    imshow("gold", gold);
+    imshow("actual", actual);
+    imshow("diff", diff);
+    waitKey();
+}
+double checkNorm(InputArray m1, const InputArray m2)
+{
+    return norm(getMat(m1), getMat(m2), NORM_INF);
+}
+double checkSimilarity(InputArray m1, InputArray m2)
+{
+    Mat diff;
+    matchTemplate(getMat(m1), getMat(m2), diff, CV_TM_CCORR_NORMED);
+    return std::abs(diff.at<float>(0, 0) - 1.f);
+}
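With getMat() in place, checkNorm and checkSimilarity accept host or device matrices interchangeably, so EXPECT_MAT_NEAR and EXPECT_MAT_SIMILAR can be given a GpuMat directly. A minimal illustrative sketch (the device result here is faked by an upload; in a real test it would come from a gpu:: routine):

// Illustrative only.
cv::Mat gold = randomMat(cv::Size(128, 128), CV_8UC1);
cv::gpu::GpuMat d_result(gold);         // stands in for the output of a GPU function
EXPECT_MAT_NEAR(gold, d_result, 0.0);   // no explicit download(): checkNorm() goes through getMat()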
......
...@@ -65,27 +65,30 @@ std::vector<cv::gpu::DeviceInfo> devices(cv::gpu::FeatureSet feature);
cv::Mat readImage(const std::string& fileName, int flags = cv::IMREAD_COLOR);
cv::Mat readImageType(const std::string& fname, int type);
-double checkNorm(const cv::Mat& m);
-double checkNorm(const cv::Mat& m1, const cv::Mat& m2);
-double checkSimilarity(const cv::Mat& m1, const cv::Mat& m2);
-#define EXPECT_MAT_NORM(mat, eps) \
-{ \
-    EXPECT_LE(checkNorm(cv::Mat(mat)), eps) \
-}
+double checkNorm(cv::InputArray m1, cv::InputArray m2);
#define EXPECT_MAT_NEAR(mat1, mat2, eps) \
{ \
    ASSERT_EQ(mat1.type(), mat2.type()); \
    ASSERT_EQ(mat1.size(), mat2.size()); \
-    EXPECT_LE(checkNorm(cv::Mat(mat1), cv::Mat(mat2)), eps); \
+    EXPECT_LE(checkNorm(mat1, mat2), eps); \
}
#define EXPECT_SCALAR_NEAR(s1, s2, eps) \
{ \
EXPECT_NEAR(s1[0], s2[0], eps); \
EXPECT_NEAR(s1[1], s2[1], eps); \
EXPECT_NEAR(s1[2], s2[2], eps); \
EXPECT_NEAR(s1[3], s2[3], eps); \
} }
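The new EXPECT_SCALAR_NEAR macro checks all four channels of a cv::Scalar against one tolerance. A purely illustrative usage (values made up):

// Illustrative only.
cv::Scalar val_gold(10.0, 20.0, 30.0, 40.0);
cv::Scalar val(10.2, 19.9, 30.1, 40.0);
EXPECT_SCALAR_NEAR(val_gold, val, 0.5);   // compares s1[i] with s2[i] for i = 0..3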
double checkSimilarity(cv::InputArray m1, cv::InputArray m2);
#define EXPECT_MAT_SIMILAR(mat1, mat2, eps) \
{ \
    ASSERT_EQ(mat1.type(), mat2.type()); \
    ASSERT_EQ(mat1.size(), mat2.size()); \
-    EXPECT_LE(checkSimilarity(cv::Mat(mat1), cv::Mat(mat2)), eps); \
+    EXPECT_LE(checkSimilarity(mat1, mat2), eps); \
}
namespace cv { namespace gpu
...@@ -112,8 +115,10 @@ public:
private:
    bool val_;
};
void PrintTo(const UseRoi& useRoi, std::ostream* os);
#define WHOLE testing::Values(UseRoi(false))
#define SUBMAT testing::Values(UseRoi(true))
#define WHOLE_SUBMAT testing::Values(UseRoi(false), UseRoi(true))
class Inverse
{
...@@ -125,25 +130,30 @@ public:
private:
    bool val_;
};
void PrintTo(const Inverse& useRoi, std::ostream* os);
#define DIRECT_INVERSE testing::Values(Inverse(false), Inverse(true))
CV_ENUM(CmpCode, cv::CMP_EQ, cv::CMP_GT, cv::CMP_GE, cv::CMP_LT, cv::CMP_LE, cv::CMP_NE)
#define ALL_CMP_CODES testing::Values(CmpCode(cv::CMP_EQ), CmpCode(cv::CMP_NE), CmpCode(cv::CMP_GT), CmpCode(cv::CMP_GE), CmpCode(cv::CMP_LT), CmpCode(cv::CMP_LE))
CV_ENUM(NormCode, cv::NORM_INF, cv::NORM_L1, cv::NORM_L2, cv::NORM_TYPE_MASK, cv::NORM_RELATIVE, cv::NORM_MINMAX)
enum {FLIP_BOTH = 0, FLIP_X = 1, FLIP_Y = -1};
CV_ENUM(FlipCode, FLIP_BOTH, FLIP_X, FLIP_Y)
#define ALL_FLIP_CODES testing::Values(FlipCode(FLIP_BOTH), FlipCode(FLIP_X), FlipCode(FLIP_Y))
-CV_ENUM(ReduceOp, CV_REDUCE_SUM, CV_REDUCE_AVG, CV_REDUCE_MAX, CV_REDUCE_MIN)
+CV_ENUM(ReduceCode, CV_REDUCE_SUM, CV_REDUCE_AVG, CV_REDUCE_MAX, CV_REDUCE_MIN)
#define ALL_REDUCE_CODES testing::Values(ReduceCode(CV_REDUCE_SUM), ReduceCode(CV_REDUCE_AVG), ReduceCode(CV_REDUCE_MAX), ReduceCode(CV_REDUCE_MIN))
-CV_FLAGS(GemmFlags, cv::GEMM_1_T, cv::GEMM_2_T, cv::GEMM_3_T);
+CV_FLAGS(GemmFlags, 0, cv::GEMM_1_T, cv::GEMM_2_T, cv::GEMM_3_T);
#define ALL_GEMM_FLAGS testing::Values(GemmFlags(0), GemmFlags(cv::GEMM_1_T), GemmFlags(cv::GEMM_2_T), GemmFlags(cv::GEMM_3_T), GemmFlags(cv::GEMM_1_T | cv::GEMM_2_T), GemmFlags(cv::GEMM_1_T | cv::GEMM_3_T), GemmFlags(cv::GEMM_1_T | cv::GEMM_2_T | cv::GEMM_3_T))
CV_ENUM(DistType, cv::gpu::BruteForceMatcher_GPU_base::L1Dist, cv::gpu::BruteForceMatcher_GPU_base::L2Dist)
CV_ENUM(MorphOp, cv::MORPH_OPEN, cv::MORPH_CLOSE, cv::MORPH_GRADIENT, cv::MORPH_TOPHAT, cv::MORPH_BLACKHAT)
CV_ENUM(ThreshOp, cv::THRESH_BINARY, cv::THRESH_BINARY_INV, cv::THRESH_TRUNC, cv::THRESH_TOZERO, cv::THRESH_TOZERO_INV)
#define ALL_THRESH_OPS testing::Values(ThreshOp(cv::THRESH_BINARY), ThreshOp(cv::THRESH_BINARY_INV), ThreshOp(cv::THRESH_TRUNC), ThreshOp(cv::THRESH_TOZERO), ThreshOp(cv::THRESH_TOZERO_INV))
CV_ENUM(Interpolation, cv::INTER_NEAREST, cv::INTER_LINEAR, cv::INTER_CUBIC)
...@@ -194,12 +204,4 @@ CV_FLAGS(DftFlags, cv::DFT_INVERSE, cv::DFT_SCALE, cv::DFT_ROWS, cv::DFT_COMPLEX
    \
    std::make_pair(MatDepth(CV_64F), MatDepth(CV_64F)))
#define WHOLE testing::Values(UseRoi(false))
#define SUBMAT testing::Values(UseRoi(true))
#define WHOLE_SUBMAT testing::Values(UseRoi(false), UseRoi(true))
#define DIRECT_INVERSE testing::Values(Inverse(false), Inverse(true))
#define ALL_CMP_CODES testing::Values(CmpCode(cv::CMP_EQ), CmpCode(cv::CMP_NE), CmpCode(cv::CMP_GT), CmpCode(cv::CMP_GE), CmpCode(cv::CMP_LT), CmpCode(cv::CMP_LE))
#endif // __OPENCV_TEST_UTILITY_HPP__