Commit 844bdea5 authored by Vladislav Vinogradov

fixed several bugs in gpu arithm functions

refactored tests for them
parent f58c40bf
......@@ -638,11 +638,11 @@ CV_EXPORTS void bitwise_xor(const GpuMat& src1, const Scalar& sc, GpuMat& dst, S
//! pixel by pixel right shift of an image by a constant value
//! supports 1, 3 and 4 channels images with integers elements
- CV_EXPORTS void rshift(const GpuMat& src, const Scalar& sc, GpuMat& dst, Stream& stream = Stream::Null());
CV_EXPORTS void rshift(const GpuMat& src, Scalar_<int> sc, GpuMat& dst, Stream& stream = Stream::Null());
//! pixel by pixel left shift of an image by a constant value
//! supports 1, 3 and 4 channels images with CV_8U, CV_16U or CV_32S depth
- CV_EXPORTS void lshift(const GpuMat& src, const Scalar& sc, GpuMat& dst, Stream& stream = Stream::Null());
CV_EXPORTS void lshift(const GpuMat& src, Scalar_<int> sc, GpuMat& dst, Stream& stream = Stream::Null());
//! computes per-element minimum of two arrays (dst = min(src1, src2))
CV_EXPORTS void min(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream = Stream::Null());
......
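Note on the signature change above: the shift amount is now an integer-typed scalar (Scalar_<int>) rather than a double-based cv::Scalar, which lines up with the Scalar_<Npp32u> plumbing visible further down. A minimal usage sketch of the new overload; the wrapper function name and shift amounts are illustrative, not part of the commit:

    #include <opencv2/gpu/gpu.hpp>

    // Shift every channel of an 8-bit 4-channel image right by a per-channel
    // amount; per the header comments, rshift expects integer element depths.
    void shiftChannelsDown(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst)
    {
        CV_Assert(src.type() == CV_8UC4);
        cv::gpu::rshift(src, cv::Scalar_<int>(1, 2, 3, 0), dst); // dst_c = src_c >> sc_c
    }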
......@@ -706,8 +706,8 @@ namespace cv { namespace gpu { namespace device
{
__device__ __forceinline__ short4 operator ()(short4 a, float b) const
{
- return b != 0 ? make_short4(saturate_cast<short>(a.x / b), saturate_cast<uchar>(a.y / b),
- saturate_cast<short>(a.z / b), saturate_cast<uchar>(a.w / b))
return b != 0 ? make_short4(saturate_cast<short>(a.x / b), saturate_cast<short>(a.y / b),
saturate_cast<short>(a.z / b), saturate_cast<short>(a.w / b))
: make_short4(0,0,0,0);
}
};
......
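The functor fix above matters for signed data: the y and w lanes were saturating through uchar, which clamps to [0, 255] and silently zeroes negative quotients. A small host-side illustration of the two casts (the values are illustrative):

    #include <opencv2/core/core.hpp>

    int main()
    {
        float q = -3.7f; // a negative per-channel quotient, as in short4 / float
        uchar wrong = cv::saturate_cast<uchar>(q); // 0: clamped to [0, 255], sign lost
        short right = cv::saturate_cast<short>(q); // -4: rounded to nearest, sign kept
        return (wrong == 0 && right == -4) ? 0 : 1;
    }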
......@@ -71,8 +71,8 @@ void cv::gpu::bitwise_and(const GpuMat&, const GpuMat&, GpuMat&, const GpuMat&,
void cv::gpu::bitwise_and(const GpuMat&, const Scalar&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::bitwise_xor(const GpuMat&, const GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::bitwise_xor(const GpuMat&, const Scalar&, GpuMat&, Stream&) { throw_nogpu(); }
- void cv::gpu::rshift(const GpuMat&, const Scalar&, GpuMat&, Stream&) { throw_nogpu(); }
- void cv::gpu::lshift(const GpuMat&, const Scalar&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::rshift(const GpuMat&, Scalar_<int>, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::lshift(const GpuMat&, Scalar_<int>, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::min(const GpuMat&, const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::min(const GpuMat&, double, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::max(const GpuMat&, const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
......@@ -462,15 +462,14 @@ void cv::gpu::subtract(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, cons
{0/*subtract_gpu<double, unsigned char>*/, 0/*subtract_gpu<double, signed char>*/, 0/*subtract_gpu<double, unsigned short>*/, 0/*subtract_gpu<double, short>*/, 0/*subtract_gpu<double, int>*/, 0/*subtract_gpu<double, float>*/, subtract_gpu<double, double>}
};
- static const func_t npp_funcs[7] =
static const func_t npp_funcs[6] =
{
NppArithm<CV_8U, nppiSub_8u_C1RSfs>::call,
0,
NppArithm<CV_16U, nppiSub_16u_C1RSfs>::call,
NppArithm<CV_16S, nppiSub_16s_C1RSfs>::call,
NppArithm<CV_32S, nppiSub_32s_C1RSfs>::call,
- NppArithm<CV_32F, nppiSub_32f_C1R>::call,
- subtract_gpu<double, double>
NppArithm<CV_32F, nppiSub_32f_C1R>::call
};
CV_Assert(src1.type() != CV_8S);
......@@ -484,7 +483,7 @@ void cv::gpu::subtract(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, cons
cudaStream_t stream = StreamAccessor::getStream(s);
- if (mask.empty() && dst.type() == src1.type())
if (mask.empty() && dst.type() == src1.type() && src1.depth() <= CV_32F)
{
npp_funcs[src1.depth()](src2.reshape(1), src1.reshape(1), dst.reshape(1), PtrStepb(), stream);
return;
......@@ -734,15 +733,14 @@ void cv::gpu::divide(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, double
{0/*divide_gpu<double, unsigned char>*/, 0/*divide_gpu<double, signed char>*/, 0/*divide_gpu<double, unsigned short>*/, 0/*divide_gpu<double, short>*/, 0/*divide_gpu<double, int>*/, 0/*divide_gpu<double, float>*/, divide_gpu<double, double>}
};
- static const func_t npp_funcs[7] =
static const func_t npp_funcs[6] =
{
NppArithm<CV_8U, nppiDiv_8u_C1RSfs>::call,
0,
NppArithm<CV_16U, nppiDiv_16u_C1RSfs>::call,
NppArithm<CV_16S, nppiDiv_16s_C1RSfs>::call,
NppArithm<CV_32S, nppiDiv_32s_C1RSfs>::call,
- NppArithm<CV_32F, nppiDiv_32f_C1R>::call,
- divide_gpu<double, double>
NppArithm<CV_32F, nppiDiv_32f_C1R>::call
};
cudaStream_t stream = StreamAccessor::getStream(s);
......@@ -753,7 +751,7 @@ void cv::gpu::divide(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, double
dst.create(src1.size(), src1.type());
- multiply_gpu(static_cast<DevMem2D_<uchar4> >(src1), static_cast<DevMem2Df>(src2), static_cast<DevMem2D_<uchar4> >(dst), stream);
divide_gpu(static_cast<DevMem2D_<uchar4> >(src1), static_cast<DevMem2Df>(src2), static_cast<DevMem2D_<uchar4> >(dst), stream);
}
else if (src1.type() == CV_16SC4 && src2.type() == CV_32FC1)
{
......@@ -761,7 +759,7 @@ void cv::gpu::divide(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, double
dst.create(src1.size(), src1.type());
- multiply_gpu(static_cast<DevMem2D_<short4> >(src1), static_cast<DevMem2Df>(src2), static_cast<DevMem2D_<short4> >(dst), stream);
divide_gpu(static_cast<DevMem2D_<short4> >(src1), static_cast<DevMem2Df>(src2), static_cast<DevMem2D_<short4> >(dst), stream);
}
else
{
......@@ -773,7 +771,7 @@ void cv::gpu::divide(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, double
dst.create(src1.size(), CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), src1.channels()));
- if (scale == 1 && dst.type() == src1.type())
if (scale == 1 && dst.type() == src1.type() && src1.depth() <= CV_32F)
{
npp_funcs[src1.depth()](src2.reshape(1), src1.reshape(1), dst.reshape(1), 1, stream);
return;
......@@ -1729,7 +1727,7 @@ namespace
};
}
- void cv::gpu::rshift(const GpuMat& src, const Scalar& sc, GpuMat& dst, Stream& stream)
void cv::gpu::rshift(const GpuMat& src, Scalar_<int> sc, GpuMat& dst, Stream& stream)
{
typedef void (*func_t)(const GpuMat& src, Scalar_<Npp32u> sc, GpuMat& dst, cudaStream_t stream);
static const func_t funcs[5][4] =
......@@ -1749,7 +1747,7 @@ void cv::gpu::rshift(const GpuMat& src, const Scalar& sc, GpuMat& dst, Stream& s
funcs[src.depth()][src.channels() - 1](src, sc, dst, StreamAccessor::getStream(stream));
}
- void cv::gpu::lshift(const GpuMat& src, const Scalar& sc, GpuMat& dst, Stream& stream)
void cv::gpu::lshift(const GpuMat& src, Scalar_<int> sc, GpuMat& dst, Stream& stream)
{
typedef void (*func_t)(const GpuMat& src, Scalar_<Npp32u> sc, GpuMat& dst, cudaStream_t stream);
static const func_t funcs[5][4] =
......
......@@ -43,377 +43,1455 @@
#ifdef HAVE_CUDA
using namespace cvtest;
using namespace testing;
////////////////////////////////////////////////////////////////////////////////
// Add_Array
PARAM_TEST_CASE(Add_Array, cv::gpu::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, int, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
std::pair<MatType, MatType> depth;
int channels;
bool useRoi;
int stype;
int dtype;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
channels = GET_PARAM(3);
useRoi = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
stype = CV_MAKE_TYPE(depth.first, channels);
dtype = CV_MAKE_TYPE(depth.second, channels);
}
};
TEST_P(Add_Array, Accuracy)
{
if (depth.first == CV_64F || depth.second == CV_64F)
{
if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
return;
}
cv::Mat mat1 = randomMat(size, stype);
cv::Mat mat2 = randomMat(size, stype);
cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
cv::gpu::GpuMat dst = createMat(size, dtype, useRoi);
dst.setTo(cv::Scalar::all(0));
cv::gpu::add(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, channels == 1 ? loadMat(mask, useRoi) : cv::gpu::GpuMat(), depth.second);
cv::Mat dst_gold(size, dtype, cv::Scalar::all(0));
cv::add(mat1, mat2, dst_gold, channels == 1 ? mask : cv::noArray(), depth.second);
EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Add_Array, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
testing::Values(1, 2, 3, 4),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Add_Scalar
PARAM_TEST_CASE(Add_Scalar, cv::gpu::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
std::pair<MatType, MatType> depth;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(Add_Scalar, Accuracy)
{
if (depth.first == CV_64F || depth.second == CV_64F)
{
if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
return;
}
cv::Mat mat = randomMat(size, depth.first);
cv::Scalar val = randomScalar(0, 255);
cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
cv::gpu::GpuMat dst = createMat(size, depth.second, useRoi);
dst.setTo(cv::Scalar::all(0));
cv::gpu::add(loadMat(mat, useRoi), val, dst, loadMat(mask, useRoi), depth.second);
cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
cv::add(mat, val, dst_gold, mask, depth.second);
EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Add_Scalar, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Subtract_Array
PARAM_TEST_CASE(Subtract_Array, cv::gpu::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, int, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
std::pair<MatType, MatType> depth;
int channels;
bool useRoi;
int stype;
int dtype;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
channels = GET_PARAM(3);
useRoi = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
stype = CV_MAKE_TYPE(depth.first, channels);
dtype = CV_MAKE_TYPE(depth.second, channels);
}
};
TEST_P(Subtract_Array, Accuracy)
{
if (depth.first == CV_64F || depth.second == CV_64F)
{
if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
return;
}
cv::Mat mat1 = randomMat(size, stype);
cv::Mat mat2 = randomMat(size, stype);
cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
cv::gpu::GpuMat dst = createMat(size, dtype, useRoi);
dst.setTo(cv::Scalar::all(0));
cv::gpu::subtract(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, channels == 1 ? loadMat(mask, useRoi) : cv::gpu::GpuMat(), depth.second);
cv::Mat dst_gold(size, dtype, cv::Scalar::all(0));
cv::subtract(mat1, mat2, dst_gold, channels == 1 ? mask : cv::noArray(), depth.second);
EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Subtract_Array, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
testing::Values(1, 2, 3, 4),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Subtract_Scalar
PARAM_TEST_CASE(Subtract_Scalar, cv::gpu::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
std::pair<MatType, MatType> depth;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(Subtract_Scalar, Accuracy)
{
if (depth.first == CV_64F || depth.second == CV_64F)
{
if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
return;
}
cv::Mat mat = randomMat(size, depth.first);
cv::Scalar val = randomScalar(0, 255);
cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
cv::gpu::GpuMat dst = createMat(size, depth.second, useRoi);
dst.setTo(cv::Scalar::all(0));
cv::gpu::subtract(loadMat(mat, useRoi), val, dst, loadMat(mask, useRoi), depth.second);
cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
cv::subtract(mat, val, dst_gold, mask, depth.second);
EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Subtract_Scalar, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Multiply_Array
PARAM_TEST_CASE(Multiply_Array, cv::gpu::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, int, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
std::pair<MatType, MatType> depth;
int channels;
bool useRoi;
int stype;
int dtype;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
channels = GET_PARAM(3);
useRoi = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
stype = CV_MAKE_TYPE(depth.first, channels);
dtype = CV_MAKE_TYPE(depth.second, channels);
}
};
TEST_P(Multiply_Array, Accuracy)
{
if (depth.first == CV_64F || depth.second == CV_64F)
{
if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
return;
}
cv::Mat mat1 = randomMat(size, stype);
cv::Mat mat2 = randomMat(size, stype);
double scale = randomDouble(0.0, 255.0);
cv::gpu::GpuMat dst = createMat(size, dtype, useRoi);
cv::gpu::multiply(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, scale, depth.second);
cv::Mat dst_gold;
cv::multiply(mat1, mat2, dst_gold, scale, depth.second);
EXPECT_MAT_NEAR(dst_gold, dst, 1.0);
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Multiply_Array, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
testing::Values(1, 2, 3, 4),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Multiply_Array_Special_Case
PARAM_TEST_CASE(Multiply_Array_Special_Case, cv::gpu::DeviceInfo, cv::Size, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
useRoi = GET_PARAM(2);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(Multiply_Array_Special_Case, _8UC4x_32FC1)
{
cv::Mat mat1 = randomMat(size, CV_8UC4);
cv::Mat mat2 = randomMat(size, CV_32FC1);
cv::gpu::GpuMat dst = createMat(size, CV_8UC4, useRoi);
cv::gpu::multiply(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst);
cv::Mat h_dst(dst);
for (int y = 0; y < h_dst.rows; ++y)
{
const cv::Vec4b* mat1_row = mat1.ptr<cv::Vec4b>(y);
const float* mat2_row = mat2.ptr<float>(y);
const cv::Vec4b* dst_row = h_dst.ptr<cv::Vec4b>(y);
for (int x = 0; x < h_dst.cols; ++x)
{
cv::Vec4b val1 = mat1_row[x];
float val2 = mat2_row[x];
cv::Vec4b actual = dst_row[x];
cv::Vec4b gold;
gold[0] = cv::saturate_cast<uchar>(val1[0] * val2);
gold[1] = cv::saturate_cast<uchar>(val1[1] * val2);
gold[2] = cv::saturate_cast<uchar>(val1[2] * val2);
gold[3] = cv::saturate_cast<uchar>(val1[3] * val2);
ASSERT_LE(std::abs(gold[0] - actual[0]), 1.0);
ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
ASSERT_LE(std::abs(gold[2] - actual[2]), 1.0);
ASSERT_LE(std::abs(gold[3] - actual[3]), 1.0);
}
}
}
TEST_P(Multiply_Array_Special_Case, _16SC4x_32FC1)
{
cv::Mat mat1 = randomMat(size, CV_16SC4);
cv::Mat mat2 = randomMat(size, CV_32FC1);
cv::gpu::GpuMat dst = createMat(size, CV_16SC4, useRoi);
cv::gpu::multiply(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst);
cv::Mat h_dst(dst);
for (int y = 0; y < h_dst.rows; ++y)
{
const cv::Vec4s* mat1_row = mat1.ptr<cv::Vec4s>(y);
const float* mat2_row = mat2.ptr<float>(y);
const cv::Vec4s* dst_row = h_dst.ptr<cv::Vec4s>(y);
for (int x = 0; x < h_dst.cols; ++x)
{
cv::Vec4s val1 = mat1_row[x];
float val2 = mat2_row[x];
cv::Vec4s actual = dst_row[x];
cv::Vec4s gold;
gold[0] = cv::saturate_cast<short>(val1[0] * val2);
gold[1] = cv::saturate_cast<short>(val1[1] * val2);
gold[2] = cv::saturate_cast<short>(val1[2] * val2);
gold[3] = cv::saturate_cast<short>(val1[3] * val2);
ASSERT_LE(std::abs(gold[0] - actual[0]), 1.0);
ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
ASSERT_LE(std::abs(gold[2] - actual[2]), 1.0);
ASSERT_LE(std::abs(gold[3] - actual[3]), 1.0);
}
}
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Multiply_Array_Special_Case, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Multiply_Scalar
PARAM_TEST_CASE(Multiply_Scalar, cv::gpu::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
std::pair<MatType, MatType> depth;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(Multiply_Scalar, Accuracy)
{
if (depth.first == CV_64F || depth.second == CV_64F)
{
if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
return;
}
cv::Mat mat = randomMat(size, depth.first);
cv::Scalar val = randomScalar(0, 255);
double scale = randomDouble(0.0, 255.0);
cv::gpu::GpuMat dst = createMat(size, depth.second, useRoi);
cv::gpu::multiply(loadMat(mat, useRoi), val, dst, scale, depth.second);
cv::Mat dst_gold;
cv::multiply(mat, val, dst_gold, scale, depth.second);
EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Multiply_Scalar, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Divide_Array
PARAM_TEST_CASE(Divide_Array, cv::gpu::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, int, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
std::pair<MatType, MatType> depth;
int channels;
bool useRoi;
int stype;
int dtype;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
channels = GET_PARAM(3);
useRoi = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
stype = CV_MAKE_TYPE(depth.first, channels);
dtype = CV_MAKE_TYPE(depth.second, channels);
}
};
TEST_P(Divide_Array, Accuracy)
{
if (depth.first == CV_64F || depth.second == CV_64F)
{
if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
return;
}
cv::Mat mat1 = randomMat(size, stype);
cv::Mat mat2 = randomMat(size, stype, 1.0, 255.0);
double scale = randomDouble(0.0, 255.0);
cv::gpu::GpuMat dst = createMat(size, dtype, useRoi);
cv::gpu::divide(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, scale, depth.second);
cv::Mat dst_gold;
cv::divide(mat1, mat2, dst_gold, scale, depth.second);
EXPECT_MAT_NEAR(dst_gold, dst, 1.0);
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Divide_Array, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
testing::Values(1, 2, 3, 4),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Divide_Array_Special_Case
PARAM_TEST_CASE(Divide_Array_Special_Case, cv::gpu::DeviceInfo, cv::Size, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
useRoi = GET_PARAM(2);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(Divide_Array_Special_Case, _8UC4x_32FC1)
{
cv::Mat mat1 = randomMat(size, CV_8UC4);
cv::Mat mat2 = randomMat(size, CV_32FC1, 1.0, 255.0);
cv::gpu::GpuMat dst = createMat(size, CV_8UC4, useRoi);
cv::gpu::divide(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst);
cv::Mat h_dst(dst);
for (int y = 0; y < h_dst.rows; ++y)
{
const cv::Vec4b* mat1_row = mat1.ptr<cv::Vec4b>(y);
const float* mat2_row = mat2.ptr<float>(y);
const cv::Vec4b* dst_row = h_dst.ptr<cv::Vec4b>(y);
for (int x = 0; x < h_dst.cols; ++x)
{
cv::Vec4b val1 = mat1_row[x];
float val2 = mat2_row[x];
cv::Vec4b actual = dst_row[x];
cv::Vec4b gold;
gold[0] = cv::saturate_cast<uchar>(val1[0] / val2);
gold[1] = cv::saturate_cast<uchar>(val1[1] / val2);
gold[2] = cv::saturate_cast<uchar>(val1[2] / val2);
gold[3] = cv::saturate_cast<uchar>(val1[3] / val2);
ASSERT_LE(std::abs(gold[0] - actual[0]), 1.0);
ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
ASSERT_LE(std::abs(gold[2] - actual[2]), 1.0);
ASSERT_LE(std::abs(gold[3] - actual[3]), 1.0);
}
}
}
TEST_P(Divide_Array_Special_Case, _16SC4x_32FC1)
{
cv::Mat mat1 = randomMat(size, CV_16SC4);
cv::Mat mat2 = randomMat(size, CV_32FC1, 1.0, 255.0);
cv::gpu::GpuMat dst = createMat(size, CV_16SC4, useRoi);
cv::gpu::divide(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst);
cv::Mat h_dst(dst);
for (int y = 0; y < h_dst.rows; ++y)
{
const cv::Vec4s* mat1_row = mat1.ptr<cv::Vec4s>(y);
const float* mat2_row = mat2.ptr<float>(y);
const cv::Vec4s* dst_row = h_dst.ptr<cv::Vec4s>(y);
for (int x = 0; x < h_dst.cols; ++x)
{
cv::Vec4s val1 = mat1_row[x];
float val2 = mat2_row[x];
cv::Vec4s actual = dst_row[x];
cv::Vec4s gold;
gold[0] = cv::saturate_cast<short>(val1[0] / val2);
gold[1] = cv::saturate_cast<short>(val1[1] / val2);
gold[2] = cv::saturate_cast<short>(val1[2] / val2);
gold[3] = cv::saturate_cast<short>(val1[3] / val2);
ASSERT_LE(std::abs(gold[0] - actual[0]), 1.0);
ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
ASSERT_LE(std::abs(gold[2] - actual[2]), 1.0);
ASSERT_LE(std::abs(gold[3] - actual[3]), 1.0);
}
}
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Divide_Array_Special_Case, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Divide_Scalar
PARAM_TEST_CASE(Divide_Scalar, cv::gpu::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
std::pair<MatType, MatType> depth;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
- PARAM_TEST_CASE(ArithmTestBase, cv::gpu::DeviceInfo, MatType, UseRoi)
TEST_P(Divide_Scalar, Accuracy)
{
if (depth.first == CV_64F || depth.second == CV_64F)
{
if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
return;
}
cv::Mat mat = randomMat(size, depth.first);
cv::Scalar val = randomScalar(1.0, 255.0);
double scale = randomDouble(0.0, 255.0);
cv::gpu::GpuMat dst = createMat(size, depth.second, useRoi);
cv::gpu::divide(loadMat(mat, useRoi), val, dst, scale, depth.second);
cv::Mat dst_gold;
cv::divide(mat, val, dst_gold, scale, depth.second);
EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Divide_Scalar, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Divide_Scalar_Inv
PARAM_TEST_CASE(Divide_Scalar_Inv, cv::gpu::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
std::pair<MatType, MatType> depth;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(Divide_Scalar_Inv, Accuracy)
{
if (depth.first == CV_64F || depth.second == CV_64F)
{
if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
return;
}
double scale = randomDouble(0.0, 255.0);
cv::Mat mat = randomMat(size, depth.first, 1.0, 255.0);
cv::gpu::GpuMat dst = createMat(size, depth.second, useRoi);
cv::gpu::divide(scale, loadMat(mat, useRoi), dst, depth.second);
cv::Mat dst_gold;
cv::divide(scale, mat, dst_gold, depth.second);
EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Divide_Scalar_Inv, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// AbsDiff
PARAM_TEST_CASE(AbsDiff, cv::gpu::DeviceInfo, cv::Size, MatDepth, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int depth;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(AbsDiff, Array)
{
if (depth == CV_64F)
{
if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
return;
}
cv::Mat src1 = randomMat(size, depth);
cv::Mat src2 = randomMat(size, depth);
cv::gpu::GpuMat dst = createMat(size, depth, useRoi);
cv::gpu::absdiff(loadMat(src1, useRoi), loadMat(src2, useRoi), dst);
cv::Mat dst_gold;
cv::absdiff(src1, src2, dst_gold);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
TEST_P(AbsDiff, Scalar)
{
if (depth == CV_64F)
{
if (!devInfo.supports(cv::gpu::NATIVE_DOUBLE))
return;
}
cv::Mat src = randomMat(size, depth);
cv::Scalar val = randomScalar(0.0, 255.0);
cv::gpu::GpuMat dst = createMat(size, depth, useRoi);
cv::gpu::absdiff(loadMat(src, useRoi), val, dst);
cv::Mat dst_gold;
cv::absdiff(src, val, dst_gold);
EXPECT_MAT_NEAR(dst_gold, dst, depth <= CV_32F ? 1.0 : 1e-5);
}
INSTANTIATE_TEST_CASE_P(GPU_Core, AbsDiff, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Abs
PARAM_TEST_CASE(Abs, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(Abs, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::gpu::GpuMat dst = createMat(size, type, useRoi);
cv::gpu::abs(loadMat(src, useRoi), dst);
cv::Mat dst_gold = cv::abs(src);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Abs, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_16SC1), MatType(CV_32FC1)),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Sqr
PARAM_TEST_CASE(Sqr, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(Sqr, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::gpu::GpuMat dst = createMat(size, type, useRoi);
cv::gpu::sqr(loadMat(src, useRoi), dst);
cv::Mat dst_gold;
cv::multiply(src, src, dst_gold);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Sqr, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_16UC1), MatType(CV_16SC1), MatType(CV_32FC1)),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Sqrt
namespace
{
template <typename T> void sqrtImpl(const cv::Mat& src, cv::Mat& dst)
{
dst.create(src.size(), src.type());
for (int y = 0; y < src.rows; ++y)
{
for (int x = 0; x < src.cols; ++x)
dst.at<T>(y, x) = static_cast<T>(std::sqrt(static_cast<float>(src.at<T>(y, x))));
}
}
void sqrtGold(const cv::Mat& src, cv::Mat& dst)
{
typedef void (*func_t)(const cv::Mat& src, cv::Mat& dst);
const func_t funcs[] =
{
sqrtImpl<uchar>, sqrtImpl<schar>, sqrtImpl<ushort>, sqrtImpl<short>,
sqrtImpl<int>, sqrtImpl<float>
};
funcs[src.depth()](src, dst);
}
}
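Note on the gold above: integer inputs are routed through float and truncated back to T, and the test then demands an exact match (tolerance 0.0), i.e. the GPU kernel is expected to use the same rounding path. A tiny standalone check of that arithmetic (the value is illustrative):

    #include <cmath>
    #include <cassert>

    int main()
    {
        // Same path as sqrtImpl<uchar>: compute sqrt in float, truncate to the
        // integer type. sqrt(200) ~= 14.142, so the stored result is 14.
        unsigned char v = static_cast<unsigned char>(std::sqrt(static_cast<float>(200)));
        assert(v == 14);
        return 0;
    }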
PARAM_TEST_CASE(Sqrt, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(Sqrt, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::gpu::GpuMat dst = createMat(size, type, useRoi);
cv::gpu::sqrt(loadMat(src, useRoi), dst);
cv::Mat dst_gold;
sqrtGold(src, dst_gold);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Sqrt, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_16UC1), MatType(CV_16SC1), MatType(CV_32FC1)),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Log
namespace
{
template <typename T> void logImpl(const cv::Mat& src, cv::Mat& dst)
{
dst.create(src.size(), src.type());
for (int y = 0; y < src.rows; ++y)
{
for (int x = 0; x < src.cols; ++x)
dst.at<T>(y, x) = static_cast<T>(std::log(static_cast<float>(src.at<T>(y, x))));
}
}
void logGold(const cv::Mat& src, cv::Mat& dst)
{
typedef void (*func_t)(const cv::Mat& src, cv::Mat& dst);
const func_t funcs[] =
{
logImpl<uchar>, logImpl<schar>, logImpl<ushort>, logImpl<short>,
logImpl<int>, logImpl<float>
};
funcs[src.depth()](src, dst);
}
}
PARAM_TEST_CASE(Log, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(Log, Accuracy)
{
cv::Mat src = randomMat(size, type, 1.0, 255.0);
cv::gpu::GpuMat dst = createMat(size, type, useRoi);
cv::gpu::log(loadMat(src, useRoi), dst);
cv::Mat dst_gold;
logGold(src, dst_gold);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-6);
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Log, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_16UC1), MatType(CV_16SC1), MatType(CV_32FC1)),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Exp
PARAM_TEST_CASE(Exp, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(Exp, Accuracy)
{
cv::Mat src = randomMat(size, type, 0.0, 10.0);
cv::gpu::GpuMat dst = createMat(size, type, useRoi);
cv::gpu::exp(loadMat(src, useRoi), dst);
cv::Mat dst_gold;
cv::exp(src, dst_gold);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-2);
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Exp, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_32FC1)),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// compare
PARAM_TEST_CASE(Compare, cv::gpu::DeviceInfo, cv::Size, MatDepth, CmpCode, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
- cv::Mat mat1;
- cv::Mat mat2;
- cv::Scalar val;
int depth;
int cmp_code;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
- type = GET_PARAM(1);
- useRoi = GET_PARAM(2);
size = GET_PARAM(1);
depth = GET_PARAM(2);
cmp_code = GET_PARAM(3);
useRoi = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
}
};
- cv::RNG& rng = TS::ptr()->get_rng();
TEST_P(Compare, Accuracy)
{
cv::Mat src1 = randomMat(size, depth);
cv::Mat src2 = randomMat(size, depth);
- size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
cv::gpu::GpuMat dst = createMat(size, CV_8UC1, useRoi);
cv::gpu::compare(loadMat(src1, useRoi), loadMat(src2, useRoi), dst, cmp_code);
- mat1 = randomMat(rng, size, type, 5, 16, false);
- mat2 = randomMat(rng, size, type, 5, 16, false);
cv::Mat dst_gold;
cv::compare(src1, src2, dst_gold, cmp_code);
- val = cv::Scalar(rng.uniform(1, 3), rng.uniform(1, 3), rng.uniform(1, 3), rng.uniform(1, 3));
- }
- };
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
- ////////////////////////////////////////////////////////////////////////////////
- // add
INSTANTIATE_TEST_CASE_P(GPU_Core, Compare, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
ALL_CMP_CODES,
WHOLE_SUBMAT));
- struct Add : ArithmTestBase {};
//////////////////////////////////////////////////////////////////////////////
// Bitwise_Array
- TEST_P(Add, Array)
PARAM_TEST_CASE(Bitwise_Array, cv::gpu::DeviceInfo, cv::Size, MatType)
{
- cv::Mat dst_gold;
- cv::add(mat1, mat2, dst_gold);
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
- cv::Mat dst;
cv::Mat src1;
cv::Mat src2;
- cv::gpu::GpuMat gpuRes;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
cv::gpu::setDevice(devInfo.deviceID());
- cv::gpu::add(loadMat(mat1, useRoi), loadMat(mat2, useRoi), gpuRes);
src1 = randomMat(size, type, 0.0, std::numeric_limits<int>::max());
src2 = randomMat(size, type, 0.0, std::numeric_limits<int>::max());
}
};
- gpuRes.download(dst);
TEST_P(Bitwise_Array, Not)
{
cv::gpu::GpuMat dst;
cv::gpu::bitwise_not(loadMat(src1), dst);
cv::Mat dst_gold = ~src1;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
- TEST_P(Add, Scalar)
TEST_P(Bitwise_Array, Or)
{
- cv::Mat dst_gold;
- cv::add(mat1, val, dst_gold);
cv::gpu::GpuMat dst;
cv::gpu::bitwise_or(loadMat(src1), loadMat(src2), dst);
- cv::Mat dst;
cv::Mat dst_gold = src1 | src2;
- cv::gpu::GpuMat gpuRes;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
- cv::gpu::add(loadMat(mat1, useRoi), val, gpuRes);
TEST_P(Bitwise_Array, And)
{
cv::gpu::GpuMat dst;
cv::gpu::bitwise_and(loadMat(src1), loadMat(src2), dst);
- gpuRes.download(dst);
cv::Mat dst_gold = src1 & src2;
- EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
- INSTANTIATE_TEST_CASE_P(Arithm, Add, Combine(
- ALL_DEVICES,
- Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_16UC1, CV_16UC3, CV_16UC4, CV_16SC1, CV_16SC2, CV_16SC3, CV_16SC4,
- CV_32SC1, CV_32SC2, CV_32SC3, CV_32FC1, CV_32FC2, CV_32FC3, CV_32FC4),
- WHOLE_SUBMAT));
TEST_P(Bitwise_Array, Xor)
{
cv::gpu::GpuMat dst;
cv::gpu::bitwise_xor(loadMat(src1), loadMat(src2), dst);
- ////////////////////////////////////////////////////////////////////////////////
- // subtract
cv::Mat dst_gold = src1 ^ src2;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Bitwise_Array, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
TYPES(CV_8U, CV_32S, 1, 4)));
- struct Subtract : ArithmTestBase {};
//////////////////////////////////////////////////////////////////////////////
// Bitwise_Scalar
- TEST_P(Subtract, Array)
PARAM_TEST_CASE(Bitwise_Scalar, cv::gpu::DeviceInfo, cv::Size, MatDepth, int)
{
- cv::Mat dst_gold;
- cv::subtract(mat1, mat2, dst_gold);
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int depth;
int channels;
- cv::Mat dst;
cv::Mat src;
cv::Scalar val;
- cv::gpu::GpuMat gpuRes;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
channels = GET_PARAM(3);
- cv::gpu::subtract(loadMat(mat1, useRoi), loadMat(mat2, useRoi), gpuRes);
cv::gpu::setDevice(devInfo.deviceID());
- gpuRes.download(dst);
src = randomMat(size, CV_MAKE_TYPE(depth, channels));
cv::Scalar_<int> ival = randomScalar(0.0, 255.0);
val = ival;
}
};
TEST_P(Bitwise_Scalar, Or)
{
cv::gpu::GpuMat dst;
cv::gpu::bitwise_or(loadMat(src), val, dst);
cv::Mat dst_gold;
cv::bitwise_or(src, val, dst_gold);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
- TEST_P(Subtract, Scalar)
TEST_P(Bitwise_Scalar, And)
{
- cv::Mat dst_gold;
- cv::subtract(mat1, val, dst_gold);
cv::gpu::GpuMat dst;
cv::gpu::bitwise_and(loadMat(src), val, dst);
- cv::Mat dst;
cv::Mat dst_gold;
cv::bitwise_and(src, val, dst_gold);
- cv::gpu::GpuMat gpuRes;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
- cv::gpu::subtract(loadMat(mat1, useRoi), val, gpuRes);
TEST_P(Bitwise_Scalar, Xor)
{
cv::gpu::GpuMat dst;
cv::gpu::bitwise_xor(loadMat(src), val, dst);
- gpuRes.download(dst);
cv::Mat dst_gold;
cv::bitwise_xor(src, val, dst_gold);
- EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
- INSTANTIATE_TEST_CASE_P(Arithm, Subtract, Combine(
INSTANTIATE_TEST_CASE_P(GPU_Core, Bitwise_Scalar, testing::Combine(
ALL_DEVICES,
- Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_16UC1, CV_16UC3, CV_16UC4, CV_16SC1, CV_16SC2, CV_16SC3, CV_16SC4,
- CV_32SC1, CV_32SC2, CV_32SC3, CV_32FC1, CV_32FC2, CV_32FC3, CV_32FC4),
- WHOLE_SUBMAT));
- ////////////////////////////////////////////////////////////////////////////////
- // multiply
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_32S)),
testing::Values(1, 3, 4)));
- struct Multiply : ArithmTestBase {};
//////////////////////////////////////////////////////////////////////////////
// RShift
- TEST_P(Multiply, Array)
namespace
{
- cv::Mat dst_gold;
- cv::multiply(mat1, mat2, dst_gold);
template <typename T> void rshiftImpl(const cv::Mat& src, cv::Scalar_<int> val, cv::Mat& dst)
{
const int cn = src.channels();
- cv::Mat dst;
dst.create(src.size(), src.type());
- cv::gpu::GpuMat gpuRes;
for (int y = 0; y < src.rows; ++y)
{
for (int x = 0; x < src.cols; ++x)
{
for (int c = 0; c < cn; ++c)
dst.at<T>(y, x * cn + c) = src.at<T>(y, x * cn + c) >> val.val[c];
}
}
}
- cv::gpu::multiply(loadMat(mat1, useRoi), loadMat(mat2, useRoi), gpuRes);
void rshiftGold(const cv::Mat& src, cv::Scalar_<int> val, cv::Mat& dst)
{
typedef void (*func_t)(const cv::Mat& src, cv::Scalar_<int> val, cv::Mat& dst);
- gpuRes.download(dst);
const func_t funcs[] =
{
rshiftImpl<uchar>, rshiftImpl<schar>, rshiftImpl<ushort>, rshiftImpl<short>, rshiftImpl<int>
};
- EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
funcs[src.depth()](src, val, dst);
}
}
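The gold implementation above applies an independent shift per channel, taken from val.val[c]. A worked single-pixel instance of what rshiftGold computes (the values are illustrative):

    #include <opencv2/core/core.hpp>
    #include <cassert>

    int main()
    {
        cv::Mat src(1, 1, CV_8UC3, cv::Scalar(200, 100, 50));
        cv::Scalar_<int> val(1, 2, 3);
        cv::Vec3b p = src.at<cv::Vec3b>(0, 0);
        assert((p[0] >> val[0]) == 100); // 200 >> 1
        assert((p[1] >> val[1]) == 25);  // 100 >> 2
        assert((p[2] >> val[2]) == 6);   //  50 >> 3
        return 0;
    }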
- TEST_P(Multiply, Scalar)
PARAM_TEST_CASE(RShift, cv::gpu::DeviceInfo, cv::Size, MatDepth, int, UseRoi)
{
- cv::Mat dst_gold;
- cv::multiply(mat1, val, dst_gold);
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int depth;
int channels;
bool useRoi;
- cv::Mat dst;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
channels = GET_PARAM(3);
useRoi = GET_PARAM(4);
- cv::gpu::GpuMat gpuRes;
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(RShift, Accuracy)
{
int type = CV_MAKE_TYPE(depth, channels);
cv::Mat src = randomMat(size, type);
cv::Scalar_<int> val = randomScalar(0.0, 8.0);
- cv::gpu::multiply(loadMat(mat1, useRoi), val, gpuRes);
cv::gpu::GpuMat dst = createMat(size, type, useRoi);
cv::gpu::rshift(loadMat(src, useRoi), val, dst);
- gpuRes.download(dst);
cv::Mat dst_gold;
rshiftGold(src, val, dst_gold);
- EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
- INSTANTIATE_TEST_CASE_P(Arithm, Multiply, Combine(
INSTANTIATE_TEST_CASE_P(GPU_Core, RShift, testing::Combine(
ALL_DEVICES,
- Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_16UC1, CV_16UC3, CV_16UC4, CV_16SC1, CV_16SC3, CV_16SC4,
- CV_32SC1, CV_32SC3, CV_32FC1, CV_32FC3, CV_32FC4),
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_8U), MatDepth(CV_8S), MatDepth(CV_16U), MatDepth(CV_16S), MatDepth(CV_32S)),
testing::Values(1, 3, 4),
WHOLE_SUBMAT));
- ////////////////////////////////////////////////////////////////////////////////
- // divide
- struct Divide : ArithmTestBase {};
//////////////////////////////////////////////////////////////////////////////
// LShift
- TEST_P(Divide, Array)
namespace
{
- cv::Mat dst_gold;
- cv::divide(mat1, mat2, dst_gold);
template <typename T> void lshiftImpl(const cv::Mat& src, cv::Scalar_<int> val, cv::Mat& dst)
{
const int cn = src.channels();
- cv::Mat dst;
dst.create(src.size(), src.type());
- cv::gpu::GpuMat gpuRes;
for (int y = 0; y < src.rows; ++y)
{
for (int x = 0; x < src.cols; ++x)
{
for (int c = 0; c < cn; ++c)
dst.at<T>(y, x * cn + c) = src.at<T>(y, x * cn + c) << val.val[c];
}
}
}
- cv::gpu::divide(loadMat(mat1, useRoi), loadMat(mat2, useRoi), gpuRes);
void lshiftGold(const cv::Mat& src, cv::Scalar_<int> val, cv::Mat& dst)
{
typedef void (*func_t)(const cv::Mat& src, cv::Scalar_<int> val, cv::Mat& dst);
- gpuRes.download(dst);
const func_t funcs[] =
{
lshiftImpl<uchar>, lshiftImpl<schar>, lshiftImpl<ushort>, lshiftImpl<short>, lshiftImpl<int>
};
- EXPECT_MAT_NEAR(dst_gold, dst, mat1.depth() == CV_32F ? 1e-5 : 1);
funcs[src.depth()](src, val, dst);
}
}
- TEST_P(Divide, Scalar)
PARAM_TEST_CASE(LShift, cv::gpu::DeviceInfo, cv::Size, MatDepth, int, UseRoi)
{
- cv::Mat dst_gold;
- cv::divide(mat1, val, dst_gold);
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int depth;
int channels;
bool useRoi;
- cv::Mat dst;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
channels = GET_PARAM(3);
useRoi = GET_PARAM(4);
- cv::gpu::GpuMat gpuRes;
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(LShift, Accuracy)
{
int type = CV_MAKE_TYPE(depth, channels);
cv::Mat src = randomMat(size, type);
cv::Scalar_<int> val = randomScalar(0.0, 8.0);
- cv::gpu::divide(loadMat(mat1, useRoi), val, gpuRes);
cv::gpu::GpuMat dst = createMat(size, type, useRoi);
cv::gpu::lshift(loadMat(src, useRoi), val, dst);
- gpuRes.download(dst);
cv::Mat dst_gold;
lshiftGold(src, val, dst_gold);
- EXPECT_MAT_NEAR(dst_gold, dst, mat1.depth() == CV_32F ? 1e-5 : 1);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
- INSTANTIATE_TEST_CASE_P(Arithm, Divide, Combine(
INSTANTIATE_TEST_CASE_P(GPU_Core, LShift, testing::Combine(
ALL_DEVICES,
- Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_16UC1, CV_16UC3, CV_16UC4, CV_16SC1, CV_16SC3, CV_16SC4,
- CV_32SC1, CV_32SC3, CV_32FC1, CV_32FC3, CV_32FC4),
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_32S)),
testing::Values(1, 3, 4),
WHOLE_SUBMAT));
- ////////////////////////////////////////////////////////////////////////////////
- // transpose
- struct Transpose : ArithmTestBase {};
//////////////////////////////////////////////////////////////////////////////
// Min
- TEST_P(Transpose, Accuracy)
PARAM_TEST_CASE(Min, cv::gpu::DeviceInfo, cv::Size, MatDepth, UseRoi)
{
- cv::Mat dst_gold;
- cv::transpose(mat1, dst_gold);
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int depth;
bool useRoi;
- cv::Mat dst;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
useRoi = GET_PARAM(3);
- cv::gpu::GpuMat gpuRes;
cv::gpu::setDevice(devInfo.deviceID());
}
};
- cv::gpu::transpose(loadMat(mat1, useRoi), gpuRes);
TEST_P(Min, Accuracy)
{
cv::Mat src1 = randomMat(size, depth);
cv::Mat src2 = randomMat(size, depth);
- gpuRes.download(dst);
cv::gpu::GpuMat dst = createMat(size, depth, useRoi);
cv::gpu::min(loadMat(src1, useRoi), loadMat(src2, useRoi), dst);
cv::Mat dst_gold = cv::min(src1, src2);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
- INSTANTIATE_TEST_CASE_P(Arithm, Transpose, Combine(
INSTANTIATE_TEST_CASE_P(GPU_Core, Min, testing::Combine(
ALL_DEVICES,
- Values(CV_8UC1, CV_8UC4, CV_8SC1, CV_8SC4, CV_16UC2, CV_16SC2, CV_32SC1, CV_32SC2, CV_32FC1, CV_32FC2, CV_64FC1),
DIFFERENT_SIZES,
ALL_DEPTH,
WHOLE_SUBMAT));
- ////////////////////////////////////////////////////////////////////////////////
- // absdiff
- struct Absdiff : ArithmTestBase {};
//////////////////////////////////////////////////////////////////////////////
// Max
- TEST_P(Absdiff, Array)
PARAM_TEST_CASE(Max, cv::gpu::DeviceInfo, cv::Size, MatDepth, UseRoi)
{
- cv::Mat dst_gold;
- cv::absdiff(mat1, mat2, dst_gold);
- cv::Mat dst;
- cv::gpu::GpuMat gpuRes;
- cv::gpu::absdiff(loadMat(mat1, useRoi), loadMat(mat2, useRoi), gpuRes);
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int depth;
bool useRoi;
- gpuRes.download(dst);
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
useRoi = GET_PARAM(3);
- EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
- }
cv::gpu::setDevice(devInfo.deviceID());
}
};
- TEST_P(Absdiff, Scalar)
TEST_P(Max, Accuracy)
{
- cv::Mat dst_gold;
- cv::absdiff(mat1, val, dst_gold);
- cv::Mat dst;
- cv::gpu::GpuMat gpuRes;
cv::Mat src1 = randomMat(size, depth);
cv::Mat src2 = randomMat(size, depth);
- cv::gpu::absdiff(loadMat(mat1, useRoi), val, gpuRes);
cv::gpu::GpuMat dst = createMat(size, depth, useRoi);
cv::gpu::max(loadMat(src1, useRoi), loadMat(src2, useRoi), dst);
- gpuRes.download(dst);
cv::Mat dst_gold = cv::max(src1, src2);
- EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
- INSTANTIATE_TEST_CASE_P(Arithm, Absdiff, Combine(
INSTANTIATE_TEST_CASE_P(GPU_Core, Max, testing::Combine(
ALL_DEVICES,
- Values(CV_8UC1, CV_16UC1, CV_32SC1, CV_32FC1),
DIFFERENT_SIZES,
ALL_DEPTH,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// abs
struct Abs : ArithmTestBase {};
TEST_P(Abs, Array)
{
cv::Mat dst_gold = cv::abs(mat1);
cv::Mat dst;
cv::gpu::GpuMat gpuRes;
cv::gpu::abs(loadMat(mat1, useRoi), gpuRes);
gpuRes.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
INSTANTIATE_TEST_CASE_P(Arithm, Abs, Combine(
ALL_DEVICES,
Values(CV_16SC1, CV_32FC1),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Sqr
struct Sqr : ArithmTestBase {};
TEST_P(Sqr, Array)
{
cv::Mat dst_gold;
cv::multiply(mat1, mat1, dst_gold);
cv::Mat dst;
cv::gpu::GpuMat gpuRes;
cv::gpu::sqr(loadMat(mat1, useRoi), gpuRes);
gpuRes.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
INSTANTIATE_TEST_CASE_P(Arithm, Sqr, Combine(
ALL_DEVICES,
Values(CV_8UC1, CV_16UC1, CV_16SC1, CV_32FC1),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Sqrt
struct Sqrt : ArithmTestBase {};
TEST_P(Sqrt, Array)
{
cv::Mat dst_gold;
cv::sqrt(mat1, dst_gold);
cv::Mat dst;
cv::gpu::GpuMat gpuRes;
cv::gpu::sqrt(loadMat(mat1, useRoi), gpuRes);
gpuRes.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-6);
}
INSTANTIATE_TEST_CASE_P(Arithm, Sqrt, Combine(
ALL_DEVICES,
Values(MatType(CV_32FC1)),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
- // compare
- PARAM_TEST_CASE(Compare, cv::gpu::DeviceInfo, MatType, CmpCode, UseRoi)
using namespace cvtest;
using namespace testing;
PARAM_TEST_CASE(ArithmTestBase, cv::gpu::DeviceInfo, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
int type;
- int cmp_code;
bool useRoi;
cv::Size size;
- cv::Mat mat1, mat2;
- cv::Mat dst_gold;
cv::Mat mat1;
cv::Mat mat2;
cv::Scalar val;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
- cmp_code = GET_PARAM(2);
- useRoi = GET_PARAM(3);
useRoi = GET_PARAM(2);
cv::gpu::setDevice(devInfo.deviceID());
......@@ -421,30 +1499,37 @@ PARAM_TEST_CASE(Compare, cv::gpu::DeviceInfo, MatType, CmpCode, UseRoi)
size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
- mat1 = randomMat(rng, size, type, 1, 16, false);
- mat2 = randomMat(rng, size, type, 1, 16, false);
mat1 = randomMat(rng, size, type, 5, 16, false);
mat2 = randomMat(rng, size, type, 5, 16, false);
- cv::compare(mat1, mat2, dst_gold, cmp_code);
val = cv::Scalar(rng.uniform(1, 3), rng.uniform(1, 3), rng.uniform(1, 3), rng.uniform(1, 3));
}
};
- TEST_P(Compare, Accuracy)
////////////////////////////////////////////////////////////////////////////////
// transpose
struct Transpose : ArithmTestBase {};
TEST_P(Transpose, Accuracy)
{
cv::Mat dst_gold;
cv::transpose(mat1, dst_gold);
cv::Mat dst;
cv::gpu::GpuMat gpuRes;
- cv::gpu::compare(loadMat(mat1, useRoi), loadMat(mat2, useRoi), gpuRes, cmp_code);
cv::gpu::transpose(loadMat(mat1, useRoi), gpuRes);
gpuRes.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
- INSTANTIATE_TEST_CASE_P(Arithm, Compare, Combine(
INSTANTIATE_TEST_CASE_P(Arithm, Transpose, Combine(
ALL_DEVICES,
- Values(CV_8UC1, CV_16UC1, CV_32SC1),
- Values((int) cv::CMP_EQ, (int) cv::CMP_GT, (int) cv::CMP_GE, (int) cv::CMP_LT, (int) cv::CMP_LE, (int) cv::CMP_NE),
Values(CV_8UC1, CV_8UC4, CV_8SC1, CV_8SC4, CV_16UC2, CV_16SC2, CV_32SC1, CV_32SC2, CV_32FC1, CV_32FC2, CV_64FC1),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
......@@ -650,53 +1735,6 @@ INSTANTIATE_TEST_CASE_P(Arithm, LUT, Combine(
Values(CV_8UC1, CV_8UC3),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// exp
PARAM_TEST_CASE(Exp, cv::gpu::DeviceInfo, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
bool useRoi;
cv::Size size;
cv::Mat mat;
cv::Mat dst_gold;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
useRoi = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
mat = randomMat(rng, size, CV_32FC1, -10.0, 2.0, false);
cv::exp(mat, dst_gold);
}
};
TEST_P(Exp, Accuracy)
{
cv::Mat dst;
cv::gpu::GpuMat gpu_res;
cv::gpu::exp(loadMat(mat, useRoi), gpu_res);
gpu_res.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
INSTANTIATE_TEST_CASE_P(Arithm, Exp, Combine(
ALL_DEVICES,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// pow
......@@ -756,53 +1794,6 @@ INSTANTIATE_TEST_CASE_P(Arithm, Pow, Combine(
Values(CV_32F, CV_32FC3),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// log
PARAM_TEST_CASE(Log, cv::gpu::DeviceInfo, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
bool useRoi;
cv::Size size;
cv::Mat mat;
cv::Mat dst_gold;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
useRoi = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
mat = randomMat(rng, size, CV_32FC1, 0.0, 100.0, false);
cv::log(mat, dst_gold);
}
};
TEST_P(Log, Accuracy)
{
cv::Mat dst;
cv::gpu::GpuMat gpu_res;
cv::gpu::log(loadMat(mat, useRoi), gpu_res);
gpu_res.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
INSTANTIATE_TEST_CASE_P(Arithm, Log, Combine(
ALL_DEVICES,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// magnitude
......@@ -1297,203 +2288,6 @@ INSTANTIATE_TEST_CASE_P(Arithm, Sum, Combine(
Values(CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F),
WHOLE_SUBMAT));
//////////////////////////////////////////////////////////////////////////////
// bitwise
PARAM_TEST_CASE(Bitwise, cv::gpu::DeviceInfo, MatType)
{
cv::gpu::DeviceInfo devInfo;
int type;
cv::Size size;
cv::Mat mat1;
cv::Mat mat2;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = cvtest::TS::ptr()->get_rng();
size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
mat1.create(size, type);
mat2.create(size, type);
for (int i = 0; i < mat1.rows; ++i)
{
cv::Mat row1(1, static_cast<int>(mat1.cols * mat1.elemSize()), CV_8U, (void*)mat1.ptr(i));
rng.fill(row1, cv::RNG::UNIFORM, cv::Scalar(0), cv::Scalar(255));
cv::Mat row2(1, static_cast<int>(mat2.cols * mat2.elemSize()), CV_8U, (void*)mat2.ptr(i));
rng.fill(row2, cv::RNG::UNIFORM, cv::Scalar(0), cv::Scalar(255));
}
}
};
TEST_P(Bitwise, Not)
{
if (mat1.depth() == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Mat dst_gold = ~mat1;
cv::Mat dst;
cv::gpu::GpuMat dev_dst;
cv::gpu::bitwise_not(loadMat(mat1), dev_dst);
dev_dst.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
TEST_P(Bitwise, Or)
{
if (mat1.depth() == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Mat dst_gold = mat1 | mat2;
cv::Mat dst;
cv::gpu::GpuMat dev_dst;
cv::gpu::bitwise_or(loadMat(mat1), loadMat(mat2), dev_dst);
dev_dst.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
TEST_P(Bitwise, And)
{
if (mat1.depth() == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Mat dst_gold = mat1 & mat2;
cv::Mat dst;
cv::gpu::GpuMat dev_dst;
cv::gpu::bitwise_and(loadMat(mat1), loadMat(mat2), dev_dst);
dev_dst.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
TEST_P(Bitwise, Xor)
{
if (mat1.depth() == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Mat dst_gold = mat1 ^ mat2;
cv::Mat dst;
cv::gpu::GpuMat dev_dst;
cv::gpu::bitwise_xor(loadMat(mat1), loadMat(mat2), dev_dst);
dev_dst.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
INSTANTIATE_TEST_CASE_P(Arithm, Bitwise, Combine(
ALL_DEVICES,
ALL_TYPES));
PARAM_TEST_CASE(BitwiseScalar, cv::gpu::DeviceInfo, MatType)
{
cv::gpu::DeviceInfo devInfo;
int type;
cv::Size size;
cv::Mat mat;
cv::Scalar sc;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = cvtest::TS::ptr()->get_rng();
size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
mat.create(size, type);
for (int i = 0; i < mat.rows; ++i)
{
cv::Mat row(1, static_cast<int>(mat.cols * mat.elemSize()), CV_8U, (void*)mat.ptr(i));
rng.fill(row, cv::RNG::UNIFORM, cv::Scalar(0), cv::Scalar(255));
}
sc = cv::Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
}
};
TEST_P(BitwiseScalar, Or)
{
cv::Mat dst_gold;
cv::bitwise_or(mat, sc, dst_gold);
cv::Mat dst;
cv::gpu::GpuMat dev_dst;
cv::gpu::bitwise_or(loadMat(mat), sc, dev_dst);
dev_dst.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
TEST_P(BitwiseScalar, And)
{
cv::Mat dst_gold;
cv::bitwise_and(mat, sc, dst_gold);
cv::Mat dst;
cv::gpu::GpuMat dev_dst;
cv::gpu::bitwise_and(loadMat(mat), sc, dev_dst);
dev_dst.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
TEST_P(BitwiseScalar, Xor)
{
cv::Mat dst_gold;
cv::bitwise_xor(mat, sc, dst_gold);
cv::Mat dst;
cv::gpu::GpuMat dev_dst;
cv::gpu::bitwise_xor(loadMat(mat), sc, dev_dst);
dev_dst.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
INSTANTIATE_TEST_CASE_P(Arithm, BitwiseScalar, Combine(
ALL_DEVICES,
Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_16UC1, CV_16UC3, CV_16UC4, CV_32SC1, CV_32SC3, CV_32SC4)));
//////////////////////////////////////////////////////////////////////////////
// addWeighted
......
......@@ -162,10 +162,37 @@ CV_FLAGS(DftFlags, cv::DFT_INVERSE, cv::DFT_SCALE, cv::DFT_ROWS, cv::DFT_COMPLEX
#define ALL_DEVICES testing::ValuesIn(devices())
#define DEVICES(feature) testing::ValuesIn(devices(feature))
- #define DIFFERENT_SIZES testing::Values(cv::Size(128, 128), cv::Size(113, 113))
#define ALL_DEPTH testing::Values(MatDepth(CV_8U), MatDepth(CV_8S), MatDepth(CV_16U), MatDepth(CV_16S), MatDepth(CV_32S), MatDepth(CV_32F), MatDepth(CV_64F))
#define ALL_TYPES testing::ValuesIn(all_types())
#define TYPES(depth_start, depth_end, cn_start, cn_end) testing::ValuesIn(types(depth_start, depth_end, cn_start, cn_end))
#define DIFFERENT_SIZES testing::Values(cv::Size(128, 128), cv::Size(113, 113))
#define DEPTH_PAIRS testing::Values(std::make_pair(MatDepth(CV_8U), MatDepth(CV_8U)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_16U)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_16S)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_32S)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_16U), MatDepth(CV_16U)), \
std::make_pair(MatDepth(CV_16U), MatDepth(CV_32S)), \
std::make_pair(MatDepth(CV_16U), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_16U), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_16S), MatDepth(CV_16S)), \
std::make_pair(MatDepth(CV_16S), MatDepth(CV_32S)), \
std::make_pair(MatDepth(CV_16S), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_16S), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_32S), MatDepth(CV_32S)), \
std::make_pair(MatDepth(CV_32S), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_32S), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_32F), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_32F), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_64F), MatDepth(CV_64F)))
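DEPTH_PAIRS enumerates only widening (source depth <= destination depth) combinations, so a test drawn from it never requests a narrowing destination type. A minimal host-side sketch of why the wider dst matters (the values are illustrative):

    #include <opencv2/core/core.hpp>

    int main()
    {
        cv::Mat a(4, 4, CV_8UC1, cv::Scalar(200));
        cv::Mat b(4, 4, CV_8UC1, cv::Scalar(100));
        cv::Mat sum;
        cv::add(a, b, sum, cv::noArray(), CV_16S); // dtype plays the role of depth.second
        CV_Assert(sum.type() == CV_16SC1 && sum.at<short>(0, 0) == 300); // no 8-bit clipping
        return 0;
    }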
#define WHOLE testing::Values(UseRoi(false))
#define SUBMAT testing::Values(UseRoi(true))
......@@ -173,4 +200,6 @@ CV_FLAGS(DftFlags, cv::DFT_INVERSE, cv::DFT_SCALE, cv::DFT_ROWS, cv::DFT_COMPLEX
#define DIRECT_INVERSE testing::Values(Inverse(false), Inverse(true))
#define ALL_CMP_CODES testing::Values(CmpCode(cv::CMP_EQ), CmpCode(cv::CMP_NE), CmpCode(cv::CMP_GT), CmpCode(cv::CMP_GE), CmpCode(cv::CMP_LT), CmpCode(cv::CMP_LE))
#endif // __OPENCV_TEST_UTILITY_HPP__