Commit eaea6782 authored by Vladislav Vinogradov's avatar Vladislav Vinogradov

added more assertions on device features to gpu functions and tests

moved TargetArchs and DeviceInfo to core
fixed bug in GpuMat::copyTo with mask (incorrect index in function table)
parent e8fab91d
@@ -54,94 +54,6 @@
namespace cv { namespace gpu {
//////////////////////////////// Initialization & Info ////////////////////////
//! This is the only function that does not throw exceptions if the library is compiled without CUDA.
CV_EXPORTS int getCudaEnabledDeviceCount();
//! The functions below throw cv::Exception if the library is compiled without CUDA.
CV_EXPORTS void setDevice(int device);
CV_EXPORTS int getDevice();
//! Explicitly destroys and cleans up all resources associated with the current device in the current process.
//! Any subsequent API call to this device will reinitialize the device.
CV_EXPORTS void resetDevice();
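//! A minimal usage sketch of the device management API above (an illustrative
//! example, not part of this header), assuming a CUDA-enabled build:
//!
//!     if (getCudaEnabledDeviceCount() > 0)   // safe to call without CUDA
//!     {
//!         setDevice(0);                      // select the first GPU
//!         int id = getDevice();              // query the current GPU
//!         // ... run GPU code ...
//!         resetDevice();                     // release all device resources
//!     }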
enum FeatureSet
{
FEATURE_SET_COMPUTE_10 = 10,
FEATURE_SET_COMPUTE_11 = 11,
FEATURE_SET_COMPUTE_12 = 12,
FEATURE_SET_COMPUTE_13 = 13,
FEATURE_SET_COMPUTE_20 = 20,
FEATURE_SET_COMPUTE_21 = 21,
GLOBAL_ATOMICS = FEATURE_SET_COMPUTE_11,
SHARED_ATOMICS = FEATURE_SET_COMPUTE_12,
NATIVE_DOUBLE = FEATURE_SET_COMPUTE_13
};
// Gives information about what GPU archs this OpenCV GPU module was
// compiled for
class CV_EXPORTS TargetArchs
{
public:
static bool builtWith(FeatureSet feature_set);
static bool has(int major, int minor);
static bool hasPtx(int major, int minor);
static bool hasBin(int major, int minor);
static bool hasEqualOrLessPtx(int major, int minor);
static bool hasEqualOrGreater(int major, int minor);
static bool hasEqualOrGreaterPtx(int major, int minor);
static bool hasEqualOrGreaterBin(int major, int minor);
private:
TargetArchs();
};
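// A minimal sketch of probing the compiled-in targets (illustrative only;
// the 2.0 version numbers are an assumption, not from this commit):
//
//     bool canRun20 = TargetArchs::hasEqualOrGreaterPtx(2, 0)
//                  || TargetArchs::hasBin(2, 0);
//     if (!TargetArchs::builtWith(GLOBAL_ATOMICS))
//         CV_Error(CV_StsNotImplemented, "module built without global atomics");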
// Gives information about the given GPU
class CV_EXPORTS DeviceInfo
{
public:
// Creates DeviceInfo object for the current GPU
DeviceInfo() : device_id_(getDevice()) { query(); }
// Creates DeviceInfo object for the given GPU
DeviceInfo(int device_id) : device_id_(device_id) { query(); }
std::string name() const { return name_; }
// Return compute capability versions
int majorVersion() const { return majorVersion_; }
int minorVersion() const { return minorVersion_; }
int multiProcessorCount() const { return multi_processor_count_; }
size_t freeMemory() const;
size_t totalMemory() const;
// Checks whether device supports the given feature
bool supports(FeatureSet feature_set) const;
// Checks whether the GPU module can be run on the given device
bool isCompatible() const;
int deviceID() const { return device_id_; }
private:
void query();
void queryMemory(size_t& free_memory, size_t& total_memory) const;
int device_id_;
std::string name_;
int multi_processor_count_;
int majorVersion_;
int minorVersion_;
};
CV_EXPORTS void printCudaDeviceInfo(int device);
CV_EXPORTS void printShortCudaDeviceInfo(int device);
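// A short sketch combining TargetArchs and DeviceInfo, mirroring the guard
// pattern this commit adds to the gpu functions (illustrative only):
//
//     DeviceInfo info;                     // queries the current device
//     if (!info.isCompatible())
//         CV_Error(CV_StsNotImplemented, "GPU module not built for this device");
//     bool hasDouble = TargetArchs::builtWith(NATIVE_DOUBLE)
//                   && info.supports(NATIVE_DOUBLE);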
//////////////////////////////// CudaMem ////////////////////////////////
// CudaMem is a limited cv::Mat with page-locked memory allocation.
// Page-locked memory is only needed for asynchronous and faster copying to the GPU.
...
@@ -118,6 +118,9 @@ void cv::gpu::meanStdDev(const GpuMat& src, Scalar& mean, Scalar& stddev, GpuMat
{
CV_Assert(src.type() == CV_8UC1);
if (!TargetArchs::builtWith(FEATURE_SET_COMPUTE_13) || !DeviceInfo().supports(FEATURE_SET_COMPUTE_13))
CV_Error(CV_StsNotImplemented, "Not sufficient compute capability");
NppiSize sz;
sz.width = src.cols;
sz.height = src.rows;
...
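// Caller-side view of the new guard (an illustrative sketch, not from this
// commit; d_src stands for any CV_8UC1 GpuMat): meanStdDev now requires
// compute capability 1.3 in both the binary and the device, and signals
// anything less with CV_StsNotImplemented:
//
//     cv::Scalar mean, stddev;
//     try { cv::gpu::meanStdDev(d_src, mean, stddev); }
//     catch (const cv::Exception& e) { /* e.code == CV_StsNotImplemented */ }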
@@ -55,10 +55,10 @@ void cv::gpu::split(const GpuMat& /*src*/, vector<GpuMat>& /*dst*/, Stream& /*st
#else /* !defined (HAVE_CUDA) */
namespace cv { namespace gpu { namespace device
{
namespace split_merge
{
void merge_caller(const DevMem2Db* src, DevMem2Db& dst, int total_channels, size_t elem_size, const cudaStream_t& stream);
void split_caller(const DevMem2Db& src, DevMem2Db* dst, int num_channels, size_t elem_size1, const cudaStream_t& stream);
}
@@ -66,7 +66,7 @@ namespace cv { namespace gpu { namespace device
namespace
{
void merge(const GpuMat* src, size_t n, GpuMat& dst, const cudaStream_t& stream)
{
using namespace ::cv::gpu::device::split_merge;
@@ -76,6 +76,12 @@ namespace
int depth = src[0].depth();
Size size = src[0].size();
if (depth == CV_64F)
{
if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
}
bool single_channel_only = true;
int total_channels = 0;
@@ -90,9 +96,9 @@ namespace
CV_Assert(single_channel_only);
CV_Assert(total_channels <= 4);
if (total_channels == 1)
src[0].copyTo(dst);
else
{
dst.create(size, CV_MAKETYPE(depth, total_channels));
@@ -102,10 +108,10 @@ namespace
DevMem2Db dst_as_devmem(dst);
merge_caller(src_as_devmem, dst_as_devmem, total_channels, CV_ELEM_SIZE(depth), stream);
}
}
void split(const GpuMat& src, GpuMat* dst, const cudaStream_t& stream)
{
using namespace ::cv::gpu::device::split_merge;
@@ -115,6 +121,12 @@ namespace
int num_channels = src.channels();
Size size = src.size();
if (depth == CV_64F)
{
if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
}
if (num_channels == 1)
{
src.copyTo(dst[0]);
@@ -135,23 +147,23 @@ namespace
}
}
void cv::gpu::merge(const GpuMat* src, size_t n, GpuMat& dst, Stream& stream)
{
::merge(src, n, dst, StreamAccessor::getStream(stream));
}
void cv::gpu::merge(const vector<GpuMat>& src, GpuMat& dst, Stream& stream)
{
::merge(&src[0], src.size(), dst, StreamAccessor::getStream(stream));
}
void cv::gpu::split(const GpuMat& src, GpuMat* dst, Stream& stream)
{
::split(src, dst, StreamAccessor::getStream(stream));
}
void cv::gpu::split(const GpuMat& src, vector<GpuMat>& dst, Stream& stream)
{
dst.resize(src.channels());
if(src.channels() > 0)
...
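// A hypothetical caller of the guarded merge/split entry points (names,
// sizes, and values are illustrative assumptions, not from this commit):
//
//     std::vector<cv::gpu::GpuMat> planes;
//     for (int i = 0; i < 3; ++i)
//         planes.push_back(cv::gpu::GpuMat(64, 64, CV_64FC1, cv::Scalar::all(i)));
//     cv::gpu::GpuMat packed;
//     cv::gpu::merge(planes, packed);   // throws CV_StsUnsupportedFormat unless
//                                       // NATIVE_DOUBLE is built in and supported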
@@ -43,6 +43,138 @@
namespace {
////////////////////////////////////////////////////////////////////////////////
// Merge
PARAM_TEST_CASE(Merge, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int depth;
int channels;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
channels = GET_PARAM(3);
useRoi = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(Merge, Accuracy)
{
std::vector<cv::Mat> src;
src.reserve(channels);
for (int i = 0; i < channels; ++i)
src.push_back(cv::Mat(size, depth, cv::Scalar::all(i)));
std::vector<cv::gpu::GpuMat> d_src;
for (int i = 0; i < channels; ++i)
d_src.push_back(loadMat(src[i], useRoi));
if (depth == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat dst;
cv::gpu::merge(d_src, dst);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat dst;
cv::gpu::merge(d_src, dst);
cv::Mat dst_gold;
cv::merge(src, dst_gold);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Merge, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
testing::Values(1, 2, 3, 4),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Split
PARAM_TEST_CASE(Split, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int depth;
int channels;
bool useRoi;
int type;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
channels = GET_PARAM(3);
useRoi = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
type = CV_MAKE_TYPE(depth, channels);
}
};
TEST_P(Split, Accuracy)
{
cv::Mat src = randomMat(size, type);
if (depth == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
std::vector<cv::gpu::GpuMat> dst;
cv::gpu::split(loadMat(src), dst);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
std::vector<cv::gpu::GpuMat> dst;
cv::gpu::split(loadMat(src, useRoi), dst);
std::vector<cv::Mat> dst_gold;
cv::split(src, dst_gold);
ASSERT_EQ(dst_gold.size(), dst.size());
for (size_t i = 0; i < dst_gold.size(); ++i)
{
EXPECT_MAT_NEAR(dst_gold[i], dst[i], 0.0);
}
}
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Split, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
testing::Values(1, 2, 3, 4),
WHOLE_SUBMAT));
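// Note on the pattern above: the try/catch blocks assert the error code only
// when the exception actually fires. A stricter variant (a sketch, not part
// of this commit) would also fail the test when nothing is thrown:
//
//     try
//     {
//         cv::gpu::merge(d_src, dst);
//         FAIL() << "expected cv::Exception";
//     }
//     catch (const cv::Exception& e)
//     {
//         ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
//     }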
////////////////////////////////////////////////////////////////////////////////
// Add_Array
@@ -1974,7 +2106,7 @@ TEST_P(AddWeighted, Accuracy)
cv::Mat dst_gold;
cv::addWeighted(src1, alpha, src2, beta, gamma, dst_gold, dst_depth);
EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 1.0 : 1e-3);
}
}
@@ -2487,16 +2619,32 @@ TEST_P(MeanStdDev, Accuracy)
{
cv::Mat src = randomMat(size, CV_8UC1);
if (!supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_13))
{
try
{
cv::Scalar mean;
cv::Scalar stddev;
cv::gpu::meanStdDev(loadMat(src, useRoi), mean, stddev);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsNotImplemented, e.code);
}
}
else
{
cv::Scalar mean;
cv::Scalar stddev;
cv::gpu::meanStdDev(loadMat(src, useRoi), mean, stddev);
cv::Scalar mean_gold;
cv::Scalar stddev_gold;
cv::meanStdDev(src, mean_gold, stddev_gold);
EXPECT_SCALAR_NEAR(mean_gold, mean, 1e-5);
EXPECT_SCALAR_NEAR(stddev_gold, stddev, 1e-5);
}
}
INSTANTIATE_TEST_CASE_P(GPU_Core, MeanStdDev, testing::Combine(
...
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace {
////////////////////////////////////////////////////////////////////////////////
// SetTo
PARAM_TEST_CASE(SetTo, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(SetTo, Zero)
{
cv::Scalar zero = cv::Scalar::all(0);
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(zero);
EXPECT_MAT_NEAR(cv::Mat::zeros(size, type), mat, 0.0);
}
TEST_P(SetTo, SameVal)
{
cv::Scalar val = cv::Scalar::all(randomDouble(0.0, 255.0));
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
EXPECT_MAT_NEAR(cv::Mat(size, type, val), mat, 0.0);
}
}
TEST_P(SetTo, DifferentVal)
{
cv::Scalar val = randomScalar(0.0, 255.0);
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
EXPECT_MAT_NEAR(cv::Mat(size, type, val), mat, 0.0);
}
}
TEST_P(SetTo, Masked)
{
cv::Scalar val = randomScalar(0.0, 255.0);
cv::Mat mat_gold = randomMat(size, type);
cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val, loadMat(mask));
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat mat = loadMat(mat_gold, useRoi);
mat.setTo(val, loadMat(mask, useRoi));
mat_gold.setTo(val, mask);
EXPECT_MAT_NEAR(mat_gold, mat, 0.0);
}
}
INSTANTIATE_TEST_CASE_P(GPU_GpuMat, SetTo, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_TYPES,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// CopyTo
PARAM_TEST_CASE(CopyTo, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(CopyTo, WithOutMask)
{
cv::Mat src = randomMat(size, type);
cv::gpu::GpuMat d_src = loadMat(src, useRoi);
cv::gpu::GpuMat dst = createMat(size, type, useRoi);
d_src.copyTo(dst);
EXPECT_MAT_NEAR(src, dst, 0.0);
}
TEST_P(CopyTo, Masked)
{
cv::Mat src = randomMat(size, type);
cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat d_src = loadMat(src);
cv::gpu::GpuMat dst;
d_src.copyTo(dst, loadMat(mask, useRoi));
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat d_src = loadMat(src, useRoi);
cv::gpu::GpuMat dst = loadMat(cv::Mat::zeros(size, type), useRoi);
d_src.copyTo(dst, loadMat(mask, useRoi));
cv::Mat dst_gold = cv::Mat::zeros(size, type);
src.copyTo(dst_gold, mask);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
}
INSTANTIATE_TEST_CASE_P(GPU_GpuMat, CopyTo, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_TYPES,
WHOLE_SUBMAT));
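// The Masked case above exercises the copyTo-with-mask path whose function
// table indexing this commit fixes; a minimal reproduction sketch (types,
// sizes, and values are illustrative assumptions):
//
//     cv::gpu::GpuMat src(8, 8, CV_32FC3), dst(8, 8, CV_32FC3, cv::Scalar::all(0));
//     cv::gpu::GpuMat mask(8, 8, CV_8UC1, cv::Scalar::all(1));
//     src.copyTo(dst, mask);   // previously could dispatch the wrong kernel
//                              // due to an incorrect table index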
////////////////////////////////////////////////////////////////////////////////
// ConvertTo
PARAM_TEST_CASE(ConvertTo, cv::gpu::DeviceInfo, cv::Size, MatDepth, MatDepth, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int depth1;
int depth2;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth1 = GET_PARAM(2);
depth2 = GET_PARAM(3);
useRoi = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(ConvertTo, WithOutScaling)
{
cv::Mat src = randomMat(size, depth1);
if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat d_src = loadMat(src);
cv::gpu::GpuMat dst;
d_src.convertTo(dst, depth2);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat d_src = loadMat(src, useRoi);
cv::gpu::GpuMat dst = createMat(size, depth2, useRoi);
d_src.convertTo(dst, depth2);
cv::Mat dst_gold;
src.convertTo(dst_gold, depth2);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
}
TEST_P(ConvertTo, WithScaling)
{
cv::Mat src = randomMat(size, depth1);
double a = randomDouble(0.0, 1.0);
double b = randomDouble(-10.0, 10.0);
if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat d_src = loadMat(src);
cv::gpu::GpuMat dst;
d_src.convertTo(dst, depth2, a, b);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat d_src = loadMat(src, useRoi);
cv::gpu::GpuMat dst = createMat(size, depth2, useRoi);
d_src.convertTo(dst, depth2, a, b);
cv::Mat dst_gold;
src.convertTo(dst_gold, depth2, a, b);
EXPECT_MAT_NEAR(dst_gold, dst, depth2 < CV_32F ? 0.0 : 1e-4);
}
}
INSTANTIATE_TEST_CASE_P(GPU_GpuMat, ConvertTo, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
ALL_DEPTH,
WHOLE_SUBMAT));
} // namespace