Commit eaea6782 authored by Vladislav Vinogradov's avatar Vladislav Vinogradov

added more assertions on device features to gpu functions and tests

moved TargetArchs and DeviceInfo to core
fixed bug in GpuMat::copy with mask (incorrect index in the function table)
parent e8fab91d
...@@ -50,6 +50,96 @@
namespace cv { namespace gpu
{
//////////////////////////////// Initialization & Info ////////////////////////
//! This is the only function that does not throw exceptions if the library is compiled without CUDA.
CV_EXPORTS int getCudaEnabledDeviceCount();
//! The functions below throw cv::Exception if the library is compiled without CUDA.
CV_EXPORTS void setDevice(int device);
CV_EXPORTS int getDevice();
//! Explicitly destroys and cleans up all resources associated with the current device in the current process.
//! Any subsequent API call to this device will reinitialize the device.
CV_EXPORTS void resetDevice();
enum FeatureSet
{
FEATURE_SET_COMPUTE_10 = 10,
FEATURE_SET_COMPUTE_11 = 11,
FEATURE_SET_COMPUTE_12 = 12,
FEATURE_SET_COMPUTE_13 = 13,
FEATURE_SET_COMPUTE_20 = 20,
FEATURE_SET_COMPUTE_21 = 21,
GLOBAL_ATOMICS = FEATURE_SET_COMPUTE_11,
SHARED_ATOMICS = FEATURE_SET_COMPUTE_12,
NATIVE_DOUBLE = FEATURE_SET_COMPUTE_13
};
// Gives information about what GPU archs this OpenCV GPU module was
// compiled for
class CV_EXPORTS TargetArchs
{
public:
static bool builtWith(FeatureSet feature_set);
static bool has(int major, int minor);
static bool hasPtx(int major, int minor);
static bool hasBin(int major, int minor);
static bool hasEqualOrLessPtx(int major, int minor);
static bool hasEqualOrGreater(int major, int minor);
static bool hasEqualOrGreaterPtx(int major, int minor);
static bool hasEqualOrGreaterBin(int major, int minor);
private:
TargetArchs();
};
// Gives information about the given GPU
class CV_EXPORTS DeviceInfo
{
public:
// Creates DeviceInfo object for the current GPU
DeviceInfo() : device_id_(getDevice()) { query(); }
// Creates DeviceInfo object for the given GPU
DeviceInfo(int device_id) : device_id_(device_id) { query(); }
std::string name() const { return name_; }
// Return compute capability versions
int majorVersion() const { return majorVersion_; }
int minorVersion() const { return minorVersion_; }
int multiProcessorCount() const { return multi_processor_count_; }
size_t freeMemory() const;
size_t totalMemory() const;
// Checks whether device supports the given feature
bool supports(FeatureSet feature_set) const;
// Checks whether the GPU module can be run on the given device
bool isCompatible() const;
int deviceID() const { return device_id_; }
private:
void query();
void queryMemory(size_t& free_memory, size_t& total_memory) const;
int device_id_;
std::string name_;
int multi_processor_count_;
int majorVersion_;
int minorVersion_;
};
CV_EXPORTS void printCudaDeviceInfo(int device);
CV_EXPORTS void printShortCudaDeviceInfo(int device);
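A minimal usage sketch for the API declared above (an editor's illustration under assumptions, not part of the commit; device 0, the header path, and the output wording are arbitrary):

#include <iostream>
#include "opencv2/core/gpumat.hpp" // assumed location of these declarations after the move

int main()
{
    // getCudaEnabledDeviceCount() is safe to call even in a CUDA-less build
    if (cv::gpu::getCudaEnabledDeviceCount() < 1)
        return 0; // no device, or the driver is too old

    cv::gpu::DeviceInfo info(0);
    std::cout << info.name() << ", compute capability "
              << info.majorVersion() << "." << info.minorVersion() << std::endl;

    // CV_64F paths need double support both in the compiled binaries and on the device
    if (cv::gpu::TargetArchs::builtWith(cv::gpu::NATIVE_DOUBLE) && info.supports(cv::gpu::NATIVE_DOUBLE))
        std::cout << "double-precision kernels are usable" << std::endl;

    return 0;
}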
//////////////////////////////// GpuMat ///////////////////////////////
//! Smart pointer for GPU memory with reference counting. Its interface is mostly similar to cv::Mat.
class CV_EXPORTS GpuMat
{
...
...@@ -46,7 +46,8 @@
#include <iostream>
#ifdef HAVE_CUDA
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <npp.h>
#define CUDART_MINIMUM_REQUIRED_VERSION 4010
...@@ -65,6 +66,408 @@ using namespace std;
using namespace cv;
using namespace cv::gpu;
//////////////////////////////// Initialization & Info ////////////////////////
namespace
{
// Compares value to set using the given comparator. Returns true if
// there is at least one element x in the set satisfying the
// predicate: x cmp value.
template <typename Comparer>
bool compareToSet(const std::string& set_as_str, int value, Comparer cmp)
{
if (set_as_str.find_first_not_of(" ") == string::npos)
return false;
std::stringstream stream(set_as_str);
int cur_value;
while (!stream.eof())
{
stream >> cur_value;
if (cmp(cur_value, value))
return true;
}
return false;
}
}
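A hedged illustration of the helper's behavior, assuming the build system supplies the arch lists as space-separated strings (e.g. CUDA_ARCH_BIN = "11 12 20"):

compareToSet("11 12 20", 20, std::equal_to<int>());      // true:  an exact 2.0 entry exists
compareToSet("11 12 20", 13, std::equal_to<int>());      // false: no 1.3 entry
compareToSet("11 12 20", 13, std::greater_equal<int>()); // true:  20 >= 13
compareToSet(" ", 13, std::greater_equal<int>());        // false: blank set short-circuits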
bool cv::gpu::TargetArchs::builtWith(cv::gpu::FeatureSet feature_set)
{
#ifdef HAVE_CUDA
return ::compareToSet(CUDA_ARCH_FEATURES, feature_set, std::greater_equal<int>());
#else
(void)feature_set;
return false;
#endif
}
bool cv::gpu::TargetArchs::has(int major, int minor)
{
return hasPtx(major, minor) || hasBin(major, minor);
}
bool cv::gpu::TargetArchs::hasPtx(int major, int minor)
{
#ifdef HAVE_CUDA
return ::compareToSet(CUDA_ARCH_PTX, major * 10 + minor, std::equal_to<int>());
#else
(void)major;
(void)minor;
return false;
#endif
}
bool cv::gpu::TargetArchs::hasBin(int major, int minor)
{
#if defined (HAVE_CUDA)
return ::compareToSet(CUDA_ARCH_BIN, major * 10 + minor, std::equal_to<int>());
#else
(void)major;
(void)minor;
return false;
#endif
}
bool cv::gpu::TargetArchs::hasEqualOrLessPtx(int major, int minor)
{
#ifdef HAVE_CUDA
return ::compareToSet(CUDA_ARCH_PTX, major * 10 + minor,
std::less_equal<int>());
#else
(void)major;
(void)minor;
return false;
#endif
}
bool cv::gpu::TargetArchs::hasEqualOrGreater(int major, int minor)
{
return hasEqualOrGreaterPtx(major, minor) ||
hasEqualOrGreaterBin(major, minor);
}
bool cv::gpu::TargetArchs::hasEqualOrGreaterPtx(int major, int minor)
{
#ifdef HAVE_CUDA
return ::compareToSet(CUDA_ARCH_PTX, major * 10 + minor,
std::greater_equal<int>());
#else
(void)major;
(void)minor;
return false;
#endif
}
bool cv::gpu::TargetArchs::hasEqualOrGreaterBin(int major, int minor)
{
#ifdef HAVE_CUDA
return ::compareToSet(CUDA_ARCH_BIN, major * 10 + minor,
std::greater_equal<int>());
#else
(void)major;
(void)minor;
return false;
#endif
}
#ifndef HAVE_CUDA
#define throw_nogpu CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support")
int cv::gpu::getCudaEnabledDeviceCount() { return 0; }
void cv::gpu::setDevice(int) { throw_nogpu; }
int cv::gpu::getDevice() { throw_nogpu; return 0; }
void cv::gpu::resetDevice() { throw_nogpu; }
size_t cv::gpu::DeviceInfo::freeMemory() const { throw_nogpu; return 0; }
size_t cv::gpu::DeviceInfo::totalMemory() const { throw_nogpu; return 0; }
bool cv::gpu::DeviceInfo::supports(cv::gpu::FeatureSet) const { throw_nogpu; return false; }
bool cv::gpu::DeviceInfo::isCompatible() const { throw_nogpu; return false; }
void cv::gpu::DeviceInfo::query() { throw_nogpu; }
void cv::gpu::DeviceInfo::queryMemory(size_t&, size_t&) const { throw_nogpu; }
void cv::gpu::printCudaDeviceInfo(int) { throw_nogpu; }
void cv::gpu::printShortCudaDeviceInfo(int) { throw_nogpu; }
#undef throw_nogpu
#else // HAVE_CUDA
namespace
{
#if defined(__GNUC__)
#define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__, __func__)
#define nppSafeCall(expr) ___nppSafeCall(expr, __FILE__, __LINE__, __func__)
#else /* defined(__CUDACC__) || defined(__MSVC__) */
#define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__)
#define nppSafeCall(expr) ___nppSafeCall(expr, __FILE__, __LINE__)
#endif
inline void ___cudaSafeCall(cudaError_t err, const char *file, const int line, const char *func = "")
{
if (cudaSuccess != err)
cv::gpu::error(cudaGetErrorString(err), file, line, func);
}
inline void ___nppSafeCall(int err, const char *file, const int line, const char *func = "")
{
if (err < 0)
{
std::ostringstream msg;
msg << "NPP API Call Error: " << err;
cv::gpu::error(msg.str().c_str(), file, line, func);
}
}
}
int cv::gpu::getCudaEnabledDeviceCount()
{
int count;
cudaError_t error = cudaGetDeviceCount( &count );
if (error == cudaErrorInsufficientDriver)
return -1;
if (error == cudaErrorNoDevice)
return 0;
cudaSafeCall(error);
return count;
}
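Caller-side handling of the three-way contract above (a sketch; the branch meanings follow directly from the error codes folded into the return value):

int n = cv::gpu::getCudaEnabledDeviceCount();
if (n < 0)
    std::cout << "CUDA driver is missing or older than the runtime" << std::endl; // cudaErrorInsufficientDriver
else if (n == 0)
    std::cout << "no CUDA-capable device" << std::endl;                           // cudaErrorNoDevice
else
    cv::gpu::setDevice(0); // at least one device is present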
void cv::gpu::setDevice(int device)
{
cudaSafeCall( cudaSetDevice( device ) );
}
int cv::gpu::getDevice()
{
int device;
cudaSafeCall( cudaGetDevice( &device ) );
return device;
}
void cv::gpu::resetDevice()
{
cudaSafeCall( cudaDeviceReset() );
}
size_t cv::gpu::DeviceInfo::freeMemory() const
{
size_t free_memory, total_memory;
queryMemory(free_memory, total_memory);
return free_memory;
}
size_t cv::gpu::DeviceInfo::totalMemory() const
{
size_t free_memory, total_memory;
queryMemory(free_memory, total_memory);
return total_memory;
}
bool cv::gpu::DeviceInfo::supports(cv::gpu::FeatureSet feature_set) const
{
int version = majorVersion() * 10 + minorVersion();
return version >= feature_set;
}
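Since the FeatureSet constants reuse the same major * 10 + minor encoding, the check reduces to a plain integer comparison: a compute-capability 1.3 device yields version = 13, so supports(NATIVE_DOUBLE) evaluates 13 >= 13 and passes, while a 1.2 device fails (12 >= 13 is false).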
bool cv::gpu::DeviceInfo::isCompatible() const
{
// Check PTX compatibility
if (TargetArchs::hasEqualOrLessPtx(majorVersion(), minorVersion()))
return true;
// Check BIN compatibility
for (int i = minorVersion(); i >= 0; --i)
if (TargetArchs::hasBin(majorVersion(), i))
return true;
return false;
}
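A worked example (the build strings here are assumptions for illustration): with CUDA_ARCH_PTX = "11 20" and CUDA_ARCH_BIN = "20", a compute 2.1 device is compatible via the PTX branch (2.0 PTX can be JIT-compiled for it), a 2.0 device via either branch, and a 1.0 device via neither, since no PTX at or below 1.0 and no 1.x binary were built.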
void cv::gpu::DeviceInfo::query()
{
cudaDeviceProp prop;
cudaSafeCall(cudaGetDeviceProperties(&prop, device_id_));
name_ = prop.name;
multi_processor_count_ = prop.multiProcessorCount;
majorVersion_ = prop.major;
minorVersion_ = prop.minor;
}
void cv::gpu::DeviceInfo::queryMemory(size_t& free_memory, size_t& total_memory) const
{
int prev_device_id = getDevice();
if (prev_device_id != device_id_)
setDevice(device_id_);
cudaSafeCall(cudaMemGetInfo(&free_memory, &total_memory));
if (prev_device_id != device_id_)
setDevice(prev_device_id);
}
namespace
{
template <class T> void getCudaAttribute(T *attribute, CUdevice_attribute device_attribute, int device)
{
*attribute = T();
CUresult error = CUDA_SUCCESS;// = cuDeviceGetAttribute( attribute, device_attribute, device ); why link errors under Ubuntu??
if( CUDA_SUCCESS == error )
return;
printf("Driver API error = %04d\n", error);
cv::gpu::error("driver API error", __FILE__, __LINE__);
}
int convertSMVer2Cores(int major, int minor)
{
// Defines for GPU architecture types (using the SM version to determine the # of cores per SM)
typedef struct {
int SM; // 0xMm (hexadecimal notation), M = SM major version, m = SM minor version
int Cores;
} SMtoCores;
SMtoCores gpuArchCoresPerSM[] = { { 0x10, 8 }, { 0x11, 8 }, { 0x12, 8 }, { 0x13, 8 }, { 0x20, 32 }, { 0x21, 48 }, { -1, -1 } };
int index = 0;
while (gpuArchCoresPerSM[index].SM != -1)
{
if (gpuArchCoresPerSM[index].SM == ((major << 4) + minor) )
return gpuArchCoresPerSM[index].Cores;
index++;
}
printf("MapSMtoCores undefined SMversion %d.%d!\n", major, minor);
return -1;
}
}
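For instance, convertSMVer2Cores(2, 1) looks up key 0x21 and returns 48, so the device-info printout below reports 48 * multiProcessorCount CUDA cores for an sm_21 part.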
void cv::gpu::printCudaDeviceInfo(int device)
{
int count = getCudaEnabledDeviceCount();
bool valid = (device >= 0) && (device < count);
int beg = valid ? device : 0;
int end = valid ? device+1 : count;
printf("*** CUDA Device Query (Runtime API) version (CUDART static linking) *** \n\n");
printf("Device count: %d\n", count);
int driverVersion = 0, runtimeVersion = 0;
cudaSafeCall( cudaDriverGetVersion(&driverVersion) );
cudaSafeCall( cudaRuntimeGetVersion(&runtimeVersion) );
const char *computeMode[] = {
"Default (multiple host threads can use ::cudaSetDevice() with device simultaneously)",
"Exclusive (only one host thread in one process is able to use ::cudaSetDevice() with this device)",
"Prohibited (no host thread can use ::cudaSetDevice() with this device)",
"Exclusive Process (many threads in one process is able to use ::cudaSetDevice() with this device)",
"Unknown",
NULL
};
for(int dev = beg; dev < end; ++dev)
{
cudaDeviceProp prop;
cudaSafeCall( cudaGetDeviceProperties(&prop, dev) );
printf("\nDevice %d: \"%s\"\n", dev, prop.name);
printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, driverVersion%100, runtimeVersion/1000, runtimeVersion%100);
printf(" CUDA Capability Major/Minor version number: %d.%d\n", prop.major, prop.minor);
printf(" Total amount of global memory: %.0f MBytes (%llu bytes)\n", (float)prop.totalGlobalMem/1048576.0f, (unsigned long long) prop.totalGlobalMem);
printf(" (%2d) Multiprocessors x (%2d) CUDA Cores/MP: %d CUDA Cores\n",
prop.multiProcessorCount, convertSMVer2Cores(prop.major, prop.minor),
convertSMVer2Cores(prop.major, prop.minor) * prop.multiProcessorCount);
printf(" GPU Clock Speed: %.2f GHz\n", prop.clockRate * 1e-6f);
// This is not available in the CUDA Runtime API, so we make the necessary calls to the driver API to obtain it for output
int memoryClock, memBusWidth, L2CacheSize;
getCudaAttribute<int>( &memoryClock, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, dev );
getCudaAttribute<int>( &memBusWidth, CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, dev );
getCudaAttribute<int>( &L2CacheSize, CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, dev );
printf(" Memory Clock rate: %.2f Mhz\n", memoryClock * 1e-3f);
printf(" Memory Bus Width: %d-bit\n", memBusWidth);
if (L2CacheSize)
printf(" L2 Cache Size: %d bytes\n", L2CacheSize);
printf(" Max Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d,%d), 3D=(%d,%d,%d)\n",
prop.maxTexture1D, prop.maxTexture2D[0], prop.maxTexture2D[1],
prop.maxTexture3D[0], prop.maxTexture3D[1], prop.maxTexture3D[2]);
printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, 2D=(%d,%d) x %d\n",
prop.maxTexture1DLayered[0], prop.maxTexture1DLayered[1],
prop.maxTexture2DLayered[0], prop.maxTexture2DLayered[1], prop.maxTexture2DLayered[2]);
printf(" Total amount of constant memory: %u bytes\n", (int)prop.totalConstMem);
printf(" Total amount of shared memory per block: %u bytes\n", (int)prop.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n", prop.regsPerBlock);
printf(" Warp size: %d\n", prop.warpSize);
printf(" Maximum number of threads per block: %d\n", prop.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf(" Maximum memory pitch: %u bytes\n", (int)prop.memPitch);
printf(" Texture alignment: %u bytes\n", (int)prop.textureAlignment);
printf(" Concurrent copy and execution: %s with %d copy engine(s)\n", (prop.deviceOverlap ? "Yes" : "No"), prop.asyncEngineCount);
printf(" Run time limit on kernels: %s\n", prop.kernelExecTimeoutEnabled ? "Yes" : "No");
printf(" Integrated GPU sharing Host Memory: %s\n", prop.integrated ? "Yes" : "No");
printf(" Support host page-locked memory mapping: %s\n", prop.canMapHostMemory ? "Yes" : "No");
printf(" Concurrent kernel execution: %s\n", prop.concurrentKernels ? "Yes" : "No");
printf(" Alignment requirement for Surfaces: %s\n", prop.surfaceAlignment ? "Yes" : "No");
printf(" Device has ECC support enabled: %s\n", prop.ECCEnabled ? "Yes" : "No");
printf(" Device is using TCC driver mode: %s\n", prop.tccDriver ? "Yes" : "No");
printf(" Device supports Unified Addressing (UVA): %s\n", prop.unifiedAddressing ? "Yes" : "No");
printf(" Device PCI Bus ID / PCI location ID: %d / %d\n", prop.pciBusID, prop.pciDeviceID );
printf(" Compute Mode:\n");
printf(" %s \n", computeMode[prop.computeMode]);
}
printf("\n");
printf("deviceQuery, CUDA Driver = CUDART");
printf(", CUDA Driver Version = %d.%d", driverVersion / 1000, driverVersion % 100);
printf(", CUDA Runtime Version = %d.%d", runtimeVersion/1000, runtimeVersion%100);
printf(", NumDevs = %d\n\n", count);
fflush(stdout);
}
void cv::gpu::printShortCudaDeviceInfo(int device)
{
int count = getCudaEnabledDeviceCount();
bool valid = (device >= 0) && (device < count);
int beg = valid ? device : 0;
int end = valid ? device+1 : count;
int driverVersion = 0, runtimeVersion = 0;
cudaSafeCall( cudaDriverGetVersion(&driverVersion) );
cudaSafeCall( cudaRuntimeGetVersion(&runtimeVersion) );
for(int dev = beg; dev < end; ++dev)
{
cudaDeviceProp prop;
cudaSafeCall( cudaGetDeviceProperties(&prop, dev) );
const char *arch_str = prop.major < 2 ? " (not Fermi)" : "";
printf("Device %d: \"%s\" %.0fMb", dev, prop.name, (float)prop.totalGlobalMem/1048576.0f);
printf(", sm_%d%d%s, %d cores", prop.major, prop.minor, arch_str, convertSMVer2Cores(prop.major, prop.minor) * prop.multiProcessorCount);
printf(", Driver/Runtime ver.%d.%d/%d.%d\n", driverVersion/1000, driverVersion%100, runtimeVersion/1000, runtimeVersion%100);
}
fflush(stdout);
}
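With the format strings above, a typical line of output looks like the following (the device and version numbers are invented for illustration):

Device 0: "GeForce GTX 480" 1536Mb, sm_20, 480 cores, Driver/Runtime ver.4.10/4.10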
#endif // HAVE_CUDA
//////////////////////////////// GpuMat ///////////////////////////////
cv::gpu::GpuMat::GpuMat(const GpuMat& m)
: flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend)
{
...
...@@ -326,25 +729,23 @@ namespace
#ifndef HAVE_CUDA

namespace
{
class EmptyFuncTable : public GpuFuncTable
{
public:
void copy(const Mat&, GpuMat&) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); }
void copy(const GpuMat&, Mat&) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); }
void copy(const GpuMat&, GpuMat&) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); }

void copyWithMask(const GpuMat&, GpuMat&, const GpuMat&) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); }

void convert(const GpuMat&, GpuMat&) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); }
void convert(const GpuMat&, GpuMat&, double, double) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); }

void setTo(GpuMat&, Scalar, const GpuMat&) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); }

void mallocPitch(void**, size_t*, size_t, size_t) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); }
void free(void*) const {}
};
...@@ -370,33 +771,6 @@ namespace cv { namespace gpu { namespace device
void convert_gpu(DevMem2Db src, int sdepth, DevMem2Db dst, int ddepth, double alpha, double beta, cudaStream_t stream);
}}}
namespace
{
template <typename T> void kernelSetCaller(GpuMat& src, Scalar s, cudaStream_t stream)
...@@ -502,7 +876,7 @@ namespace
typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;
typedef typename NPPTypeTraits<DDEPTH>::npp_type dst_t;

static void call(const GpuMat& src, GpuMat& dst)
{
NppiSize sz;
sz.width = src.cols;
...@@ -517,7 +891,7 @@
{
typedef typename NPPTypeTraits<DDEPTH>::npp_type dst_t;

static void call(const GpuMat& src, GpuMat& dst)
{
NppiSize sz;
sz.width = src.cols;
...@@ -557,7 +931,7 @@
{
typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;

static void call(GpuMat& src, Scalar s)
{
NppiSize sz;
sz.width = src.cols;
...@@ -574,7 +948,7 @@
{
typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;

static void call(GpuMat& src, Scalar s)
{
NppiSize sz;
sz.width = src.cols;
...@@ -605,7 +979,7 @@
{
typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;

static void call(GpuMat& src, Scalar s, const GpuMat& mask)
{
NppiSize sz;
sz.width = src.cols;
...@@ -622,7 +996,7 @@
{
typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;

static void call(GpuMat& src, Scalar s, const GpuMat& mask)
{
NppiSize sz;
sz.width = src.cols;
...@@ -650,7 +1024,7 @@
{
typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;

static void call(const GpuMat& src, GpuMat& dst, const GpuMat& mask, cudaStream_t /*stream*/)
{
NppiSize sz;
sz.width = src.cols;
...@@ -683,99 +1057,114 @@ namespace
void copyWithMask(const GpuMat& src, GpuMat& dst, const GpuMat& mask) const
{
CV_Assert(src.depth() <= CV_64F && src.channels() <= 4);
CV_Assert(src.size() == dst.size() && src.type() == dst.type());
CV_Assert(src.size() == mask.size() && mask.depth() == CV_8U && (mask.channels() == 1 || mask.channels() == src.channels()));

if (src.depth() == CV_64F)
{
if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
}

typedef void (*func_t)(const GpuMat& src, GpuMat& dst, const GpuMat& mask, cudaStream_t stream);
static const func_t funcs[7][4] =
{
/* 8U */  {NppCopyMasked<CV_8U , nppiCopy_8u_C1MR >::call, cv::gpu::copyWithMask, NppCopyMasked<CV_8U , nppiCopy_8u_C3MR >::call, NppCopyMasked<CV_8U , nppiCopy_8u_C4MR >::call},
/* 8S */  {cv::gpu::copyWithMask, cv::gpu::copyWithMask, cv::gpu::copyWithMask, cv::gpu::copyWithMask},
/* 16U */ {NppCopyMasked<CV_16U, nppiCopy_16u_C1MR>::call, cv::gpu::copyWithMask, NppCopyMasked<CV_16U, nppiCopy_16u_C3MR>::call, NppCopyMasked<CV_16U, nppiCopy_16u_C4MR>::call},
/* 16S */ {NppCopyMasked<CV_16S, nppiCopy_16s_C1MR>::call, cv::gpu::copyWithMask, NppCopyMasked<CV_16S, nppiCopy_16s_C3MR>::call, NppCopyMasked<CV_16S, nppiCopy_16s_C4MR>::call},
/* 32S */ {NppCopyMasked<CV_32S, nppiCopy_32s_C1MR>::call, cv::gpu::copyWithMask, NppCopyMasked<CV_32S, nppiCopy_32s_C3MR>::call, NppCopyMasked<CV_32S, nppiCopy_32s_C4MR>::call},
/* 32F */ {NppCopyMasked<CV_32F, nppiCopy_32f_C1MR>::call, cv::gpu::copyWithMask, NppCopyMasked<CV_32F, nppiCopy_32f_C3MR>::call, NppCopyMasked<CV_32F, nppiCopy_32f_C4MR>::call},
/* 64F */ {cv::gpu::copyWithMask, cv::gpu::copyWithMask, cv::gpu::copyWithMask, cv::gpu::copyWithMask}
};

const func_t func = mask.channels() == src.channels() ? funcs[src.depth()][src.channels() - 1] : cv::gpu::copyWithMask;
CV_DbgAssert(func != 0);

func(src, dst, mask, 0);
}
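The "- 1" in the dispatch line is the copy-with-mask fix named in the commit message. The previous revision selected

caller_t func = mask.channels() == src.channels() ? callers[src.depth()][src.channels()] : cv::gpu::copyWithMask;

and, because the table columns are indexed 0..3 for 1..4 channels, a 3-channel source picked the C4MR entry and a 4-channel source read past the end of the row. With funcs[src.depth()][src.channels() - 1], a CV_8UC3 source again dispatches to NppCopyMasked<CV_8U, nppiCopy_8u_C3MR>::call.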
void convert(const GpuMat& src, GpuMat& dst) const
{
typedef void (*func_t)(const GpuMat& src, GpuMat& dst);
static const func_t funcs[7][7][4] =
{
{
/* 8U -> 8U */  {0, 0, 0, 0},
/* 8U -> 8S */  {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 8U -> 16U */ {NppCvt<CV_8U, CV_16U, nppiConvert_8u16u_C1R>::call, cv::gpu::convertTo, cv::gpu::convertTo, NppCvt<CV_8U, CV_16U, nppiConvert_8u16u_C4R>::call},
/* 8U -> 16S */ {NppCvt<CV_8U, CV_16S, nppiConvert_8u16s_C1R>::call, cv::gpu::convertTo, cv::gpu::convertTo, NppCvt<CV_8U, CV_16S, nppiConvert_8u16s_C4R>::call},
/* 8U -> 32S */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 8U -> 32F */ {NppCvt<CV_8U, CV_32F, nppiConvert_8u32f_C1R>::call, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 8U -> 64F */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo}
},
{
/* 8S -> 8U */  {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 8S -> 8S */  {0, 0, 0, 0},
/* 8S -> 16U */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 8S -> 16S */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 8S -> 32S */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 8S -> 32F */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 8S -> 64F */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo}
},
{
/* 16U -> 8U */  {NppCvt<CV_16U, CV_8U, nppiConvert_16u8u_C1R>::call, cv::gpu::convertTo, cv::gpu::convertTo, NppCvt<CV_16U, CV_8U, nppiConvert_16u8u_C4R>::call},
/* 16U -> 8S */  {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 16U -> 16U */ {0, 0, 0, 0},
/* 16U -> 16S */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 16U -> 32S */ {NppCvt<CV_16U, CV_32S, nppiConvert_16u32s_C1R>::call, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 16U -> 32F */ {NppCvt<CV_16U, CV_32F, nppiConvert_16u32f_C1R>::call, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 16U -> 64F */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo}
},
{
/* 16S -> 8U */  {NppCvt<CV_16S, CV_8U, nppiConvert_16s8u_C1R>::call, cv::gpu::convertTo, cv::gpu::convertTo, NppCvt<CV_16S, CV_8U, nppiConvert_16s8u_C4R>::call},
/* 16S -> 8S */  {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 16S -> 16U */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 16S -> 16S */ {0, 0, 0, 0},
/* 16S -> 32S */ {NppCvt<CV_16S, CV_32S, nppiConvert_16s32s_C1R>::call, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 16S -> 32F */ {NppCvt<CV_16S, CV_32F, nppiConvert_16s32f_C1R>::call, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 16S -> 64F */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo}
},
{
/* 32S -> 8U */  {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 32S -> 8S */  {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 32S -> 16U */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 32S -> 16S */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 32S -> 32S */ {0, 0, 0, 0},
/* 32S -> 32F */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 32S -> 64F */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo}
},
{
/* 32F -> 8U */  {NppCvt<CV_32F, CV_8U, nppiConvert_32f8u_C1R>::call, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 32F -> 8S */  {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 32F -> 16U */ {NppCvt<CV_32F, CV_16U, nppiConvert_32f16u_C1R>::call, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 32F -> 16S */ {NppCvt<CV_32F, CV_16S, nppiConvert_32f16s_C1R>::call, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 32F -> 32S */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 32F -> 32F */ {0, 0, 0, 0},
/* 32F -> 64F */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo}
},
{
/* 64F -> 8U */  {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 64F -> 8S */  {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 64F -> 16U */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 64F -> 16S */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 64F -> 32S */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 64F -> 32F */ {cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo, cv::gpu::convertTo},
/* 64F -> 64F */ {0, 0, 0, 0}
}
};

CV_Assert(src.depth() <= CV_64F && src.channels() <= 4);
CV_Assert(dst.depth() <= CV_64F);
CV_Assert(src.size() == dst.size() && src.channels() == dst.channels());

if (src.depth() == CV_64F || dst.depth() == CV_64F)
{
if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
}

const func_t func = funcs[src.depth()][dst.depth()][src.channels() - 1];
CV_DbgAssert(func != 0);

func(src, dst);
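The lookup is funcs[src.depth()][dst.depth()][src.channels() - 1]: for example, a CV_8UC1 to CV_32FC1 conversion dispatches to NppCvt<CV_8U, CV_32F, nppiConvert_8u32f_C1R>::call, while any conversion touching CV_64F falls back to cv::gpu::convertTo and is additionally gated by the NATIVE_DOUBLE check above. Note that the table's declared shape also shrank from [7][7][7] to [7][7][4] in this commit, matching the four channel columns that are actually initialized.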
...@@ -783,6 +1172,15 @@ namespace
void convert(const GpuMat& src, GpuMat& dst, double alpha, double beta) const
{
CV_Assert(src.depth() <= CV_64F && src.channels() <= 4);
CV_Assert(dst.depth() <= CV_64F);
if (src.depth() == CV_64F || dst.depth() == CV_64F)
{
if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
}
cv::gpu::convertTo(src, dst, alpha, beta);
}
...@@ -812,36 +1210,51 @@
}
}
typedef void (*func_t)(GpuMat& src, Scalar s);
static const func_t funcs[7][4] =
{
{NppSet<CV_8U , 1, nppiSet_8u_C1R >::call, cv::gpu::setTo, cv::gpu::setTo, NppSet<CV_8U , 4, nppiSet_8u_C4R >::call},
{NppSet<CV_8S , 1, nppiSet_8s_C1R >::call, NppSet<CV_8S , 2, nppiSet_8s_C2R >::call, NppSet<CV_8S, 3, nppiSet_8s_C3R>::call, NppSet<CV_8S , 4, nppiSet_8s_C4R >::call},
{NppSet<CV_16U, 1, nppiSet_16u_C1R>::call, NppSet<CV_16U, 2, nppiSet_16u_C2R>::call, cv::gpu::setTo, NppSet<CV_16U, 4, nppiSet_16u_C4R>::call},
{NppSet<CV_16S, 1, nppiSet_16s_C1R>::call, NppSet<CV_16S, 2, nppiSet_16s_C2R>::call, cv::gpu::setTo, NppSet<CV_16S, 4, nppiSet_16s_C4R>::call},
{NppSet<CV_32S, 1, nppiSet_32s_C1R>::call, cv::gpu::setTo, cv::gpu::setTo, NppSet<CV_32S, 4, nppiSet_32s_C4R>::call},
{NppSet<CV_32F, 1, nppiSet_32f_C1R>::call, cv::gpu::setTo, cv::gpu::setTo, NppSet<CV_32F, 4, nppiSet_32f_C4R>::call},
{cv::gpu::setTo, cv::gpu::setTo, cv::gpu::setTo, cv::gpu::setTo}
};

CV_Assert(m.depth() <= CV_64F && m.channels() <= 4);

if (m.depth() == CV_64F)
{
if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
}

funcs[m.depth()][m.channels() - 1](m, s);
}
else
{
typedef void (*func_t)(GpuMat& src, Scalar s, const GpuMat& mask);
static const func_t funcs[7][4] =
{
{NppSetMask<CV_8U , 1, nppiSet_8u_C1MR >::call, cv::gpu::setTo, cv::gpu::setTo, NppSetMask<CV_8U , 4, nppiSet_8u_C4MR >::call},
{cv::gpu::setTo, cv::gpu::setTo, cv::gpu::setTo, cv::gpu::setTo},
{NppSetMask<CV_16U, 1, nppiSet_16u_C1MR>::call, cv::gpu::setTo, cv::gpu::setTo, NppSetMask<CV_16U, 4, nppiSet_16u_C4MR>::call},
{NppSetMask<CV_16S, 1, nppiSet_16s_C1MR>::call, cv::gpu::setTo, cv::gpu::setTo, NppSetMask<CV_16S, 4, nppiSet_16s_C4MR>::call},
{NppSetMask<CV_32S, 1, nppiSet_32s_C1MR>::call, cv::gpu::setTo, cv::gpu::setTo, NppSetMask<CV_32S, 4, nppiSet_32s_C4MR>::call},
{NppSetMask<CV_32F, 1, nppiSet_32f_C1MR>::call, cv::gpu::setTo, cv::gpu::setTo, NppSetMask<CV_32F, 4, nppiSet_32f_C4MR>::call},
{cv::gpu::setTo, cv::gpu::setTo, cv::gpu::setTo, cv::gpu::setTo}
};

CV_Assert(m.depth() <= CV_64F && m.channels() <= 4);

if (m.depth() == CV_64F)
{
if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
}

funcs[m.depth()][m.channels() - 1](m, s, mask);
}
}
...
...@@ -54,94 +54,6 @@
namespace cv { namespace gpu {
//////////////////////////////// CudaMem ////////////////////////////////
// CudaMem is a limited cv::Mat with page-locked memory allocation.
// Page-locked memory is only needed for async and faster copying to GPU.
...
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
using namespace cv;
using namespace cv::gpu;
namespace
{
// Compares value to set using the given comparator. Returns true if
// there is at least one element x in the set satisfying to: x cmp value
// predicate.
template <typename Comparer>
bool compareToSet(const std::string& set_as_str, int value, Comparer cmp)
{
if (set_as_str.find_first_not_of(" ") == string::npos)
return false;
std::stringstream stream(set_as_str);
int cur_value;
while (!stream.eof())
{
stream >> cur_value;
if (cmp(cur_value, value))
return true;
}
return false;
}
}
bool cv::gpu::TargetArchs::builtWith(cv::gpu::FeatureSet feature_set)
{
#if defined (HAVE_CUDA)
return ::compareToSet(CUDA_ARCH_FEATURES, feature_set, std::greater_equal<int>());
#else
(void)feature_set;
return false;
#endif
}
bool cv::gpu::TargetArchs::has(int major, int minor)
{
return hasPtx(major, minor) || hasBin(major, minor);
}
bool cv::gpu::TargetArchs::hasPtx(int major, int minor)
{
#if defined (HAVE_CUDA)
return ::compareToSet(CUDA_ARCH_PTX, major * 10 + minor, std::equal_to<int>());
#else
(void)major;
(void)minor;
return false;
#endif
}
bool cv::gpu::TargetArchs::hasBin(int major, int minor)
{
#if defined (HAVE_CUDA)
return ::compareToSet(CUDA_ARCH_BIN, major * 10 + minor, std::equal_to<int>());
#else
(void)major;
(void)minor;
return false;
#endif
}
bool cv::gpu::TargetArchs::hasEqualOrLessPtx(int major, int minor)
{
#if defined (HAVE_CUDA)
return ::compareToSet(CUDA_ARCH_PTX, major * 10 + minor,
std::less_equal<int>());
#else
(void)major;
(void)minor;
return false;
#endif
}
bool cv::gpu::TargetArchs::hasEqualOrGreater(int major, int minor)
{
return hasEqualOrGreaterPtx(major, minor) ||
hasEqualOrGreaterBin(major, minor);
}
bool cv::gpu::TargetArchs::hasEqualOrGreaterPtx(int major, int minor)
{
#if defined (HAVE_CUDA)
return ::compareToSet(CUDA_ARCH_PTX, major * 10 + minor,
std::greater_equal<int>());
#else
(void)major;
(void)minor;
return false;
#endif
}
bool cv::gpu::TargetArchs::hasEqualOrGreaterBin(int major, int minor)
{
#if defined (HAVE_CUDA)
return ::compareToSet(CUDA_ARCH_BIN, major * 10 + minor,
std::greater_equal<int>());
#else
(void)major;
(void)minor;
return false;
#endif
}
#if !defined (HAVE_CUDA)
int cv::gpu::getCudaEnabledDeviceCount() { return 0; }
void cv::gpu::setDevice(int) { throw_nogpu(); }
int cv::gpu::getDevice() { throw_nogpu(); return 0; }
void cv::gpu::resetDevice() { throw_nogpu(); }
size_t cv::gpu::DeviceInfo::freeMemory() const { throw_nogpu(); return 0; }
size_t cv::gpu::DeviceInfo::totalMemory() const { throw_nogpu(); return 0; }
bool cv::gpu::DeviceInfo::supports(cv::gpu::FeatureSet) const { throw_nogpu(); return false; }
bool cv::gpu::DeviceInfo::isCompatible() const { throw_nogpu(); return false; }
void cv::gpu::DeviceInfo::query() { throw_nogpu(); }
void cv::gpu::DeviceInfo::queryMemory(size_t&, size_t&) const { throw_nogpu(); }
void cv::gpu::printCudaDeviceInfo(int) { throw_nogpu(); }
void cv::gpu::printShortCudaDeviceInfo(int) { throw_nogpu(); }
#else /* !defined (HAVE_CUDA) */
int cv::gpu::getCudaEnabledDeviceCount()
{
int count;
cudaError_t error = cudaGetDeviceCount( &count );
if (error == cudaErrorInsufficientDriver)
return -1;
if (error == cudaErrorNoDevice)
return 0;
cudaSafeCall(error);
return count;
}
void cv::gpu::setDevice(int device)
{
cudaSafeCall( cudaSetDevice( device ) );
}
int cv::gpu::getDevice()
{
int device;
cudaSafeCall( cudaGetDevice( &device ) );
return device;
}
void cv::gpu::resetDevice()
{
cudaSafeCall( cudaDeviceReset() );
}
size_t cv::gpu::DeviceInfo::freeMemory() const
{
size_t free_memory, total_memory;
queryMemory(free_memory, total_memory);
return free_memory;
}
size_t cv::gpu::DeviceInfo::totalMemory() const
{
size_t free_memory, total_memory;
queryMemory(free_memory, total_memory);
return total_memory;
}
bool cv::gpu::DeviceInfo::supports(cv::gpu::FeatureSet feature_set) const
{
int version = majorVersion() * 10 + minorVersion();
return version >= feature_set;
}
bool cv::gpu::DeviceInfo::isCompatible() const
{
// Check PTX compatibility
if (TargetArchs::hasEqualOrLessPtx(majorVersion(), minorVersion()))
return true;
// Check BIN compatibility
for (int i = minorVersion(); i >= 0; --i)
if (TargetArchs::hasBin(majorVersion(), i))
return true;
return false;
}
void cv::gpu::DeviceInfo::query()
{
cudaDeviceProp prop;
cudaSafeCall(cudaGetDeviceProperties(&prop, device_id_));
name_ = prop.name;
multi_processor_count_ = prop.multiProcessorCount;
majorVersion_ = prop.major;
minorVersion_ = prop.minor;
}
void cv::gpu::DeviceInfo::queryMemory(size_t& free_memory, size_t& total_memory) const
{
int prev_device_id = getDevice();
if (prev_device_id != device_id_)
setDevice(device_id_);
cudaSafeCall(cudaMemGetInfo(&free_memory, &total_memory));
if (prev_device_id != device_id_)
setDevice(prev_device_id);
}
namespace
{
template <class T> void getCudaAttribute(T *attribute, CUdevice_attribute device_attribute, int device)
{
*attribute = T();
CUresult error = CUDA_SUCCESS;// = cuDeviceGetAttribute( attribute, device_attribute, device ); why link erros under ubuntu??
if( CUDA_SUCCESS == error )
return;
printf("Driver API error = %04d\n", error);
cv::gpu::error("driver API error", __FILE__, __LINE__);
}
int convertSMVer2Cores(int major, int minor)
{
// Defines for GPU Architecture types (using the SM version to determine the # of cores per SM
typedef struct {
int SM; // 0xMm (hexidecimal notation), M = SM Major version, and m = SM minor version
int Cores;
} SMtoCores;
SMtoCores gpuArchCoresPerSM[] = { { 0x10, 8 }, { 0x11, 8 }, { 0x12, 8 }, { 0x13, 8 }, { 0x20, 32 }, { 0x21, 48 }, { -1, -1 } };
int index = 0;
while (gpuArchCoresPerSM[index].SM != -1)
{
if (gpuArchCoresPerSM[index].SM == ((major << 4) + minor) )
return gpuArchCoresPerSM[index].Cores;
index++;
}
printf("MapSMtoCores undefined SMversion %d.%d!\n", major, minor);
return -1;
}
}
void cv::gpu::printCudaDeviceInfo(int device)
{
int count = getCudaEnabledDeviceCount();
bool valid = (device >= 0) && (device < count);
int beg = valid ? device : 0;
int end = valid ? device+1 : count;
printf("*** CUDA Device Query (Runtime API) version (CUDART static linking) *** \n\n");
printf("Device count: %d\n", count);
int driverVersion = 0, runtimeVersion = 0;
cudaSafeCall( cudaDriverGetVersion(&driverVersion) );
cudaSafeCall( cudaRuntimeGetVersion(&runtimeVersion) );
const char *computeMode[] = {
"Default (multiple host threads can use ::cudaSetDevice() with device simultaneously)",
"Exclusive (only one host thread in one process is able to use ::cudaSetDevice() with this device)",
"Prohibited (no host thread can use ::cudaSetDevice() with this device)",
"Exclusive Process (many threads in one process is able to use ::cudaSetDevice() with this device)",
"Unknown",
NULL
};
for(int dev = beg; dev < end; ++dev)
{
cudaDeviceProp prop;
cudaSafeCall( cudaGetDeviceProperties(&prop, dev) );
printf("\nDevice %d: \"%s\"\n", dev, prop.name);
printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, driverVersion%100, runtimeVersion/1000, runtimeVersion%100);
printf(" CUDA Capability Major/Minor version number: %d.%d\n", prop.major, prop.minor);
printf(" Total amount of global memory: %.0f MBytes (%llu bytes)\n", (float)prop.totalGlobalMem/1048576.0f, (unsigned long long) prop.totalGlobalMem);
printf(" (%2d) Multiprocessors x (%2d) CUDA Cores/MP: %d CUDA Cores\n",
prop.multiProcessorCount, convertSMVer2Cores(prop.major, prop.minor),
convertSMVer2Cores(prop.major, prop.minor) * prop.multiProcessorCount);
printf(" GPU Clock Speed: %.2f GHz\n", prop.clockRate * 1e-6f);
// This is not available in the CUDA Runtime API, so we make the necessary calls the driver API to support this for output
int memoryClock, memBusWidth, L2CacheSize;
getCudaAttribute<int>( &memoryClock, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, dev );
getCudaAttribute<int>( &memBusWidth, CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, dev );
getCudaAttribute<int>( &L2CacheSize, CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, dev );
printf(" Memory Clock rate: %.2f Mhz\n", memoryClock * 1e-3f);
printf(" Memory Bus Width: %d-bit\n", memBusWidth);
if (L2CacheSize)
printf(" L2 Cache Size: %d bytes\n", L2CacheSize);
printf(" Max Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d,%d), 3D=(%d,%d,%d)\n",
prop.maxTexture1D, prop.maxTexture2D[0], prop.maxTexture2D[1],
prop.maxTexture3D[0], prop.maxTexture3D[1], prop.maxTexture3D[2]);
printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, 2D=(%d,%d) x %d\n",
prop.maxTexture1DLayered[0], prop.maxTexture1DLayered[1],
prop.maxTexture2DLayered[0], prop.maxTexture2DLayered[1], prop.maxTexture2DLayered[2]);
printf(" Total amount of constant memory: %u bytes\n", (int)prop.totalConstMem);
printf(" Total amount of shared memory per block: %u bytes\n", (int)prop.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n", prop.regsPerBlock);
printf(" Warp size: %d\n", prop.warpSize);
printf(" Maximum number of threads per block: %d\n", prop.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf(" Maximum memory pitch: %u bytes\n", (int)prop.memPitch);
printf(" Texture alignment: %u bytes\n", (int)prop.textureAlignment);
printf(" Concurrent copy and execution: %s with %d copy engine(s)\n", (prop.deviceOverlap ? "Yes" : "No"), prop.asyncEngineCount);
printf(" Run time limit on kernels: %s\n", prop.kernelExecTimeoutEnabled ? "Yes" : "No");
printf(" Integrated GPU sharing Host Memory: %s\n", prop.integrated ? "Yes" : "No");
printf(" Support host page-locked memory mapping: %s\n", prop.canMapHostMemory ? "Yes" : "No");
printf(" Concurrent kernel execution: %s\n", prop.concurrentKernels ? "Yes" : "No");
printf(" Alignment requirement for Surfaces: %s\n", prop.surfaceAlignment ? "Yes" : "No");
printf(" Device has ECC support enabled: %s\n", prop.ECCEnabled ? "Yes" : "No");
printf(" Device is using TCC driver mode: %s\n", prop.tccDriver ? "Yes" : "No");
printf(" Device supports Unified Addressing (UVA): %s\n", prop.unifiedAddressing ? "Yes" : "No");
printf(" Device PCI Bus ID / PCI location ID: %d / %d\n", prop.pciBusID, prop.pciDeviceID );
printf(" Compute Mode:\n");
printf(" %s \n", computeMode[prop.computeMode]);
}
printf("\n");
printf("deviceQuery, CUDA Driver = CUDART");
printf(", CUDA Driver Version = %d.%d", driverVersion / 1000, driverVersion % 100);
printf(", CUDA Runtime Version = %d.%d", runtimeVersion/1000, runtimeVersion%100);
printf(", NumDevs = %d\n\n", count);
fflush(stdout);
}
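// getCudaAttribute<T> is a thin wrapper around the CUDA driver API whose real
// definition (earlier in this file) is elided from this excerpt. A plausible
// minimal sketch, assuming T is an int-sized integral type -- the value is
// zero-initialized so a failed driver call simply leaves 0 behind:
template <class T>
void getCudaAttribute(T* attribute, CUdevice_attribute device_attribute, int device)
{
    *attribute = T();
    // cuDeviceGetAttribute exposes device properties the runtime API does not
    cuDeviceGetAttribute(reinterpret_cast<int*>(attribute), device_attribute, device);
}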
void cv::gpu::printShortCudaDeviceInfo(int device)
{
int count = getCudaEnabledDeviceCount();
bool valid = (device >= 0) && (device < count);
int beg = valid ? device : 0;
int end = valid ? device+1 : count;
int driverVersion = 0, runtimeVersion = 0;
cudaSafeCall( cudaDriverGetVersion(&driverVersion) );
cudaSafeCall( cudaRuntimeGetVersion(&runtimeVersion) );
for(int dev = beg; dev < end; ++dev)
{
cudaDeviceProp prop;
cudaSafeCall( cudaGetDeviceProperties(&prop, dev) );
const char *arch_str = prop.major < 2 ? " (not Fermi)" : "";
printf("Device %d: \"%s\" %.0fMb", dev, prop.name, (float)prop.totalGlobalMem/1048576.0f);
printf(", sm_%d%d%s, %d cores", prop.major, prop.minor, arch_str, convertSMVer2Cores(prop.major, prop.minor) * prop.multiProcessorCount);
printf(", Driver/Runtime ver.%d.%d/%d.%d\n", driverVersion/1000, driverVersion%100, runtimeVersion/1000, runtimeVersion%100);
}
fflush(stdout);
}
#endif
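For reference, a minimal sketch of how an application might drive these diagnostics; the header path and the choice of device 0 are assumptions of this sketch, not part of the commit:

#include <opencv2/gpu/gpu.hpp>

int main()
{
    // getCudaEnabledDeviceCount() returns 0 when no usable device is present
    int count = cv::gpu::getCudaEnabledDeviceCount();
    if (count == 0)
        return 0;

    for (int dev = 0; dev < count; ++dev)
        cv::gpu::printShortCudaDeviceInfo(dev);

    cv::gpu::setDevice(0);
    cv::gpu::printCudaDeviceInfo(cv::gpu::getDevice());
    return 0;
}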
...@@ -118,6 +118,9 @@ void cv::gpu::meanStdDev(const GpuMat& src, Scalar& mean, Scalar& stddev, GpuMat
{
    CV_Assert(src.type() == CV_8UC1);

    if (!TargetArchs::builtWith(FEATURE_SET_COMPUTE_13) || !DeviceInfo().supports(FEATURE_SET_COMPUTE_13))
        CV_Error(CV_StsNotImplemented, "Insufficient compute capability");

    NppiSize sz;
    sz.width = src.cols;
    sz.height = src.rows;
...
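A caller can apply the same two-sided guard before invoking the GPU reduction. A hedged sketch — the CPU fallback and variable names are illustrative, not part of this commit:

cv::gpu::GpuMat d_src;   // a CV_8UC1 image already uploaded to the device
cv::Scalar mean, stddev;

if (cv::gpu::TargetArchs::builtWith(cv::gpu::FEATURE_SET_COMPUTE_13) &&
    cv::gpu::DeviceInfo().supports(cv::gpu::FEATURE_SET_COMPUTE_13))
{
    // The binary contains sm_13+ code and the current device can run it.
    cv::gpu::meanStdDev(d_src, mean, stddev);
}
else
{
    // Fall back to the CPU implementation on older devices.
    cv::Mat h_src(d_src); // downloads the data to the host
    cv::meanStdDev(h_src, mean, stddev);
}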
...@@ -76,6 +76,12 @@ namespace
    int depth = src[0].depth();
    Size size = src[0].size();

    if (depth == CV_64F)
    {
        if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
            CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
    }

    bool single_channel_only = true;
    int total_channels = 0;
...@@ -115,6 +121,12 @@ namespace
    int num_channels = src.channels();
    Size size = src.size();

    if (depth == CV_64F)
    {
        if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
            CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
    }

    if (num_channels == 1)
    {
        src.copyTo(dst[0]);
...
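The double-precision guard above suggests a reusable predicate on the caller's side. A sketch under the same assumptions — the helper names are hypothetical:

// True only if sm_13 double support was both compiled in and is present on
// the current device; mirrors the guard inside split/merge above.
static bool hasNativeDouble()
{
    return cv::gpu::TargetArchs::builtWith(cv::gpu::NATIVE_DOUBLE) &&
           cv::gpu::DeviceInfo().supports(cv::gpu::NATIVE_DOUBLE);
}

// Downconvert CV_64F input so the GPU path never sees unsupported doubles.
static cv::Mat prepareForGpu(const cv::Mat& src)
{
    if (src.depth() != CV_64F || hasNativeDouble())
        return src;

    cv::Mat converted;
    src.convertTo(converted, CV_32F);
    return converted;
}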
...@@ -43,6 +43,138 @@
namespace {
////////////////////////////////////////////////////////////////////////////////
// Merge
PARAM_TEST_CASE(Merge, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int depth;
int channels;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
channels = GET_PARAM(3);
useRoi = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(Merge, Accuracy)
{
std::vector<cv::Mat> src;
src.reserve(channels);
for (int i = 0; i < channels; ++i)
src.push_back(cv::Mat(size, depth, cv::Scalar::all(i)));
std::vector<cv::gpu::GpuMat> d_src;
for (int i = 0; i < channels; ++i)
d_src.push_back(loadMat(src[i], useRoi));
if (depth == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat dst;
cv::gpu::merge(d_src, dst);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat dst;
cv::gpu::merge(d_src, dst);
cv::Mat dst_gold;
cv::merge(src, dst_gold);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Merge, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
testing::Values(1, 2, 3, 4),
WHOLE_SUBMAT));
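The supportFeature helper used throughout these tests is not shown in this diff; presumably it just combines the build-time and device-side checks introduced above, e.g.:

// Assumed implementation of the test utility: a feature is usable only when
// the module was built for it and the device under test reports it.
bool supportFeature(const cv::gpu::DeviceInfo& info, cv::gpu::FeatureSet feature)
{
    return cv::gpu::TargetArchs::builtWith(feature) && info.supports(feature);
}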
////////////////////////////////////////////////////////////////////////////////
// Split
PARAM_TEST_CASE(Split, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int depth;
int channels;
bool useRoi;
int type;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
channels = GET_PARAM(3);
useRoi = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
type = CV_MAKE_TYPE(depth, channels);
}
};
TEST_P(Split, Accuracy)
{
cv::Mat src = randomMat(size, type);
if (depth == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
std::vector<cv::gpu::GpuMat> dst;
cv::gpu::split(loadMat(src), dst);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
std::vector<cv::gpu::GpuMat> dst;
cv::gpu::split(loadMat(src, useRoi), dst);
std::vector<cv::Mat> dst_gold;
cv::split(src, dst_gold);
ASSERT_EQ(dst_gold.size(), dst.size());
for (size_t i = 0; i < dst_gold.size(); ++i)
{
EXPECT_MAT_NEAR(dst_gold[i], dst[i], 0.0);
}
}
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Split, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
testing::Values(1, 2, 3, 4),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Add_Array
...@@ -1974,7 +2106,7 @@ TEST_P(AddWeighted, Accuracy)
        cv::Mat dst_gold;
        cv::addWeighted(src1, alpha, src2, beta, gamma, dst_gold, dst_depth);

-       EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 1.0 : 1e-12);
+       EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 1.0 : 1e-3);
    }
}
...@@ -2487,6 +2619,21 @@ TEST_P(MeanStdDev, Accuracy)
{
    cv::Mat src = randomMat(size, CV_8UC1);

    if (!supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_13))
    {
        try
        {
            cv::Scalar mean;
            cv::Scalar stddev;
            cv::gpu::meanStdDev(loadMat(src, useRoi), mean, stddev);
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(CV_StsNotImplemented, e.code);
        }
    }
    else
    {
        cv::Scalar mean;
        cv::Scalar stddev;
        cv::gpu::meanStdDev(loadMat(src, useRoi), mean, stddev);
...@@ -2497,6 +2644,7 @@ TEST_P(MeanStdDev, Accuracy)
        EXPECT_SCALAR_NEAR(mean_gold, mean, 1e-5);
        EXPECT_SCALAR_NEAR(stddev_gold, stddev, 1e-5);
    }
}

INSTANTIATE_TEST_CASE_P(GPU_Core, MeanStdDev, testing::Combine(
...
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace {
////////////////////////////////////////////////////////////////////////////////
// SetTo
PARAM_TEST_CASE(SetTo, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(SetTo, Zero)
{
cv::Scalar zero = cv::Scalar::all(0);
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(zero);
EXPECT_MAT_NEAR(cv::Mat::zeros(size, type), mat, 0.0);
}
TEST_P(SetTo, SameVal)
{
cv::Scalar val = cv::Scalar::all(randomDouble(0.0, 255.0));
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
EXPECT_MAT_NEAR(cv::Mat(size, type, val), mat, 0.0);
}
}
TEST_P(SetTo, DifferentVal)
{
cv::Scalar val = randomScalar(0.0, 255.0);
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
EXPECT_MAT_NEAR(cv::Mat(size, type, val), mat, 0.0);
}
}
TEST_P(SetTo, Masked)
{
cv::Scalar val = randomScalar(0.0, 255.0);
cv::Mat mat_gold = randomMat(size, type);
cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val, loadMat(mask));
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat mat = loadMat(mat_gold, useRoi);
mat.setTo(val, loadMat(mask, useRoi));
mat_gold.setTo(val, mask);
EXPECT_MAT_NEAR(mat_gold, mat, 0.0);
}
}
INSTANTIATE_TEST_CASE_P(GPU_GpuMat, SetTo, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_TYPES,
WHOLE_SUBMAT));
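Neither createMat nor loadMat is defined in this diff; they are test-suite helpers. A plausible sketch of createMat, under the assumption that useRoi exercises non-continuous submatrices (the offsets here are illustrative):

cv::gpu::GpuMat createMat(cv::Size size, int type, bool useRoi)
{
    if (!useRoi)
        return cv::gpu::GpuMat(size, type);

    // Allocate a larger buffer and hand back a ROI into it, so the tests also
    // cover GpuMats whose step is wider than their row of data.
    cv::gpu::GpuMat buf(size.height + 10, size.width + 10, type);
    return buf(cv::Rect(5, 5, size.width, size.height));
}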
////////////////////////////////////////////////////////////////////////////////
// CopyTo
PARAM_TEST_CASE(CopyTo, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(CopyTo, WithOutMask)
{
cv::Mat src = randomMat(size, type);
cv::gpu::GpuMat d_src = loadMat(src, useRoi);
cv::gpu::GpuMat dst = createMat(size, type, useRoi);
d_src.copyTo(dst);
EXPECT_MAT_NEAR(src, dst, 0.0);
}
TEST_P(CopyTo, Masked)
{
cv::Mat src = randomMat(size, type);
cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat d_src = loadMat(src);
cv::gpu::GpuMat dst;
d_src.copyTo(dst, loadMat(mask, useRoi));
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat d_src = loadMat(src, useRoi);
cv::gpu::GpuMat dst = loadMat(cv::Mat::zeros(size, type), useRoi);
d_src.copyTo(dst, loadMat(mask, useRoi));
cv::Mat dst_gold = cv::Mat::zeros(size, type);
src.copyTo(dst_gold, mask);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
}
INSTANTIATE_TEST_CASE_P(GPU_GpuMat, CopyTo, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_TYPES,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// ConvertTo
PARAM_TEST_CASE(ConvertTo, cv::gpu::DeviceInfo, cv::Size, MatDepth, MatDepth, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int depth1;
int depth2;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth1 = GET_PARAM(2);
depth2 = GET_PARAM(3);
useRoi = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(ConvertTo, WithOutScaling)
{
cv::Mat src = randomMat(size, depth1);
if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat d_src = loadMat(src);
cv::gpu::GpuMat dst;
d_src.convertTo(dst, depth2);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat d_src = loadMat(src, useRoi);
cv::gpu::GpuMat dst = createMat(size, depth2, useRoi);
d_src.convertTo(dst, depth2);
cv::Mat dst_gold;
src.convertTo(dst_gold, depth2);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
}
TEST_P(ConvertTo, WithScaling)
{
cv::Mat src = randomMat(size, depth1);
double a = randomDouble(0.0, 1.0);
double b = randomDouble(-10.0, 10.0);
if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat d_src = loadMat(src);
cv::gpu::GpuMat dst;
d_src.convertTo(dst, depth2, a, b);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat d_src = loadMat(src, useRoi);
cv::gpu::GpuMat dst = createMat(size, depth2, useRoi);
d_src.convertTo(dst, depth2, a, b);
cv::Mat dst_gold;
src.convertTo(dst_gold, depth2, a, b);
EXPECT_MAT_NEAR(dst_gold, dst, depth2 < CV_32F ? 0.0 : 1e-4);
}
}
INSTANTIATE_TEST_CASE_P(GPU_GpuMat, ConvertTo, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
ALL_DEPTH,
WHOLE_SUBMAT));
} // namespace
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#ifdef HAVE_CUDA
using namespace cvtest;
using namespace testing;
////////////////////////////////////////////////////////////////////////////////
// merge
PARAM_TEST_CASE(Merge, cv::gpu::DeviceInfo, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
int type;
bool useRoi;
cv::Size size;
std::vector<cv::Mat> src;
cv::Mat dst_gold;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
useRoi = GET_PARAM(2);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(rng.uniform(20, 150), rng.uniform(20, 150));
int depth = CV_MAT_DEPTH(type);
int num_channels = CV_MAT_CN(type);
src.reserve(num_channels);
for (int i = 0; i < num_channels; ++i)
src.push_back(cv::Mat(size, depth, cv::Scalar::all(i)));
cv::merge(src, dst_gold);
}
};
TEST_P(Merge, Accuracy)
{
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Mat dst;
std::vector<cv::gpu::GpuMat> dev_src;
cv::gpu::GpuMat dev_dst;
for (size_t i = 0; i < src.size(); ++i)
dev_src.push_back(loadMat(src[i], useRoi));
cv::gpu::merge(dev_src, dev_dst);
dev_dst.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
INSTANTIATE_TEST_CASE_P(MatOp, Merge, Combine(
ALL_DEVICES,
ALL_TYPES,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// split
PARAM_TEST_CASE(Split, cv::gpu::DeviceInfo, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
int type;
bool useRoi;
cv::Size size;
cv::Mat src;
std::vector<cv::Mat> dst_gold;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
useRoi = GET_PARAM(2);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(rng.uniform(20, 150), rng.uniform(20, 150));
src.create(size, type);
src.setTo(cv::Scalar(1.0, 2.0, 3.0, 4.0));
cv::split(src, dst_gold);
}
};
TEST_P(Split, Accuracy)
{
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
std::vector<cv::Mat> dst;
std::vector<cv::gpu::GpuMat> dev_dst;
cv::gpu::split(loadMat(src, useRoi), dev_dst);
dst.resize(dev_dst.size());
for (size_t i = 0; i < dev_dst.size(); ++i)
dev_dst[i].download(dst[i]);
ASSERT_EQ(dst_gold.size(), dst.size());
for (size_t i = 0; i < dst_gold.size(); ++i)
{
EXPECT_MAT_NEAR(dst_gold[i], dst[i], 0.0);
}
}
INSTANTIATE_TEST_CASE_P(MatOp, Split, Combine(
ALL_DEVICES,
ALL_TYPES,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// split_merge_consistency
PARAM_TEST_CASE(SplitMerge, cv::gpu::DeviceInfo, MatType)
{
cv::gpu::DeviceInfo devInfo;
int type;
cv::Size size;
cv::Mat orig;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(rng.uniform(20, 150), rng.uniform(20, 150));
orig.create(size, type);
orig.setTo(cv::Scalar(1.0, 2.0, 3.0, 4.0));
}
};
TEST_P(SplitMerge, Consistency)
{
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Mat final;
std::vector<cv::gpu::GpuMat> dev_vec;
cv::gpu::GpuMat dev_final;
cv::gpu::split(loadMat(orig), dev_vec);
cv::gpu::merge(dev_vec, dev_final);
dev_final.download(final);
EXPECT_MAT_NEAR(orig, final, 0.0);
}
INSTANTIATE_TEST_CASE_P(MatOp, SplitMerge, Combine(
ALL_DEVICES,
ALL_TYPES));
////////////////////////////////////////////////////////////////////////////////
// setTo
PARAM_TEST_CASE(SetTo, cv::gpu::DeviceInfo, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
int type;
bool useRoi;
cv::Size size;
cv::Mat mat_gold;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
useRoi = GET_PARAM(2);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(rng.uniform(20, 150), rng.uniform(20, 150));
mat_gold.create(size, type);
}
};
TEST_P(SetTo, Zero)
{
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Scalar zero = cv::Scalar::all(0);
cv::Mat mat;
cv::gpu::GpuMat dev_mat = loadMat(mat_gold, useRoi);
mat_gold.setTo(zero);
dev_mat.setTo(zero);
dev_mat.download(mat);
EXPECT_MAT_NEAR(mat_gold, mat, 0.0);
}
TEST_P(SetTo, SameVal)
{
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Scalar s = cv::Scalar::all(1);
cv::Mat mat;
cv::gpu::GpuMat dev_mat(mat_gold);
mat_gold.setTo(s);
dev_mat.setTo(s);
dev_mat.download(mat);
EXPECT_MAT_NEAR(mat_gold, mat, 0.0);
}
TEST_P(SetTo, DifferentVal)
{
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Scalar s = cv::Scalar(1, 2, 3, 4);
cv::Mat mat;
cv::gpu::GpuMat dev_mat = loadMat(mat_gold, useRoi);
mat_gold.setTo(s);
dev_mat.setTo(s);
dev_mat.download(mat);
EXPECT_MAT_NEAR(mat_gold, mat, 0.0);
}
TEST_P(SetTo, Masked)
{
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Scalar s = cv::Scalar(1, 2, 3, 4);
cv::RNG& rng = TS::ptr()->get_rng();
cv::Mat mask = randomMat(rng, mat_gold.size(), CV_8UC1, 0.0, 1.5, false);
cv::Mat mat;
cv::gpu::GpuMat dev_mat = loadMat(mat_gold, useRoi);
mat_gold.setTo(s, mask);
dev_mat.setTo(s, loadMat(mask, useRoi));
dev_mat.download(mat);
EXPECT_MAT_NEAR(mat_gold, mat, 0.0);
}
INSTANTIATE_TEST_CASE_P(MatOp, SetTo, Combine(
ALL_DEVICES,
ALL_TYPES,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// copyTo
PARAM_TEST_CASE(CopyTo, cv::gpu::DeviceInfo, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
int type;
bool useRoi;
cv::Size size;
cv::Mat src;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
useRoi = GET_PARAM(2);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(rng.uniform(20, 150), rng.uniform(20, 150));
src = randomMat(rng, size, type, 0.0, 127.0, false);
}
};
TEST_P(CopyTo, WithoutMask)
{
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Mat dst_gold;
src.copyTo(dst_gold);
cv::Mat dst;
cv::gpu::GpuMat dev_src = loadMat(src, useRoi);
cv::gpu::GpuMat dev_dst = loadMat(src, useRoi);
dev_src.copyTo(dev_dst);
dev_dst.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
TEST_P(CopyTo, Masked)
{
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::RNG& rng = TS::ptr()->get_rng();
cv::Mat mask = randomMat(rng, src.size(), CV_8UC1, 0.0, 2.0, false);
cv::Mat zeroMat(src.size(), src.type(), cv::Scalar::all(0));
cv::Mat dst_gold = zeroMat.clone();
src.copyTo(dst_gold, mask);
cv::Mat dst;
cv::gpu::GpuMat dev_src = loadMat(src, useRoi);
cv::gpu::GpuMat dev_dst = loadMat(zeroMat, useRoi);
dev_src.copyTo(dev_dst, loadMat(mask, useRoi));
dev_dst.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
INSTANTIATE_TEST_CASE_P(MatOp, CopyTo, Combine(
ALL_DEVICES,
ALL_TYPES,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// convertTo
PARAM_TEST_CASE(ConvertTo, cv::gpu::DeviceInfo, MatType, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
int depth1;
int depth2;
bool useRoi;
cv::Size size;
cv::Mat src;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
depth1 = GET_PARAM(1);
depth2 = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(rng.uniform(20, 150), rng.uniform(20, 150));
src = randomMat(rng, size, depth1, 0.0, 127.0, false);
}
};
TEST_P(ConvertTo, WithoutScaling)
{
if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Mat dst_gold;
src.convertTo(dst_gold, depth2);
cv::Mat dst;
cv::gpu::GpuMat dev_src = loadMat(src, useRoi);
cv::gpu::GpuMat dev_dst;
dev_src.convertTo(dev_dst, depth2);
dev_dst.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
TEST_P(ConvertTo, WithScaling)
{
if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::RNG& rng = TS::ptr()->get_rng();
const double a = rng.uniform(0.0, 1.0);
const double b = rng.uniform(-10.0, 10.0);
cv::Mat dst_gold;
src.convertTo(dst_gold, depth2, a, b);
cv::Mat dst;
cv::gpu::GpuMat dev_src = loadMat(src, useRoi);
cv::gpu::GpuMat dev_dst;
dev_src.convertTo(dev_dst, depth2, a, b);
dev_dst.download(dst);
const double eps = depth2 < CV_32F ? 1 : 1e-4;
EXPECT_MAT_NEAR(dst_gold, dst, eps);
}
INSTANTIATE_TEST_CASE_P(MatOp, ConvertTo, Combine(
ALL_DEVICES,
TYPES(CV_8U, CV_64F, 1, 1),
TYPES(CV_8U, CV_64F, 1, 1),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// async
struct Async : TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
cv::gpu::CudaMem src;
cv::Mat dst_gold0;
cv::Mat dst_gold1;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
int rows = rng.uniform(100, 200);
int cols = rng.uniform(100, 200);
src = cv::gpu::CudaMem(cv::Mat::zeros(rows, cols, CV_8UC1));
dst_gold0 = cv::Mat(rows, cols, CV_8UC1, cv::Scalar::all(255));
dst_gold1 = cv::Mat(rows, cols, CV_8UC1, cv::Scalar::all(128));
}
};
TEST_P(Async, Accuracy)
{
cv::Mat dst0, dst1;
cv::gpu::CudaMem cpudst0;
cv::gpu::CudaMem cpudst1;
cv::gpu::GpuMat gpusrc;
cv::gpu::GpuMat gpudst0;
cv::gpu::GpuMat gpudst1(src.rows, src.cols, CV_8UC1);
cv::gpu::Stream stream0;
cv::gpu::Stream stream1;
stream0.enqueueUpload(src, gpusrc);
cv::gpu::bitwise_not(gpusrc, gpudst0, cv::gpu::GpuMat(), stream0);
stream0.enqueueDownload(gpudst0, cpudst0);
stream1.enqueueMemSet(gpudst1, cv::Scalar::all(128));
stream1.enqueueDownload(gpudst1, cpudst1);
stream0.waitForCompletion();
stream1.waitForCompletion();
dst0 = cpudst0.createMatHeader();
dst1 = cpudst1.createMatHeader();
EXPECT_MAT_NEAR(dst_gold0, dst0, 0.0);
EXPECT_MAT_NEAR(dst_gold1, dst1, 0.0);
}
INSTANTIATE_TEST_CASE_P(MatOp, Async, ALL_DEVICES);
#endif // HAVE_CUDA
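The Async test above doubles as a compact reference for the stream API. A condensed sketch of the same upload/compute/download pattern — the sizes and the bitwise_not workload are illustrative:

cv::gpu::CudaMem src(cv::Mat::zeros(480, 640, CV_8UC1)); // page-locked host memory
cv::gpu::CudaMem dst;
cv::gpu::GpuMat d_src, d_dst;
cv::gpu::Stream stream;

stream.enqueueUpload(src, d_src);                              // async H2D copy
cv::gpu::bitwise_not(d_src, d_dst, cv::gpu::GpuMat(), stream); // async kernel
stream.enqueueDownload(d_dst, dst);                            // async D2H copy
stream.waitForCompletion();                                    // block until all three finish

cv::Mat result = dst.createMatHeader(); // zero-copy view of the downloaded data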
...@@ -41,38 +41,31 @@
#include "precomp.hpp"

-#ifdef HAVE_CUDA
+namespace {

-using namespace cvtest;
-using namespace testing;

//#define DUMP

-struct CV_GpuHogDetectTestRunner : cv::gpu::HOGDescriptor
+struct HOG : testing::TestWithParam<cv::gpu::DeviceInfo>, cv::gpu::HOGDescriptor
{
-    void run()
-    {
-        cv::Mat img_rgb = readImage("hog/road.png");
-        ASSERT_FALSE(img_rgb.empty());
+    cv::gpu::DeviceInfo devInfo;

#ifdef DUMP
-        f.open((std::string(cvtest::TS::ptr()->get_data_path()) + "hog/expected_output.bin").c_str(), std::ios_base::binary);
-        ASSERT_TRUE(f.is_open());
+    std::ofstream f;
#else
-        f.open((std::string(cvtest::TS::ptr()->get_data_path()) + "hog/expected_output.bin").c_str(), std::ios_base::binary);
-        ASSERT_TRUE(f.is_open());
+    std::ifstream f;
#endif

-        // Test on color image
-        cv::Mat img;
-        cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
-        test(img);
+    int wins_per_img_x;
+    int wins_per_img_y;
+    int blocks_per_win_x;
+    int blocks_per_win_y;
+    int block_hist_size;

-        // Test on gray image
-        cv::cvtColor(img_rgb, img, CV_BGR2GRAY);
-        test(img);
-        f.close();
+    virtual void SetUp()
+    {
+        devInfo = GetParam();
+        cv::gpu::setDevice(devInfo.deviceID());
    }

#ifdef DUMP
...@@ -80,6 +73,7 @@ struct CV_GpuHogDetectTestRunner : cv::gpu::HOGDescriptor
    {
        f.write((char*)&block_hists.rows, sizeof(block_hists.rows));
        f.write((char*)&block_hists.cols, sizeof(block_hists.cols));
+
        for (int i = 0; i < block_hists.rows; ++i)
        {
            for (int j = 0; j < block_hists.cols; ++j)
...@@ -88,8 +82,10 @@ struct CV_GpuHogDetectTestRunner : cv::gpu::HOGDescriptor
                f.write((char*)&val, sizeof(val));
            }
        }
+
        int nlocations = locations.size();
        f.write((char*)&nlocations, sizeof(nlocations));
+
        for (int i = 0; i < locations.size(); ++i)
            f.write((char*)&locations[i], sizeof(locations[i]));
    }
...@@ -97,12 +93,11 @@ struct CV_GpuHogDetectTestRunner : cv::gpu::HOGDescriptor
    void compare(const cv::Mat& block_hists, const std::vector<cv::Point>& locations)
    {
        int rows, cols;
-        int nlocations;
        f.read((char*)&rows, sizeof(rows));
        f.read((char*)&cols, sizeof(cols));
        ASSERT_EQ(rows, block_hists.rows);
        ASSERT_EQ(cols, block_hists.cols);
+
        for (int i = 0; i < block_hists.rows; ++i)
        {
            for (int j = 0; j < block_hists.cols; ++j)
...@@ -112,8 +107,11 @@ struct CV_GpuHogDetectTestRunner : cv::gpu::HOGDescriptor
                ASSERT_NEAR(val, block_hists.at<float>(i, j), 1e-3);
            }
        }
+
+        int nlocations;
        f.read((char*)&nlocations, sizeof(nlocations));
        ASSERT_EQ(nlocations, static_cast<int>(locations.size()));
+
        for (int i = 0; i < nlocations; ++i)
        {
            cv::Point location;
...@@ -123,22 +121,18 @@ struct CV_GpuHogDetectTestRunner : cv::gpu::HOGDescriptor
    }
#endif

-    void test(const cv::Mat& img)
+    void testDetect(const cv::Mat& img)
    {
-        cv::gpu::GpuMat d_img(img);
-
        gamma_correction = false;
        setSVMDetector(cv::gpu::HOGDescriptor::getDefaultPeopleDetector());
-        //cpu detector may be updated soon
-        //hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());

        std::vector<cv::Point> locations;

        // Test detect
-        detect(d_img, locations, 0);
+        detect(loadMat(img), locations, 0);

#ifdef DUMP
-        dump(block_hists, locations);
+        dump(cv::Mat(block_hists), locations);
#else
        compare(cv::Mat(block_hists), locations);
#endif
...@@ -146,58 +140,66 @@ struct CV_GpuHogDetectTestRunner : cv::gpu::HOGDescriptor
        // Test detect on smaller image
        cv::Mat img2;
        cv::resize(img, img2, cv::Size(img.cols / 2, img.rows / 2));
-        detect(cv::gpu::GpuMat(img2), locations, 0);
+        detect(loadMat(img2), locations, 0);

#ifdef DUMP
-        dump(block_hists, locations);
+        dump(cv::Mat(block_hists), locations);
#else
        compare(cv::Mat(block_hists), locations);
#endif

        // Test detect on greater image
        cv::resize(img, img2, cv::Size(img.cols * 2, img.rows * 2));
-        detect(cv::gpu::GpuMat(img2), locations, 0);
+        detect(loadMat(img2), locations, 0);

#ifdef DUMP
-        dump(block_hists, locations);
+        dump(cv::Mat(block_hists), locations);
#else
        compare(cv::Mat(block_hists), locations);
#endif
    }

-#ifdef DUMP
-    std::ofstream f;
-#else
-    std::ifstream f;
-#endif
+    // Does not compare border value, as interpolation leads to delta
+    void compare_inner_parts(cv::Mat d1, cv::Mat d2)
+    {
+        for (int i = 1; i < blocks_per_win_y - 1; ++i)
+            for (int j = 1; j < blocks_per_win_x - 1; ++j)
+                for (int k = 0; k < block_hist_size; ++k)
+                {
+                    float a = d1.at<float>(0, (i * blocks_per_win_x + j) * block_hist_size);
+                    float b = d2.at<float>(0, (i * blocks_per_win_x + j) * block_hist_size);
+                    ASSERT_FLOAT_EQ(a, b);
+                }
+    }
};

-struct Detect : TestWithParam<cv::gpu::DeviceInfo>
+TEST_P(HOG, Detect)
{
-    cv::gpu::DeviceInfo devInfo;
+    cv::Mat img_rgb = readImage("hog/road.png");
+    ASSERT_FALSE(img_rgb.empty());

-    virtual void SetUp()
-    {
-        devInfo = GetParam();
-        cv::gpu::setDevice(devInfo.deviceID());
-    }
-};
+#ifdef DUMP
+    f.open((std::string(cvtest::TS::ptr()->get_data_path()) + "hog/expected_output.bin").c_str(), std::ios_base::binary);
+    ASSERT_TRUE(f.is_open());
+#else
+    f.open((std::string(cvtest::TS::ptr()->get_data_path()) + "hog/expected_output.bin").c_str(), std::ios_base::binary);
+    ASSERT_TRUE(f.is_open());
+#endif

-TEST_P(Detect, Accuracy)
-{
-    CV_GpuHogDetectTestRunner runner;
-    runner.run();
-}
+    // Test on color image
+    cv::Mat img;
+    cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
+    testDetect(img);

-INSTANTIATE_TEST_CASE_P(HOG, Detect, ALL_DEVICES);
+    // Test on gray image
+    cv::cvtColor(img_rgb, img, CV_BGR2GRAY);
+    testDetect(img);
+
+    f.close();
+}

-struct CV_GpuHogGetDescriptorsTestRunner : cv::gpu::HOGDescriptor
+TEST_P(HOG, GetDescriptors)
{
-    CV_GpuHogGetDescriptorsTestRunner(): cv::gpu::HOGDescriptor(cv::Size(64, 128)) {}
-
-    void run()
-    {
    // Load image (e.g. train data, composed from windows)
    cv::Mat img_rgb = readImage("hog/train_data.png");
    ASSERT_FALSE(img_rgb.empty());
...@@ -278,46 +280,8 @@ struct CV_GpuHogGetDescriptorsTestRunner : cv::gpu::HOGDescriptor
    cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
    computeBlockHistograms(cv::gpu::GpuMat(img));
    compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(5, 6)));
-    }
-
-    // Does not compare border value, as interpolation leads to delta
-    void compare_inner_parts(cv::Mat d1, cv::Mat d2)
-    {
-        for (int i = 1; i < blocks_per_win_y - 1; ++i)
-            for (int j = 1; j < blocks_per_win_x - 1; ++j)
-                for (int k = 0; k < block_hist_size; ++k)
-                {
-                    float a = d1.at<float>(0, (i * blocks_per_win_x + j) * block_hist_size);
-                    float b = d2.at<float>(0, (i * blocks_per_win_x + j) * block_hist_size);
-                    ASSERT_FLOAT_EQ(a, b);
-                }
-    }
-
-    int wins_per_img_x;
-    int wins_per_img_y;
-    int blocks_per_win_x;
-    int blocks_per_win_y;
-    int block_hist_size;
-};
-
-struct GetDescriptors : TestWithParam<cv::gpu::DeviceInfo>
-{
-    cv::gpu::DeviceInfo devInfo;
-
-    virtual void SetUp()
-    {
-        devInfo = GetParam();
-        cv::gpu::setDevice(devInfo.deviceID());
-    }
-};
-
-TEST_P(GetDescriptors, Accuracy)
-{
-    CV_GpuHogGetDescriptorsTestRunner runner;
-    runner.run();
-}
+}

-INSTANTIATE_TEST_CASE_P(HOG, GetDescriptors, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(GPU_ObjDetect, HOG, ALL_DEVICES);

-#endif // HAVE_CUDA
+} // namespace