Commit 863d61e9 authored by Vladislav Vinogradov

fix gpu module compilation under linux

parent 4cdcf371
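The fix follows one pattern in every file touched: an NPP_VERSION macro is composed from NPP_VERSION_MAJOR and NPP_VERSION_MINOR, and each call into an NPP primitive that only exists in NPP 3.2 or newer is wrapped in #if NPP_VERSION >= 32, with either a CUDA-kernel fallback or a run-time assertion on older NPP builds. A minimal, self-contained sketch of that guard follows; the stubbed version macros, nppSafeCall stub, and nppiSomeCall are placeholders for illustration, not the real NPP API.

// Sketch of the compile-time NPP version guard used throughout this commit.
// NPP_VERSION_MAJOR / NPP_VERSION_MINOR come from the NPP headers in the
// real code; they are stubbed here so the sketch compiles on its own.
#include <cassert>

#define NPP_VERSION_MAJOR 3
#define NPP_VERSION_MINOR 2
#define NPP_VERSION (10 * NPP_VERSION_MAJOR + NPP_VERSION_MINOR)

#define CV_Assert(expr) assert(expr)      // stand-in for OpenCV's CV_Assert
#define nppSafeCall(call) (call)          // stand-in for the error-check wrapper

static int nppiSomeCall() { return 0; }   // hypothetical NPP 3.2+ primitive

void someNppBackedFunction()
{
#if NPP_VERSION >= 32
    // New enough NPP: forward to the primitive introduced in NPP 3.2.
    nppSafeCall( nppiSomeCall() );
#else
    // Older NPP: the function still compiles (fixing the Linux build),
    // but reports itself as unsupported at run time.
    CV_Assert(!"This function is not supported");
#endif
}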
......@@ -48,8 +48,11 @@ void cv::gpu::graphcut(GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, Gpu
#else /* !defined (HAVE_CUDA) */
#define NPP_VERSION (10 * NPP_VERSION_MAJOR + NPP_VERSION_MINOR)
void cv::gpu::graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& bottom, GpuMat& labels, GpuMat& buf)
{
#if NPP_VERSION >= 32
CV_Assert(leftTransp.type() == CV_32S && rightTransp.type() == CV_32S);
CV_Assert(terminals.type() == CV_32S && bottom.type() == CV_32S && top.type() == CV_32S);
CV_Assert(terminals.size() == leftTransp.size());
......@@ -71,6 +74,9 @@ void cv::gpu::graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTrans
nppSafeCall( nppiGraphcut_32s8u(terminals.ptr<Npp32s>(), leftTransp.ptr<Npp32s>(), rightTransp.ptr<Npp32s>(), top.ptr<Npp32s>(), bottom.ptr<Npp32s>(),
terminals.step, leftTransp.step, sznpp, labels.ptr<Npp8u>(), labels.step, buf.ptr<Npp8u>()) );
#else
CV_Assert(!"This function doesn't supported");
#endif
}
......
......@@ -71,6 +71,8 @@ void cv::gpu::histRange(const GpuMat&, GpuMat*, const GpuMat*) { throw_nogpu();
#else /* !defined (HAVE_CUDA) */
#define NPP_VERSION (10 * NPP_VERSION_MAJOR + NPP_VERSION_MINOR)
namespace cv { namespace gpu { namespace imgproc
{
void remap_gpu_1c(const DevMem2D& src, const DevMem2Df& xmap, const DevMem2Df& ymap, DevMem2D dst);
......@@ -577,6 +579,7 @@ void cv::gpu::rectStdDev(const GpuMat& src, const GpuMat& sqr, GpuMat& dst, cons
void cv::gpu::Canny(const GpuMat& image, GpuMat& edges, double threshold1, double threshold2, int apertureSize)
{
#if NPP_VERSION >= 32
CV_Assert(!"disabled until fix crash");
CV_Assert(image.type() == CV_8UC1);
......@@ -600,6 +603,9 @@ void cv::gpu::Canny(const GpuMat& image, GpuMat& edges, double threshold1, doubl
nppSafeCall( nppiCanny_32f8u_C1R(srcDx.ptr<Npp32f>(), srcDx.step, srcDy.ptr<Npp32f>(), srcDy.step,
edges.ptr<Npp8u>(), edges.step, sz, (Npp32f)threshold1, (Npp32f)threshold2, buf.ptr<Npp8u>()) );
#else
CV_Assert(!"This function doesn't supported");
#endif
}
////////////////////////////////////////////////////////////////////////
......@@ -783,13 +789,18 @@ namespace
void cv::gpu::evenLevels(GpuMat& levels, int nLevels, int lowerLevel, int upperLevel)
{
#if NPP_VERSION >= 32
Mat host_levels(1, nLevels, CV_32SC1);
nppSafeCall( nppiEvenLevelsHost_32s(host_levels.ptr<Npp32s>(), nLevels, lowerLevel, upperLevel) );
levels.upload(host_levels);
#else
CV_Assert(!"This function doesn't supported");
#endif
}
void cv::gpu::histEven(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel)
{
#if NPP_VERSION >= 32
CV_Assert(src.type() == CV_8UC1 || src.type() == CV_16UC1 || src.type() == CV_16SC1 );
typedef void (*hist_t)(const GpuMat& src, GpuMat& hist, int levels, int lowerLevel, int upperLevel);
......@@ -802,10 +813,14 @@ void cv::gpu::histEven(const GpuMat& src, GpuMat& hist, int histSize, int lowerL
};
hist_callers[src.depth()](src, hist, histSize, lowerLevel, upperLevel);
#else
CV_Assert(!"This function doesn't supported");
#endif
}
void cv::gpu::histEven(const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4])
{
#if NPP_VERSION >= 32
CV_Assert(src.type() == CV_8UC4 || src.type() == CV_16UC4 || src.type() == CV_16SC4 );
typedef void (*hist_t)(const GpuMat& src, GpuMat hist[4], int levels[4], int lowerLevel[4], int upperLevel[4]);
......@@ -818,10 +833,14 @@ void cv::gpu::histEven(const GpuMat& src, GpuMat hist[4], int histSize[4], int l
};
hist_callers[src.depth()](src, hist, histSize, lowerLevel, upperLevel);
#else
CV_Assert(!"This function doesn't supported");
#endif
}
void cv::gpu::histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels)
{
#if NPP_VERSION >= 32
CV_Assert(src.type() == CV_8UC1 || src.type() == CV_16UC1 || src.type() == CV_16SC1 || src.type() == CV_32FC1);
typedef void (*hist_t)(const GpuMat& src, GpuMat& hist, const GpuMat& levels);
......@@ -836,10 +855,14 @@ void cv::gpu::histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels)
};
hist_callers[src.depth()](src, hist, levels);
#else
CV_Assert(!"This function doesn't supported");
#endif
}
void cv::gpu::histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4])
{
#if NPP_VERSION >= 32
CV_Assert(src.type() == CV_8UC4 || src.type() == CV_16UC4 || src.type() == CV_16SC4 || src.type() == CV_32FC4);
typedef void (*hist_t)(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4]);
......@@ -854,6 +877,9 @@ void cv::gpu::histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4
};
hist_callers[src.depth()](src, hist, levels);
#else
CV_Assert(!"This function doesn't supported");
#endif
}
#endif /* !defined (HAVE_CUDA) */
......@@ -77,6 +77,8 @@ namespace cv
#else /* !defined (HAVE_CUDA) */
#define NPP_VERSION (10 * NPP_VERSION_MAJOR + NPP_VERSION_MINOR)
namespace cv
{
namespace gpu
......@@ -232,7 +234,11 @@ void cv::gpu::GpuMat::convertTo( GpuMat& dst, int rtype, double alpha, double be
{NppCvt<CV_8U, CV_16U, nppiConvert_8u16u_C1R>::cvt,convertToKernelCaller,convertToKernelCaller,NppCvt<CV_8U, CV_16U, nppiConvert_8u16u_C4R>::cvt},
{NppCvt<CV_8U, CV_16S, nppiConvert_8u16s_C1R>::cvt,convertToKernelCaller,convertToKernelCaller,NppCvt<CV_8U, CV_16S, nppiConvert_8u16s_C4R>::cvt},
{convertToKernelCaller,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},
#if NPP_VERSION >= 32
{NppCvt<CV_8U, CV_32F, nppiConvert_8u32f_C1R>::cvt,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},
#else
{convertToKernelCaller,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},
#endif
{convertToKernelCaller,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},
{0,0,0,0}
},
......@@ -277,7 +283,11 @@ void cv::gpu::GpuMat::convertTo( GpuMat& dst, int rtype, double alpha, double be
{0,0,0,0}
},
{
#if NPP_VERSION >= 32
{NppCvt<CV_32F, CV_8U, nppiConvert_32f8u_C1R>::cvt,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},
#else
{convertToKernelCaller,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},
#endif
{convertToKernelCaller,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},
{NppCvt<CV_32F, CV_16U, nppiConvert_32f16u_C1R>::cvt,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},
{NppCvt<CV_32F, CV_16S, nppiConvert_32f16s_C1R>::cvt,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},
......@@ -421,10 +431,26 @@ GpuMat& GpuMat::setTo(const Scalar& s, const GpuMat& mask)
{
{NppSet<CV_8U, 1, nppiSet_8u_C1R>::set,kernelSet,kernelSet,NppSet<CV_8U, 4, nppiSet_8u_C4R>::set},
{kernelSet,kernelSet,kernelSet,kernelSet},
#if NPP_VERSION >= 32
{NppSet<CV_16U, 1, nppiSet_16u_C1R>::set,kernelSet,kernelSet,NppSet<CV_16U, 4, nppiSet_16u_C4R>::set},
#else
{kernelSet,kernelSet,kernelSet,kernelSet},
#endif
#if NPP_VERSION >= 32
{NppSet<CV_16S, 1, nppiSet_16s_C1R>::set,kernelSet,kernelSet,NppSet<CV_16S, 4, nppiSet_16s_C4R>::set},
#else
{kernelSet,kernelSet,kernelSet,kernelSet},
#endif
#if NPP_VERSION >= 32
{NppSet<CV_32S, 1, nppiSet_32s_C1R>::set,kernelSet,kernelSet,NppSet<CV_32S, 4, nppiSet_32s_C4R>::set},
#else
{NppSet<CV_32S, 1, nppiSet_32s_C1R>::set,kernelSet,kernelSet,kernelSet},
#endif
#if NPP_VERSION >= 32
{NppSet<CV_32F, 1, nppiSet_32f_C1R>::set,kernelSet,kernelSet,NppSet<CV_32F, 4, nppiSet_32f_C4R>::set},
#else
{NppSet<CV_32F, 1, nppiSet_32f_C1R>::set,kernelSet,kernelSet,kernelSet},
#endif
{kernelSet,kernelSet,kernelSet,kernelSet},
{0,0,0,0}
};
......@@ -432,6 +458,7 @@ GpuMat& GpuMat::setTo(const Scalar& s, const GpuMat& mask)
}
else
{
#if NPP_VERSION >= 32
typedef void (*set_caller_t)(GpuMat& src, const Scalar& s, const GpuMat& mask);
static const set_caller_t set_callers[8][4] =
{
......@@ -445,6 +472,9 @@ GpuMat& GpuMat::setTo(const Scalar& s, const GpuMat& mask)
{0,0,0,0}
};
set_callers[depth()][channels()-1](*this, s, mask);
#else
kernelSetMask(*this, s, mask);
#endif
}
return *this;
......
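In the matrix-operations hunks the same version check is applied inside static dispatch tables rather than around whole functions: on NPP builds older than 3.2, a table slot that would point at an NppSet<...>::set or NppCvt<...>::cvt wrapper is replaced at compile time by the generic kernel caller, so the table layout and the call sites stay identical. A self-contained sketch of that idea follows; func_t, npp_set, and kernel_set are hypothetical names standing in for the real entries.

// Illustrative version-gated dispatch table (placeholder names, not the
// real OpenCV/NPP symbols).
#define NPP_VERSION_MAJOR 3
#define NPP_VERSION_MINOR 2
#define NPP_VERSION (10 * NPP_VERSION_MAJOR + NPP_VERSION_MINOR)

typedef void (*func_t)();

static void npp_set()    {}  // NPP-backed path, available from NPP 3.2 on
static void kernel_set() {}  // custom CUDA kernel fallback

// One slot per element type; only the slots that need a newer NPP primitive
// are swapped for the kernel caller at compile time.
static const func_t set_callers[] =
{
    kernel_set,
#if NPP_VERSION >= 32
    npp_set,        // use the NPP primitive when the headers provide it
#else
    kernel_set,     // same slot, generic kernel on older NPP builds
#endif
    kernel_set,
};

// Callers index the table exactly as before, regardless of the NPP version.
void set_to(int type_index) { set_callers[type_index](); }

Keeping the call site unchanged (set_callers[depth()][channels()-1](...) in the diff above) is what lets the module compile against older NPP installs on Linux without touching the dispatching code.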