Commit 7076dfd7 authored by Anatoly Baksheev

gpu module: refactored devmem2d.hpp (atomic bomb)

parent 916703c6
......@@ -40,12 +40,8 @@
//
//M*/
#ifndef __OPENCV_GPU_DEVMEM2D_HPP__
#define __OPENCV_GPU_DEVMEM2D_HPP__
#if defined(__DEVCLASES_ADD_THRUST_BEGIN_END__)
#include "thrust/device_ptr.h"
#endif
#ifndef __OPENCV_GPU_DevMem2D_HPP__
#define __OPENCV_GPU_DevMem2D_HPP__
namespace cv
......@@ -63,97 +59,103 @@ namespace cv
template <bool expr> struct StaticAssert;
template <> struct StaticAssert<true> {static __CV_GPU_HOST_DEVICE__ void check(){}};
template <typename T> struct DevMem2D_
{
typedef T elem_type;
typedef int index_type;
int cols;
int rows;
T* data;
size_t step;
DevMem2D_() : cols(0), rows(0), data(0), step(0) {}
DevMem2D_(int rows_, int cols_, T *data_, size_t step_)
: cols(cols_), rows(rows_), data(data_), step(step_) {}
template <typename U>
explicit DevMem2D_(const DevMem2D_<U>& d)
: cols(d.cols), rows(d.rows), data((T*)d.data), step(d.step) {}
template<typename T> struct DevPtr
{
typedef T elem_type;
typedef int index_type;
enum { elem_size = sizeof(elem_type) };
enum { elem_size = sizeof(elem_type) };
__CV_GPU_HOST_DEVICE__ size_t elemSize() const { return elem_size; }
__CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return (T*)( (char*)data + y * step ); }
__CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)data + y * step ); }
T* data;
__CV_GPU_HOST_DEVICE__ operator T*() const { return data; }
__CV_GPU_HOST_DEVICE__ DevPtr() : data(0) {}
__CV_GPU_HOST_DEVICE__ DevPtr(T* data_) : data(data_) {}
__CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }
__CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
__CV_GPU_HOST_DEVICE__ size_t elemSize() const { return elem_size; }
__CV_GPU_HOST_DEVICE__ operator T*() { return data; }
__CV_GPU_HOST_DEVICE__ operator const T*() const { return data; }
};
template<typename T> struct PtrSz : public DevPtr<T>
{
__CV_GPU_HOST_DEVICE__ PtrSz() : size(0) {}
__CV_GPU_HOST_DEVICE__ PtrSz(T* data_, size_t size_) : DevPtr<T>(data_), size(size_) {}
#if defined(__DEVCLASES_ADD_THRUST_BEGIN_END__)
thrust::device_ptr<T> begin() const { return thrust::device_ptr<T>(data); }
thrust::device_ptr<T> end() const { return thrust::device_ptr<T>(data) + cols * rows; }
#endif
size_t size;
};
template<typename T> struct PtrStep_
{
typedef T elem_type;
typedef int index_type;
T* data;
size_t step;
PtrStep_() : data(0), step(0) {}
PtrStep_(const DevMem2D_<T>& mem) : data(mem.data), step(mem.step) {}
template<typename T> struct PtrStep : public DevPtr<T>
{
__CV_GPU_HOST_DEVICE__ PtrStep() : step(0) {}
__CV_GPU_HOST_DEVICE__ PtrStep(T* data_, size_t step_) : DevPtr<T>(data_), step(step_) {}
enum { elem_size = sizeof(elem_type) };
/** \brief stride between two consecutive rows in bytes. Step is always stored in bytes. */
size_t step;
__CV_GPU_HOST_DEVICE__ size_t elemSize() const { return elem_size; }
__CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return (T*)( (char*)data + y * step); }
__CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)data + y * step); }
__CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return ( T*)( ( char*)DevPtr<T>::data + y * step); }
__CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)DevPtr<T>::data + y * step); }
__CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }
__CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }
__CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
};
#if defined(__DEVCLASES_ADD_THRUST_BEGIN_END__)
thrust::device_ptr<T> begin() const { return thrust::device_ptr<T>(data); }
#endif
template <typename T> struct PtrStepSz : public PtrStep<T>
{
__CV_GPU_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {}
__CV_GPU_HOST_DEVICE__ PtrStepSz(int rows_, int cols_, T* data_, size_t step_)
: PtrStep<T>(data_, step_), cols(cols_), rows(rows_) {}
int cols;
int rows;
};
template<typename T> struct PtrElemStep_ : public PtrStep_<T>
template <typename T> struct DevMem2D_ : public PtrStepSz<T>
{
DevMem2D_() {}
DevMem2D_(int rows_, int cols_, T *data_, size_t step_) : PtrStepSz<T>(rows_, cols_, data_, step_) {}
template <typename U>
explicit DevMem2D_(const DevMem2D_<U>& d) : PtrStepSz<T>(d.rows, d.cols, (T*)d.data, d.step) {}
};
template<typename T> struct PtrElemStep_ : public PtrStep<T>
{
PtrElemStep_(const DevMem2D_<T>& mem) : PtrStep_<T>(mem)
PtrElemStep_(const DevMem2D_<T>& mem) : PtrStep<T>(mem.data, mem.step)
{
StaticAssert<256 % sizeof(T) == 0>::check();
PtrStep_<T>::step /= PtrStep_<T>::elem_size;
PtrStep<T>::step /= PtrStep<T>::elem_size;
}
__CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return PtrStep_<T>::data + y * PtrStep_<T>::step; }
__CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return PtrStep_<T>::data + y * PtrStep_<T>::step; }
__CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return PtrStep<T>::data + y * PtrStep<T>::step; }
__CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return PtrStep<T>::data + y * PtrStep<T>::step; }
__CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }
__CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
};
typedef DevMem2D_<unsigned char> DevMem2D;
template<typename T> struct PtrStep_ : public PtrStep<T>
{
PtrStep_() {}
PtrStep_(const DevMem2D_<T>& mem) : PtrStep<T>(mem.data, mem.step) {}
};
#undef __CV_GPU_HOST_DEVICE__
typedef DevMem2D_<unsigned char> DevMem2Db;
typedef DevMem2Db DevMem2D;
typedef DevMem2D_<float> DevMem2Df;
typedef DevMem2D_<int> DevMem2Di;
typedef PtrStep_<unsigned char> PtrStep;
typedef PtrStep_<float> PtrStepf;
typedef PtrStep_<int> PtrStepi;
typedef PtrStep<unsigned char> PtrStepb;
typedef PtrStep<float> PtrStepf;
typedef PtrStep<int> PtrStepi;
typedef PtrElemStep_<unsigned char> PtrElemStep;
typedef PtrElemStep_<float> PtrElemStepf;
typedef PtrElemStep_<int> PtrElemStepi;
#undef __CV_GPU_HOST_DEVICE__
typedef PtrElemStep_<int> PtrElemStepi;
}
}
#endif /* __OPENCV_GPU_DEVMEM2D_HPP__ */
#endif /* __OPENCV_GPU_DevMem2D_HPP__ */
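The net effect of the hunks above: the old standalone DevMem2D_ and PtrStep_ structs become a small inheritance chain, DevPtr -> PtrStep -> PtrStepSz -> DevMem2D_, with the byte-wise step and operator()(y, x) living in PtrStep. A minimal sketch of a kernel consuming the refactored type; the kernel itself is hypothetical, only the struct names and members come from the header:

#include "opencv2/gpu/devmem2d.hpp"  // assumed header path for this era of the gpu module

// Hypothetical kernel: scale every float pixel in place. DevMem2D_ carries
// data, step (in bytes) and the image size, so no extra width/height
// parameters are needed; operator() resolves ptr(y)[x] through the byte step.
__global__ void scaleKernel(cv::gpu::DevMem2D_<float> img, float s)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x < img.cols && y < img.rows)
        img(y, x) *= s;
}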
......@@ -89,6 +89,7 @@ namespace cv { namespace gpu
// Contains just image size, data ptr and step.
template <class T> operator DevMem2D_<T>() const;
template <class T> operator PtrStep_<T>() const;
template <class T> operator PtrStep<T>() const;
//! performs blocking upload of data to GpuMat.
void upload(const cv::Mat& m);
......@@ -238,6 +239,7 @@ namespace cv { namespace gpu
template <class T> inline GpuMat::operator DevMem2D_<T>() const { return DevMem2D_<T>(rows, cols, (T*)data, step); }
template <class T> inline GpuMat::operator PtrStep_<T>() const { return PtrStep_<T>(static_cast< DevMem2D_<T> >(*this)); }
template <class T> inline GpuMat::operator PtrStep<T>() const { return PtrStep<T>((T*)data, step); }
inline GpuMat GpuMat::clone() const
{
......
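With the conversion operators above, host code converts a GpuMat to the device-side view explicitly at the launch site. A minimal sketch, assuming the hypothetical scaleKernel from the previous example:

#include "opencv2/gpu/gpu.hpp"  // assumed header for GpuMat in this era

// Hypothetical launch: static_cast invokes GpuMat::operator DevMem2D_<float>(),
// mirroring the inline definition shown above.
void scaleImage(cv::gpu::GpuMat& img, float s)
{
    dim3 block(16, 16);
    dim3 grid((img.cols + block.x - 1) / block.x, (img.rows + block.y - 1) / block.y);

    scaleKernel<<<grid, block>>>(static_cast< cv::gpu::DevMem2D_<float> >(img), s);
    cudaDeviceSynchronize();
}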
......@@ -59,8 +59,8 @@ namespace cv { namespace gpu { namespace bf
{
void load_constants(float* table_color, const DevMem2Df& table_space, int ndisp, int radius, short edge_disc, short max_disc);
void bilateral_filter_gpu(const DevMem2D& disp, const DevMem2D& img, int channels, int iters, cudaStream_t stream);
void bilateral_filter_gpu(const DevMem2D_<short>& disp, const DevMem2D& img, int channels, int iters, cudaStream_t stream);
void bilateral_filter_gpu(const DevMem2Db& disp, const DevMem2Db& img, int channels, int iters, cudaStream_t stream);
void bilateral_filter_gpu(const DevMem2D_<short>& disp, const DevMem2Db& img, int channels, int iters, cudaStream_t stream);
}}}
namespace
......
......@@ -55,11 +55,11 @@ void cv::gpu::blendLinear(const GpuMat&, const GpuMat&, const GpuMat&, const Gpu
namespace cv { namespace gpu
{
template <typename T>
void blendLinearCaller(int rows, int cols, int cn, const PtrStep_<T> img1, const PtrStep_<T> img2,
const PtrStep_<float> weights1, const PtrStep_<float> weights2, PtrStep_<T> result, cudaStream_t stream);
void blendLinearCaller(int rows, int cols, int cn, const PtrStep<T>& img1, const PtrStep<T>& img2,
const PtrStepf& weights1, const PtrStepf& weights2, PtrStep<T> result, cudaStream_t stream);
void blendLinearCaller8UC4(int rows, int cols, const PtrStep img1, const PtrStep img2,
const PtrStepf weights1, const PtrStepf weights2, PtrStep result, cudaStream_t stream);
void blendLinearCaller8UC4(int rows, int cols, const PtrStepb& img1, const PtrStepb& img2,
const PtrStepf& weights1, const PtrStepf& weights2, PtrStepb result, cudaStream_t stream);
}}
void cv::gpu::blendLinear(const GpuMat& img1, const GpuMat& img2, const GpuMat& weights1, const GpuMat& weights2,
......
......@@ -54,7 +54,7 @@ void cv::gpu::cvtColor(const GpuMat&, GpuMat&, int, int, Stream&) { throw_nogpu(
namespace cv { namespace gpu { namespace device
{
#define OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name) \
void name(const DevMem2D& src, const DevMem2D& dst, cudaStream_t stream);
void name(const DevMem2Db& src, const DevMem2Db& dst, cudaStream_t stream);
#define OPENCV_GPU_DECLARE_CVTCOLOR_ALL(name) \
OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _8u) \
......@@ -203,7 +203,7 @@ namespace cv { namespace gpu { namespace device
namespace
{
typedef void (*gpu_func_t)(const DevMem2D& src, const DevMem2D& dst, cudaStream_t stream);
typedef void (*gpu_func_t)(const DevMem2Db& src, const DevMem2Db& dst, cudaStream_t stream);
void bgr_to_rgb(const GpuMat& src, GpuMat& dst, int, Stream& stream)
{
......
......@@ -186,7 +186,7 @@ namespace bf_krnls
namespace cv { namespace gpu { namespace bf
{
template <typename T>
void bilateral_filter_caller(const DevMem2D_<T>& disp, const DevMem2D& img, int channels, int iters, cudaStream_t stream)
void bilateral_filter_caller(const DevMem2D_<T>& disp, const DevMem2Db& img, int channels, int iters, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
......@@ -221,12 +221,12 @@ namespace cv { namespace gpu { namespace bf
cudaSafeCall( cudaDeviceSynchronize() );
}
void bilateral_filter_gpu(const DevMem2D& disp, const DevMem2D& img, int channels, int iters, cudaStream_t stream)
void bilateral_filter_gpu(const DevMem2Db& disp, const DevMem2Db& img, int channels, int iters, cudaStream_t stream)
{
bilateral_filter_caller(disp, img, channels, iters, stream);
}
void bilateral_filter_gpu(const DevMem2D_<short>& disp, const DevMem2D& img, int channels, int iters, cudaStream_t stream)
void bilateral_filter_gpu(const DevMem2D_<short>& disp, const DevMem2Db& img, int channels, int iters, cudaStream_t stream)
{
bilateral_filter_caller(disp, img, channels, iters, stream);
}
......
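cudaSafeCall, used throughout these hunks, is OpenCV's CUDA error-check wrapper. A simplified sketch of the idiom, assuming a bare fprintf handler where the real macro funnels into cv::gpu::error():

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Simplified stand-in for cudaSafeCall: evaluate the runtime call and
// abort with the decoded error string on failure.
#define cudaSafeCallSketch(expr)                                         \
    do {                                                                 \
        cudaError_t err = (expr);                                        \
        if (err != cudaSuccess) {                                        \
            fprintf(stderr, "CUDA error: %s at %s:%d\n",                 \
                    cudaGetErrorString(err), __FILE__, __LINE__);        \
            exit(EXIT_FAILURE);                                          \
        }                                                                \
    } while (0)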
......@@ -48,8 +48,8 @@ namespace cv { namespace gpu
{
template <typename T>
__global__ void blendLinearKernel(int rows, int cols, int cn, const PtrStep_<T> img1, const PtrStep_<T> img2,
const PtrStepf weights1, const PtrStepf weights2, PtrStep_<T> result)
__global__ void blendLinearKernel(int rows, int cols, int cn, const PtrStep<T> img1, const PtrStep<T> img2,
const PtrStepf weights1, const PtrStepf weights2, PtrStep<T> result)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
......@@ -63,12 +63,11 @@ namespace cv { namespace gpu
T p2 = img2.ptr(y)[x];
result.ptr(y)[x] = (p1 * w1 + p2 * w2) / (w1 + w2 + 1e-5f);
}
}
}
template <typename T>
void blendLinearCaller(int rows, int cols, int cn, const PtrStep_<T> img1, const PtrStep_<T> img2,
const PtrStepf weights1, const PtrStepf weights2, PtrStep_<T> result, cudaStream_t stream)
void blendLinearCaller(int rows, int cols, int cn, const PtrStep<T>& img1, const PtrStep<T>& img2,
const PtrStepf& weights1, const PtrStepf& weights2, PtrStep<T> result, cudaStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(cols * cn, threads.x), divUp(rows, threads.y));
......@@ -80,14 +79,14 @@ namespace cv { namespace gpu
cudaSafeCall(cudaDeviceSynchronize());
}
template void blendLinearCaller<uchar>(int, int, int, const PtrStep, const PtrStep,
const PtrStepf, const PtrStepf, PtrStep, cudaStream_t stream);
template void blendLinearCaller<float>(int, int, int, const PtrStepf, const PtrStepf,
const PtrStepf, const PtrStepf, PtrStepf, cudaStream_t stream);
template void blendLinearCaller<uchar>(int, int, int, const PtrStep<uchar>&, const PtrStep<uchar>&,
const PtrStepf&, const PtrStepf&, PtrStep<uchar>, cudaStream_t stream);
template void blendLinearCaller<float>(int, int, int, const PtrStep<float>&, const PtrStep<float>&,
const PtrStepf&, const PtrStepf&, PtrStep<float>, cudaStream_t stream);
__global__ void blendLinearKernel8UC4(int rows, int cols, const PtrStep img1, const PtrStep img2,
const PtrStepf weights1, const PtrStepf weights2, PtrStep result)
__global__ void blendLinearKernel8UC4(int rows, int cols, const PtrStepb img1, const PtrStepb img2,
const PtrStepf weights1, const PtrStepf weights2, PtrStepb result)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
......@@ -107,8 +106,8 @@ namespace cv { namespace gpu
}
void blendLinearCaller8UC4(int rows, int cols, const PtrStep img1, const PtrStep img2,
const PtrStepf weights1, const PtrStepf weights2, PtrStep result, cudaStream_t stream)
void blendLinearCaller8UC4(int rows, int cols, const PtrStepb& img1, const PtrStepb& img2,
const PtrStepf& weights1, const PtrStepf& weights2, PtrStepb result, cudaStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
......
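The grid dimensions above come from divUp, the module's integer ceiling division. A one-line sketch matching how it is used here:

// Round-up integer division: for 640 columns and a 16-wide block,
// divUp(640, 16) == 40; divUp(641, 16) == 41.
__host__ __device__ __forceinline__ int divUp(int total, int grain)
{
    return (total + grain - 1) / grain;
}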
......@@ -48,7 +48,7 @@ using namespace cv::gpu;
namespace cv { namespace gpu { namespace canny
{
__global__ void calcSobelRowPass(const PtrStep src, PtrStepi dx_buf, PtrStepi dy_buf, int rows, int cols)
__global__ void calcSobelRowPass(const PtrStepb src, PtrStepi dx_buf, PtrStepi dy_buf, int rows, int cols)
{
__shared__ int smem[16][18];
......@@ -73,7 +73,7 @@ namespace cv { namespace gpu { namespace canny
}
}
void calcSobelRowPass_gpu(PtrStep src, PtrStepi dx_buf, PtrStepi dy_buf, int rows, int cols)
void calcSobelRowPass_gpu(PtrStepb src, PtrStepi dx_buf, PtrStepi dy_buf, int rows, int cols)
{
dim3 block(16, 16, 1);
dim3 grid(divUp(cols, block.x), divUp(rows, block.y), 1);
......@@ -468,7 +468,7 @@ namespace cv { namespace gpu { namespace canny
}
}
__global__ void getEdges(PtrStepi map, PtrStep dst, int rows, int cols)
__global__ void getEdges(PtrStepi map, PtrStepb dst, int rows, int cols)
{
const int j = blockIdx.x * 16 + threadIdx.x;
const int i = blockIdx.y * 16 + threadIdx.y;
......@@ -477,7 +477,7 @@ namespace cv { namespace gpu { namespace canny
dst.ptr(i)[j] = (uchar)(-(map.ptr(i + 1)[j + 1] >> 1));
}
void getEdges_gpu(PtrStepi map, PtrStep dst, int rows, int cols)
void getEdges_gpu(PtrStepi map, PtrStepb dst, int rows, int cols)
{
dim3 block(16, 16, 1);
dim3 grid(divUp(cols, block.x), divUp(rows, block.y), 1);
......
......@@ -221,7 +221,7 @@ namespace cv { namespace gpu { namespace device
};
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, traits) \
void name(const DevMem2D& src, const DevMem2D& dst, cudaStream_t stream) \
void name(const DevMem2Db& src, const DevMem2Db& dst, cudaStream_t stream) \
{ \
traits::functor_type functor = traits::create_functor(); \
typedef typename traits::functor_type::argument_type src_t; \
......
......@@ -66,7 +66,7 @@ namespace filter_column
}
template <int KERNEL_SIZE, typename T, typename D, typename B>
__global__ void linearColumnFilter(const DevMem2D_<T> src, PtrStep_<D> dst, int anchor, const B b)
__global__ void linearColumnFilter(const DevMem2D_<T> src, PtrStep<D> dst, int anchor, const B b)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_t;
......@@ -133,7 +133,7 @@ namespace cv { namespace gpu { namespace filters
}
template <typename T, typename D>
void linearColumnFilter_gpu(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream)
void linearColumnFilter_gpu(const DevMem2Db& src, const DevMem2Db& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream)
{
typedef void (*caller_t)(const DevMem2D_<T>& src, const DevMem2D_<D>& dst, int anchor, cudaStream_t stream);
static const caller_t callers[5][17] =
......@@ -240,11 +240,11 @@ namespace cv { namespace gpu { namespace filters
callers[brd_type][ksize]((DevMem2D_<T>)src, (DevMem2D_<D>)dst, anchor, stream);
}
template void linearColumnFilter_gpu<float , uchar >(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
template void linearColumnFilter_gpu<float4, uchar4>(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
//template void linearColumnFilter_gpu<float , short >(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
//template void linearColumnFilter_gpu<float2, short2>(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
template void linearColumnFilter_gpu<float3, short3>(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
template void linearColumnFilter_gpu<float , int >(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
template void linearColumnFilter_gpu<float , float >(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
template void linearColumnFilter_gpu<float , uchar >(const DevMem2Db& src, const DevMem2Db& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
template void linearColumnFilter_gpu<float4, uchar4>(const DevMem2Db& src, const DevMem2Db& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
//template void linearColumnFilter_gpu<float , short >(const DevMem2Db& src, const DevMem2Db& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
//template void linearColumnFilter_gpu<float2, short2>(const DevMem2Db& src, const DevMem2Db& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
template void linearColumnFilter_gpu<float3, short3>(const DevMem2Db& src, const DevMem2Db& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
template void linearColumnFilter_gpu<float , int >(const DevMem2Db& src, const DevMem2Db& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
template void linearColumnFilter_gpu<float , float >(const DevMem2Db& src, const DevMem2Db& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
}}}
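The block of `template void linearColumnFilter_gpu<...>` lines above is explicit instantiation: the template is defined in a .cu file, so every type pair the host wrappers may request has to be instantiated there. A reduced sketch of the pattern, with a hypothetical caller name:

#include "opencv2/gpu/devmem2d.hpp"  // assumed header path

// filter_sketch.cu -- define the template where nvcc can see it, then
// force the instantiations the host-side wrappers will link against.
template <typename T, typename D>
void filterCallerSketch(const cv::gpu::DevMem2Db& src, const cv::gpu::DevMem2Db& dst,
                        cudaStream_t stream)
{
    // kernel launch would go here
}

template void filterCallerSketch<float, unsigned char>(const cv::gpu::DevMem2Db&, const cv::gpu::DevMem2Db&, cudaStream_t);
template void filterCallerSketch<float, float>(const cv::gpu::DevMem2Db&, const cv::gpu::DevMem2Db&, cudaStream_t);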
......@@ -66,7 +66,7 @@ namespace cv { namespace gpu { namespace imgproc
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
B<T> brd(src.rows, src.cols, VecTraits<T>::make(borderValue));
BorderReader< PtrStep_<T>, B<T> > brdSrc(src, brd);
BorderReader< PtrStep<T>, B<T> > brdSrc(src, brd);
copyMakeBorder<<<grid, block, 0, stream>>>(brdSrc, dst, top, left);
cudaSafeCall( cudaGetLastError() );
......@@ -76,7 +76,7 @@ namespace cv { namespace gpu { namespace imgproc
}
};
template <typename T, int cn> void copyMakeBorder_gpu(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode,
template <typename T, int cn> void copyMakeBorder_gpu(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode,
const T* borderValue, cudaStream_t stream)
{
typedef typename TypeVec<T, cn>::vec_type vec_type;
......@@ -95,33 +95,33 @@ namespace cv { namespace gpu { namespace imgproc
callers[borderMode](DevMem2D_<vec_type>(src), DevMem2D_<vec_type>(dst), top, left, borderValue, stream);
}
template void copyMakeBorder_gpu<uchar, 1>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<uchar, 2>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<uchar, 3>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<uchar, 4>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<uchar, 1>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<uchar, 2>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<uchar, 3>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<uchar, 4>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<schar, 1>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<schar, 2>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<schar, 3>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<schar, 4>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<schar, 1>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<schar, 2>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<schar, 3>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<schar, 4>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<ushort, 1>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<ushort, 2>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<ushort, 3>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<ushort, 4>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<ushort, 1>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<ushort, 2>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<ushort, 3>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<ushort, 4>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<short, 1>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<short, 2>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<short, 3>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<short, 4>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<short, 1>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<short, 2>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<short, 3>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<short, 4>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<int, 1>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<int, 2>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<int, 3>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<int, 4>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<int, 1>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<int, 2>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<int, 3>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<int, 4>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<float, 1>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<float, 2>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<float, 3>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<float, 4>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<float, 1>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<float, 2>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<float, 3>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<float, 4>(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
}}}
......@@ -105,7 +105,7 @@ namespace cv { namespace gpu { namespace histograms
if (x + 3 < cols) addByte(s_WarpHist, (data >> 24) & 0xFFU, tag);
}
__global__ void histogram256(const PtrStep_<uint> d_Data, uint* d_PartialHistograms, uint dataCount, uint cols)
__global__ void histogram256(const PtrStep<uint> d_Data, uint* d_PartialHistograms, uint dataCount, uint cols)
{
//Per-warp subhistogram storage
__shared__ uint s_Hist[HISTOGRAM256_THREADBLOCK_MEMORY];
......@@ -171,7 +171,7 @@ namespace cv { namespace gpu { namespace histograms
d_Histogram[blockIdx.x] = saturate_cast<int>(data[0]);
}
void histogram256_gpu(DevMem2D src, int* hist, uint* buf, cudaStream_t stream)
void histogram256_gpu(DevMem2Db src, int* hist, uint* buf, cudaStream_t stream)
{
histogram256<<<PARTIAL_HISTOGRAM256_COUNT, HISTOGRAM256_THREADBLOCK_SIZE, 0, stream>>>(
DevMem2D_<uint>(src),
......@@ -191,7 +191,7 @@ namespace cv { namespace gpu { namespace histograms
__constant__ int c_lut[256];
__global__ void equalizeHist(const DevMem2D src, PtrStep dst)
__global__ void equalizeHist(const DevMem2Db src, PtrStepb dst)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
......@@ -204,7 +204,7 @@ namespace cv { namespace gpu { namespace histograms
}
}
void equalizeHist_gpu(DevMem2D src, DevMem2D dst, const int* lut, cudaStream_t stream)
void equalizeHist_gpu(DevMem2Db src, DevMem2Db dst, const int* lut, cudaStream_t stream)
{
dim3 block(16, 16);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
......
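equalizeHist reads its 256-entry lookup table from constant memory (the `__constant__ int c_lut[256]` above). A minimal sketch of the host-side upload, with a hypothetical helper name:

#include <cuda_runtime.h>

__constant__ int c_lut_sketch[256];  // stand-in for the c_lut the kernel reads

// Hypothetical upload: copy the precomputed histogram-equalization LUT
// into constant memory on the given stream before launching equalizeHist.
void uploadLutSketch(const int* lut_host, cudaStream_t stream)
{
    cudaMemcpyToSymbolAsync(c_lut_sketch, lut_host, 256 * sizeof(int), 0,
                            cudaMemcpyHostToDevice, stream);
}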
......@@ -187,7 +187,7 @@ __global__ void compute_hists_kernel_many_blocks(const int img_block_width, cons
void compute_hists(int nbins, int block_stride_x, int block_stride_y,
int height, int width, const DevMem2Df& grad,
const DevMem2D& qangle, float sigma, float* block_hists)
const DevMem2Db& qangle, float sigma, float* block_hists)
{
const int nblocks = 1;
......@@ -614,8 +614,8 @@ __global__ void compute_gradients_8UC4_kernel(int height, int width, const PtrEl
}
void compute_gradients_8UC4(int nbins, int height, int width, const DevMem2D& img,
float angle_scale, DevMem2Df grad, DevMem2D qangle, bool correct_gamma)
void compute_gradients_8UC4(int nbins, int height, int width, const DevMem2Db& img,
float angle_scale, DevMem2Df grad, DevMem2Db qangle, bool correct_gamma)
{
const int nthreads = 256;
......@@ -686,8 +686,8 @@ __global__ void compute_gradients_8UC1_kernel(int height, int width, const PtrEl
}
void compute_gradients_8UC1(int nbins, int height, int width, const DevMem2D& img,
float angle_scale, DevMem2Df grad, DevMem2D qangle, bool correct_gamma)
void compute_gradients_8UC1(int nbins, int height, int width, const DevMem2Db& img,
float angle_scale, DevMem2Df grad, DevMem2Db qangle, bool correct_gamma)
{
const int nthreads = 256;
......@@ -734,7 +734,7 @@ __global__ void resize_for_hog_kernel(float sx, float sy, DevMem2D_<uchar4> dst,
}
template<class T, class TEX>
static void resize_for_hog(const DevMem2D& src, DevMem2D dst, TEX& tex)
static void resize_for_hog(const DevMem2Db& src, DevMem2Db dst, TEX& tex)
{
tex.filterMode = cudaFilterModeLinear;
......@@ -765,7 +765,7 @@ static void resize_for_hog(const DevMem2D& src, DevMem2D dst, TEX& tex)
cudaSafeCall( cudaUnbindTexture(tex) );
}
void resize_8UC1(const DevMem2D& src, DevMem2D dst) { resize_for_hog<uchar> (src, dst, resize8UC1_tex); }
void resize_8UC4(const DevMem2D& src, DevMem2D dst) { resize_for_hog<uchar4>(src, dst, resize8UC4_tex); }
void resize_8UC1(const DevMem2Db& src, DevMem2Db dst) { resize_for_hog<uchar> (src, dst, resize8UC1_tex); }
void resize_8UC4(const DevMem2Db& src, DevMem2Db dst) { resize_for_hog<uchar4>(src, dst, resize8UC4_tex); }
}}}
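resize_for_hog samples the source through a 2D texture reference with linear filtering. A sketch of the bind it implies, using the legacy texture-reference API of this era; the names here are hypothetical:

#include <cuda_runtime.h>
#include "opencv2/gpu/devmem2d.hpp"  // assumed header path

texture<unsigned char, 2, cudaReadModeNormalizedFloat> resizeTexSketch;

// Bind the pitched image held by a DevMem2Db to the texture reference so
// the resize kernel can read bilinearly interpolated values.
void bindForResizeSketch(const cv::gpu::DevMem2Db& src)
{
    resizeTexSketch.filterMode = cudaFilterModeLinear;
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>();
    cudaBindTexture2D(0, resizeTexSketch, src.data, desc, src.cols, src.rows, src.step);
}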
......@@ -145,7 +145,7 @@ namespace cv { namespace gpu { namespace imgproc
}
}
extern "C" void meanShiftFiltering_gpu(const DevMem2D& src, DevMem2D dst, int sp, int sr, int maxIter, float eps)
extern "C" void meanShiftFiltering_gpu(const DevMem2Db& src, DevMem2Db dst, int sp, int sr, int maxIter, float eps)
{
dim3 grid(1, 1, 1);
dim3 threads(32, 8, 1);
......@@ -161,7 +161,7 @@ namespace cv { namespace gpu { namespace imgproc
cudaSafeCall( cudaDeviceSynchronize() );
cudaSafeCall( cudaUnbindTexture( tex_meanshift ) );
}
extern "C" void meanShiftProc_gpu(const DevMem2D& src, DevMem2D dstr, DevMem2D dstsp, int sp, int sr, int maxIter, float eps)
extern "C" void meanShiftProc_gpu(const DevMem2Db& src, DevMem2Db dstr, DevMem2Db dstsp, int sp, int sr, int maxIter, float eps)
{
dim3 grid(1, 1, 1);
dim3 threads(32, 8, 1);
......@@ -281,7 +281,7 @@ namespace cv { namespace gpu { namespace imgproc
}
void drawColorDisp_gpu(const DevMem2D& src, const DevMem2D& dst, int ndisp, const cudaStream_t& stream)
void drawColorDisp_gpu(const DevMem2Db& src, const DevMem2Db& dst, int ndisp, const cudaStream_t& stream)
{
dim3 threads(16, 16, 1);
dim3 grid(1, 1, 1);
......@@ -295,7 +295,7 @@ namespace cv { namespace gpu { namespace imgproc
cudaSafeCall( cudaDeviceSynchronize() );
}
void drawColorDisp_gpu(const DevMem2D_<short>& src, const DevMem2D& dst, int ndisp, const cudaStream_t& stream)
void drawColorDisp_gpu(const DevMem2D_<short>& src, const DevMem2Db& dst, int ndisp, const cudaStream_t& stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
......@@ -360,7 +360,7 @@ namespace cv { namespace gpu { namespace imgproc
cudaSafeCall( cudaDeviceSynchronize() );
}
void reprojectImageTo3D_gpu(const DevMem2D& disp, const DevMem2Df& xyzw, const float* q, const cudaStream_t& stream)
void reprojectImageTo3D_gpu(const DevMem2Db& disp, const DevMem2Df& xyzw, const float* q, const cudaStream_t& stream)
{
reprojectImageTo3D_caller(disp, xyzw, q, stream);
}
......@@ -406,7 +406,7 @@ namespace cv { namespace gpu { namespace imgproc
texture<float, 2> harrisDyTex;
__global__ void cornerHarris_kernel(const int cols, const int rows, const int block_size, const float k,
PtrStep dst)
PtrStepb dst)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
......@@ -440,7 +440,7 @@ namespace cv { namespace gpu { namespace imgproc
template <typename BR, typename BC>
__global__ void cornerHarris_kernel(const int cols, const int rows, const int block_size, const float k,
PtrStep dst, BR border_row, BC border_col)
PtrStepb dst, BR border_row, BC border_col)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
......@@ -474,7 +474,7 @@ namespace cv { namespace gpu { namespace imgproc
}
}
void cornerHarris_caller(const int block_size, const float k, const DevMem2D Dx, const DevMem2D Dy, DevMem2D dst,
void cornerHarris_caller(const int block_size, const float k, const DevMem2Db Dx, const DevMem2Db Dy, DevMem2Db dst,
int border_type)
{
const int rows = Dx.rows;
......@@ -518,7 +518,7 @@ namespace cv { namespace gpu { namespace imgproc
texture<float, 2> minEigenValDyTex;
__global__ void cornerMinEigenVal_kernel(const int cols, const int rows, const int block_size,
PtrStep dst)
PtrStepb dst)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
......@@ -555,7 +555,7 @@ namespace cv { namespace gpu { namespace imgproc
template <typename BR, typename BC>
__global__ void cornerMinEigenVal_kernel(const int cols, const int rows, const int block_size,
PtrStep dst, BR border_row, BC border_col)
PtrStepb dst, BR border_row, BC border_col)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
......@@ -591,7 +591,7 @@ namespace cv { namespace gpu { namespace imgproc
}
}
void cornerMinEigenVal_caller(const int block_size, const DevMem2D Dx, const DevMem2D Dy, DevMem2D dst,
void cornerMinEigenVal_caller(const int block_size, const DevMem2Db Dx, const DevMem2Db Dy, DevMem2Db dst,
int border_type)
{
const int rows = Dx.rows;
......@@ -631,7 +631,7 @@ namespace cv { namespace gpu { namespace imgproc
////////////////////////////// Column Sum //////////////////////////////////////
__global__ void column_sumKernel_32F(int cols, int rows, const PtrStep src, const PtrStep dst)
__global__ void column_sumKernel_32F(int cols, int rows, const PtrStepb src, const PtrStepb dst)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
......@@ -652,7 +652,7 @@ namespace cv { namespace gpu { namespace imgproc
}
void columnSum_32F(const DevMem2D src, const DevMem2D dst)
void columnSum_32F(const DevMem2Db src, const DevMem2Db dst)
{
dim3 threads(256);
dim3 grid(divUp(src.cols, threads.x));
......@@ -667,7 +667,7 @@ namespace cv { namespace gpu { namespace imgproc
//////////////////////////////////////////////////////////////////////////
// mulSpectrums
__global__ void mulSpectrumsKernel(const PtrStep_<cufftComplex> a, const PtrStep_<cufftComplex> b,
__global__ void mulSpectrumsKernel(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b,
DevMem2D_<cufftComplex> c)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
......@@ -680,7 +680,7 @@ namespace cv { namespace gpu { namespace imgproc
}
void mulSpectrums(const PtrStep_<cufftComplex> a, const PtrStep_<cufftComplex> b,
void mulSpectrums(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b,
DevMem2D_<cufftComplex> c)
{
dim3 threads(256);
......@@ -697,7 +697,7 @@ namespace cv { namespace gpu { namespace imgproc
// mulSpectrums_CONJ
__global__ void mulSpectrumsKernel_CONJ(
const PtrStep_<cufftComplex> a, const PtrStep_<cufftComplex> b,
const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b,
DevMem2D_<cufftComplex> c)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
......@@ -710,7 +710,7 @@ namespace cv { namespace gpu { namespace imgproc
}
void mulSpectrums_CONJ(const PtrStep_<cufftComplex> a, const PtrStep_<cufftComplex> b,
void mulSpectrums_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b,
DevMem2D_<cufftComplex> c)
{
dim3 threads(256);
......@@ -727,7 +727,7 @@ namespace cv { namespace gpu { namespace imgproc
// mulAndScaleSpectrums
__global__ void mulAndScaleSpectrumsKernel(
const PtrStep_<cufftComplex> a, const PtrStep_<cufftComplex> b,
const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b,
float scale, DevMem2D_<cufftComplex> c)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
......@@ -741,7 +741,7 @@ namespace cv { namespace gpu { namespace imgproc
}
void mulAndScaleSpectrums(const PtrStep_<cufftComplex> a, const PtrStep_<cufftComplex> b,
void mulAndScaleSpectrums(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b,
float scale, DevMem2D_<cufftComplex> c)
{
dim3 threads(256);
......@@ -758,7 +758,7 @@ namespace cv { namespace gpu { namespace imgproc
// mulAndScaleSpectrums_CONJ
__global__ void mulAndScaleSpectrumsKernel_CONJ(
const PtrStep_<cufftComplex> a, const PtrStep_<cufftComplex> b,
const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b,
float scale, DevMem2D_<cufftComplex> c)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
......@@ -772,7 +772,7 @@ namespace cv { namespace gpu { namespace imgproc
}
void mulAndScaleSpectrums_CONJ(const PtrStep_<cufftComplex> a, const PtrStep_<cufftComplex> b,
void mulAndScaleSpectrums_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b,
float scale, DevMem2D_<cufftComplex> c)
{
dim3 threads(256);
......
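mulSpectrums and its variants operate on packed cufftComplex spectra. A sketch of the per-element body, using cuCmulf/cuConjf from cuComplex.h; the kernel name is hypothetical:

#include <cuComplex.h>
#include <cufft.h>
#include "opencv2/gpu/devmem2d.hpp"  // assumed header path

// Element-wise complex multiply, c = a * b; the _CONJ variants would use
// cuCmulf(a(y, x), cuConjf(b(y, x))) instead.
__global__ void mulSpectrumsSketch(const cv::gpu::PtrStep<cufftComplex> a,
                                   const cv::gpu::PtrStep<cufftComplex> b,
                                   cv::gpu::DevMem2D_<cufftComplex> c)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x < c.cols && y < c.rows)
        c(y, x) = cuCmulf(a(y, x), b(y, x));
}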
......@@ -81,7 +81,7 @@ __device__ __forceinline__ float4 sub(uchar4 a, uchar4 b) { return make_float4(a
template <typename T, int cn>
__global__ void matchTemplateNaiveKernel_CCORR(
int w, int h, const PtrStep image, const PtrStep templ,
int w, int h, const PtrStepb image, const PtrStepb templ,
DevMem2Df result)
{
typedef typename TypeVec<T, cn>::vec_type Type;
......@@ -107,7 +107,7 @@ __global__ void matchTemplateNaiveKernel_CCORR(
}
void matchTemplateNaive_CCORR_32F(const DevMem2D image, const DevMem2D templ,
void matchTemplateNaive_CCORR_32F(const DevMem2Db image, const DevMem2Db templ,
DevMem2Df result, int cn)
{
dim3 threads(32, 8);
......@@ -138,7 +138,7 @@ void matchTemplateNaive_CCORR_32F(const DevMem2D image, const DevMem2D templ,
}
void matchTemplateNaive_CCORR_8U(const DevMem2D image, const DevMem2D templ,
void matchTemplateNaive_CCORR_8U(const DevMem2Db image, const DevMem2Db templ,
DevMem2Df result, int cn)
{
dim3 threads(32, 8);
......@@ -171,7 +171,7 @@ void matchTemplateNaive_CCORR_8U(const DevMem2D image, const DevMem2D templ,
template <typename T, int cn>
__global__ void matchTemplateNaiveKernel_SQDIFF(
int w, int h, const PtrStep image, const PtrStep templ,
int w, int h, const PtrStepb image, const PtrStepb templ,
DevMem2Df result)
{
typedef typename TypeVec<T, cn>::vec_type Type;
......@@ -201,7 +201,7 @@ __global__ void matchTemplateNaiveKernel_SQDIFF(
}
void matchTemplateNaive_SQDIFF_32F(const DevMem2D image, const DevMem2D templ,
void matchTemplateNaive_SQDIFF_32F(const DevMem2Db image, const DevMem2Db templ,
DevMem2Df result, int cn)
{
dim3 threads(32, 8);
......@@ -232,7 +232,7 @@ void matchTemplateNaive_SQDIFF_32F(const DevMem2D image, const DevMem2D templ,
}
void matchTemplateNaive_SQDIFF_8U(const DevMem2D image, const DevMem2D templ,
void matchTemplateNaive_SQDIFF_8U(const DevMem2Db image, const DevMem2Db templ,
DevMem2Df result, int cn)
{
dim3 threads(32, 8);
......@@ -265,7 +265,7 @@ void matchTemplateNaive_SQDIFF_8U(const DevMem2D image, const DevMem2D templ,
template <int cn>
__global__ void matchTemplatePreparedKernel_SQDIFF_8U(
int w, int h, const PtrStep_<unsigned long long> image_sqsum,
int w, int h, const PtrStep<unsigned long long> image_sqsum,
unsigned int templ_sqsum, DevMem2Df result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
......@@ -338,7 +338,7 @@ __device__ float normAcc_SQDIFF(float num, float denum)
template <int cn>
__global__ void matchTemplatePreparedKernel_SQDIFF_NORMED_8U(
int w, int h, const PtrStep_<unsigned long long> image_sqsum,
int w, int h, const PtrStep<unsigned long long> image_sqsum,
unsigned int templ_sqsum, DevMem2Df result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
......@@ -389,7 +389,7 @@ void matchTemplatePrepared_SQDIFF_NORMED_8U(
__global__ void matchTemplatePreparedKernel_CCOFF_8U(
int w, int h, float templ_sum_scale,
const PtrStep_<unsigned int> image_sum, DevMem2Df result)
const PtrStep<unsigned int> image_sum, DevMem2Df result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
......@@ -421,8 +421,8 @@ void matchTemplatePrepared_CCOFF_8U(
__global__ void matchTemplatePreparedKernel_CCOFF_8UC2(
int w, int h, float templ_sum_scale_r, float templ_sum_scale_g,
const PtrStep_<unsigned int> image_sum_r,
const PtrStep_<unsigned int> image_sum_g,
const PtrStep<unsigned int> image_sum_r,
const PtrStep<unsigned int> image_sum_g,
DevMem2Df result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
......@@ -466,9 +466,9 @@ __global__ void matchTemplatePreparedKernel_CCOFF_8UC3(
float templ_sum_scale_r,
float templ_sum_scale_g,
float templ_sum_scale_b,
const PtrStep_<unsigned int> image_sum_r,
const PtrStep_<unsigned int> image_sum_g,
const PtrStep_<unsigned int> image_sum_b,
const PtrStep<unsigned int> image_sum_r,
const PtrStep<unsigned int> image_sum_g,
const PtrStep<unsigned int> image_sum_b,
DevMem2Df result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
......@@ -523,10 +523,10 @@ __global__ void matchTemplatePreparedKernel_CCOFF_8UC4(
float templ_sum_scale_g,
float templ_sum_scale_b,
float templ_sum_scale_a,
const PtrStep_<unsigned int> image_sum_r,
const PtrStep_<unsigned int> image_sum_g,
const PtrStep_<unsigned int> image_sum_b,
const PtrStep_<unsigned int> image_sum_a,
const PtrStep<unsigned int> image_sum_r,
const PtrStep<unsigned int> image_sum_g,
const PtrStep<unsigned int> image_sum_b,
const PtrStep<unsigned int> image_sum_a,
DevMem2Df result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
......@@ -586,8 +586,8 @@ void matchTemplatePrepared_CCOFF_8UC4(
__global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8U(
int w, int h, float weight,
float templ_sum_scale, float templ_sqsum_scale,
const PtrStep_<unsigned int> image_sum,
const PtrStep_<unsigned long long> image_sqsum,
const PtrStep<unsigned int> image_sum,
const PtrStep<unsigned long long> image_sqsum,
DevMem2Df result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
......@@ -633,8 +633,8 @@ __global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8UC2(
int w, int h, float weight,
float templ_sum_scale_r, float templ_sum_scale_g,
float templ_sqsum_scale,
const PtrStep_<unsigned int> image_sum_r, const PtrStep_<unsigned long long> image_sqsum_r,
const PtrStep_<unsigned int> image_sum_g, const PtrStep_<unsigned long long> image_sqsum_g,
const PtrStep<unsigned int> image_sum_r, const PtrStep<unsigned long long> image_sqsum_r,
const PtrStep<unsigned int> image_sum_g, const PtrStep<unsigned long long> image_sqsum_g,
DevMem2Df result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
......@@ -697,9 +697,9 @@ __global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8UC3(
int w, int h, float weight,
float templ_sum_scale_r, float templ_sum_scale_g, float templ_sum_scale_b,
float templ_sqsum_scale,
const PtrStep_<unsigned int> image_sum_r, const PtrStep_<unsigned long long> image_sqsum_r,
const PtrStep_<unsigned int> image_sum_g, const PtrStep_<unsigned long long> image_sqsum_g,
const PtrStep_<unsigned int> image_sum_b, const PtrStep_<unsigned long long> image_sqsum_b,
const PtrStep<unsigned int> image_sum_r, const PtrStep<unsigned long long> image_sqsum_r,
const PtrStep<unsigned int> image_sum_g, const PtrStep<unsigned long long> image_sqsum_g,
const PtrStep<unsigned int> image_sum_b, const PtrStep<unsigned long long> image_sqsum_b,
DevMem2Df result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
......@@ -775,10 +775,10 @@ __global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8UC4(
int w, int h, float weight,
float templ_sum_scale_r, float templ_sum_scale_g, float templ_sum_scale_b,
float templ_sum_scale_a, float templ_sqsum_scale,
const PtrStep_<unsigned int> image_sum_r, const PtrStep_<unsigned long long> image_sqsum_r,
const PtrStep_<unsigned int> image_sum_g, const PtrStep_<unsigned long long> image_sqsum_g,
const PtrStep_<unsigned int> image_sum_b, const PtrStep_<unsigned long long> image_sqsum_b,
const PtrStep_<unsigned int> image_sum_a, const PtrStep_<unsigned long long> image_sqsum_a,
const PtrStep<unsigned int> image_sum_r, const PtrStep<unsigned long long> image_sqsum_r,
const PtrStep<unsigned int> image_sum_g, const PtrStep<unsigned long long> image_sqsum_g,
const PtrStep<unsigned int> image_sum_b, const PtrStep<unsigned long long> image_sqsum_b,
const PtrStep<unsigned int> image_sum_a, const PtrStep<unsigned long long> image_sqsum_a,
DevMem2Df result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
......@@ -863,7 +863,7 @@ void matchTemplatePrepared_CCOFF_NORMED_8UC4(
template <int cn>
__global__ void normalizeKernel_8U(
int w, int h, const PtrStep_<unsigned long long> image_sqsum,
int w, int h, const PtrStep<unsigned long long> image_sqsum,
unsigned int templ_sqsum, DevMem2Df result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
......@@ -906,7 +906,7 @@ void normalize_8U(int w, int h, const DevMem2D_<unsigned long long> image_sqsum,
template <int cn>
__global__ void extractFirstChannel_32F(const PtrStep image, DevMem2Df result)
__global__ void extractFirstChannel_32F(const PtrStepb image, DevMem2Df result)
{
typedef typename TypeVec<float, cn>::vec_type Typef;
......@@ -921,7 +921,7 @@ __global__ void extractFirstChannel_32F(const PtrStep image, DevMem2Df result)
}
void extractFirstChannel_32F(const DevMem2D image, DevMem2Df result, int cn)
void extractFirstChannel_32F(const DevMem2Db image, DevMem2Df result, int cn)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
......
......@@ -73,10 +73,10 @@ namespace cv { namespace gpu { namespace device {
mat_dst[idx] = mat_src[idx];
}
}
typedef void (*CopyToFunc)(const DevMem2D& mat_src, const DevMem2D& mat_dst, const DevMem2D& mask, int channels, const cudaStream_t & stream);
typedef void (*CopyToFunc)(const DevMem2Db& mat_src, const DevMem2Db& mat_dst, const DevMem2Db& mask, int channels, const cudaStream_t & stream);
template<typename T>
void copy_to_with_mask_run(const DevMem2D& mat_src, const DevMem2D& mat_dst, const DevMem2D& mask, int channels, const cudaStream_t & stream)
void copy_to_with_mask_run(const DevMem2Db& mat_src, const DevMem2Db& mat_dst, const DevMem2Db& mask, int channels, const cudaStream_t & stream)
{
dim3 threadsPerBlock(16,16, 1);
dim3 numBlocks ( divUp(mat_src.cols * channels , threadsPerBlock.x) , divUp(mat_src.rows , threadsPerBlock.y), 1);
......@@ -89,7 +89,7 @@ namespace cv { namespace gpu { namespace device {
cudaSafeCall ( cudaDeviceSynchronize() );
}
void copy_to_with_mask(const DevMem2D& mat_src, DevMem2D mat_dst, int depth, const DevMem2D& mask, int channels, const cudaStream_t & stream)
void copy_to_with_mask(const DevMem2Db& mat_src, DevMem2Db mat_dst, int depth, const DevMem2Db& mask, int channels, const cudaStream_t & stream)
{
static CopyToFunc tab[8] =
{
......@@ -187,7 +187,7 @@ namespace cv { namespace gpu { namespace device {
}
}
template <typename T>
void set_to_gpu(const DevMem2D& mat, const T* scalar, const DevMem2D& mask, int channels, cudaStream_t stream)
void set_to_gpu(const DevMem2Db& mat, const T* scalar, const DevMem2Db& mask, int channels, cudaStream_t stream)
{
writeScalar(scalar);
......@@ -201,16 +201,16 @@ namespace cv { namespace gpu { namespace device {
cudaSafeCall ( cudaDeviceSynchronize() );
}
template void set_to_gpu<uchar >(const DevMem2D& mat, const uchar* scalar, const DevMem2D& mask, int channels, cudaStream_t stream);
template void set_to_gpu<schar >(const DevMem2D& mat, const schar* scalar, const DevMem2D& mask, int channels, cudaStream_t stream);
template void set_to_gpu<ushort>(const DevMem2D& mat, const ushort* scalar, const DevMem2D& mask, int channels, cudaStream_t stream);
template void set_to_gpu<short >(const DevMem2D& mat, const short* scalar, const DevMem2D& mask, int channels, cudaStream_t stream);
template void set_to_gpu<int >(const DevMem2D& mat, const int* scalar, const DevMem2D& mask, int channels, cudaStream_t stream);
template void set_to_gpu<float >(const DevMem2D& mat, const float* scalar, const DevMem2D& mask, int channels, cudaStream_t stream);
template void set_to_gpu<double>(const DevMem2D& mat, const double* scalar, const DevMem2D& mask, int channels, cudaStream_t stream);
template void set_to_gpu<uchar >(const DevMem2Db& mat, const uchar* scalar, const DevMem2Db& mask, int channels, cudaStream_t stream);
template void set_to_gpu<schar >(const DevMem2Db& mat, const schar* scalar, const DevMem2Db& mask, int channels, cudaStream_t stream);
template void set_to_gpu<ushort>(const DevMem2Db& mat, const ushort* scalar, const DevMem2Db& mask, int channels, cudaStream_t stream);
template void set_to_gpu<short >(const DevMem2Db& mat, const short* scalar, const DevMem2Db& mask, int channels, cudaStream_t stream);
template void set_to_gpu<int >(const DevMem2Db& mat, const int* scalar, const DevMem2Db& mask, int channels, cudaStream_t stream);
template void set_to_gpu<float >(const DevMem2Db& mat, const float* scalar, const DevMem2Db& mask, int channels, cudaStream_t stream);
template void set_to_gpu<double>(const DevMem2Db& mat, const double* scalar, const DevMem2Db& mask, int channels, cudaStream_t stream);
template <typename T>
void set_to_gpu(const DevMem2D& mat, const T* scalar, int channels, cudaStream_t stream)
void set_to_gpu(const DevMem2Db& mat, const T* scalar, int channels, cudaStream_t stream)
{
writeScalar(scalar);
......@@ -224,13 +224,13 @@ namespace cv { namespace gpu { namespace device {
cudaSafeCall ( cudaDeviceSynchronize() );
}
template void set_to_gpu<uchar >(const DevMem2D& mat, const uchar* scalar, int channels, cudaStream_t stream);
template void set_to_gpu<schar >(const DevMem2D& mat, const schar* scalar, int channels, cudaStream_t stream);
template void set_to_gpu<ushort>(const DevMem2D& mat, const ushort* scalar, int channels, cudaStream_t stream);
template void set_to_gpu<short >(const DevMem2D& mat, const short* scalar, int channels, cudaStream_t stream);
template void set_to_gpu<int >(const DevMem2D& mat, const int* scalar, int channels, cudaStream_t stream);
template void set_to_gpu<float >(const DevMem2D& mat, const float* scalar, int channels, cudaStream_t stream);
template void set_to_gpu<double>(const DevMem2D& mat, const double* scalar, int channels, cudaStream_t stream);
template void set_to_gpu<uchar >(const DevMem2Db& mat, const uchar* scalar, int channels, cudaStream_t stream);
template void set_to_gpu<schar >(const DevMem2Db& mat, const schar* scalar, int channels, cudaStream_t stream);
template void set_to_gpu<ushort>(const DevMem2Db& mat, const ushort* scalar, int channels, cudaStream_t stream);
template void set_to_gpu<short >(const DevMem2Db& mat, const short* scalar, int channels, cudaStream_t stream);
template void set_to_gpu<int >(const DevMem2Db& mat, const int* scalar, int channels, cudaStream_t stream);
template void set_to_gpu<float >(const DevMem2Db& mat, const float* scalar, int channels, cudaStream_t stream);
template void set_to_gpu<double>(const DevMem2Db& mat, const double* scalar, int channels, cudaStream_t stream);
///////////////////////////////////////////////////////////////////////////
//////////////////////////////// ConvertTo ////////////////////////////////
......@@ -297,7 +297,7 @@ namespace cv { namespace gpu { namespace device {
};
template<typename T, typename D>
void cvt_(const DevMem2D& src, const DevMem2D& dst, double alpha, double beta, cudaStream_t stream)
void cvt_(const DevMem2Db& src, const DevMem2Db& dst, double alpha, double beta, cudaStream_t stream)
{
cudaSafeCall( cudaSetDoubleForDevice(&alpha) );
cudaSafeCall( cudaSetDoubleForDevice(&beta) );
......@@ -305,10 +305,10 @@ namespace cv { namespace gpu { namespace device {
transform((DevMem2D_<T>)src, (DevMem2D_<D>)dst, op, stream);
}
void convert_gpu(const DevMem2D& src, int sdepth, const DevMem2D& dst, int ddepth, double alpha, double beta,
void convert_gpu(const DevMem2Db& src, int sdepth, const DevMem2Db& dst, int ddepth, double alpha, double beta,
cudaStream_t stream = 0)
{
typedef void (*caller_t)(const DevMem2D& src, const DevMem2D& dst, double alpha, double beta,
typedef void (*caller_t)(const DevMem2Db& src, const DevMem2Db& dst, double alpha, double beta,
cudaStream_t stream);
static const caller_t tab[8][8] =
......
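convert_gpu dispatches through an 8x8 table keyed by source and destination depth; each entry ends in a transform over DevMem2D_ views. A sketch of the per-pixel body for scalar types, with a plain cast standing in for OpenCV's saturate_cast:

#include "opencv2/gpu/devmem2d.hpp"  // assumed header path

// Hypothetical kernel behind one tab[sdepth][ddepth] entry:
// dst = src * alpha + beta, saturated to the destination type.
template <typename T, typename D>
__global__ void convertSketch(const cv::gpu::DevMem2D_<T> src, cv::gpu::PtrStep<D> dst,
                              float alpha, float beta)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x < src.cols && y < src.rows)
        dst(y, x) = static_cast<D>(src(y, x) * alpha + beta);  // saturate_cast<D> in the real code
}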
......@@ -51,7 +51,7 @@ using namespace cv::gpu::device;
namespace cv { namespace gpu { namespace imgproc
{
template <typename T, typename B> __global__ void pyrDown(const PtrStep_<T> src, PtrStep_<T> dst, const B b, int dst_cols)
template <typename T, typename B> __global__ void pyrDown(const PtrStep<T> src, PtrStep<T> dst, const B b, int dst_cols)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type value_type;
......@@ -139,7 +139,7 @@ namespace cv { namespace gpu { namespace imgproc
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T, int cn> void pyrDown_gpu(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream)
template <typename T, int cn> void pyrDown_gpu(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream)
{
typedef typename TypeVec<T, cn>::vec_type type;
......@@ -153,33 +153,33 @@ namespace cv { namespace gpu { namespace imgproc
callers[borderType](static_cast< DevMem2D_<type> >(src), static_cast< DevMem2D_<type> >(dst), stream);
}
template void pyrDown_gpu<uchar, 1>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<uchar, 2>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<uchar, 3>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<uchar, 4>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<schar, 1>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<schar, 2>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<schar, 3>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<schar, 4>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<ushort, 1>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<ushort, 2>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<ushort, 3>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<ushort, 4>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<short, 1>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<short, 2>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<short, 3>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<short, 4>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<int, 1>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<int, 2>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<int, 3>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<int, 4>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<float, 1>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<float, 2>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<float, 3>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<float, 4>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<uchar, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<uchar, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<uchar, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<uchar, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<schar, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<schar, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<schar, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<schar, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<ushort, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<ushort, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<ushort, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<ushort, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<short, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<short, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<short, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<short, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<int, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<int, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<int, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<int, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<float, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<float, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<float, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrDown_gpu<float, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
}}}
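These dispatchers receive untyped DevMem2Db views and static_cast them to DevMem2D_<type> before launching. That cast compiles only because DevMem2D_ keeps an explicit cross-type converting constructor; a self-contained sketch of the presumed mechanism (shape adapted from the pre-refactor class onto the new PtrStep base, so details may differ):

#include <cstddef>

// Minimal stand-in for the PtrStep base; the real one lives in devmem2d.hpp.
template <typename T> struct PtrStep
{
    T* data;
    std::size_t step;                              // stride in bytes
    PtrStep(T* data_, std::size_t step_) : data(data_), step(step_) {}
};

template <typename T> struct DevMem2D_ : public PtrStep<T>
{
    int cols, rows;

    DevMem2D_(int rows_, int cols_, T* data_, std::size_t step_)
        : PtrStep<T>(data_, step_), cols(cols_), rows(rows_) {}

    // Assumed: the explicit constructor reuses the same pointer and byte
    // step, changing only the element type, so the cast is a view
    // reinterpretation rather than a copy, e.g.
    //   DevMem2D_<float> typed = static_cast< DevMem2D_<float> >(byteView);
    template <typename U>
    explicit DevMem2D_(const DevMem2D_<U>& d)
        : PtrStep<T>((T*)d.data, d.step), cols(d.cols), rows(d.rows) {}
};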
......@@ -51,7 +51,7 @@ using namespace cv::gpu::device;
namespace cv { namespace gpu { namespace imgproc
{
template <typename T, typename B> __global__ void pyrUp(const PtrStep_<T> src, DevMem2D_<T> dst, const B b)
template <typename T, typename B> __global__ void pyrUp(const PtrStep<T> src, DevMem2D_<T> dst, const B b)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type value_type;
......@@ -134,7 +134,7 @@ namespace cv { namespace gpu { namespace imgproc
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T, int cn> void pyrUp_gpu(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream)
template <typename T, int cn> void pyrUp_gpu(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream)
{
typedef typename TypeVec<T, cn>::vec_type type;
......@@ -148,33 +148,33 @@ namespace cv { namespace gpu { namespace imgproc
callers[borderType](static_cast< DevMem2D_<type> >(src), static_cast< DevMem2D_<type> >(dst), stream);
}
template void pyrUp_gpu<uchar, 1>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<uchar, 2>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<uchar, 3>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<uchar, 4>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<schar, 1>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<schar, 2>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<schar, 3>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<schar, 4>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<ushort, 1>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<ushort, 2>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<ushort, 3>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<ushort, 4>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<short, 1>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<short, 2>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<short, 3>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<short, 4>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<int, 1>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<int, 2>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<int, 3>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<int, 4>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<float, 1>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<float, 2>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<float, 3>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<float, 4>(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<uchar, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<uchar, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<uchar, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<uchar, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<schar, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<schar, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<schar, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<schar, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<ushort, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<ushort, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<ushort, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<ushort, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<short, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<short, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<short, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<short, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<int, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<int, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<int, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<int, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<float, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<float, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<float, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<float, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
}}}
......@@ -88,8 +88,8 @@ namespace cv { namespace gpu { namespace imgproc
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
BrdReplicate<T> brd(src.rows, src.cols);
BorderReader< PtrStep_<T>, BrdReplicate<T> > brdSrc(src, brd);
Filter< BorderReader< PtrStep_<T>, BrdReplicate<T> > > filter_src(brdSrc);
BorderReader< PtrStep<T>, BrdReplicate<T> > brdSrc(src, brd);
Filter< BorderReader< PtrStep<T>, BrdReplicate<T> > > filter_src(brdSrc);
resize<<<grid, block, 0, stream>>>(filter_src, fx, fy, dst);
cudaSafeCall( cudaGetLastError() );
......@@ -103,7 +103,7 @@ namespace cv { namespace gpu { namespace imgproc
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
BrdReplicate<T> brd(src.rows, src.cols);
BorderReader< PtrStep_<T>, BrdReplicate<T> > brdSrc(src, brd);
BorderReader< PtrStep<T>, BrdReplicate<T> > brdSrc(src, brd);
resizeNN<<<grid, block, 0, stream>>>(brdSrc, fx, fy, dst);
cudaSafeCall( cudaGetLastError() );
......@@ -118,8 +118,8 @@ namespace cv { namespace gpu { namespace imgproc
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
BrdReplicate<T> brd(src.rows, src.cols);
BorderReader< PtrStep_<T>, BrdReplicate<T> > brdSrc(src, brd);
Filter< BorderReader< PtrStep_<T>, BrdReplicate<T> > > filter_src(brdSrc);
BorderReader< PtrStep<T>, BrdReplicate<T> > brdSrc(src, brd);
Filter< BorderReader< PtrStep<T>, BrdReplicate<T> > > filter_src(brdSrc);
resize<<<grid, block>>>(filter_src, fx, fy, dst);
cudaSafeCall( cudaGetLastError() );
......@@ -135,7 +135,7 @@ namespace cv { namespace gpu { namespace imgproc
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
BrdReplicate<T> brd(src.rows, src.cols);
BorderReader< PtrStep_<T>, BrdReplicate<T> > brdSrc(src, brd);
BorderReader< PtrStep<T>, BrdReplicate<T> > brdSrc(src, brd);
resizeNN<<<grid, block>>>(brdSrc, fx, fy, dst);
cudaSafeCall( cudaGetLastError() );
......@@ -220,7 +220,7 @@ namespace cv { namespace gpu { namespace imgproc
}
};
template <typename T> void resize_gpu(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream)
template <typename T> void resize_gpu(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream)
{
typedef void (*caller_t)(const DevMem2D_<T>& src, float fx, float fy, const DevMem2D_<T>& dst, cudaStream_t stream);
......@@ -232,33 +232,33 @@ namespace cv { namespace gpu { namespace imgproc
callers[interpolation](static_cast< DevMem2D_<T> >(src), fx, fy, static_cast< DevMem2D_<T> >(dst), stream);
}
template void resize_gpu<uchar >(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<uchar2>(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<uchar3>(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<uchar4>(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<uchar >(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<uchar2>(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<uchar3>(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<uchar4>(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<schar>(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<char2>(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<char3>(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<char4>(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<schar>(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<char2>(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<char3>(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<char4>(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<ushort >(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<ushort2>(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<ushort3>(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<ushort4>(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<ushort >(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<ushort2>(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<ushort3>(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<ushort4>(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<short >(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<short2>(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<short3>(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<short4>(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<short >(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<short2>(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<short3>(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<short4>(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<int >(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<int2>(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<int3>(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<int4>(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<int >(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<int2>(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<int3>(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<int4>(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<float >(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<float2>(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<float3>(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<float4>(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<float >(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
//template void resize_gpu<float2>(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<float3>(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
template void resize_gpu<float4>(const DevMem2Db& src, float fx, float fy, const DevMem2Db& dst, int interpolation, cudaStream_t stream);
}}}
......@@ -84,7 +84,7 @@ namespace filter_row
};
template <int KERNEL_SIZE, typename T, typename D, typename B>
__global__ void linearRowFilter(const DevMem2D_<T> src, PtrStep_<D> dst, int anchor, const B b)
__global__ void linearRowFilter(const DevMem2D_<T> src, PtrStep<D> dst, int anchor, const B b)
{
typedef typename SmemType<T>::smem_t smem_t;
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_t;
......@@ -156,7 +156,7 @@ namespace cv { namespace gpu { namespace filters
}
template <typename T, typename D>
void linearRowFilter_gpu(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream)
void linearRowFilter_gpu(const DevMem2Db& src, const DevMem2Db& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream)
{
typedef void (*caller_t)(const DevMem2D_<T>& src, const DevMem2D_<D>& dst, int anchor, cudaStream_t stream);
static const caller_t callers[5][17] =
......@@ -263,11 +263,11 @@ namespace cv { namespace gpu { namespace filters
callers[brd_type][ksize]((DevMem2D_<T>)src, (DevMem2D_<D>)dst, anchor, stream);
}
template void linearRowFilter_gpu<uchar , float >(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
template void linearRowFilter_gpu<uchar4, float4>(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
//template void linearRowFilter_gpu<short , float >(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
//template void linearRowFilter_gpu<short2, float2>(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
template void linearRowFilter_gpu<short3, float3>(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
template void linearRowFilter_gpu<int , float >(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
template void linearRowFilter_gpu<float , float >(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
template void linearRowFilter_gpu<uchar , float >(const DevMem2Db& src, const DevMem2Db& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
template void linearRowFilter_gpu<uchar4, float4>(const DevMem2Db& src, const DevMem2Db& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
//template void linearRowFilter_gpu<short , float >(const DevMem2Db& src, const DevMem2Db& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
//template void linearRowFilter_gpu<short2, float2>(const DevMem2Db& src, const DevMem2Db& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
template void linearRowFilter_gpu<short3, float3>(const DevMem2Db& src, const DevMem2Db& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
template void linearRowFilter_gpu<int , float >(const DevMem2Db& src, const DevMem2Db& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
template void linearRowFilter_gpu<float , float >(const DevMem2Db& src, const DevMem2Db& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
}}}
......@@ -89,8 +89,8 @@ namespace cv { namespace gpu { namespace split_merge {
//typedef double4 type3;
};
typedef void (*MergeFunction)(const DevMem2D* src, DevMem2D& dst, const cudaStream_t& stream);
typedef void (*SplitFunction)(const DevMem2D& src, DevMem2D* dst, const cudaStream_t& stream);
typedef void (*MergeFunction)(const DevMem2Db* src, DevMem2Db& dst, const cudaStream_t& stream);
typedef void (*SplitFunction)(const DevMem2Db& src, DevMem2Db* dst, const cudaStream_t& stream);
//------------------------------------------------------------
// Merge
......@@ -224,7 +224,7 @@ namespace cv { namespace gpu { namespace split_merge {
template <typename T>
static void mergeC2_(const DevMem2D* src, DevMem2D& dst, const cudaStream_t& stream)
static void mergeC2_(const DevMem2Db* src, DevMem2Db& dst, const cudaStream_t& stream)
{
dim3 blockDim(32, 8);
dim3 gridDim(divUp(dst.cols, blockDim.x), divUp(dst.rows, blockDim.y));
......@@ -240,7 +240,7 @@ namespace cv { namespace gpu { namespace split_merge {
template <typename T>
static void mergeC3_(const DevMem2D* src, DevMem2D& dst, const cudaStream_t& stream)
static void mergeC3_(const DevMem2Db* src, DevMem2Db& dst, const cudaStream_t& stream)
{
dim3 blockDim(32, 8);
dim3 gridDim(divUp(dst.cols, blockDim.x), divUp(dst.rows, blockDim.y));
......@@ -257,7 +257,7 @@ namespace cv { namespace gpu { namespace split_merge {
template <typename T>
static void mergeC4_(const DevMem2D* src, DevMem2D& dst, const cudaStream_t& stream)
static void mergeC4_(const DevMem2Db* src, DevMem2Db& dst, const cudaStream_t& stream)
{
dim3 blockDim(32, 8);
dim3 gridDim(divUp(dst.cols, blockDim.x), divUp(dst.rows, blockDim.y));
......@@ -274,7 +274,7 @@ namespace cv { namespace gpu { namespace split_merge {
}
extern "C" void merge_caller(const DevMem2D* src, DevMem2D& dst,
extern "C" void merge_caller(const DevMem2Db* src, DevMem2Db& dst,
int total_channels, size_t elem_size,
const cudaStream_t& stream)
{
......@@ -433,7 +433,7 @@ namespace cv { namespace gpu { namespace split_merge {
}
template <typename T>
static void splitC2_(const DevMem2D& src, DevMem2D* dst, const cudaStream_t& stream)
static void splitC2_(const DevMem2Db& src, DevMem2Db* dst, const cudaStream_t& stream)
{
dim3 blockDim(32, 8);
dim3 gridDim(divUp(src.cols, blockDim.x), divUp(src.rows, blockDim.y));
......@@ -449,7 +449,7 @@ namespace cv { namespace gpu { namespace split_merge {
template <typename T>
static void splitC3_(const DevMem2D& src, DevMem2D* dst, const cudaStream_t& stream)
static void splitC3_(const DevMem2Db& src, DevMem2Db* dst, const cudaStream_t& stream)
{
dim3 blockDim(32, 8);
dim3 gridDim(divUp(src.cols, blockDim.x), divUp(src.rows, blockDim.y));
......@@ -466,7 +466,7 @@ namespace cv { namespace gpu { namespace split_merge {
template <typename T>
static void splitC4_(const DevMem2D& src, DevMem2D* dst, const cudaStream_t& stream)
static void splitC4_(const DevMem2Db& src, DevMem2Db* dst, const cudaStream_t& stream)
{
dim3 blockDim(32, 8);
dim3 gridDim(divUp(src.cols, blockDim.x), divUp(src.rows, blockDim.y));
......@@ -483,7 +483,7 @@ namespace cv { namespace gpu { namespace split_merge {
}
extern "C" void split_caller(const DevMem2D& src, DevMem2D* dst,
extern "C" void split_caller(const DevMem2Db& src, DevMem2Db* dst,
int num_channels, size_t elem_size1,
const cudaStream_t& stream)
{
......
......@@ -232,7 +232,7 @@ __device__ void InitColSSD(int x_tex, int y_tex, int im_pitch, unsigned char* im
}
template<int RADIUS>
__global__ void stereoKernel(unsigned char *left, unsigned char *right, size_t img_step, PtrStep disp, int maxdisp)
__global__ void stereoKernel(unsigned char *left, unsigned char *right, size_t img_step, PtrStepb disp, int maxdisp)
{
extern __shared__ unsigned int col_ssd_cache[];
volatile unsigned int *col_ssd = col_ssd_cache + BLOCK_W + threadIdx.x;
......@@ -313,7 +313,7 @@ __global__ void stereoKernel(unsigned char *left, unsigned char *right, size_t i
}
template<int RADIUS> void kernel_caller(const DevMem2D& left, const DevMem2D& right, const DevMem2D& disp, int maxdisp, cudaStream_t & stream)
template<int RADIUS> void kernel_caller(const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& disp, int maxdisp, cudaStream_t & stream)
{
dim3 grid(1,1,1);
dim3 threads(BLOCK_W, 1, 1);
......@@ -331,7 +331,7 @@ template<int RADIUS> void kernel_caller(const DevMem2D& left, const DevMem2D& ri
cudaSafeCall( cudaDeviceSynchronize() );
};
typedef void (*kernel_caller_t)(const DevMem2D& left, const DevMem2D& right, const DevMem2D& disp, int maxdisp, cudaStream_t & stream);
typedef void (*kernel_caller_t)(const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& disp, int maxdisp, cudaStream_t & stream);
const static kernel_caller_t callers[] =
{
......@@ -346,7 +346,7 @@ const static kernel_caller_t callers[] =
};
const int calles_num = sizeof(callers)/sizeof(callers[0]);
extern "C" void stereoBM_GPU(const DevMem2D& left, const DevMem2D& right, const DevMem2D& disp, int maxdisp, int winsz, const DevMem2D_<unsigned int>& minSSD_buf, cudaStream_t& stream)
extern "C" void stereoBM_GPU(const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& disp, int maxdisp, int winsz, const DevMem2D_<unsigned int>& minSSD_buf, cudaStream_t& stream)
{
int winsz2 = winsz >> 1;
......@@ -375,7 +375,7 @@ extern "C" void stereoBM_GPU(const DevMem2D& left, const DevMem2D& right, const
texture<unsigned char, 2, cudaReadModeElementType> texForSobel;
extern "C" __global__ void prefilter_kernel(DevMem2D output, int prefilterCap)
extern "C" __global__ void prefilter_kernel(DevMem2Db output, int prefilterCap)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
......@@ -392,7 +392,7 @@ extern "C" __global__ void prefilter_kernel(DevMem2D output, int prefilterCap)
}
}
extern "C" void prefilter_xsobel(const DevMem2D& input, const DevMem2D& output, int prefilterCap, cudaStream_t & stream)
extern "C" void prefilter_xsobel(const DevMem2Db& input, const DevMem2Db& output, int prefilterCap, cudaStream_t & stream)
{
cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>();
cudaSafeCall( cudaBindTexture2D( 0, texForSobel, input.data, desc, input.cols, input.rows, input.step ) );
......@@ -451,7 +451,7 @@ __device__ float CalcSums(float *cols, float *cols_cache, int winsz)
#define RpT (2 * ROWSperTHREAD) // determined experimentally
extern "C" __global__ void textureness_kernel(DevMem2D disp, int winsz, float threshold)
extern "C" __global__ void textureness_kernel(DevMem2Db disp, int winsz, float threshold)
{
int winsz2 = winsz/2;
int n_dirty_pixels = (winsz2) * 2;
......@@ -510,7 +510,7 @@ extern "C" __global__ void textureness_kernel(DevMem2D disp, int winsz, float th
}
}
extern "C" void postfilter_textureness(const DevMem2D& input, int winsz, float avgTexturenessThreshold, const DevMem2D& disp, cudaStream_t & stream)
extern "C" void postfilter_textureness(const DevMem2Db& input, int winsz, float avgTexturenessThreshold, const DevMem2Db& disp, cudaStream_t & stream)
{
avgTexturenessThreshold *= winsz * winsz;
......
......@@ -129,7 +129,7 @@ namespace cv { namespace gpu { namespace bp
};
template <int cn, typename D>
__global__ void comp_data(const DevMem2D left, const PtrStep right, PtrElemStep_<D> data)
__global__ void comp_data(const DevMem2Db left, const PtrStepb right, PtrElemStep_<D> data)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
......@@ -160,9 +160,9 @@ namespace cv { namespace gpu { namespace bp
}
template<typename T, typename D>
void comp_data_gpu(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream);
void comp_data_gpu(const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& data, cudaStream_t stream);
template <> void comp_data_gpu<uchar, short>(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream)
template <> void comp_data_gpu<uchar, short>(const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& data, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
......@@ -176,7 +176,7 @@ namespace cv { namespace gpu { namespace bp
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <> void comp_data_gpu<uchar, float>(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream)
template <> void comp_data_gpu<uchar, float>(const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& data, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
......@@ -191,7 +191,7 @@ namespace cv { namespace gpu { namespace bp
cudaSafeCall( cudaDeviceSynchronize() );
}
template <> void comp_data_gpu<uchar3, short>(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream)
template <> void comp_data_gpu<uchar3, short>(const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& data, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
......@@ -205,7 +205,7 @@ namespace cv { namespace gpu { namespace bp
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <> void comp_data_gpu<uchar3, float>(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream)
template <> void comp_data_gpu<uchar3, float>(const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& data, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
......@@ -220,7 +220,7 @@ namespace cv { namespace gpu { namespace bp
cudaSafeCall( cudaDeviceSynchronize() );
}
template <> void comp_data_gpu<uchar4, short>(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream)
template <> void comp_data_gpu<uchar4, short>(const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& data, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
......@@ -234,7 +234,7 @@ namespace cv { namespace gpu { namespace bp
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <> void comp_data_gpu<uchar4, float>(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream)
template <> void comp_data_gpu<uchar4, float>(const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& data, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
......@@ -254,7 +254,7 @@ namespace cv { namespace gpu { namespace bp
///////////////////////////////////////////////////////////////
template <typename T>
__global__ void data_step_down(int dst_cols, int dst_rows, int src_rows, const PtrStep_<T> src, PtrStep_<T> dst)
__global__ void data_step_down(int dst_cols, int dst_rows, int src_rows, const PtrStep<T> src, PtrStep<T> dst)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
......@@ -274,7 +274,7 @@ namespace cv { namespace gpu { namespace bp
}
template<typename T>
void data_step_down_gpu(int dst_cols, int dst_rows, int src_rows, const DevMem2D& src, const DevMem2D& dst, cudaStream_t stream)
void data_step_down_gpu(int dst_cols, int dst_rows, int src_rows, const DevMem2Db& src, const DevMem2Db& dst, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
......@@ -289,8 +289,8 @@ namespace cv { namespace gpu { namespace bp
cudaSafeCall( cudaDeviceSynchronize() );
}
template void data_step_down_gpu<short>(int dst_cols, int dst_rows, int src_rows, const DevMem2D& src, const DevMem2D& dst, cudaStream_t stream);
template void data_step_down_gpu<float>(int dst_cols, int dst_rows, int src_rows, const DevMem2D& src, const DevMem2D& dst, cudaStream_t stream);
template void data_step_down_gpu<short>(int dst_cols, int dst_rows, int src_rows, const DevMem2Db& src, const DevMem2Db& dst, cudaStream_t stream);
template void data_step_down_gpu<float>(int dst_cols, int dst_rows, int src_rows, const DevMem2Db& src, const DevMem2Db& dst, cudaStream_t stream);
///////////////////////////////////////////////////////////////
/////////////////// level up messages ////////////////////////
......@@ -316,7 +316,7 @@ namespace cv { namespace gpu { namespace bp
}
template <typename T>
void level_up_messages_gpu(int dst_idx, int dst_cols, int dst_rows, int src_rows, DevMem2D* mus, DevMem2D* mds, DevMem2D* mls, DevMem2D* mrs, cudaStream_t stream)
void level_up_messages_gpu(int dst_idx, int dst_cols, int dst_rows, int src_rows, DevMem2Db* mus, DevMem2Db* mds, DevMem2Db* mls, DevMem2Db* mrs, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
......@@ -339,8 +339,8 @@ namespace cv { namespace gpu { namespace bp
cudaSafeCall( cudaDeviceSynchronize() );
}
template void level_up_messages_gpu<short>(int dst_idx, int dst_cols, int dst_rows, int src_rows, DevMem2D* mus, DevMem2D* mds, DevMem2D* mls, DevMem2D* mrs, cudaStream_t stream);
template void level_up_messages_gpu<float>(int dst_idx, int dst_cols, int dst_rows, int src_rows, DevMem2D* mus, DevMem2D* mds, DevMem2D* mls, DevMem2D* mrs, cudaStream_t stream);
template void level_up_messages_gpu<short>(int dst_idx, int dst_cols, int dst_rows, int src_rows, DevMem2Db* mus, DevMem2Db* mds, DevMem2Db* mls, DevMem2Db* mrs, cudaStream_t stream);
template void level_up_messages_gpu<float>(int dst_idx, int dst_cols, int dst_rows, int src_rows, DevMem2Db* mus, DevMem2Db* mds, DevMem2Db* mls, DevMem2Db* mrs, cudaStream_t stream);
///////////////////////////////////////////////////////////////
//////////////////// calc all iterations /////////////////////
......@@ -441,8 +441,8 @@ namespace cv { namespace gpu { namespace bp
}
template <typename T>
void calc_all_iterations_gpu(int cols, int rows, int iters, const DevMem2D& u, const DevMem2D& d,
const DevMem2D& l, const DevMem2D& r, const DevMem2D& data, cudaStream_t stream)
void calc_all_iterations_gpu(int cols, int rows, int iters, const DevMem2Db& u, const DevMem2Db& d,
const DevMem2Db& l, const DevMem2Db& r, const DevMem2Db& data, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
......@@ -460,8 +460,8 @@ namespace cv { namespace gpu { namespace bp
}
}
template void calc_all_iterations_gpu<short>(int cols, int rows, int iters, const DevMem2D& u, const DevMem2D& d, const DevMem2D& l, const DevMem2D& r, const DevMem2D& data, cudaStream_t stream);
template void calc_all_iterations_gpu<float>(int cols, int rows, int iters, const DevMem2D& u, const DevMem2D& d, const DevMem2D& l, const DevMem2D& r, const DevMem2D& data, cudaStream_t stream);
template void calc_all_iterations_gpu<short>(int cols, int rows, int iters, const DevMem2Db& u, const DevMem2Db& d, const DevMem2Db& l, const DevMem2Db& r, const DevMem2Db& data, cudaStream_t stream);
template void calc_all_iterations_gpu<float>(int cols, int rows, int iters, const DevMem2Db& u, const DevMem2Db& d, const DevMem2Db& l, const DevMem2Db& r, const DevMem2Db& data, cudaStream_t stream);
///////////////////////////////////////////////////////////////
/////////////////////////// output ////////////////////////////
......@@ -506,7 +506,7 @@ namespace cv { namespace gpu { namespace bp
}
template <typename T>
void output_gpu(const DevMem2D& u, const DevMem2D& d, const DevMem2D& l, const DevMem2D& r, const DevMem2D& data,
void output_gpu(const DevMem2Db& u, const DevMem2Db& d, const DevMem2Db& l, const DevMem2Db& r, const DevMem2Db& data,
const DevMem2D_<short>& disp, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
......@@ -522,6 +522,6 @@ namespace cv { namespace gpu { namespace bp
cudaSafeCall( cudaDeviceSynchronize() );
}
template void output_gpu<short>(const DevMem2D& u, const DevMem2D& d, const DevMem2D& l, const DevMem2D& r, const DevMem2D& data, const DevMem2D_<short>& disp, cudaStream_t stream);
template void output_gpu<float>(const DevMem2D& u, const DevMem2D& d, const DevMem2D& l, const DevMem2D& r, const DevMem2D& data, const DevMem2D_<short>& disp, cudaStream_t stream);
template void output_gpu<short>(const DevMem2Db& u, const DevMem2Db& d, const DevMem2Db& l, const DevMem2Db& r, const DevMem2Db& data, const DevMem2D_<short>& disp, cudaStream_t stream);
template void output_gpu<float>(const DevMem2Db& u, const DevMem2Db& d, const DevMem2Db& l, const DevMem2Db& r, const DevMem2Db& data, const DevMem2D_<short>& disp, cudaStream_t stream);
}}}
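One signature worth pausing on: comp_data writes through PtrElemStep_<D> rather than PtrStep<D>. The element-step variant indexes rows in units of D instead of bytes, which drops the char* round-trip from the inner loop. A self-contained sketch of the presumed shape (the real class would also need the byte step to divide evenly by the element size):

#include <cstddef>

// Minimal PtrStep stand-in; here step is reused to hold an element count.
template <typename T> struct PtrStep
{
    T* data;
    std::size_t step;
    PtrStep(T* data_, std::size_t step_) : data(data_), step(step_) {}
};

template <typename T> struct PtrElemStep_ : public PtrStep<T>
{
    // Convert the byte stride to an element count once, up front.
    PtrElemStep_(T* data_, std::size_t byte_step)
        : PtrStep<T>(data_, byte_step / sizeof(T)) {}  // assumes byte_step % sizeof(T) == 0

    // Row addressing is now plain pointer arithmetic in T.
    T*       ptr(int y = 0)       { return this->data + y * this->step; }
    const T* ptr(int y = 0) const { return this->data + y * this->step; }
};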
......@@ -76,7 +76,7 @@ namespace cv { namespace gpu { namespace csbp
void load_constants(int ndisp, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump, int min_disp_th,
const DevMem2D& left, const DevMem2D& right, const DevMem2D& temp)
const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& temp)
{
cudaSafeCall( cudaMemcpyToSymbol(cndisp, &ndisp, sizeof(int)) );
......
......@@ -72,14 +72,14 @@ cv::gpu::Stream::operator bool() const { throw_nogpu(); return false; }
#include "opencv2/gpu/stream_accessor.hpp"
namespace cv { namespace gpu { namespace device {
void copy_to_with_mask(const DevMem2D& src, DevMem2D dst, int depth, const DevMem2D& mask, int channels, const cudaStream_t & stream = 0);
void copy_to_with_mask(const DevMem2Db& src, DevMem2Db dst, int depth, const DevMem2Db& mask, int channels, const cudaStream_t & stream = 0);
template <typename T>
void set_to_gpu(const DevMem2D& mat, const T* scalar, int channels, cudaStream_t stream);
void set_to_gpu(const DevMem2Db& mat, const T* scalar, int channels, cudaStream_t stream);
template <typename T>
void set_to_gpu(const DevMem2D& mat, const T* scalar, const DevMem2D& mask, int channels, cudaStream_t stream);
void set_to_gpu(const DevMem2Db& mat, const T* scalar, const DevMem2Db& mask, int channels, cudaStream_t stream);
void convert_gpu(const DevMem2D& src, int sdepth, const DevMem2D& dst, int ddepth, double alpha, double beta, cudaStream_t stream = 0);
void convert_gpu(const DevMem2Db& src, int sdepth, const DevMem2Db& dst, int ddepth, double alpha, double beta, cudaStream_t stream = 0);
}}}
struct Stream::Impl
......
......@@ -738,10 +738,10 @@ void cv::gpu::filter2D(const GpuMat& src, GpuMat& dst, int ddepth, const Mat& ke
namespace cv { namespace gpu { namespace filters
{
template <typename T, typename D>
void linearRowFilter_gpu(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
void linearRowFilter_gpu(const DevMem2Db& src, const DevMem2Db& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
template <typename T, typename D>
void linearColumnFilter_gpu(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
void linearColumnFilter_gpu(const DevMem2Db& src, const DevMem2Db& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
}}}
namespace
......@@ -749,7 +749,7 @@ namespace
typedef NppStatus (*nppFilter1D_t)(const Npp8u * pSrc, Npp32s nSrcStep, Npp8u * pDst, Npp32s nDstStep, NppiSize oROI,
const Npp32s * pKernel, Npp32s nMaskSize, Npp32s nAnchor, Npp32s nDivisor);
typedef void (*gpuFilter1D_t)(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
typedef void (*gpuFilter1D_t)(const DevMem2Db& src, const DevMem2Db& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream);
struct NppLinearRowFilter : public BaseRowFilter_GPU
{
......
......@@ -395,14 +395,14 @@ void cv::gpu::ensureSizeIsEnough(int, int, int, GpuMat&) { throw_nogpu(); }
namespace cv { namespace gpu { namespace device
{
void copy_to_with_mask(const DevMem2D& src, DevMem2D dst, int depth, const DevMem2D& mask, int channels, const cudaStream_t & stream = 0);
void copy_to_with_mask(const DevMem2Db& src, DevMem2Db dst, int depth, const DevMem2Db& mask, int channels, const cudaStream_t & stream = 0);
template <typename T>
void set_to_gpu(const DevMem2D& mat, const T* scalar, int channels, cudaStream_t stream);
void set_to_gpu(const DevMem2Db& mat, const T* scalar, int channels, cudaStream_t stream);
template <typename T>
void set_to_gpu(const DevMem2D& mat, const T* scalar, const DevMem2D& mask, int channels, cudaStream_t stream);
void set_to_gpu(const DevMem2Db& mat, const T* scalar, const DevMem2Db& mask, int channels, cudaStream_t stream);
void convert_gpu(const DevMem2D& src, int sdepth, const DevMem2D& dst, int ddepth, double alpha, double beta, cudaStream_t stream = 0);
void convert_gpu(const DevMem2Db& src, int sdepth, const DevMem2Db& dst, int ddepth, double alpha, double beta, cudaStream_t stream = 0);
}}}
......
......@@ -67,7 +67,7 @@ void set_up_constants(int nbins, int block_stride_x, int block_stride_y,
void compute_hists(int nbins, int block_stride_x, int block_stride_y,
int height, int width, const cv::gpu::DevMem2Df& grad,
const cv::gpu::DevMem2D& qangle, float sigma, float* block_hists);
const cv::gpu::DevMem2Db& qangle, float sigma, float* block_hists);
void normalize_hists(int nbins, int block_stride_x, int block_stride_y,
int height, int width, float* block_hists, float threshold);
......@@ -84,13 +84,13 @@ void extract_descrs_by_cols(int win_height, int win_width, int block_stride_y, i
int win_stride_y, int win_stride_x, int height, int width, float* block_hists,
cv::gpu::DevMem2Df descriptors);
void compute_gradients_8UC1(int nbins, int height, int width, const cv::gpu::DevMem2D& img,
float angle_scale, cv::gpu::DevMem2Df grad, cv::gpu::DevMem2D qangle, bool correct_gamma);
void compute_gradients_8UC4(int nbins, int height, int width, const cv::gpu::DevMem2D& img,
float angle_scale, cv::gpu::DevMem2Df grad, cv::gpu::DevMem2D qangle, bool correct_gamma);
void compute_gradients_8UC1(int nbins, int height, int width, const cv::gpu::DevMem2Db& img,
float angle_scale, cv::gpu::DevMem2Df grad, cv::gpu::DevMem2Db qangle, bool correct_gamma);
void compute_gradients_8UC4(int nbins, int height, int width, const cv::gpu::DevMem2Db& img,
float angle_scale, cv::gpu::DevMem2Df grad, cv::gpu::DevMem2Db qangle, bool correct_gamma);
void resize_8UC1(const cv::gpu::DevMem2D& src, cv::gpu::DevMem2D dst);
void resize_8UC4(const cv::gpu::DevMem2D& src, cv::gpu::DevMem2D dst);
void resize_8UC1(const cv::gpu::DevMem2Db& src, cv::gpu::DevMem2Db dst);
void resize_8UC4(const cv::gpu::DevMem2Db& src, cv::gpu::DevMem2Db dst);
}}}
......
......@@ -54,16 +54,16 @@ void cv::gpu::matchTemplate(const GpuMat&, const GpuMat&, GpuMat&, int) { throw_
namespace cv { namespace gpu { namespace imgproc
{
void matchTemplateNaive_CCORR_8U(
const DevMem2D image, const DevMem2D templ, DevMem2Df result, int cn);
const DevMem2Db image, const DevMem2Db templ, DevMem2Df result, int cn);
void matchTemplateNaive_CCORR_32F(
const DevMem2D image, const DevMem2D templ, DevMem2Df result, int cn);
const DevMem2Db image, const DevMem2Db templ, DevMem2Df result, int cn);
void matchTemplateNaive_SQDIFF_8U(
const DevMem2D image, const DevMem2D templ, DevMem2Df result, int cn);
const DevMem2Db image, const DevMem2Db templ, DevMem2Df result, int cn);
void matchTemplateNaive_SQDIFF_32F(
const DevMem2D image, const DevMem2D templ, DevMem2Df result, int cn);
const DevMem2Db image, const DevMem2Db templ, DevMem2Df result, int cn);
void matchTemplatePrepared_SQDIFF_8U(
int w, int h, const DevMem2D_<unsigned long long> image_sqsum,
......@@ -145,7 +145,7 @@ namespace cv { namespace gpu { namespace imgproc
void normalize_8U(int w, int h, const DevMem2D_<unsigned long long> image_sqsum,
unsigned int templ_sqsum, DevMem2Df result, int cn);
void extractFirstChannel_32F(const DevMem2D image, DevMem2Df result, int cn);
void extractFirstChannel_32F(const DevMem2Db image, DevMem2Df result, int cn);
}}}
......
......@@ -193,22 +193,22 @@ double cv::gpu::norm(const GpuMat& src1, const GpuMat& src2, int normType)
namespace cv { namespace gpu { namespace mathfunc
{
template <typename T>
void sumCaller(const DevMem2D src, PtrStep buf, double* sum, int cn);
void sumCaller(const DevMem2Db src, PtrStepb buf, double* sum, int cn);
template <typename T>
void sumMultipassCaller(const DevMem2D src, PtrStep buf, double* sum, int cn);
void sumMultipassCaller(const DevMem2Db src, PtrStepb buf, double* sum, int cn);
template <typename T>
void absSumCaller(const DevMem2D src, PtrStep buf, double* sum, int cn);
void absSumCaller(const DevMem2Db src, PtrStepb buf, double* sum, int cn);
template <typename T>
void absSumMultipassCaller(const DevMem2D src, PtrStep buf, double* sum, int cn);
void absSumMultipassCaller(const DevMem2Db src, PtrStepb buf, double* sum, int cn);
template <typename T>
void sqrSumCaller(const DevMem2D src, PtrStep buf, double* sum, int cn);
void sqrSumCaller(const DevMem2Db src, PtrStepb buf, double* sum, int cn);
template <typename T>
void sqrSumMultipassCaller(const DevMem2D src, PtrStep buf, double* sum, int cn);
void sqrSumMultipassCaller(const DevMem2Db src, PtrStepb buf, double* sum, int cn);
namespace sums
{
......@@ -228,7 +228,7 @@ Scalar cv::gpu::sum(const GpuMat& src, GpuMat& buf)
{
using namespace mathfunc;
typedef void (*Caller)(const DevMem2D, PtrStep, double*, int);
typedef void (*Caller)(const DevMem2Db, PtrStepb, double*, int);
static Caller multipass_callers[7] = {
sumMultipassCaller<unsigned char>, sumMultipassCaller<char>,
......@@ -269,7 +269,7 @@ Scalar cv::gpu::absSum(const GpuMat& src, GpuMat& buf)
{
using namespace mathfunc;
typedef void (*Caller)(const DevMem2D, PtrStep, double*, int);
typedef void (*Caller)(const DevMem2Db, PtrStepb, double*, int);
static Caller multipass_callers[7] = {
absSumMultipassCaller<unsigned char>, absSumMultipassCaller<char>,
......@@ -310,7 +310,7 @@ Scalar cv::gpu::sqrSum(const GpuMat& src, GpuMat& buf)
{
using namespace mathfunc;
typedef void (*Caller)(const DevMem2D, PtrStep, double*, int);
typedef void (*Caller)(const DevMem2Db, PtrStepb, double*, int);
static Caller multipass_callers[7] = {
sqrSumMultipassCaller<unsigned char>, sqrSumMultipassCaller<char>,
......@@ -350,16 +350,16 @@ namespace cv { namespace gpu { namespace mathfunc { namespace minmax {
void getBufSizeRequired(int cols, int rows, int elem_size, int& bufcols, int& bufrows);
template <typename T>
void minMaxCaller(const DevMem2D src, double* minval, double* maxval, PtrStep buf);
void minMaxCaller(const DevMem2Db src, double* minval, double* maxval, PtrStepb buf);
template <typename T>
void minMaxMaskCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, PtrStep buf);
void minMaxMaskCaller(const DevMem2Db src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
template <typename T>
void minMaxMultipassCaller(const DevMem2D src, double* minval, double* maxval, PtrStep buf);
void minMaxMultipassCaller(const DevMem2Db src, double* minval, double* maxval, PtrStepb buf);
template <typename T>
void minMaxMaskMultipassCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, PtrStep buf);
void minMaxMaskMultipassCaller(const DevMem2Db src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
}}}}
......@@ -375,8 +375,8 @@ void cv::gpu::minMax(const GpuMat& src, double* minVal, double* maxVal, const Gp
{
using namespace mathfunc::minmax;
typedef void (*Caller)(const DevMem2D, double*, double*, PtrStep);
typedef void (*MaskedCaller)(const DevMem2D, const PtrStep, double*, double*, PtrStep);
typedef void (*Caller)(const DevMem2Db, double*, double*, PtrStepb);
typedef void (*MaskedCaller)(const DevMem2Db, const PtrStepb, double*, double*, PtrStepb);
static Caller multipass_callers[7] = {
minMaxMultipassCaller<unsigned char>, minMaxMultipassCaller<char>,
......@@ -445,20 +445,20 @@ namespace cv { namespace gpu { namespace mathfunc { namespace minmaxloc {
int& b1rows, int& b2cols, int& b2rows);
template <typename T>
void minMaxLocCaller(const DevMem2D src, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStep valBuf, PtrStep locBuf);
void minMaxLocCaller(const DevMem2Db src, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStepb valBuf, PtrStepb locBuf);
template <typename T>
void minMaxLocMaskCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStep valBuf, PtrStep locBuf);
void minMaxLocMaskCaller(const DevMem2Db src, const PtrStepb mask, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStepb valBuf, PtrStepb locBuf);
template <typename T>
void minMaxLocMultipassCaller(const DevMem2D src, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStep valBuf, PtrStep locBuf);
void minMaxLocMultipassCaller(const DevMem2Db src, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStepb valBuf, PtrStepb locBuf);
template <typename T>
void minMaxLocMaskMultipassCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStep valBuf, PtrStep locBuf);
void minMaxLocMaskMultipassCaller(const DevMem2Db src, const PtrStepb mask, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStepb valBuf, PtrStepb locBuf);
}}}}
......@@ -474,8 +474,8 @@ void cv::gpu::minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point
{
using namespace mathfunc::minmaxloc;
typedef void (*Caller)(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
typedef void (*MaskedCaller)(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
typedef void (*Caller)(const DevMem2Db, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
typedef void (*MaskedCaller)(const DevMem2Db, const PtrStepb, double*, double*, int[2], int[2], PtrStepb, PtrStepb);
static Caller multipass_callers[7] = {
minMaxLocMultipassCaller<unsigned char>, minMaxLocMultipassCaller<char>,
......@@ -549,10 +549,10 @@ namespace cv { namespace gpu { namespace mathfunc { namespace countnonzero {
void getBufSizeRequired(int cols, int rows, int& bufcols, int& bufrows);
template <typename T>
int countNonZeroCaller(const DevMem2D src, PtrStep buf);
int countNonZeroCaller(const DevMem2Db src, PtrStepb buf);
template <typename T>
int countNonZeroMultipassCaller(const DevMem2D src, PtrStep buf);
int countNonZeroMultipassCaller(const DevMem2Db src, PtrStepb buf);
}}}}
......@@ -568,7 +568,7 @@ int cv::gpu::countNonZero(const GpuMat& src, GpuMat& buf)
{
using namespace mathfunc::countnonzero;
typedef int (*Caller)(const DevMem2D src, PtrStep buf);
typedef int (*Caller)(const DevMem2Db src, PtrStepb buf);
static Caller multipass_callers[7] = {
countNonZeroMultipassCaller<unsigned char>, countNonZeroMultipassCaller<char>,
......@@ -603,8 +603,8 @@ int cv::gpu::countNonZero(const GpuMat& src, GpuMat& buf)
// reduce
namespace cv { namespace gpu { namespace mathfunc {
template <typename T, typename S, typename D> void reduceRows_gpu(const DevMem2D& src, const DevMem2D& dst, int reduceOp, cudaStream_t stream);
template <typename T, typename S, typename D> void reduceCols_gpu(const DevMem2D& src, int cn, const DevMem2D& dst, int reduceOp, cudaStream_t stream);
template <typename T, typename S, typename D> void reduceRows_gpu(const DevMem2Db& src, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);
template <typename T, typename S, typename D> void reduceCols_gpu(const DevMem2Db& src, int cn, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);
}}}
void cv::gpu::reduce(const GpuMat& src, GpuMat& dst, int dim, int reduceOp, int dtype, Stream& stream)
......@@ -621,7 +621,7 @@ void cv::gpu::reduce(const GpuMat& src, GpuMat& dst, int dim, int reduceOp, int
if (dim == 0)
{
typedef void (*caller_t)(const DevMem2D& src, const DevMem2D& dst, int reduceOp, cudaStream_t stream);
typedef void (*caller_t)(const DevMem2Db& src, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);
static const caller_t callers[6][6] =
{
......@@ -683,7 +683,7 @@ void cv::gpu::reduce(const GpuMat& src, GpuMat& dst, int dim, int reduceOp, int
}
else
{
typedef void (*caller_t)(const DevMem2D& src, int cn, const DevMem2D& dst, int reduceOp, cudaStream_t stream);
typedef void (*caller_t)(const DevMem2Db& src, int cn, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);
static const caller_t callers[6][6] =
{
......
......@@ -55,11 +55,11 @@ namespace cv { namespace gpu { namespace device
struct MaskReader
{
explicit MaskReader(const PtrStep& mask_): mask(mask_) {}
explicit MaskReader(const PtrStepb& mask_): mask(mask_) {}
__device__ __forceinline__ bool operator()(int y, int x) const { return mask.ptr(y)[x]; }
const PtrStep mask;
const PtrStepb mask;
};
struct NoMask
......@@ -219,7 +219,7 @@ namespace cv { namespace gpu { namespace device
};
template <typename T, typename D, typename UnOp, typename Mask>
__global__ static void transformSmart(const DevMem2D_<T> src_, PtrStep_<D> dst_, const Mask mask, const UnOp op)
__global__ static void transformSmart(const DevMem2D_<T> src_, PtrStep<D> dst_, const Mask mask, const UnOp op)
{
typedef TransformFunctorTraits<UnOp> ft;
typedef typename UnaryReadWriteTraits<T, D, ft::smart_shift>::read_type read_type;
......@@ -255,7 +255,7 @@ namespace cv { namespace gpu { namespace device
}
template <typename T, typename D, typename UnOp, typename Mask>
static __global__ void transformSimple(const DevMem2D_<T> src, PtrStep_<D> dst, const Mask mask, const UnOp op)
static __global__ void transformSimple(const DevMem2D_<T> src, PtrStep<D> dst, const Mask mask, const UnOp op)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
......@@ -267,7 +267,7 @@ namespace cv { namespace gpu { namespace device
}
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
__global__ static void transformSmart(const DevMem2D_<T1> src1_, const PtrStep_<T2> src2_, PtrStep_<D> dst_,
__global__ static void transformSmart(const DevMem2D_<T1> src1_, const PtrStep<T2> src2_, PtrStep<D> dst_,
const Mask mask, const BinOp op)
{
typedef TransformFunctorTraits<BinOp> ft;
......@@ -307,7 +307,7 @@ namespace cv { namespace gpu { namespace device
}
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
static __global__ void transformSimple(const DevMem2D_<T1> src1, const PtrStep_<T2> src2, PtrStep_<D> dst,
static __global__ void transformSimple(const DevMem2D_<T1> src1, const PtrStep<T2> src2, PtrStep<D> dst,
const Mask mask, const BinOp op)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
......
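The transformSmart kernels read several elements per thread through a widened read_type derived from TransformFunctorTraits, falling back to transformSimple when width or alignment forbids it. A sketch of the idea with the packed type fixed to uchar4 for illustration (the real code derives it, and handles the ragged row tail separately):

    template <typename UnOp>
    __global__ void transformSmartSketch(const cv::gpu::DevMem2D_<unsigned char> src,
                                         cv::gpu::PtrStep<unsigned char> dst, const UnOp op)
    {
        const int x4 = blockDim.x * blockIdx.x + threadIdx.x;   // index in 4-element units
        const int y  = blockDim.y * blockIdx.y + threadIdx.y;
        if (y < src.rows && x4 * 4 + 3 < src.cols)
        {
            uchar4 v = ((const uchar4*)src.ptr(y))[x4];         // one coalesced 32-bit load
            v.x = op(v.x); v.y = op(v.y); v.z = op(v.z); v.w = op(v.w);
            ((uchar4*)dst.ptr(y))[x4] = v;                      // one 32-bit store
        }
    }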
......@@ -54,7 +54,7 @@ namespace cv { namespace gpu { namespace device
detail::transform_caller(src, dst, op, WithOutMask(), stream);
}
template <typename T, typename D, typename UnOp>
void transform(const DevMem2D_<T>& src, const DevMem2D_<D>& dst, const PtrStep& mask, const UnOp& op, cudaStream_t stream = 0)
void transform(const DevMem2D_<T>& src, const DevMem2D_<D>& dst, const PtrStepb& mask, const UnOp& op, cudaStream_t stream = 0)
{
detail::transform_caller(src, dst, op, SingleMask(mask), stream);
}
......@@ -65,7 +65,7 @@ namespace cv { namespace gpu { namespace device
detail::transform_caller(src1, src2, dst, op, WithOutMask(), stream);
}
template <typename T1, typename T2, typename D, typename BinOp>
void transform(const DevMem2D_<T1>& src1, const DevMem2D_<T2>& src2, const DevMem2D_<D>& dst, const PtrStep& mask, const BinOp& op, cudaStream_t stream = 0)
void transform(const DevMem2D_<T1>& src1, const DevMem2D_<T2>& src2, const DevMem2D_<D>& dst, const PtrStepb& mask, const BinOp& op, cudaStream_t stream = 0)
{
detail::transform_caller(src1, src2, dst, op, SingleMask(mask), stream);
}
......
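Host-side usage of the unary overloads above, with a hypothetical device functor; the signatures themselves are taken directly from this hunk:

    struct ShiftRight2   // hypothetical functor
    {
        __device__ __forceinline__ unsigned char operator()(unsigned char v) const { return v >> 2; }
    };

    void runTransforms(const cv::gpu::DevMem2D_<unsigned char>& src,
                       const cv::gpu::DevMem2D_<unsigned char>& dst,
                       const cv::gpu::PtrStepb& mask, cudaStream_t stream)
    {
        cv::gpu::device::transform(src, dst, ShiftRight2(), stream);        // unmasked
        cv::gpu::device::transform(src, dst, mask, ShiftRight2(), stream);  // masked
    }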
......@@ -70,19 +70,19 @@ namespace cv { namespace gpu { namespace device
struct SingleMask
{
explicit __host__ __device__ __forceinline__ SingleMask(const PtrStep& mask_) : mask(mask_) {}
explicit __host__ __device__ __forceinline__ SingleMask(const PtrStepb& mask_) : mask(mask_) {}
__device__ __forceinline__ bool operator()(int y, int x) const
{
return mask.ptr(y)[x] != 0;
}
const PtrStep mask;
const PtrStepb mask;
};
struct MaskCollection
{
explicit __host__ __device__ __forceinline__ MaskCollection(PtrStep* maskCollection_) : maskCollection(maskCollection_) {}
explicit __host__ __device__ __forceinline__ MaskCollection(PtrStepb* maskCollection_) : maskCollection(maskCollection_) {}
__device__ __forceinline__ void next()
{
......@@ -99,8 +99,8 @@ namespace cv { namespace gpu { namespace device
return curMask.data == 0 || (ForceGlob<uchar>::Load(curMask.ptr(y), x, val), (val != 0));
}
const PtrStep* maskCollection;
PtrStep curMask;
const PtrStepb* maskCollection;
PtrStepb curMask;
};
struct WithOutMask
......
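MaskCollection steps through one mask per train image (this file presumably serves the brute-force matcher): next() advances curMask, operator() reads it, and an empty curMask accepts every pair. A device-side sketch of the presumed driving loop, with a hypothetical per-pair callback:

    template <typename Body>
    __device__ void forEachCandidate(MaskCollection masks, int nImages,
                                     int queryIdx, int trainCount, Body body)
    {
        for (int img = 0; img < nImages; ++img)
        {
            masks.next();                          // load this image's mask (may be empty)
            for (int trainIdx = 0; trainIdx < trainCount; ++trainIdx)
                if (masks(queryIdx, trainIdx))     // empty mask means "accept everything"
                    body(img, queryIdx, trainIdx); // hypothetical callback
        }
    }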
......@@ -55,11 +55,11 @@ void cv::gpu::split(const GpuMat& /*src*/, vector<GpuMat>& /*dst*/, Stream& /*st
namespace cv { namespace gpu { namespace split_merge
{
extern "C" void merge_caller(const DevMem2D* src, DevMem2D& dst,
extern "C" void merge_caller(const DevMem2Db* src, DevMem2Db& dst,
int total_channels, size_t elem_size,
const cudaStream_t& stream);
extern "C" void split_caller(const DevMem2D& src, DevMem2D* dst,
extern "C" void split_caller(const DevMem2Db& src, DevMem2Db* dst,
int num_channels, size_t elem_size1,
const cudaStream_t& stream);
......@@ -95,11 +95,11 @@ namespace cv { namespace gpu { namespace split_merge
{
dst.create(size, CV_MAKETYPE(depth, total_channels));
DevMem2D src_as_devmem[4];
DevMem2Db src_as_devmem[4];
for(size_t i = 0; i < n; ++i)
src_as_devmem[i] = src[i];
DevMem2D dst_as_devmem(dst);
DevMem2Db dst_as_devmem(dst);
split_merge::merge_caller(src_as_devmem, dst_as_devmem,
total_channels, CV_ELEM_SIZE(depth),
stream);
......@@ -130,11 +130,11 @@ namespace cv { namespace gpu { namespace split_merge
CV_Assert(num_channels <= 4);
DevMem2D dst_as_devmem[4];
DevMem2Db dst_as_devmem[4];
for (int i = 0; i < num_channels; ++i)
dst_as_devmem[i] = dst[i];
DevMem2D src_as_devmem(src);
DevMem2Db src_as_devmem(src);
split_merge::split_caller(src_as_devmem, dst_as_devmem,
num_channels, src.elemSize1(),
stream);
......
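These callers sit beneath the public cv::gpu::split and cv::gpu::merge wrappers, whose vector overloads appear in the hunk header above. A usage sketch, with the stream argument left at its presumed default:

    #include <opencv2/gpu/gpu.hpp>
    #include <vector>
    using namespace cv;

    void roundTrip(const gpu::GpuMat& bgr)    // e.g. CV_8UC3
    {
        std::vector<gpu::GpuMat> planes;
        gpu::split(bgr, planes);              // three single-channel planes
        gpu::GpuMat merged;
        gpu::merge(planes, merged);           // back to the interleaved image
    }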
......@@ -59,10 +59,10 @@ namespace cv { namespace gpu
{
namespace bm
{
//extern "C" void stereoBM_GPU(const DevMem2D& left, const DevMem2D& right, const DevMem2D& disp, int ndisp, int winsz, const DevMem2D_<uint>& minSSD_buf);
extern "C" void stereoBM_GPU(const DevMem2D& left, const DevMem2D& right, const DevMem2D& disp, int ndisp, int winsz, const DevMem2D_<uint>& minSSD_buf, cudaStream_t & stream);
extern "C" void prefilter_xsobel(const DevMem2D& input, const DevMem2D& output, int prefilterCap /*= 31*/, cudaStream_t & stream);
extern "C" void postfilter_textureness(const DevMem2D& input, int winsz, float avgTexturenessThreshold, const DevMem2D& disp, cudaStream_t & stream);
//extern "C" void stereoBM_GPU(const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& disp, int ndisp, int winsz, const DevMem2D_<uint>& minSSD_buf);
extern "C" void stereoBM_GPU(const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& disp, int ndisp, int winsz, const DevMem2D_<uint>& minSSD_buf, cudaStream_t & stream);
extern "C" void prefilter_xsobel(const DevMem2Db& input, const DevMem2Db& output, int prefilterCap /*= 31*/, cudaStream_t & stream);
extern "C" void postfilter_textureness(const DevMem2Db& input, int winsz, float avgTexturenessThreshold, const DevMem2Db& disp, cudaStream_t & stream);
}
}}
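These kernels back the StereoBM_GPU class; a usage sketch with the 2.x-era constructor (the preset constant and default values are assumptions):

    #include <opencv2/gpu/gpu.hpp>
    using namespace cv;

    void blockMatch(const gpu::GpuMat& left, const gpu::GpuMat& right)   // CV_8UC1 pair
    {
        gpu::StereoBM_GPU bm(gpu::StereoBM_GPU::PREFILTER_XSOBEL, 64 /* ndisp */, 19 /* winsz */);
        gpu::GpuMat disp;
        bm(left, right, disp);   // prefilter_xsobel -> stereoBM_GPU -> postfilter_textureness
    }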
......
......@@ -63,16 +63,16 @@ namespace cv { namespace gpu { namespace bp
{
void load_constants(int ndisp, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump);
template<typename T, typename D>
void comp_data_gpu(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream);
void comp_data_gpu(const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& data, cudaStream_t stream);
template<typename T>
void data_step_down_gpu(int dst_cols, int dst_rows, int src_rows, const DevMem2D& src, const DevMem2D& dst, cudaStream_t stream);
void data_step_down_gpu(int dst_cols, int dst_rows, int src_rows, const DevMem2Db& src, const DevMem2Db& dst, cudaStream_t stream);
template <typename T>
void level_up_messages_gpu(int dst_idx, int dst_cols, int dst_rows, int src_rows, DevMem2D* mus, DevMem2D* mds, DevMem2D* mls, DevMem2D* mrs, cudaStream_t stream);
void level_up_messages_gpu(int dst_idx, int dst_cols, int dst_rows, int src_rows, DevMem2Db* mus, DevMem2Db* mds, DevMem2Db* mls, DevMem2Db* mrs, cudaStream_t stream);
template <typename T>
void calc_all_iterations_gpu(int cols, int rows, int iters, const DevMem2D& u, const DevMem2D& d,
const DevMem2D& l, const DevMem2D& r, const DevMem2D& data, cudaStream_t stream);
void calc_all_iterations_gpu(int cols, int rows, int iters, const DevMem2Db& u, const DevMem2Db& d,
const DevMem2Db& l, const DevMem2Db& r, const DevMem2Db& data, cudaStream_t stream);
template <typename T>
void output_gpu(const DevMem2D& u, const DevMem2D& d, const DevMem2D& l, const DevMem2D& r, const DevMem2D& data,
void output_gpu(const DevMem2Db& u, const DevMem2Db& d, const DevMem2Db& l, const DevMem2Db& r, const DevMem2Db& data,
const DevMem2D_<short>& disp, cudaStream_t stream);
}}}
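Taken together these declarations form a coarse-to-fine belief-propagation pipeline (data cost, pyramid, message passing, readout), wrapped by cv::gpu::StereoBeliefPropagation. A usage sketch with assumed defaults:

    #include <opencv2/gpu/gpu.hpp>
    using namespace cv;

    void bpDisparity(const gpu::GpuMat& left, const gpu::GpuMat& right)
    {
        gpu::StereoBeliefPropagation bp(64 /* ndisp */);
        gpu::GpuMat disp;
        bp(left, right, disp);   // comp_data -> data_step_down -> calc_all_iterations -> output
    }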
......@@ -133,7 +133,7 @@ namespace
void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disp, Stream& stream)
{
typedef void (*comp_data_t)(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream);
typedef void (*comp_data_t)(const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& data, cudaStream_t stream);
static const comp_data_t comp_data_callers[2][5] =
{
{0, bp::comp_data_gpu<unsigned char, short>, 0, bp::comp_data_gpu<uchar3, short>, bp::comp_data_gpu<uchar4, short>},
......@@ -251,25 +251,25 @@ namespace
{
using namespace cv::gpu::bp;
typedef void (*data_step_down_t)(int dst_cols, int dst_rows, int src_rows, const DevMem2D& src, const DevMem2D& dst, cudaStream_t stream);
typedef void (*data_step_down_t)(int dst_cols, int dst_rows, int src_rows, const DevMem2Db& src, const DevMem2Db& dst, cudaStream_t stream);
static const data_step_down_t data_step_down_callers[2] =
{
data_step_down_gpu<short>, data_step_down_gpu<float>
};
typedef void (*level_up_messages_t)(int dst_idx, int dst_cols, int dst_rows, int src_rows, DevMem2D* mus, DevMem2D* mds, DevMem2D* mls, DevMem2D* mrs, cudaStream_t stream);
typedef void (*level_up_messages_t)(int dst_idx, int dst_cols, int dst_rows, int src_rows, DevMem2Db* mus, DevMem2Db* mds, DevMem2Db* mls, DevMem2Db* mrs, cudaStream_t stream);
static const level_up_messages_t level_up_messages_callers[2] =
{
level_up_messages_gpu<short>, level_up_messages_gpu<float>
};
typedef void (*calc_all_iterations_t)(int cols, int rows, int iters, const DevMem2D& u, const DevMem2D& d, const DevMem2D& l, const DevMem2D& r, const DevMem2D& data, cudaStream_t stream);
typedef void (*calc_all_iterations_t)(int cols, int rows, int iters, const DevMem2Db& u, const DevMem2Db& d, const DevMem2Db& l, const DevMem2Db& r, const DevMem2Db& data, cudaStream_t stream);
static const calc_all_iterations_t calc_all_iterations_callers[2] =
{
calc_all_iterations_gpu<short>, calc_all_iterations_gpu<float>
};
typedef void (*output_t)(const DevMem2D& u, const DevMem2D& d, const DevMem2D& l, const DevMem2D& r, const DevMem2D& data, const DevMem2D_<short>& disp, cudaStream_t stream);
typedef void (*output_t)(const DevMem2Db& u, const DevMem2Db& d, const DevMem2Db& l, const DevMem2Db& r, const DevMem2Db& data, const DevMem2D_<short>& disp, cudaStream_t stream);
static const output_t output_callers[2] =
{
output_gpu<short>, output_gpu<float>
......@@ -289,10 +289,10 @@ namespace
data_step_down_callers[funcIdx](cols_all[i], rows_all[i], rows_all[i-1], datas[i-1], datas[i], cudaStream);
}
DevMem2D mus[] = {u, u2};
DevMem2D mds[] = {d, d2};
DevMem2D mrs[] = {r, r2};
DevMem2D mls[] = {l, l2};
DevMem2Db mus[] = {u, u2};
DevMem2Db mds[] = {d, d2};
DevMem2Db mrs[] = {r, r2};
DevMem2Db mls[] = {l, l2};
int mem_idx = (rthis.levels & 1) ? 0 : 1;
......
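The paired message buffers (u/u2 and friends) implement double buffering across pyramid levels: mem_idx selects which half is read and which is written, so a level never overwrites messages it is still reading. A hedged host-side sketch of the presumed control flow:

    void runLevelsSketch(cv::gpu::DevMem2Db mus[2], int levels)
    {
        int mem_idx = (levels & 1) ? 0 : 1;
        for (int i = levels - 1; i >= 0; --i)
        {
            cv::gpu::DevMem2Db cur  = mus[mem_idx];            // messages produced at this level
            cv::gpu::DevMem2Db prev = mus[(mem_idx + 1) & 1];  // messages from the coarser level
            // ... level_up_messages / calc_all_iterations consume prev, fill cur ...
            mem_idx = (mem_idx + 1) & 1;                       // swap read/write roles
        }
    }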
......@@ -60,7 +60,7 @@ void cv::gpu::StereoConstantSpaceBP::operator()(const GpuMat&, const GpuMat&, Gp
namespace cv { namespace gpu { namespace csbp
{
void load_constants(int ndisp, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump, int min_disp_th,
const DevMem2D& left, const DevMem2D& right, const DevMem2D& temp);
const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& temp);
template<class T>
void init_data_cost(int rows, int cols, T* disp_selected_pyr, T* data_cost_selected, size_t msg_step,
......
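load_constants presumably copies the per-call parameters into __constant__ memory so every subsequent kernel launch can read them without extra arguments; a minimal sketch (symbol names hypothetical):

    __constant__ int   cndisp;
    __constant__ float cmax_data_term;

    void loadConstantsSketch(int ndisp, float max_data_term)
    {
        cudaMemcpyToSymbol(cndisp,         &ndisp,         sizeof(int));
        cudaMemcpyToSymbol(cmax_data_term, &max_data_term, sizeof(float));
    }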
......@@ -143,7 +143,7 @@ namespace
uploadConstant("cv::gpu::surf::c_nOctaveLayers", nOctaveLayers);
uploadConstant("cv::gpu::surf::c_hessianThreshold", static_cast<float>(hessianThreshold));
imgTex.bind("cv::gpu::surf::imgTex", (DevMem2D)img);
imgTex.bind("cv::gpu::surf::imgTex", (DevMem2Db)img);
integralBuffered(img, sum, intBuffer);
sumTex.bind("cv::gpu::surf::sumTex", (DevMem2D_<unsigned int>)sum);
......
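The SURF fix is only the cast target: GpuMat converts explicitly to the byte-typed view before texture binding, and the diff relies on both conversions, (DevMem2Db)img and (DevMem2D_<unsigned int>)sum. A sketch of what those casts presumably exercise:

    // GpuMat is presumed to expose conversion operators along these lines:
    //     operator DevMem2Db() const;
    //     template <typename T> operator DevMem2D_<T>() const;
    void bindSketch(const cv::gpu::GpuMat& img)              // CV_8UC1
    {
        cv::gpu::DevMem2Db view = (cv::gpu::DevMem2Db)img;   // rows/cols/data/step, uchar elements
        // imgTex.bind(...) then hands this view to the CUDA texture API (assumption).
        (void)view;
    }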
......@@ -474,7 +474,7 @@ public:
{
public:
virtual cv::Mat generateMask(const cv::Mat& src)=0;
virtual void initializeMask(const cv::Mat& src) {};
virtual void initializeMask(const cv::Mat& /*src*/) {};
};
void setMaskGenerator(Ptr<MaskGenerator> maskGenerator);
Ptr<MaskGenerator> getMaskGenerator();
......
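The only change here is commenting out the unused parameter name to silence compiler warnings. Since the interface is fully visible above, a hedged sketch of a user-defined generator written against it:

    #include <opencv2/imgproc/imgproc.hpp>

    // Hypothetical subclass of the MaskGenerator interface shown above.
    class ThresholdMaskGenerator : public MaskGenerator
    {
    public:
        virtual cv::Mat generateMask(const cv::Mat& src)
        {
            cv::Mat mask;
            cv::threshold(src, mask, 32, 255, cv::THRESH_BINARY);   // skip dark regions
            return mask;
        }
        // initializeMask keeps the empty default; its parameter is unnamed upstream
        // precisely so such no-op implementations compile warning-free.
    };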