Commit 3ebec744 authored by Vladislav Vinogradov

minor refactoring:

moved lbp.hpp to the src/cuda folder
added missing cv::gpu::device namespace
removed trailing whitespace
parent a703df54
@@ -42,9 +42,9 @@

 #if !defined CUDA_DISABLER

-#include <opencv2/gpu/device/lbp.hpp>
-#include <opencv2/gpu/device/vec_traits.hpp>
-#include <opencv2/gpu/device/saturate_cast.hpp>
+#include "lbp.hpp"
+#include "opencv2/gpu/device/vec_traits.hpp"
+#include "opencv2/gpu/device/saturate_cast.hpp"

 namespace cv { namespace gpu { namespace device
 {
@@ -299,4 +299,4 @@ namespace cv { namespace gpu { namespace device
     }
 }}}

-#endif /* CUDA_DISABLER */
\ No newline at end of file
+#endif /* CUDA_DISABLER */
@@ -1535,6 +1535,8 @@ namespace cv { namespace gpu { namespace device
         return functor_type(); \
     } \
 };

+#undef CV_DESCALE
+
 }}} // namespace cv { namespace gpu { namespace device

 #endif // __OPENCV_GPU_COLOR_DETAIL_HPP__
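For context on the added #undef: CV_DESCALE is OpenCV's fixed-point descaling macro, and undefining it keeps the helper from leaking out of this detail header. A small sketch of the standard definition and its rounding behavior (the definition is quoted from memory of OpenCV's core headers, so treat the exact form as an assumption):

#include <cstdio>

// Assumed definition (mirrors OpenCV core): divide by 2^n with
// round-to-nearest, by adding half of the divisor before the shift.
#define CV_DESCALE(x, n) (((x) + (1 << ((n) - 1))) >> (n))

int main()
{
    // 8-bit fixed-point RGB -> gray weights sum to 256 (== 1.0 at shift 8),
    // so pure white stays 255 after descaling:
    printf("%d\n", CV_DESCALE(77 * 255 + 150 * 255 + 29 * 255, 8)); // prints 255
    return 0;
}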
@@ -47,7 +47,7 @@
 #include "../vec_traits.hpp"
 #include "../functional.hpp"

 namespace cv { namespace gpu { namespace device
 {
     namespace transform_detail
     {
@@ -203,7 +203,7 @@ namespace cv { namespace gpu { namespace device
         };

         template <typename T, typename D, typename UnOp, typename Mask>
-        __global__ static void transformSmart(const PtrStepSz<T> src_, PtrStep<D> dst_, const Mask mask, const UnOp op)
+        static __global__ void transformSmart(const PtrStepSz<T> src_, PtrStep<D> dst_, const Mask mask, const UnOp op)
         {
             typedef TransformFunctorTraits<UnOp> ft;
             typedef typename UnaryReadWriteTraits<T, D, ft::smart_shift>::read_type read_type;
@@ -239,10 +239,10 @@ namespace cv { namespace gpu { namespace device
         }

         template <typename T, typename D, typename UnOp, typename Mask>
-        static __global__ void transformSimple(const PtrStepSz<T> src, PtrStep<D> dst, const Mask mask, const UnOp op)
+        __global__ static void transformSimple(const PtrStepSz<T> src, PtrStep<D> dst, const Mask mask, const UnOp op)
         {
             const int x = blockDim.x * blockIdx.x + threadIdx.x;
             const int y = blockDim.y * blockIdx.y + threadIdx.y;

             if (x < src.cols && y < src.rows && mask(y, x))
             {
@@ -251,7 +251,7 @@ namespace cv { namespace gpu { namespace device
         }

         template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
-        __global__ static void transformSmart(const PtrStepSz<T1> src1_, const PtrStep<T2> src2_, PtrStep<D> dst_,
+        static __global__ void transformSmart(const PtrStepSz<T1> src1_, const PtrStep<T2> src2_, PtrStep<D> dst_,
                                               const Mask mask, const BinOp op)
         {
             typedef TransformFunctorTraits<BinOp> ft;
@@ -274,7 +274,7 @@ namespace cv { namespace gpu { namespace device
                     const read_type1 src1_n_el = ((const read_type1*)src1)[x];
                     const read_type2 src2_n_el = ((const read_type2*)src2)[x];
                     write_type dst_n_el = ((const write_type*)dst)[x];

                     OpUnroller<ft::smart_shift>::unroll(src1_n_el, src2_n_el, dst_n_el, mask, op, x_shifted, y);

                     ((write_type*)dst)[x] = dst_n_el;
@@ -291,11 +291,11 @@ namespace cv { namespace gpu { namespace device
         }

         template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
         static __global__ void transformSimple(const PtrStepSz<T1> src1, const PtrStep<T2> src2, PtrStep<D> dst,
                                                const Mask mask, const BinOp op)
         {
             const int x = blockDim.x * blockIdx.x + threadIdx.x;
             const int y = blockDim.y * blockIdx.y + threadIdx.y;

             if (x < src1.cols && y < src1.rows && mask(y, x))
             {
@@ -314,13 +314,13 @@ namespace cv { namespace gpu { namespace device
                 typedef TransformFunctorTraits<UnOp> ft;

                 const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1);
                 const dim3 grid(divUp(src.cols, threads.x), divUp(src.rows, threads.y), 1);

                 transformSimple<T, D><<<grid, threads, 0, stream>>>(src, dst, mask, op);
                 cudaSafeCall( cudaGetLastError() );

                 if (stream == 0)
                     cudaSafeCall( cudaDeviceSynchronize() );
             }

             template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
@@ -329,13 +329,13 @@ namespace cv { namespace gpu { namespace device
                 typedef TransformFunctorTraits<BinOp> ft;

                 const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1);
                 const dim3 grid(divUp(src1.cols, threads.x), divUp(src1.rows, threads.y), 1);

                 transformSimple<T1, T2, D><<<grid, threads, 0, stream>>>(src1, src2, dst, mask, op);
                 cudaSafeCall( cudaGetLastError() );

                 if (stream == 0)
                     cudaSafeCall( cudaDeviceSynchronize() );
             }
         };

         template<> struct TransformDispatcher<true>
@@ -347,7 +347,7 @@ namespace cv { namespace gpu { namespace device
                 StaticAssert<ft::smart_shift != 1>::check();

                 if (!isAligned(src.data, ft::smart_shift * sizeof(T)) || !isAligned(src.step, ft::smart_shift * sizeof(T)) ||
                     !isAligned(dst.data, ft::smart_shift * sizeof(D)) || !isAligned(dst.step, ft::smart_shift * sizeof(D)))
                 {
                     TransformDispatcher<false>::call(src, dst, op, mask, stream);
@@ -355,7 +355,7 @@ namespace cv { namespace gpu { namespace device
                 }

                 const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1);
                 const dim3 grid(divUp(src.cols, threads.x * ft::smart_shift), divUp(src.rows, threads.y), 1);

                 transformSmart<T, D><<<grid, threads, 0, stream>>>(src, dst, mask, op);
                 cudaSafeCall( cudaGetLastError() );
@@ -380,15 +380,15 @@ namespace cv { namespace gpu { namespace device
                 }

                 const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1);
                 const dim3 grid(divUp(src1.cols, threads.x * ft::smart_shift), divUp(src1.rows, threads.y), 1);

                 transformSmart<T1, T2, D><<<grid, threads, 0, stream>>>(src1, src2, dst, mask, op);
                 cudaSafeCall( cudaGetLastError() );

                 if (stream == 0)
                     cudaSafeCall( cudaDeviceSynchronize() );
             }
         };
     } // namespace transform_detail
 }}} // namespace cv { namespace gpu { namespace device
...
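TransformDispatcher above gates the vectorized path at runtime: the "smart" kernels consume ft::smart_shift elements per thread, which is safe only when the source and destination pointers and row strides are aligned to that vector width, so unaligned data falls back to the one-element-per-thread "simple" kernels. A minimal standalone sketch of that alignment gate and grid computation (the 32x8 block and the helper names makeGrid/launchShape are illustrative assumptions; real block sizes come from TransformFunctorTraits):

#include <cstdint>
#include <cstdio>
#include <cuda_runtime.h>

static int divUp(int total, int grain) { return (total + grain - 1) / grain; }

static bool isAligned(const void* ptr, size_t size)
{
    return reinterpret_cast<uintptr_t>(ptr) % size == 0;
}

// Returns the grid for the vectorized path when the data permits it,
// otherwise the one-element-per-thread grid.
static dim3 makeGrid(int cols, int rows, const void* data, size_t step,
                     size_t elemSize, int smartShift, bool& useSmart)
{
    const dim3 threads(32, 8, 1);
    useSmart = isAligned(data, smartShift * elemSize) &&
               step % (smartShift * elemSize) == 0;
    return useSmart
        ? dim3(divUp(cols, threads.x * smartShift), divUp(rows, threads.y), 1)
        : dim3(divUp(cols, threads.x), divUp(rows, threads.y), 1);
}

int main()
{
    bool smart = false;
    // 640x480 8-bit image with a 2048-byte pitch, 4 elements per thread:
    const dim3 g = makeGrid(640, 480, nullptr, 2048, sizeof(unsigned char), 4, smart);
    printf("smart=%d grid=(%u,%u)\n", smart, g.x, g.y); // smart=1 grid=(5,60)
    return 0;
}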
@@ -43,124 +43,129 @@
 #ifndef __OPENCV_GPU_SCAN_HPP__
 #define __OPENCV_GPU_SCAN_HPP__

+#include "common.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
     enum ScanKind { EXCLUSIVE = 0, INCLUSIVE = 1 };

     template <ScanKind Kind, typename T, typename F> struct WarpScan
     {
         __device__ __forceinline__ WarpScan() {}
         __device__ __forceinline__ WarpScan(const WarpScan& other) { (void)other; }

         __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx)
         {
             const unsigned int lane = idx & 31;
             F op;

             if ( lane >= 1) ptr [idx ] = op(ptr [idx - 1], ptr [idx]);
             if ( lane >= 2) ptr [idx ] = op(ptr [idx - 2], ptr [idx]);
             if ( lane >= 4) ptr [idx ] = op(ptr [idx - 4], ptr [idx]);
             if ( lane >= 8) ptr [idx ] = op(ptr [idx - 8], ptr [idx]);
             if ( lane >= 16) ptr [idx ] = op(ptr [idx - 16], ptr [idx]);

             if( Kind == INCLUSIVE )
                 return ptr [idx];
             else
                 return (lane > 0) ? ptr [idx - 1] : 0;
         }

         __device__ __forceinline__ unsigned int index(const unsigned int tid)
         {
             return tid;
         }

         __device__ __forceinline__ void init(volatile T *ptr){}

         static const int warp_offset = 0;

         typedef WarpScan<INCLUSIVE, T, F> merge;
     };

     template <ScanKind Kind , typename T, typename F> struct WarpScanNoComp
     {
         __device__ __forceinline__ WarpScanNoComp() {}
         __device__ __forceinline__ WarpScanNoComp(const WarpScanNoComp& other) { (void)other; }

         __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx)
         {
             const unsigned int lane = threadIdx.x & 31;
             F op;

             ptr [idx ] = op(ptr [idx - 1], ptr [idx]);
             ptr [idx ] = op(ptr [idx - 2], ptr [idx]);
             ptr [idx ] = op(ptr [idx - 4], ptr [idx]);
             ptr [idx ] = op(ptr [idx - 8], ptr [idx]);
             ptr [idx ] = op(ptr [idx - 16], ptr [idx]);

             if( Kind == INCLUSIVE )
                 return ptr [idx];
             else
                 return (lane > 0) ? ptr [idx - 1] : 0;
         }

         __device__ __forceinline__ unsigned int index(const unsigned int tid)
         {
             return (tid >> warp_log) * warp_smem_stride + 16 + (tid & warp_mask);
         }

         __device__ __forceinline__ void init(volatile T *ptr)
         {
             ptr[threadIdx.x] = 0;
         }

         static const int warp_smem_stride = 32 + 16 + 1;
         static const int warp_offset = 16;
         static const int warp_log = 5;
         static const int warp_mask = 31;

         typedef WarpScanNoComp<INCLUSIVE, T, F> merge;
     };

     template <ScanKind Kind , typename T, typename Sc, typename F> struct BlockScan
     {
         __device__ __forceinline__ BlockScan() {}
         __device__ __forceinline__ BlockScan(const BlockScan& other) { (void)other; }

         __device__ __forceinline__ T operator()(volatile T *ptr)
         {
             const unsigned int tid = threadIdx.x;
             const unsigned int lane = tid & warp_mask;
             const unsigned int warp = tid >> warp_log;

             Sc scan;
             typename Sc::merge merge_scan;

             const unsigned int idx = scan.index(tid);

             T val = scan(ptr, idx);
             __syncthreads ();

             if( warp == 0)
                 scan.init(ptr);
             __syncthreads ();

             if( lane == 31 )
                 ptr [scan.warp_offset + warp ] = (Kind == INCLUSIVE) ? val : ptr [idx];
             __syncthreads ();

             if( warp == 0 )
                 merge_scan(ptr, idx);
             __syncthreads();

             if ( warp > 0)
                 val = ptr [scan.warp_offset + warp - 1] + val;
             __syncthreads ();

             ptr[idx] = val;
             __syncthreads ();

             return val ;
         }

         static const int warp_log = 5;
         static const int warp_mask = 31;
     };
+}}}

-#endif
\ No newline at end of file
+#endif // __OPENCV_GPU_SCAN_HPP__
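WarpScan above is the classic warp-synchronous Hillis-Steele scan: five steps fold in the value 1, 2, 4, 8, and 16 lanes to the left, after which every lane holds its inclusive prefix, and BlockScan then stitches the per-warp results together through shared memory. The original relies on pre-Volta implicit warp lockstep over volatile shared memory; below is a hedged standalone sketch of the same ladder written with the modern __shfl_up_sync equivalent (the kernel and test harness are illustrative, not part of the commit):

#include <cstdio>
#include <cuda_runtime.h>

// Inclusive prefix sum over one warp: same ladder as WarpScan, but with
// explicit warp-synchronous shuffles instead of volatile shared memory.
__global__ void warpInclusiveScan(const int* in, int* out)
{
    const unsigned int lane = threadIdx.x & 31;
    int val = in[lane];

    // offset = 1, 2, 4, 8, 16: pull the running sum from 'offset' lanes left.
    for (int offset = 1; offset < 32; offset *= 2)
    {
        const int left = __shfl_up_sync(0xffffffffu, val, offset);
        if (lane >= offset)
            val += left;
    }

    out[lane] = val; // lane i now holds in[0] + ... + in[i]
}

int main()
{
    int h_in[32], h_out[32];
    for (int i = 0; i < 32; ++i) h_in[i] = 1; // expect out[i] == i + 1

    int *d_in, *d_out;
    cudaMalloc(&d_in, sizeof(h_in));
    cudaMalloc(&d_out, sizeof(h_out));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);

    warpInclusiveScan<<<1, 32>>>(d_in, d_out);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);

    printf("out[31] = %d\n", h_out[31]); // prints 32

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}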
@@ -60,10 +60,8 @@ namespace cv { namespace gpu
         __OPENCV_GPU_HOST_DEVICE__ static void check() {};
     };
 }

-using ::cv::gpu::device::Static;
-
 }}

 #undef __OPENCV_GPU_HOST_DEVICE__

-#endif /* __OPENCV_GPU_GPU_DEVICE_STATIC_CHECK_HPP__ */
\ No newline at end of file
+#endif /* __OPENCV_GPU_GPU_DEVICE_STATIC_CHECK_HPP__ */
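The using-declaration removed here had re-exported cv::gpu::device::Static into cv::gpu; it backs the StaticAssert<...>::check() calls seen in transform_detail. Only the true specialization defines check(), so a false compile-time condition fails to instantiate. A minimal sketch of that pre-C++11 idiom (names modeled on the call sites; the exact member list is an assumption):

// Pre-C++11 compile-time assertion: only the <true> specialization
// defines check(), so StaticAssert<false>::check() is a compile error.
template <bool expr> struct StaticAssert;
template <> struct StaticAssert<true>
{
    static void check() {}
};

int main()
{
    StaticAssert<sizeof(int) == 4>::check();   // compiles on typical targets
    // StaticAssert<sizeof(int) == 3>::check(); // would fail to compile
    return 0;
}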
@@ -45,7 +45,7 @@
 #include "saturate_cast.hpp"
 #include "datamov_utils.hpp"
-#include "detail/utility_detail.hpp"
+#include "detail/reduction_detail.hpp"

 namespace cv { namespace gpu { namespace device
 {
...