Commit 1c1a61dd authored by Vladislav Vinogradov

added __forceinline__ to device functions

fixed BFM warning ("cannot tell what pointer points to")
parent 79f3260b
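
The whole commit is one mechanical change: every small `__device__` helper and functor gains `__forceinline__`, so nvcc inlines it at each call site instead of relying on its own heuristics. A minimal before/after sketch of the pattern (the functor below is illustrative, not taken from this commit):

    // Before: nvcc may or may not inline a tiny functor called in a hot loop.
    struct PlusOp
    {
        __device__ int operator()(int a, int b) const { return a + b; }
    };

    // After: __forceinline__ tells nvcc to inline the body at every call site,
    // which matters for per-pixel functors invoked millions of times per kernel.
    struct PlusOpInlined
    {
        __device__ __forceinline__ int operator()(int a, int b) const { return a + b; }
    };
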
@@ -56,7 +56,7 @@ namespace cv
 // It is intended to pass to nvcc-compiled code. GpuMat depends on headers that nvcc can't compile
 #if defined(__CUDACC__)
-    #define __CV_GPU_HOST_DEVICE__ __host__ __device__
+    #define __CV_GPU_HOST_DEVICE__ __host__ __device__ __forceinline__
 #else
     #define __CV_GPU_HOST_DEVICE__
 #endif
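
The macro keeps GpuMat headers consumable by host-only compilers: under nvcc the qualifiers (now including `__forceinline__`) are applied, while a plain C++ compiler sees nothing. A hedged usage sketch, assuming the macro above is in scope (the struct itself is hypothetical, not part of this diff):

    // Compiles both as CUDA device code and as ordinary host C++.
    struct Size2D
    {
        int width, height;
        __CV_GPU_HOST_DEVICE__ int area() const { return width * height; }
    };
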
@@ -56,7 +56,7 @@ namespace cv { namespace gpu
     struct TransformOp
     {
-        __device__ float3 operator()(float3 p) const
+        __device__ __forceinline__ float3 operator()(float3 p) const
         {
             return make_float3(
                 crot0.x * p.x + crot0.y * p.y + crot0.z * p.z + ctransl.x,
@@ -89,7 +89,7 @@ namespace cv { namespace gpu
     struct ProjectOp
     {
-        __device__ float2 operator()(float3 p) const
+        __device__ __forceinline__ float2 operator()(float3 p) const
         {
             // Rotate and translate in 3D
             float3 t = make_float3(
@@ -128,7 +128,7 @@ namespace cv { namespace gpu
         return SOLVE_PNP_RANSAC_MAX_NUM_ITERS;
     }

-    __device__ float sqr(float x)
+    __device__ __forceinline__ float sqr(float x)
     {
         return x * x;
     }
@@ -57,7 +57,7 @@ namespace cv { namespace gpu { namespace mathfunc
    template <typename T1, typename T2>
    struct NotEqual
    {
-       __device__ uchar operator()(const T1& src1, const T2& src2)
+       __device__ __forceinline__ uchar operator()(const T1& src1, const T2& src2)
        {
            return static_cast<uchar>(static_cast<int>(src1 != src2) * 255);
        }
@@ -91,7 +91,7 @@ namespace cv { namespace gpu { namespace mathfunc
    template <typename T>
    struct UnOp<T, UN_OP_NOT>
    {
-       static __device__ T call(T v) { return ~v; }
+       static __device__ __forceinline__ T call(T v) { return ~v; }
    };
@@ -199,20 +199,20 @@ namespace cv { namespace gpu { namespace mathfunc
    template <typename T>
    struct BinOp<T, BIN_OP_OR>
    {
-       static __device__ T call(T a, T b) { return a | b; }
+       static __device__ __forceinline__ T call(T a, T b) { return a | b; }
    };

    template <typename T>
    struct BinOp<T, BIN_OP_AND>
    {
-       static __device__ T call(T a, T b) { return a & b; }
+       static __device__ __forceinline__ T call(T a, T b) { return a & b; }
    };

    template <typename T>
    struct BinOp<T, BIN_OP_XOR>
    {
-       static __device__ T call(T a, T b) { return a ^ b; }
+       static __device__ __forceinline__ T call(T a, T b) { return a ^ b; }
    };
@@ -357,15 +357,15 @@ namespace cv { namespace gpu { namespace mathfunc
    struct MinOp
    {
        template <typename T>
-       __device__ T operator()(T a, T b)
+       __device__ __forceinline__ T operator()(T a, T b)
        {
            return min(a, b);
        }
-       __device__ float operator()(float a, float b)
+       __device__ __forceinline__ float operator()(float a, float b)
        {
            return fmin(a, b);
        }
-       __device__ double operator()(double a, double b)
+       __device__ __forceinline__ double operator()(double a, double b)
        {
            return fmin(a, b);
        }
@@ -374,15 +374,15 @@ namespace cv { namespace gpu { namespace mathfunc
    struct MaxOp
    {
        template <typename T>
-       __device__ T operator()(T a, T b)
+       __device__ __forceinline__ T operator()(T a, T b)
        {
            return max(a, b);
        }
-       __device__ float operator()(float a, float b)
+       __device__ __forceinline__ float operator()(float a, float b)
        {
            return fmax(a, b);
        }
-       __device__ double operator()(double a, double b)
+       __device__ __forceinline__ double operator()(double a, double b)
        {
            return fmax(a, b);
        }
@@ -394,7 +394,7 @@ namespace cv { namespace gpu { namespace mathfunc
        explicit ScalarMinOp(T s_) : s(s_) {}

-       __device__ T operator()(T a)
+       __device__ __forceinline__ T operator()(T a)
        {
            return min(a, s);
        }
@@ -405,7 +405,7 @@ namespace cv { namespace gpu { namespace mathfunc
        explicit ScalarMinOp(float s_) : s(s_) {}

-       __device__ float operator()(float a)
+       __device__ __forceinline__ float operator()(float a)
        {
            return fmin(a, s);
        }
@@ -416,7 +416,7 @@ namespace cv { namespace gpu { namespace mathfunc
        explicit ScalarMinOp(double s_) : s(s_) {}

-       __device__ double operator()(double a)
+       __device__ __forceinline__ double operator()(double a)
        {
            return fmin(a, s);
        }
@@ -428,7 +428,7 @@ namespace cv { namespace gpu { namespace mathfunc
        explicit ScalarMaxOp(T s_) : s(s_) {}

-       __device__ T operator()(T a)
+       __device__ __forceinline__ T operator()(T a)
        {
            return max(a, s);
        }
@@ -439,7 +439,7 @@ namespace cv { namespace gpu { namespace mathfunc
        explicit ScalarMaxOp(float s_) : s(s_) {}

-       __device__ float operator()(float a)
+       __device__ __forceinline__ float operator()(float a)
        {
            return fmax(a, s);
        }
@@ -450,7 +450,7 @@ namespace cv { namespace gpu { namespace mathfunc
        explicit ScalarMaxOp(double s_) : s(s_) {}

-       __device__ double operator()(double a)
+       __device__ __forceinline__ double operator()(double a)
        {
            return fmax(a, s);
        }
@@ -524,7 +524,7 @@ namespace cv { namespace gpu { namespace mathfunc
    {
        ThreshBinary(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}

-       __device__ T operator()(const T& src) const
+       __device__ __forceinline__ T operator()(const T& src) const
        {
            return src > thresh ? maxVal : 0;
        }
@@ -538,7 +538,7 @@ namespace cv { namespace gpu { namespace mathfunc
    {
        ThreshBinaryInv(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}

-       __device__ T operator()(const T& src) const
+       __device__ __forceinline__ T operator()(const T& src) const
        {
            return src > thresh ? 0 : maxVal;
        }
@@ -552,7 +552,7 @@ namespace cv { namespace gpu { namespace mathfunc
    {
        ThreshTrunc(T thresh_, T) : thresh(thresh_) {}

-       __device__ T operator()(const T& src) const
+       __device__ __forceinline__ T operator()(const T& src) const
        {
            return min(src, thresh);
        }
@@ -564,7 +564,7 @@ namespace cv { namespace gpu { namespace mathfunc
    {
        ThreshTrunc(float thresh_, float) : thresh(thresh_) {}

-       __device__ float operator()(const float& src) const
+       __device__ __forceinline__ float operator()(const float& src) const
        {
            return fmin(src, thresh);
        }
@@ -576,7 +576,7 @@ namespace cv { namespace gpu { namespace mathfunc
    {
        ThreshTrunc(double thresh_, double) : thresh(thresh_) {}

-       __device__ double operator()(const double& src) const
+       __device__ __forceinline__ double operator()(const double& src) const
        {
            return fmin(src, thresh);
        }
@@ -590,7 +590,7 @@ namespace cv { namespace gpu { namespace mathfunc
    public:
        ThreshToZero(T thresh_, T) : thresh(thresh_) {}

-       __device__ T operator()(const T& src) const
+       __device__ __forceinline__ T operator()(const T& src) const
        {
            return src > thresh ? src : 0;
        }
@@ -604,7 +604,7 @@ namespace cv { namespace gpu { namespace mathfunc
    public:
        ThreshToZeroInv(T thresh_, T) : thresh(thresh_) {}

-       __device__ T operator()(const T& src) const
+       __device__ __forceinline__ T operator()(const T& src) const
        {
            return src > thresh ? 0 : src;
        }
@@ -406,7 +406,7 @@ namespace bf_krnls
    template <int channels>
    struct DistRgbMax
    {
-       static __device__ uchar calc(const uchar* a, const uchar* b)
+       static __device__ __forceinline__ uchar calc(const uchar* a, const uchar* b)
        {
            uchar x = abs(a[0] - b[0]);
            uchar y = abs(a[1] - b[1]);
@@ -418,7 +418,7 @@ namespace bf_krnls
    template <>
    struct DistRgbMax<1>
    {
-       static __device__ uchar calc(const uchar* a, const uchar* b)
+       static __device__ __forceinline__ uchar calc(const uchar* a, const uchar* b)
        {
            return abs(a[0] - b[0]);
        }
@@ -48,35 +48,35 @@ using namespace cv::gpu::device;
 namespace cv { namespace gpu { namespace imgproc {

-    __device__ float sum(float v) { return v; }
-    __device__ float sum(float2 v) { return v.x + v.y; }
-    __device__ float sum(float3 v) { return v.x + v.y + v.z; }
-    __device__ float sum(float4 v) { return v.x + v.y + v.z + v.w; }
+    __device__ __forceinline__ float sum(float v) { return v; }
+    __device__ __forceinline__ float sum(float2 v) { return v.x + v.y; }
+    __device__ __forceinline__ float sum(float3 v) { return v.x + v.y + v.z; }
+    __device__ __forceinline__ float sum(float4 v) { return v.x + v.y + v.z + v.w; }

-    __device__ float first(float v) { return v; }
-    __device__ float first(float2 v) { return v.x; }
-    __device__ float first(float3 v) { return v.x; }
-    __device__ float first(float4 v) { return v.x; }
+    __device__ __forceinline__ float first(float v) { return v; }
+    __device__ __forceinline__ float first(float2 v) { return v.x; }
+    __device__ __forceinline__ float first(float3 v) { return v.x; }
+    __device__ __forceinline__ float first(float4 v) { return v.x; }

-    __device__ float mul(float a, float b) { return a * b; }
-    __device__ float2 mul(float2 a, float2 b) { return make_float2(a.x * b.x, a.y * b.y); }
-    __device__ float3 mul(float3 a, float3 b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); }
-    __device__ float4 mul(float4 a, float4 b) { return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); }
-    __device__ float mul(uchar a, uchar b) { return a * b; }
-    __device__ float2 mul(uchar2 a, uchar2 b) { return make_float2(a.x * b.x, a.y * b.y); }
-    __device__ float3 mul(uchar3 a, uchar3 b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); }
-    __device__ float4 mul(uchar4 a, uchar4 b) { return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); }
+    __device__ __forceinline__ float mul(float a, float b) { return a * b; }
+    __device__ __forceinline__ float2 mul(float2 a, float2 b) { return make_float2(a.x * b.x, a.y * b.y); }
+    __device__ __forceinline__ float3 mul(float3 a, float3 b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); }
+    __device__ __forceinline__ float4 mul(float4 a, float4 b) { return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); }
+    __device__ __forceinline__ float mul(uchar a, uchar b) { return a * b; }
+    __device__ __forceinline__ float2 mul(uchar2 a, uchar2 b) { return make_float2(a.x * b.x, a.y * b.y); }
+    __device__ __forceinline__ float3 mul(uchar3 a, uchar3 b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); }
+    __device__ __forceinline__ float4 mul(uchar4 a, uchar4 b) { return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); }

-    __device__ float sub(float a, float b) { return a - b; }
-    __device__ float2 sub(float2 a, float2 b) { return make_float2(a.x - b.x, a.y - b.y); }
-    __device__ float3 sub(float3 a, float3 b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); }
-    __device__ float4 sub(float4 a, float4 b) { return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); }
-    __device__ float sub(uchar a, uchar b) { return a - b; }
-    __device__ float2 sub(uchar2 a, uchar2 b) { return make_float2(a.x - b.x, a.y - b.y); }
-    __device__ float3 sub(uchar3 a, uchar3 b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); }
-    __device__ float4 sub(uchar4 a, uchar4 b) { return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); }
+    __device__ __forceinline__ float sub(float a, float b) { return a - b; }
+    __device__ __forceinline__ float2 sub(float2 a, float2 b) { return make_float2(a.x - b.x, a.y - b.y); }
+    __device__ __forceinline__ float3 sub(float3 a, float3 b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); }
+    __device__ __forceinline__ float4 sub(float4 a, float4 b) { return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); }
+    __device__ __forceinline__ float sub(uchar a, uchar b) { return a - b; }
+    __device__ __forceinline__ float2 sub(uchar2 a, uchar2 b) { return make_float2(a.x - b.x, a.y - b.y); }
+    __device__ __forceinline__ float3 sub(uchar3 a, uchar3 b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); }
+    __device__ __forceinline__ float4 sub(uchar4 a, uchar4 b) { return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); }

 template <typename T, int cn>
@@ -60,27 +60,27 @@ namespace cv { namespace gpu { namespace mathfunc
 {
    struct Nothing
    {
-       static __device__ void calc(int, int, float, float, float*, size_t, float)
+       static __device__ __forceinline__ void calc(int, int, float, float, float*, size_t, float)
        {
        }
    };
    struct Magnitude
    {
-       static __device__ void calc(int x, int y, float x_data, float y_data, float* dst, size_t dst_step, float)
+       static __device__ __forceinline__ void calc(int x, int y, float x_data, float y_data, float* dst, size_t dst_step, float)
        {
            dst[y * dst_step + x] = sqrtf(x_data * x_data + y_data * y_data);
        }
    };
    struct MagnitudeSqr
    {
-       static __device__ void calc(int x, int y, float x_data, float y_data, float* dst, size_t dst_step, float)
+       static __device__ __forceinline__ void calc(int x, int y, float x_data, float y_data, float* dst, size_t dst_step, float)
        {
            dst[y * dst_step + x] = x_data * x_data + y_data * y_data;
        }
    };
    struct Atan2
    {
-       static __device__ void calc(int x, int y, float x_data, float y_data, float* dst, size_t dst_step, float scale)
+       static __device__ __forceinline__ void calc(int x, int y, float x_data, float y_data, float* dst, size_t dst_step, float scale)
        {
            dst[y * dst_step + x] = scale * atan2f(y_data, x_data);
        }
@@ -104,14 +104,14 @@ namespace cv { namespace gpu { namespace mathfunc
    struct NonEmptyMag
    {
-       static __device__ float get(const float* mag, size_t mag_step, int x, int y)
+       static __device__ __forceinline__ float get(const float* mag, size_t mag_step, int x, int y)
        {
            return mag[y * mag_step + x];
        }
    };
    struct EmptyMag
    {
-       static __device__ float get(const float*, size_t, int, int)
+       static __device__ __forceinline__ float get(const float*, size_t, int, int)
        {
            return 1.0f;
        }
@@ -123,14 +123,14 @@ namespace cv { namespace gpu { namespace matrix_operations {
 __constant__ float scalar_32f[4];
 __constant__ double scalar_64f[4];

-template <typename T> __device__ T readScalar(int i);
-template <> __device__ uchar readScalar<uchar>(int i) {return scalar_8u[i];}
-template <> __device__ schar readScalar<schar>(int i) {return scalar_8s[i];}
-template <> __device__ ushort readScalar<ushort>(int i) {return scalar_16u[i];}
-template <> __device__ short readScalar<short>(int i) {return scalar_16s[i];}
-template <> __device__ int readScalar<int>(int i) {return scalar_32s[i];}
-template <> __device__ float readScalar<float>(int i) {return scalar_32f[i];}
-template <> __device__ double readScalar<double>(int i) {return scalar_64f[i];}
+template <typename T> __device__ __forceinline__ T readScalar(int i);
+template <> __device__ __forceinline__ uchar readScalar<uchar>(int i) {return scalar_8u[i];}
+template <> __device__ __forceinline__ schar readScalar<schar>(int i) {return scalar_8s[i];}
+template <> __device__ __forceinline__ ushort readScalar<ushort>(int i) {return scalar_16u[i];}
+template <> __device__ __forceinline__ short readScalar<short>(int i) {return scalar_16s[i];}
+template <> __device__ __forceinline__ int readScalar<int>(int i) {return scalar_32s[i];}
+template <> __device__ __forceinline__ float readScalar<float>(int i) {return scalar_32f[i];}
+template <> __device__ __forceinline__ double readScalar<double>(int i) {return scalar_64f[i];}

 void writeScalar(const uchar* vals)
 {
@@ -243,7 +243,7 @@ namespace cv { namespace gpu { namespace matrix_operations {
 public:
    Convertor(double alpha_, double beta_) : alpha(alpha_), beta(beta_) {}

-   __device__ D operator()(const T& src)
+   __device__ __forceinline__ D operator()(const T& src)
    {
        return saturate_cast<D>(alpha * src + beta);
    }
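
Convertor is the per-element functor behind GpuMat::convertTo: dst = saturate_cast<D>(alpha * src + beta). A hedged sketch of the same computation as a standalone kernel (the kernel name and flat layout are illustrative; the real code routes through the transform framework):

    template <typename T, typename D>
    __global__ void convertKernel(const T* src, D* dst, int n, double alpha, double beta)
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n)
            dst[i] = cv::gpu::device::saturate_cast<D>(alpha * src[i] + beta);
    }
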
@@ -78,7 +78,7 @@ namespace cv { namespace gpu { namespace mathfunc
 {
    explicit Mask8U(PtrStep mask): mask(mask) {}

-   __device__ bool operator()(int y, int x) const
+   __device__ __forceinline__ bool operator()(int y, int x) const
    {
        return mask.ptr(y)[x];
    }
@@ -89,7 +89,7 @@ namespace cv { namespace gpu { namespace mathfunc
 struct MaskTrue
 {
-   __device__ bool operator()(int y, int x) const
+   __device__ __forceinline__ bool operator()(int y, int x) const
    {
        return true;
    }
@@ -153,7 +153,7 @@ namespace cv { namespace gpu { namespace mathfunc
 // Does min and max in shared memory
 template <typename T>
-__device__ void merge(uint tid, uint offset, volatile T* minval, volatile T* maxval)
+__device__ __forceinline__ void merge(uint tid, uint offset, volatile T* minval, volatile T* maxval)
 {
    minval[tid] = min(minval[tid], minval[tid + offset]);
    maxval[tid] = max(maxval[tid], maxval[tid + offset]);
@@ -976,16 +976,16 @@ namespace cv { namespace gpu { namespace mathfunc
 template <> struct SumType<double> { typedef double R; };

 template <typename R>
-struct IdentityOp { static __device__ R call(R x) { return x; } };
+struct IdentityOp { static __device__ __forceinline__ R call(R x) { return x; } };

 template <typename R>
-struct AbsOp { static __device__ R call(R x) { return abs(x); } };
+struct AbsOp { static __device__ __forceinline__ R call(R x) { return abs(x); } };

 template <>
-struct AbsOp<uint> { static __device__ uint call(uint x) { return x; } };
+struct AbsOp<uint> { static __device__ __forceinline__ uint call(uint x) { return x; } };

 template <typename R>
-struct SqrOp { static __device__ R call(R x) { return x * x; } };
+struct SqrOp { static __device__ __forceinline__ R call(R x) { return x * x; } };

 __constant__ int ctwidth;
 __constant__ int ctheight;
@@ -68,7 +68,7 @@ __constant__ size_t cminSSD_step;
 __constant__ int cwidth;
 __constant__ int cheight;

-__device__ int SQ(int a)
+__device__ __forceinline__ int SQ(int a)
 {
    return a * a;
 }
@@ -419,7 +419,7 @@ extern "C" void prefilter_xsobel(const DevMem2D& input, const DevMem2D& output,
 texture<unsigned char, 2, cudaReadModeNormalizedFloat> texForTF;

-__device__ float sobel(int x, int y)
+__device__ __forceinline__ float sobel(int x, int y)
 {
    float conv = tex2D(texForTF, x - 1, y - 1) * (-1) + tex2D(texForTF, x + 1, y - 1) * (1) +
                 tex2D(texForTF, x - 1, y    ) * (-2) + tex2D(texForTF, x + 1, y    ) * (2) +
@@ -76,11 +76,11 @@ namespace cv { namespace gpu { namespace bp
 template <int cn> struct PixDiff;
 template <> struct PixDiff<1>
 {
-   __device__ PixDiff(const uchar* ls)
+   __device__ __forceinline__ PixDiff(const uchar* ls)
    {
        l = *ls;
    }
-   __device__ float operator()(const uchar* rs) const
+   __device__ __forceinline__ float operator()(const uchar* rs) const
    {
        return abs((int)l - *rs);
    }
@@ -88,11 +88,11 @@ namespace cv { namespace gpu { namespace bp
 };
 template <> struct PixDiff<3>
 {
-   __device__ PixDiff(const uchar* ls)
+   __device__ __forceinline__ PixDiff(const uchar* ls)
    {
        l = *((uchar3*)ls);
    }
-   __device__ float operator()(const uchar* rs) const
+   __device__ __forceinline__ float operator()(const uchar* rs) const
    {
        const float tr = 0.299f;
        const float tg = 0.587f;
@@ -108,11 +108,11 @@ namespace cv { namespace gpu { namespace bp
 };
 template <> struct PixDiff<4>
 {
-   __device__ PixDiff(const uchar* ls)
+   __device__ __forceinline__ PixDiff(const uchar* ls)
    {
        l = *((uchar4*)ls);
    }
-   __device__ float operator()(const uchar* rs) const
+   __device__ __forceinline__ float operator()(const uchar* rs) const
    {
        const float tr = 0.299f;
        const float tg = 0.587f;
@@ -102,14 +102,14 @@ namespace cv { namespace gpu { namespace csbp
 template <int channels> struct DataCostPerPixel;
 template <> struct DataCostPerPixel<1>
 {
-   static __device__ float compute(const uchar* left, const uchar* right)
+   static __device__ __forceinline__ float compute(const uchar* left, const uchar* right)
    {
        return fmin(cdata_weight * abs((int)*left - *right), cdata_weight * cmax_data_term);
    }
 };
 template <> struct DataCostPerPixel<3>
 {
-   static __device__ float compute(const uchar* left, const uchar* right)
+   static __device__ __forceinline__ float compute(const uchar* left, const uchar* right)
    {
        float tb = 0.114f * abs((int)left[0] - right[0]);
        float tg = 0.587f * abs((int)left[1] - right[1]);
@@ -120,7 +120,7 @@ namespace cv { namespace gpu { namespace csbp
 };
 template <> struct DataCostPerPixel<4>
 {
-   static __device__ float compute(const uchar* left, const uchar* right)
+   static __device__ __forceinline__ float compute(const uchar* left, const uchar* right)
    {
        uchar4 l = *((const uchar4*)left);
        uchar4 r = *((const uchar4*)right);
@@ -122,7 +122,7 @@ namespace cv { namespace gpu { namespace surf
 __constant__ float c_DY [3][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} };
 __constant__ float c_DXY[4][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} };

-__host__ __device__ int calcSize(int octave, int layer)
+__host__ __device__ __forceinline__ int calcSize(int octave, int layer)
 {
    /* Wavelet size at first layer of first octave. */
    const int HAAR_SIZE0 = 9;
@@ -189,7 +189,7 @@ namespace cv { namespace gpu { namespace surf
 struct WithOutMask
 {
-   static __device__ bool check(int, int, int)
+   static __device__ __forceinline__ bool check(int, int, int)
    {
        return true;
    }
@@ -708,7 +708,7 @@ namespace cv { namespace gpu { namespace surf
    3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f
 };

-__device__ unsigned char calcWin(int i, int j, float centerX, float centerY, float win_offset, float cos_dir, float sin_dir)
+__device__ __forceinline__ unsigned char calcWin(int i, int j, float centerX, float centerY, float win_offset, float cos_dir, float sin_dir)
 {
    float pixel_x = centerX + (win_offset + j) * cos_dir + (win_offset + i) * sin_dir;
    float pixel_y = centerY - (win_offset + j) * sin_dir + (win_offset + i) * cos_dir;
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
 #ifndef __OPENCV_GPU_DATAMOV_UTILS_HPP__
 #define __OPENCV_GPU_DATAMOV_UTILS_HPP__

 #include "internal_shared.hpp"

+namespace cv { namespace gpu { namespace device
+{
 #if __CUDA_ARCH__ >= 200

    // for Fermi memory space is detected automatically
-   template <typename T> struct ForceGlobLoad
+   template <typename T> struct ForceGlob
    {
-       __device__ __forceinline__ static void Ld(T* ptr, int offset, T& val) { val = d_ptr[offset]; }
+       __device__ __forceinline__ static void Load(const T* ptr, int offset, T& val) { val = ptr[offset]; }
    };

-#else
+#else // __CUDA_ARCH__ >= 200

    #if defined(_WIN64) || defined(__LP64__)
        // 64-bit register modifier for inlined asm
        #define _OPENCV_ASM_PTR_ "l"
    #else
        // 32-bit register modifier for inlined asm
        #define _OPENCV_ASM_PTR_ "r"
    #endif

-   template<class T> struct ForceGlobLoad;
+   template<class T> struct ForceGlob;

-   #define DEFINE_FORCE_GLOB_LOAD(base_type, ptx_type, reg_mod) \
-       template <> struct ForceGlobLoad<base_type> \
-       { \
-           __device__ __forceinline__ static void Ld(type* ptr, int offset, type& val) \
-           { \
-               asm("ld.global."#ptx_type" %0, [%1];" : "="#reg_mod(val) : _OPENCV_ASM_PTR_(d_ptr + offset)); \
-           } \
-       };
+   #define DEFINE_FORCE_GLOB(base_type, ptx_type, reg_mod) \
+       template <> struct ForceGlob<base_type> \
+       { \
+           __device__ __forceinline__ static void Load(const base_type* ptr, int offset, base_type& val) \
+           { \
+               asm("ld.global."#ptx_type" %0, [%1];" : "="#reg_mod(val) : _OPENCV_ASM_PTR_(ptr + offset)); \
+           } \
+       };
+   #define DEFINE_FORCE_GLOB_B(base_type, ptx_type) \
+       template <> struct ForceGlob<base_type> \
+       { \
+           __device__ __forceinline__ static void Load(const base_type* ptr, int offset, base_type& val) \
+           { \
+               asm("ld.global."#ptx_type" %0, [%1];" : "=r"(*reinterpret_cast<uint*>(&val)) : _OPENCV_ASM_PTR_(ptr + offset)); \
+           } \
+       };

-   DEFINE_FORCE_GLOB_LOAD(int, s32, r)
-   DEFINE_FORCE_GLOB_LOAD(float, f32, f)
+   DEFINE_FORCE_GLOB_B(uchar,  u8)
+   DEFINE_FORCE_GLOB_B(schar,  s8)
+   DEFINE_FORCE_GLOB_B(char,   b8)
+   DEFINE_FORCE_GLOB  (ushort, u16, h)
+   DEFINE_FORCE_GLOB  (short,  s16, h)
+   DEFINE_FORCE_GLOB  (uint,   u32, r)
+   DEFINE_FORCE_GLOB  (int,    s32, r)
+   DEFINE_FORCE_GLOB  (float,  f32, f)
+   DEFINE_FORCE_GLOB  (double, f64, d)

-   #undef DEFINE_FORCE_GLOB_LOAD
+   #undef DEFINE_FORCE_GLOB
+   #undef DEFINE_FORCE_GLOB_B
+   #undef _OPENCV_ASM_PTR_

-#endif
+#endif // __CUDA_ARCH__ >= 200
+}}}

 #endif // __OPENCV_GPU_DATAMOV_UTILS_HPP__
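
This file carries the fix named in the commit message: the old `DEFINE_FORCE_GLOB_LOAD` body fed `d_ptr + offset` to the inline PTX although its parameter was named `ptr`, which is what produced the "cannot tell what pointer points to" warning in the BFM build; the rewritten `ForceGlob::Load` passes the actual `ptr + offset`. On sm_20+ a plain load suffices because the memory space is detected automatically; on older parts the explicit `ld.global` PTX forces a global-memory load. A hedged usage sketch (kernel and buffer names are illustrative, not from this commit):

    __global__ void copyRow(const float* src, float* dst, int n)
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n)
        {
            float val;
            // Plain load on Fermi, explicit ld.global.f32 on older architectures.
            cv::gpu::device::ForceGlob<float>::Load(src, i, val);
            dst[i] = val;
        }
    }
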
@@ -40,44 +40,41 @@
 //
 //M*/

-namespace cv
-{
-    namespace gpu
-    {
-        namespace device
-        {
-            template<class T> struct DynamicSharedMem
-            {
-                __device__ operator T*()
-                {
-                    extern __shared__ int __smem[];
-                    return (T*)__smem;
-                }
-
-                __device__ operator const T*() const
-                {
-                    extern __shared__ int __smem[];
-                    return (T*)__smem;
-                }
-            };
-
-            // specialize for double to avoid unaligned memory access compile errors
-            template<> struct DynamicSharedMem<double>
-            {
-                __device__ operator double*()
-                {
-                    extern __shared__ double __smem_d[];
-                    return (double*)__smem_d;
-                }
-
-                __device__ operator const double*() const
-                {
-                    extern __shared__ double __smem_d[];
-                    return (double*)__smem_d;
-                }
-            };
-        }
-    }
-}
-\ No newline at end of file
+#ifndef __OPENCV_GPU_DYNAMIC_SMEM_HPP__
+#define __OPENCV_GPU_DYNAMIC_SMEM_HPP__
+
+namespace cv { namespace gpu { namespace device
+{
+    template<class T> struct DynamicSharedMem
+    {
+        __device__ __forceinline__ operator T*()
+        {
+            extern __shared__ int __smem[];
+            return (T*)__smem;
+        }
+
+        __device__ __forceinline__ operator const T*() const
+        {
+            extern __shared__ int __smem[];
+            return (T*)__smem;
+        }
+    };
+
+    // specialize for double to avoid unaligned memory access compile errors
+    template<> struct DynamicSharedMem<double>
+    {
+        __device__ __forceinline__ operator double*()
+        {
+            extern __shared__ double __smem_d[];
+            return (double*)__smem_d;
+        }
+
+        __device__ __forceinline__ operator const double*() const
+        {
+            extern __shared__ double __smem_d[];
+            return (double*)__smem_d;
+        }
+    };
+}}}
+
+#endif // __OPENCV_GPU_DYNAMIC_SMEM_HPP__
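
DynamicSharedMem works around CUDA's one-extern-`__shared__`-array-per-kernel rule: the conversion operators alias the single dynamically sized buffer to whatever element type a kernel needs, with a double specialization to keep alignment legal. A hedged usage sketch, a standard block reduction (the kernel name is illustrative):

    __global__ void blockSum(const float* in, float* out, int n)
    {
        cv::gpu::device::DynamicSharedMem<float> smem;
        float* sdata = smem;                       // implicit operator float*()

        int tid = threadIdx.x;
        int i = blockIdx.x * blockDim.x + tid;
        sdata[tid] = (i < n) ? in[i] : 0.0f;
        __syncthreads();

        // Tree reduction over the block's shared buffer.
        for (int s = blockDim.x / 2; s > 0; s >>= 1)
        {
            if (tid < s)
                sdata[tid] += sdata[tid + s];
            __syncthreads();
        }

        if (tid == 0)
            out[blockIdx.x] = sdata[0];
    }

    // Launched with the shared-memory size as the third config argument:
    // blockSum<<<blocks, threads, threads * sizeof(float)>>>(d_in, d_out, n);
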
@@ -51,29 +51,29 @@ namespace cv
 {
    namespace device
    {
-       template<typename _Tp> static __device__ _Tp saturate_cast(uchar v) { return _Tp(v); }
-       template<typename _Tp> static __device__ _Tp saturate_cast(schar v) { return _Tp(v); }
-       template<typename _Tp> static __device__ _Tp saturate_cast(ushort v) { return _Tp(v); }
-       template<typename _Tp> static __device__ _Tp saturate_cast(short v) { return _Tp(v); }
-       template<typename _Tp> static __device__ _Tp saturate_cast(uint v) { return _Tp(v); }
-       template<typename _Tp> static __device__ _Tp saturate_cast(int v) { return _Tp(v); }
-       template<typename _Tp> static __device__ _Tp saturate_cast(float v) { return _Tp(v); }
-       template<typename _Tp> static __device__ _Tp saturate_cast(double v) { return _Tp(v); }
+       template<typename _Tp> static __device__ __forceinline__ _Tp saturate_cast(uchar v) { return _Tp(v); }
+       template<typename _Tp> static __device__ __forceinline__ _Tp saturate_cast(schar v) { return _Tp(v); }
+       template<typename _Tp> static __device__ __forceinline__ _Tp saturate_cast(ushort v) { return _Tp(v); }
+       template<typename _Tp> static __device__ __forceinline__ _Tp saturate_cast(short v) { return _Tp(v); }
+       template<typename _Tp> static __device__ __forceinline__ _Tp saturate_cast(uint v) { return _Tp(v); }
+       template<typename _Tp> static __device__ __forceinline__ _Tp saturate_cast(int v) { return _Tp(v); }
+       template<typename _Tp> static __device__ __forceinline__ _Tp saturate_cast(float v) { return _Tp(v); }
+       template<typename _Tp> static __device__ __forceinline__ _Tp saturate_cast(double v) { return _Tp(v); }

-       template<> static __device__ uchar saturate_cast<uchar>(schar v)
+       template<> static __device__ __forceinline__ uchar saturate_cast<uchar>(schar v)
        { return (uchar)max((int)v, 0); }
-       template<> static __device__ uchar saturate_cast<uchar>(ushort v)
+       template<> static __device__ __forceinline__ uchar saturate_cast<uchar>(ushort v)
        { return (uchar)min((uint)v, (uint)UCHAR_MAX); }
-       template<> static __device__ uchar saturate_cast<uchar>(int v)
+       template<> static __device__ __forceinline__ uchar saturate_cast<uchar>(int v)
        { return (uchar)((uint)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0); }
-       template<> static __device__ uchar saturate_cast<uchar>(uint v)
+       template<> static __device__ __forceinline__ uchar saturate_cast<uchar>(uint v)
        { return (uchar)min(v, (uint)UCHAR_MAX); }
-       template<> static __device__ uchar saturate_cast<uchar>(short v)
+       template<> static __device__ __forceinline__ uchar saturate_cast<uchar>(short v)
        { return saturate_cast<uchar>((uint)v); }

-       template<> static __device__ uchar saturate_cast<uchar>(float v)
+       template<> static __device__ __forceinline__ uchar saturate_cast<uchar>(float v)
        { int iv = __float2int_rn(v); return saturate_cast<uchar>(iv); }
-       template<> static __device__ uchar saturate_cast<uchar>(double v)
+       template<> static __device__ __forceinline__ uchar saturate_cast<uchar>(double v)
        {
        #if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130
            int iv = __double2int_rn(v); return saturate_cast<uchar>(iv);
@@ -82,23 +82,23 @@ namespace cv
        #endif
        }

-       template<> static __device__ schar saturate_cast<schar>(uchar v)
+       template<> static __device__ __forceinline__ schar saturate_cast<schar>(uchar v)
        { return (schar)min((int)v, SCHAR_MAX); }
-       template<> static __device__ schar saturate_cast<schar>(ushort v)
+       template<> static __device__ __forceinline__ schar saturate_cast<schar>(ushort v)
        { return (schar)min((uint)v, (uint)SCHAR_MAX); }
-       template<> static __device__ schar saturate_cast<schar>(int v)
+       template<> static __device__ __forceinline__ schar saturate_cast<schar>(int v)
        {
            return (schar)((uint)(v-SCHAR_MIN) <= (uint)UCHAR_MAX ?
                v : v > 0 ? SCHAR_MAX : SCHAR_MIN);
        }
-       template<> static __device__ schar saturate_cast<schar>(short v)
+       template<> static __device__ __forceinline__ schar saturate_cast<schar>(short v)
        { return saturate_cast<schar>((int)v); }
-       template<> static __device__ schar saturate_cast<schar>(uint v)
+       template<> static __device__ __forceinline__ schar saturate_cast<schar>(uint v)
        { return (schar)min(v, (uint)SCHAR_MAX); }

-       template<> static __device__ schar saturate_cast<schar>(float v)
+       template<> static __device__ __forceinline__ schar saturate_cast<schar>(float v)
        { int iv = __float2int_rn(v); return saturate_cast<schar>(iv); }
-       template<> static __device__ schar saturate_cast<schar>(double v)
+       template<> static __device__ __forceinline__ schar saturate_cast<schar>(double v)
        {
        #if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130
            int iv = __double2int_rn(v); return saturate_cast<schar>(iv);
@@ -107,17 +107,17 @@ namespace cv
        #endif
        }

-       template<> static __device__ ushort saturate_cast<ushort>(schar v)
+       template<> static __device__ __forceinline__ ushort saturate_cast<ushort>(schar v)
        { return (ushort)max((int)v, 0); }
-       template<> static __device__ ushort saturate_cast<ushort>(short v)
+       template<> static __device__ __forceinline__ ushort saturate_cast<ushort>(short v)
        { return (ushort)max((int)v, 0); }
-       template<> static __device__ ushort saturate_cast<ushort>(int v)
+       template<> static __device__ __forceinline__ ushort saturate_cast<ushort>(int v)
        { return (ushort)((uint)v <= (uint)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0); }
-       template<> static __device__ ushort saturate_cast<ushort>(uint v)
+       template<> static __device__ __forceinline__ ushort saturate_cast<ushort>(uint v)
        { return (ushort)min(v, (uint)USHRT_MAX); }
-       template<> static __device__ ushort saturate_cast<ushort>(float v)
+       template<> static __device__ __forceinline__ ushort saturate_cast<ushort>(float v)
        { int iv = __float2int_rn(v); return saturate_cast<ushort>(iv); }
-       template<> static __device__ ushort saturate_cast<ushort>(double v)
+       template<> static __device__ __forceinline__ ushort saturate_cast<ushort>(double v)
        {
        #if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130
            int iv = __double2int_rn(v); return saturate_cast<ushort>(iv);
@@ -126,18 +126,18 @@ namespace cv
        #endif
        }

-       template<> static __device__ short saturate_cast<short>(ushort v)
+       template<> static __device__ __forceinline__ short saturate_cast<short>(ushort v)
        { return (short)min((int)v, SHRT_MAX); }
-       template<> static __device__ short saturate_cast<short>(int v)
+       template<> static __device__ __forceinline__ short saturate_cast<short>(int v)
        {
            return (short)((uint)(v - SHRT_MIN) <= (uint)USHRT_MAX ?
                v : v > 0 ? SHRT_MAX : SHRT_MIN);
        }
-       template<> static __device__ short saturate_cast<short>(uint v)
+       template<> static __device__ __forceinline__ short saturate_cast<short>(uint v)
        { return (short)min(v, (uint)SHRT_MAX); }
-       template<> static __device__ short saturate_cast<short>(float v)
+       template<> static __device__ __forceinline__ short saturate_cast<short>(float v)
        { int iv = __float2int_rn(v); return saturate_cast<short>(iv); }
-       template<> static __device__ short saturate_cast<short>(double v)
+       template<> static __device__ __forceinline__ short saturate_cast<short>(double v)
        {
        #if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130
            int iv = __double2int_rn(v); return saturate_cast<short>(iv);
@@ -146,8 +146,8 @@ namespace cv
        #endif
        }

-       template<> static __device__ int saturate_cast<int>(float v) { return __float2int_rn(v); }
-       template<> static __device__ int saturate_cast<int>(double v)
+       template<> static __device__ __forceinline__ int saturate_cast<int>(float v) { return __float2int_rn(v); }
+       template<> static __device__ __forceinline__ int saturate_cast<int>(double v)
        {
        #if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130
            return __double2int_rn(v);
@@ -156,8 +156,8 @@ namespace cv
        #endif
        }

-       template<> static __device__ uint saturate_cast<uint>(float v){ return __float2uint_rn(v); }
-       template<> static __device__ uint saturate_cast<uint>(double v)
+       template<> static __device__ __forceinline__ uint saturate_cast<uint>(float v){ return __float2uint_rn(v); }
+       template<> static __device__ __forceinline__ uint saturate_cast<uint>(double v)
        {
        #if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130
            return __double2uint_rn(v);
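
Device-side saturate_cast mirrors the CPU-side cv::saturate_cast: out-of-range values clamp to the destination type's limits instead of wrapping, and float/double sources round to nearest via __float2int_rn / __double2int_rn. A short hedged example (the kernel name is illustrative):

    __global__ void addSat8u(const uchar* a, const uchar* b, uchar* dst, int n)
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n)
        {
            // 200 + 200 wraps to 144 with a plain uchar cast;
            // saturate_cast clamps the sum to UCHAR_MAX (255) instead.
            int sum = (int)a[i] + (int)b[i];
            dst[i] = cv::gpu::device::saturate_cast<uchar>(sum);
        }
    }
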
@@ -55,7 +55,7 @@ namespace cv { namespace gpu { namespace device
 public:
    explicit MaskReader(const PtrStep& mask_): mask(mask_) {}

-   __device__ bool operator()(int y, int x) const { return mask.ptr(y)[x]; }
+   __device__ __forceinline__ bool operator()(int y, int x) const { return mask.ptr(y)[x]; }

 private:
    PtrStep mask;
@@ -63,7 +63,7 @@ namespace cv { namespace gpu { namespace device
 struct NoMask
 {
-   __device__ bool operator()(int y, int x) const { return true; }
+   __device__ __forceinline__ bool operator()(int y, int x) const { return true; }
 };

 //! Read Write Traits
@@ -121,14 +121,14 @@ namespace cv { namespace gpu { namespace device
 template <> struct OpUnroller<1>
 {
    template <typename T, typename D, typename UnOp, typename Mask>
-   static __device__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y)
+   static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y)
    {
        if (mask(y, x_shifted))
            dst.x = op(src.x);
    }

    template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
-   static __device__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y)
+   static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y)
    {
        if (mask(y, x_shifted))
            dst.x = op(src1.x, src2.x);
@@ -137,7 +137,7 @@ namespace cv { namespace gpu { namespace device
 template <> struct OpUnroller<2>
 {
    template <typename T, typename D, typename UnOp, typename Mask>
-   static __device__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y)
+   static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y)
    {
        if (mask(y, x_shifted))
            dst.x = op(src.x);
@@ -146,7 +146,7 @@ namespace cv { namespace gpu { namespace device
    }

    template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
-   static __device__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y)
+   static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y)
    {
        if (mask(y, x_shifted))
            dst.x = op(src1.x, src2.x);
@@ -157,7 +157,7 @@ namespace cv { namespace gpu { namespace device
 template <> struct OpUnroller<3>
 {
    template <typename T, typename D, typename UnOp, typename Mask>
-   static __device__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y)
+   static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y)
    {
        if (mask(y, x_shifted))
            dst.x = op(src.x);
@@ -168,7 +168,7 @@ namespace cv { namespace gpu { namespace device
    }

    template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
-   static __device__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y)
+   static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y)
    {
        if (mask(y, x_shifted))
            dst.x = op(src1.x, src2.x);
@@ -181,7 +181,7 @@ namespace cv { namespace gpu { namespace device
 template <> struct OpUnroller<4>
 {
    template <typename T, typename D, typename UnOp, typename Mask>
-   static __device__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y)
+   static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y)
    {
        if (mask(y, x_shifted))
            dst.x = op(src.x);
@@ -194,7 +194,7 @@ namespace cv { namespace gpu { namespace device
    }

    template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
-   static __device__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y)
+   static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y)
    {
        if (mask(y, x_shifted))
            dst.x = op(src1.x, src2.x);
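
OpUnroller<N> is how the transform framework processes several packed elements per thread: a vector load (e.g. uchar4) is unpacked, the functor runs component-wise, and the mask is consulted at each shifted x coordinate. A hedged sketch of the N = 4 case as a free function (the name and signature are illustrative, not the library's API):

    // Equivalent in spirit to OpUnroller<4>::unroll for a unary op:
    // one masked functor application per component of the packed quad.
    template <typename UnOp, typename Mask>
    __device__ __forceinline__ void applyQuad(const uchar4& src, uchar4& dst,
                                              const Mask& mask, UnOp& op,
                                              int x_shifted, int y)
    {
        if (mask(y, x_shifted))     dst.x = op(src.x);
        if (mask(y, x_shifted + 1)) dst.y = op(src.y);
        if (mask(y, x_shifted + 2)) dst.z = op(src.z);
        if (mask(y, x_shifted + 3)) dst.w = op(src.w);
    }
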