Commit 91838187 authored by Vladislav Vinogradov

rewrite gpu/device/vec_math.hpp file

the old version doesn't compile with CUDA 5.5
the new version doesn't depend on functional.hpp
parent 119b7a29
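
The hunks below show the knock-on effect of the rewrite in the calling code: with the new vec_math.hpp, arithmetic on small integer vector types appears to return a wider, int-based vector (mirroring CUDA's scalar promotion rules), so callers that want the narrow type back have to saturate explicitly. A minimal sketch of that pattern, assuming the promoted return type and the vector saturate_cast overloads that the hunks themselves use (narrowDiffDemo is a hypothetical helper, not part of the commit):

    #include "opencv2/gpu/device/vec_math.hpp"

    namespace cv { namespace gpu { namespace device
    {
        // Assumption: after the rewrite, uchar3 - uchar3 yields an int3 rather
        // than a uchar3, so the caller saturates the result back explicitly.
        __device__ __forceinline__ uchar3 narrowDiffDemo(const uchar3& a, const uchar3& b)
        {
            return saturate_cast<uchar3>(a - b);
        }
    }}}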
@@ -153,7 +153,7 @@ namespace cv { namespace gpu { namespace device
         template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const
         {
-            I d = a - b;
+            I d = saturate_cast<I>(a - b);
             return lo.x <= d.x && d.x <= hi.x &&
                    lo.y <= d.y && d.y <= hi.y &&
                    lo.z <= d.z && d.z <= hi.z;
@@ -169,7 +169,7 @@ namespace cv { namespace gpu { namespace device
         template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const
         {
-            I d = a - b;
+            I d = saturate_cast<I>(a - b);
             return lo.x <= d.x && d.x <= hi.x &&
                    lo.y <= d.y && d.y <= hi.y &&
                    lo.z <= d.z && d.z <= hi.z &&
...
@@ -48,6 +48,7 @@
 #include "opencv2/gpu/device/common.hpp"
 #include "opencv2/gpu/device/emulation.hpp"
 #include "opencv2/gpu/device/vec_math.hpp"
+#include "opencv2/gpu/device/functional.hpp"
 #include "opencv2/gpu/device/limits.hpp"
 #include "opencv2/gpu/device/dynamic_smem.hpp"
@@ -811,7 +812,7 @@ namespace cv { namespace gpu { namespace device
         const int ind = ::atomicAdd(r_sizes + n, 1);
         if (ind < maxSize)
-            r_table(n, ind) = p - templCenter;
+            r_table(n, ind) = saturate_cast<short2>(p - templCenter);
     }

     void buildRTable_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount,
@@ -855,7 +856,7 @@ namespace cv { namespace gpu { namespace device
             for (int j = 0; j < r_row_size; ++j)
             {
-                short2 c = p - r_row[j];
+                int2 c = p - r_row[j];
                 c.x = __float2int_rn(c.x * idp);
                 c.y = __float2int_rn(c.y * idp);
...
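
The last hunk resolves the same promotion the other way: rather than saturating, the temporary c is widened to int2, which (assuming short2 - short2 now also returns an int2) avoids the narrowing assignment and keeps the __float2int_rn results in full int range. A hedged sketch of the two choices side by side, with hypothetical helper names and types inferred from the hunks:

    #include "opencv2/gpu/device/vec_math.hpp"

    namespace cv { namespace gpu { namespace device
    {
        // Option taken in the R-table hunk: saturate the difference back to short2.
        __device__ __forceinline__ short2 storeDisplacementDemo(short2 p, short2 center)
        {
            return saturate_cast<short2>(p - center);
        }

        // Option taken in the voting hunk: keep the assumed int2 result and scale it.
        __device__ __forceinline__ int2 scaledDisplacementDemo(short2 p, short2 r, float idp)
        {
            int2 c = p - r;
            c.x = __float2int_rn(c.x * idp);
            c.y = __float2int_rn(c.y * idp);
            return c;
        }
    }}}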