Commit 9034a2d6 authored by Vladislav Vinogradov

fixed gpu arithm functions (mismatch with cpu version)

parent 15902284
@@ -488,11 +488,29 @@ namespace cv { namespace gpu { namespace device
     template <typename T, typename D> struct Multiply : binary_function<T, T, D>
     {
-        Multiply(double scale_) : scale(scale_) {}
+        Multiply(float scale_) : scale(scale_) {}
         __device__ __forceinline__ D operator ()(T a, T b) const
         {
             return saturate_cast<D>(scale * a * b);
         }
+        const float scale;
+    };
+
+    template <typename T> struct Multiply<T, double> : binary_function<T, T, double>
+    {
+        Multiply(double scale_) : scale(scale_) {}
+        __device__ __forceinline__ double operator ()(T a, T b) const
+        {
+            return scale * a * b;
+        }
+        const double scale;
+    };
+
+    template <> struct Multiply<int, int> : binary_function<int, int, int>
+    {
+        Multiply(double scale_) : scale(scale_) {}
+        __device__ __forceinline__ int operator ()(int a, int b) const
+        {
+            return saturate_cast<int>(scale * a * b);
+        }
         const double scale;
     };
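The primary template now stores the scale as float, while the double-destination and the int-to-int specializations keep double precision. A minimal host-side sketch of why the scale type matters for large integer products (illustrative values only, not code from this commit): narrowing the scale to float can shift the rounded integer result away from the double-precision value, which is the kind of divergence from the CPU result that keeping double in those specializations avoids.

    #include <iostream>

    int main()
    {
        const int a = 100000, b = 100000;
        const double scaleD = 1.0 / 255.0;
        const float  scaleF = static_cast<float>(scaleD);

        // Same expression as the functor, evaluated with each scale type.
        const double viaDouble = scaleD * a * b; // double-precision reference
        const float  viaFloat  = scaleF * a * b; // float intermediates drop low-order bits

        // The truncated integer results differ for products this large.
        std::cout << static_cast<long long>(viaDouble) << " vs "
                  << static_cast<long long>(viaFloat) << std::endl;
        return 0;
    }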
@@ -517,12 +535,37 @@ namespace cv { namespace gpu { namespace device
         enum { smart_shift = 4 };
     };
 
-    template <typename T, typename D> void multiply_gpu(const DevMem2Db& src1, const DevMem2Db& src2, const DevMem2Db& dst, double scale, cudaStream_t stream)
+    template <typename T, typename D> struct MultiplyCaller
     {
-        cudaSafeCall( cudaSetDoubleForDevice(&scale) );
-        Multiply<T, D> op(scale);
-        cv::gpu::device::transform((DevMem2D_<T>)src1, (DevMem2D_<T>)src2, (DevMem2D_<D>)dst, op, WithOutMask(), stream);
+        static void call(const DevMem2Db& src1, const DevMem2Db& src2, const DevMem2Db& dst, double scale, cudaStream_t stream)
+        {
+            Multiply<T, D> op(static_cast<float>(scale));
+            cv::gpu::device::transform((DevMem2D_<T>)src1, (DevMem2D_<T>)src2, (DevMem2D_<D>)dst, op, WithOutMask(), stream);
+        }
+    };
+
+    template <typename T> struct MultiplyCaller<T, double>
+    {
+        static void call(const DevMem2Db& src1, const DevMem2Db& src2, const DevMem2Db& dst, double scale, cudaStream_t stream)
+        {
+            cudaSafeCall( cudaSetDoubleForDevice(&scale) );
+            Multiply<T, double> op(scale);
+            cv::gpu::device::transform((DevMem2D_<T>)src1, (DevMem2D_<T>)src2, (DevMem2D_<double>)dst, op, WithOutMask(), stream);
+        }
+    };
+
+    template <> struct MultiplyCaller<int, int>
+    {
+        static void call(const DevMem2Db& src1, const DevMem2Db& src2, const DevMem2Db& dst, double scale, cudaStream_t stream)
+        {
+            cudaSafeCall( cudaSetDoubleForDevice(&scale) );
+            Multiply<int, int> op(scale);
+            cv::gpu::device::transform((DevMem2D_<int>)src1, (DevMem2D_<int>)src2, (DevMem2D_<int>)dst, op, WithOutMask(), stream);
+        }
+    };
+
+    template <typename T, typename D> void multiply_gpu(const DevMem2Db& src1, const DevMem2Db& src2, const DevMem2Db& dst, double scale, cudaStream_t stream)
+    {
+        MultiplyCaller<T, D>::call(src1, src2, dst, scale, stream);
     }
 
     template void multiply_gpu<uchar, uchar >(const DevMem2Db& src1, const DevMem2Db& src2, const DevMem2Db& dst, double scale, cudaStream_t stream);
     //template void multiply_gpu<uchar, schar >(const DevMem2Db& src1, const DevMem2Db& src2, const DevMem2Db& dst, double scale, cudaStream_t stream);
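After the refactor, cudaSetDoubleForDevice is invoked only on the paths that actually hand a double scale to the functor. A minimal sketch of that runtime call in isolation (assumes a CUDA toolkit from this code's era, where the function is still available; it is deprecated in later toolkits, and this snippet is not part of the commit):

    #include <cuda_runtime.h>
    #include <cstdio>

    int main()
    {
        double scale = 1.0 / 255.0;

        // On GPUs without native double arithmetic the runtime demotes the
        // value to float precision in place; otherwise it is left unchanged.
        const cudaError_t err = cudaSetDoubleForDevice(&scale);
        if (err != cudaSuccess)
            std::printf("cudaSetDoubleForDevice failed: %s\n", cudaGetErrorString(err));

        std::printf("scale as the device will see it: %.17g\n", scale);
        return err == cudaSuccess ? 0 : 1;
    }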
@@ -729,7 +772,7 @@ namespace cv { namespace gpu { namespace device
         Divide(double scale_) : scale(scale_) {}
         __device__ __forceinline__ D operator ()(T a, T b) const
         {
-            return b != 0 ? saturate_cast<D>(scale * a / b) : 0;
+            return b != 0 ? saturate_cast<D>(a * scale / b) : 0;
         }
         const double scale;
     };
...
@@ -65,7 +65,10 @@ namespace cv { namespace gpu { namespace device
     }
 }}}
 
-const float defaultAvgTexThreshold = 3;
+namespace
+{
+    const float defaultAvgTexThreshold = 3;
+}
 
 cv::gpu::StereoBM_GPU::StereoBM_GPU()
     : preset(BASIC_PRESET), ndisp(DEFAULT_NDISP), winSize(DEFAULT_WINSZ), avergeTexThreshold(defaultAvgTexThreshold)
@@ -100,9 +103,9 @@ namespace
     {
         using namespace ::cv::gpu::device::stereobm;
 
-        CV_DbgAssert(left.rows == right.rows && left.cols == right.cols);
-        CV_DbgAssert(left.type() == CV_8UC1);
-        CV_DbgAssert(right.type() == CV_8UC1);
+        CV_Assert(left.rows == right.rows && left.cols == right.cols);
+        CV_Assert(left.type() == CV_8UC1);
+        CV_Assert(right.type() == CV_8UC1);
 
         disparity.create(left.size(), CV_8U);
         minSSD.create(left.size(), CV_32S);
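Switching from CV_DbgAssert to CV_Assert makes the size and type checks unconditional: CV_DbgAssert compiles out of release builds, so mismatched or non-CV_8UC1 inputs previously went unvalidated there. A simplified sketch of the distinction (MY_ASSERT and MY_DBG_ASSERT are illustrative stand-ins, not OpenCV's actual macros, which raise cv::Exception with file and line details):

    #include <cstdio>
    #include <stdexcept>

    // Always-on check, in the spirit of CV_Assert.
    #define MY_ASSERT(expr) \
        do { if (!(expr)) throw std::runtime_error("Assertion failed: " #expr); } while (0)

    // Debug-only check, in the spirit of CV_DbgAssert: it evaluates to nothing
    // in release builds, so the condition is never even tested there.
    #ifdef _DEBUG
    #  define MY_DBG_ASSERT(expr) MY_ASSERT(expr)
    #else
    #  define MY_DBG_ASSERT(expr) ((void)0)
    #endif

    int main()
    {
        const int leftRows = 480, rightRows = 640;
        MY_DBG_ASSERT(leftRows == rightRows); // passes silently in a release build
        try
        {
            MY_ASSERT(leftRows == rightRows); // fails in every build
        }
        catch (const std::exception& e)
        {
            std::printf("%s\n", e.what());
        }
        return 0;
    }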
...
@@ -960,13 +960,13 @@ namespace
     void releaseGlContext(CvWindow* window)
     {
-        CV_FUNCNAME( "releaseGlContext" );
+        //CV_FUNCNAME( "releaseGlContext" );
 
-        __BEGIN__;
+        //__BEGIN__;
 
         window->useGl = false;
 
-        __END__;
+        //__END__;
     }
 
     void drawGl(CvWindow* window)
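The commented-out lines are the legacy C-style error-handling macros; releaseGlContext has no error path, so they presumably left behind nothing but an unused cvFuncName definition. A rough sketch of what that scaffolding expands to (simplified from the OpenCV 1.x-era definitions; the MY_ names are illustrative stand-ins, not this commit's code):

    // Simplified stand-ins for CV_FUNCNAME / __BEGIN__ / __END__.
    #define MY_FUNCNAME(Name) static char cvFuncName[] = Name
    #define MY_BEGIN          {
    #define MY_END            goto exit; exit: ; }

    static void releaseGlContextSketch(bool& useGl)
    {
        MY_FUNCNAME("releaseGlContext"); // never read: nothing here reports an error
        MY_BEGIN;
        useGl = false;                   // the only real work the function does
        MY_END;
    }

    int main()
    {
        bool useGl = true;
        releaseGlContextSketch(useGl);
        return useGl ? 1 : 0;
    }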
...