Commit 37c6357b authored by Andrey Kamaev, committed by OpenCV Buildbot

Merge pull request #485 from jet47:gpu-new-functionality

parents 8547984d a3a09cf4
@@ -21,7 +21,7 @@ source_group("Src\\Cuda" FILES ${lib_cuda} ${lib_cuda_hdrs})
source_group("Device" FILES ${lib_device_hdrs})
source_group("Device\\Detail" FILES ${lib_device_hdrs_detail})
if(HAVE_CUDA)
file(GLOB_RECURSE ncv_srcs "src/nvidia/*.cpp" "src/nvidia/*.h*")
file(GLOB_RECURSE ncv_cuda "src/nvidia/*.cu")
set(ncv_files ${ncv_srcs} ${ncv_cuda})
@@ -104,3 +104,7 @@ ocv_add_accuracy_tests(FILES "Include" ${test_hdrs}
FILES "Src" ${test_srcs}
${nvidia})
ocv_add_perf_tests()
if(HAVE_CUDA)
add_subdirectory(perf4au)
endif()
@@ -271,41 +271,37 @@ This class encapsulates a queue of asynchronous calls. Some functions have overl
class CV_EXPORTS Stream
{
public:
Stream();
~Stream();
Stream(const Stream&);
Stream& operator=(const Stream&);
bool queryIfComplete();
void waitForCompletion();
void enqueueDownload(const GpuMat& src, CudaMem& dst);
void enqueueDownload(const GpuMat& src, Mat& dst);
void enqueueUpload(const CudaMem& src, GpuMat& dst);
void enqueueUpload(const Mat& src, GpuMat& dst);
void enqueueCopy(const GpuMat& src, GpuMat& dst);
void enqueueMemSet(const GpuMat& src, Scalar val);
void enqueueMemSet(const GpuMat& src, Scalar val, const GpuMat& mask);
void enqueueConvert(const GpuMat& src, GpuMat& dst, int type,
double a = 1, double b = 0);
typedef void (*StreamCallback)(Stream& stream, int status, void* userData);
void enqueueHostCallback(StreamCallback callback, void* userData);
};
gpu::Stream::queryIfComplete
----------------------------
Returns ``true`` if the current stream queue is finished. Otherwise, it returns false.
.. ocv:function:: bool gpu::Stream::queryIfComplete()
@@ -313,13 +309,73 @@ Returns ``true`` if the current stream queue is finished. Otherwise, it returns
gpu::Stream::waitForCompletion
------------------------------
Blocks the current CPU thread until all operations in the stream are complete.
.. ocv:function:: void gpu::Stream::waitForCompletion()
gpu::Stream::enqueueDownload
----------------------------
Copies data from device to host.
.. ocv:function:: void gpu::Stream::enqueueDownload(const GpuMat& src, CudaMem& dst)
.. ocv:function:: void gpu::Stream::enqueueDownload(const GpuMat& src, Mat& dst)
.. note:: ``cv::Mat`` must point to page locked memory (i.e. to ``CudaMem`` data or to its subMat) or must be registered with :ocv:func:`gpu::registerPageLocked` .
gpu::Stream::enqueueUpload
--------------------------
Copies data from host to device.
.. ocv:function:: void gpu::Stream::enqueueUpload(const CudaMem& src, GpuMat& dst)
.. ocv:function:: void gpu::Stream::enqueueUpload(const Mat& src, GpuMat& dst)
.. note:: ``cv::Mat`` must point to page locked memory (i.e. to ``CudaMem`` data or to its subMat) or must be registered with :ocv:func:`gpu::registerPageLocked` .
gpu::Stream::enqueueCopy
------------------------
Copies data from device to device.
.. ocv:function:: void gpu::Stream::enqueueCopy(const GpuMat& src, GpuMat& dst)
gpu::Stream::enqueueMemSet
--------------------------
Initializes or sets device memory to a value.
.. ocv:function:: void gpu::Stream::enqueueMemSet(const GpuMat& src, Scalar val)
.. ocv:function:: void gpu::Stream::enqueueMemSet(const GpuMat& src, Scalar val, const GpuMat& mask)
gpu::Stream::enqueueConvert
---------------------------
Converts matrix type, e.g. from float to uchar, depending on ``type``.
.. ocv:function:: void gpu::Stream::enqueueConvert(const GpuMat& src, GpuMat& dst, int type, double a = 1, double b = 0)
gpu::Stream::enqueueHostCallback
--------------------------------
Adds a callback to be called on the host after all currently enqueued items in the stream have completed.
.. ocv:function:: void gpu::Stream::enqueueHostCallback(StreamCallback callback, void* userData)
.. note:: Callbacks must not make any CUDA API calls. Callbacks must not perform any synchronization that may depend on outstanding device work or other callbacks that are not mandated to run earlier. Callbacks without a mandated order (in independent streams) execute in undefined order and may be serialized.
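To tie the asynchronous members together, here is a minimal hedged sketch (illustrative only, not part of this diff; the buffer names and the callback are assumed):

#include <opencv2/gpu/gpu.hpp>

// Runs on a driver thread once the work queued before it has finished.
// Per the note above, it must not call into the CUDA API.
static void onDone(cv::gpu::Stream&, int /*status*/, void* userData)
{
    *static_cast<bool*>(userData) = true;
}

void processAsync(const cv::gpu::CudaMem& src, cv::gpu::CudaMem& dst)
{
    cv::gpu::Stream stream;
    cv::gpu::GpuMat d_src, d_dst;
    bool done = false;

    stream.enqueueUpload(src, d_src);             // host -> device, page-locked source
    stream.enqueueConvert(d_src, d_dst, CV_32F);  // queued type conversion
    stream.enqueueDownload(d_dst, dst);           // device -> host, page-locked destination
    stream.enqueueHostCallback(onDone, &done);    // host-side completion flag
    stream.waitForCompletion();                   // block until everything above ran
}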
gpu::StreamAccessor
-------------------
.. ocv:struct:: gpu::StreamAccessor
@@ -32,6 +32,8 @@ Returns the norm of a matrix (or difference of two matrices).
.. ocv:function:: double gpu::norm(const GpuMat& src1, int normType, GpuMat& buf)
.. ocv:function:: double gpu::norm(const GpuMat& src1, int normType, const GpuMat& mask, GpuMat& buf)
.. ocv:function:: double gpu::norm(const GpuMat& src1, const GpuMat& src2, int normType=NORM_L2)
:param src1: Source matrix. Any matrices except 64F are supported.
@@ -40,6 +42,8 @@ Returns the norm of a matrix (or difference of two matrices).
:param normType: Norm type. ``NORM_L1`` , ``NORM_L2`` , and ``NORM_INF`` are supported for now.
:param mask: Optional operation mask; it must have the same size as ``src1`` and ``CV_8UC1`` type.
:param buf: Optional buffer to avoid extra memory allocations. It is resized automatically.
.. seealso:: :ocv:func:`norm`
@@ -54,8 +58,12 @@ Returns the sum of matrix elements.
.. ocv:function:: Scalar gpu::sum(const GpuMat& src, GpuMat& buf)
.. ocv:function:: Scalar gpu::sum(const GpuMat& src, const GpuMat& mask, GpuMat& buf)
:param src: Source image of any depth except for ``CV_64F`` .
:param mask: Optional operation mask; it must have the same size as ``src`` and ``CV_8UC1`` type.
:param buf: Optional buffer to avoid extra memory allocations. It is resized automatically.
.. seealso:: :ocv:func:`sum`
@@ -70,8 +78,12 @@ Returns the sum of absolute values for matrix elements.
.. ocv:function:: Scalar gpu::absSum(const GpuMat& src, GpuMat& buf)
.. ocv:function:: Scalar gpu::absSum(const GpuMat& src, const GpuMat& mask, GpuMat& buf)
:param src: Source image of any depth except for ``CV_64F`` .
:param mask: Optional operation mask; it must have the same size as ``src`` and ``CV_8UC1`` type.
:param buf: Optional buffer to avoid extra memory allocations. It is resized automatically.
@@ -84,8 +96,12 @@ Returns the squared sum of matrix elements.
.. ocv:function:: Scalar gpu::sqrSum(const GpuMat& src, GpuMat& buf)
.. ocv:function:: Scalar gpu::sqrSum(const GpuMat& src, const GpuMat& mask, GpuMat& buf)
:param src: Source image of any depth except for ``CV_64F`` .
:param mask: Optional operation mask; it must have the same size as ``src`` and ``CV_8UC1`` type.
:param buf: Optional buffer to avoid extra memory allocations. It is resized automatically.
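A brief hedged sketch of the new masked overloads (matrix names assumed; the buffer is reused and resized automatically):

cv::gpu::GpuMat d_img;   // e.g. a CV_8UC1 image already on the device
cv::gpu::GpuMat d_mask;  // CV_8UC1, same size; non-zero entries select pixels
cv::gpu::GpuMat d_buf;

cv::Scalar s = cv::gpu::sum(d_img, d_mask, d_buf);               // masked sum
double l2    = cv::gpu::norm(d_img, cv::NORM_L2, d_mask, d_buf); // masked L2 norm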
@@ -242,3 +242,33 @@ Converts polar coordinates into Cartesian.
:param stream: Stream for the asynchronous version.
.. seealso:: :ocv:func:`polarToCart`
gpu::normalize
--------------
Normalizes the norm or value range of an array.
.. ocv:function:: void gpu::normalize(const GpuMat& src, GpuMat& dst, double alpha = 1, double beta = 0, int norm_type = NORM_L2, int dtype = -1, const GpuMat& mask = GpuMat())
.. ocv:function:: void gpu::normalize(const GpuMat& src, GpuMat& dst, double a, double b, int norm_type, int dtype, const GpuMat& mask, GpuMat& norm_buf, GpuMat& cvt_buf)
:param src: input array.
:param dst: output array of the same size as ``src`` .
:param alpha: norm value to normalize to or the lower range boundary in case of the range normalization.
:param beta: upper range boundary in case of the range normalization; it is not used for the norm normalization.
:param norm_type: normalization type: ``NORM_MINMAX``, ``NORM_INF``, ``NORM_L1``, or ``NORM_L2``.
:param dtype: when negative, the output array has the same type as ``src``; otherwise, it has the same number of channels as ``src`` and the depth ``=CV_MAT_DEPTH(dtype)``.
:param mask: optional operation mask.
:param norm_buf: Optional buffer to avoid extra memory allocations. It is resized automatically.
:param cvt_buf: Optional buffer to avoid extra memory allocations. It is resized automatically.
.. seealso:: :ocv:func:`normalize`
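For instance, a hedged sketch of min-max contrast stretching (``hostImage`` is an assumed ``CV_8UC1`` ``cv::Mat``; not part of this diff):

#include <opencv2/gpu/gpu.hpp>

cv::Mat stretchContrast(const cv::Mat& hostImage)
{
    cv::gpu::GpuMat d_src(hostImage), d_dst;

    // Map the smallest source value to 0 and the largest to 255.
    cv::gpu::normalize(d_src, d_dst, 0, 255, cv::NORM_MINMAX, CV_8U);

    cv::Mat result;
    d_dst.download(result);
    return result;
}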
@@ -276,6 +276,8 @@ Compares elements of two matrices.
.. ocv:function:: void gpu::compare( const GpuMat& a, const GpuMat& b, GpuMat& c, int cmpop, Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::compare(const GpuMat& a, Scalar sc, GpuMat& c, int cmpop, Stream& stream = Stream::Null())
:param a: First source matrix.
:param b: Second source matrix with the same size and type as ``a`` .
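A hedged sketch of the new scalar overload (image name assumed); the result is a ``CV_8UC1`` mask holding 255 where the comparison holds:

cv::gpu::GpuMat d_img;   // CV_8UC1 source already on the device
cv::gpu::GpuMat d_mask;
cv::gpu::compare(d_img, cv::Scalar::all(100), d_mask, cv::CMP_GT);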
@@ -97,6 +97,25 @@ namespace cv { namespace gpu { namespace device
return out;
}
template <class T, class BinOp>
static __device__ __forceinline__ T reduce(volatile T *ptr, BinOp op)
{
const unsigned int lane = laneId();
if (lane < 16)
{
T partial = ptr[lane];
ptr[lane] = partial = op(partial, ptr[lane + 16]);
ptr[lane] = partial = op(partial, ptr[lane + 8]);
ptr[lane] = partial = op(partial, ptr[lane + 4]);
ptr[lane] = partial = op(partial, ptr[lane + 2]);
ptr[lane] = partial = op(partial, ptr[lane + 1]);
}
return *ptr;
}
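In case the pattern is unfamiliar: lanes 0..15 fold the upper half of a 32-element buffer into the lower half at strides 16, 8, 4, 2 and 1, so after five warp-synchronous steps ptr[0] holds the reduction of all 32 values. A hedged caller sketch (assuming these statics sit on the Warp struct like the rest of this header, and pre-Volta warp-synchronous execution):

struct PlusF
{
    __device__ __forceinline__ float operator()(float a, float b) const { return a + b; }
};

// One 32-thread block sums 32 floats through a shared-memory buffer.
__global__ void warpSum32(const float* in, float* out)
{
    __shared__ volatile float smem[32];
    smem[threadIdx.x] = in[blockIdx.x * 32 + threadIdx.x];

    const float total = cv::gpu::device::Warp::reduce(smem, PlusF());
    if (threadIdx.x == 0)
        out[blockIdx.x] = total;
}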
template<typename OutIt, typename T>
static __device__ __forceinline__ void yota(OutIt beg, OutIt end, T value)
{
@@ -109,4 +128,4 @@ namespace cv { namespace gpu { namespace device
};
}}} // namespace cv { namespace gpu { namespace device
#endif /* __OPENCV_GPU_DEVICE_WARP_HPP__ */
\ No newline at end of file
@@ -647,6 +647,39 @@ PERF_TEST_P(Sz_Depth_Code, Core_CompareMat, Combine(GPU_TYPICAL_MAT_SIZES, ARITH
}
}
//////////////////////////////////////////////////////////////////////
// CompareScalar
PERF_TEST_P(Sz_Depth_Code, Core_CompareScalar, Combine(GPU_TYPICAL_MAT_SIZES, ARITHM_MAT_DEPTH, ALL_CMP_CODES))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
const int cmp_code = GET_PARAM(2);
cv::Mat src(size, depth);
fillRandom(src);
cv::Scalar s = cv::Scalar::all(100);
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_dst;
TEST_CYCLE() cv::gpu::compare(d_src, s, d_dst, cmp_code);
GPU_SANITY_CHECK(d_dst);
}
else
{
cv::Mat dst;
TEST_CYCLE() cv::compare(src, s, dst, cmp_code);
CPU_SANITY_CHECK(dst);
}
}
//////////////////////////////////////////////////////////////////////
// BitwiseNot
@@ -1598,7 +1631,7 @@ PERF_TEST_P(Sz_Depth_Norm, Core_Norm, Combine(
cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_buf;
TEST_CYCLE() dst = cv::gpu::norm(d_src, normType, cv::gpu::GpuMat(), d_buf);
}
else
{
@@ -1668,7 +1701,7 @@ PERF_TEST_P(Sz_Depth_Cn, Core_Sum, Combine(
cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_buf;
TEST_CYCLE() dst = cv::gpu::sum(d_src, cv::gpu::GpuMat(), d_buf);
}
else
{
@@ -1703,7 +1736,7 @@ PERF_TEST_P(Sz_Depth_Cn, Core_SumAbs, Combine(
cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_buf;
TEST_CYCLE() dst = cv::gpu::absSum(d_src, cv::gpu::GpuMat(), d_buf);
SANITY_CHECK(dst, 1e-6);
}
@@ -1737,7 +1770,7 @@ PERF_TEST_P(Sz_Depth_Cn, Core_SumSqr, Combine(
cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_buf;
TEST_CYCLE() dst = cv::gpu::sqrSum(d_src, cv::gpu::GpuMat(), d_buf);
SANITY_CHECK(dst, 1e-6);
}
@@ -1893,4 +1926,48 @@ PERF_TEST_P(Sz_Depth_Cn_Code_Dim, Core_Reduce, Combine(
}
}
//////////////////////////////////////////////////////////////////////
// Normalize
DEF_PARAM_TEST(Sz_Depth_NormType, cv::Size, MatDepth, NormType);
PERF_TEST_P(Sz_Depth_NormType, Core_Normalize, Combine(
GPU_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F, CV_64F),
Values(NormType(cv::NORM_INF),
NormType(cv::NORM_L1),
NormType(cv::NORM_L2),
NormType(cv::NORM_MINMAX))
))
{
cv::Size size = GET_PARAM(0);
int type = GET_PARAM(1);
int norm_type = GET_PARAM(2);
double alpha = 1;
double beta = 0;
cv::Mat src(size, type);
fillRandom(src);
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_dst;
cv::gpu::GpuMat d_norm_buf, d_cvt_buf;
TEST_CYCLE() cv::gpu::normalize(d_src, d_dst, alpha, beta, norm_type, type, cv::gpu::GpuMat(), d_norm_buf, d_cvt_buf);
GPU_SANITY_CHECK(d_dst, 1);
}
else
{
cv::Mat dst;
TEST_CYCLE() cv::normalize(src, dst, alpha, beta, norm_type, type);
CPU_SANITY_CHECK(dst, 1);
}
}
} // namespace
@@ -1706,6 +1706,16 @@ PERF_TEST_P(Sz_Depth_Cn, ImgProc_ImagePyramidGetLayer, Combine(GPU_TYPICAL_MAT_S
}
}
namespace {
struct Vec4iComparator
{
bool operator()(const cv::Vec4i& a, const cv::Vec4i b) const
{
if (a[0] != b[0]) return a[0] < b[0];
else if(a[1] != b[1]) return a[1] < b[1];
else if(a[2] != b[2]) return a[2] < b[2];
else return a[3] < b[3];
}
};
struct Vec3fComparator
{
bool operator()(const cv::Vec3f& a, const cv::Vec3f b) const
@@ -1784,6 +1794,62 @@ PERF_TEST_P(Sz, ImgProc_HoughLines, GPU_TYPICAL_MAT_SIZES)
}
}
//////////////////////////////////////////////////////////////////////
// HoughLinesP
DEF_PARAM_TEST_1(Image, std::string);
PERF_TEST_P(Image, ImgProc_HoughLinesP, testing::Values("cv/shared/pic5.png", "stitching/a1.png"))
{
declare.time(30.0);
std::string fileName = getDataPath(GetParam());
const float rho = 1.0f;
const float theta = static_cast<float>(CV_PI / 180.0);
const int threshold = 100;
const int minLineLength = 50;
const int maxLineGap = 5;
cv::Mat image = cv::imread(fileName, cv::IMREAD_GRAYSCALE);
cv::Mat mask;
cv::Canny(image, mask, 50, 100);
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_mask(mask);
cv::gpu::GpuMat d_lines;
cv::gpu::HoughLinesBuf d_buf;
cv::gpu::HoughLinesP(d_mask, d_lines, d_buf, rho, theta, minLineLength, maxLineGap);
TEST_CYCLE()
{
cv::gpu::HoughLinesP(d_mask, d_lines, d_buf, rho, theta, minLineLength, maxLineGap);
}
cv::Mat h_lines(d_lines);
cv::Vec4i* begin = h_lines.ptr<cv::Vec4i>();
cv::Vec4i* end = h_lines.ptr<cv::Vec4i>() + h_lines.cols;
std::sort(begin, end, Vec4iComparator());
SANITY_CHECK(h_lines);
}
else
{
std::vector<cv::Vec4i> lines;
cv::HoughLinesP(mask, lines, rho, theta, threshold, minLineLength, maxLineGap);
TEST_CYCLE()
{
cv::HoughLinesP(mask, lines, rho, theta, threshold, minLineLength, maxLineGap);
}
std::sort(lines.begin(), lines.end(), Vec4iComparator());
SANITY_CHECK(lines);
}
}
//////////////////////////////////////////////////////////////////////
// HoughCircles
@@ -394,6 +394,173 @@ PERF_TEST_P(ImagePair, Video_FarnebackOpticalFlow,
}
}
//////////////////////////////////////////////////////
// OpticalFlowDual_TVL1
PERF_TEST_P(ImagePair, Video_OpticalFlowDual_TVL1,
Values<pair_string>(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png")))
{
declare.time(20);
cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty());
cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_frame0(frame0);
cv::gpu::GpuMat d_frame1(frame1);
cv::gpu::GpuMat d_flowx;
cv::gpu::GpuMat d_flowy;
cv::gpu::OpticalFlowDual_TVL1_GPU d_alg;
d_alg(d_frame0, d_frame1, d_flowx, d_flowy);
TEST_CYCLE()
{
d_alg(d_frame0, d_frame1, d_flowx, d_flowy);
}
GPU_SANITY_CHECK(d_flowx);
GPU_SANITY_CHECK(d_flowy);
}
else
{
cv::Mat flow;
cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
alg->calc(frame0, frame1, flow);
TEST_CYCLE()
{
alg->calc(frame0, frame1, flow);
}
CPU_SANITY_CHECK(flow);
}
}
//////////////////////////////////////////////////////
// OpticalFlowBM
void calcOpticalFlowBM(const cv::Mat& prev, const cv::Mat& curr,
cv::Size bSize, cv::Size shiftSize, cv::Size maxRange, int usePrevious,
cv::Mat& velx, cv::Mat& vely)
{
cv::Size sz((curr.cols - bSize.width + shiftSize.width)/shiftSize.width, (curr.rows - bSize.height + shiftSize.height)/shiftSize.height);
velx.create(sz, CV_32FC1);
vely.create(sz, CV_32FC1);
CvMat cvprev = prev;
CvMat cvcurr = curr;
CvMat cvvelx = velx;
CvMat cvvely = vely;
cvCalcOpticalFlowBM(&cvprev, &cvcurr, bSize, shiftSize, maxRange, usePrevious, &cvvelx, &cvvely);
}
PERF_TEST_P(ImagePair, Video_OpticalFlowBM,
Values<pair_string>(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png")))
{
declare.time(400);
cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty());
cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
cv::Size block_size(16, 16);
cv::Size shift_size(1, 1);
cv::Size max_range(16, 16);
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_frame0(frame0);
cv::gpu::GpuMat d_frame1(frame1);
cv::gpu::GpuMat d_velx, d_vely, buf;
cv::gpu::calcOpticalFlowBM(d_frame0, d_frame1, block_size, shift_size, max_range, false, d_velx, d_vely, buf);
TEST_CYCLE()
{
cv::gpu::calcOpticalFlowBM(d_frame0, d_frame1, block_size, shift_size, max_range, false, d_velx, d_vely, buf);
}
GPU_SANITY_CHECK(d_velx);
GPU_SANITY_CHECK(d_vely);
}
else
{
cv::Mat velx, vely;
calcOpticalFlowBM(frame0, frame1, block_size, shift_size, max_range, false, velx, vely);
TEST_CYCLE()
{
calcOpticalFlowBM(frame0, frame1, block_size, shift_size, max_range, false, velx, vely);
}
CPU_SANITY_CHECK(velx);
CPU_SANITY_CHECK(vely);
}
}
PERF_TEST_P(ImagePair, Video_FastOpticalFlowBM,
Values<pair_string>(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png")))
{
declare.time(400);
cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty());
cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
cv::Size block_size(16, 16);
cv::Size shift_size(1, 1);
cv::Size max_range(16, 16);
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_frame0(frame0);
cv::gpu::GpuMat d_frame1(frame1);
cv::gpu::GpuMat d_velx, d_vely;
cv::gpu::FastOpticalFlowBM fastBM;
fastBM(d_frame0, d_frame1, d_velx, d_vely, max_range.width, block_size.width);
TEST_CYCLE()
{
fastBM(d_frame0, d_frame1, d_velx, d_vely, max_range.width, block_size.width);
}
GPU_SANITY_CHECK(d_velx);
GPU_SANITY_CHECK(d_vely);
}
else
{
cv::Mat velx, vely;
calcOpticalFlowBM(frame0, frame1, block_size, shift_size, max_range, false, velx, vely);
TEST_CYCLE()
{
calcOpticalFlowBM(frame0, frame1, block_size, shift_size, max_range, false, velx, vely);
}
CPU_SANITY_CHECK(velx);
CPU_SANITY_CHECK(vely);
}
}
//////////////////////////////////////////////////////
// FGDStatModel
@@ -17,7 +17,7 @@ CV_ENUM(BorderMode, cv::BORDER_REFLECT101, cv::BORDER_REPLICATE, cv::BORDER_CONS
CV_ENUM(Interpolation, cv::INTER_NEAREST, cv::INTER_LINEAR, cv::INTER_CUBIC, cv::INTER_AREA)
#define ALL_INTERPOLATIONS testing::ValuesIn(Interpolation::all())
CV_ENUM(NormType, cv::NORM_INF, cv::NORM_L1, cv::NORM_L2, cv::NORM_HAMMING, cv::NORM_MINMAX)
const int Gray = 1, TwoChannel = 2, BGR = 3, BGRA = 4;
CV_ENUM(MatCn, Gray, TwoChannel, BGR, BGRA)
set(PERF4AU_REQUIRED_DEPS opencv_core opencv_imgproc opencv_highgui opencv_video opencv_legacy opencv_gpu opencv_ts)
ocv_check_dependencies(${PERF4AU_REQUIRED_DEPS})
set(the_target gpu_perf4au)
project(${the_target})
ocv_include_modules(${PERF4AU_REQUIRED_DEPS})
if(CMAKE_COMPILER_IS_GNUCXX AND NOT ENABLE_NOISY_WARNINGS)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-unused-function")
endif()
file(GLOB srcs RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cpp *.h *.hpp)
add_executable(${the_target} ${srcs})
target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${PERF4AU_REQUIRED_DEPS})
if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(${the_target} PROPERTIES FOLDER "tests performance")
endif()
if(WIN32)
if(MSVC AND NOT BUILD_SHARED_LIBS)
set_target_properties(${the_target} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:atlthunk.lib /NODEFAULTLIB:atlsd.lib /DEBUG")
endif()
endif()
@@ -59,6 +59,8 @@ void cv::gpu::magnitudeSqr(const GpuMat&, const GpuMat&, GpuMat&, Stream&) { thr
void cv::gpu::phase(const GpuMat&, const GpuMat&, GpuMat&, bool, Stream&) { throw_nogpu(); }
void cv::gpu::cartToPolar(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool, Stream&) { throw_nogpu(); }
void cv::gpu::polarToCart(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool, Stream&) { throw_nogpu(); }
void cv::gpu::normalize(const GpuMat&, GpuMat&, double, double, int, int, const GpuMat&) { throw_nogpu(); }
void cv::gpu::normalize(const GpuMat&, GpuMat&, double, double, int, int, const GpuMat&, GpuMat&, GpuMat&) { throw_nogpu(); }
#else /* !defined (HAVE_CUDA) */
@@ -529,4 +531,47 @@ void cv::gpu::polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat&
polarToCart_caller(magnitude, angle, x, y, angleInDegrees, StreamAccessor::getStream(stream));
}
////////////////////////////////////////////////////////////////////////
// normalize
void cv::gpu::normalize(const GpuMat& src, GpuMat& dst, double a, double b, int norm_type, int dtype, const GpuMat& mask)
{
GpuMat norm_buf;
GpuMat cvt_buf;
normalize(src, dst, a, b, norm_type, dtype, mask, norm_buf, cvt_buf);
}
void cv::gpu::normalize(const GpuMat& src, GpuMat& dst, double a, double b, int norm_type, int dtype, const GpuMat& mask, GpuMat& norm_buf, GpuMat& cvt_buf)
{
double scale = 1, shift = 0;
if (norm_type == NORM_MINMAX)
{
double smin = 0, smax = 0;
double dmin = std::min(a, b), dmax = std::max(a, b);
minMax(src, &smin, &smax, mask, norm_buf);
scale = (dmax - dmin) * (smax - smin > numeric_limits<double>::epsilon() ? 1.0 / (smax - smin) : 0.0);
shift = dmin - smin * scale;
}
else if (norm_type == NORM_L2 || norm_type == NORM_L1 || norm_type == NORM_INF)
{
scale = norm(src, norm_type, mask, norm_buf);
scale = scale > numeric_limits<double>::epsilon() ? a / scale : 0.0;
shift = 0;
}
else
{
CV_Error(CV_StsBadArg, "Unknown/unsupported norm type");
}
if (mask.empty())
{
src.convertTo(dst, dtype, scale, shift);
}
else
{
src.convertTo(cvt_buf, dtype, scale, shift);
cvt_buf.copyTo(dst, mask);
}
}
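As a worked check of the NORM_MINMAX branch (not part of the diff): for source values spanning [10, 60] mapped to [0, 255], scale = (255 - 0) / (60 - 10) = 5.1 and shift = 0 - 10 * 5.1 = -51, so the convertTo call sends 10 to 0 and 60 to 255.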
#endif /* !defined (HAVE_CUDA) */
@@ -293,6 +293,201 @@ namespace cv { namespace gpu { namespace device
return totalCount;
}
////////////////////////////////////////////////////////////////////////
// houghLinesProbabilistic
texture<uchar, cudaTextureType2D, cudaReadModeElementType> tex_mask(false, cudaFilterModePoint, cudaAddressModeClamp);
__global__ void houghLinesProbabilistic(const PtrStepSzi accum,
int4* out, const int maxSize,
const float rho, const float theta,
const int lineGap, const int lineLength,
const int rows, const int cols)
{
const int r = blockIdx.x * blockDim.x + threadIdx.x;
const int n = blockIdx.y * blockDim.y + threadIdx.y;
if (r >= accum.cols - 2 || n >= accum.rows - 2)
return;
const int curVotes = accum(n + 1, r + 1);
if (curVotes >= lineLength &&
curVotes > accum(n, r) &&
curVotes > accum(n, r + 1) &&
curVotes > accum(n, r + 2) &&
curVotes > accum(n + 1, r) &&
curVotes > accum(n + 1, r + 2) &&
curVotes > accum(n + 2, r) &&
curVotes > accum(n + 2, r + 1) &&
curVotes > accum(n + 2, r + 2))
{
const float radius = (r - (accum.cols - 2 - 1) * 0.5f) * rho;
const float angle = n * theta;
float cosa;
float sina;
sincosf(angle, &sina, &cosa);
float2 p0 = make_float2(cosa * radius, sina * radius);
float2 dir = make_float2(-sina, cosa);
float2 pb[4] = {make_float2(-1, -1), make_float2(-1, -1), make_float2(-1, -1), make_float2(-1, -1)};
float a;
if (dir.x != 0)
{
a = -p0.x / dir.x;
pb[0].x = 0;
pb[0].y = p0.y + a * dir.y;
a = (cols - 1 - p0.x) / dir.x;
pb[1].x = cols - 1;
pb[1].y = p0.y + a * dir.y;
}
if (dir.y != 0)
{
a = -p0.y / dir.y;
pb[2].x = p0.x + a * dir.x;
pb[2].y = 0;
a = (rows - 1 - p0.y) / dir.y;
pb[3].x = p0.x + a * dir.x;
pb[3].y = rows - 1;
}
if (pb[0].x == 0 && (pb[0].y >= 0 && pb[0].y < rows))
{
p0 = pb[0];
if (dir.x < 0)
dir = -dir;
}
else if (pb[1].x == cols - 1 && (pb[1].y >= 0 && pb[1].y < rows))
{
p0 = pb[1];
if (dir.x > 0)
dir = -dir;
}
else if (pb[2].y == 0 && (pb[2].x >= 0 && pb[2].x < cols))
{
p0 = pb[2];
if (dir.y < 0)
dir = -dir;
}
else if (pb[3].y == rows - 1 && (pb[3].x >= 0 && pb[3].x < cols))
{
p0 = pb[3];
if (dir.y > 0)
dir = -dir;
}
float2 d;
if (::fabsf(dir.x) > ::fabsf(dir.y))
{
d.x = dir.x > 0 ? 1 : -1;
d.y = dir.y / ::fabsf(dir.x);
}
else
{
d.x = dir.x / ::fabsf(dir.y);
d.y = dir.y > 0 ? 1 : -1;
}
float2 line_end[2];
int gap;
bool inLine = false;
float2 p1 = p0;
if (p1.x < 0 || p1.x >= cols || p1.y < 0 || p1.y >= rows)
return;
for (;;)
{
if (tex2D(tex_mask, p1.x, p1.y))
{
gap = 0;
if (!inLine)
{
line_end[0] = p1;
line_end[1] = p1;
inLine = true;
}
else
{
line_end[1] = p1;
}
}
else if (inLine)
{
if (++gap > lineGap)
{
bool good_line = ::abs(line_end[1].x - line_end[0].x) >= lineLength ||
::abs(line_end[1].y - line_end[0].y) >= lineLength;
if (good_line)
{
const int ind = ::atomicAdd(&g_counter, 1);
if (ind < maxSize)
out[ind] = make_int4(line_end[0].x, line_end[0].y, line_end[1].x, line_end[1].y);
}
gap = 0;
inLine = false;
}
}
p1 = p1 + d;
if (p1.x < 0 || p1.x >= cols || p1.y < 0 || p1.y >= rows)
{
if (inLine)
{
bool good_line = ::abs(line_end[1].x - line_end[0].x) >= lineLength ||
::abs(line_end[1].y - line_end[0].y) >= lineLength;
if (good_line)
{
const int ind = ::atomicAdd(&g_counter, 1);
if (ind < maxSize)
out[ind] = make_int4(line_end[0].x, line_end[0].y, line_end[1].x, line_end[1].y);
}
}
break;
}
}
}
}
int houghLinesProbabilistic_gpu(PtrStepSzb mask, PtrStepSzi accum, int4* out, int maxSize, float rho, float theta, int lineGap, int lineLength)
{
void* counterPtr;
cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) );
cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) );
const dim3 block(32, 8);
const dim3 grid(divUp(accum.cols - 2, block.x), divUp(accum.rows - 2, block.y));
bindTexture(&tex_mask, mask);
houghLinesProbabilistic<<<grid, block>>>(accum,
out, maxSize,
rho, theta,
lineGap, lineLength,
mask.rows, mask.cols);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
int totalCount;
cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) );
totalCount = ::min(totalCount, maxSize);
return totalCount;
}
////////////////////////////////////////////////////////////////////////
// circlesAccumCenters
@@ -64,6 +64,7 @@ void cv::gpu::sqrt(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::exp(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::log(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::compare(const GpuMat&, const GpuMat&, GpuMat&, int, Stream&) { throw_nogpu(); }
void cv::gpu::compare(const GpuMat&, Scalar, GpuMat&, int, Stream&) { throw_nogpu(); }
void cv::gpu::bitwise_not(const GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::bitwise_or(const GpuMat&, const GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::bitwise_or(const GpuMat&, const Scalar&, GpuMat&, Stream&) { throw_nogpu(); }
@@ -2001,6 +2002,69 @@ void cv::gpu::compare(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, int c
func(src1_, src2_, dst_, stream);
}
namespace arithm
{
template <typename T> void cmpScalarEq(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template <typename T> void cmpScalarNe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template <typename T> void cmpScalarLt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template <typename T> void cmpScalarLe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template <typename T> void cmpScalarGt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template <typename T> void cmpScalarGe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
}
namespace
{
template <typename T> void castScalar(Scalar& sc)
{
sc.val[0] = saturate_cast<T>(sc.val[0]);
sc.val[1] = saturate_cast<T>(sc.val[1]);
sc.val[2] = saturate_cast<T>(sc.val[2]);
sc.val[3] = saturate_cast<T>(sc.val[3]);
}
}
void cv::gpu::compare(const GpuMat& src, Scalar sc, GpuMat& dst, int cmpop, Stream& stream)
{
using namespace arithm;
typedef void (*func_t)(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[7][6] =
{
{cmpScalarEq<unsigned char> , cmpScalarGt<unsigned char> , cmpScalarGe<unsigned char> , cmpScalarLt<unsigned char> , cmpScalarLe<unsigned char> , cmpScalarNe<unsigned char> },
{cmpScalarEq<signed char> , cmpScalarGt<signed char> , cmpScalarGe<signed char> , cmpScalarLt<signed char> , cmpScalarLe<signed char> , cmpScalarNe<signed char> },
{cmpScalarEq<unsigned short>, cmpScalarGt<unsigned short>, cmpScalarGe<unsigned short>, cmpScalarLt<unsigned short>, cmpScalarLe<unsigned short>, cmpScalarNe<unsigned short>},
{cmpScalarEq<short> , cmpScalarGt<short> , cmpScalarGe<short> , cmpScalarLt<short> , cmpScalarLe<short> , cmpScalarNe<short> },
{cmpScalarEq<int> , cmpScalarGt<int> , cmpScalarGe<int> , cmpScalarLt<int> , cmpScalarLe<int> , cmpScalarNe<int> },
{cmpScalarEq<float> , cmpScalarGt<float> , cmpScalarGe<float> , cmpScalarLt<float> , cmpScalarLe<float> , cmpScalarNe<float> },
{cmpScalarEq<double> , cmpScalarGt<double> , cmpScalarGe<double> , cmpScalarLt<double> , cmpScalarLe<double> , cmpScalarNe<double> }
};
typedef void (*cast_func_t)(Scalar& sc);
static const cast_func_t cast_func[] =
{
castScalar<unsigned char>, castScalar<signed char>, castScalar<unsigned short>, castScalar<short>, castScalar<int>, castScalar<float>, castScalar<double>
};
const int depth = src.depth();
const int cn = src.channels();
CV_Assert( depth <= CV_64F );
CV_Assert( cn <= 4 );
CV_Assert( cmpop >= CMP_EQ && cmpop <= CMP_NE );
if (depth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src.size(), CV_MAKE_TYPE(CV_8U, cn));
cast_func[depth](sc);
funcs[depth][cmpop](src, cn, sc.val, dst, StreamAccessor::getStream(stream));
}
//////////////////////////////////////////////////////////////////////////////
// Unary bitwise logical operations
@@ -52,6 +52,8 @@ void cv::gpu::HoughLines(const GpuMat&, GpuMat&, float, float, int, bool, int) {
void cv::gpu::HoughLines(const GpuMat&, GpuMat&, HoughLinesBuf&, float, float, int, bool, int) { throw_nogpu(); }
void cv::gpu::HoughLinesDownload(const GpuMat&, OutputArray, OutputArray) { throw_nogpu(); }
void cv::gpu::HoughLinesP(const GpuMat&, GpuMat&, HoughLinesBuf&, float, float, int, int, int) { throw_nogpu(); }
void cv::gpu::HoughCircles(const GpuMat&, GpuMat&, int, float, float, int, int, int, int, int) { throw_nogpu(); }
void cv::gpu::HoughCircles(const GpuMat&, GpuMat&, HoughCirclesBuf&, int, float, float, int, int, int, int, int) { throw_nogpu(); }
void cv::gpu::HoughCirclesDownload(const GpuMat&, OutputArray) { throw_nogpu(); }
@@ -157,6 +159,57 @@ void cv::gpu::HoughLinesDownload(const GpuMat& d_lines, OutputArray h_lines_, Ou
}
}
//////////////////////////////////////////////////////////
// HoughLinesP
namespace cv { namespace gpu { namespace device
{
namespace hough
{
int houghLinesProbabilistic_gpu(PtrStepSzb mask, PtrStepSzi accum, int4* out, int maxSize, float rho, float theta, int lineGap, int lineLength);
}
}}}
void cv::gpu::HoughLinesP(const GpuMat& src, GpuMat& lines, HoughLinesBuf& buf, float rho, float theta, int minLineLength, int maxLineGap, int maxLines)
{
using namespace cv::gpu::device::hough;
CV_Assert( src.type() == CV_8UC1 );
CV_Assert( src.cols < std::numeric_limits<unsigned short>::max() );
CV_Assert( src.rows < std::numeric_limits<unsigned short>::max() );
ensureSizeIsEnough(1, src.size().area(), CV_32SC1, buf.list);
unsigned int* srcPoints = buf.list.ptr<unsigned int>();
const int pointsCount = buildPointList_gpu(src, srcPoints);
if (pointsCount == 0)
{
lines.release();
return;
}
const int numangle = cvRound(CV_PI / theta);
const int numrho = cvRound(((src.cols + src.rows) * 2 + 1) / rho);
CV_Assert( numangle > 0 && numrho > 0 );
ensureSizeIsEnough(numangle + 2, numrho + 2, CV_32SC1, buf.accum);
buf.accum.setTo(Scalar::all(0));
DeviceInfo devInfo;
cudaDeviceProp prop;
cudaSafeCall(cudaGetDeviceProperties(&prop, devInfo.deviceID()));
linesAccum_gpu(srcPoints, pointsCount, buf.accum, rho, theta, prop.sharedMemPerBlock, devInfo.supports(FEATURE_SET_COMPUTE_20));
ensureSizeIsEnough(1, maxLines, CV_32SC4, lines);
int linesCount = houghLinesProbabilistic_gpu(src, buf.accum, lines.ptr<int4>(), maxLines, rho, theta, maxLineGap, minLineLength);
if (linesCount > 0)
lines.cols = linesCount;
else
lines.release();
}
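A hedged host-side sketch of the new API (``edges`` is an assumed CV_8UC1 edge map, e.g. Canny output; the download pattern mirrors the perf test above):

#include <opencv2/gpu/gpu.hpp>
#include <vector>

std::vector<cv::Vec4i> detectSegments(const cv::Mat& edges)
{
    cv::gpu::GpuMat d_edges(edges), d_lines;
    cv::gpu::HoughLinesBuf buf;
    cv::gpu::HoughLinesP(d_edges, d_lines, buf, 1.0f, float(CV_PI / 180.0),
                         50 /*minLineLength*/, 5 /*maxLineGap*/, 4096 /*maxLines*/);

    std::vector<cv::Vec4i> segments;
    if (!d_lines.empty())
    {
        cv::Mat h_lines(d_lines);                    // 1 x N, CV_32SC4: (x1, y1, x2, y2)
        const cv::Vec4i* ptr = h_lines.ptr<cv::Vec4i>();
        segments.assign(ptr, ptr + h_lines.cols);
    }
    return segments;
}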
//////////////////////////////////////////////////////////
// HoughCircles
@@ -51,13 +51,17 @@ void cv::gpu::meanStdDev(const GpuMat&, Scalar&, Scalar&) { throw_nogpu(); }
void cv::gpu::meanStdDev(const GpuMat&, Scalar&, Scalar&, GpuMat&) { throw_nogpu(); }
double cv::gpu::norm(const GpuMat&, int) { throw_nogpu(); return 0.0; }
double cv::gpu::norm(const GpuMat&, int, GpuMat&) { throw_nogpu(); return 0.0; }
double cv::gpu::norm(const GpuMat&, int, const GpuMat&, GpuMat&) { throw_nogpu(); return 0.0; }
double cv::gpu::norm(const GpuMat&, const GpuMat&, int) { throw_nogpu(); return 0.0; }
Scalar cv::gpu::sum(const GpuMat&) { throw_nogpu(); return Scalar(); }
Scalar cv::gpu::sum(const GpuMat&, GpuMat&) { throw_nogpu(); return Scalar(); }
Scalar cv::gpu::sum(const GpuMat&, const GpuMat&, GpuMat&) { throw_nogpu(); return Scalar(); }
Scalar cv::gpu::absSum(const GpuMat&) { throw_nogpu(); return Scalar(); }
Scalar cv::gpu::absSum(const GpuMat&, GpuMat&) { throw_nogpu(); return Scalar(); }
Scalar cv::gpu::absSum(const GpuMat&, const GpuMat&, GpuMat&) { throw_nogpu(); return Scalar(); }
Scalar cv::gpu::sqrSum(const GpuMat&) { throw_nogpu(); return Scalar(); }
Scalar cv::gpu::sqrSum(const GpuMat&, GpuMat&) { throw_nogpu(); return Scalar(); }
Scalar cv::gpu::sqrSum(const GpuMat&, const GpuMat&, GpuMat&) { throw_nogpu(); return Scalar(); }
void cv::gpu::minMax(const GpuMat&, double*, double*, const GpuMat&) { throw_nogpu(); }
void cv::gpu::minMax(const GpuMat&, double*, double*, const GpuMat&, GpuMat&) { throw_nogpu(); }
void cv::gpu::minMaxLoc(const GpuMat&, double*, double*, Point*, Point*, const GpuMat&) { throw_nogpu(); }
@@ -150,24 +154,30 @@ void cv::gpu::meanStdDev(const GpuMat& src, Scalar& mean, Scalar& stddev, GpuMat
double cv::gpu::norm(const GpuMat& src, int normType)
{
GpuMat buf;
return norm(src, normType, GpuMat(), buf);
}
double cv::gpu::norm(const GpuMat& src, int normType, GpuMat& buf)
{
return norm(src, normType, GpuMat(), buf);
}
double cv::gpu::norm(const GpuMat& src, int normType, const GpuMat& mask, GpuMat& buf)
{
CV_Assert(normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2);
CV_Assert(mask.empty() || (mask.type() == CV_8UC1 && mask.size() == src.size() && src.channels() == 1));
GpuMat src_single_channel = src.reshape(1);
if (normType == NORM_L1)
return absSum(src_single_channel, mask, buf)[0];
if (normType == NORM_L2)
return std::sqrt(sqrSum(src_single_channel, mask, buf)[0]);
// NORM_INF
double min_val, max_val;
minMax(src_single_channel, &min_val, &max_val, mask, buf);
return std::max(std::abs(min_val), std::abs(max_val));
}
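For intuition: on the two-element matrix [3, 4] these reductions give NORM_L1 = |3| + |4| = 7, NORM_L2 = sqrt(9 + 16) = 5, and NORM_INF = max(|3|, |4|) = 4 (a worked example, not part of the diff).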
@@ -209,24 +219,29 @@ namespace sum
void getBufSize(int cols, int rows, int cn, int& bufcols, int& bufrows);
template <typename T, int cn>
void run(PtrStepSzb src, void* buf, double* sum, PtrStepSzb mask);
template <typename T, int cn>
void runAbs(PtrStepSzb src, void* buf, double* sum, PtrStepSzb mask);
template <typename T, int cn>
void runSqr(PtrStepSzb src, void* buf, double* sum, PtrStepSzb mask);
}
Scalar cv::gpu::sum(const GpuMat& src)
{
GpuMat buf;
return sum(src, GpuMat(), buf);
}
Scalar cv::gpu::sum(const GpuMat& src, GpuMat& buf)
{
return sum(src, GpuMat(), buf);
}
Scalar cv::gpu::sum(const GpuMat& src, const GpuMat& mask, GpuMat& buf)
{
typedef void (*func_t)(PtrStepSzb src, void* buf, double* sum, PtrStepSzb mask);
static const func_t funcs[7][5] =
{
{0, ::sum::run<uchar , 1>, ::sum::run<uchar , 2>, ::sum::run<uchar , 3>, ::sum::run<uchar , 4>},
@@ -238,6 +253,8 @@ Scalar cv::gpu::sum(const GpuMat& src, GpuMat& buf)
{0, ::sum::run<double, 1>, ::sum::run<double, 2>, ::sum::run<double, 3>, ::sum::run<double, 4>}
};
CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.size() == src.size()) );
if (src.depth() == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
@@ -252,7 +269,7 @@ Scalar cv::gpu::sum(const GpuMat& src, GpuMat& buf)
const func_t func = funcs[src.depth()][src.channels()];
double result[4];
func(src, buf.data, result, mask);
return Scalar(result[0], result[1], result[2], result[3]);
}
@@ -260,12 +277,17 @@ Scalar cv::gpu::sum(const GpuMat& src, GpuMat& buf)
Scalar cv::gpu::absSum(const GpuMat& src)
{
GpuMat buf;
return absSum(src, GpuMat(), buf);
}
Scalar cv::gpu::absSum(const GpuMat& src, GpuMat& buf)
{
return absSum(src, GpuMat(), buf);
}
Scalar cv::gpu::absSum(const GpuMat& src, const GpuMat& mask, GpuMat& buf)
{
typedef void (*func_t)(PtrStepSzb src, void* buf, double* sum, PtrStepSzb mask);
static const func_t funcs[7][5] =
{
{0, ::sum::runAbs<uchar , 1>, ::sum::runAbs<uchar , 2>, ::sum::runAbs<uchar , 3>, ::sum::runAbs<uchar , 4>},
@@ -277,6 +299,8 @@ Scalar cv::gpu::absSum(const GpuMat& src, GpuMat& buf)
{0, ::sum::runAbs<double, 1>, ::sum::runAbs<double, 2>, ::sum::runAbs<double, 3>, ::sum::runAbs<double, 4>}
};
CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.size() == src.size()) );
if (src.depth() == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
@@ -291,7 +315,7 @@ Scalar cv::gpu::absSum(const GpuMat& src, GpuMat& buf)
const func_t func = funcs[src.depth()][src.channels()];
double result[4];
func(src, buf.data, result, mask);
return Scalar(result[0], result[1], result[2], result[3]);
}
@@ -299,12 +323,17 @@ Scalar cv::gpu::absSum(const GpuMat& src, GpuMat& buf)
Scalar cv::gpu::sqrSum(const GpuMat& src)
{
GpuMat buf;
return sqrSum(src, GpuMat(), buf);
}
Scalar cv::gpu::sqrSum(const GpuMat& src, GpuMat& buf)
{
return sqrSum(src, GpuMat(), buf);
}
Scalar cv::gpu::sqrSum(const GpuMat& src, const GpuMat& mask, GpuMat& buf)
{
typedef void (*func_t)(PtrStepSzb src, void* buf, double* sum, PtrStepSzb mask);
static const func_t funcs[7][5] =
{
{0, ::sum::runSqr<uchar , 1>, ::sum::runSqr<uchar , 2>, ::sum::runSqr<uchar , 3>, ::sum::runSqr<uchar , 4>},
@@ -316,6 +345,8 @@ Scalar cv::gpu::sqrSum(const GpuMat& src, GpuMat& buf)
{0, ::sum::runSqr<double, 1>, ::sum::runSqr<double, 2>, ::sum::runSqr<double, 3>, ::sum::runSqr<double, 4>}
};
CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.size() == src.size()) );
if (src.depth() == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
@@ -330,7 +361,7 @@ Scalar cv::gpu::sqrSum(const GpuMat& src, GpuMat& buf)
const func_t func = funcs[src.depth()][src.channels()];
double result[4];
func(src, buf.data, result, mask);
return Scalar(result[0], result[1], result[2], result[3]);
}
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
using namespace std;
using namespace cv;
using namespace cv::gpu;
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
void cv::gpu::calcOpticalFlowBM(const GpuMat&, const GpuMat&, Size, Size, Size, bool, GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::FastOpticalFlowBM::operator ()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, int, int, Stream&) { throw_nogpu(); }
#else // HAVE_CUDA
namespace optflowbm
{
void calc(PtrStepSzb prev, PtrStepSzb curr, PtrStepSzf velx, PtrStepSzf vely, int2 blockSize, int2 shiftSize, bool usePrevious,
int maxX, int maxY, int acceptLevel, int escapeLevel, const short2* ss, int ssCount, cudaStream_t stream);
}
void cv::gpu::calcOpticalFlowBM(const GpuMat& prev, const GpuMat& curr, Size blockSize, Size shiftSize, Size maxRange, bool usePrevious, GpuMat& velx, GpuMat& vely, GpuMat& buf, Stream& st)
{
CV_Assert( prev.type() == CV_8UC1 );
CV_Assert( curr.size() == prev.size() && curr.type() == prev.type() );
const Size velSize((prev.cols - blockSize.width + shiftSize.width) / shiftSize.width,
(prev.rows - blockSize.height + shiftSize.height) / shiftSize.height);
velx.create(velSize, CV_32FC1);
vely.create(velSize, CV_32FC1);
// scanning scheme coordinates
vector<short2> ss((2 * maxRange.width + 1) * (2 * maxRange.height + 1));
int ssCount = 0;
// Calculate scanning scheme
const int minCount = std::min(maxRange.width, maxRange.height);
// use spiral search pattern
//
// 9 10 11 12
// 8 1 2 13
// 7 * 3 14
// 6 5 4 15
//... 20 19 18 17
//
for (int i = 0; i < minCount; ++i)
{
// four cycles along sides
int x = -i - 1, y = x;
// upper side
for (int j = -i; j <= i + 1; ++j, ++ssCount)
{
ss[ssCount].x = ++x;
ss[ssCount].y = y;
}
// right side
for (int j = -i; j <= i + 1; ++j, ++ssCount)
{
ss[ssCount].x = x;
ss[ssCount].y = ++y;
}
// bottom side
for (int j = -i; j <= i + 1; ++j, ++ssCount)
{
ss[ssCount].x = --x;
ss[ssCount].y = y;
}
// left side
for (int j = -i; j <= i + 1; ++j, ++ssCount)
{
ss[ssCount].x = x;
ss[ssCount].y = --y;
}
}
// the rest part
if (maxRange.width < maxRange.height)
{
const int xleft = -minCount;
// cycle by neighbor rings
for (int i = minCount; i < maxRange.height; ++i)
{
// two cycles by x
int y = -(i + 1);
int x = xleft;
// upper side
for (int j = -maxRange.width; j <= maxRange.width; ++j, ++ssCount, ++x)
{
ss[ssCount].x = x;
ss[ssCount].y = y;
}
x = xleft;
y = -y;
// bottom side
for (int j = -maxRange.width; j <= maxRange.width; ++j, ++ssCount, ++x)
{
ss[ssCount].x = x;
ss[ssCount].y = y;
}
}
}
else if (maxRange.width > maxRange.height)
{
const int yupper = -minCount;
// cycle by neighbor rings
for (int i = minCount; i < maxRange.width; ++i)
{
// two cycles by y
int x = -(i + 1);
int y = yupper;
// left side
for (int j = -maxRange.height; j <= maxRange.height; ++j, ++ssCount, ++y)
{
ss[ssCount].x = x;
ss[ssCount].y = y;
}
y = yupper;
x = -x;
// right side
for (int j = -maxRange.height; j <= maxRange.height; ++j, ++ssCount, ++y)
{
ss[ssCount].x = x;
ss[ssCount].y = y;
}
}
}
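// upload the scanning scheme to the device (asynchronously if a stream was provided)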
const cudaStream_t stream = StreamAccessor::getStream(st);
ensureSizeIsEnough(1, ssCount, CV_16SC2, buf);
if (stream == 0)
cudaSafeCall( cudaMemcpy(buf.data, &ss[0], ssCount * sizeof(short2), cudaMemcpyHostToDevice) );
else
cudaSafeCall( cudaMemcpyAsync(buf.data, &ss[0], ssCount * sizeof(short2), cudaMemcpyHostToDevice, stream) );
const int maxX = prev.cols - blockSize.width;
const int maxY = prev.rows - blockSize.height;
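// SAD thresholds scaled by block area; presumably the kernel accepts a
// candidate below acceptLevel and abandons positions above escapeLevel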
const int SMALL_DIFF = 2;
const int BIG_DIFF = 128;
const int blSize = blockSize.area();
const int acceptLevel = blSize * SMALL_DIFF;
const int escapeLevel = blSize * BIG_DIFF;
optflowbm::calc(prev, curr, velx, vely,
make_int2(blockSize.width, blockSize.height), make_int2(shiftSize.width, shiftSize.height), usePrevious,
maxX, maxY, acceptLevel, escapeLevel, buf.ptr<short2>(), ssCount, stream);
}
namespace optflowbm_fast
{
void get_buffer_size(int src_cols, int src_rows, int search_window, int block_window, int& buffer_cols, int& buffer_rows);
template <typename T>
void calc(PtrStepSzb I0, PtrStepSzb I1, PtrStepSzf velx, PtrStepSzf vely, PtrStepi buffer, int search_window, int block_window, cudaStream_t stream);
}
void cv::gpu::FastOpticalFlowBM::operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, int search_window, int block_window, Stream& stream)
{
CV_Assert( I0.type() == CV_8UC1 );
CV_Assert( I1.size() == I0.size() && I1.type() == I0.type() );
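// pad both frames so that every block and search window stays inside the image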
int border_size = search_window / 2 + block_window / 2;
Size esize = I0.size() + Size(border_size, border_size) * 2;
ensureSizeIsEnough(esize, I0.type(), extended_I0);
ensureSizeIsEnough(esize, I0.type(), extended_I1);
copyMakeBorder(I0, extended_I0, border_size, border_size, border_size, border_size, cv::BORDER_DEFAULT, Scalar(), stream);
copyMakeBorder(I1, extended_I1, border_size, border_size, border_size, border_size, cv::BORDER_DEFAULT, Scalar(), stream);
GpuMat I0_hdr = extended_I0(Rect(Point2i(border_size, border_size), I0.size()));
GpuMat I1_hdr = extended_I1(Rect(Point2i(border_size, border_size), I0.size()));
int bcols, brows;
optflowbm_fast::get_buffer_size(I0.cols, I0.rows, search_window, block_window, bcols, brows);
ensureSizeIsEnough(brows, bcols, CV_32SC1, buffer);
flowx.create(I0.size(), CV_32FC1);
flowy.create(I0.size(), CV_32FC1);
optflowbm_fast::calc<uchar>(I0_hdr, I1_hdr, flowx, flowy, buffer, search_window, block_window, StreamAccessor::getStream(stream));
}
#endif // HAVE_CUDA
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
cv::gpu::OpticalFlowDual_TVL1_GPU::OpticalFlowDual_TVL1_GPU() { throw_nogpu(); }
void cv::gpu::OpticalFlowDual_TVL1_GPU::operator ()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&) { throw_nogpu(); }
void cv::gpu::OpticalFlowDual_TVL1_GPU::collectGarbage() {}
void cv::gpu::OpticalFlowDual_TVL1_GPU::procOneScale(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&) { throw_nogpu(); }
#else
using namespace std;
using namespace cv;
using namespace cv::gpu;
cv::gpu::OpticalFlowDual_TVL1_GPU::OpticalFlowDual_TVL1_GPU()
{
tau = 0.25;
lambda = 0.15;
theta = 0.3;
nscales = 5;
warps = 5;
epsilon = 0.01;
iterations = 300;
useInitialFlow = false;
}
void cv::gpu::OpticalFlowDual_TVL1_GPU::operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy)
{
CV_Assert( I0.type() == CV_8UC1 || I0.type() == CV_32FC1 );
CV_Assert( I0.size() == I1.size() );
CV_Assert( I0.type() == I1.type() );
CV_Assert( !useInitialFlow || (flowx.size() == I0.size() && flowx.type() == CV_32FC1 && flowy.size() == flowx.size() && flowy.type() == flowx.type()) );
CV_Assert( nscales > 0 );
// allocate memory for the pyramid structure
I0s.resize(nscales);
I1s.resize(nscales);
u1s.resize(nscales);
u2s.resize(nscales);
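// bring inputs to CV_32F; floating-point input is presumably expected
// in [0, 1], hence the rescale by 255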
I0.convertTo(I0s[0], CV_32F, I0.depth() == CV_8U ? 1.0 : 255.0);
I1.convertTo(I1s[0], CV_32F, I1.depth() == CV_8U ? 1.0 : 255.0);
if (!useInitialFlow)
{
flowx.create(I0.size(), CV_32FC1);
flowy.create(I0.size(), CV_32FC1);
}
u1s[0] = flowx;
u2s[0] = flowy;
I1x_buf.create(I0.size(), CV_32FC1);
I1y_buf.create(I0.size(), CV_32FC1);
I1w_buf.create(I0.size(), CV_32FC1);
I1wx_buf.create(I0.size(), CV_32FC1);
I1wy_buf.create(I0.size(), CV_32FC1);
grad_buf.create(I0.size(), CV_32FC1);
rho_c_buf.create(I0.size(), CV_32FC1);
p11_buf.create(I0.size(), CV_32FC1);
p12_buf.create(I0.size(), CV_32FC1);
p21_buf.create(I0.size(), CV_32FC1);
p22_buf.create(I0.size(), CV_32FC1);
diff_buf.create(I0.size(), CV_32FC1);
// create the scales
for (int s = 1; s < nscales; ++s)
{
gpu::pyrDown(I0s[s - 1], I0s[s]);
gpu::pyrDown(I1s[s - 1], I1s[s]);
if (I0s[s].cols < 16 || I0s[s].rows < 16)
{
nscales = s;
break;
}
if (useInitialFlow)
{
gpu::pyrDown(u1s[s - 1], u1s[s]);
gpu::pyrDown(u2s[s - 1], u2s[s]);
gpu::multiply(u1s[s], Scalar::all(0.5), u1s[s]);
gpu::multiply(u2s[s], Scalar::all(0.5), u2s[s]);
}
}
// pyramidal structure for computing the optical flow
for (int s = nscales - 1; s >= 0; --s)
{
// compute the optical flow at the current scale
procOneScale(I0s[s], I1s[s], u1s[s], u2s[s]);
// if this was the last scale, finish now
if (s == 0)
break;
// otherwise, upsample the optical flow
// zoom the optical flow for the next finer scale
gpu::resize(u1s[s], u1s[s - 1], I0s[s - 1].size());
gpu::resize(u2s[s], u2s[s - 1], I0s[s - 1].size());
// scale the optical flow with the appropriate zoom factor
gpu::multiply(u1s[s - 1], Scalar::all(2), u1s[s - 1]);
gpu::multiply(u2s[s - 1], Scalar::all(2), u2s[s - 1]);
}
}
namespace tvl1flow
{
void centeredGradient(PtrStepSzf src, PtrStepSzf dx, PtrStepSzf dy);
void warpBackward(PtrStepSzf I0, PtrStepSzf I1, PtrStepSzf I1x, PtrStepSzf I1y, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf I1w, PtrStepSzf I1wx, PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho);
void estimateU(PtrStepSzf I1wx, PtrStepSzf I1wy,
PtrStepSzf grad, PtrStepSzf rho_c,
PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22,
PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf error,
float l_t, float theta);
void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, float taut);
}
void cv::gpu::OpticalFlowDual_TVL1_GPU::procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2)
{
using namespace tvl1flow;
const double scaledEpsilon = epsilon * epsilon * I0.size().area();
CV_DbgAssert( I1.size() == I0.size() );
CV_DbgAssert( I1.type() == I0.type() );
CV_DbgAssert( u1.empty() || u1.size() == I0.size() );
CV_DbgAssert( u2.size() == u1.size() );
if (u1.empty())
{
u1.create(I0.size(), CV_32FC1);
u1.setTo(Scalar::all(0));
u2.create(I0.size(), CV_32FC1);
u2.setTo(Scalar::all(0));
}
GpuMat I1x = I1x_buf(Rect(0, 0, I0.cols, I0.rows));
GpuMat I1y = I1y_buf(Rect(0, 0, I0.cols, I0.rows));
centeredGradient(I1, I1x, I1y);
GpuMat I1w = I1w_buf(Rect(0, 0, I0.cols, I0.rows));
GpuMat I1wx = I1wx_buf(Rect(0, 0, I0.cols, I0.rows));
GpuMat I1wy = I1wy_buf(Rect(0, 0, I0.cols, I0.rows));
GpuMat grad = grad_buf(Rect(0, 0, I0.cols, I0.rows));
GpuMat rho_c = rho_c_buf(Rect(0, 0, I0.cols, I0.rows));
GpuMat p11 = p11_buf(Rect(0, 0, I0.cols, I0.rows));
GpuMat p12 = p12_buf(Rect(0, 0, I0.cols, I0.rows));
GpuMat p21 = p21_buf(Rect(0, 0, I0.cols, I0.rows));
GpuMat p22 = p22_buf(Rect(0, 0, I0.cols, I0.rows));
p11.setTo(Scalar::all(0));
p12.setTo(Scalar::all(0));
p21.setTo(Scalar::all(0));
p22.setTo(Scalar::all(0));
GpuMat diff = diff_buf(Rect(0, 0, I0.cols, I0.rows));
const float l_t = static_cast<float>(lambda * theta);
const float taut = static_cast<float>(tau / theta);
for (int warpings = 0; warpings < warps; ++warpings)
{
warpBackward(I0, I1, I1x, I1y, u1, u2, I1w, I1wx, I1wy, grad, rho_c);
double error = numeric_limits<double>::max();
for (int n = 0; error > scaledEpsilon && n < iterations; ++n)
{
estimateU(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, u1, u2, diff, l_t, static_cast<float>(theta));
error = gpu::sum(diff, norm_buf)[0];
estimateDualVariables(u1, u2, p11, p12, p21, p22, taut);
}
}
}
void cv::gpu::OpticalFlowDual_TVL1_GPU::collectGarbage()
{
I0s.clear();
I1s.clear();
u1s.clear();
u2s.clear();
I1x_buf.release();
I1y_buf.release();
I1w_buf.release();
I1wx_buf.release();
I1wy_buf.release();
grad_buf.release();
rho_c_buf.release();
p11_buf.release();
p12_buf.release();
p21_buf.release();
p22_buf.release();
diff_buf.release();
norm_buf.release();
}
#endif // !defined HAVE_CUDA || defined(CUDA_DISABLER)
...@@ -1669,6 +1669,117 @@ INSTANTIATE_TEST_CASE_P(GPU_Core, Compare_Array, testing::Combine(
ALL_CMP_CODES,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Compare_Scalar
namespace
{
template <template <typename> class Op, typename T>
void compareScalarImpl(const cv::Mat& src, cv::Scalar sc, cv::Mat& dst)
{
Op<T> op;
const int cn = src.channels();
dst.create(src.size(), CV_MAKE_TYPE(CV_8U, cn));
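// cv::compare convention: each output element is 255 where the predicate holds, 0 elsewhere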
for (int y = 0; y < src.rows; ++y)
{
for (int x = 0; x < src.cols; ++x)
{
for (int c = 0; c < cn; ++c)
{
T src_val = src.at<T>(y, x * cn + c);
T sc_val = cv::saturate_cast<T>(sc.val[c]);
dst.at<uchar>(y, x * cn + c) = static_cast<uchar>(static_cast<int>(op(src_val, sc_val)) * 255);
}
}
}
}
void compareScalarGold(const cv::Mat& src, cv::Scalar sc, cv::Mat& dst, int cmpop)
{
typedef void (*func_t)(const cv::Mat& src, cv::Scalar sc, cv::Mat& dst);
static const func_t funcs[7][6] =
{
{compareScalarImpl<std::equal_to, unsigned char> , compareScalarImpl<std::greater, unsigned char> , compareScalarImpl<std::greater_equal, unsigned char> , compareScalarImpl<std::less, unsigned char> , compareScalarImpl<std::less_equal, unsigned char> , compareScalarImpl<std::not_equal_to, unsigned char> },
{compareScalarImpl<std::equal_to, signed char> , compareScalarImpl<std::greater, signed char> , compareScalarImpl<std::greater_equal, signed char> , compareScalarImpl<std::less, signed char> , compareScalarImpl<std::less_equal, signed char> , compareScalarImpl<std::not_equal_to, signed char> },
{compareScalarImpl<std::equal_to, unsigned short>, compareScalarImpl<std::greater, unsigned short>, compareScalarImpl<std::greater_equal, unsigned short>, compareScalarImpl<std::less, unsigned short>, compareScalarImpl<std::less_equal, unsigned short>, compareScalarImpl<std::not_equal_to, unsigned short>},
{compareScalarImpl<std::equal_to, short> , compareScalarImpl<std::greater, short> , compareScalarImpl<std::greater_equal, short> , compareScalarImpl<std::less, short> , compareScalarImpl<std::less_equal, short> , compareScalarImpl<std::not_equal_to, short> },
{compareScalarImpl<std::equal_to, int> , compareScalarImpl<std::greater, int> , compareScalarImpl<std::greater_equal, int> , compareScalarImpl<std::less, int> , compareScalarImpl<std::less_equal, int> , compareScalarImpl<std::not_equal_to, int> },
{compareScalarImpl<std::equal_to, float> , compareScalarImpl<std::greater, float> , compareScalarImpl<std::greater_equal, float> , compareScalarImpl<std::less, float> , compareScalarImpl<std::less_equal, float> , compareScalarImpl<std::not_equal_to, float> },
{compareScalarImpl<std::equal_to, double> , compareScalarImpl<std::greater, double> , compareScalarImpl<std::greater_equal, double> , compareScalarImpl<std::less, double> , compareScalarImpl<std::less_equal, double> , compareScalarImpl<std::not_equal_to, double> }
};
funcs[src.depth()][cmpop](src, sc, dst);
}
}
PARAM_TEST_CASE(Compare_Scalar, cv::gpu::DeviceInfo, cv::Size, MatType, CmpCode, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
int cmp_code;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
cmp_code = GET_PARAM(3);
useRoi = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(Compare_Scalar, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Scalar sc = randomScalar(0.0, 255.0);
if (src.depth() < CV_32F)
{
sc.val[0] = cvRound(sc.val[0]);
sc.val[1] = cvRound(sc.val[1]);
sc.val[2] = cvRound(sc.val[2]);
sc.val[3] = cvRound(sc.val[3]);
}
if (src.depth() == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat dst;
cv::gpu::compare(loadMat(src), sc, dst, cmp_code);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat dst = createMat(size, CV_MAKE_TYPE(CV_8U, src.channels()), useRoi);
cv::gpu::compare(loadMat(src, useRoi), sc, dst, cmp_code);
cv::Mat dst_gold;
compareScalarGold(src, sc, dst_gold, cmp_code);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Compare_Scalar, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
TYPES(CV_8U, CV_64F, 1, 4),
ALL_CMP_CODES,
WHOLE_SUBMAT));
//////////////////////////////////////////////////////////////////////////////
// Bitwise_Array
...@@ -2807,10 +2918,12 @@ PARAM_TEST_CASE(Norm, cv::gpu::DeviceInfo, cv::Size, MatDepth, NormCode, UseRoi)
GPU_TEST_P(Norm, Accuracy)
{
cv::Mat src = randomMat(size, depth);
cv::Mat mask = randomMat(size, CV_8UC1, 0, 2);
cv::gpu::GpuMat d_buf;
double val = cv::gpu::norm(loadMat(src, useRoi), normCode, loadMat(mask, useRoi), d_buf);
double val_gold = cv::norm(src, normCode, mask);
EXPECT_NEAR(val_gold, val, depth < CV_32F ? 0.0 : 1.0);
}
...@@ -3427,4 +3540,70 @@ INSTANTIATE_TEST_CASE_P(GPU_Core, Reduce, testing::Combine(
ALL_REDUCE_CODES,
WHOLE_SUBMAT));
//////////////////////////////////////////////////////////////////////////////
// Normalize
PARAM_TEST_CASE(Normalize, cv::gpu::DeviceInfo, cv::Size, MatDepth, NormCode, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
int norm_type;
bool useRoi;
double alpha;
double beta;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
norm_type = GET_PARAM(3);
useRoi = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
alpha = 1;
beta = 0;
}
};
GPU_TEST_P(Normalize, WithOutMask)
{
cv::Mat src = randomMat(size, type);
cv::gpu::GpuMat dst = createMat(size, type, useRoi);
cv::gpu::normalize(loadMat(src, useRoi), dst, alpha, beta, norm_type, type);
cv::Mat dst_gold;
cv::normalize(src, dst_gold, alpha, beta, norm_type, type);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-6);
}
GPU_TEST_P(Normalize, WithMask)
{
cv::Mat src = randomMat(size, type);
cv::Mat mask = randomMat(size, CV_8UC1, 0, 2);
cv::gpu::GpuMat dst = createMat(size, type, useRoi);
dst.setTo(cv::Scalar::all(0));
cv::gpu::normalize(loadMat(src, useRoi), dst, alpha, beta, norm_type, type, loadMat(mask, useRoi));
cv::Mat dst_gold(size, type);
dst_gold.setTo(cv::Scalar::all(0));
cv::normalize(src, dst_gold, alpha, beta, norm_type, type, mask);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-6);
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Normalize, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
testing::Values(NormCode(cv::NORM_L1), NormCode(cv::NORM_L2), NormCode(cv::NORM_INF), NormCode(cv::NORM_MINMAX)),
WHOLE_SUBMAT));
#endif // HAVE_CUDA
...@@ -401,4 +401,223 @@ INSTANTIATE_TEST_CASE_P(GPU_Video, FarnebackOpticalFlow, testing::Combine(
testing::Values(FarnebackOptFlowFlags(0), FarnebackOptFlowFlags(cv::OPTFLOW_FARNEBACK_GAUSSIAN)),
testing::Values(UseInitFlow(false), UseInitFlow(true))));
//////////////////////////////////////////////////////
// OpticalFlowDual_TVL1
PARAM_TEST_CASE(OpticalFlowDual_TVL1, cv::gpu::DeviceInfo, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
useRoi = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(OpticalFlowDual_TVL1, Accuracy)
{
cv::Mat frame0 = readImage("opticalflow/rubberwhale1.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty());
cv::Mat frame1 = readImage("opticalflow/rubberwhale2.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
cv::gpu::OpticalFlowDual_TVL1_GPU d_alg;
cv::gpu::GpuMat d_flowx = createMat(frame0.size(), CV_32FC1, useRoi);
cv::gpu::GpuMat d_flowy = createMat(frame0.size(), CV_32FC1, useRoi);
d_alg(loadMat(frame0, useRoi), loadMat(frame1, useRoi), d_flowx, d_flowy);
cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
cv::Mat flow;
alg->calc(frame0, frame1, flow);
cv::Mat gold[2];
cv::split(flow, gold);
EXPECT_MAT_SIMILAR(gold[0], d_flowx, 3e-3);
EXPECT_MAT_SIMILAR(gold[1], d_flowy, 3e-3);
}
INSTANTIATE_TEST_CASE_P(GPU_Video, OpticalFlowDual_TVL1, testing::Combine(
ALL_DEVICES,
WHOLE_SUBMAT));
//////////////////////////////////////////////////////
// OpticalFlowBM
namespace
{
void calcOpticalFlowBM(const cv::Mat& prev, const cv::Mat& curr,
cv::Size bSize, cv::Size shiftSize, cv::Size maxRange, int usePrevious,
cv::Mat& velx, cv::Mat& vely)
{
cv::Size sz((curr.cols - bSize.width + shiftSize.width)/shiftSize.width, (curr.rows - bSize.height + shiftSize.height)/shiftSize.height);
velx.create(sz, CV_32FC1);
vely.create(sz, CV_32FC1);
CvMat cvprev = prev;
CvMat cvcurr = curr;
CvMat cvvelx = velx;
CvMat cvvely = vely;
cvCalcOpticalFlowBM(&cvprev, &cvcurr, bSize, shiftSize, maxRange, usePrevious, &cvvelx, &cvvely);
}
}
struct OpticalFlowBM : testing::TestWithParam<cv::gpu::DeviceInfo>
{
};
GPU_TEST_P(OpticalFlowBM, Accuracy)
{
cv::gpu::DeviceInfo devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
cv::Mat frame0 = readImage("opticalflow/rubberwhale1.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty());
cv::Mat frame1 = readImage("opticalflow/rubberwhale2.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
cv::Size block_size(16, 16);
cv::Size shift_size(1, 1);
cv::Size max_range(16, 16);
cv::gpu::GpuMat d_velx, d_vely, buf;
cv::gpu::calcOpticalFlowBM(loadMat(frame0), loadMat(frame1),
block_size, shift_size, max_range, false,
d_velx, d_vely, buf);
cv::Mat velx, vely;
calcOpticalFlowBM(frame0, frame1, block_size, shift_size, max_range, false, velx, vely);
EXPECT_MAT_NEAR(velx, d_velx, 0);
EXPECT_MAT_NEAR(vely, d_vely, 0);
}
INSTANTIATE_TEST_CASE_P(GPU_Video, OpticalFlowBM, ALL_DEVICES);
//////////////////////////////////////////////////////
// FastOpticalFlowBM
namespace
{
void FastOpticalFlowBM_gold(const cv::Mat_<uchar>& I0, const cv::Mat_<uchar>& I1, cv::Mat_<float>& velx, cv::Mat_<float>& vely, int search_window, int block_window)
{
velx.create(I0.size());
vely.create(I0.size());
int search_radius = search_window / 2;
int block_radius = block_window / 2;
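// exhaustive reference implementation: for every pixel, search the
// (search_window x search_window) neighborhood and keep the shift with the smallest SAD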
for (int y = 0; y < I0.rows; ++y)
{
for (int x = 0; x < I0.cols; ++x)
{
int bestDist = std::numeric_limits<int>::max();
int bestDx = 0;
int bestDy = 0;
for (int dy = -search_radius; dy <= search_radius; ++dy)
{
for (int dx = -search_radius; dx <= search_radius; ++dx)
{
int dist = 0;
for (int by = -block_radius; by <= block_radius; ++by)
{
for (int bx = -block_radius; bx <= block_radius; ++bx)
{
int I0_val = I0(cv::borderInterpolate(y + by, I0.rows, cv::BORDER_DEFAULT), cv::borderInterpolate(x + bx, I0.cols, cv::BORDER_DEFAULT));
int I1_val = I1(cv::borderInterpolate(y + dy + by, I0.rows, cv::BORDER_DEFAULT), cv::borderInterpolate(x + dx + bx, I0.cols, cv::BORDER_DEFAULT));
dist += std::abs(I0_val - I1_val);
}
}
if (dist < bestDist)
{
bestDist = dist;
bestDx = dx;
bestDy = dy;
}
}
}
velx(y, x) = (float) bestDx;
vely(y, x) = (float) bestDy;
}
}
}
double calc_rmse(const cv::Mat_<float>& flow1, const cv::Mat_<float>& flow2)
{
double sum = 0.0;
for (int y = 0; y < flow1.rows; ++y)
{
for (int x = 0; x < flow1.cols; ++x)
{
double diff = flow1(y, x) - flow2(y, x);
sum += diff * diff;
}
}
return std::sqrt(sum / flow1.size().area());
}
}
struct FastOpticalFlowBM : testing::TestWithParam<cv::gpu::DeviceInfo>
{
};
GPU_TEST_P(FastOpticalFlowBM, Accuracy)
{
const double MAX_RMSE = 0.6;
int search_window = 15;
int block_window = 5;
cv::gpu::DeviceInfo devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
cv::Mat frame0 = readImage("opticalflow/rubberwhale1.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty());
cv::Mat frame1 = readImage("opticalflow/rubberwhale2.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
cv::Size smallSize(320, 240);
cv::Mat frame0_small;
cv::Mat frame1_small;
cv::resize(frame0, frame0_small, smallSize);
cv::resize(frame1, frame1_small, smallSize);
cv::gpu::GpuMat d_flowx;
cv::gpu::GpuMat d_flowy;
cv::gpu::FastOpticalFlowBM fastBM;
fastBM(loadMat(frame0_small), loadMat(frame1_small), d_flowx, d_flowy, search_window, block_window);
cv::Mat_<float> flowx;
cv::Mat_<float> flowy;
FastOpticalFlowBM_gold(frame0_small, frame1_small, flowx, flowy, search_window, block_window);
double err;
err = calc_rmse(flowx, cv::Mat(d_flowx));
EXPECT_LE(err, MAX_RMSE);
err = calc_rmse(flowy, cv::Mat(d_flowy));
EXPECT_LE(err, MAX_RMSE);
}
INSTANTIATE_TEST_CASE_P(GPU_Video, FastOpticalFlowBM, ALL_DEVICES);
#endif // HAVE_CUDA
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#ifdef HAVE_CUDA
#if CUDA_VERSION >= 5000
struct Async : testing::TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::CudaMem src;
cv::gpu::GpuMat d_src;
cv::gpu::CudaMem dst;
cv::gpu::GpuMat d_dst;
virtual void SetUp()
{
cv::gpu::DeviceInfo devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
cv::Mat m = randomMat(cv::Size(128, 128), CV_8UC1);
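// page-locked (pinned) host memory is required for truly asynchronous transfers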
src.create(m.size(), m.type(), cv::gpu::CudaMem::ALLOC_PAGE_LOCKED);
m.copyTo(src.createMatHeader());
}
};
void checkMemSet(cv::gpu::Stream&, int status, void* userData)
{
ASSERT_EQ(cudaSuccess, status);
Async* test = reinterpret_cast<Async*>(userData);
cv::Mat src = test->src;
cv::Mat dst = test->dst;
cv::Mat dst_gold = cv::Mat::zeros(src.size(), src.type());
ASSERT_MAT_NEAR(dst_gold, dst, 0);
}
GPU_TEST_P(Async, MemSet)
{
cv::gpu::Stream stream;
d_dst.upload(src);
stream.enqueueMemSet(d_dst, cv::Scalar::all(0));
stream.enqueueDownload(d_dst, dst);
Async* test = this;
stream.enqueueHostCallback(checkMemSet, test);
stream.waitForCompletion();
}
void checkConvert(cv::gpu::Stream&, int status, void* userData)
{
ASSERT_EQ(cudaSuccess, status);
Async* test = reinterpret_cast<Async*>(userData);
cv::Mat src = test->src;
cv::Mat dst = test->dst;
cv::Mat dst_gold;
src.convertTo(dst_gold, CV_32S);
ASSERT_MAT_NEAR(dst_gold, dst, 0);
}
GPU_TEST_P(Async, Convert)
{
cv::gpu::Stream stream;
stream.enqueueUpload(src, d_src);
stream.enqueueConvert(d_src, d_dst, CV_32S);
stream.enqueueDownload(d_dst, dst);
Async* test = this;
stream.enqueueHostCallback(checkConvert, test);
stream.waitForCompletion();
}
INSTANTIATE_TEST_CASE_P(GPU_Stream, Async, ALL_DEVICES);
#endif
#endif // HAVE_CUDA
...@@ -641,6 +641,72 @@ Calculate an optical flow using "SimpleFlow" algorithm.
See [Tao2012]_ and the project site: http://graphics.berkeley.edu/papers/Tao-SAN-2012-05/.
createOptFlow_DualTVL1
----------------------
"Dual TV L1" Optical Flow Algorithm.
.. ocv:function:: Ptr<DenseOpticalFlow> createOptFlow_DualTVL1()
The function creates an implementation of the "Dual TV L1" optical flow algorithm described in [Zach2007]_ and [Javier2012]_.
Here are the important members of the returned class that control the algorithm; you can set them after constructing an instance:
.. ocv:member:: double tau
Time step of the numerical scheme.
.. ocv:member:: double lambda
Weight parameter for the data term (attachment parameter). This is the most relevant parameter: it determines the smoothness of the output. The smaller this parameter is, the smoother the solution. The value depends on the range of motion in the images, so it should be adapted to each image sequence.
.. ocv:member:: double theta
Weight parameter for (u - v)^2, tightness parameter. It serves as a link between the attachment and the regularization terms. In theory, it should have a small value in order to maintain both parts in correspondence. The method is stable for a large range of values of this parameter.
.. ocv:member:: int nscales
Number of scales used to create the pyramid of images.
.. ocv:member:: int warps
Number of warpings per scale. Represents the number of times that I1(x+u0) and grad( I1(x+u0) ) are computed per scale. This parameter helps ensure the stability of the method. It also affects the running time, so it is a compromise between speed and accuracy.
.. ocv:member:: double epsilon
Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time. A small value will yield more accurate solutions at the expense of a slower convergence.
.. ocv:member:: int iterations
Maximum number of iterations used as a stopping criterion in the numerical scheme.
DenseOpticalFlow::calc
--------------------------
Calculates an optical flow.
.. ocv:function:: void DenseOpticalFlow::calc(InputArray I0, InputArray I1, InputOutputArray flow)
:param I0: first 8-bit single-channel input image.
:param I1: second input image of the same size and the same type as ``I0``.
:param flow: computed flow image that has the same size as ``I0`` and type ``CV_32FC2``.
DenseOpticalFlow::collectGarbage
--------------------------------
Releases all inner buffers.
.. ocv:function:: void DenseOpticalFlow::collectGarbage()
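A minimal usage sketch (``frame0`` and ``frame1`` are assumed to be preloaded single-channel grayscale frames): ::

    cv::Ptr<cv::DenseOpticalFlow> tvl1 = cv::createOptFlow_DualTVL1();
    cv::Mat flow;                      // receives CV_32FC2 flow vectors
    tvl1->calc(frame0, frame1, flow);
    tvl1->collectGarbage();            // release inner buffers when done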
.. [Bouguet00] Jean-Yves Bouguet. Pyramidal Implementation of the Lucas Kanade Feature Tracker.
.. [Bradski98] Bradski, G.R. "Computer Vision Face Tracking for Use in a Perceptual User Interface", Intel, 1998
...@@ -658,3 +724,7 @@ See [Tao2012]_. And site of project - http://graphics.berkeley.edu/papers/Tao-SA
.. [Welch95] Greg Welch and Gary Bishop “An Introduction to the Kalman Filter”, 1995
.. [Tao2012] Michael Tao, Jiamin Bai, Pushmeet Kohli and Sylvain Paris. SimpleFlow: A Non-iterative, Sublinear Optical Flow Algorithm. Computer Graphics Forum (Eurographics 2012)
.. [Zach2007] C. Zach, T. Pock and H. Bischof. "A Duality Based Approach for Realtime TV-L1 Optical Flow", In Proceedings of Pattern Recognition (DAGM), Heidelberg, Germany, pp. 214-223, 2007
.. [Javier2012] Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
...@@ -352,6 +352,20 @@ CV_EXPORTS_W void calcOpticalFlowSF(Mat& from,
double upscale_sigma_color,
double speed_up_thr);
class CV_EXPORTS DenseOpticalFlow : public Algorithm
{
public:
virtual void calc(InputArray I0, InputArray I1, InputOutputArray flow) = 0;
virtual void collectGarbage() = 0;
};
// Implementation of the Zach, Pock and Bischof Dual TV-L1 Optical Flow method
//
// see reference:
// [1] C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow".
// [2] Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
CV_EXPORTS Ptr<DenseOpticalFlow> createOptFlow_DualTVL1();
}
#endif
#include "perf_precomp.hpp"
using namespace std;
using namespace cv;
using namespace perf;
typedef TestBaseWithParam< pair<string, string> > ImagePair;
pair<string, string> impair(const char* im1, const char* im2)
{
return make_pair(string(im1), string(im2));
}
PERF_TEST_P(ImagePair, OpticalFlowDual_TVL1, testing::Values(impair("cv/optflow/RubberWhale1.png", "cv/optflow/RubberWhale2.png")))
{
declare.time(40);
Mat frame1 = imread(getDataPath(GetParam().first), IMREAD_GRAYSCALE);
Mat frame2 = imread(getDataPath(GetParam().second), IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
ASSERT_FALSE(frame2.empty());
Mat flow;
Ptr<DenseOpticalFlow> tvl1 = createOptFlow_DualTVL1();
TEST_CYCLE_N(10) tvl1->calc(frame1, frame2, flow);
SANITY_CHECK(flow, 0.5);
}
#include <cmath>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/gpu/gpu.hpp"
using namespace std;
using namespace cv;
using namespace cv::gpu;
static void help()
{
cout << "This program demonstrates line finding with the Hough transform." << endl;
cout << "Usage:" << endl;
cout << "./gpu-example-houghlines <image_name>, Default is pic1.png\n" << endl;
}
int main(int argc, const char* argv[])
{
const string filename = argc >= 2 ? argv[1] : "pic1.png";
Mat src = imread(filename, IMREAD_GRAYSCALE);
if (src.empty())
{
help();
cout << "can not open " << filename << endl;
return -1;
}
Mat mask;
Canny(src, mask, 100, 200, 3);
Mat dst_cpu;
cvtColor(mask, dst_cpu, CV_GRAY2BGR);
Mat dst_gpu = dst_cpu.clone();
vector<Vec4i> lines_cpu;
{
const int64 start = getTickCount();
HoughLinesP(mask, lines_cpu, 1, CV_PI / 180, 50, 60, 5);
const double timeSec = (getTickCount() - start) / getTickFrequency();
cout << "CPU Time : " << timeSec * 1000 << " ms" << endl;
cout << "CPU Found : " << lines_cpu.size() << endl;
}
for (size_t i = 0; i < lines_cpu.size(); ++i)
{
Vec4i l = lines_cpu[i];
line(dst_cpu, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0, 0, 255), 3, CV_AA);
}
GpuMat d_src(mask);
GpuMat d_lines;
HoughLinesBuf d_buf;
{
const int64 start = getTickCount();
gpu::HoughLinesP(d_src, d_lines, d_buf, 1.0f, (float) (CV_PI / 180.0f), 50, 5);
const double timeSec = (getTickCount() - start) / getTickFrequency();
cout << "GPU Time : " << timeSec * 1000 << " ms" << endl;
cout << "GPU Found : " << d_lines.cols << endl;
}
vector<Vec4i> lines_gpu;
if (!d_lines.empty())
{
lines_gpu.resize(d_lines.cols);
Mat h_lines(1, d_lines.cols, CV_32SC4, &lines_gpu[0]);
d_lines.download(h_lines);
}
for (size_t i = 0; i < lines_gpu.size(); ++i)
{
Vec4i l = lines_gpu[i];
line(dst_gpu, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0, 0, 255), 3, CV_AA);
}
imshow("source", src);
imshow("detected lines [CPU]", dst_cpu);
imshow("detected lines [GPU]", dst_gpu);
waitKey();
return 0;
}