Commit 242e4254 authored by Roman Donchenko, committed by OpenCV Buildbot

Merge pull request #1427 from SpecLad:merge-2.4

parents 72384798 95c2e8b5
@@ -8,3 +8,4 @@ tegra/
 .*.swp
 tags
 Thumbs.db
+*.autosave
@@ -33,6 +33,7 @@
 # <add extra installation rules>
 # ocv_add_accuracy_tests(<extra dependencies>)
 # ocv_add_perf_tests(<extra dependencies>)
+# ocv_add_samples(<extra dependencies>)
 #
 #
 # If module have no "extra" then you can define it in one line:
@@ -581,6 +582,7 @@ macro(ocv_define_module module_name)
   ocv_add_accuracy_tests()
   ocv_add_perf_tests()
+  ocv_add_samples()
 endmacro()
 # ensures that all passed modules are available
@@ -725,6 +727,48 @@ function(ocv_add_accuracy_tests)
   endif()
 endfunction()
+
+function(ocv_add_samples)
+  set(samples_path "${CMAKE_CURRENT_SOURCE_DIR}/samples")
+  string(REGEX REPLACE "^opencv_" "" module_id ${the_module})
+
+  if(BUILD_EXAMPLES AND EXISTS "${samples_path}")
+    set(samples_deps ${the_module} ${OPENCV_MODULE_${the_module}_DEPS} opencv_highgui ${ARGN})
+    ocv_check_dependencies(${samples_deps})
+
+    if(OCV_DEPENDENCIES_FOUND)
+      file(GLOB sample_sources "${samples_path}/*.cpp")
+      ocv_include_modules(${OPENCV_MODULE_${the_module}_DEPS})
+
+      foreach(source ${sample_sources})
+        get_filename_component(name "${source}" NAME_WE)
+        set(the_target "example_${module_id}_${name}")
+
+        add_executable(${the_target} "${source}")
+        target_link_libraries(${the_target} ${samples_deps})
+        set_target_properties(${the_target} PROPERTIES PROJECT_LABEL "(sample) ${name}")
+
+        if(ENABLE_SOLUTION_FOLDERS)
+          set_target_properties(${the_target} PROPERTIES
+            OUTPUT_NAME "${module_id}-example-${name}"
+            FOLDER "samples/${module_id}")
+        endif()
+
+        if(WIN32)
+          install(TARGETS ${the_target} RUNTIME DESTINATION "samples/${module_id}" COMPONENT main)
+        endif()
+      endforeach()
+    endif()
+  endif()
+
+  if(INSTALL_C_EXAMPLES AND NOT WIN32 AND EXISTS "${samples_path}")
+    file(GLOB sample_files "${samples_path}/*")
+    install(FILES ${sample_files}
+            DESTINATION share/OpenCV/samples/${module_id}
+            PERMISSIONS OWNER_READ GROUP_READ WORLD_READ)
+  endif()
+endfunction()
 # internal macro; finds all link dependencies of the module
 # should be used at the end of CMake processing
 macro(__ocv_track_module_link_dependencies the_module optkind)
...
@@ -40,7 +40,7 @@ Code
 * Display the detected circle in a window.
 .. |TutorialHoughCirclesSimpleDownload| replace:: here
-.. _TutorialHoughCirclesSimpleDownload: http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/houghlines.cpp
+.. _TutorialHoughCirclesSimpleDownload: http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/houghcircles.cpp
 .. |TutorialHoughCirclesFancyDownload| replace:: here
 .. _TutorialHoughCirclesFancyDownload: http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp
...
@@ -70,6 +70,8 @@ Moreover every :ocv:class:`FaceRecognizer` supports the:
 * **Loading/Saving** the model state from/to a given XML or YAML.
+
+.. note:: When using the FaceRecognizer interface in combination with Python, please stick to Python 2. Some underlying scripts like create_csv will not work in other versions, like Python 3.
 Setting the Thresholds
 +++++++++++++++++++++++
...
@@ -257,8 +257,7 @@ public:
     const T& cast() const
     {
         if (policy->type() != typeid(T)) throw anyimpl::bad_any_cast();
-        void* obj = const_cast<void*>(object);
-        T* r = reinterpret_cast<T*>(policy->get_value(&obj));
+        T* r = reinterpret_cast<T*>(policy->get_value(const_cast<void **>(&object)));
         return *r;
     }
...
@@ -194,7 +194,7 @@ namespace cv { namespace gpu { namespace cudev
     }
     template <typename T>
-    void call_resize_nearest_tex(const PtrStepSz<T>& src, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx)
+    void call_resize_nearest_tex(const PtrStepSz<T>& /*src*/, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx)
     {
         const dim3 block(32, 8);
         const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
@@ -301,7 +301,7 @@ namespace cv { namespace gpu { namespace cudev
     template <typename T> struct ResizeNearestDispatcher
     {
-        static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx, cudaStream_t stream)
+        static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& /*srcWhole*/, int /*yoff*/, int /*xoff*/, const PtrStepSz<T>& dst, float fy, float fx, cudaStream_t stream)
         {
             call_resize_nearest_glob(src, dst, fy, fx, stream);
         }
...
@@ -412,6 +412,28 @@ http://www.dai.ed.ac.uk/CVonline/LOCAL\_COPIES/MANDUCHI1/Bilateral\_Filtering.ht
 This filter does not work inplace.
+
+adaptiveBilateralFilter
+-----------------------
+Applies the adaptive bilateral filter to an image.
+
+.. ocv:function:: void adaptiveBilateralFilter( InputArray src, OutputArray dst, Size ksize, double sigmaSpace, Point anchor=Point(-1, -1), int borderType=BORDER_DEFAULT )
+
+.. ocv:pyfunction:: cv2.adaptiveBilateralFilter(src, ksize, sigmaSpace[, dst[, anchor[, borderType]]]) -> dst
+
+    :param src: Source 8-bit, 1-channel or 3-channel image.
+
+    :param dst: Destination image of the same size and type as ``src``.
+
+    :param ksize: Filter kernel size.
+
+    :param sigmaSpace: Filter sigma in the coordinate space. It has a meaning similar to ``sigmaSpace`` in ``bilateralFilter``.
+
+    :param anchor: Anchor point; the default value ``Point(-1,-1)`` means that the anchor is at the kernel center. Only the default value is supported now.
+
+    :param borderType: Border mode used to extrapolate pixels outside of the image.
+
+The function applies adaptive bilateral filtering to the input image. This filter is similar to ``bilateralFilter``, in that dissimilarity from and distance to the center pixel are penalized. Instead of using a fixed ``sigmaColor``, it employs the variance of pixel values in the neighbourhood.
 blur
...
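A minimal usage sketch of the new function (not part of the patch; the include paths follow the master-branch layout used elsewhere in this commit, and the file names are placeholders):

#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"

int main()
{
    cv::Mat src = cv::imread("input.jpg");   // placeholder path; yields an 8-bit, 3-channel image
    cv::Mat dst;

    // 9x9 kernel, spatial sigma 15.0; anchor and borderType keep their defaults
    cv::adaptiveBilateralFilter(src, dst, cv::Size(9, 9), 15.0);

    cv::imwrite("filtered.jpg", dst);
    return 0;
}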
@@ -1060,6 +1060,11 @@ CV_EXPORTS_W void bilateralFilter( InputArray src, OutputArray dst, int d,
                                    double sigmaColor, double sigmaSpace,
                                    int borderType = BORDER_DEFAULT );
+
+//! smooths the image using adaptive bilateral filter
+CV_EXPORTS_W void adaptiveBilateralFilter( InputArray src, OutputArray dst, Size ksize,
+                                           double sigmaSpace, Point anchor=Point(-1, -1),
+                                           int borderType=BORDER_DEFAULT );
 //! smooths the image using the box filter. Each pixel is processed in O(1) time
 CV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth,
                              Size ksize, Point anchor = Point(-1,-1),
...
@@ -254,19 +254,19 @@ bool CvtColorIPPLoopCopy(Mat& src, Mat& dst, const Cvt& cvt)
     return ok;
 }
-IppStatus __stdcall ippiSwapChannels_8u_C3C4Rf(const Ipp8u* pSrc, int srcStep, Ipp8u* pDst, int dstStep,
+static IppStatus CV_STDCALL ippiSwapChannels_8u_C3C4Rf(const Ipp8u* pSrc, int srcStep, Ipp8u* pDst, int dstStep,
     IppiSize roiSize, const int *dstOrder)
 {
     return ippiSwapChannels_8u_C3C4R(pSrc, srcStep, pDst, dstStep, roiSize, dstOrder, MAX_IPP8u);
 }
-IppStatus __stdcall ippiSwapChannels_16u_C3C4Rf(const Ipp16u* pSrc, int srcStep, Ipp16u* pDst, int dstStep,
+static IppStatus CV_STDCALL ippiSwapChannels_16u_C3C4Rf(const Ipp16u* pSrc, int srcStep, Ipp16u* pDst, int dstStep,
     IppiSize roiSize, const int *dstOrder)
 {
     return ippiSwapChannels_16u_C3C4R(pSrc, srcStep, pDst, dstStep, roiSize, dstOrder, MAX_IPP16u);
 }
-IppStatus __stdcall ippiSwapChannels_32f_C3C4Rf(const Ipp32f* pSrc, int srcStep, Ipp32f* pDst, int dstStep,
+static IppStatus CV_STDCALL ippiSwapChannels_32f_C3C4Rf(const Ipp32f* pSrc, int srcStep, Ipp32f* pDst, int dstStep,
     IppiSize roiSize, const int *dstOrder)
 {
     return ippiSwapChannels_32f_C3C4R(pSrc, srcStep, pDst, dstStep, roiSize, dstOrder, MAX_IPP32f);
...
...
@@ -1213,11 +1213,10 @@ static bool IPPMorphReplicate(int op, const Mat &src, Mat &dst, const Mat &kernel,
 }
 static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst,
-                       InputArray _kernel,
-                       const Point &anchor, int iterations,
+                       const Mat& _kernel, Point anchor, int iterations,
                        int borderType, const Scalar &borderValue)
 {
-    Mat src = _src.getMat(), kernel = _kernel.getMat();
+    Mat src = _src.getMat(), kernel = _kernel;
     if( !( src.depth() == CV_8U || src.depth() == CV_32F ) || ( iterations > 1 ) ||
         !( borderType == cv::BORDER_REPLICATE || (borderType == cv::BORDER_CONSTANT && borderValue == morphologyDefaultBorderValue()) )
         || !( op == MORPH_DILATE || op == MORPH_ERODE) )
@@ -1248,9 +1247,6 @@ static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst,
     }
     Size ksize = kernel.data ? kernel.size() : Size(3,3);
-    Point normanchor = normalizeAnchor(anchor, ksize);
-    CV_Assert( normanchor.inside(Rect(0, 0, ksize.width, ksize.height)) );
     _dst.create( src.size(), src.type() );
     Mat dst = _dst.getMat();
@@ -1265,7 +1261,7 @@ static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst,
     if( !kernel.data )
     {
         ksize = Size(1+iterations*2,1+iterations*2);
-        normanchor = Point(iterations, iterations);
+        anchor = Point(iterations, iterations);
         rectKernel = true;
         iterations = 1;
     }
@@ -1273,7 +1269,7 @@ static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst,
     {
         ksize = Size(ksize.width + (iterations-1)*(ksize.width-1),
                      ksize.height + (iterations-1)*(ksize.height-1)),
-        normanchor = Point(normanchor.x*iterations, normanchor.y*iterations);
+        anchor = Point(anchor.x*iterations, anchor.y*iterations);
         kernel = Mat();
         rectKernel = true;
         iterations = 1;
@@ -1283,7 +1279,7 @@ static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst,
     if( iterations > 1 )
         return false;
-    return IPPMorphReplicate( op, src, dst, kernel, ksize, normanchor, rectKernel );
+    return IPPMorphReplicate( op, src, dst, kernel, ksize, anchor, rectKernel );
 }
 #endif
@@ -1292,17 +1288,18 @@ static void morphOp( int op, InputArray _src, OutputArray _dst,
                      Point anchor, int iterations,
                      int borderType, const Scalar& borderValue )
 {
+    Mat kernel = _kernel.getMat();
+    Size ksize = kernel.data ? kernel.size() : Size(3,3);
+    anchor = normalizeAnchor(anchor, ksize);
+    CV_Assert( anchor.inside(Rect(0, 0, ksize.width, ksize.height)) );
+
 #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
-    if( IPPMorphOp(op, _src, _dst, _kernel, anchor, iterations, borderType, borderValue) )
+    if( IPPMorphOp(op, _src, _dst, kernel, anchor, iterations, borderType, borderValue) )
         return;
 #endif
-    Mat src = _src.getMat(), kernel = _kernel.getMat();
-    Size ksize = kernel.data ? kernel.size() : Size(3,3);
-    anchor = normalizeAnchor(anchor, ksize);
-    CV_Assert( anchor.inside(Rect(0, 0, ksize.width, ksize.height)) );
+    Mat src = _src.getMat();
     _dst.create( src.size(), src.type() );
     Mat dst = _dst.getMat();
...
...
@@ -251,7 +251,7 @@ namespace cvtest
 int CV_BilateralFilterTest::validate_test_results(int test_case_index)
 {
-    static const double eps = 1;
+    static const double eps = 4;
     Mat reference_dst, reference_src;
     if (_src.depth() == CV_32F)
...
@@ -1424,7 +1424,7 @@ TEST(Imgproc_fitLine_vector_2d, regression)
 TEST(Imgproc_fitLine_Mat_2dC2, regression)
 {
-    cv::Mat mat1(3, 1, CV_32SC2);
+    cv::Mat mat1 = Mat::zeros(3, 1, CV_32SC2);
     std::vector<float> line1;
     cv::fitLine(mat1, line1, CV_DIST_L2, 0 ,0 ,0);
@@ -1444,7 +1444,7 @@ TEST(Imgproc_fitLine_Mat_2dC1, regression)
 TEST(Imgproc_fitLine_Mat_3dC3, regression)
 {
-    cv::Mat mat1(2, 1, CV_32SC3);
+    cv::Mat mat1 = Mat::zeros(2, 1, CV_32SC3);
     std::vector<float> line1;
     cv::fitLine(mat1, line1, CV_DIST_L2, 0 ,0 ,0);
@@ -1454,7 +1454,7 @@ TEST(Imgproc_fitLine_Mat_3dC3, regression)
 TEST(Imgproc_fitLine_Mat_3dC1, regression)
 {
-    cv::Mat mat2(2, 3, CV_32SC1);
+    cv::Mat mat2 = Mat::zeros(2, 3, CV_32SC1);
     std::vector<float> line2;
     cv::fitLine(mat2, line2, CV_DIST_L2, 0 ,0 ,0);
...
@@ -678,8 +678,8 @@ void CV_Remap_Test::generate_test_data()
     MatIterator_<Vec2s> begin_x = mapx.begin<Vec2s>(), end_x = mapx.end<Vec2s>();
     for ( ; begin_x != end_x; ++begin_x)
     {
-        begin_x[0] = static_cast<short>(rng.uniform(static_cast<int>(_n), std::max(src.cols + n - 1, 0)));
-        begin_x[1] = static_cast<short>(rng.uniform(static_cast<int>(_n), std::max(src.rows + n - 1, 0)));
+        (*begin_x)[0] = static_cast<short>(rng.uniform(static_cast<int>(_n), std::max(src.cols + n - 1, 0)));
+        (*begin_x)[1] = static_cast<short>(rng.uniform(static_cast<int>(_n), std::max(src.rows + n - 1, 0)));
     }
     if (interpolation != INTER_NEAREST)
...
@@ -3,7 +3,7 @@
           package="org.opencv.test"
           android:versionCode="1"
           android:versionName="1.0">
     <uses-sdk android:minSdkVersion="8" />
     <!-- We add an application tag here just so that we can indicate that
@@ -20,7 +20,7 @@
     <instrumentation android:name="org.opencv.test.OpenCVTestRunner"
                      android:targetPackage="org.opencv.test"
                      android:label="Tests for org.opencv"/>
     <uses-permission android:name="android.permission.CAMERA"/>
     <uses-feature android:name="android.hardware.camera" />
     <uses-feature android:name="android.hardware.camera.autofocus" />
...
@@ -4,9 +4,9 @@
     android:layout_width="fill_parent"
     android:layout_height="fill_parent"
     >
     <TextView
         android:layout_width="fill_parent"
         android:layout_height="wrap_content"
         android:text="@string/hello"
         />
 </LinearLayout>
@@ -162,7 +162,7 @@ ocl::bilateralFilter
 --------------------
 Returns void
-.. ocv:function:: void ocl::bilateralFilter(const oclMat &src, oclMat &dst, int d, double sigmaColor, double sigmaSpave, int borderType=BORDER_DEFAULT)
+.. ocv:function:: void ocl::bilateralFilter(const oclMat &src, oclMat &dst, int d, double sigmaColor, double sigmaSpace, int borderType=BORDER_DEFAULT)
     :param src: The source image
...
@@ -519,7 +519,15 @@ namespace cv
         //! bilateralFilter
         // supports 8UC1 8UC4
-        CV_EXPORTS void bilateralFilter(const oclMat& src, oclMat& dst, int d, double sigmaColor, double sigmaSpave, int borderType=BORDER_DEFAULT);
+        CV_EXPORTS void bilateralFilter(const oclMat& src, oclMat& dst, int d, double sigmaColor, double sigmaSpace, int borderType=BORDER_DEFAULT);
+
+        //! Applies an adaptive bilateral filter to the input image
+        // This is not truly a bilateral filter. Instead of using user provided fixed parameters,
+        // the function calculates a constant at each window based on local standard deviation,
+        // and use this constant to do filtering.
+        // supports 8UC1 8UC3
+        CV_EXPORTS void adaptiveBilateralFilter(const oclMat& src, oclMat& dst, Size ksize, double sigmaSpace, Point anchor = Point(-1, -1), int borderType=BORDER_DEFAULT);
+
         //! computes exponent of each matrix element (b = e**a)
         // supports only CV_32FC1 type
         CV_EXPORTS void exp(const oclMat &a, oclMat &b);
@@ -1797,6 +1805,155 @@ namespace cv
         // keys = {1, 2, 3} (CV_8UC1)
         // values = {6,2, 10,5, 4,3} (CV_8UC2)
         void CV_EXPORTS sortByKey(oclMat& keys, oclMat& values, int method, bool isGreaterThan = false);
+
+        /*! Base class for MOG and MOG2 */
+        class CV_EXPORTS BackgroundSubtractor
+        {
+        public:
+            //! the virtual destructor
+            virtual ~BackgroundSubtractor();
+            //! the update operator that takes the next video frame and returns the current foreground mask as an 8-bit binary image.
+            virtual void operator()(const oclMat& image, oclMat& fgmask, float learningRate);
+            //! computes a background image
+            virtual void getBackgroundImage(oclMat& backgroundImage) const = 0;
+        };
+
+        /*!
+        Gaussian Mixture-based Background/Foreground Segmentation Algorithm
+
+        The class implements the following algorithm:
+        "An improved adaptive background mixture model for real-time tracking with shadow detection"
+        P. KadewTraKuPong and R. Bowden,
+        Proc. 2nd European Workshop on Advanced Video-Based Surveillance Systems, 2001
+        http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
+        */
+        class CV_EXPORTS MOG: public cv::ocl::BackgroundSubtractor
+        {
+        public:
+            //! the default constructor
+            MOG(int nmixtures = -1);
+            //! re-initialization method
+            void initialize(Size frameSize, int frameType);
+            //! the update operator
+            void operator()(const oclMat& frame, oclMat& fgmask, float learningRate = 0.f);
+            //! computes a background image which is the mean of all background Gaussians
+            void getBackgroundImage(oclMat& backgroundImage) const;
+            //! releases all inner buffers
+            void release();
+
+            int history;
+            float varThreshold;
+            float backgroundRatio;
+            float noiseSigma;
+
+        private:
+            int nmixtures_;
+            Size frameSize_;
+            int frameType_;
+            int nframes_;
+            oclMat weight_;
+            oclMat sortKey_;
+            oclMat mean_;
+            oclMat var_;
+        };
+
+        /*!
+        The class implements the following algorithm:
+        "Improved adaptive Gaussian mixture model for background subtraction"
+        Z. Zivkovic,
+        International Conference on Pattern Recognition, UK, August 2004
+        http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf
+        */
+        class CV_EXPORTS MOG2: public cv::ocl::BackgroundSubtractor
+        {
+        public:
+            //! the default constructor
+            MOG2(int nmixtures = -1);
+            //! re-initialization method
+            void initialize(Size frameSize, int frameType);
+            //! the update operator
+            void operator()(const oclMat& frame, oclMat& fgmask, float learningRate = -1.0f);
+            //! computes a background image which is the mean of all background Gaussians
+            void getBackgroundImage(oclMat& backgroundImage) const;
+            //! releases all inner buffers
+            void release();
+
+            // parameters
+            // you should call initialize after parameter changes
+            int history;
+
+            //! here it is the maximum allowed number of mixture components.
+            //! Actual number is determined dynamically per pixel
+            float varThreshold;
+            // threshold on the squared Mahalanobis distance to decide if it is well described
+            // by the background model or not. Related to Cthr from the paper.
+            // This does not influence the update of the background. A typical value could be 4 sigma,
+            // that is varThreshold=4*4=16; corresponds to Tb in the paper.
+
+            /////////////////////////
+            // less important parameters - things you might change but be careful
+            ////////////////////////
+            float backgroundRatio;
+            // corresponds to fTB=1-cf from the paper
+            // TB - threshold when the component becomes significant enough to be included into
+            // the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.9.
+            // For alpha=0.001 it means that the mode should exist for approximately 105 frames before
+            // it is considered foreground
+            // float noiseSigma;
+            float varThresholdGen;
+            // corresponds to Tg - threshold on the squared Mahalanobis distance to decide
+            // when a sample is close to the existing components. If it is not close
+            // to any, a new component will be generated. I use 3 sigma => Tg=3*3=9.
+            // Smaller Tg leads to more generated components; higher Tg might lead
+            // to a small number of components but they can grow too large
+            float fVarInit;
+            float fVarMin;
+            float fVarMax;
+            // initial variance for the newly generated components.
+            // It will influence the speed of adaptation. A good guess should be made.
+            // A simple way is to estimate the typical standard deviation from the images.
+            // I used here 10 as a reasonable value
+            // min and max can be used to further control the variance
+            float fCT; // CT - complexity reduction prior
+            // this is related to the number of samples needed to accept that a component
+            // actually exists. We use CT=0.05 of all the samples. By setting CT=0 you get
+            // the standard Stauffer&Grimson algorithm (maybe not exact but very similar)
+
+            // shadow detection parameters
+            bool bShadowDetection; // default 1 - do shadow detection
+            unsigned char nShadowDetection; // do shadow detection - insert this value as the detection result - 127 default value
+            float fTau;
+            // Tau - shadow threshold. The shadow is detected if the pixel is a darker
+            // version of the background. Tau is a threshold on how much darker the shadow can be.
+            // Tau = 0.5 means that if a pixel is more than 2 times darker then it is not shadow.
+            // See: Prati, Mikic, Trivedi, Cucchiara, "Detecting Moving Shadows...", IEEE PAMI, 2003.
+
+        private:
+            int nmixtures_;
+
+            Size frameSize_;
+            int frameType_;
+            int nframes_;
+
+            oclMat weight_;
+            oclMat variance_;
+            oclMat mean_;
+
+            oclMat bgmodelUsedModes_; // keep track of number of modes per pixel
+        };
     }
 }
 #if defined _MSC_VER && _MSC_VER >= 1200
...
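A minimal sketch of driving the MOG2 class declared above (not part of the patch; the header paths and the video file name are assumptions):

#include "opencv2/ocl.hpp"
#include "opencv2/highgui.hpp"

int main()
{
    cv::VideoCapture cap("video.avi");       // placeholder clip
    cv::ocl::MOG2 mog2;                      // default number of mixture components
    cv::Mat frame, fgmask;
    cv::ocl::oclMat d_frame, d_fgmask, d_background;

    while (cap.read(frame))
    {
        d_frame.upload(frame);
        mog2(d_frame, d_fgmask);             // update the model; the mask comes back as 8-bit binary
        d_fgmask.download(fgmask);           // copy the mask back to the host
    }

    mog2.getBackgroundImage(d_background);   // mean of the background Gaussians
    return 0;
}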
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Fangfang Bai, fangfang@multicorewareinc.com
// Jin Ma, jin@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors as is and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
using namespace perf;
using namespace std;
using namespace cv::ocl;
using namespace cv;
using std::tr1::tuple;
using std::tr1::get;
#if defined(HAVE_XINE) || \
defined(HAVE_GSTREAMER) || \
defined(HAVE_QUICKTIME) || \
defined(HAVE_AVFOUNDATION) || \
defined(HAVE_FFMPEG) || \
defined(WIN32)
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 1
#else
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
#endif
#if BUILD_WITH_VIDEO_INPUT_SUPPORT

static void cvtFrameFmt(vector<Mat>& input, vector<Mat>& output)
{
    for(int i = 0; i < (int)(input.size()); i++)
    {
        cvtColor(input[i], output[i], COLOR_RGB2GRAY);
    }
}

// prepare data for CPU
static void prepareData(VideoCapture& cap, int cn, vector<Mat>& frame_buffer)
{
    cv::Mat frame;
    std::vector<Mat> frame_buffer_init;
    int nFrame = (int)frame_buffer.size();
    for(int i = 0; i < nFrame; i++)
    {
        cap >> frame;
        ASSERT_FALSE(frame.empty());
        frame_buffer_init.push_back(frame);
    }

    if(cn == 1)
        cvtFrameFmt(frame_buffer_init, frame_buffer);
    else
        frame_buffer = frame_buffer_init;
}

// copy CPU data to GPU
static void prepareData(vector<Mat>& frame_buffer, vector<oclMat>& frame_buffer_ocl)
{
    for(int i = 0; i < (int)frame_buffer.size(); i++)
        frame_buffer_ocl.push_back(cv::ocl::oclMat(frame_buffer[i]));
}
#endif
///////////// MOG ////////////////////////

#if BUILD_WITH_VIDEO_INPUT_SUPPORT

typedef tuple<string, int, double> VideoMOGParamType;
typedef TestBaseWithParam<VideoMOGParamType> VideoMOGFixture;

PERF_TEST_P(VideoMOGFixture, MOG,
            ::testing::Combine(::testing::Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"),
                               ::testing::Values(1, 3),
                               ::testing::Values(0.0, 0.01)))
{
    VideoMOGParamType params = GetParam();

    const string inputFile = perf::TestBase::getDataPath(get<0>(params));
    const int cn = get<1>(params);
    const float learningRate = static_cast<float>(get<2>(params));

    const int nFrame = 5;

    Mat foreground_cpu;
    std::vector<Mat> frame_buffer(nFrame);
    std::vector<oclMat> frame_buffer_ocl;

    cv::VideoCapture cap(inputFile);
    ASSERT_TRUE(cap.isOpened());

    prepareData(cap, cn, frame_buffer);

    cv::Mat foreground;
    cv::ocl::oclMat foreground_d;
    if(RUN_PLAIN_IMPL)
    {
        TEST_CYCLE()
        {
            cv::Ptr<cv::BackgroundSubtractorMOG> mog = createBackgroundSubtractorMOG();
            foreground.release();
            for (int i = 0; i < nFrame; i++)
            {
                mog->apply(frame_buffer[i], foreground, learningRate);
            }
        }
        SANITY_CHECK(foreground);
    }
    else if(RUN_OCL_IMPL)
    {
        prepareData(frame_buffer, frame_buffer_ocl);
        CV_Assert((int)(frame_buffer_ocl.size()) == nFrame);
        OCL_TEST_CYCLE()
        {
            cv::ocl::MOG d_mog;
            foreground_d.release();
            for (int i = 0; i < nFrame; ++i)
            {
                d_mog(frame_buffer_ocl[i], foreground_d, learningRate);
            }
        }
        foreground_d.download(foreground);
        SANITY_CHECK(foreground);
    }
    else
        OCL_PERF_ELSE
}
#endif
///////////// MOG2 ////////////////////////

#if BUILD_WITH_VIDEO_INPUT_SUPPORT

typedef tuple<string, int> VideoMOG2ParamType;
typedef TestBaseWithParam<VideoMOG2ParamType> VideoMOG2Fixture;

PERF_TEST_P(VideoMOG2Fixture, MOG2,
            ::testing::Combine(::testing::Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"),
                               ::testing::Values(1, 3)))
{
    VideoMOG2ParamType params = GetParam();

    const string inputFile = perf::TestBase::getDataPath(get<0>(params));
    const int cn = get<1>(params);
    int nFrame = 5;

    std::vector<cv::Mat> frame_buffer(nFrame);
    std::vector<cv::ocl::oclMat> frame_buffer_ocl;

    cv::VideoCapture cap(inputFile);
    ASSERT_TRUE(cap.isOpened());

    prepareData(cap, cn, frame_buffer);

    cv::Mat foreground;
    cv::ocl::oclMat foreground_d;
    if(RUN_PLAIN_IMPL)
    {
        TEST_CYCLE()
        {
            cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 = createBackgroundSubtractorMOG2();
            mog2->set("detectShadows", false);
            foreground.release();
            for (int i = 0; i < nFrame; i++)
            {
                mog2->apply(frame_buffer[i], foreground);
            }
        }
        SANITY_CHECK(foreground);
    }
    else if(RUN_OCL_IMPL)
    {
        prepareData(frame_buffer, frame_buffer_ocl);
        CV_Assert((int)(frame_buffer_ocl.size()) == nFrame);
        OCL_TEST_CYCLE()
        {
            cv::ocl::MOG2 d_mog2;
            foreground_d.release();
            for (int i = 0; i < nFrame; i++)
            {
                d_mog2(frame_buffer_ocl[i], foreground_d);
            }
        }
        foreground_d.download(foreground);
        SANITY_CHECK(foreground);
    }
    else
        OCL_PERF_ELSE
}
#endif
///////////// MOG2_GetBackgroundImage //////////////////

#if BUILD_WITH_VIDEO_INPUT_SUPPORT

typedef TestBaseWithParam<VideoMOG2ParamType> Video_MOG2GetBackgroundImage;

PERF_TEST_P(Video_MOG2GetBackgroundImage, MOG2,
            ::testing::Combine(::testing::Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"),
                               ::testing::Values(3)))
{
    VideoMOG2ParamType params = GetParam();

    const string inputFile = perf::TestBase::getDataPath(get<0>(params));
    const int cn = get<1>(params);
    int nFrame = 5;

    std::vector<cv::Mat> frame_buffer(nFrame);
    std::vector<cv::ocl::oclMat> frame_buffer_ocl;

    cv::VideoCapture cap(inputFile);
    ASSERT_TRUE(cap.isOpened());

    prepareData(cap, cn, frame_buffer);

    cv::Mat foreground;
    cv::Mat background;
    cv::ocl::oclMat foreground_d;
    cv::ocl::oclMat background_d;

    if(RUN_PLAIN_IMPL)
    {
        TEST_CYCLE()
        {
            cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 = createBackgroundSubtractorMOG2();
            mog2->set("detectShadows", false);
            foreground.release();
            background.release();
            for (int i = 0; i < nFrame; i++)
            {
                mog2->apply(frame_buffer[i], foreground);
            }
            mog2->getBackgroundImage(background);
        }
        SANITY_CHECK(background);
    }
    else if(RUN_OCL_IMPL)
    {
        prepareData(frame_buffer, frame_buffer_ocl);
        CV_Assert((int)(frame_buffer_ocl.size()) == nFrame);
        OCL_TEST_CYCLE()
        {
            cv::ocl::MOG2 d_mog2;
            foreground_d.release();
            background_d.release();
            for (int i = 0; i < nFrame; i++)
            {
                d_mog2(frame_buffer_ocl[i], foreground_d);
            }
            d_mog2.getBackgroundImage(background_d);
        }
        background_d.download(background);
        SANITY_CHECK(background);
    }
    else
        OCL_PERF_ELSE
}
#endif
@@ -43,6 +43,7 @@
 // the use of this software, even if advised of the possibility of such damage.
 //
 //M*/
+
 #include "perf_precomp.hpp"
 using namespace perf;
@@ -51,7 +52,9 @@ using namespace perf;
 typedef TestBaseWithParam<Size> dftFixture;
-PERF_TEST_P(dftFixture, DISABLED_dft, OCL_TYPICAL_MAT_SIZES) // TODO not implemented
+#ifdef HAVE_CLAMDFFT
+
+PERF_TEST_P(dftFixture, dft, OCL_TYPICAL_MAT_SIZES)
 {
     const Size srcSize = GetParam();
@@ -70,7 +73,7 @@ PERF_TEST_P(dftFixture, DISABLED_dft, OCL_TYPICAL_MAT_SIZES) // TODO not implemented
         oclDst.download(dst);
-        SANITY_CHECK(dst);
+        SANITY_CHECK(dst, 1.5);
     }
     else if (RUN_PLAIN_IMPL)
     {
@@ -81,3 +84,5 @@ PERF_TEST_P(dftFixture, DISABLED_dft, OCL_TYPICAL_MAT_SIZES) // TODO not implemented
     else
         OCL_PERF_ELSE
 }
+
+#endif
@@ -321,3 +321,82 @@ PERF_TEST_P(filter2DFixture, filter2D,
     else
         OCL_PERF_ELSE
 }
+
+///////////// Bilateral ////////////////////////
+
+typedef Size_MatType BilateralFixture;
+
+PERF_TEST_P(BilateralFixture, Bilateral,
+            ::testing::Combine(OCL_TYPICAL_MAT_SIZES,
+                               OCL_PERF_ENUM(CV_8UC1, CV_8UC3)))
+{
+    const Size_MatType_t params = GetParam();
+    const Size srcSize = get<0>(params);
+    const int type = get<1>(params), d = 7;
+    double sigmacolor = 50.0, sigmaspace = 50.0;
+
+    Mat src(srcSize, type), dst(srcSize, type);
+    declare.in(src, WARMUP_RNG).out(dst);
+
+    if (srcSize == OCL_SIZE_4000 && type == CV_8UC3)
+        declare.time(8);
+
+    if (RUN_OCL_IMPL)
+    {
+        ocl::oclMat oclSrc(src), oclDst(srcSize, type);
+
+        OCL_TEST_CYCLE() cv::ocl::bilateralFilter(oclSrc, oclDst, d, sigmacolor, sigmaspace);
+
+        oclDst.download(dst);
+
+        SANITY_CHECK(dst);
+    }
+    else if (RUN_PLAIN_IMPL)
+    {
+        TEST_CYCLE() cv::bilateralFilter(src, dst, d, sigmacolor, sigmaspace);
+
+        SANITY_CHECK(dst);
+    }
+    else
+        OCL_PERF_ELSE
+}
+
+///////////// adaptiveBilateral ////////////////////////
+
+typedef Size_MatType adaptiveBilateralFixture;
+
+PERF_TEST_P(adaptiveBilateralFixture, adaptiveBilateral,
+            ::testing::Combine(OCL_TYPICAL_MAT_SIZES,
+                               OCL_PERF_ENUM(CV_8UC1, CV_8UC3)))
+{
+    const Size_MatType_t params = GetParam();
+    const Size srcSize = get<0>(params);
+    const int type = get<1>(params);
+    double sigmaspace = 10.0;
+    Size ksize(9, 9);
+
+    Mat src(srcSize, type), dst(srcSize, type);
+    declare.in(src, WARMUP_RNG).out(dst);
+
+    if (srcSize == OCL_SIZE_4000)
+        declare.time(15);
+
+    if (RUN_OCL_IMPL)
+    {
+        ocl::oclMat oclSrc(src), oclDst(srcSize, type);
+
+        OCL_TEST_CYCLE() cv::ocl::adaptiveBilateralFilter(oclSrc, oclDst, ksize, sigmaspace);
+
+        oclDst.download(dst);
+
+        SANITY_CHECK(dst, 1.);
+    }
+    else if (RUN_PLAIN_IMPL)
+    {
+        TEST_CYCLE() cv::adaptiveBilateralFilter(src, dst, ksize, sigmaspace);
+
+        SANITY_CHECK(dst);
+    }
+    else
+        OCL_PERF_ELSE
+}
@@ -51,8 +51,9 @@ using namespace perf;
 typedef TestBaseWithParam<Size> gemmFixture;
-PERF_TEST_P(gemmFixture, DISABLED_gemm,
-            ::testing::Values(OCL_SIZE_1000, OCL_SIZE_2000)) // TODO not implemented
+#ifdef HAVE_CLAMDBLAS
+
+PERF_TEST_P(gemmFixture, gemm, ::testing::Values(OCL_SIZE_1000, OCL_SIZE_2000))
 {
     const Size srcSize = GetParam();
@@ -72,14 +73,16 @@ PERF_TEST_P(gemmFixture, DISABLED_gemm,
         oclDst.download(dst);
-        SANITY_CHECK(dst);
+        SANITY_CHECK(dst, 0.01);
     }
     else if (RUN_PLAIN_IMPL)
     {
         TEST_CYCLE() cv::gemm(src1, src2, 1.0, src3, 1.0, dst);
-        SANITY_CHECK(dst);
+        SANITY_CHECK(dst, 0.01);
     }
     else
         OCL_PERF_ELSE
 }
+
+#endif
@@ -67,6 +67,7 @@
 #include <vector>
 #include <numeric>
+#include "cvconfig.h"
 #include "opencv2/core.hpp"
 #include "opencv2/core/utility.hpp"
 #include "opencv2/imgproc.hpp"
@@ -102,7 +103,7 @@ using namespace cv;
 #ifdef HAVE_OPENCV_GPU
 #define OCL_PERF_ELSE \
     if (RUN_GPU_IMPL) \
         CV_TEST_FAIL_NO_IMPL(); \
     else \
         CV_TEST_FAIL_NO_IMPL();
...
...
@@ -63,6 +63,7 @@ extern const char *filter_sep_row;
 extern const char *filter_sep_col;
 extern const char *filtering_laplacian;
 extern const char *filtering_morph;
+extern const char *filtering_adaptive_bilateral;
 }
 }
@@ -1616,3 +1617,100 @@ void cv::ocl::GaussianBlur(const oclMat &src, oclMat &dst, Size ksize, double sigma1,
     Ptr<FilterEngine_GPU> f = createGaussianFilter_GPU(src.type(), ksize, sigma1, sigma2, bordertype);
     f->apply(src, dst);
 }
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Adaptive Bilateral Filter
+
+void cv::ocl::adaptiveBilateralFilter(const oclMat& src, oclMat& dst, Size ksize, double sigmaSpace, Point anchor, int borderType)
+{
+    CV_Assert((ksize.width & 1) && (ksize.height & 1));        // ksize must be odd
+    CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC3); // source must be an 8-bit, 1- or 3-channel image
+
+    if( sigmaSpace <= 0 )
+        sigmaSpace = 1;
+
+    Mat lut(Size(ksize.width, ksize.height), CV_32FC1);
+    double sigma2 = sigmaSpace * sigmaSpace;
+    int idx = 0;
+    int w = ksize.width / 2;
+    int h = ksize.height / 2;
+    for(int y = -h; y <= h; y++)
+        for(int x = -w; x <= w; x++)
+        {
+            lut.at<float>(idx++) = sigma2 / (sigma2 + x * x + y * y);
+        }
+    oclMat dlut(lut);
+
+    int depth = src.depth();
+    int cn = src.oclchannels();
+
+    normalizeAnchor(anchor, ksize);
+    const static String kernelName = "edgeEnhancingFilter";
+
+    dst.create(src.size(), src.type());
+
+    char btype[30];
+    switch(borderType)
+    {
+    case BORDER_CONSTANT:
+        sprintf(btype, "BORDER_CONSTANT");
+        break;
+    case BORDER_REPLICATE:
+        sprintf(btype, "BORDER_REPLICATE");
+        break;
+    case BORDER_REFLECT:
+        sprintf(btype, "BORDER_REFLECT");
+        break;
+    case BORDER_WRAP:
+        sprintf(btype, "BORDER_WRAP");
+        break;
+    case BORDER_REFLECT101:
+        sprintf(btype, "BORDER_REFLECT_101");
+        break;
+    default:
+        CV_Error(CV_StsBadArg, "This border type is not supported");
+        break;
+    }
+
+    // the following constants may be adjusted for performance concerns
+    const static size_t blockSizeX = 64, blockSizeY = 1, EXTRA = ksize.height - 1;
+
+    // normalize the result by default
+    const float alpha = ksize.height * ksize.width;
+
+    const size_t gSize = blockSizeX - ksize.width / 2 * 2;
+    const size_t globalSizeX = (src.cols) % gSize == 0 ?
+        src.cols / gSize * blockSizeX :
+        (src.cols / gSize + 1) * blockSizeX;
+    const size_t rows_per_thread = 1 + EXTRA;
+    const size_t globalSizeY = ((src.rows + rows_per_thread - 1) / rows_per_thread) % blockSizeY == 0 ?
+        ((src.rows + rows_per_thread - 1) / rows_per_thread) :
+        (((src.rows + rows_per_thread - 1) / rows_per_thread) / blockSizeY + 1) * blockSizeY;
+
+    size_t globalThreads[3] = { globalSizeX, globalSizeY, 1 };
+    size_t localThreads[3]  = { blockSizeX, blockSizeY, 1 };
+
+    char build_options[250];
+    // LDATATYPESIZE is sizeof local data store; this is to exemplify the effect of LDS on kernel performance
+    sprintf(build_options,
+            "-D VAR_PER_CHANNEL=1 -D CALCVAR=1 -D FIXED_WEIGHT=0 -D EXTRA=%d"
+            " -D THREADS=%d -D anX=%d -D anY=%d -D ksX=%d -D ksY=%d -D %s",
+            static_cast<int>(EXTRA), static_cast<int>(blockSizeX), anchor.x, anchor.y, ksize.width, ksize.height, btype);
+
+    std::vector<std::pair<size_t, const void *> > args;
+    args.push_back(std::make_pair(sizeof(cl_mem), &src.data));
+    args.push_back(std::make_pair(sizeof(cl_mem), &dst.data));
+    args.push_back(std::make_pair(sizeof(cl_float), (void *)&alpha));
+    args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.offset));
+    args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.wholerows));
+    args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.wholecols));
+    args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.step));
+    args.push_back(std::make_pair(sizeof(cl_int), (void *)&dst.offset));
+    args.push_back(std::make_pair(sizeof(cl_int), (void *)&dst.rows));
+    args.push_back(std::make_pair(sizeof(cl_int), (void *)&dst.cols));
+    args.push_back(std::make_pair(sizeof(cl_int), (void *)&dst.step));
+    args.push_back(std::make_pair(sizeof(cl_mem), &dlut.data));
+    int lut_step = dlut.step1();
+    args.push_back(std::make_pair(sizeof(cl_int), (void *)&lut_step));
+
+    openCLExecuteKernel(Context::getContext(), &filtering_adaptive_bilateral, kernelName,
+                        globalThreads, localThreads, args, cn, depth, build_options);
+}
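As a side note, the spatial LUT filled at the top of this function stores, for each kernel offset (x, y), the weight below (notation ours, read off the loop above, with sigma_s = sigmaSpace):

\[
    w(x, y) = \frac{\sigma_s^{2}}{\sigma_s^{2} + x^{2} + y^{2}},
    \qquad
    x \in \left[ -\left\lfloor k_w/2 \right\rfloor, \left\lfloor k_w/2 \right\rfloor \right],
    \quad
    y \in \left[ -\left\lfloor k_h/2 \right\rfloor, \left\lfloor k_h/2 \right\rfloor \right]
\]

so the center tap has weight 1 and the weight decays with squared distance; the intensity term is computed per window inside the OpenCL kernel (the CALCVAR=1 build option), following the variance-based scheme described in the imgproc documentation above.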
@@ -46,16 +46,62 @@
 #include <iomanip>
 #include "precomp.hpp"
+
+namespace cv { namespace ocl {
+// used for clAmdBlas library to avoid redundant setup/teardown
+void clBlasSetup();
+void clBlasTeardown();
+}} /* namespace cv { namespace ocl */
+
 #if !defined HAVE_CLAMDBLAS
 void cv::ocl::gemm(const oclMat&, const oclMat&, double,
                    const oclMat&, double, oclMat&, int)
 {
     CV_Error(Error::StsNotImplemented, "OpenCL BLAS is not implemented");
 }
+
+void cv::ocl::clBlasSetup()
+{
+    CV_Error(CV_StsNotImplemented, "OpenCL BLAS is not implemented");
+}
+
+void cv::ocl::clBlasTeardown()
+{
+    // intentionally do nothing
+}
+
 #else
 #include "clAmdBlas.h"
 using namespace cv;
+
+static bool clBlasInitialized = false;
+static Mutex cs;
+
+void cv::ocl::clBlasSetup()
+{
+    if(!clBlasInitialized)
+    {
+        AutoLock al(cs);
+        if(!clBlasInitialized)
+        {
+            openCLSafeCall(clAmdBlasSetup());
+            clBlasInitialized = true;
+        }
+    }
+}
+
+void cv::ocl::clBlasTeardown()
+{
+    AutoLock al(cs);
+    if(clBlasInitialized)
+    {
+        clAmdBlasTeardown();
+        clBlasInitialized = false;
+    }
+}
+
 void cv::ocl::gemm(const oclMat &src1, const oclMat &src2, double alpha,
                    const oclMat &src3, double beta, oclMat &dst, int flags)
 {
@@ -71,7 +117,8 @@ void cv::ocl::gemm(const oclMat &src1, const oclMat &src2, double alpha,
         dst.create(src1.rows, src2.cols, src1.type());
         dst.setTo(Scalar::all(0));
     }
-    openCLSafeCall( clAmdBlasSetup() );
+
+    clBlasSetup();
     const clAmdBlasTranspose transA = (cv::GEMM_1_T & flags) ? clAmdBlasTrans : clAmdBlasNoTrans;
     const clAmdBlasTranspose transB = (cv::GEMM_2_T & flags) ? clAmdBlasTrans : clAmdBlasNoTrans;
@@ -156,6 +203,5 @@ void cv::ocl::gemm(const oclMat &src1, const oclMat &src2, double alpha,
         }
         break;
     }
-    clAmdBlasTeardown();
 }
 #endif
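For context, a sketch of how this entry point is exercised after the change: setup now happens lazily through clBlasSetup() on first use, and teardown is deferred to context release rather than running after every call. The matrix sizes and the empty src3 below are illustrative assumptions:

#include "opencv2/ocl.hpp"

int main()
{
    cv::Mat a = cv::Mat::eye(64, 64, CV_32FC1);
    cv::Mat b = cv::Mat::ones(64, 64, CV_32FC1);
    cv::ocl::oclMat d_a(a), d_b(b), d_c;

    // dst = 1.0 * a * b + 0.0 * src3; the first call pays the clAmdBlasSetup() cost,
    // subsequent calls reuse the already-initialized library
    cv::ocl::gemm(d_a, d_b, 1.0, cv::ocl::oclMat(), 0.0, d_c, 0);

    cv::Mat c;
    d_c.download(c);
    return 0;
}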
@@ -65,6 +65,7 @@ namespace cv
     namespace ocl
     {
         extern void fft_teardown();
+        extern void clBlasTeardown();
         /*
          * The binary caching system to eliminate redundant program source compilation.
          * Strictly, this is not a cache because we do not implement evictions right now.
@@ -1058,6 +1059,7 @@ namespace cv
         void Info::release()
         {
             fft_teardown();
+            clBlasTeardown();
             impl->release();
             impl = new Impl;
             DeviceName.clear();
@@ -1067,6 +1069,7 @@ namespace cv
         Info::~Info()
         {
             fft_teardown();
+            clBlasTeardown();
             impl->release();
         }
...
...
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2013, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Jin Ma, jin@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#ifdef HAVE_OPENCL
using namespace cv;
using namespace cv::ocl;
using namespace cvtest;
using namespace testing;
using namespace std;
extern string workdir;
//////////////////////////////////////////////////////
// MOG
namespace
{
    IMPLEMENT_PARAM_CLASS(UseGray, bool)
    IMPLEMENT_PARAM_CLASS(LearningRate, double)
}

PARAM_TEST_CASE(mog, UseGray, LearningRate, bool)
{
    bool useGray;
    double learningRate;
    bool useRoi;

    virtual void SetUp()
    {
        useGray = GET_PARAM(0);
        learningRate = GET_PARAM(1);
        useRoi = GET_PARAM(2);
    }
};

TEST_P(mog, Update)
{
    std::string inputFile = string(cvtest::TS::ptr()->get_data_path()) + "gpu/video/768x576.avi";
    cv::VideoCapture cap(inputFile);
    ASSERT_TRUE(cap.isOpened());

    cv::Mat frame;
    cap >> frame;
    ASSERT_FALSE(frame.empty());

    cv::ocl::MOG mog;
    cv::ocl::oclMat foreground = createMat_ocl(frame.size(), CV_8UC1, useRoi);

    Ptr<cv::BackgroundSubtractorMOG> mog_gold = createBackgroundSubtractorMOG();
    cv::Mat foreground_gold;

    for (int i = 0; i < 10; ++i)
    {
        cap >> frame;
        ASSERT_FALSE(frame.empty());

        if (useGray)
        {
            cv::Mat temp;
            cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
            cv::swap(temp, frame);
        }

        mog(loadMat_ocl(frame, useRoi), foreground, (float)learningRate);

        mog_gold->apply(frame, foreground_gold, learningRate);

        EXPECT_MAT_NEAR(foreground_gold, foreground, 0.0);
    }
}

INSTANTIATE_TEST_CASE_P(OCL_Video, mog, testing::Combine(
    testing::Values(UseGray(false), UseGray(true)),
    testing::Values(LearningRate(0.0), LearningRate(0.01)),
    Values(true, false)));
//////////////////////////////////////////////////////
// MOG2
namespace
{
    IMPLEMENT_PARAM_CLASS(DetectShadow, bool)
}

PARAM_TEST_CASE(mog2, UseGray, DetectShadow, bool)
{
    bool useGray;
    bool detectShadow;
    bool useRoi;

    virtual void SetUp()
    {
        useGray = GET_PARAM(0);
        detectShadow = GET_PARAM(1);
        useRoi = GET_PARAM(2);
    }
};

TEST_P(mog2, Update)
{
    std::string inputFile = string(cvtest::TS::ptr()->get_data_path()) + "gpu/video/768x576.avi";
    cv::VideoCapture cap(inputFile);
    ASSERT_TRUE(cap.isOpened());

    cv::Mat frame;
    cap >> frame;
    ASSERT_FALSE(frame.empty());

    cv::ocl::MOG2 mog2;
    mog2.bShadowDetection = detectShadow;
    cv::ocl::oclMat foreground = createMat_ocl(frame.size(), CV_8UC1, useRoi);

    cv::Ptr<cv::BackgroundSubtractorMOG2> mog2_gold = createBackgroundSubtractorMOG2();
    mog2_gold->set("detectShadows", detectShadow);
    cv::Mat foreground_gold;

    for (int i = 0; i < 10; ++i)
    {
        cap >> frame;
        ASSERT_FALSE(frame.empty());

        if (useGray)
        {
            cv::Mat temp;
            cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
            cv::swap(temp, frame);
        }

        mog2(loadMat_ocl(frame, useRoi), foreground);

        mog2_gold->apply(frame, foreground_gold);

        if (detectShadow)
            EXPECT_MAT_SIMILAR(foreground_gold, foreground, 15e-3)
        else
            EXPECT_MAT_NEAR(foreground_gold, foreground, 0)
    }
}

TEST_P(mog2, getBackgroundImage)
{
    if (useGray)
        return;

    std::string inputFile = string(cvtest::TS::ptr()->get_data_path()) + "gpu/video/768x576.avi";
    cv::VideoCapture cap(inputFile);
    ASSERT_TRUE(cap.isOpened());

    cv::Mat frame;

    cv::ocl::MOG2 mog2;
    mog2.bShadowDetection = detectShadow;
    cv::ocl::oclMat foreground;

    cv::Ptr<cv::BackgroundSubtractorMOG2> mog2_gold = createBackgroundSubtractorMOG2();
    mog2_gold->set("detectShadows", detectShadow);
    cv::Mat foreground_gold;

    for (int i = 0; i < 10; ++i)
    {
        cap >> frame;
        ASSERT_FALSE(frame.empty());

        mog2(loadMat_ocl(frame, useRoi), foreground);

        mog2_gold->apply(frame, foreground_gold);
    }

    cv::ocl::oclMat background = createMat_ocl(frame.size(), frame.type(), useRoi);
    mog2.getBackgroundImage(background);

    cv::Mat background_gold;
    mog2_gold->getBackgroundImage(background_gold);

    EXPECT_MAT_NEAR(background_gold, background, 1.0);
}

INSTANTIATE_TEST_CASE_P(OCL_Video, mog2, testing::Combine(
    testing::Values(UseGray(true), UseGray(false)),
    testing::Values(DetectShadow(true), DetectShadow(false)),
    Values(true, false)));

#endif
@@ -353,6 +353,69 @@ TEST_P(Filter2D, Mat)
         Near(1);
     }
 }
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Bilateral
+
+struct Bilateral : FilterTestBase
+{
+    int type;
+    cv::Size ksize;
+    int bordertype;
+    double sigmacolor, sigmaspace;
+
+    virtual void SetUp()
+    {
+        type = GET_PARAM(0);
+        ksize = GET_PARAM(1);
+        bordertype = GET_PARAM(3);
+        Init(type);
+        cv::RNG &rng = TS::ptr()->get_rng();
+        sigmacolor = rng.uniform(20, 100);
+        sigmaspace = rng.uniform(10, 40);
+    }
+};
+
+TEST_P(Bilateral, Mat)
+{
+    for(int j = 0; j < LOOP_TIMES; j++)
+    {
+        random_roi();
+        cv::bilateralFilter(mat1_roi, dst_roi, ksize.width, sigmacolor, sigmaspace, bordertype);
+        cv::ocl::bilateralFilter(gmat1, gdst, ksize.width, sigmacolor, sigmaspace, bordertype);
+        Near(1);
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// AdaptiveBilateral
+
+struct AdaptiveBilateral : FilterTestBase
+{
+    int type;
+    cv::Size ksize;
+    int bordertype;
+    Point anchor;
+
+    virtual void SetUp()
+    {
+        type = GET_PARAM(0);
+        ksize = GET_PARAM(1);
+        bordertype = GET_PARAM(3);
+        Init(type);
+        anchor = Point(-1, -1);
+    }
+};
+
+TEST_P(AdaptiveBilateral, Mat)
+{
+    for(int j = 0; j < LOOP_TIMES; j++)
+    {
+        random_roi();
+        cv::adaptiveBilateralFilter(mat1_roi, dst_roi, ksize, 5, anchor, bordertype);
+        cv::ocl::adaptiveBilateralFilter(gmat1, gdst, ksize, 5, anchor, bordertype);
+        Near(1);
+    }
+}
+
 INSTANTIATE_TEST_CASE_P(Filter, Blur, Combine(
     Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32FC1, CV_32FC4),
     Values(cv::Size(3, 3), cv::Size(5, 5), cv::Size(7, 7)),
@@ -400,4 +463,17 @@ INSTANTIATE_TEST_CASE_P(Filter, Filter2D, testing::Combine(
     Values(Size(0, 0)), //not use
     Values((MatType)cv::BORDER_CONSTANT, (MatType)cv::BORDER_REFLECT101, (MatType)cv::BORDER_REPLICATE, (MatType)cv::BORDER_REFLECT)));
+
+INSTANTIATE_TEST_CASE_P(Filter, Bilateral, Combine(
+    Values(CV_8UC1, CV_8UC3),
+    Values(Size(5, 5), Size(9, 9)),
+    Values(Size(0, 0)), //not use
+    Values((MatType)cv::BORDER_CONSTANT, (MatType)cv::BORDER_REPLICATE,
+           (MatType)cv::BORDER_REFLECT, (MatType)cv::BORDER_WRAP, (MatType)cv::BORDER_REFLECT_101)));
+
+INSTANTIATE_TEST_CASE_P(Filter, AdaptiveBilateral, Combine(
+    Values(CV_8UC1, CV_8UC3),
+    Values(Size(5, 5), Size(9, 9)),
+    Values(Size(0, 0)), //not use
+    Values((MatType)cv::BORDER_CONSTANT, (MatType)cv::BORDER_REPLICATE,
+           (MatType)cv::BORDER_REFLECT, (MatType)cv::BORDER_REFLECT_101)));
+
 #endif // HAVE_OPENCL
...@@ -475,56 +475,6 @@ TEST_P(equalizeHist, Mat) ...@@ -475,56 +475,6 @@ TEST_P(equalizeHist, Mat)
} }
////////////////////////////////bilateralFilter////////////////////////////////////////////
struct bilateralFilter : ImgprocTestBase {};
TEST_P(bilateralFilter, Mat)
{
double sigmacolor = 50.0;
int radius = 9;
int d = 2 * radius + 1;
double sigmaspace = 20.0;
int bordertype[] = {cv::BORDER_CONSTANT, cv::BORDER_REPLICATE, cv::BORDER_REFLECT, cv::BORDER_WRAP, cv::BORDER_REFLECT_101};
//const char *borderstr[] = {"BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP", "BORDER_REFLECT_101"};
if (mat1.depth() != CV_8U || mat1.type() != dst.type())
{
cout << "Unsupported type" << endl;
EXPECT_DOUBLE_EQ(0.0, 0.0);
}
else
{
for(size_t i = 0; i < sizeof(bordertype) / sizeof(int); i++)
for(int j = 0; j < LOOP_TIMES; j++)
{
random_roi();
if(((bordertype[i] != cv::BORDER_CONSTANT) && (bordertype[i] != cv::BORDER_REPLICATE) && (mat1_roi.cols <= radius)) || (mat1_roi.cols <= radius) || (mat1_roi.rows <= radius) || (mat1_roi.rows <= radius))
{
continue;
}
//if((dstx>=radius) && (dsty >= radius) && (dstx+cldst_roi.cols+radius <=cldst_roi.wholecols) && (dsty+cldst_roi.rows+radius <= cldst_roi.wholerows))
//{
// dst_roi.adjustROI(radius, radius, radius, radius);
// cldst_roi.adjustROI(radius, radius, radius, radius);
//}
//else
//{
// continue;
//}
cv::bilateralFilter(mat1_roi, dst_roi, d, sigmacolor, sigmaspace, bordertype[i] | cv::BORDER_ISOLATED);
cv::ocl::bilateralFilter(clmat1_roi, cldst_roi, d, sigmacolor, sigmaspace, bordertype[i] | cv::BORDER_ISOLATED);
Near(1.);
}
}
}
////////////////////////////////copyMakeBorder////////////////////////////////////////////
struct CopyMakeBorder : ImgprocTestBase {};
...@@ -1396,14 +1346,10 @@ TEST_P(calcHist, Mat)
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
// CLAHE
namespace
{
IMPLEMENT_PARAM_CLASS(ClipLimit, double)
}
PARAM_TEST_CASE(CLAHE, cv::Size, ClipLimit)
PARAM_TEST_CASE(CLAHE, cv::Size, double)
{
cv::Size size;
cv::Size gridSize;
double clipLimit;
cv::Mat src;
...@@ -1414,22 +1360,22 @@ PARAM_TEST_CASE(CLAHE, cv::Size, ClipLimit)
virtual void SetUp()
{
size = GET_PARAM(0);
gridSize = GET_PARAM(0);
clipLimit = GET_PARAM(1);
cv::RNG &rng = TS::ptr()->get_rng();
src = randomMat(rng, size, CV_8UC1, 0, 256, false);
src = randomMat(rng, cv::Size(MWIDTH, MHEIGHT), CV_8UC1, 0, 256, false);
g_src.upload(src);
}
};
TEST_P(CLAHE, Accuracy)
{
cv::Ptr<cv::CLAHE> clahe = cv::ocl::createCLAHE(clipLimit);
cv::Ptr<cv::CLAHE> clahe = cv::ocl::createCLAHE(clipLimit, gridSize);
clahe->apply(g_src, g_dst);
cv::Mat dst(g_dst);
cv::Ptr<cv::CLAHE> clahe_gold = cv::createCLAHE(clipLimit);
cv::Ptr<cv::CLAHE> clahe_gold = cv::createCLAHE(clipLimit, gridSize);
clahe_gold->apply(src, dst_gold);
EXPECT_MAT_NEAR(dst_gold, dst, 1.0);
...@@ -1622,21 +1568,6 @@ INSTANTIATE_TEST_CASE_P(ImgprocTestBase, equalizeHist, Combine(
NULL_TYPE,
Values(false))); // Values(false) is the reserved parameter
//INSTANTIATE_TEST_CASE_P(ImgprocTestBase, bilateralFilter, Combine(
// ONE_TYPE(CV_8UC1),
// NULL_TYPE,
// ONE_TYPE(CV_8UC1),
// NULL_TYPE,
// NULL_TYPE,
// Values(false))); // Values(false) is the reserved parameter
INSTANTIATE_TEST_CASE_P(ImgprocTestBase, bilateralFilter, Combine(
Values(CV_8UC1, CV_8UC3),
NULL_TYPE,
Values(CV_8UC1, CV_8UC3),
NULL_TYPE,
NULL_TYPE,
Values(false))); // Values(false) is the reserved parameter
INSTANTIATE_TEST_CASE_P(ImgprocTestBase, CopyMakeBorder, Combine(
Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32SC1, CV_32SC3, CV_32SC4, CV_32FC1, CV_32FC3, CV_32FC4),
...@@ -1725,10 +1656,10 @@ INSTANTIATE_TEST_CASE_P(histTestBase, calcHist, Combine(
ONE_TYPE(CV_32SC1) // not used
));
INSTANTIATE_TEST_CASE_P(ImgProc, CLAHE, Combine(
INSTANTIATE_TEST_CASE_P(Imgproc, CLAHE, Combine(
Values(cv::Size(128, 128), cv::Size(113, 113), cv::Size(1300, 1300)),
Values(cv::Size(4, 4), cv::Size(32, 8), cv::Size(8, 64)),
Values(0.0, 40.0)));
Values(0.0, 10.0, 62.0, 300.0)));
INSTANTIATE_TEST_CASE_P(OCL_ImgProc, ColumnSum, DIFFERENT_SIZES);
INSTANTIATE_TEST_CASE_P(Imgproc, ColumnSum, DIFFERENT_SIZES);
#endif // HAVE_OPENCL
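The CLAHE changes above thread the tile grid size through to both factory functions. A hedged usage sketch mirroring the updated test; the file name and parameter values are assumptions:

// parameters and file name are illustrative only
cv::Mat gray = cv::imread("input.png", 0), cpuOut, oclOut; // 0 = load as grayscale
cv::Ptr<cv::CLAHE> cpuClahe = cv::createCLAHE(40.0, cv::Size(8, 8));
cpuClahe->apply(gray, cpuOut);

cv::ocl::oclMat d_gray(gray), d_out;
cv::Ptr<cv::CLAHE> oclClahe = cv::ocl::createCLAHE(40.0, cv::Size(8, 8));
oclClahe->apply(d_gray, d_out); // the ocl variant accepts oclMat arguments, as in the test
d_out.download(oclOut);         // oclOut should match cpuOut to within ~1 intensity level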
...@@ -164,7 +164,7 @@ TEST_P(TVL1, DISABLED_Accuracy) // TODO implementations of TV1 in video module a
EXPECT_MAT_SIMILAR(gold[0], d_flowx, 3e-3);
EXPECT_MAT_SIMILAR(gold[1], d_flowy, 3e-3);
}
INSTANTIATE_TEST_CASE_P(OCL_Video, TVL1, Values(true, false));
INSTANTIATE_TEST_CASE_P(OCL_Video, TVL1, Values(false, true));
/////////////////////////////////////////////////////////////////////////////////////////////////
...
...@@ -100,6 +100,44 @@ Mat randomMat(Size size, int type, double minVal, double maxVal)
return randomMat(TS::ptr()->get_rng(), size, type, minVal, maxVal, false);
}
// Creates an oclMat of the requested size; when useRoi is true, a slightly
// larger buffer is allocated and a centered sub-view is returned, so callers
// also exercise matrices with non-trivial ROI offsets.
cv::ocl::oclMat createMat_ocl(Size size, int type, bool useRoi)
{
Size size0 = size;
if (useRoi)
{
size0.width += randomInt(5, 15);
size0.height += randomInt(5, 15);
}
cv::ocl::oclMat d_m(size0, type);
if (size0 != size)
d_m = d_m(Rect((size0.width - size.width) / 2, (size0.height - size.height) / 2, size.width, size.height));
return d_m;
}
// Uploads m into an oclMat allocated via createMat_ocl, so with useRoi the
// result is backed by a padded buffer and carries a non-zero ROI offset.
cv::ocl::oclMat loadMat_ocl(const Mat& m, bool useRoi)
{
CV_Assert(m.type() == CV_8UC1 || m.type() == CV_8UC3);
cv::ocl::oclMat d_m = createMat_ocl(m.size(), m.type(), useRoi);
cv::ocl::oclMat m_ocl(m);
m_ocl.copyTo(d_m); // d_m is already the (possibly offset) ROI view, so a plain copy suffices
return d_m;
}
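A short sketch of how these helpers are intended to be used (illustrative only; the size and type here are arbitrary). With useRoi set, loadMat_ocl uploads into a padded allocation and returns a sub-view, so the data round-trips while the header reports a non-zero ROI offset:

cv::Mat src = randomMat(cv::Size(128, 128), CV_8UC1, 0, 255);
cv::ocl::oclMat d_src = loadMat_ocl(src, true); // padded allocation, centered view

cv::Size wholeSize;
cv::Point ofs;
d_src.locateROI(wholeSize, ofs); // wholeSize exceeds 128x128 and ofs is non-zero here

cv::Mat roundtrip;
d_src.download(roundtrip);       // the uploaded data must come back unchanged
CV_Assert(cv::norm(src, roundtrip, cv::NORM_INF) == 0);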
/*
void showDiff(InputArray gold_, InputArray actual_, double eps)
{
...
...@@ -72,6 +72,9 @@ double checkNorm(const cv::Mat &m);
double checkNorm(const cv::Mat &m1, const cv::Mat &m2);
double checkSimilarity(const cv::Mat &m1, const cv::Mat &m2);
// oclMat creation helpers
cv::ocl::oclMat createMat_ocl(cv::Size size, int type, bool useRoi = false);
cv::ocl::oclMat loadMat_ocl(const cv::Mat& m, bool useRoi = false);
#define EXPECT_MAT_NORM(mat, eps) \
{ \
EXPECT_LE(checkNorm(cv::Mat(mat)), eps) \
...
...@@ -54,6 +54,6 @@ endif()
if (INSTALL_C_EXAMPLES AND NOT WIN32)
file(GLOB install_list *.c *.cpp *.jpg *.png *.data makefile.* build_all.sh *.dsp *.cmd )
install(FILES ${install_list}
DESTINATION share/opencv/samples/${project}
DESTINATION share/OpenCV/samples/${project}
PERMISSIONS OWNER_READ GROUP_READ WORLD_READ)
endif()
// This sample shows the difference between the adaptive bilateral filter and the bilateral filter.
#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/ocl.hpp"
using namespace cv;
using namespace std;
int main( int argc, const char** argv )
{
const char* keys =
"{ i input | | specify input image }"
"{ k ksize | 5 | specify kernel size }";
CommandLineParser cmd(argc, argv, keys);
string src_path = cmd.get<string>("i");
int ks = cmd.get<int>("k");
const char * winName[] = {"input", "adaptive bilateral CPU", "adaptive bilateral OpenCL", "bilateralFilter OpenCL"};
Mat src = imread(src_path);
Mat abFilterCPU;
if(src.empty()){
cout << "error: failed to read image: " << src_path << endl;
return -1;
}
std::vector<ocl::Info> infos;
ocl::getDevice(infos);
ocl::oclMat dsrc(src), dABFilter, dBFilter;
Size ksize(ks, ks);
adaptiveBilateralFilter(src,abFilterCPU, ksize, 10);
ocl::adaptiveBilateralFilter(dsrc, dABFilter, ksize, 10);
ocl::bilateralFilter(dsrc, dBFilter, ks, 30, 9);
Mat abFilter = dABFilter;
Mat bFilter = dBFilter;
imshow(winName[0], src);
imshow(winName[1], abFilterCPU);
imshow(winName[2], abFilter);
imshow(winName[3], bFilter);
waitKey();
return 0;
}
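A possible extension of the sample (illustrative, not part of the code above): quantify the CPU/OpenCL agreement rather than comparing the windows only by eye. The lines below would go just before waitKey() and reuse the sample's abFilterCPU and abFilter:

Mat diff;
absdiff(abFilterCPU, abFilter, diff); // per-pixel |CPU - OpenCL|
cout << "max CPU/OpenCL difference: " << norm(diff, NORM_INF) << endl;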
#include <iostream>
#include <string>
#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/ocl.hpp"
#include "opencv2/highgui.hpp"
using namespace std;
using namespace cv;
using namespace cv::ocl;
#define M_MOG 1
#define M_MOG2 2
int main(int argc, const char** argv)
{
cv::CommandLineParser cmd(argc, argv,
"{ c camera | false | use camera }"
"{ f file | 768x576.avi | input video file }"
"{ m method | mog | method (mog, mog2) }"
"{ h help | false | print help message }");
if (cmd.get<bool>("help"))
{
cout << "Usage : bgfg_segm [options]" << endl;
cout << "Avaible options:" << endl;
cmd.printMessage();
return 0;
}
bool useCamera = cmd.get<bool>("camera");
string file = cmd.get<string>("file");
string method = cmd.get<string>("method");
if (method != "mog" && method != "mog2")
{
cerr << "Incorrect method" << endl;
return -1;
}
int m = method == "mog" ? M_MOG : M_MOG2;
VideoCapture cap;
if (useCamera)
cap.open(0);
else
cap.open(file);
if (!cap.isOpened())
{
cerr << "can not open camera or video file" << endl;
return -1;
}
std::vector<cv::ocl::Info> info;
cv::ocl::getDevice(info);
Mat frame;
cap >> frame;
oclMat d_frame(frame);
cv::ocl::MOG mog;
cv::ocl::MOG2 mog2;
oclMat d_fgmask;
oclMat d_fgimg;
oclMat d_bgimg;
d_fgimg.create(d_frame.size(), d_frame.type());
Mat fgmask;
Mat fgimg;
Mat bgimg;
switch (m)
{
case M_MOG:
mog(d_frame, d_fgmask, 0.01f);
break;
case M_MOG2:
mog2(d_frame, d_fgmask);
break;
}
for(;;)
{
cap >> frame;
if (frame.empty())
break;
d_frame.upload(frame);
int64 start = cv::getTickCount();
//update the model
switch (m)
{
case M_MOG:
mog(d_frame, d_fgmask, 0.01f);
mog.getBackgroundImage(d_bgimg);
break;
case M_MOG2:
mog2(d_frame, d_fgmask);
mog2.getBackgroundImage(d_bgimg);
break;
}
double fps = cv::getTickFrequency() / (cv::getTickCount() - start);
std::cout << "FPS : " << fps << std::endl;
d_fgimg.setTo(Scalar::all(0));
d_frame.copyTo(d_fgimg, d_fgmask);
d_fgmask.download(fgmask);
d_fgimg.download(fgimg);
if (!d_bgimg.empty())
d_bgimg.download(bgimg);
imshow("image", frame);
imshow("foreground mask", fgmask);
imshow("foreground image", fgimg);
if (!bgimg.empty())
imshow("mean background image", bgimg);
int key = waitKey(30);
if (key == 27)
break;
}
return 0;
}
...@@ -45,6 +45,10 @@ int main(int argc, char** argv)
namedWindow("CLAHE");
createTrackbar("Tile Size", "CLAHE", &tilesize, 32, (TrackbarCallback)TSize_Callback);
createTrackbar("Clip Limit", "CLAHE", &cliplimit, 20, (TrackbarCallback)Clip_Callback);
vector<ocl::Info> info;
CV_Assert(ocl::getDevice(info));
Mat frame, outframe;
ocl::oclMat d_outframe;
...
...@@ -31,6 +31,11 @@
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ------------------------------------------------------------------------------------------------
# Note:
# When using the FaceRecognizer interface from Python, stick to Python 2;
# some of the underlying scripts, such as create_csv, do not work with Python 3.
# ------------------------------------------------------------------------------------------------
import os
import sys
...