Commit 949c4edf authored by Andrey Kamaev

Merged the trunk r8408:8457 (inclusive)

parent 7b5a45ea
......@@ -5,20 +5,31 @@ endif()
project(tbb)
# 4.0 update 4 - works fine
set(tbb_ver "tbb40_20120408oss")
set(tbb_url "http://threadingbuildingblocks.org/uploads/77/185/4.0%20update%204/tbb40_20120408oss_src.tgz")
set(tbb_md5 "734b356da7fe0ed308741f3e6018251e")
set(tbb_version_file "version_string.ver")
# 4.0 update 3 - build broken
#set(tbb_ver "tbb40_20120201oss")
#set(tbb_url "http://threadingbuildingblocks.org/uploads/77/182/4.0%20update%203/tbb40_20120201oss_src.tgz")
#set(tbb_md5 "4669e7d4adee018de7a7b8b972987218")
#set(tbb_version_file "version_string.tmp")
# 4.0 update 2 - works fine
set(tbb_ver "tbb40_20111130oss")
set(tbb_url "http://threadingbuildingblocks.org/uploads/77/180/4.0%20update%202/tbb40_20111130oss_src.tgz")
set(tbb_md5 "1e6926b21e865e79772119cd44fc3ad8")
#set(tbb_ver "tbb40_20111130oss")
#set(tbb_url "http://threadingbuildingblocks.org/uploads/77/180/4.0%20update%202/tbb40_20111130oss_src.tgz")
#set(tbb_md5 "1e6926b21e865e79772119cd44fc3ad8")
#set(tbb_version_file "version_string.tmp")
#set(tbb_need_GENERIC_DWORD_LOAD_STORE TRUE)
# 4.0 update 1 - works fine
#set(tbb_ver "tbb40_20111003oss")
#set(tbb_url "http://threadingbuildingblocks.org/uploads/77/177/4.0%20update%201/tbb40_20111003oss_src.tgz")
#set(tbb_md5 "7b5d94eb35a563b29ef402e0fd8f15c9")
#set(tbb_version_file "version_string.tmp")
#set(tbb_need_GENERIC_DWORD_LOAD_STORE TRUE)
set(tbb_tarball "${CMAKE_CURRENT_SOURCE_DIR}/${tbb_ver}_src.tgz")
set(tbb_src_dir "${CMAKE_CURRENT_BINARY_DIR}/${tbb_ver}")
......@@ -92,16 +103,20 @@ list(APPEND lib_srcs "${tbb_src_dir}/src/rml/client/rml_tbb.cpp")
add_definitions(-D__TBB_DYNAMIC_LOAD_ENABLED=0 #required
-D__TBB_BUILD=1 #required
-D__TBB_SURVIVE_THREAD_SWITCH=0 #no cilk on Android
-D__TBB_USE_GENERIC_DWORD_LOAD_STORE=1 #needed by TBB 4.0 update 1,2; fixed in TBB 4.0 update 3 but it has 2 new problems
-D__TBB_SURVIVE_THREAD_SWITCH=0 #no cilk on Android ?
-DUSE_PTHREAD #required
-DTBB_USE_GCC_BUILTINS=1 #required
-DTBB_USE_DEBUG=0 #just ot be sure
-DTBB_USE_DEBUG=0 #just to be sure
-DTBB_NO_LEGACY=1 #don't need backward compatibility
-DDO_ITT_NOTIFY=0 #it seems that we don't need these notifications
)
add_library(tbb STATIC ${lib_srcs} ${lib_hdrs} "${CMAKE_CURRENT_SOURCE_DIR}/android_additional.h" "${CMAKE_CURRENT_SOURCE_DIR}/version_string.tmp")
if(tbb_need_GENERIC_DWORD_LOAD_STORE)
#needed by TBB 4.0 update 1,2; fixed in TBB 4.0 update 3 but it has 2 new problems
add_definitions(-D__TBB_USE_GENERIC_DWORD_LOAD_STORE=1)
endif()
add_library(tbb STATIC ${lib_srcs} ${lib_hdrs} "${CMAKE_CURRENT_SOURCE_DIR}/android_additional.h" "${CMAKE_CURRENT_SOURCE_DIR}/${tbb_version_file}")
target_link_libraries(tbb c m dl)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -include \"${CMAKE_CURRENT_SOURCE_DIR}/android_additional.h\"")
......
#define __TBB_VERSION_STRINGS \
"TBB: BUILD_HOST buit as part of OpenCV" ENDL \
"TBB: BUILD_OS crosscompiling" ENDL \
"TBB: BUILD_KERNEL UNKNOWN" ENDL \
"TBB: BUILD_HOST Unknown" ENDL \
"TBB: BUILD_OS Android" ENDL \
"TBB: BUILD_KERNEL Unknown" ENDL \
"TBB: BUILD_GCC gcc version 4.4.3" ENDL \
"TBB: BUILD_GLIBC 2.11.1" ENDL \
"TBB: BUILD_LD " ENDL \
"TBB: BUILD_TARGET on " ENDL \
"TBB: BUILD_COMMAND TBD" ENDL \
#define __TBB_DATETIME "Sun Jan 01 00:00:01 UTC 2012"
"TBB: BUILD_GLIBC Unknown" ENDL \
"TBB: BUILD_LD Unknown" ENDL \
"TBB: BUILD_TARGET Unknown" ENDL \
"TBB: BUILD_COMMAND use cv::getBuildInformation() for details" ENDL \
#define __TBB_VERSION_STRINGS(N) \
#N": BUILD_HOST Unknown" ENDL \
#N": BUILD_OS Android" ENDL \
#N": BUILD_KERNEL Unknown" ENDL \
#N": BUILD_GCC gcc version 4.4.3" ENDL \
#N": BUILD_GLIBC Unknown" ENDL \
#N": BUILD_LD Unknown" ENDL \
#N": BUILD_TARGET Unknown" ENDL \
#N": BUILD_COMMAND use cv::getBuildInformation() for details" ENDL \
......@@ -170,7 +170,7 @@ elseif(MSVC_IDE)
set(ENABLE_SOLUTION_FOLDERS0 ON)
else()
set(ENABLE_SOLUTION_FOLDERS0 OFF)
endif()
# OpenCV build options
# ===================================================
......@@ -311,7 +311,7 @@ if(UNIX)
include(cmake/OpenCVFindPkgConfig.cmake OPTIONAL)
include(CheckFunctionExists)
include(CheckIncludeFile)
if(NOT APPLE)
CHECK_INCLUDE_FILE(alloca.h HAVE_ALLOCA_H)
CHECK_FUNCTION_EXISTS(alloca HAVE_ALLOCA)
......@@ -407,11 +407,32 @@ endif(WITH_UNICAP)
ocv_clear_vars(HAVE_PVAPI)
if(WITH_PVAPI)
find_path(PVAPI_INCLUDE_PATH "PvApi.h"
PATHS "/usr/local/include" "/usr/include"
PATHS /usr/local /opt /usr ENV ProgramFiles ENV ProgramW6432
PATH_SUFFIXES include "Allied Vision Technologies/GigESDK/inc-pc" "AVT GigE SDK/inc-pc" "GigESDK/inc-pc"
DOC "The path to PvAPI header")
if(PVAPI_INCLUDE_PATH)
set(HAVE_PVAPI TRUE)
endif()
if(X86 AND NOT WIN32)
set(PVAPI_SDK_SUBDIR x86)
elseif(X86_64)
set(PVAPI_SDK_SUBDIR x64)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES arm)
set(PVAPI_SDK_SUBDIR arm)
endif()
get_filename_component(_PVAPI_LIBRARY "${PVAPI_INCLUDE_PATH}/../lib-pc" ABSOLUTE)
if(PVAPI_SDK_SUBDIR)
set(_PVAPI_LIBRARY "${_PVAPI_LIBRARY}/${PVAPI_SDK_SUBDIR}")
endif()
if(NOT WIN32 AND CMAKE_COMPILER_IS_GNUCXX)
set(_PVAPI_LIBRARY "${_PVAPI_LIBRARY}/${CMAKE_OPENCV_GCC_VERSION_MAJOR}.${CMAKE_OPENCV_GCC_VERSION_MINOR}")
endif()
set(PVAPI_LIBRARY "${_PVAPI_LIBRARY}/${CMAKE_STATIC_LIBRARY_PREFIX}PvAPI${CMAKE_STATIC_LIBRARY_SUFFIX}" CACHE PATH "The PvAPI library")
if(EXISTS "${PVAPI_LIBRARY}")
set(HAVE_PVAPI TRUE)
endif()
endif(PVAPI_INCLUDE_PATH)
endif(WITH_PVAPI)
# --- Dc1394 ---
......@@ -462,7 +483,7 @@ if(WITH_FFMPEG)
CHECK_MODULE(libavformat HAVE_FFMPEG_FORMAT)
CHECK_MODULE(libavutil HAVE_FFMPEG_UTIL)
CHECK_MODULE(libswscale HAVE_FFMPEG_SWSCALE)
CHECK_INCLUDE_FILE(libavformat/avformat.h HAVE_GENTOO_FFMPEG)
CHECK_INCLUDE_FILE(ffmpeg/avformat.h HAVE_FFMPEG_FFMPEG)
if(NOT HAVE_GENTOO_FFMPEG AND NOT HAVE_FFMPEG_FFMPEG)
......@@ -589,7 +610,7 @@ include(cmake/OpenCVDetectPython.cmake REQUIRED)
if(ANDROID)
include(cmake/OpenCVDetectApacheAnt.cmake REQUIRED)
include(cmake/OpenCVDetectAndroidSDK.cmake REQUIRED)
if(NOT ANDROID_TOOLS_Pkg_Revision GREATER 13)
message(WARNING "OpenCV requires Android SDK tools revision 14 or newer. Otherwise tests and samples will not be compiled.")
endif()
......
......@@ -32,15 +32,15 @@ if(MINGW)
# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=40838
# here we are trying to workaround the problem
include(CheckCXXCompilerFlag)
# CHECK_CXX_COMPILER_FLAG(-mstackrealign HAVE_STACKREALIGN_FLAG)
# if(HAVE_STACKREALIGN_FLAG)
# set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -mstackrealign")
#else()
CHECK_CXX_COMPILER_FLAG(-mstackrealign HAVE_STACKREALIGN_FLAG)
if(HAVE_STACKREALIGN_FLAG)
set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -mstackrealign")
else()
CHECK_CXX_COMPILER_FLAG(-mpreferred-stack-boundary=2 HAVE_PREFERRED_STACKBOUNDARY_FLAG)
if(HAVE_PREFERRED_STACKBOUNDARY_FLAG)
set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -mstackrealign")
endif()
#endif()
endif()
endif()
if(CMAKE_COMPILER_IS_GNUCXX)
......
......@@ -8,11 +8,11 @@ endif()
if(NOT APPLE)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
set(CMAKE_COMPILER_IS_GNUCXX 1)
unset(ENABLE_PRECOMPILED_HEADERS CACHE)
set(ENABLE_PRECOMPILED_HEADERS OFF CACHE BOOL "" FORCE)
endif()
if(CMAKE_C_COMPILER_ID STREQUAL "Clang")
set(CMAKE_COMPILER_IS_GNUCC 1)
unset(ENABLE_PRECOMPILED_HEADERS CACHE)
set(ENABLE_PRECOMPILED_HEADERS OFF CACHE BOOL "" FORCE)
endif()
endif()
......
......@@ -970,6 +970,8 @@ namespace cv
};
CV_EXPORTS void applyColorMap(InputArray src, OutputArray dst, int colormap);
CV_EXPORTS bool initModule_contrib();
}
......
......@@ -241,6 +241,11 @@ void DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector()
CV_Assert(stateThread==STATE_THREAD_WORKING_SLEEPING);
pthread_mutex_lock(&mutex);
if (!isWorking()) {//it is a rare case, but may cause a crash
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- go out from the workcycle from inner part of lock just before waiting");
pthread_mutex_unlock(&mutex);
break;
}
CV_Assert(stateThread==STATE_THREAD_WORKING_SLEEPING);
pthread_cond_wait(&objectDetectorRun, &mutex);
if (isWorking()) {
......
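The added block re-checks isWorking() while the mutex is held: the state can change between the earlier assertion and the wait, and without the re-check the thread could enter pthread_cond_wait() after the wake-up signal has already fired and sleep forever. A minimal sketch of this check-then-wait pattern (the names are illustrative, not the tracker's actual members):

#include <pthread.h>

pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t objectDetectorRun = PTHREAD_COND_INITIALIZER;
bool working = true; // toggled by another thread under the same mutex

void waitForWork()
{
    pthread_mutex_lock(&mutex);
    if (!working)
    {
        pthread_mutex_unlock(&mutex); // leave instead of sleeping forever
        return;
    }
    pthread_cond_wait(&objectDetectorRun, &mutex); // unlocks while sleeping
    // re-check 'working' here as well: wakeups may be spurious
    pthread_mutex_unlock(&mutex);
}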
......@@ -713,4 +713,9 @@ Ptr<FaceRecognizer> createLBPHFaceRecognizer(int radius, int neighbors,
return new LBPH(radius, neighbors, grid_x, grid_y);
}
bool initModule_contrib()
{
return true;
}
}
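The new initModule_contrib() follows OpenCV 2.4's module-registration pattern: calling it from user code guarantees the contrib module's Algorithm subclasses are linked in and registered before they are looked up by name. A hedged sketch of typical use:

#include <opencv2/core/core.hpp>
#include <opencv2/contrib/contrib.hpp>
#include <iostream>

int main()
{
    cv::initModule_contrib(); // force registration of contrib algorithms

    std::vector<std::string> algorithms;
    cv::Algorithm::getList(algorithms); // names now include the contrib ones
    for (size_t i = 0; i < algorithms.size(); i++)
        std::cout << algorithms[i] << std::endl;
    return 0;
}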
......@@ -2357,7 +2357,7 @@ Algorithm::get
--------------
Returns the algorithm parameter
.. ocv:function:: template<typename _Tp> typename ParamType<_Tp>::member_type get(const string& name) const
.. ocv:function:: template<typename _Tp> typename ParamType<_Tp>::member_type Algorithm::get(const string& name) const
:param name: The parameter name.
......@@ -2378,13 +2378,13 @@ Algorithm::set
--------------
Sets the algorithm parameter
.. ocv:function:: void set(const string& name, int value)
.. ocv:function:: void set(const string& name, double value)
.. ocv:function:: void set(const string& name, bool value)
.. ocv:function:: void set(const string& name, const string& value)
.. ocv:function:: void set(const string& name, const Mat& value)
.. ocv:function:: void set(const string& name, const vector<Mat>& value)
.. ocv:function:: void set(const string& name, const Ptr<Algorithm>& value)
.. ocv:function:: void Algorithm::set(const string& name, int value)
.. ocv:function:: void Algorithm::set(const string& name, double value)
.. ocv:function:: void Algorithm::set(const string& name, bool value)
.. ocv:function:: void Algorithm::set(const string& name, const string& value)
.. ocv:function:: void Algorithm::set(const string& name, const Mat& value)
.. ocv:function:: void Algorithm::set(const string& name, const vector<Mat>& value)
.. ocv:function:: void Algorithm::set(const string& name, const Ptr<Algorithm>& value)
:param name: The parameter name.
:param value: The parameter value.
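Any ``Algorithm`` subclass with registered parameters can be tuned through this interface. A minimal sketch using ``MSER``, whose parameters are registered through the ``info()`` mechanism shown later in this commit (the parameter name ``delta`` mirrors its constructor argument) ::

    cv::MSER mser;                        // MSER derives from Algorithm
    mser.set("delta", 7);                 // update a registered int parameter
    int delta = mser.get<int>("delta");   // read it back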
......@@ -2396,7 +2396,7 @@ Algorithm::write
----------------
Stores algorithm parameters in a file storage
.. ocv:function:: void write(FileStorage& fs) const
.. ocv:function:: void Algorithm::write(FileStorage& fs) const
:param fs: File storage.
......@@ -2413,7 +2413,7 @@ Algorithm::read
---------------
Reads algorithm parameters from a file storage
.. ocv:function:: void read(const FileNode& fn)
.. ocv:function:: void Algorithm::read(const FileNode& fn)
:param fn: File node of the file storage.
......@@ -2423,29 +2423,24 @@ Algorithm::getList
------------------
Returns the list of registered algorithms
.. ocv:function:: void read(vector<string>& algorithms)
.. ocv:function:: void Algorithm::getList(vector<string>& algorithms)
:param algorithms: The output vector of algorithm names.
This static method returns the list of registered algorithms in alphabetical order.
This static method returns the list of registered algorithms in alphabetical order. Here is how to use it ::
Algorithm::getList
------------------
Returns the list of registered algorithms
.. ocv:function:: void read(vector<string>& algorithms)
:param algorithms: The output vector of algorithm names.
This static method returns the list of registered algorithms in alphabetical order.
vector<string> algorithms;
Algorithm::getList(algorithms);
cout << "Algorithms: " << algorithms.size() << endl;
for (size_t i=0; i < algorithms.size(); i++)
cout << algorithms[i] << endl;
Algorithm::create
-----------------
Creates algorithm instance by name
.. ocv:function:: template<typename _Tp> Ptr<_Tp> create(const string& name)
.. ocv:function:: template<typename _Tp> Ptr<_Tp> Algorithm::create(const string& name)
:param name: The algorithm name, one of the names returned by ``Algorithm::getList()``.
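A hedged usage sketch, assuming ``"Feature2D.MSER"`` is among the registered names (``create()`` yields a null pointer for unknown names) ::

    cv::Ptr<cv::FeatureDetector> detector =
        cv::Algorithm::create<cv::FeatureDetector>("Feature2D.MSER");
    if (detector.empty())
        CV_Error(CV_StsError, "the algorithm is not registered");
    detector->set("delta", 7);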
......
......@@ -1434,7 +1434,7 @@ Finds the inverse or pseudo-inverse of a matrix.
The function ``invert`` inverts the matrix ``src`` and stores the result in ``dst`` .
When the matrix ``src`` is singular or non-square, the function computes the pseudo-inverse matrix (the ``dst`` matrix) so that ``norm(src*dst - I)`` is minimal, where I is an identity matrix.
In case of the ``DECOMP_LU`` method, the function returns the ``src`` determinant ( ``src`` must be square). If it is 0, the matrix is not inverted and ``dst`` is filled with zeros.
In case of the ``DECOMP_LU`` method, the function returns non-zero value if the inverse has been successfully computed and 0 if ``src`` is singular.
In case of the ``DECOMP_SVD`` method, the function returns the inverse condition number of ``src`` (the ratio of the smallest singular value to the largest singular value) and 0 if ``src`` is singular. The SVD method calculates a pseudo-inverse matrix if ``src`` is singular.
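For example, the return value lets a caller detect singularity and fall back to the pseudo-inverse; a minimal sketch, not part of the reference itself ::

    cv::Mat A = (cv::Mat_<double>(2, 2) << 4, 7,
                                           2, 6);
    cv::Mat Ainv;
    double ok = cv::invert(A, Ainv, cv::DECOMP_LU);
    if (ok == 0)
        cv::invert(A, Ainv, cv::DECOMP_SVD); // pseudo-inverse for singular A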
......
......@@ -947,6 +947,9 @@ double cv::invert( InputArray _src, OutputArray _dst, int method )
bool result = false;
Mat src = _src.getMat();
int type = src.type();
CV_Assert(type == CV_32F || type == CV_64F);
size_t esz = CV_ELEM_SIZE(type);
int m = src.rows, n = src.cols;
......@@ -969,7 +972,7 @@ double cv::invert( InputArray _src, OutputArray _dst, int method )
((double*)w.data)[n-1]/((double*)w.data)[0] : 0);
}
CV_Assert( m == n && (type == CV_32F || type == CV_64F));
CV_Assert( m == n );
if( method == DECOMP_EIG )
{
......
......@@ -324,13 +324,13 @@ class CV_EXPORTS_W MSER : public FeatureDetector
{
public:
//! the full constructor
explicit MSER( int _delta=5, int _min_area=60, int _max_area=14400,
CV_WRAP explicit MSER( int _delta=5, int _min_area=60, int _max_area=14400,
double _max_variation=0.25, double _min_diversity=.2,
int _max_evolution=200, double _area_threshold=1.01,
double _min_margin=0.003, int _edge_blur_size=5 );
//! the operator that extracts the MSERs from the image or the specific part of it
CV_WRAP_AS(detect) void operator()( const Mat& image, vector<vector<Point> >& msers,
CV_WRAP_AS(detect) void operator()( const Mat& image, CV_OUT vector<vector<Point> >& msers,
const Mat& mask=Mat() ) const;
AlgorithmInfo* info() const;
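A minimal sketch of the wrapped call in C++ (the image path is a placeholder):

#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::Mat image = cv::imread("scene.png", 0);  // placeholder path, grayscale
    cv::MSER mser;                               // default parameters
    std::vector<std::vector<cv::Point> > msers;
    mser(image, msers);                          // exposed as detect() in bindings
    return (int)msers.size();
}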
......
......@@ -96,7 +96,7 @@ elseif(APPLE)
list(APPEND HIGHGUI_LIBRARIES "-framework Cocoa")
endif()
endif()
if(WIN32)
list(APPEND highgui_srcs src/cap_vfw.cpp src/cap_cmu.cpp src/cap_dshow.cpp)
endif(WIN32)
......@@ -157,19 +157,8 @@ endif(HAVE_FFMPEG)
if(HAVE_PVAPI)
add_definitions(-DHAVE_PVAPI)
ocv_include_directories(${PVAPI_INCLUDE_PATH})
if(X86)
set(PVAPI_SDK_SUBDIR x86)
elseif(X86_64)
set(PVAPI_SDK_SUBDIR x64)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES arm)
set(PVAPI_SDK_SUBDIR arm)
endif()
if(PVAPI_SDK_SUBDIR AND CMAKE_COMPILER_IS_GNUCXX)
get_filename_component(PVAPI_EXPECTED_LIB_PATH "${PVAPI_INCLUDE_PATH}/../lib-pc/${PVAPI_SDK_SUBDIR}/${CMAKE_OPENCV_GCC_VERSION_MAJOR}.${CMAKE_OPENCV_GCC_VERSION_MINOR}" ABSOLUTE)
link_directories(${PVAPI_EXPECTED_LIB_PATH})
endif()
set(highgui_srcs src/cap_pvapi.cpp ${highgui_srcs})
set(HIGHGUI_LIBRARIES ${HIGHGUI_LIBRARIES} PvAPI)
list(APPEND HIGHGUI_LIBRARIES ${PVAPI_LIBRARY})
endif()
if(WITH_IMAGEIO)
......@@ -241,11 +230,11 @@ if(WIN32 AND WITH_FFMPEG)
if(MSVC64 OR MINGW64)
set(FFMPEG_SUFFIX _64)
endif()
set(ffmpeg_bare_name "opencv_ffmpeg${FFMPEG_SUFFIX}.dll")
set(ffmpeg_bare_name_ver "opencv_ffmpeg${OPENCV_DLLVERSION}${FFMPEG_SUFFIX}.dll")
set(ffmpeg_path "${OpenCV_SOURCE_DIR}/3rdparty/ffmpeg/${ffmpeg_bare_name}")
#if(MSVC AND CMAKE_VERSION VERSION_GREATER "2.8.2")
# add_custom_command(TARGET ${the_module} POST_BUILD
# COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/$<CONFIGURATION>/${ffmpeg_bare_name_ver}"
......@@ -265,7 +254,7 @@ if(WIN32 AND WITH_FFMPEG)
COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/${ffmpeg_bare_name_ver}"
COMMENT "Copying ${ffmpeg_path} to the output directory")
endif()
install(FILES "${ffmpeg_path}" DESTINATION bin COMPONENT main RENAME "${ffmpeg_bare_name_ver}")
endif()
......
......@@ -511,7 +511,7 @@ Unnormalized box filter is useful for computing various integral characteristics
.. seealso::
:ocv:func:`boxFilter`,
:ocv:func:`blur`,
:ocv:func:`bilateralFilter`,
:ocv:func:`GaussianBlur`,
:ocv:func:`medianBlur`,
......
......@@ -665,7 +665,7 @@ Computes the ideal point coordinates from the observed point coordinates.
:param src: Observed point coordinates, 1xN or Nx1 2-channel (CV_32FC2 or CV_64FC2).
:param dst: Output ideal point coordinates after undistortion and reverse perspective transformation.
:param dst: Output ideal point coordinates after undistortion and reverse perspective transformation. If matrix ``P`` is identity or omitted, ``dst`` will contain normalized point coordinates.
:param cameraMatrix: Camera matrix :math:`\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}` .
......@@ -688,6 +688,7 @@ The function is similar to
(x',y') = undistort(x",y",dist_coeffs)
[X,Y,W]T = R*[x' y' 1]T
x = X/W, y = Y/W
// only performed if P=[fx' 0 cx' [tx]; 0 fy' cy' [ty]; 0 0 1 [tz]] is specified
u' = x*fx' + cx'
v' = y*fy' + cy',
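A sketch showing both behaviours in C++ (the camera matrix and distortion coefficients are illustrative values, not calibration results) ::

    cv::Mat cameraMatrix = (cv::Mat_<double>(3,3) << 500, 0, 320,
                                                     0, 500, 240,
                                                     0,   0,   1);
    cv::Mat distCoeffs = cv::Mat::zeros(4, 1, CV_64F);
    std::vector<cv::Point2f> src(1, cv::Point2f(400.f, 300.f)), dst;

    // P omitted: dst receives normalized (dimensionless) coordinates
    cv::undistortPoints(src, dst, cameraMatrix, distCoeffs);

    // P specified: the last two lines of the pseudo-code above are applied,
    // so dst receives pixel coordinates again
    cv::undistortPoints(src, dst, cameraMatrix, distCoeffs, cv::noArray(), cameraMatrix);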
......
......@@ -92,6 +92,56 @@ PERF_TEST_P(Size_MatType_BorderType3x3, blur3x3,
SANITY_CHECK(dst, 1e-3);
}
PERF_TEST_P(Size_MatType_BorderType3x3, box3x3,
testing::Combine(
testing::Values(szODD, szQVGA, szVGA, sz720p),
testing::Values(CV_8UC1, CV_16SC1, CV_32SC1, CV_32FC1, CV_32FC3),
testing::ValuesIn(BorderType3x3::all())
)
)
{
Size size = get<0>(GetParam());
int type = get<1>(GetParam());
BorderType3x3 btype = get<2>(GetParam());
Mat src(size, type);
Mat dst(size, type);
declare.in(src, WARMUP_RNG).out(dst);
TEST_CYCLE() boxFilter(src, dst, -1, Size(3,3), Point(-1,-1), false, btype);
SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
}
PERF_TEST_P(Size_MatType_BorderType3x3, box3x3_inplace,
testing::Combine(
testing::Values(szODD, szQVGA, szVGA, sz720p),
testing::Values(CV_8UC1, CV_16SC1, CV_32SC1, CV_32FC1, CV_32FC3),
testing::ValuesIn(BorderType3x3::all())
)
)
{
Size size = get<0>(GetParam());
int type = get<1>(GetParam());
BorderType3x3 btype = get<2>(GetParam());
Mat src(size, type);
Mat dst(size, type);
declare.in(src, WARMUP_RNG).out(dst);
while(next())
{
src.copyTo(dst);
startTimer();
boxFilter(dst, dst, -1, Size(3,3), Point(-1,-1), false, btype);
stopTimer();
}
SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
}
PERF_TEST_P(Size_MatType_BorderType, gaussianBlur5x5,
testing::Combine(
testing::Values(szODD, szQVGA, szVGA, sz720p),
......@@ -117,7 +167,7 @@ PERF_TEST_P(Size_MatType_BorderType, gaussianBlur5x5,
PERF_TEST_P(Size_MatType_BorderType, blur5x5,
testing::Combine(
testing::Values(szODD, szQVGA, szVGA, sz720p),
testing::Values(CV_8UC1, CV_8UC4, CV_16UC1, CV_16SC1, CV_32FC1),
testing::Values(CV_8UC1, CV_8UC4, CV_16UC1, CV_16SC1, CV_32FC1, CV_32FC3),
testing::ValuesIn(BorderType::all())
)
)
......
......@@ -33,5 +33,8 @@ PERF_TEST_P(Img_BlockSize_ApertureSize_BorderType, cornerEigenValsAndVecs,
TEST_CYCLE() cornerEigenValsAndVecs(src, dst, blockSize, apertureSize, borderType);
SANITY_CHECK(dst, 2e-5);
Mat l1;
extractChannel(dst, l1, 0);
SANITY_CHECK(l1, 2e-5);
}
\ No newline at end of file
......@@ -16,7 +16,7 @@ PERF_TEST_P(Img_BlockSize_ApertureSize_k_BorderType, cornerHarris,
testing::Values( "stitching/a1.jpg", "cv/shared/pic5.png"),
testing::Values( 3, 5 ),
testing::Values( 3, 5 ),
testing::Values( 1, 0.1 ),
testing::Values( 0.04, 0.1 ),
testing::ValuesIn(BorderType::all())
)
)
......@@ -35,5 +35,5 @@ PERF_TEST_P(Img_BlockSize_ApertureSize_k_BorderType, cornerHarris,
TEST_CYCLE() cornerHarris(src, dst, blockSize, apertureSize, k, borderType);
SANITY_CHECK(dst, 2e-6);
SANITY_CHECK(dst, 2e-5);
}
\ No newline at end of file
......@@ -29,10 +29,10 @@ PERF_TEST_P(Image_MaxCorners_QualityLevel_MinDistance_BlockSize_UseHarris, goodF
if (image.empty())
FAIL() << "Unable to load source image" << filename;
Mat corners;
std::vector<Point2f> corners;
double minDistance = 1;
TEST_CYCLE() goodFeaturesToTrack(image, corners, maxCorners, qualityLevel, minDistance, noArray(), blockSize, useHarrisDetector);
SANITY_CHECK(corners);
//SANITY_CHECK(corners);
}
......@@ -247,6 +247,11 @@ cornerEigenValsVecs( const Mat& src, Mat& eigenv, int block_size,
int aperture_size, int op_type, double k=0.,
int borderType=BORDER_DEFAULT )
{
#ifdef HAVE_TEGRA_OPTIMIZATION
if (tegra::cornerEigenValsVecs(src, eigenv, block_size, aperture_size, op_type, k, borderType))
return;
#endif
int depth = src.depth();
double scale = (double)(1 << ((aperture_size > 0 ? aperture_size : 3) - 1)) * block_size;
if( aperture_size < 0 )
......
......@@ -279,7 +279,7 @@ cv::Ptr<cv::FilterEngine> cv::createBoxFilter( int srcType, int dstType, Size ks
{
int sdepth = CV_MAT_DEPTH(srcType);
int cn = CV_MAT_CN(srcType), sumType = CV_64F;
if( sdepth < CV_32S && (!normalize ||
if( sdepth <= CV_32S && (!normalize ||
ksize.width*ksize.height <= (sdepth == CV_8U ? (1<<23) :
sdepth == CV_16U ? (1 << 15) : (1 << 16))) )
sumType = CV_32S;
......
......@@ -177,12 +177,13 @@ int CvMLData::read_csv(const char* filename)
ptr++;
}
cols_count++;
if ( cols_count == 0)
{
fclose(file);
return -1;
}
cols_count++;
// create temporary memory storage to store the whole database
el_ptr = new float[cols_count];
......
......@@ -259,12 +259,12 @@ groupRectangles
Groups the object candidate rectangles.
.. ocv:function:: void groupRectangles(vector<Rect>& rectList, int groupThreshold, double eps=0.2)
.. ocv:function:: void groupRectangles(vector<Rect>& rectList, vector<int>& weights, int groupThreshold, double eps=0.2)
.. ocv:pyfunction:: cv2.groupRectangles(rectList, groupThreshold[, eps]) -> None
.. ocv:pyfunction:: cv2.groupRectangles(rectList, groupThreshold[, eps]) -> weights
.. ocv:pyfunction:: cv2.groupRectangles(rectList, groupThreshold, eps, weights, levelWeights) -> None
.. ocv:pyfunction:: cv2.groupRectangles(rectList, groupThreshold[, eps]) -> rectList, weights
:param rectList: Input/output vector of rectangles. Output vector includes retained and grouped rectangles.
:param rectList: Input/output vector of rectangles. Output vector includes retained and grouped rectangles. (The Python list is not modified in place.)
:param groupThreshold: Minimum possible number of rectangles minus 1. The threshold is used in a group of rectangles to retain it.
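A minimal C++ sketch with made-up candidate rectangles ::

    std::vector<cv::Rect> rects;
    rects.push_back(cv::Rect(10, 10, 50, 50));  // two near-identical candidates
    rects.push_back(cv::Rect(12, 11, 50, 50));
    rects.push_back(cv::Rect(80, 80, 40, 40));  // an unsupported singleton

    // groupThreshold=1: keep only clusters of at least 2 similar rectangles
    cv::groupRectangles(rects, 1, 0.2);
    // rects now holds one merged rectangle; the singleton is rejected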
......
......@@ -57,6 +57,18 @@ private:
PyThreadState* _state;
};
class PyEnsureGIL
{
public:
PyEnsureGIL() : _state(PyGILState_Ensure()) {}
~PyEnsureGIL()
{
PyGILState_Release(_state);
}
private:
PyGILState_STATE _state;
};
#define ERRWRAP2(expr) \
try \
{ \
......@@ -139,6 +151,8 @@ public:
void allocate(int dims, const int* sizes, int type, int*& refcount,
uchar*& datastart, uchar*& data, size_t* step)
{
PyEnsureGIL gil;
int depth = CV_MAT_DEPTH(type);
int cn = CV_MAT_CN(type);
const int f = (int)(sizeof(size_t)/8);
......@@ -169,6 +183,7 @@ public:
void deallocate(int* refcount, uchar* datastart, uchar* data)
{
PyEnsureGIL gil;
if( !refcount )
return;
PyObject* o = pyObjectFromRefcount(refcount);
......
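The guard matters because the Python wrapper releases the GIL around long OpenCV calls, yet Mat::create() can re-enter the NumPy-backed allocator above from a thread that no longer holds it. A sketch of that interplay against the raw CPython API (the function is hypothetical):

#include <Python.h>
#include <opencv2/core/core.hpp>

static void heavy_call(cv::Mat& dst)
{
    PyThreadState* state = PyEval_SaveThread(); // drop the GIL for the C++ work
    dst.create(1024, 1024, CV_8UC1);            // may call allocate() above, which
                                                // re-acquires the GIL via PyEnsureGIL
    PyEval_RestoreThread(state);                // take the GIL back
}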
......@@ -15,9 +15,9 @@ Calculates an optical flow for a sparse feature set using the iterative Lucas-Ka
.. ocv:cfunction:: void cvCalcOpticalFlowPyrLK( const CvArr* prev, const CvArr* curr, CvArr* prevPyr, CvArr* currPyr, const CvPoint2D32f* prevFeatures, CvPoint2D32f* currFeatures, int count, CvSize winSize, int level, char* status, float* trackError, CvTermCriteria criteria, int flags )
.. ocv:pyoldfunction:: cv.CalcOpticalFlowPyrLK( prev, curr, prevPyr, currPyr, prevFeatures, winSize, level, criteria, flags, guesses=None) -> (currFeatures, status, trackError)
:param prevImg: First 8-bit single-channel or 3-channel input image.
:param prevImg: First 8-bit input image or pyramid constructed by :ocv:func:`buildOpticalFlowPyramid`.
:param nextImg: Second input image of the same size and the same type as ``prevImg`` .
:param nextImg: Second input image or pyramid of the same size and the same type as ``prevImg``.
:param prevPts: Vector of 2D points for which the flow needs to be found. The point coordinates must be single-precision floating-point numbers.
......@@ -29,27 +29,51 @@ Calculates an optical flow for a sparse feature set using the iterative Lucas-Ka
:param winSize: Size of the search window at each pyramid level.
:param maxLevel: 0-based maximal pyramid level number. If set to 0, pyramids are not used (single level). If set to 1, two levels are used, and so on.
:param maxLevel: 0-based maximal pyramid level number. If set to 0, pyramids are not used (single level). If set to 1, two levels are used, and so on. If pyramids are passed as input, the algorithm uses as many levels as the pyramids have, but no more than ``maxLevel``.
:param criteria: Parameter specifying the termination criteria of the iterative search algorithm (after the specified maximum number of iterations ``criteria.maxCount`` or when the search window moves by less than ``criteria.epsilon``).
:param flags: Operation flags:
* **OPTFLOW_USE_INITIAL_FLOW** Use initial estimations stored in ``nextPts``. If the flag is not set, then ``prevPts`` is copied to ``nextPts`` and is considered the initial estimate.
* **OPTFLOW_LK_GET_MIN_EIGENVALS** Use minimum eigenvalues as an error measure (see ``minEigThreshold`` description). If the flag is not set, then the L1 distance between patches around the original and a moved point, divided by the number of pixels in a window, is used as an error measure.
:param minEigThreshold: The algorithm computes a minimum eigenvalue of a 2x2 normal matrix of optical flow equations (this matrix is called a spatial gradient matrix in [Bouguet00]_), divided by the number of pixels in a window. If this value is less than ``minEigThreshold``, the corresponding feature is filtered out and its flow is not computed, which removes bad points early and speeds up the computation.
The function implements a sparse iterative version of the Lucas-Kanade optical flow in pyramids. See [Bouguet00]_. The function is parallelized with the TBB library.
buildOpticalFlowPyramid
-----------------------
Constructs the image pyramid which can be passed to :ocv:func:`calcOpticalFlowPyrLK`.
.. ocv:function:: int buildOpticalFlowPyramid(InputArray img, OutputArrayOfArrays pyramid, Size winSize, int maxLevel, bool withDerivatives = true, int pyrBorder = BORDER_REFLECT_101, int derivBorder = BORDER_CONSTANT, bool tryReuseInputImage = true)
.. ocv:pyfunction:: cv2.buildOpticalFlowPyramid(img, winSize, maxLevel[, pyramid[, withDerivatives[, pyrBorder[, derivBorder[, tryReuseInputImage]]]]]) -> retval, pyramid
:param img: 8-bit input image.
:param pyramid: output pyramid.
:param winSize: window size of the optical flow algorithm. Must be no less than the ``winSize`` argument of :ocv:func:`calcOpticalFlowPyrLK`. It is needed to calculate the required padding for pyramid levels.
:param maxLevel: 0-based maximal pyramid level number.
:param withDerivatives: set to precompute gradients for every pyramid level. If the pyramid is constructed without the gradients, then :ocv:func:`calcOpticalFlowPyrLK` will calculate them internally.
:param pyrBorder: the border mode for pyramid layers.
:param derivBorder: the border mode for gradients.
:param tryReuseInputImage: put ROI of input image into the pyramid if possible. You can pass ``false`` to force data copying.
:return: number of levels in constructed pyramid. Can be less than ``maxLevel``.
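A sketch of the intended usage: build each pyramid once, then hand both to :ocv:func:`calcOpticalFlowPyrLK` (``prevGray`` and ``nextGray`` stand for consecutive 8-bit frames) ::

    #include <opencv2/video/tracking.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <algorithm>

    void trackSparse(const cv::Mat& prevGray, const cv::Mat& nextGray)
    {
        cv::Size winSize(21, 21);
        std::vector<cv::Mat> prevPyr, nextPyr;
        int l1 = cv::buildOpticalFlowPyramid(prevGray, prevPyr, winSize, 3);
        int l2 = cv::buildOpticalFlowPyramid(nextGray, nextPyr, winSize, 3);

        std::vector<cv::Point2f> prevPts, nextPts;
        cv::goodFeaturesToTrack(prevGray, prevPts, 200, 0.01, 10);

        std::vector<uchar> status;
        std::vector<float> err;
        cv::calcOpticalFlowPyrLK(prevPyr, nextPyr, prevPts, nextPts, status, err,
                                 winSize, std::min(l1, l2));
    }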
calcOpticalFlowFarneback
----------------------------
Computes a dense optical flow using Gunnar Farneback's algorithm.
.. ocv:function:: void calcOpticalFlowFarneback( InputArray prevImg, InputArray nextImg, InputOutputArray flow, double pyrScale, int levels, int winsize, int iterations, int polyN, double polySigma, int flags )
.. ocv:cfunction:: void cvCalcOpticalFlowFarneback( const CvArr* prevImg, const CvArr* nextImg, CvArr* flow, double pyrScale, int levels, int winsize, int iterations, int polyN, double polySigma, int flags )
......
......@@ -53,7 +53,7 @@ static void calcSharrDeriv(const cv::Mat& src, cv::Mat& dst)
CV_Assert(depth == CV_8U);
dst.create(rows, cols, CV_MAKETYPE(DataType<deriv_type>::depth, cn*2));
#ifdef HAVE_TEGRA_OPTIMIZATION
if (tegra::calcSharrDeriv(src, dst))
return;
#endif
......@@ -655,6 +655,9 @@ void cv::calcOpticalFlowPyrLK( InputArray _prevImg, InputArray _nextImg,
&& ofs.x + prevPyr[lvlStep1].cols + winSize.width <= fullSize.width
&& ofs.y + prevPyr[lvlStep1].rows + winSize.height <= fullSize.height);
}
if(levels1 < maxLevel)
maxLevel = levels1;
}
if(_nextImg.kind() == _InputArray::STD_VECTOR_MAT)
......@@ -680,19 +683,16 @@ void cv::calcOpticalFlowPyrLK( InputArray _prevImg, InputArray _nextImg,
&& ofs.x + nextPyr[lvlStep2].cols + winSize.width <= fullSize.width
&& ofs.y + nextPyr[lvlStep2].rows + winSize.height <= fullSize.height);
}
}
if(levels1 >= 0 || levels2 >= 0)
maxLevel = std::max(levels1, levels2);
if(levels2 < maxLevel)
maxLevel = levels2;
}
if (levels1 < 0)
maxLevel = levels1 = buildOpticalFlowPyramid(_prevImg, prevPyr, winSize, maxLevel, false);
maxLevel = buildOpticalFlowPyramid(_prevImg, prevPyr, winSize, maxLevel, false);
if (levels2 < 0)
levels2 = buildOpticalFlowPyramid(_nextImg, nextPyr, winSize, maxLevel, false);
CV_Assert(levels1 == levels2);
maxLevel = buildOpticalFlowPyramid(_nextImg, nextPyr, winSize, maxLevel, false);
if( (criteria.type & TermCriteria::COUNT) == 0 )
criteria.maxCount = 30;
......
set(sample example-face-detection)
add_android_project(${sample} "${CMAKE_CURRENT_SOURCE_DIR}" LIBRARY_DEPS ${OpenCV_BINARY_DIR} SDK_TARGET 11 ${ANDROID_SDK_TARGET})
if(BUILD_FAT_JAVA_LIB)
set(native_deps opencv_java)
ocv_include_modules(opencv_contrib)
else()
set(native_deps opencv_contrib)
endif()
add_android_project(${sample} "${CMAKE_CURRENT_SOURCE_DIR}" LIBRARY_DEPS ${OpenCV_BINARY_DIR} SDK_TARGET 11 ${ANDROID_SDK_TARGET} NATIVE_DEPS ${native_deps})
if(TARGET ${sample})
add_dependencies(opencv_android_examples ${sample})
endif()
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
OPENCV_CAMERA_MODULES:=off
include ../includeOpenCV.mk
ifeq ("$(wildcard $(OPENCV_MK_PATH))","")
#try to load OpenCV.mk from default install location
include $(TOOLCHAIN_PREBUILT_ROOT)/user/share/OpenCV/OpenCV.mk
else
include $(OPENCV_MK_PATH)
endif
LOCAL_SRC_FILES := DetectionBaseTracker.cpp
LOCAL_C_INCLUDES := $(LOCAL_PATH)
LOCAL_LDLIBS += -llog -ldl
LOCAL_MODULE := detection_base_tacker
include $(BUILD_SHARED_LIBRARY)
\ No newline at end of file
APP_STL := gnustl_static
APP_CPPFLAGS := -frtti -fexceptions
APP_ABI := armeabi-v7a
#include <DetectionBaseTracker.h>
#include <opencv2/core/core.hpp>
#include <opencv2/contrib/detection_based_tracker.hpp>
#include <string>
#include <vector>
#include <android/log.h>
#define LOG_TAG "FaceDetection/DetectionBasedTracker"
#define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))
using namespace std;
using namespace cv;
vector<Rect> RectFaces;
inline void vector_Rect_to_Mat(vector<Rect>& v_rect, Mat& mat)
{
mat = Mat(v_rect, true);
}
JNIEXPORT jlong JNICALL Java_org_opencv_samples_fd_DetectionBaseTracker_nativeCreateObject
(JNIEnv * jenv, jclass jobj, jstring jFileName, jint faceSize)
{
const char* jnamestr = jenv->GetStringUTFChars(jFileName, NULL);
string stdFileName(jnamestr);
jlong result = 0;
try
{
DetectionBasedTracker::Parameters DetectorParams;
if (faceSize > 0)
DetectorParams.minObjectSize = faceSize;
result = (jlong)new DetectionBasedTracker(stdFileName, DetectorParams);
}
catch(const cv::Exception& e)
{
LOGD("nativeCreateObject caught cv::Exception: %s", e.what());
jclass je = jenv->FindClass("org/opencv/core/CvException");
if(!je)
je = jenv->FindClass("java/lang/Exception");
jenv->ThrowNew(je, e.what());
}
return result;
}
JNIEXPORT void JNICALL Java_org_opencv_samples_fd_DetectionBaseTracker_nativeDestroyObject
(JNIEnv * jenv, jclass jobj, jlong thiz)
{
try
{
((DetectionBasedTracker*)thiz)->stop();
delete (DetectionBasedTracker*)thiz;
}
catch(const cv::Exception& e)
{
LOGD("nativeDestroyObject caught cv::Exception: %s", e.what());
jclass je = jenv->FindClass("org/opencv/core/CvException");
if(!je)
je = jenv->FindClass("java/lang/Exception");
jenv->ThrowNew(je, e.what());
}
}
JNIEXPORT void JNICALL Java_org_opencv_samples_fd_DetectionBaseTracker_nativeStart
(JNIEnv * jenv, jclass jobj, jlong thiz)
{
try
{
((DetectionBasedTracker*)thiz)->run();
}
catch(const cv::Exception& e)
{
LOGD("nativeStart caught cv::Exception: %s", e.what());
jclass je = jenv->FindClass("org/opencv/core/CvException");
if(!je)
je = jenv->FindClass("java/lang/Exception");
jenv->ThrowNew(je, e.what());
}
}
JNIEXPORT void JNICALL Java_org_opencv_samples_fd_DetectionBaseTracker_nativeStop
(JNIEnv * jenv, jclass jobj, jlong thiz)
{
try
{
((DetectionBasedTracker*)thiz)->stop();
}
catch(const cv::Exception& e)
{
LOGD("nativeStop caught cv::Exception: %s", e.what());
jclass je = jenv->FindClass("org/opencv/core/CvException");
if(!je)
je = jenv->FindClass("java/lang/Exception");
jenv->ThrowNew(je, e.what());
}
}
JNIEXPORT void JNICALL Java_org_opencv_samples_fd_DetectionBaseTracker_nativeSetFaceSize
(JNIEnv * jenv, jclass jobj, jlong thiz, jint faceSize)
{
try
{
if (faceSize > 0)
{
DetectionBasedTracker::Parameters DetectorParams = \
((DetectionBasedTracker*)thiz)->getParameters();
DetectorParams.minObjectSize = faceSize;
((DetectionBasedTracker*)thiz)->setParameters(DetectorParams);
}
}
catch(const cv::Exception& e)
{
LOGD("nativeSetFaceSize caught cv::Exception: %s", e.what());
jclass je = jenv->FindClass("org/opencv/core/CvException");
if(!je)
je = jenv->FindClass("java/lang/Exception");
jenv->ThrowNew(je, e.what());
}
}
JNIEXPORT void JNICALL Java_org_opencv_samples_fd_DetectionBaseTracker_nativeDetect
(JNIEnv * jenv, jclass jobj, jlong thiz, jlong imageGray, jlong faces)
{
try
{
((DetectionBasedTracker*)thiz)->process(*((Mat*)imageGray));
((DetectionBasedTracker*)thiz)->getObjects(RectFaces);
vector_Rect_to_Mat(RectFaces, *((Mat*)faces));
}
catch(const cv::Exception& e)
{
LOGD("nativeDetect caught cv::Exception: %s", e.what());
jclass je = jenv->FindClass("org/opencv/core/CvException");
if(!je)
je = jenv->FindClass("java/lang/Exception");
jenv->ThrowNew(je, e.what());
}
}
\ No newline at end of file
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class org_opencv_samples_fd_DetectionBaseTracker */
#ifndef _Included_org_opencv_samples_fd_DetectionBaseTracker
#define _Included_org_opencv_samples_fd_DetectionBaseTracker
#ifdef __cplusplus
extern "C" {
#endif
/*
* Class: org_opencv_samples_fd_DetectionBaseTracker
* Method: nativeCreateObject
* Signature: (Ljava/lang/String;F)J
*/
JNIEXPORT jlong JNICALL Java_org_opencv_samples_fd_DetectionBaseTracker_nativeCreateObject
(JNIEnv *, jclass, jstring, jint);
/*
* Class: org_opencv_samples_fd_DetectionBaseTracker
* Method: nativeDestroyObject
* Signature: (J)V
*/
JNIEXPORT void JNICALL Java_org_opencv_samples_fd_DetectionBaseTracker_nativeDestroyObject
(JNIEnv *, jclass, jlong);
/*
* Class: org_opencv_samples_fd_DetectionBaseTracker
* Method: nativeStart
* Signature: (J)V
*/
JNIEXPORT void JNICALL Java_org_opencv_samples_fd_DetectionBaseTracker_nativeStart
(JNIEnv *, jclass, jlong);
/*
* Class: org_opencv_samples_fd_DetectionBaseTracker
* Method: nativeStop
* Signature: (J)V
*/
JNIEXPORT void JNICALL Java_org_opencv_samples_fd_DetectionBaseTracker_nativeStop
(JNIEnv *, jclass, jlong);
/*
* Class: org_opencv_samples_fd_DetectionBaseTracker
* Method: nativeSetFaceSize
* Signature: (JI)V
*/
JNIEXPORT void JNICALL Java_org_opencv_samples_fd_DetectionBaseTracker_nativeSetFaceSize
(JNIEnv *, jclass, jlong, jint);
/*
* Class: org_opencv_samples_fd_DetectionBaseTracker
* Method: nativeDetect
* Signature: (JJJ)V
*/
JNIEXPORT void JNICALL Java_org_opencv_samples_fd_DetectionBaseTracker_nativeDetect
(JNIEnv *, jclass, jlong, jlong, jlong);
#ifdef __cplusplus
}
#endif
#endif
package org.opencv.samples.fd;
import org.opencv.core.Mat;
import org.opencv.core.MatOfRect;
public class DetectionBaseTracker
{
public DetectionBaseTracker(String filename, int faceSize)
{
mNativeObj = nativeCreateObject(filename, faceSize);
}
public void start()
{
nativeStart(mNativeObj);
}
public void stop()
{
nativeStop(mNativeObj);
}
public void setMinFaceSize(int faceSize)
{
nativeSetFaceSize(mNativeObj, faceSize);
}
public void detect(Mat imageGray, MatOfRect faces)
{
nativeDetect(mNativeObj, imageGray.getNativeObjAddr(), faces.getNativeObjAddr());
}
public void release()
{
nativeDestroyObject(mNativeObj);
mNativeObj = 0;
}
protected long mNativeObj = 0;
protected static native long nativeCreateObject(String filename, int faceSize);
protected static native void nativeDestroyObject(long thiz);
protected static native void nativeStart(long thiz);
protected static native void nativeStop(long thiz);
protected static native void nativeSetFaceSize(long thiz, int faceSize);
protected static native void nativeDetect(long thiz, long inputImage, long resultMat);
static
{
System.loadLibrary("detection_base_tacker");
}
}
......@@ -16,13 +16,18 @@ public class FdActivity extends Activity {
private MenuItem mItemFace40;
private MenuItem mItemFace30;
private MenuItem mItemFace20;
private MenuItem mItemType;
private FdView mView;
public static float minFaceSize = 0.5f;
private int mDetectorType = 0;
private String[] mDetectorName;
public FdActivity() {
Log.i(TAG, "Instantiated new " + this.getClass());
mDetectorName = new String[2];
mDetectorName[0] = "Cascade";
mDetectorName[1] = "DBT";
}
@Override
......@@ -57,6 +62,7 @@ public class FdActivity extends Activity {
super.onCreate(savedInstanceState);
requestWindowFeature(Window.FEATURE_NO_TITLE);
mView = new FdView(this);
mView.setDetectorType(mDetectorType);
setContentView(mView);
}
......@@ -67,6 +73,8 @@ public class FdActivity extends Activity {
mItemFace40 = menu.add("Face size 40%");
mItemFace30 = menu.add("Face size 30%");
mItemFace20 = menu.add("Face size 20%");
mItemType = menu.add(mDetectorName[mDetectorType]);
return true;
}
......@@ -74,13 +82,19 @@ public class FdActivity extends Activity {
public boolean onOptionsItemSelected(MenuItem item) {
Log.i(TAG, "Menu Item selected " + item);
if (item == mItemFace50)
minFaceSize = 0.5f;
mView.setMinFaceSize(0.5f);
else if (item == mItemFace40)
minFaceSize = 0.4f;
mView.setMinFaceSize(0.4f);
else if (item == mItemFace30)
minFaceSize = 0.3f;
mView.setMinFaceSize(0.3f);
else if (item == mItemFace20)
minFaceSize = 0.2f;
mView.setMinFaceSize(0.2f);
else if (item == mItemType)
{
mDetectorType = (mDetectorType + 1) % mDetectorName.length;
item.setTitle(mDetectorName[mDetectorType]);
mView.setDetectorType(mDetectorType);
}
return true;
}
}
......@@ -23,10 +23,47 @@ import android.view.SurfaceHolder;
class FdView extends SampleCvViewBase {
private static final String TAG = "Sample::FdView";
private Mat mRgba;
private Mat mGray;
private CascadeClassifier mCascade;
private Mat mRgba;
private Mat mGray;
private File mCascadeFile;
private CascadeClassifier mCascade;
private DetectionBaseTracker mTracker;
public final int CASCADE_DETECTOR = 0;
public final int DBT_DETECTOR = 1;
private int mDetectorType = CASCADE_DETECTOR;
public static int mFaceSize = 200;
public void setMinFaceSize(float faceSize)
{
int height = mGray.rows();
if (Math.round(height * faceSize) > 0)
{
mFaceSize = Math.round(height * faceSize);
}
mTracker.setMinFaceSize(mFaceSize);
}
public void setDetectorType(int type)
{
if (mDetectorType != type)
{
mDetectorType = type;
if (type == DBT_DETECTOR)
{
Log.i(TAG, "Detection Base Tracker enabled");
mTracker.start();
}
else
{
Log.i(TAG, "Cascade detectior enabled");
mTracker.stop();
}
}
}
public FdView(Context context) {
super(context);
......@@ -34,8 +71,8 @@ class FdView extends SampleCvViewBase {
try {
InputStream is = context.getResources().openRawResource(R.raw.lbpcascade_frontalface);
File cascadeDir = context.getDir("cascade", Context.MODE_PRIVATE);
File cascadeFile = new File(cascadeDir, "lbpcascade_frontalface.xml");
FileOutputStream os = new FileOutputStream(cascadeFile);
mCascadeFile = new File(cascadeDir, "lbpcascade_frontalface.xml");
FileOutputStream os = new FileOutputStream(mCascadeFile);
byte[] buffer = new byte[4096];
int bytesRead;
......@@ -45,14 +82,15 @@ class FdView extends SampleCvViewBase {
is.close();
os.close();
mCascade = new CascadeClassifier(cascadeFile.getAbsolutePath());
mCascade = new CascadeClassifier(mCascadeFile.getAbsolutePath());
if (mCascade.empty()) {
Log.e(TAG, "Failed to load cascade classifier");
mCascade = null;
} else
Log.i(TAG, "Loaded cascade classifier from " + cascadeFile.getAbsolutePath());
Log.i(TAG, "Loaded cascade classifier from " + mCascadeFile.getAbsolutePath());
cascadeFile.delete();
mTracker = new DetectionBaseTracker(mCascadeFile.getAbsolutePath(), 0);
cascadeDir.delete();
} catch (IOException e) {
......@@ -77,16 +115,26 @@ class FdView extends SampleCvViewBase {
capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
capture.retrieve(mGray, Highgui.CV_CAP_ANDROID_GREY_FRAME);
if (mCascade != null) {
int height = mGray.rows();
int faceSize = Math.round(height * FdActivity.minFaceSize);
MatOfRect faces = new MatOfRect();
mCascade.detectMultiScale(mGray, faces, 1.1, 2, 2 // TODO: objdetect.CV_HAAR_SCALE_IMAGE
, new Size(faceSize, faceSize), new Size());
for (Rect r : faces.toArray())
Core.rectangle(mRgba, r.tl(), r.br(), new Scalar(0, 255, 0, 255), 3);
MatOfRect faces = new MatOfRect();
if (mDetectorType == CASCADE_DETECTOR)
{
if (mCascade != null)
mCascade.detectMultiScale(mGray, faces, 1.1, 2, 2 // TODO: objdetect.CV_HAAR_SCALE_IMAGE
, new Size(mFaceSize, mFaceSize), new Size());
}
else if (mDetectorType == DBT_DETECTOR)
{
if (mTracker != null)
mTracker.detect(mGray, faces);
}
else
{
Log.e(TAG, "Detection method is not selected!");
}
for (Rect r : faces.toArray())
Core.rectangle(mRgba, r.tl(), r.br(), new Scalar(0, 255, 0, 255), 3);
Bitmap bmp = Bitmap.createBitmap(mRgba.cols(), mRgba.rows(), Bitmap.Config.RGB_565/*.ARGB_8888*/);
......@@ -110,9 +158,14 @@ class FdView extends SampleCvViewBase {
mRgba.release();
if (mGray != null)
mGray.release();
if (mCascadeFile != null)
mCascadeFile.delete();
if (mTracker != null)
mTracker.release();
mRgba = null;
mGray = null;
mCascadeFile = null;
}
}
}
......@@ -56,8 +56,8 @@ def mtx2rvec(R):
return axis * np.arctan2(s, c)
def draw_str(dst, (x, y), s):
cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, linetype=cv2.CV_AA)
cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), linetype=cv2.CV_AA)
cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.CV_AA)
cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.CV_AA)
class Sketcher:
def __init__(self, windowname, dests, colors_func):
......
......@@ -35,9 +35,10 @@ if __name__ == '__main__':
points, ref_distrs = make_gaussians(cluster_n, img_size)
print 'EM (opencv) ...'
em = cv2.EM(points, params = dict( nclusters = cluster_n, cov_mat_type = cv2.EM_COV_MAT_GENERIC) )
means = em.getMeans()
covs = em.getCovs()
em = cv2.EM(cluster_n, cv2.EM_COV_MAT_GENERIC)
em.train(points)
means = em.getMat('means')
covs = em.getMatVector('covs')
found_distrs = zip(means, covs)
print 'ready!\n'
......
......@@ -25,8 +25,7 @@ from common import draw_str
lk_params = dict( winSize = (19, 19),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03),
derivLambda = 0.0 )
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
feature_params = dict( maxCorners = 1000,
qualityLevel = 0.01,
......
......@@ -24,8 +24,7 @@ from time import clock
lk_params = dict( winSize = (15, 15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03),
derivLambda = 0.0 )
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
feature_params = dict( maxCorners = 500,
qualityLevel = 0.3,
......
help='''
'''
Data matrix detector sample.
Usage:
video_dmtx {<video device number>|<video file name>}
......@@ -52,7 +52,7 @@ def data_matrix_demo(cap):
if __name__ == '__main__':
print help
print __doc__
if len(sys.argv) == 1:
cap = cv2.VideoCapture(0)
......
import numpy as np
import cv2
from Queue import Queue
from threading import Thread
from collections import deque
class Worker(Thread):
def __init__(self, tasks):
Thread.__init__(self)
self.tasks = tasks
self.daemon = True
self.start()
def run(self):
while True:
func, args, kargs = self.tasks.get()
try: func(*args, **kargs)
except Exception, e: print e
self.tasks.task_done()
class ThreadPool:
def __init__(self, num_threads):
self.tasks = Queue(num_threads)
for _ in range(num_threads): Worker(self.tasks)
def add_task(self, func, *args, **kargs):
self.tasks.put((func, args, kargs))
def wait_completion(self):
self.tasks.join()
if __name__ == '__main__':
results = deque()
def process_frame(i, frame):
global results
res = cv2.medianBlur(frame, 15)
results.append((i, res))
pool = ThreadPool(4)
cap = cv2.VideoCapture(0)
frame_count = 0
last_frame = None
last_count = -1
while True:
ret, frame = cap.read()
pool.add_task(process_frame, frame_count, frame.copy())
frame_count += 1
while len(results) > 0:
i, frame = results.popleft()
if i > last_count:
last_count, last_frame = i, frame
if last_frame is not None:
cv2.imshow('res', last_frame)
if cv2.waitKey(1) == 27:
break
pool.wait_completion()