Commit d6534912 authored by Olexa Bilaniuk

Merge remote-tracking branch 'refs/remotes/upstream/master'

parents 2113636d 361eb633
SET(deps opencv_core opencv_highgui opencv_imgproc opencv_imgcodecs opencv_videoio)
ocv_check_dependencies(${deps})
SET(OPENCV_ANNOTATION_DEPS opencv_core opencv_highgui opencv_imgproc opencv_imgcodecs opencv_videoio)
ocv_check_dependencies(${OPENCV_ANNOTATION_DEPS})
if(NOT OCV_DEPENDENCIES_FOUND)
return()
endif()
project(annotation)
set(the_target opencv_annotation)
ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}" "${OpenCV_SOURCE_DIR}/include/opencv")
ocv_include_modules(${deps})
ocv_target_include_directories(${the_target} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}" "${OpenCV_SOURCE_DIR}/include/opencv")
ocv_target_include_modules(${the_target} ${OPENCV_ANNOTATION_DEPS})
set(the_target opencv_annotation)
file(GLOB SRCS *.cpp)
add_executable(${the_target} opencv_annotation.cpp)
target_link_libraries(${the_target} ${deps})
set(annotation_files ${SRCS})
ocv_add_executable(${the_target} ${annotation_files})
ocv_target_link_libraries(${the_target} ${OPENCV_ANNOTATION_DEPS})
set_target_properties(${the_target} PROPERTIES
DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
......
......@@ -49,7 +49,7 @@ pyopencv_generated_\*.h files). But there may be some basic OpenCV datatypes lik
Size. These need to be extended manually. For example, a Mat should be extended to a Numpy array,
a Size to a tuple of two integers, and so on. Similarly, there may be some complex
structs/classes/functions etc. which need to be extended manually. All such manual wrapper functions
are placed in modules/python/src2/pycv2.hpp.
are placed in modules/python/src2/cv2.cpp.
So now the only thing left is the compilation of these wrapper files, which gives us the **cv2** module. So
when you call a function, say res = equalizeHist(img1,img2) in Python, you pass two numpy arrays and
......
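To make the manual-extension idea concrete, here is a schematic sketch of what a hand-written wrapper in modules/python/src2/cv2.cpp looks like. The helper names pyopencv_to and pyopencv_from follow that file's conventions, but the body below is a simplified illustration, not the actual implementation:

#include <Python.h>
#include <opencv2/imgproc.hpp>

// Assumed helpers from cv2.cpp: convert a numpy array to cv::Mat and back.
bool pyopencv_to(PyObject* o, cv::Mat& m);
PyObject* pyopencv_from(const cv::Mat& m);

// Simplified sketch of a wrapper for cv2.equalizeHist(img).
static PyObject* pyopencv_cv_equalizeHist(PyObject*, PyObject* args)
{
    PyObject* pyobj_src = NULL;
    cv::Mat src, dst;
    if (PyArg_ParseTuple(args, "O:equalizeHist", &pyobj_src) &&
        pyopencv_to(pyobj_src, src))          // numpy array -> cv::Mat
    {
        cv::equalizeHist(src, dst);           // call the C++ function
        return pyopencv_from(dst);            // cv::Mat -> numpy array
    }
    return NULL;                              // parse or conversion failed
}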
......@@ -54,25 +54,19 @@ int main( int argc, char** argv )
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
//-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
Ptr<SURF> detector = SURF::create();
detector->setHessianThreshold(minHessian);
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
detector->detectAndCompute( img_1, Mat(), keypoints_1, descriptors_1 );
detector->detectAndCompute( img_2, Mat(), keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors using FLANN matcher
//-- Step 2: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
......
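For reference, the updated calls assemble into the following minimal program; this is a sketch only, assuming the opencv_contrib xfeatures2d module is available and using placeholder image paths:

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <iostream>
using namespace cv;
using cv::xfeatures2d::SURF;

int main()
{
    Mat img_1 = imread("box.png", IMREAD_GRAYSCALE);           // placeholder paths
    Mat img_2 = imread("box_in_scene.png", IMREAD_GRAYSCALE);
    if (img_1.empty() || img_2.empty())
    { std::cout << " --(!) Error reading images " << std::endl; return -1; }

    // One cv::Ptr-held algorithm replaces the old SurfFeatureDetector /
    // SurfDescriptorExtractor pair; detection and description are one call.
    Ptr<SURF> detector = SURF::create();
    detector->setHessianThreshold(400);

    std::vector<KeyPoint> keypoints_1, keypoints_2;
    Mat descriptors_1, descriptors_2;
    detector->detectAndCompute(img_1, Mat(), keypoints_1, descriptors_1);
    detector->detectAndCompute(img_2, Mat(), keypoints_2, descriptors_2);

    FlannBasedMatcher matcher;
    std::vector<DMatch> matches;
    matcher.match(descriptors_1, descriptors_2, matches);
    std::cout << "Found " << matches.size() << " matches" << std::endl;
    return 0;
}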
......@@ -42,25 +42,18 @@ int main( int argc, char** argv )
if( !img_object.data || !img_scene.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
//-- Step 1: Detect the keypoints and extract descriptors using SURF
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
Ptr<SURF> detector = SURF::create( minHessian );
std::vector<KeyPoint> keypoints_object, keypoints_scene;
detector.detect( img_object, keypoints_object );
detector.detect( img_scene, keypoints_scene );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute( img_object, keypoints_object, descriptors_object );
extractor.compute( img_scene, keypoints_scene, descriptors_scene );
detector->detectAndCompute( img_object, Mat(), keypoints_object, descriptors_object );
detector->detectAndCompute( img_scene, Mat(), keypoints_scene, descriptors_scene );
//-- Step 3: Matching descriptor vectors using FLANN matcher
//-- Step 2: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_object, descriptors_scene, matches );
......
......@@ -184,6 +184,7 @@ public:
// After fix restore code in arithm.cpp: ocl_compare()
inline bool isAMD() const { return vendorID() == VENDOR_AMD; }
inline bool isIntel() const { return vendorID() == VENDOR_INTEL; }
inline bool isNVidia() const { return vendorID() == VENDOR_NVIDIA; }
int maxClockFrequency() const;
int maxComputeUnits() const;
......
......@@ -180,10 +180,9 @@ public:
const int K = centers.rows;
const int dims = centers.cols;
const float *sample;
for( int i = begin; i<end; ++i)
{
sample = data.ptr<float>(i);
const float *sample = data.ptr<float>(i);
int k_best = 0;
double min_dist = DBL_MAX;
......
......@@ -205,9 +205,12 @@ public:
void deallocate(UMatData* u) const
{
if(!u)
return;
CV_Assert(u->urefcount >= 0);
CV_Assert(u->refcount >= 0);
if(u && u->refcount == 0)
if(u->refcount == 0)
{
if( !(u->flags & UMatData::USER_ALLOCATED) )
{
......
......@@ -2114,6 +2114,12 @@ static bool ocl_minMaxIdx( InputArray _src, double* minVal, double* maxVal, int*
int ddepth = -1, bool absValues = false, InputArray _src2 = noArray(), double * maxVal2 = NULL)
{
const ocl::Device & dev = ocl::Device::getDefault();
#ifdef ANDROID
if (dev.isNVidia())
return false;
#endif
bool doubleSupport = dev.doubleFPConfig() > 0, haveMask = !_mask.empty(),
haveSrc2 = _src2.kind() != _InputArray::NONE;
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type),
......@@ -2885,6 +2891,12 @@ static NormDiffFunc getNormDiffFunc(int normType, int depth)
static bool ocl_norm( InputArray _src, int normType, InputArray _mask, double & result )
{
const ocl::Device & d = ocl::Device::getDefault();
#ifdef ANDROID
if (d.isNVidia())
return false;
#endif
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
bool doubleSupport = d.doubleFPConfig() > 0,
haveMask = _mask.kind() != _InputArray::NONE;
......@@ -3250,6 +3262,11 @@ namespace cv {
static bool ocl_norm( InputArray _src1, InputArray _src2, int normType, InputArray _mask, double & result )
{
#ifdef ANDROID
if (ocl::Device::getDefault().isNVidia())
return false;
#endif
Scalar sc1, sc2;
int type = _src1.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
bool relative = (normType & NORM_RELATIVE) != 0;
......
......@@ -549,13 +549,9 @@ String tempfile( const char* suffix )
#if defined WIN32 || defined _WIN32
#ifdef WINRT
RoInitialize(RO_INIT_MULTITHREADED);
std::wstring temp_dir = L"";
const wchar_t* opencv_temp_dir = GetTempPathWinRT().c_str();
if (opencv_temp_dir)
temp_dir = std::wstring(opencv_temp_dir);
std::wstring temp_dir = GetTempPathWinRT();
std::wstring temp_file;
temp_file = GetTempFileNameWinRT(L"ocv");
std::wstring temp_file = GetTempFileNameWinRT(L"ocv");
if (temp_file.empty())
return String();
......
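The dropped WinRT lines held on to c_str() of a temporary std::wstring, a pointer that dangles as soon as the statement finishes; the rewrite keeps the wstring itself. A standalone illustration of the hazard (hypothetical helper, not from the patch):

#include <string>

std::wstring GetTempPathSomehow() { return L"C:\\Temp\\"; }  // stand-in helper

void example()
{
    // BAD: the temporary wstring dies at the end of this statement,
    // leaving p dangling.
    const wchar_t* p = GetTempPathSomehow().c_str();

    // OK: the named wstring owns the buffer for as long as it is in scope.
    std::wstring dir = GetTempPathSomehow();
    const wchar_t* q = dir.c_str();

    (void)p; (void)q;
}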
......@@ -331,7 +331,11 @@ OCL_TEST_P(Mul, Mat_Scale)
OCL_OFF(cv::multiply(src1_roi, src2_roi, dst1_roi, val[0]));
OCL_ON(cv::multiply(usrc1_roi, usrc2_roi, udst1_roi, val[0]));
#ifdef ANDROID
Near(udst1_roi.depth() >= CV_32F ? 2e-1 : 1);
#else
Near(udst1_roi.depth() >= CV_32F ? 1e-3 : 1);
#endif
}
}
......
......@@ -61,6 +61,12 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Full, ORB_IMAGES)
string filename = getDataPath(GetParam());
Mat mframe = imread(filename, IMREAD_GRAYSCALE);
double desc_eps = 1e-6;
#ifdef ANDROID
if (cv::ocl::Device::getDefault().isNVidia())
desc_eps = 2;
#endif
if (mframe.empty())
FAIL() << "Unable to load source image " << filename;
......@@ -77,7 +83,7 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Full, ORB_IMAGES)
::perf::sort(points, descriptors);
SANITY_CHECK_KEYPOINTS(points, 1e-5);
SANITY_CHECK(descriptors);
SANITY_CHECK(descriptors, desc_eps);
}
} // ocl
......
......@@ -172,7 +172,14 @@ namespace cv
cvtColor(image, img, COLOR_BGR2GRAY);
Mat img1_32;
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
if ( img.depth() == CV_32F )
img1_32 = img;
else if ( img.depth() == CV_8U )
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
else if ( img.depth() == CV_16U )
img.convertTo(img1_32, CV_32F, 1.0 / 65535.0, 0);
CV_Assert( ! img1_32.empty() );
AKAZEOptions options;
options.descriptor = descriptor;
......
......@@ -1221,7 +1221,22 @@ CV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth,
bool normalize = true,
int borderType = BORDER_DEFAULT );
/** @todo document
/** @brief Calculates the normalized sum of squares of the pixel values overlapping the filter.
For every pixel \f$ (x, y) \f$ in the source image, the function calculates the sum of squares of those neighboring
pixel values which overlap the filter placed over the pixel \f$ (x, y) \f$.
The unnormalized square box filter can be useful in computing local image statistics such as the local
variance and standard deviation around the neighborhood of a pixel.
@param _src input image
@param _dst output image of the same size and type as _src
@param ddepth the output image depth (-1 to use src.depth())
@param ksize kernel size
@param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at the kernel
center.
@param normalize flag, specifying whether the kernel is to be normalized by its area or not.
@param borderType border mode used to extrapolate pixels outside of the image, see cv::BorderTypes
@sa boxFilter
*/
CV_EXPORTS_W void sqrBoxFilter( InputArray _src, OutputArray _dst, int ddepth,
......
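The local-variance use case the new documentation mentions can be made concrete. A minimal sketch (not part of this patch) combining boxFilter and sqrBoxFilter via Var(I) = E[I^2] - (E[I])^2:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Per-pixel variance of a single-channel image over a ksize x ksize window:
// the normalized square box sum gives E[I^2], the normalized box sum E[I].
cv::Mat localVariance(const cv::Mat& src, int ksize)
{
    cv::Mat mean, sqmean;
    cv::boxFilter(src, mean, CV_32F, cv::Size(ksize, ksize));
    cv::sqrBoxFilter(src, sqmean, CV_32F, cv::Size(ksize, ksize),
                     cv::Point(-1, -1), true /* normalize */);
    return sqmean - mean.mul(mean);   // E[I^2] - (E[I])^2
}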
......@@ -1548,7 +1548,7 @@ static bool ocl_morphOp(InputArray _src, OutputArray _dst, InputArray _kernel,
return true;
}
#if defined ANDROID
#ifdef ANDROID
size_t localThreads[2] = { 16, 8 };
#else
size_t localThreads[2] = { 16, 16 };
......@@ -1563,6 +1563,11 @@ static bool ocl_morphOp(InputArray _src, OutputArray _dst, InputArray _kernel,
if (localThreads[0]*localThreads[1] * 2 < (localThreads[0] + ksize.width - 1) * (localThreads[1] + ksize.height - 1))
return false;
#ifdef ANDROID
if (dev.isNVidia())
return false;
#endif
// build processing
String processing;
Mat kernel8u;
......
......@@ -2966,6 +2966,11 @@ static bool ocl_bilateralFilter_8u(InputArray _src, OutputArray _dst, int d,
double sigma_color, double sigma_space,
int borderType)
{
#ifdef ANDROID
if (ocl::Device::getDefault().isNVidia())
return false;
#endif
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
int i, j, maxk, radius;
......
......@@ -99,12 +99,17 @@ OCL_TEST_P(Canny, Accuracy)
generateTestData();
const double low_thresh = 50.0, high_thresh = 100.0;
double eps = 1e-2;
#ifdef ANDROID
if (cv::ocl::Device::getDefault().isNVidia())
eps = 12e-3;
#endif
OCL_OFF(cv::Canny(src_roi, dst_roi, low_thresh, high_thresh, apperture_size, useL2gradient));
OCL_ON(cv::Canny(usrc_roi, udst_roi, low_thresh, high_thresh, apperture_size, useL2gradient));
EXPECT_MAT_SIMILAR(dst_roi, udst_roi, 1e-2);
EXPECT_MAT_SIMILAR(dst, udst, 1e-2);
EXPECT_MAT_SIMILAR(dst_roi, udst_roi, eps);
EXPECT_MAT_SIMILAR(dst, udst, eps);
}
OCL_INSTANTIATE_TEST_CASE_P(ImgProc, Canny, testing::Combine(
......
......@@ -128,7 +128,7 @@ OCL_TEST_P(CvtColor, BGR2GRAY) { performTest(3, 1, CVTCODE(BGR2GRAY)); }
OCL_TEST_P(CvtColor, GRAY2BGR) { performTest(1, 3, CVTCODE(GRAY2BGR)); }
OCL_TEST_P(CvtColor, RGBA2GRAY) { performTest(4, 1, CVTCODE(RGBA2GRAY)); }
OCL_TEST_P(CvtColor, GRAY2RGBA) { performTest(1, 4, CVTCODE(GRAY2RGBA)); }
OCL_TEST_P(CvtColor, BGRA2GRAY) { performTest(4, 1, CVTCODE(BGRA2GRAY)); }
OCL_TEST_P(CvtColor, BGRA2GRAY) { performTest(4, 1, CVTCODE(BGRA2GRAY), cv::ocl::Device::getDefault().isNVidia() ? 1 : 1e-3); }
OCL_TEST_P(CvtColor, GRAY2BGRA) { performTest(1, 4, CVTCODE(GRAY2BGRA)); }
// RGB <-> YUV
......
......@@ -319,10 +319,17 @@ OCL_TEST_P(Remap_INTER_LINEAR, Mat)
{
random_roi();
double eps = 2.0;
#ifdef ANDROID
// TODO investigate accuracy
if (cv::ocl::Device::getDefault().isNVidia())
eps = 8.0;
#endif
OCL_OFF(cv::remap(src_roi, dst_roi, map1_roi, map2_roi, INTER_LINEAR, borderType, val));
OCL_ON(cv::remap(usrc_roi, udst_roi, umap1_roi, umap2_roi, INTER_LINEAR, borderType, val));
Near(2.0);
Near(eps);
}
}
......
......@@ -458,10 +458,8 @@ CV_INLINE void
uchar nShadowDetection
)
{
int size=_src.rows*_src.cols;
int nchannels = CV_MAT_CN(_src.type());
const uchar* pDataCurrent=_src.ptr(0);
uchar* pDataOutput=_dst.ptr(0);
//model
uchar* m_aModel=_bgmodel.ptr(0);
uchar* m_nNextLongUpdate=_nNextLongUpdate.ptr(0);
......@@ -509,48 +507,51 @@ CV_INLINE void
if (_nLongCounter >= m_nLongUpdate) _nLongCounter = 0;
//go through the image
for (long i=0;i<size;i++)
long i = 0;
for (long y = 0; y < _src.rows; y++)
{
const uchar* data=pDataCurrent;
pDataCurrent=pDataCurrent+nchannels;
//update model+ background subtract
uchar include=0;
int result= _cvCheckPixelBackgroundNP(i, data, nchannels,
m_nN, m_aModel, m_fTb,m_nkNN, m_fTau,m_bShadowDetection,include);
_cvUpdatePixelBackgroundNP(i,data,nchannels,
m_nN, m_aModel,
m_nNextLongUpdate,
m_nNextMidUpdate,
m_nNextShortUpdate,
m_aModelIndexLong,
m_aModelIndexMid,
m_aModelIndexShort,
m_nLongCounter,
m_nMidCounter,
m_nShortCounter,
m_nLongUpdate,
m_nMidUpdate,
m_nShortUpdate,
include
);
switch (result)
for (long x = 0; x < _src.cols; x++)
{
case 0:
//foreground
(* pDataOutput)=255;
break;
case 1:
//background
(* pDataOutput)=0;
break;
case 2:
//shadow
(* pDataOutput)=nShadowDetection;
break;
const uchar* data = _src.ptr(y, x);
//update model+ background subtract
uchar include=0;
int result= _cvCheckPixelBackgroundNP(i, data, nchannels,
m_nN, m_aModel, m_fTb,m_nkNN, m_fTau,m_bShadowDetection,include);
_cvUpdatePixelBackgroundNP(i,data,nchannels,
m_nN, m_aModel,
m_nNextLongUpdate,
m_nNextMidUpdate,
m_nNextShortUpdate,
m_aModelIndexLong,
m_aModelIndexMid,
m_aModelIndexShort,
m_nLongCounter,
m_nMidCounter,
m_nShortCounter,
m_nLongUpdate,
m_nMidUpdate,
m_nShortUpdate,
include
);
switch (result)
{
case 0:
//foreground
*_dst.ptr(y, x) = 255;
break;
case 1:
//background
*_dst.ptr(y, x) = 0;
break;
case 2:
//shadow
*_dst.ptr(y, x) = nShadowDetection;
break;
}
i++;
}
pDataOutput++;
}
};
......
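The rewritten loop above trades raw pointer arithmetic for Mat::ptr(y, x), presumably so the code stays correct when rows are not stored contiguously (padded rows, ROI views). A minimal standalone sketch of that access pattern:

#include <opencv2/core.hpp>

// Invert an 8-bit single-channel image, addressing each pixel through
// Mat::ptr(row, col), which honors the row stride (Mat::step) instead of
// assuming one contiguous buffer.
void invert8u(const cv::Mat& src, cv::Mat& dst)
{
    CV_Assert(src.type() == CV_8UC1 && dst.type() == CV_8UC1 &&
              src.size() == dst.size());
    for (int y = 0; y < src.rows; y++)
        for (int x = 0; x < src.cols; x++)
            *dst.ptr(y, x) = (uchar)(255 - *src.ptr(y, x));
}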