Commit ca23c0e6, authored 5 years ago by Alexander Alekhin

Merge remote-tracking branch 'upstream/3.4' into merge-3.4

Parents: c48473df, 1d7bfcc9

Showing 24 changed files with 565 additions and 145 deletions (+565 / -145)
apps/version/opencv_version.cpp                                      +23    -0
cmake/OpenCVDetectCXXCompiler.cmake                                   +13    -2
cmake/OpenCVModule.cmake                                               +6    -0
modules/calib3d/include/opencv2/calib3d.hpp                            +6    -0
modules/calib3d/src/fundam.cpp                                        +12    -6
modules/core/CMakeLists.txt                                           +17    -0
modules/core/include/opencv2/core/hal/intrin_vsx.hpp                   +3   -74
modules/core/include/opencv2/core/utils/allocator_stats.impl.hpp      +54   -21
modules/core/include/opencv2/core/utils/buffer_area.private.hpp        +6    -0
modules/core/src/buffer_area.cpp                                      +26   -10
modules/core/src/parallel.cpp                                          +2    -2
modules/dnn/misc/python/pyopencv_dnn.hpp                               +2    -1
modules/dnn/src/dnn.cpp                                                +3    -3
modules/dnn/src/ie_ngraph.cpp                                          +1    -1
modules/dnn/src/onnx/onnx_importer.cpp                                +53    -2
modules/dnn/src/op_inf_engine.cpp                                     +30   -16
modules/dnn/src/op_inf_engine.hpp                                      +1    -1
modules/dnn/src/tensorflow/tf_importer.cpp                            +57    -2
modules/dnn/test/test_onnx_importer.cpp                                +6    -0
modules/dnn/test/test_tf_importer.cpp                                  +7    -0
modules/objdetect/test/test_qrcode.cpp                                 +2    -2
modules/python/CMakeLists.txt                                          +1    -0
samples/cpp/intelligent_scissors.cpp                                 +232    -0
samples/dnn/tf_text_graph_ssd.py                                       +2    -2
apps/version/opencv_version.cpp
@@ -14,6 +14,11 @@
 #include <windows.h>
 #endif

+// defined in core/private.hpp
+namespace cv { CV_EXPORTS const char* currentParallelFramework(); }
+
 static void dumpHWFeatures(bool showAll = false)
 {
     std::cout << "OpenCV's HW features list:" << std::endl;
@@ -34,6 +39,16 @@ static void dumpHWFeatures(bool showAll = false)
     std::cout << "Total available: " << count << std::endl;
 }

+static void dumpParallelFramework()
+{
+    const char* parallelFramework = cv::currentParallelFramework();
+    if (parallelFramework)
+    {
+        int threads = cv::getNumThreads();
+        std::cout << "Parallel framework: " << parallelFramework << " (nthreads=" << threads << ")" << std::endl;
+    }
+}
+
 int main(int argc, const char** argv)
 {
     CV_TRACE_FUNCTION();
@@ -47,6 +62,7 @@ int main(int argc, const char** argv)
         "{ verbose v | | show build configuration log }"
         "{ opencl | | show information about OpenCL (available platforms/devices, default selected device) }"
         "{ hw | | show detected HW features (see cv::checkHardwareSupport() function). Use --hw=0 to show available features only }"
+        "{ threads | | show configured parallel framework and number of active threads }"
     );
     if (parser.has("help"))
@@ -73,10 +89,17 @@ int main(int argc, const char** argv)
     {
         dumpHWFeatures(parser.get<bool>("hw"));
     }
+    if (parser.has("threads"))
+    {
+        dumpParallelFramework();
+    }
 #else
     std::cout << cv::getBuildInformation().c_str() << std::endl;
     cv::dumpOpenCLInformation();
     dumpHWFeatures();
+    dumpParallelFramework();
     MessageBoxA(NULL, "Check console window output", "OpenCV(" CV_VERSION ")", MB_ICONINFORMATION | MB_OK);
 #endif
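A quick way to see what the new --threads flag reports is the following standalone sketch. It is only a sketch: it assumes the program links against opencv_core and it repeats the same forward declaration of cv::currentParallelFramework() that the patch adds above (normally supplied by core/private.hpp).

// Minimal sketch (assumption: built with C++11 and linked against opencv_core).
#include <iostream>
#include <opencv2/core.hpp>

// Same forward declaration the patch adds; normally lives in core/private.hpp.
namespace cv { CV_EXPORTS const char* currentParallelFramework(); }

int main()
{
    const char* fw = cv::currentParallelFramework();
    if (fw)
        std::cout << "Parallel framework: " << fw
                  << " (nthreads=" << cv::getNumThreads() << ")" << std::endl;
    else
        std::cout << "No parallel framework enabled" << std::endl;
    return 0;
}

With this change, running `opencv_version --threads` prints a line in exactly the format built above, e.g. something like "Parallel framework: tbb (nthreads=8)" on a TBB build.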
cmake/OpenCVDetectCXXCompiler.cmake
@@ -215,7 +215,13 @@ if(NOT HAVE_CXX11)
   message(FATAL_ERROR "OpenCV 4.x requires C++11")
 endif()

-if((HAVE_CXX11
+set(__OPENCV_ENABLE_ATOMIC_LONG_LONG OFF)
+if(HAVE_CXX11 AND (X86 OR X86_64))
+  set(__OPENCV_ENABLE_ATOMIC_LONG_LONG ON)
+endif()
+option(OPENCV_ENABLE_ATOMIC_LONG_LONG "Enable C++ compiler support for atomic<long long>" ${__OPENCV_ENABLE_ATOMIC_LONG_LONG})
+
+if((HAVE_CXX11 AND OPENCV_ENABLE_ATOMIC_LONG_LONG
     AND NOT MSVC
     AND NOT (X86 OR X86_64))
   AND NOT OPENCV_SKIP_LIBATOMIC_COMPILER_CHECK)
@@ -226,9 +232,14 @@ if((HAVE_CXX11
     list(APPEND CMAKE_REQUIRED_LIBRARIES atomic)
     ocv_check_compiler_flag(CXX "" HAVE_CXX_ATOMICS_WITH_LIB "${OpenCV_SOURCE_DIR}/cmake/checks/atomic_check.cpp")
     if(HAVE_CXX_ATOMICS_WITH_LIB)
       set(HAVE_ATOMIC_LONG_LONG ON)
       list(APPEND OPENCV_LINKER_LIBS atomic)
     else()
-      message(FATAL_ERROR "C++11 compiler must support std::atomic")
+      message(STATUS "Compiler doesn't support std::atomic<long long>")
     endif()
   else()
     set(HAVE_ATOMIC_LONG_LONG ON)
   endif()
+else(HAVE_CXX11 AND OPENCV_ENABLE_ATOMIC_LONG_LONG)
+  set(HAVE_ATOMIC_LONG_LONG ${OPENCV_ENABLE_ATOMIC_LONG_LONG})
 endif()
cmake/OpenCVModule.cmake
@@ -1115,6 +1115,8 @@ macro(__ocv_parse_test_sources tests_type)
   unset(__currentvar)
 endmacro()

+ocv_check_environment_variables(OPENCV_TEST_EXTRA_CXX_FLAGS_Release)
+
 # this is a command for adding OpenCV performance tests to the module
 # ocv_add_perf_tests(<extra_dependencies>)
 function(ocv_add_perf_tests)
@@ -1279,6 +1281,10 @@ function(ocv_add_accuracy_tests)
       _ocv_add_precompiled_headers(${the_target})
     endif()

+    if(OPENCV_TEST_EXTRA_CXX_FLAGS_Release)
+      target_compile_options(${the_target} PRIVATE "$<$<CONFIG:Release>:${OPENCV_TEST_EXTRA_CXX_FLAGS_Release}>")
+    endif()
+
     ocv_add_test_from_target("${the_target}" "Accuracy" "${the_target}")
   else(OCV_DEPENDENCIES_FOUND)
     # TODO: warn about unsatisfied dependencies
modules/calib3d/include/opencv2/calib3d.hpp
@@ -2138,6 +2138,7 @@ point localization, image resolution, and the image noise.
 @param confidence Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level
 of confidence (probability) that the estimated matrix is correct.
 @param mask
+@param maxIters The maximum number of robust method iterations.

 The epipolar geometry is described by the following equation:
@@ -2171,6 +2172,11 @@ stereoRectifyUncalibrated to compute the rectification transformation. :
     findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
 @endcode
  */
+CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2,
+                                     int method, double ransacReprojThreshold, double confidence,
+                                     int maxIters, OutputArray mask = noArray() );
+
+/** @overload */
 CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2,
                                      int method = FM_RANSAC,
                                      double ransacReprojThreshold = 3., double confidence = 0.99,
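A minimal usage sketch of the new overload declared above, assuming pts1 and pts2 are matched point sets produced elsewhere (the names estimateF, pts1 and pts2 are illustrative only):

// Hedged sketch: parameters other than maxIters match the documented defaults.
#include <opencv2/calib3d.hpp>
#include <vector>

cv::Mat estimateF(const std::vector<cv::Point2f>& pts1,
                  const std::vector<cv::Point2f>& pts2)
{
    cv::Mat inlierMask;
    // Same threshold/confidence as before, plus an explicit RANSAC iteration cap.
    return cv::findFundamentalMat(pts1, pts2, cv::FM_RANSAC,
                                  3.0  /* ransacReprojThreshold */,
                                  0.99 /* confidence */,
                                  2000 /* maxIters */,
                                  inlierMask);
}

The existing overloads keep their behaviour; as the fundam.cpp change below shows, they now simply forward to this variant with maxIters = 1000.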
modules/calib3d/src/fundam.cpp
@@ -809,7 +809,7 @@ public:
 cv::Mat cv::findFundamentalMat( InputArray _points1, InputArray _points2,
                                 int method, double ransacReprojThreshold, double confidence,
-                                OutputArray _mask )
+                                int maxIters, OutputArray _mask )
 {
     CV_INSTRUMENT_REGION();
@@ -861,7 +861,7 @@ cv::Mat cv::findFundamentalMat( InputArray _points1, InputArray _points2,
             confidence = 0.99;

         if( (method & ~3) == FM_RANSAC && npoints >= 15 )
-            result = createRANSACPointSetRegistrator(cb, 7, ransacReprojThreshold, confidence)->run(m1, m2, F, _mask);
+            result = createRANSACPointSetRegistrator(cb, 7, ransacReprojThreshold, confidence, maxIters)->run(m1, m2, F, _mask);
         else
             result = createLMeDSPointSetRegistrator(cb, 7, confidence)->run(m1, m2, F, _mask);
     }
@@ -872,11 +872,17 @@ cv::Mat cv::findFundamentalMat( InputArray _points1, InputArray _points2,
     return F;
 }

-cv::Mat cv::findFundamentalMat( InputArray _points1, InputArray _points2,
-                                OutputArray _mask, int method,
-                                double ransacReprojThreshold, double confidence )
+cv::Mat cv::findFundamentalMat( cv::InputArray points1, cv::InputArray points2,
+                                int method, double ransacReprojThreshold, double confidence,
+                                cv::OutputArray mask )
+{
+    return cv::findFundamentalMat(points1, points2, method, ransacReprojThreshold, confidence, 1000, mask);
+}
+
+cv::Mat cv::findFundamentalMat( cv::InputArray points1, cv::InputArray points2,
+                                cv::OutputArray mask, int method,
+                                double ransacReprojThreshold, double confidence )
 {
-    return cv::findFundamentalMat(_points1, _points2, method, ransacReprojThreshold, confidence, _mask);
+    return cv::findFundamentalMat(points1, points2, method, ransacReprojThreshold, confidence, 1000, mask);
 }
modules/core/CMakeLists.txt
@@ -81,6 +81,23 @@ if(HAVE_MEMALIGN)
   ocv_append_source_file_compile_definitions(${CMAKE_CURRENT_SOURCE_DIR}/src/alloc.cpp "HAVE_MEMALIGN=1")
 endif()

+option(OPENCV_ENABLE_ALLOCATOR_STATS "Enable Allocator metrics" ON)
+
+if(NOT OPENCV_ENABLE_ALLOCATOR_STATS)
+  add_definitions(-DOPENCV_DISABLE_ALLOCATOR_STATS=1)
+else()
+  if(NOT DEFINED OPENCV_ALLOCATOR_STATS_COUNTER_TYPE)
+    if(HAVE_ATOMIC_LONG_LONG AND OPENCV_ENABLE_ATOMIC_LONG_LONG)
+      set(OPENCV_ALLOCATOR_STATS_COUNTER_TYPE "long long")
+    else()
+      set(OPENCV_ALLOCATOR_STATS_COUNTER_TYPE "int")
+    endif()
+  endif()
+  message(STATUS "Allocator metrics storage type: '${OPENCV_ALLOCATOR_STATS_COUNTER_TYPE}'")
+  add_definitions("-DOPENCV_ALLOCATOR_STATS_COUNTER_TYPE=${OPENCV_ALLOCATOR_STATS_COUNTER_TYPE}")
+endif()
+
 ocv_create_module(${extra_libs})

 ocv_target_link_libraries(${the_module} PRIVATE
modules/core/include/opencv2/core/hal/intrin_vsx.hpp
@@ -1564,81 +1564,10 @@ OPENCV_HAL_IMPL_VSX_TRANSPOSE4x4(v_uint32x4, vec_uint4)
 OPENCV_HAL_IMPL_VSX_TRANSPOSE4x4(v_int32x4, vec_int4)
 OPENCV_HAL_IMPL_VSX_TRANSPOSE4x4(v_float32x4, vec_float4)

-template<int i> inline v_int8x16 v_broadcast_element(v_int8x16 v)
-{ return v_int8x16(vec_perm(v.val, v.val, vec_splats((unsigned char)i))); }
-
-template<int i> inline v_uint8x16 v_broadcast_element(v_uint8x16 v)
-{ return v_uint8x16(vec_perm(v.val, v.val, vec_splats((unsigned char)i))); }
-
-template<int i> inline v_int16x8 v_broadcast_element(v_int16x8 v)
-{
-    unsigned char t0 = 2*i, t1 = 2*i+1;
-    vec_uchar16 p = {t0, t1, t0, t1, t0, t1, t0, t1, t0, t1, t0, t1, t0, t1, t0, t1};
-    return v_int16x8(vec_perm(v.val, v.val, p));
-}
-
-template<int i> inline v_uint16x8 v_broadcast_element(v_uint16x8 v)
-{
-    unsigned char t0 = 2*i, t1 = 2*i+1;
-    vec_uchar16 p = {t0, t1, t0, t1, t0, t1, t0, t1, t0, t1, t0, t1, t0, t1, t0, t1};
-    return v_uint16x8(vec_perm(v.val, v.val, p));
-}
-
-template<int i> inline v_int32x4 v_broadcast_element(v_int32x4 v)
-{
-    unsigned char t0 = 4*i, t1 = 4*i+1, t2 = 4*i+2, t3 = 4*i+3;
-    vec_uchar16 p = {t0, t1, t2, t3, t0, t1, t2, t3, t0, t1, t2, t3, t0, t1, t2, t3};
-    return v_int32x4(vec_perm(v.val, v.val, p));
-}
-
+template<int i, typename Tvec>
+inline Tvec v_broadcast_element(const Tvec& v)
+{ return Tvec(vec_splat(v.val, i)); }
-template<int i> inline v_uint32x4 v_broadcast_element(v_uint32x4 v)
-{
-    unsigned char t0 = 4*i, t1 = 4*i+1, t2 = 4*i+2, t3 = 4*i+3;
-    vec_uchar16 p = {t0, t1, t2, t3, t0, t1, t2, t3, t0, t1, t2, t3, t0, t1, t2, t3};
-    return v_uint32x4(vec_perm(v.val, v.val, p));
-}
-
-template<int i> inline v_int64x2 v_broadcast_element(v_int64x2 v)
-{
-    unsigned char t0 = 8*i, t1 = 8*i+1, t2 = 8*i+2, t3 = 8*i+3, t4 = 8*i+4, t5 = 8*i+5, t6 = 8*i+6, t7 = 8*i+7;
-    vec_uchar16 p = {t0, t1, t2, t3, t4, t5, t6, t7, t0, t1, t2, t3, t4, t5, t6, t7};
-    return v_int64x2(vec_perm(v.val, v.val, p));
-}
-
-template<int i> inline v_uint64x2 v_broadcast_element(v_uint64x2 v)
-{
-    unsigned char t0 = 8*i, t1 = 8*i+1, t2 = 8*i+2, t3 = 8*i+3, t4 = 8*i+4, t5 = 8*i+5, t6 = 8*i+6, t7 = 8*i+7;
-    vec_uchar16 p = {t0, t1, t2, t3, t4, t5, t6, t7, t0, t1, t2, t3, t4, t5, t6, t7};
-    return v_uint64x2(vec_perm(v.val, v.val, p));
-}
-
-template<int i> inline v_float32x4 v_broadcast_element(v_float32x4 v)
-{
-    unsigned char t0 = 4*i, t1 = 4*i+1, t2 = 4*i+2, t3 = 4*i+3;
-    vec_uchar16 p = {t0, t1, t2, t3, t0, t1, t2, t3, t0, t1, t2, t3, t0, t1, t2, t3};
-    return v_float32x4(vec_perm(v.val, v.val, p));
-}
-
-template<int i> inline v_float64x2 v_broadcast_element(v_float64x2 v)
-{
-    unsigned char t0 = 8*i, t1 = 8*i+1, t2 = 8*i+2, t3 = 8*i+3, t4 = 8*i+4, t5 = 8*i+5, t6 = 8*i+6, t7 = 8*i+7;
-    vec_uchar16 p = {t0, t1, t2, t3, t4, t5, t6, t7, t0, t1, t2, t3, t4, t5, t6, t7};
-    return v_float64x2(vec_perm(v.val, v.val, p));
-}

 CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
modules/core/include/opencv2/core/utils/allocator_stats.impl.hpp
@@ -11,32 +11,55 @@
 #include <atomic>
 #endif

 //#define OPENCV_DISABLE_ALLOCATOR_STATS

 namespace cv { namespace utils {

+#ifndef OPENCV_ALLOCATOR_STATS_COUNTER_TYPE
+#if defined(__GNUC__) && (\
+    (defined(__SIZEOF_POINTER__) && __SIZEOF_POINTER__ == 4) || \
+    (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) && !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)) \
+)
+#define OPENCV_ALLOCATOR_STATS_COUNTER_TYPE int
+#endif
+#endif
+
+#ifndef OPENCV_ALLOCATOR_STATS_COUNTER_TYPE
+#define OPENCV_ALLOCATOR_STATS_COUNTER_TYPE long long
+#endif
+
 #ifdef CV__ALLOCATOR_STATS_LOG
 namespace {
 #endif

 class AllocatorStatistics : public AllocatorStatisticsInterface
 {
-protected:
-#ifdef CV_CXX11
-    std::atomic<long long> curr, total, total_allocs, peak;
-#else
-    volatile long long curr, total, total_allocs, peak;  // overflow is possible, CV_XADD operates with 'int' only
-#endif
-
 #ifdef OPENCV_DISABLE_ALLOCATOR_STATS
 public:
-    AllocatorStatistics()
-#ifndef CV_CXX11
-        : curr(0), total(0), total_allocs(0), peak(0)
-#endif
-    {}
+    AllocatorStatistics() {}
     ~AllocatorStatistics() CV_OVERRIDE {}

     // AllocatorStatisticsInterface
     uint64_t getCurrentUsage() const CV_OVERRIDE { return 0; }
     uint64_t getTotalUsage() const CV_OVERRIDE { return 0; }
     uint64_t getNumberOfAllocations() const CV_OVERRIDE { return 0; }
     uint64_t getPeakUsage() const CV_OVERRIDE { return 0; }

     /** set peak usage = current usage */
     void resetPeakUsage() CV_OVERRIDE {};

     void onAllocate(size_t /*sz*/) {}
     void onFree(size_t /*sz*/) {}

+#elif defined(CV_CXX11)
+protected:
+    typedef OPENCV_ALLOCATOR_STATS_COUNTER_TYPE counter_t;
+    std::atomic<counter_t> curr, total, total_allocs, peak;
 public:
     AllocatorStatistics() {}
     ~AllocatorStatistics() CV_OVERRIDE {}

-#ifdef CV_CXX11
     uint64_t getCurrentUsage() const CV_OVERRIDE { return (uint64_t)curr.load(); }
     uint64_t getTotalUsage() const CV_OVERRIDE { return (uint64_t)total.load(); }
     uint64_t getNumberOfAllocations() const CV_OVERRIDE { return (uint64_t)total_allocs.load(); }
@@ -52,7 +75,7 @@ public:
         CV__ALLOCATOR_STATS_LOG(cv::format("allocate: %lld (curr=%lld)", (long long int)sz, (long long int)curr.load()));
 #endif

-        long long new_curr = curr.fetch_add((long long)sz) + (long long)sz;
+        counter_t new_curr = curr.fetch_add((counter_t)sz) + (counter_t)sz;

         // peak = std::max((uint64_t)peak, new_curr);
         auto prev_peak = peak.load();
@@ -63,7 +86,7 @@ public:
         }
         // end of peak = max(...)

-        total += (long long)sz;
+        total += (counter_t)sz;
         total_allocs++;
     }
     void onFree(size_t sz)
@@ -71,10 +94,20 @@ public:
 #ifdef CV__ALLOCATOR_STATS_LOG
         CV__ALLOCATOR_STATS_LOG(cv::format("free: %lld (curr=%lld)", (long long int)sz, (long long int)curr.load()));
 #endif
-        curr -= (long long)sz;
+        curr -= (counter_t)sz;
     }

-#else
+#else // non C++11
+protected:
+    typedef OPENCV_ALLOCATOR_STATS_COUNTER_TYPE counter_t;
+    volatile counter_t curr, total, total_allocs, peak;  // overflow is possible, CV_XADD operates with 'int' only
+
+public:
+    AllocatorStatistics()
+        : curr(0), total(0), total_allocs(0), peak(0)
+    {}
+    ~AllocatorStatistics() CV_OVERRIDE {}

     uint64_t getCurrentUsage() const CV_OVERRIDE { return (uint64_t)curr; }
     uint64_t getTotalUsage() const CV_OVERRIDE { return (uint64_t)total; }
     uint64_t getNumberOfAllocations() const CV_OVERRIDE { return (uint64_t)total_allocs; }
@@ -89,21 +122,21 @@ public:
         CV__ALLOCATOR_STATS_LOG(cv::format("allocate: %lld (curr=%lld)", (long long int)sz, (long long int)curr));
 #endif

-        uint64_t new_curr = (uint64_t)CV_XADD(&curr, (uint64_t)sz) + sz;
+        counter_t new_curr = (counter_t)CV_XADD(&curr, (counter_t)sz) + (counter_t)sz;

-        peak = std::max((uint64_t)peak, new_curr);  // non-thread safe
+        peak = std::max((counter_t)peak, new_curr);  // non-thread safe

         //CV_XADD(&total, (uint64_t)sz);  // overflow with int, non-reliable...
         total += sz;

-        CV_XADD(&total_allocs, (uint64_t)1);
+        CV_XADD(&total_allocs, (counter_t)1);
     }
     void onFree(size_t sz)
     {
 #ifdef CV__ALLOCATOR_STATS_LOG
         CV__ALLOCATOR_STATS_LOG(cv::format("free: %lld (curr=%lld)", (long long int)sz, (long long int)curr));
 #endif
-        CV_XADD(&curr, (uint64_t)-sz);
+        CV_XADD(&curr, (counter_t)-sz);
     }
 #endif
 };
modules/core/include/opencv2/core/utils/buffer_area.private.hpp
@@ -72,6 +72,10 @@ public:
         CV_Assert(alignment % sizeof(T) == 0);
         CV_Assert((alignment & (alignment - 1)) == 0);
         allocate_((void**)(&ptr), static_cast<ushort>(sizeof(T)), count, alignment);
+#ifndef OPENCV_ENABLE_MEMORY_SANITIZER
+        if (safe)
+#endif
+            CV_Assert(ptr != NULL);
     }

     /** @brief Fill one of buffers with zeroes
@@ -118,9 +122,11 @@ private:
 private:
     class Block;
     std::vector<Block> blocks;
+#ifndef OPENCV_ENABLE_MEMORY_SANITIZER
     void * oneBuf;
     size_t totalSize;
     const bool safe;
+#endif
 };

 //! @}
modules/core/src/buffer_area.cpp
@@ -5,14 +5,10 @@
 #include "opencv2/core/utils/buffer_area.private.hpp"
 #include "opencv2/core/utils/configuration.private.hpp"

-#ifdef OPENCV_ENABLE_MEMORY_SANITIZER
-#define BUFFER_AREA_DEFAULT_MODE true
-#else
-#define BUFFER_AREA_DEFAULT_MODE false
-#endif
-
+#ifndef OPENCV_ENABLE_MEMORY_SANITIZER
 static bool CV_BUFFER_AREA_OVERRIDE_SAFE_MODE =
-    cv::utils::getConfigurationParameterBool("OPENCV_BUFFER_AREA_ALWAYS_SAFE", BUFFER_AREA_DEFAULT_MODE);
+    cv::utils::getConfigurationParameterBool("OPENCV_BUFFER_AREA_ALWAYS_SAFE", false);
+#endif

 namespace cv { namespace utils {
@@ -58,6 +54,7 @@ public:
             *ptr = raw_mem;
         }
     }
+#ifndef OPENCV_ENABLE_MEMORY_SANITIZER
     void * fast_allocate(void * buf) const
     {
         CV_Assert(ptr && *ptr == NULL);
@@ -66,6 +63,7 @@ public:
         *ptr = buf;
         return static_cast<void*>(static_cast<uchar*>(*ptr) + type_size * count);
     }
+#endif
     bool operator==(void **other) const
     {
         CV_Assert(ptr && other);
@@ -86,12 +84,20 @@ private:
 //==================================================================================================

+#ifndef OPENCV_ENABLE_MEMORY_SANITIZER
 BufferArea::BufferArea(bool safe_) :
     oneBuf(0),
     totalSize(0),
     safe(safe_ || CV_BUFFER_AREA_OVERRIDE_SAFE_MODE)
 {
     // nothing
 }
+#else
+BufferArea::BufferArea(bool safe_)
+{
+    CV_UNUSED(safe_);
+}
+#endif

 BufferArea::~BufferArea()
 {
@@ -101,10 +107,16 @@ BufferArea::~BufferArea()
 void BufferArea::allocate_(void **ptr, ushort type_size, size_t count, ushort alignment)
 {
     blocks.push_back(Block(ptr, type_size, count, alignment));
-    if (safe)
-        blocks.back().real_allocate();
-    else
+#ifndef OPENCV_ENABLE_MEMORY_SANITIZER
+    if (!safe)
+    {
         totalSize += blocks.back().getByteCount();
+    }
+    else
+#endif
+    {
+        blocks.back().real_allocate();
+    }
 }

 void BufferArea::zeroFill_(void **ptr)
@@ -129,6 +141,7 @@ void BufferArea::zeroFill()
 void BufferArea::commit()
 {
+#ifndef OPENCV_ENABLE_MEMORY_SANITIZER
     if (!safe)
     {
         CV_Assert(totalSize > 0);
@@ -141,6 +154,7 @@ void BufferArea::commit()
             ptr = i->fast_allocate(ptr);
         }
     }
+#endif
 }

 void BufferArea::release()
@@ -150,11 +164,13 @@ void BufferArea::release()
         i->cleanup();
     }
     blocks.clear();
+#ifndef OPENCV_ENABLE_MEMORY_SANITIZER
     if (oneBuf)
     {
         fastFree(oneBuf);
         oneBuf = 0;
     }
+#endif
 }

 //==================================================================================================
modules/core/src/parallel.cpp
@@ -638,9 +638,9 @@ int getNumThreads(void)
 #elif defined HAVE_CONCURRENCY

-    return 1 + (pplScheduler == 0
+    return (pplScheduler == 0)
         ? Concurrency::CurrentScheduler::Get()->GetNumberOfVirtualProcessors()
-        : pplScheduler->GetNumberOfVirtualProcessors());
+        : (1 + pplScheduler->GetNumberOfVirtualProcessors());

 #elif defined HAVE_PTHREADS_PF
modules/dnn/misc/python/pyopencv_dnn.hpp
@@ -160,12 +160,13 @@ public:
         PyObject* args = pyopencv_from(inputs);
         PyObject* res = PyObject_CallMethodObjArgs(o, PyString_FromString("forward"), args, NULL);
         Py_DECREF(args);
-        PyGILState_Release(gstate);
         if (!res)
             CV_Error(Error::StsNotImplemented, "Failed to call \"forward\" method");

         std::vector<Mat> pyOutputs;
         CV_Assert(pyopencv_to(res, pyOutputs, ArgInfo("", 0)));
+        Py_DECREF(res);
+        PyGILState_Release(gstate);

         CV_Assert(pyOutputs.size() == outputs.size());
         for (size_t i = 0; i < outputs.size(); ++i)
modules/dnn/src/dnn.cpp
@@ -114,7 +114,7 @@ public:
     {
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R3)
         // Lightweight detection
-        const std::vector<std::string> devices = getCore().GetAvailableDevices();
+        const std::vector<std::string> devices = getCore("").GetAvailableDevices();
         for (std::vector<std::string>::const_iterator i = devices.begin(); i != devices.end(); ++i)
         {
             if (std::string::npos != i->find("MYRIAD") && target == DNN_TARGET_MYRIAD)
@@ -3557,7 +3557,7 @@ Net Net::readFromModelOptimizer(const String& xml, const String& bin)
     InferenceEngine::CNNNetwork ieNet = reader.getNetwork();
 #else
-    InferenceEngine::Core& ie = getCore();
+    InferenceEngine::Core& ie = getCore("");
     InferenceEngine::CNNNetwork ieNet = ie.ReadNetwork(xml, bin);
 #endif
@@ -3606,7 +3606,7 @@ Net Net::readFromModelOptimizer(
     InferenceEngine::CNNNetwork ieNet = reader.getNetwork();
 #else
-    InferenceEngine::Core& ie = getCore();
+    InferenceEngine::Core& ie = getCore("");
     std::string model;
     model.assign((char*)bufferModelConfigPtr, bufferModelConfigSize);
modules/dnn/src/ie_ngraph.cpp
@@ -524,7 +524,7 @@ void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net)
     try
     {
         AutoLock lock(getInitializationMutex());
-        InferenceEngine::Core& ie = getCore();
+        InferenceEngine::Core& ie = getCore(device_name);
         {
             isInit = true;
             std::vector<std::string> candidates;
modules/dnn/src/onnx/onnx_importer.cpp
@@ -431,9 +431,20 @@ void ONNXImporter::populateNet(Net dstNet)
         {
             bool isSub = layer_type == "Sub";
             CV_CheckEQ(node_proto.input_size(), 2, "");
-            if (layer_id.find(node_proto.input(1)) == layer_id.end())
+            bool is_const_0 = layer_id.find(node_proto.input(0)) == layer_id.end();
+            bool is_const_1 = layer_id.find(node_proto.input(1)) == layer_id.end();
+            if (is_const_0 && is_const_1)
             {
-                Mat blob = getBlob(node_proto, constBlobs, 1);
+                Mat blob_0 = getBlob(node_proto, constBlobs, 0);
+                Mat blob_1 = getBlob(node_proto, constBlobs, 1);
+                CV_Assert(blob_0.size == blob_1.size);
+                Mat output = isSub ? (blob_0 - blob_1) : (blob_0 + blob_1);
+                constBlobs.insert(std::make_pair(layerParams.name, output));
+                continue;
+            }
+            else if (is_const_0 || is_const_1)
+            {
+                Mat blob = getBlob(node_proto, constBlobs, is_const_0 ? 0 : 1);
                 blob = blob.reshape(1, 1);
                 if (blob.total() == 1) {
                     layerParams.type = "Power";
@@ -808,6 +819,21 @@ void ONNXImporter::populateNet(Net dstNet)
             layerParams.set("end_axis", axis);
             layerParams.type = "Flatten";
         }
+        else if (layer_type == "Flatten")
+        {
+            CV_CheckEQ(node_proto.input_size(), 1, "");
+            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
+            {
+                Mat input = getBlob(node_proto, constBlobs, 0);
+                int axis = clamp(layerParams.get<int>("axis", 1), input.dims);
+
+                std::vector<int> out_size(&input.size[0], &input.size[0] + axis);
+                out_size.push_back(input.total(axis));
+                Mat output = input.reshape(1, out_size);
+                constBlobs.insert(std::make_pair(layerParams.name, output));
+                continue;
+            }
+        }
         else if (layer_type == "Unsqueeze")
         {
             CV_Assert(node_proto.input_size() == 1);
@@ -896,6 +922,31 @@ void ONNXImporter::populateNet(Net dstNet)
             constBlobs.insert(std::make_pair(layerParams.name, shapeMat));
             continue;
         }
+        else if (layer_type == "Cast")
+        {
+            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
+            {
+                Mat blob = getBlob(node_proto, constBlobs, 0);
+                int type;
+                switch (layerParams.get<int>("to"))
+                {
+                    case opencv_onnx::TensorProto_DataType_FLOAT:   type = CV_32F; break;
+                    case opencv_onnx::TensorProto_DataType_UINT8:   type = CV_8U; break;
+                    case opencv_onnx::TensorProto_DataType_UINT16:  type = CV_16U; break;
+                    case opencv_onnx::TensorProto_DataType_FLOAT16: type = CV_16S; break;
+                    case opencv_onnx::TensorProto_DataType_INT8:
+                    case opencv_onnx::TensorProto_DataType_INT16:
+                    case opencv_onnx::TensorProto_DataType_INT32:
+                    case opencv_onnx::TensorProto_DataType_INT64:   type = CV_32S; break;
+                    default: type = blob.type();
+                }
+                blob.convertTo(blob, type);
+                constBlobs.insert(std::make_pair(layerParams.name, blob));
+                continue;
+            }
+            else
+                layerParams.type = "Identity";
+        }
         else if (layer_type == "Gather")
         {
             CV_Assert(node_proto.input_size() == 2);
modules/dnn/src/op_inf_engine.cpp
@@ -604,18 +604,31 @@ static bool init_IE_plugins()
     (void)init_core->GetAvailableDevices();
     return true;
 }

-static InferenceEngine::Core& create_IE_Core_instance()
+static InferenceEngine::Core& retrieveIECore(const std::string& id, std::map<std::string, std::shared_ptr<InferenceEngine::Core> >& cores)
 {
-    static InferenceEngine::Core core;
-    return core;
+    AutoLock lock(getInitializationMutex());
+    std::map<std::string, std::shared_ptr<InferenceEngine::Core> >::iterator i = cores.find(id);
+    if (i == cores.end())
+    {
+        std::shared_ptr<InferenceEngine::Core> core = std::make_shared<InferenceEngine::Core>();
+        cores[id] = core;
+        return *core.get();
+    }
+    return *(i->second).get();
+}
+
+static InferenceEngine::Core& create_IE_Core_instance(const std::string& id)
+{
+    static std::map<std::string, std::shared_ptr<InferenceEngine::Core> > cores;
+    return retrieveIECore(id, cores);
 }

-static InferenceEngine::Core& create_IE_Core_pointer()
+static InferenceEngine::Core& create_IE_Core_pointer(const std::string& id)
 {
     // load and hold IE plugins
-    static InferenceEngine::Core* core = new InferenceEngine::Core();  // 'delete' is never called
-    return *core;
+    static std::map<std::string, std::shared_ptr<InferenceEngine::Core> >* cores =
+        new std::map<std::string, std::shared_ptr<InferenceEngine::Core> >();
+    return retrieveIECore(id, *cores);
 }

-InferenceEngine::Core& getCore()
+InferenceEngine::Core& getCore(const std::string& id)
 {
     // to make happy memory leak tools use:
     // - OPENCV_DNN_INFERENCE_ENGINE_HOLD_PLUGINS=0
@@ -631,9 +644,10 @@ InferenceEngine::Core& getCore()
         false
 #endif
     );
-    static InferenceEngine::Core& core = param_DNN_INFERENCE_ENGINE_CORE_LIFETIME_WORKAROUND
-        ? create_IE_Core_pointer()
-        : create_IE_Core_instance();
+    InferenceEngine::Core& core = param_DNN_INFERENCE_ENGINE_CORE_LIFETIME_WORKAROUND
+        ? create_IE_Core_pointer(id)
+        : create_IE_Core_instance(id);
     return core;
 }
 #endif
@@ -641,9 +655,10 @@ InferenceEngine::Core& getCore()
 #if !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
 static bool detectMyriadX_()
 {
+    AutoLock lock(getInitializationMutex());
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R3)
     // Lightweight detection
-    InferenceEngine::Core& ie = getCore();
+    InferenceEngine::Core& ie = getCore("MYRIAD");
     const std::vector<std::string> devices = ie.GetAvailableDevices();
     for (std::vector<std::string>::const_iterator i = devices.begin(); i != devices.end(); ++i)
     {
@@ -687,7 +702,6 @@ static bool detectMyriadX_()
 #if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
     InferenceEngine::InferenceEnginePluginPtr enginePtr;
     {
-        AutoLock lock(getInitializationMutex());
         auto& sharedPlugins = getSharedPlugins();
         auto pluginIt = sharedPlugins.find("MYRIAD");
         if (pluginIt != sharedPlugins.end())
         {
@@ -706,9 +720,9 @@ static bool detectMyriadX_()
     try
     {
 #if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3)
-        auto netExec = getCore().LoadNetwork(cnn, "MYRIAD", {{"VPU_PLATFORM", "VPU_2480"}});
+        auto netExec = getCore("MYRIAD").LoadNetwork(cnn, "MYRIAD", {{"VPU_PLATFORM", "VPU_2480"}});
 #else
-        auto netExec = getCore().LoadNetwork(cnn, "MYRIAD", {{"VPU_MYRIAD_PLATFORM", "VPU_MYRIAD_2480"}});
+        auto netExec = getCore("MYRIAD").LoadNetwork(cnn, "MYRIAD", {{"VPU_MYRIAD_PLATFORM", "VPU_MYRIAD_2480"}});
 #endif
 #endif
         auto infRequest = netExec.CreateInferRequest();
@@ -739,7 +753,7 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::CNNNetwork& net)
         }
         else
 #else
-        InferenceEngine::Core& ie = getCore();
+        InferenceEngine::Core& ie = getCore(device_name);
 #endif
         {
 #if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
@@ -1124,7 +1138,7 @@ void resetMyriadDevice()
     getSharedPlugins().erase("MYRIAD");
 #else
     // Unregister both "MYRIAD" and "HETERO:MYRIAD,CPU" plugins
-    InferenceEngine::Core& ie = getCore();
+    InferenceEngine::Core& ie = getCore("MYRIAD");
     try
     {
         ie.UnregisterPlugin("MYRIAD");
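The getCore(id) change above replaces a single shared InferenceEngine::Core with one lazily created Core per device id, guarded by the initialization mutex. Below is a self-contained sketch of that registry pattern only; the Core stand-in type and the use of std::mutex instead of OpenCV's AutoLock are assumptions made purely for illustration.

// Hedged sketch of a per-id lazily-created singleton registry (C++11).
#include <map>
#include <memory>
#include <mutex>
#include <string>

struct Core { /* stand-in for InferenceEngine::Core */ };

static Core& retrieveCore(const std::string& id,
                          std::map<std::string, std::shared_ptr<Core> >& cores)
{
    static std::mutex mtx;
    std::lock_guard<std::mutex> lock(mtx);   // same role as AutoLock above
    std::map<std::string, std::shared_ptr<Core> >::iterator it = cores.find(id);
    if (it == cores.end())
    {
        std::shared_ptr<Core> core = std::make_shared<Core>();  // one instance per device id
        cores[id] = core;
        return *core;
    }
    return *(it->second);
}

As in the patch, callers pass the device name (or an empty string) and get back a reference to an instance that is created on first use and reused afterwards.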
modules/dnn/src/op_inf_engine.hpp
@@ -246,7 +246,7 @@ bool isMyriadX();

 CV__DNN_INLINE_NS_END

-InferenceEngine::Core& getCore();
+InferenceEngine::Core& getCore(const std::string& id);

 template<typename T = size_t>
 static inline std::vector<T> getShape(const Mat& mat)
modules/dnn/src/tensorflow/tf_importer.cpp
@@ -458,6 +458,7 @@ private:
     tensorflow::GraphDef netTxt;

     std::vector<String> netInputsNames;
+    std::vector<MatShape> netInputShapes;
 };

 TFImporter::TFImporter(const char *model, const char *config)
@@ -1401,6 +1402,27 @@ void TFImporter::populateNet(Net dstNet)
             netInputsNames.push_back(name);
             layer_id[name] = 0;
         }
+        if (hasLayerAttr(layer, "shape"))
+        {
+            const tensorflow::TensorShapeProto& shape = getLayerAttr(layer, "shape").shape();
+            MatShape dims(shape.dim_size());
+            for (int i = 0; i < dims.size(); ++i)
+                dims[i] = shape.dim(i).size();
+            if (dims.size() == 4 && predictedLayout == DATA_LAYOUT_NHWC)
+            {
+                std::swap(dims[1], dims[3]);  // NHWC->NCWH
+                std::swap(dims[2], dims[3]);  // NCWH->NCHW
+                if (dims[0] == -1)  // It's OK to have undetermined batch size
+                    dims[0] = 1;
+            }
+            bool hasNeg = false;
+            for (int i = 0; i < dims.size() && !hasNeg; ++i)
+            {
+                hasNeg = dims[i] < 0;
+            }
+            if (!hasNeg)
+                netInputShapes.push_back(dims);
+        }
     }
     else if (type == "Split") {
         // TODO: determining axis index remapping by input dimensions order of input blob
@@ -1580,8 +1602,41 @@ void TFImporter::populateNet(Net dstNet)
             }
             else
             {
-                layerParams.set("operation", "prod");
-                int id = dstNet.addLayer(name, "Eltwise", layerParams);
+                // Check if all the inputs have the same shape.
+                bool equalInpShapes = true;
+                MatShape outShape0;
+                for (int ii = 0; ii < layer.input_size() && !netInputShapes.empty(); ii++)
+                {
+                    Pin pin = parsePin(layer.input(ii));
+                    int inpId = layer_id.find(pin.name)->second;
+
+                    // Get input shape
+                    MatShape outShape;
+                    std::vector<MatShape> inpShapes, outShapes;
+                    dstNet.getLayerShapes(netInputShapes, inpId, inpShapes, outShapes);
+                    CV_CheckGT(static_cast<int>(outShapes.size()), pin.blobIndex, "");
+                    outShape = outShapes[pin.blobIndex];
+
+                    if (ii == 0)
+                    {
+                        outShape0 = outShape;
+                    }
+                    else if (outShape != outShape0)
+                    {
+                        equalInpShapes = false;
+                        break;
+                    }
+                }
+
+                int id;
+                if (equalInpShapes || netInputShapes.empty())
+                {
+                    layerParams.set("operation", "prod");
+                    id = dstNet.addLayer(name, "Eltwise", layerParams);
+                }
+                else
+                    id = dstNet.addLayer(name, "Scale", layerParams);

                 layer_id[name] = id;

                 for (int ii = 0; ii < layer.input_size(); ii++)
modules/dnn/test/test_onnx_importer.cpp
@@ -199,6 +199,11 @@ TEST_P(Test_ONNX_layers, MaxPooling_Sigmoid)
     testONNXModels("maxpooling_sigmoid");
 }

+TEST_P(Test_ONNX_layers, Cast)
+{
+    testONNXModels("cast");
+}
+
 TEST_P(Test_ONNX_layers, Concatenation)
 {
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
@@ -402,6 +407,7 @@ TEST_P(Test_ONNX_layers, DynamicReshape)
     testONNXModels("dynamic_reshape");
     testONNXModels("dynamic_reshape_opset_11");
     testONNXModels("flatten_by_prod");
+    testONNXModels("flatten_const");
 }

 TEST_P(Test_ONNX_layers, Reshape)
modules/dnn/test/test_tf_importer.cpp
@@ -186,6 +186,13 @@ TEST_P(Test_TensorFlow_layers, eltwise)
     runTensorFlowNet("eltwise_sub");
 }

+TEST_P(Test_TensorFlow_layers, channel_broadcast)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    runTensorFlowNet("channel_broadcast");
+}
+
 TEST_P(Test_TensorFlow_layers, pad_and_concat)
 {
     runTensorFlowNet("pad_and_concat");
modules/objdetect/test/test_qrcode.cpp
@@ -284,7 +284,7 @@ TEST_P(Objdetect_QRCode_Close, regression)
     ASSERT_FALSE(corners.empty());
     ASSERT_FALSE(decoded_info.empty());
 #else
-    ASSERT_TRUE(qrcode.detect(src, corners));
+    ASSERT_TRUE(qrcode.detect(barcode, corners));
 #endif
     const std::string dataset_config = findDataFile(root + "dataset_config.json");
@@ -349,7 +349,7 @@ TEST_P(Objdetect_QRCode_Monitor, regression)
     ASSERT_FALSE(corners.empty());
     ASSERT_FALSE(decoded_info.empty());
 #else
-    ASSERT_TRUE(qrcode.detect(src, corners));
+    ASSERT_TRUE(qrcode.detect(barcode, corners));
 #endif
     const std::string dataset_config = findDataFile(root + "dataset_config.json");
modules/python/CMakeLists.txt
@@ -41,6 +41,7 @@ add_subdirectory(python3)
 else()
   # standalone build
   cmake_minimum_required(VERSION 2.8.12)
+  project(OpenCVPython CXX C)
   include("./standalone.cmake")
 endif()
samples/cpp/intelligent_scissors.cpp
new file (0 → 100644)

#include <iostream>
#include <cmath>
#include <string>
#include <vector>
#include <queue>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

using namespace cv;

struct Pix
{
    Point next_point;
    double cost;

    bool operator > (const Pix &b) const
    {
        return cost > b.cost;
    }
};

struct Parameters
{
    Mat img, img_pre_render, img_render;
    Point end;
    std::vector<std::vector<Point> > contours;
    std::vector<Point> tmp_contour;
    Mat zero_crossing, gradient_magnitude, Ix, Iy, hit_map_x, hit_map_y;
};

static float local_cost(const Point& p, const Point& q, const Mat& gradient_magnitude,
                        const Mat& Iy, const Mat& Ix, const Mat& zero_crossing)
{
    float fG = gradient_magnitude.at<float>(q.y, q.x);
    float dp;
    float dq;
    const float WEIGHT_LAP_ZERO_CROSS = 0.43f;
    const float WEIGHT_GRADIENT_MAGNITUDE = 0.14f;
    const float WEIGHT_GRADIENT_DIRECTION = 0.43f;
    bool isDiag = (p.x != q.x) && (p.y != q.y);

    if ((Iy.at<float>(p) * (q.x - p.x) - Ix.at<float>(p) * (q.y - p.y)) >= 0)
    {
        dp = Iy.at<float>(p) * (q.x - p.x) - Ix.at<float>(p) * (q.y - p.y);
        dq = Iy.at<float>(q) * (q.x - p.x) - Ix.at<float>(q) * (q.y - p.y);
    }
    else
    {
        dp = Iy.at<float>(p) * (p.x - q.x) + (-Ix.at<float>(p)) * (p.y - q.y);
        dq = Iy.at<float>(q) * (p.x - q.x) + (-Ix.at<float>(q)) * (p.y - q.y);
    }
    if (isDiag)
    {
        dp /= sqrtf(2);
        dq /= sqrtf(2);
    }
    else
    {
        fG /= sqrtf(2);
    }
    return WEIGHT_LAP_ZERO_CROSS * zero_crossing.at<uchar>(q) +
           WEIGHT_GRADIENT_DIRECTION * (acosf(dp) + acosf(dq)) / static_cast<float>(CV_PI) +
           WEIGHT_GRADIENT_MAGNITUDE * fG;
}

static void find_min_path(const Point& start, Parameters* param)
{
    Pix begin;
    Mat& img = param->img;
    Mat cost_map(img.size(), CV_32F, Scalar(FLT_MAX));
    Mat expand(img.size(), CV_8UC1, Scalar(0));
    Mat processed(img.size(), CV_8UC1, Scalar(0));
    Mat removed(img.size(), CV_8UC1, Scalar(0));
    std::priority_queue < Pix, std::vector<Pix>, std::greater<Pix> > L;

    cost_map.at<float>(start) = 0;
    processed.at<uchar>(start) = 1;
    begin.cost = 0;
    begin.next_point = start;

    L.push(begin);
    while (!L.empty())
    {
        Pix P = L.top();
        L.pop();
        Point p = P.next_point;
        processed.at<uchar>(p) = 0;
        if (removed.at<uchar>(p) == 0)
        {
            expand.at<uchar>(p) = 1;
            for (int i = -1; i <= 1; i++)
            {
                for (int j = -1; j <= 1; j++)
                {
                    int tx = p.x + i;
                    int ty = p.y + j;
                    if (tx < 0 || tx >= img.cols || ty < 0 || ty >= img.rows)
                        continue;
                    if (expand.at<uchar>(ty, tx) == 0)
                    {
                        Point q = Point(tx, ty);
                        float cost = cost_map.at<float>(p) + local_cost(p, q, param->gradient_magnitude, param->Iy, param->Ix, param->zero_crossing);
                        if (processed.at<uchar>(q) == 1 && cost < cost_map.at<float>(q))
                        {
                            removed.at<uchar>(q) = 1;
                        }
                        if (processed.at<uchar>(q) == 0)
                        {
                            cost_map.at<float>(q) = cost;
                            param->hit_map_x.at<int>(q) = p.x;
                            param->hit_map_y.at<int>(q) = p.y;
                            processed.at<uchar>(q) = 1;
                            Pix val;
                            val.cost = cost_map.at<float>(q);
                            val.next_point = q;
                            L.push(val);
                        }
                    }
                }
            }
        }
    }
}

static void onMouse(int event, int x, int y, int, void* userdata)
{
    Parameters* param = reinterpret_cast<Parameters*>(userdata);
    Point& end = param->end;
    std::vector<std::vector<Point> >& contours = param->contours;
    std::vector<Point>& tmp_contour = param->tmp_contour;
    Mat& img_render = param->img_render;
    Mat& img_pre_render = param->img_pre_render;

    if (event == EVENT_LBUTTONDOWN)
    {
        end = Point(x, y);
        if (!contours.back().empty())
        {
            for (int i = static_cast<int>(tmp_contour.size()) - 1; i >= 0; i--)
            {
                contours.back().push_back(tmp_contour[i]);
            }
            tmp_contour.clear();
        }
        else
        {
            contours.back().push_back(end);
        }
        find_min_path(end, param);

        img_render.copyTo(img_pre_render);
        imshow("lasso", img_render);
    }
    else if (event == EVENT_RBUTTONDOWN)
    {
        img_pre_render.copyTo(img_render);
        drawContours(img_pre_render, contours, static_cast<int>(contours.size()) - 1, Scalar(0, 255, 0), FILLED);
        addWeighted(img_pre_render, 0.3, img_render, 0.7, 0, img_render);
        contours.resize(contours.size() + 1);
        imshow("lasso", img_render);
    }
    else if (event == EVENT_MOUSEMOVE && !contours.back().empty())
    {
        tmp_contour.clear();
        img_pre_render.copyTo(img_render);
        Point val_point = Point(x, y);
        while (val_point != end)
        {
            tmp_contour.push_back(val_point);
            Point cur = Point(param->hit_map_x.at<int>(val_point), param->hit_map_y.at<int>(val_point));
            line(img_render, val_point, cur, Scalar(255, 0, 0), 2);
            val_point = cur;
        }
        imshow("lasso", img_render);
    }
}

const char* keys =
{
    "{help h | |}"
    "{@image | fruits.jpg| Path to image to process}"
};

int main( int argc, const char** argv )
{
    Parameters param;
    const int EDGE_THRESHOLD_LOW = 50;
    const int EDGE_THRESHOLD_HIGH = 100;
    CommandLineParser parser(argc, argv, keys);
    parser.about("\nThis program demonstrates implementation of 'intelligent scissors' algorithm\n"
                 "To start drawing a new contour select a pixel, click LEFT mouse button.\n"
                 "To fix a path click LEFT mouse button again.\n"
                 "To finish drawing a contour click RIGHT mouse button.\n");
    if (parser.has("help"))
    {
        parser.printMessage();
        return 1;
    }
    std::vector<std::vector<Point> > c(1);
    param.contours = c;
    std::string filename = parser.get<std::string>(0);

    Mat grayscale, img_canny;
    param.img = imread(samples::findFile(filename));

    param.hit_map_x.create(param.img.rows, param.img.cols, CV_32SC1);
    param.hit_map_y.create(param.img.rows, param.img.cols, CV_32SC1);

    cvtColor(param.img, grayscale, COLOR_BGR2GRAY);
    Canny(grayscale, img_canny, EDGE_THRESHOLD_LOW, EDGE_THRESHOLD_HIGH);

    threshold(img_canny, param.zero_crossing, 254, 1, THRESH_BINARY_INV);
    Sobel(grayscale, param.Ix, CV_32FC1, 1, 0, 1);
    Sobel(grayscale, param.Iy, CV_32FC1, 0, 1, 1);
    param.Ix.convertTo(param.Ix, CV_32F, 1.0/255);
    param.Iy.convertTo(param.Iy, CV_32F, 1.0/255);

    // Compute gradients magnitude.
    double max_val = 0.0;
    magnitude(param.Iy, param.Ix, param.gradient_magnitude);
    minMaxLoc(param.gradient_magnitude, 0, &max_val);
    param.gradient_magnitude.convertTo(param.gradient_magnitude, CV_32F, -1/max_val, 1.0);

    param.img.copyTo(param.img_pre_render);
    param.img.copyTo(param.img_render);

    namedWindow("lasso");
    setMouseCallback("lasso", onMouse, &param);
    imshow("lasso", param.img);
    waitKey(0);
}
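For orientation, local_cost() in the sample above appears to follow the classic Intelligent Scissors link cost of Mortensen and Barrett. With the weights hard-coded in the sample it evaluates

    l(p, q) = 0.43 * f_Z(q) + 0.43 * f_D(p, q) + 0.14 * f_G(q)

where f_Z(q) is the zero-crossing term (the inverted Canny edge map stored in zero_crossing, so edge pixels contribute 0), f_D(p, q) is the gradient-direction term computed from the Sobel derivatives Ix and Iy via the two acosf() calls, and f_G(q) is the gradient magnitude, normalized and inverted so that strong edges are cheap. Diagonal links scale dp and dq by 1/sqrt(2), while axis-aligned links scale f_G instead. find_min_path() then performs a Dijkstra-style expansion over these link costs using a priority queue, and the mouse callback traces the stored hit_map_x/hit_map_y back-pointers to draw the live minimum-cost path.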
samples/dnn/tf_text_graph_ssd.py
@@ -64,7 +64,7 @@ def createSSDGraph(modelPath, configPath, outputPath):
     # Nodes that should be kept.
     keepOps = ['Conv2D', 'BiasAdd', 'Add', 'AddV2', 'Relu', 'Relu6', 'Placeholder', 'FusedBatchNorm',
                'DepthwiseConv2dNative', 'ConcatV2', 'Mul', 'MaxPool', 'AvgPool', 'Identity',
-               'Sub', 'ResizeNearestNeighbor', 'Pad', 'FusedBatchNormV3']
+               'Sub', 'ResizeNearestNeighbor', 'Pad', 'FusedBatchNormV3', 'Mean']

     # Node with which prefixes should be removed
     prefixesToRemove = ('MultipleGridAnchorGenerator/', 'Concatenate/', 'Postprocessor/', 'Preprocessor/map')
@@ -235,7 +235,7 @@ def createSSDGraph(modelPath, configPath, outputPath):
     # Connect input node to the first layer
     assert(graph_def.node[0].op == 'Placeholder')
     # assert(graph_def.node[1].op == 'Conv2D')
-    weights = graph_def.node[1].input[0]
+    weights = graph_def.node[1].input[-1]
     for i in range(len(graph_def.node[1].input)):
         graph_def.node[1].input.pop()
     graph_def.node[1].input.append(graph_def.node[0].name)