opencv / Commits / 96b26dc8

Commit 96b26dc8, authored Feb 20, 2020 by Alexander Alekhin

    Merge remote-tracking branch 'upstream/3.4' into merge-3.4

Parents: 1f695c45, 150c2935

Showing 28 changed files with 651 additions and 147 deletions (+651, -147)
Changed files (28):

apps/interactive-calibration/calibCommon.hpp                                      +1   -1
apps/interactive-calibration/defaultConfig.xml                                    +1   -1
apps/interactive-calibration/frameProcessor.cpp                                   +1   -1
apps/interactive-calibration/main.cpp                                             +1   -1
apps/interactive-calibration/parametersController.cpp                             +6   -3
doc/tutorials/calib3d/interactive_calibration/interactive_calibration.markdown    +3   -3
modules/calib3d/src/fundam.cpp                                                    +49  -2
modules/core/src/persistence_yml.cpp                                              +4   -4
modules/dnn/src/caffe/caffe_importer.cpp                                          +67  -31
modules/dnn/src/dnn.cpp                                                           +25  -7
modules/dnn/src/ie_ngraph.cpp                                                     +4   -5
modules/dnn/src/layers/crop_and_resize_layer.cpp                                  +44  -5
modules/dnn/src/layers/resize_layer.cpp                                           +4   -3
modules/dnn/src/layers/scale_layer.cpp                                            +22  -17
modules/dnn/src/onnx/onnx_importer.cpp                                            +23  -7
modules/dnn/test/test_ie_models.cpp                                               +20  -26
modules/dnn/test/test_onnx_importer.cpp                                           +15  -0
modules/dnn/test/test_tf_importer.cpp                                             +27  -4
modules/highgui/src/window_cocoa.mm                                               +25  -3
modules/imgproc/test/test_imgwarp.cpp                                             +15  -0
modules/python/package/template/config.py.in                                      +2   -0
modules/python/python_loader.cmake                                                +7   -1
samples/cpp/falsecolor.cpp                                                        +5   -5
samples/dnn/human_parsing.py                                                      +12  -8
samples/dnn/virtual_try_on.py (new file)                                          +0   -0
samples/python/drawing.py (new file)                                              +192 -0
samples/python/grabcut.py                                                         +7   -9
samples/python/laplace.py (new file)                                              +69  -0
apps/interactive-calibration/calibCommon.hpp
@@ -80,7 +80,7 @@ namespace calib
         cv::Size boardSize;
         int charucoDictName;
         int calibrationStep;
-        float charucoSquareLenght, charucoMarkerSize;
+        float charucoSquareLength, charucoMarkerSize;
         float captureDelay;
         float squareSize;
         float templDst;
apps/interactive-calibration/defaultConfig.xml
 <?xml version="1.0"?>
 <opencv_storage>
 <charuco_dict>0</charuco_dict>
-<charuco_square_lenght>200</charuco_square_lenght>
+<charuco_square_length>200</charuco_square_length>
 <charuco_marker_size>100</charuco_marker_size>
 <calibration_step>1</calibration_step>
 <max_frames_num>30</max_frames_num>
apps/interactive-calibration/frameProcessor.cpp
@@ -273,7 +273,7 @@ CalibProcessor::CalibProcessor(cv::Ptr<calibrationData> data, captureParameters
 #ifdef HAVE_OPENCV_ARUCO
         mArucoDictionary = cv::aruco::getPredefinedDictionary(
                 cv::aruco::PREDEFINED_DICTIONARY_NAME(capParams.charucoDictName));
-        mCharucoBoard = cv::aruco::CharucoBoard::create(mBoardSize.width, mBoardSize.height, capParams.charucoSquareLenght,
+        mCharucoBoard = cv::aruco::CharucoBoard::create(mBoardSize.width, mBoardSize.height, capParams.charucoSquareLength,
                                                         capParams.charucoMarkerSize, mArucoDictionary);
 #endif
         break;
apps/interactive-calibration/main.cpp
@@ -181,7 +181,7 @@ int main(int argc, char** argv)
         cv::aruco::getPredefinedDictionary(cv::aruco::PREDEFINED_DICTIONARY_NAME(capParams.charucoDictName));
     cv::Ptr<cv::aruco::CharucoBoard> charucoboard =
             cv::aruco::CharucoBoard::create(capParams.boardSize.width, capParams.boardSize.height,
-                                            capParams.charucoSquareLenght, capParams.charucoMarkerSize, dictionary);
+                                            capParams.charucoSquareLength, capParams.charucoMarkerSize, dictionary);
     globalData->totalAvgErr = cv::aruco::calibrateCameraCharuco(globalData->allCharucoCorners,
                                             globalData->allCharucoIds, charucoboard, globalData->imageSize, ...
apps/interactive-calibration/parametersController.cpp
@@ -37,7 +37,10 @@ bool calib::parametersController::loadFromFile(const std::string &inputFileName)
     }

     readFromNode(reader["charuco_dict"], mCapParams.charucoDictName);
-    readFromNode(reader["charuco_square_lenght"], mCapParams.charucoSquareLenght);
+    if (readFromNode(reader["charuco_square_lenght"], mCapParams.charucoSquareLength))
+    {
+        std::cout << "DEPRECATION: Parameter 'charuco_square_lenght' has been deprecated (typo). Use 'charuco_square_length' instead." << std::endl;
+    }
+    readFromNode(reader["charuco_square_length"], mCapParams.charucoSquareLength);
     readFromNode(reader["charuco_marker_size"], mCapParams.charucoMarkerSize);
     readFromNode(reader["camera_resolution"], mCapParams.cameraResolution);
     readFromNode(reader["calibration_step"], mCapParams.calibrationStep);
@@ -51,7 +54,7 @@ bool calib::parametersController::loadFromFile(const std::string &inputFileName)
     bool retValue =
             checkAssertion(mCapParams.charucoDictName >= 0, "Dict name must be >= 0") &&
             checkAssertion(mCapParams.charucoMarkerSize > 0, "Marker size must be positive") &&
-            checkAssertion(mCapParams.charucoSquareLenght > 0, "Square size must be positive") &&
+            checkAssertion(mCapParams.charucoSquareLength > 0, "Square size must be positive") &&
             checkAssertion(mCapParams.minFramesNum > 1, "Minimal number of frames for calibration < 1") &&
             checkAssertion(mCapParams.calibrationStep > 0, "Calibration step must be positive") &&
             checkAssertion(mCapParams.maxFramesNum > mCapParams.minFramesNum, "maxFramesNum < minFramesNum") &&
@@ -119,7 +122,7 @@ bool calib::parametersController::loadFromParser(cv::CommandLineParser &parser)
         mCapParams.board = chAruco;
         mCapParams.boardSize = cv::Size(6, 8);
         mCapParams.charucoDictName = 0;
-        mCapParams.charucoSquareLenght = 200;
+        mCapParams.charucoSquareLength = 200;
         mCapParams.charucoMarkerSize = 100;
     }
     else {
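Note on the first hunk: the pattern there (accept the old misspelled key, print a deprecation warning, then let the correctly spelled key override) generalizes to any renamed config setting. A minimal standalone sketch, assuming cv::FileStorage-style nodes; the helper name and the fallback logic are mine, illustrative only, not part of the patch:

    #include <iostream>
    #include <opencv2/core.hpp>

    // Hypothetical helper mirroring the deprecation pattern above: read the
    // legacy (misspelled) key first and warn, then let the new key win.
    static void readFloatWithDeprecatedAlias(const cv::FileNode& root,
                                             const char* oldKey, const char* newKey,
                                             float& value)
    {
        if (!root[oldKey].empty())
        {
            root[oldKey] >> value;
            std::cout << "DEPRECATION: '" << oldKey << "' is deprecated, use '"
                      << newKey << "' instead." << std::endl;
        }
        if (!root[newKey].empty())
            root[newKey] >> value;  // the correctly spelled key overrides
    }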
doc/tutorials/calib3d/interactive_calibration/interactive_calibration.markdown
@@ -64,7 +64,7 @@ By default values of advanced parameters are stored in defaultConfig.xml
 <?xml version="1.0"?>
 <opencv_storage>
 <charuco_dict>0</charuco_dict>
-<charuco_square_lenght>200</charuco_square_lenght>
+<charuco_square_length>200</charuco_square_length>
 <charuco_marker_size>100</charuco_marker_size>
 <calibration_step>1</calibration_step>
 <max_frames_num>30</max_frames_num>
@@ -78,7 +78,7 @@ By default values of advanced parameters are stored in defaultConfig.xml
 @endcode
 - *charuco_dict*: name of special dictionary, which has been used for generation of chAruco pattern
-- *charuco_square_lenght*: size of square on chAruco board (in pixels)
+- *charuco_square_length*: size of square on chAruco board (in pixels)
 - *charuco_marker_size*: size of Aruco markers on chAruco board (in pixels)
 - *calibration_step*: interval in frames between launches of @ref cv::calibrateCamera
 - *max_frames_num*: if number of frames for calibration is greater then this value frames filter starts working.
@@ -91,7 +91,7 @@ QR faster than SVD, but potentially less precise
 - *frame_filter_conv_param*: parameter which used in linear convolution of bicriterial frames filter
 - *camera_resolution*: resolution of camera which is used for calibration

-**Note:** *charuco_dict*, *charuco_square_lenght* and *charuco_marker_size* are used for chAruco pattern generation
+**Note:** *charuco_dict*, *charuco_square_length* and *charuco_marker_size* are used for chAruco pattern generation
 (see Aruco module description for details: [Aruco tutorials](https://github.com/opencv/opencv_contrib/tree/master/modules/aruco/tutorials))

 Default chAruco pattern:
modules/calib3d/src/fundam.cpp
@@ -490,12 +490,47 @@ static int run7Point( const Mat& _m1, const Mat& _m2, Mat& _fmatrix )
     double* fmatrix = _fmatrix.ptr<double>();
     int i, k, n;

+    Point2d m1c(0, 0), m2c(0, 0);
+    double t, scale1 = 0, scale2 = 0;
+    const int count = 7;
+
+    // compute centers and average distances for each of the two point sets
+    for( i = 0; i < count; i++ )
+    {
+        m1c += Point2d(m1[i]);
+        m2c += Point2d(m2[i]);
+    }
+
+    // calculate the normalizing transformations for each of the point sets:
+    // after the transformation each set will have the mass center at the coordinate origin
+    // and the average distance from the origin will be ~sqrt(2).
+    t = 1./count;
+    m1c *= t;
+    m2c *= t;
+
+    for( i = 0; i < count; i++ )
+    {
+        scale1 += norm(Point2d(m1[i].x - m1c.x, m1[i].y - m1c.y));
+        scale2 += norm(Point2d(m2[i].x - m2c.x, m2[i].y - m2c.y));
+    }
+
+    scale1 *= t;
+    scale2 *= t;
+
+    if( scale1 < FLT_EPSILON || scale2 < FLT_EPSILON )
+        return 0;
+
+    scale1 = std::sqrt(2.)/scale1;
+    scale2 = std::sqrt(2.)/scale2;
+
     // form a linear system: i-th row of A(=a) represents
     // the equation: (m2[i], 1)'*F*(m1[i], 1) = 0
     for( i = 0; i < 7; i++ )
     {
-        double x0 = m1[i].x, y0 = m1[i].y;
-        double x1 = m2[i].x, y1 = m2[i].y;
+        double x0 = (m1[i].x - m1c.x)*scale1, y0 = (m1[i].y - m1c.y)*scale1;
+        double x1 = (m2[i].x - m2c.x)*scale2, y1 = (m2[i].y - m2c.y)*scale2;

         a[i*9+0] = x1*x0;
         a[i*9+1] = x1*y0;
@@ -559,6 +594,10 @@ static int run7Point( const Mat& _m1, const Mat& _m2, Mat& _fmatrix )
     if( n < 1 || n > 3 )
         return n;

+    // transformation matrices
+    Matx33d T1( scale1, 0, -scale1*m1c.x, 0, scale1, -scale1*m1c.y, 0, 0, 1 );
+    Matx33d T2( scale2, 0, -scale2*m2c.x, 0, scale2, -scale2*m2c.y, 0, 0, 1 );
+
     for( k = 0; k < n; k++, fmatrix += 9 )
     {
         // for each root form the fundamental matrix
@@ -577,6 +616,14 @@ static int run7Point( const Mat& _m1, const Mat& _m2, Mat& _fmatrix )
         for( i = 0; i < 8; i++ )
             fmatrix[i] = f1[i]*lambda + f2[i]*mu;
+
+        // de-normalize
+        Mat F(3, 3, CV_64F, fmatrix);
+        F = T2.t() * F * T1;
+
+        // make F(3,3) = 1
+        if( fabs(F.at<double>(8)) > FLT_EPSILON )
+            F *= 1. / F.at<double>(8);
     }

     return n;
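Note on these hunks: the added code is standard Hartley normalization for the 7-point algorithm. A sketch of the algebra (my summary, not text from the patch) — each point set is translated to its centroid c and scaled by s so the mean distance from the origin is sqrt(2):

    \hat{m} = T\,m, \qquad
    T = \begin{pmatrix} s & 0 & -s\,c_x \\ 0 & s & -s\,c_y \\ 0 & 0 & 1 \end{pmatrix}, \qquad
    s = \sqrt{2} \Big/ \Big( \tfrac{1}{n} \sum_{i=1}^{n} \lVert m_i - c \rVert \Big)

    \hat{m}_2^{\top} \hat{F}\, \hat{m}_1 = 0
    \;\Longrightarrow\;
    m_2^{\top} \big( T_2^{\top} \hat{F}\, T_1 \big)\, m_1 = 0,
    \qquad
    F = T_2^{\top} \hat{F}\, T_1

which is why the loop de-normalizes each solution with F = T2' * F̂ * T1 and then rescales so F(3,3) = 1.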
modules/core/src/persistence_yml.cpp
@@ -452,19 +452,19 @@ public:
         if( d == '<' ) //support of full type heading from YAML 1.2
         {
             const char* yamlTypeHeading = "<tag:yaml.org,2002:";
-            const size_t headingLenght = strlen(yamlTypeHeading);
+            const size_t headingLength = strlen(yamlTypeHeading);

             char* typeEndPtr = ++ptr;

             do d = *++typeEndPtr;
             while( cv_isprint(d) && d != ' ' && d != '>' );

-            if( d == '>' && (size_t)(typeEndPtr - ptr) > headingLenght )
+            if( d == '>' && (size_t)(typeEndPtr - ptr) > headingLength )
             {
-                if( memcmp(ptr, yamlTypeHeading, headingLenght) == 0 )
+                if( memcmp(ptr, yamlTypeHeading, headingLength) == 0 )
                 {
                     *typeEndPtr = ' ';
-                    ptr += headingLenght - 1;
+                    ptr += headingLength - 1;
                     is_user = true;
                     //value_type |= FileNode::USER;
                 }
modules/dnn/src/caffe/caffe_importer.cpp
@@ -75,6 +75,17 @@ static cv::String toString(const T &v)
     return ss.str();
 }

+static inline MatShape parseBlobShape(const caffe::BlobShape& _input_shape)
+{
+    MatShape shape;
+    for (int i = 0; i < _input_shape.dim_size(); i++)
+    {
+        shape.push_back((int)_input_shape.dim(i));
+    }
+    return shape;
+}
+
 class CaffeImporter
 {
     caffe::NetParameter net;
@@ -235,10 +246,7 @@ public:
         }
         else if (pbBlob.has_shape())
         {
-            const caffe::BlobShape &_shape = pbBlob.shape();
-
-            for (int i = 0; i < _shape.dim_size(); i++)
-                shape.push_back((int)_shape.dim(i));
+            shape = parseBlobShape(pbBlob.shape());
         }
         else
             shape.resize(1, 1);  // Is a scalar.
@@ -334,12 +342,49 @@ public:
         //setup input layer names
         std::vector<String> netInputs(net.input_size());
+        std::vector<MatShape> inp_shapes;
         {
-            for (int inNum = 0; inNum < net.input_size(); inNum++)
+            int net_input_size = net.input_size();
+            for (int inNum = 0; inNum < net_input_size; inNum++)
             {
                 addedBlobs.push_back(BlobNote(net.input(inNum), 0, inNum));
                 netInputs[inNum] = net.input(inNum);
             }
+
+            if (net.input_dim_size() > 0)  // deprecated in Caffe proto
+            {
+                int net_input_dim_size = net.input_dim_size();
+                CV_Check(net_input_dim_size, net_input_dim_size % 4 == 0, "");
+                CV_CheckEQ(net_input_dim_size, net_input_size * 4, "");
+                for (int inp_id = 0; inp_id < net_input_size; inp_id++)
+                {
+                    int dim = inp_id * 4;
+                    MatShape shape(4);
+                    shape[0] = net.input_dim(dim);
+                    shape[1] = net.input_dim(dim + 1);
+                    shape[2] = net.input_dim(dim + 2);
+                    shape[3] = net.input_dim(dim + 3);
+                    inp_shapes.push_back(shape);
+                }
+            }
+            else if (net.input_shape_size() > 0)  // deprecated in Caffe proto
+            {
+                int net_input_shape_size = net.input_shape_size();
+                CV_CheckEQ(net_input_shape_size, net_input_size, "");
+                for (int inp_id = 0; inp_id < net_input_shape_size; inp_id++)
+                {
+                    MatShape shape = parseBlobShape(net.input_shape(inp_id));
+                    inp_shapes.push_back(shape);
+                }
+            }
+            else
+            {
+                for (int inp_id = 0; inp_id < net_input_size; inp_id++)
+                {
+                    MatShape shape;  // empty
+                    inp_shapes.push_back(shape);
+                }
+            }
         }

         for (int li = 0; li < layersSize; li++)
@@ -364,6 +409,17 @@ public:
                 addedBlobs.back().outNum = netInputs.size();
                 netInputs.push_back(addedBlobs.back().name);
             }
+            if (layer.has_input_param())
+            {
+                const caffe::InputParameter &inputParameter = layer.input_param();
+                int input_shape_size = inputParameter.shape_size();
+                CV_CheckEQ(input_shape_size, layer.top_size(), "");
+                for (int inp_id = 0; inp_id < input_shape_size; inp_id++)
+                {
+                    MatShape shape = parseBlobShape(inputParameter.shape(inp_id));
+                    inp_shapes.push_back(shape);
+                }
+            }
             continue;
         }
         else if (type == "BatchNorm")
@@ -424,35 +480,15 @@ public:
         }
         dstNet.setInputsNames(netInputs);

-        std::vector<MatShape> inp_shapes;
-        if (net.input_shape_size() > 0 || (layersSize > 0 && net.layer(0).has_input_param() &&
-            net.layer(0).input_param().shape_size() > 0))
-        {
-            int size = (net.input_shape_size() > 0) ? net.input_shape_size() :
-                                                      net.layer(0).input_param().shape_size();
-            for (int inp_id = 0; inp_id < size; inp_id++)
+        if (inp_shapes.size() > 0)
+        {
+            CV_CheckEQ(inp_shapes.size(), netInputs.size(), "");
+            for (int inp_id = 0; inp_id < inp_shapes.size(); inp_id++)
             {
-                const caffe::BlobShape &_input_shape = (net.input_shape_size() > 0) ?
-                                                       net.input_shape(inp_id) :
-                                                       net.layer(0).input_param().shape(inp_id);
-                MatShape shape;
-                for (int i = 0; i < _input_shape.dim_size(); i++)
-                {
-                    shape.push_back((int)_input_shape.dim(i));
-                }
-                inp_shapes.push_back(shape);
+                if (!inp_shapes[inp_id].empty())
+                    dstNet.setInput(Mat(inp_shapes[inp_id], CV_32F), netInputs[inp_id]);
             }
         }
-        else if (net.input_dim_size() > 0)
-        {
-            MatShape shape;
-            for (int dim = 0; dim < net.input_dim_size(); dim++)
-            {
-                shape.push_back(net.input_dim(dim));
-            }
-            inp_shapes.push_back(shape);
-        }
-
-        for (int inp_id = 0; inp_id < inp_shapes.size(); inp_id++)
-        {
-            dstNet.setInput(Mat(inp_shapes[inp_id], CV_32F), netInputs[inp_id]);
-        }

         addedBlobs.clear();
     }
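Note on these hunks: besides factoring the shape parsing into parseBlobShape(), the importer now resolves input shapes once, up front, with a fixed precedence. A condensed, self-contained restatement of that precedence (mine; the vector arguments stand in for the protobuf accessors net.input_dim()/net.input_shape(), so this is a sketch, not a drop-in replacement):

    #include <vector>

    // Precedence sketch: legacy 'input_dim' (4 ints per input) wins, then
    // legacy 'input_shape' blobs; otherwise the shape stays empty and must be
    // supplied later via Net::setInput().
    std::vector<int> shapeForInput(const std::vector<int>& legacyDims,               // net.input_dim(...)
                                   const std::vector<std::vector<int>>& legacyShapes, // net.input_shape(...)
                                   int inp_id)
    {
        if (!legacyDims.empty())            // deprecated: flat list of N*4 ints
            return { legacyDims[inp_id*4],     legacyDims[inp_id*4 + 1],
                     legacyDims[inp_id*4 + 2], legacyDims[inp_id*4 + 3] };
        if (!legacyShapes.empty())          // deprecated: one BlobShape per input
            return legacyShapes[inp_id];
        return {};                          // unknown; caller sets the input later
    }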
modules/dnn/src/dnn.cpp
@@ -1418,13 +1418,15 @@ struct Net::Impl
         clear();

+        this->blobsToKeep = blobsToKeep_;
+
         allocateLayers(blobsToKeep_);

         MapIdToLayerData::iterator it = layers.find(0);
         CV_Assert(it != layers.end());
         it->second.skip = netInputLayer->skip;

-        initBackend();
+        initBackend(blobsToKeep_);

         if (!netWasAllocated)
         {
@@ -1437,7 +1439,6 @@ struct Net::Impl
         }

         netWasAllocated = true;
-        this->blobsToKeep = blobsToKeep_;

         if (DNN_NETWORK_DUMP > 0)
         {
@@ -1564,7 +1565,7 @@ struct Net::Impl
         ldOut.consumers.push_back(LayerPin(inLayerId, outNum));
     }

-    void initBackend()
+    void initBackend(const std::vector<LayerPin>& blobsToKeep_)
     {
         CV_TRACE_FUNCTION();
         if (preferableBackend == DNN_BACKEND_OPENCV)
@@ -1574,7 +1575,7 @@ struct Net::Impl
         else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
         {
 #ifdef HAVE_INF_ENGINE
-            initInfEngineBackend();
+            initInfEngineBackend(blobsToKeep_);
 #else
             CV_Assert(false && "This OpenCV version is built without Inference Engine API support");
 #endif
@@ -1582,7 +1583,7 @@ struct Net::Impl
         else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
 #ifdef HAVE_DNN_NGRAPH
-            initNgraphBackend();
+            initNgraphBackend(blobsToKeep_);
 #else
             CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Inference Engine + nGraph");
 #endif
@@ -1688,7 +1689,7 @@ struct Net::Impl
         }
     }

-    void initInfEngineBackend()
+    void initInfEngineBackend(const std::vector<LayerPin>& blobsToKeep_)
     {
         CV_TRACE_FUNCTION();
         CV_Assert_N(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, haveInfEngine());
@@ -1878,6 +1879,15 @@ struct Net::Impl
             CV_Assert(!ieNode.empty());
             ieNode->net = net;

+            for (const auto& pin : blobsToKeep_)
+            {
+                if (pin.lid == ld.id)
+                {
+                    ieNode->net->addOutput(ieNode->layer.getName());
+                    break;
+                }
+            }
+
             // Convert weights in FP16 for specific targets.
             if ((preferableTarget == DNN_TARGET_OPENCL_FP16 || preferableTarget == DNN_TARGET_MYRIAD ||
@@ -1984,7 +1994,7 @@ struct Net::Impl
         }
     }

-    void initNgraphBackend()
+    void initNgraphBackend(const std::vector<LayerPin>& blobsToKeep_)
     {
         CV_TRACE_FUNCTION();
         CV_Assert_N(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, haveInfEngine());
@@ -2173,6 +2183,14 @@ struct Net::Impl
                 // TF EAST_text_detection
                 ieNode->net->setUnconnectedNodes(ieNode);
             }
+            for (const auto& pin : blobsToKeep_)
+            {
+                if (pin.lid == ld.id)
+                {
+                    ieNode->net->addOutput(ieNode->node->get_friendly_name());
+                    break;
+                }
+            }
             ieNode->net->setNodePtr(&ieNode->node);

             net->addBlobs(ld.inputBlobsWrappers);
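Note on these hunks: blobsToKeep_ holds the layer pins a caller asked for by name; with an Inference Engine or nGraph backend those layers have to be registered as extra network outputs, or the backend graph compiler is free to fold them away. A usage-level sketch of what triggers this path (mine; the layer names are hypothetical, substitute names from your own model):

    #include <opencv2/dnn.hpp>
    using namespace cv::dnn;

    void runWithIntermediateOutputs(Net& net, const cv::Mat& blob)
    {
        net.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
        net.setInput(blob);

        // Requesting an intermediate layer alongside the final one puts its
        // pin into blobsToKeep_, which initNgraphBackend() above now turns
        // into an extra addOutput() on the IE network.
        std::vector<cv::Mat> outs;
        std::vector<cv::String> names = {"conv4/relu", "prob"};
        net.forward(outs, names);
    }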
modules/dnn/src/ie_ngraph.cpp
@@ -231,11 +231,10 @@ void InfEngineNgraphNet::init(Target targetId)
                 }
             }
         }
-    } else {
-        for (const auto& name : requestedOutputs)
-        {
-            cnn.addOutput(name);
-        }
+    }
+
+    for (const auto& name : requestedOutputs)
+    {
+        cnn.addOutput(name);
     }

     for (const auto& it : cnn.getInputsInfo())
modules/dnn/src/layers/crop_and_resize_layer.cpp
@@ -5,6 +5,7 @@
 // Copyright (C) 2018, Intel Corporation, all rights reserved.
 // Third party copyrights are property of their respective owners.

 #include "../precomp.hpp"
+#include "../ie_ngraph.hpp"
 #include "layers_common.hpp"

 #ifdef HAVE_CUDA
@@ -25,6 +26,14 @@ public:
         outHeight = params.get<float>("height");
     }

+    virtual bool supportBackend(int backendId) CV_OVERRIDE
+    {
+        return backendId == DNN_BACKEND_OPENCV ||
+               backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH ||
+               backendId == DNN_BACKEND_CUDA;
+    }
+
     bool getMemoryShapes(const std::vector<MatShape> &inputs,
                          const int requiredOutputs,
                          std::vector<MatShape> &outputs,
@@ -41,11 +50,6 @@ public:
         return false;
     }

-    virtual bool supportBackend(int backendId) CV_OVERRIDE
-    {
-        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
-    }
-
     void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
     {
         CV_TRACE_FUNCTION();
@@ -121,6 +125,41 @@ public:
         }
     }

+#ifdef HAVE_DNN_NGRAPH
+    virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
+                                        const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
+    {
+        // Slice second input: from 1x1xNx7 to 1x1xNx5
+        auto input = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
+        auto rois = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;
+
+        std::vector<size_t> dims = rois->get_shape(), offsets(4, 0);
+        offsets[3] = 2;
+        dims[3] = 7;
+
+        auto lower_bounds = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
+                                ngraph::Shape{offsets.size()}, offsets.data());
+        auto upper_bounds = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
+                                ngraph::Shape{dims.size()}, dims.data());
+        auto strides = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
+                                ngraph::Shape{dims.size()}, std::vector<int64_t>((int64_t)dims.size(), 1));
+        auto slice = std::make_shared<ngraph::op::v1::StridedSlice>(rois,
+                                lower_bounds, upper_bounds, strides,
+                                std::vector<int64_t>{}, std::vector<int64_t>{});
+
+        // Reshape rois from 4D to 2D
+        std::vector<size_t> shapeData = {dims[2], 5};
+        auto shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{2}, shapeData.data());
+        auto reshape = std::make_shared<ngraph::op::v1::Reshape>(slice, shape, true);
+
+        auto roiPooling = std::make_shared<ngraph::op::v0::ROIPooling>(input, reshape,
+                                ngraph::Shape{(size_t)outHeight, (size_t)outWidth}, 1.0f, "bilinear");
+
+        return Ptr<BackendNode>(new InfEngineNgraphNode(roiPooling));
+    }
+#endif  // HAVE_DNN_NGRAPH
+
 #ifdef HAVE_CUDA
     Ptr<BackendNode> initCUDA(void *context_, ...
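Note on the initNgraph hunk: the StridedSlice keeps the last five of the seven values in each detection row before feeding ROIPooling. The same tensor surgery on the CPU side, as a sketch (mine), assuming the usual dnn detection row layout [imageId, label, conf, x1, y1, x2, y2]:

    #include <opencv2/core.hpp>

    // Keep columns [2, 7) of every N x 7 detection row -> N x 5, matching what
    // the nGraph StridedSlice + Reshape above produce for ROIPooling.
    cv::Mat detectionsToRois(const cv::Mat& det)  // det: N x 7, CV_32F
    {
        return det.colRange(2, 7).clone();
    }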
modules/dnn/src/layers/resize_layer.cpp
@@ -41,7 +41,7 @@ public:
         CV_Assert(params.has("zoom_factor_x") && params.has("zoom_factor_y"));
     }
     interpolation = params.get<String>("interpolation");
-    CV_Assert(interpolation == "nearest" || interpolation == "bilinear");
+    CV_Assert(interpolation == "nearest" || interpolation == "opencv_linear" || interpolation == "bilinear");

     alignCorners = params.get<bool>("align_corners", false);
 }
@@ -115,14 +115,15 @@ public:
     Mat& inp = inputs[0];
     Mat& out = outputs[0];
-    if (interpolation == "nearest")
+    if (interpolation == "nearest" || interpolation == "opencv_linear")
     {
+        InterpolationFlags mode = interpolation == "nearest" ? INTER_NEAREST : INTER_LINEAR;
         for (size_t n = 0; n < inputs[0].size[0]; ++n)
         {
             for (size_t ch = 0; ch < inputs[0].size[1]; ++ch)
             {
                 resize(getPlane(inp, n, ch), getPlane(out, n, ch),
-                       Size(outWidth, outHeight), 0, 0, INTER_NEAREST);
+                       Size(outWidth, outHeight), 0, 0, mode);
             }
         }
     }
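Note on this hunk: as I read it, the new "opencv_linear" mode deliberately reuses cv::resize's INTER_LINEAR sampling convention, applied per plane, while "bilinear" keeps the layer's importer-compatible sampling path. A minimal per-plane sketch of the new branch (mine):

    #include <opencv2/imgproc.hpp>

    // One 2D plane of an NCHW blob, resized exactly as the
    // "nearest"/"opencv_linear" branch above does it.
    void resizePlane(const cv::Mat& inPlane, cv::Mat& outPlane,
                     int outW, int outH, bool nearest)
    {
        cv::InterpolationFlags mode = nearest ? cv::INTER_NEAREST : cv::INTER_LINEAR;
        cv::resize(inPlane, outPlane, cv::Size(outW, outH), 0, 0, mode);
    }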
modules/dnn/src/layers/scale_layer.cpp
@@ -61,7 +61,8 @@ public:
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
                backendId == DNN_BACKEND_HALIDE ||
-               ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && axis == 1);
+               (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && axis == 1) ||
+               (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && axis > 0);
     }

     void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
@@ -263,22 +264,26 @@ public:
         auto ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;

         std::vector<size_t> shape(ieInpNode->get_shape().size(), 1);
-        shape[1] = numChannels;
-        auto weight = hasWeights ?
-                      std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), blobs[0].data) :
-                      std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), std::vector<float>(numChannels, 1).data());
-        auto bias = hasBias ?
-                    std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), blobs.back().data) :
-                    std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), std::vector<float>(numChannels, 0).data());
-        auto scale_node = std::make_shared<ngraph::op::v1::Multiply>(ieInpNode, weight, ngraph::op::AutoBroadcastType::NUMPY);
-        auto scale_shift = std::make_shared<ngraph::op::v1::Add>(scale_node, bias, ngraph::op::AutoBroadcastType::NUMPY);
-        return Ptr<BackendNode>(new InfEngineNgraphNode(scale_shift));
+        int cAxis = clamp(axis, shape.size());
+        shape[cAxis] = numChannels;
+
+        auto node = ieInpNode;
+        if (hasWeights)
+        {
+            auto weight = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+                                                                 ngraph::Shape(shape), blobs[0].data);
+            node = std::make_shared<ngraph::op::v1::Multiply>(node, weight, ngraph::op::AutoBroadcastType::NUMPY);
+        }
+        if (hasBias || !hasWeights)
+        {
+            auto bias = hasBias ?
+                        std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), blobs.back().data) :
+                        std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), std::vector<float>(numChannels, 0).data());
+            node = std::make_shared<ngraph::op::v1::Add>(node, bias, ngraph::op::AutoBroadcastType::NUMPY);
+        }
+        return Ptr<BackendNode>(new InfEngineNgraphNode(node));
     }
 #endif  // HAVE_DNN_NGRAPH
modules/dnn/src/onnx/onnx_importer.cpp
@@ -485,16 +485,23 @@ void ONNXImporter::populateNet(Net dstNet)
         }
         else if (layer_type == "Split")
         {
-            DictValue splits = layerParams.get("split");
-            const int numSplits = splits.size();
-            CV_Assert(numSplits > 1);
-
-            std::vector<int> slicePoints(numSplits - 1, splits.get<int>(0));
-            for (int i = 1; i < splits.size() - 1; ++i)
+            if (layerParams.has("split"))
             {
-                slicePoints[i] = slicePoints[i - 1] + splits.get<int>(i - 1);
+                DictValue splits = layerParams.get("split");
+                const int numSplits = splits.size();
+                CV_Assert(numSplits > 1);
+
+                std::vector<int> slicePoints(numSplits - 1, splits.get<int>(0));
+                for (int i = 1; i < splits.size() - 1; ++i)
+                {
+                    slicePoints[i] = slicePoints[i - 1] + splits.get<int>(i - 1);
+                }
+                layerParams.set("slice_point", DictValue::arrayInt(&slicePoints[0], slicePoints.size()));
             }
-            layerParams.set("slice_point", DictValue::arrayInt(&slicePoints[0], slicePoints.size()));
+            else
+            {
+                layerParams.set("num_split", node_proto.output_size());
+            }
             layerParams.type = "Slice";
         }
         else if (layer_type == "Add" || layer_type == "Sum")
@@ -973,6 +980,15 @@ void ONNXImporter::populateNet(Net dstNet)
                 replaceLayerParam(layerParams, "width_scale", "zoom_factor_x");
             }
             replaceLayerParam(layerParams, "mode", "interpolation");
+
+            if (layerParams.get<String>("interpolation") == "linear" && framework_name == "pytorch")
+            {
+                layerParams.type = "Resize";
+                Mat scales = getBlob(node_proto, constBlobs, 1);
+                CV_Assert(scales.total() == 4);
+                layerParams.set("interpolation", "opencv_linear");
+                layerParams.set("zoom_factor_y", scales.at<float>(2));
+                layerParams.set("zoom_factor_x", scales.at<float>(3));
+            }
         }
         else if (layer_type == "LogSoftmax")
         {
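Note on the Split hunk: when the ONNX node does carry a 'split' attribute, the chunk sizes become cut positions: the running sums of all but the last chunk. A standalone sketch of that arithmetic (mine): for split = {2, 3, 5} over an axis of length 10, the slice points are {2, 5}.

    #include <vector>

    // Convert ONNX Split chunk sizes into Slice-layer cut positions by
    // accumulating a running sum over all but the last chunk.
    std::vector<int> splitsToSlicePoints(const std::vector<int>& splits)
    {
        std::vector<int> slicePoints;
        int pos = 0;
        for (size_t i = 0; i + 1 < splits.size(); ++i)
        {
            pos += splits[i];
            slicePoints.push_back(pos);  // {2, 5} for splits {2, 3, 5}
        }
        return slicePoints;
    }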
modules/dnn/test/test_ie_models.cpp
@@ -73,28 +73,7 @@ struct OpenVINOModelTestCaseInfo
 static const std::map<std::string, OpenVINOModelTestCaseInfo>& getOpenVINOTestModels()
 {
     static std::map<std::string, OpenVINOModelTestCaseInfo> g_models {
-#if INF_ENGINE_RELEASE <= 2018050000
-        { "age-gender-recognition-retail-0013", {
-            "deployment_tools/intel_models/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013",
-            "deployment_tools/intel_models/age-gender-recognition-retail-0013/FP16/age-gender-recognition-retail-0013"
-        }},
-        { "face-person-detection-retail-0002", {
-            "deployment_tools/intel_models/face-person-detection-retail-0002/FP32/face-person-detection-retail-0002",
-            "deployment_tools/intel_models/face-person-detection-retail-0002/FP16/face-person-detection-retail-0002"
-        }},
-        { "head-pose-estimation-adas-0001", {
-            "deployment_tools/intel_models/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001",
-            "deployment_tools/intel_models/head-pose-estimation-adas-0001/FP16/head-pose-estimation-adas-0001"
-        }},
-        { "person-detection-retail-0002", {
-            "deployment_tools/intel_models/person-detection-retail-0002/FP32/person-detection-retail-0002",
-            "deployment_tools/intel_models/person-detection-retail-0002/FP16/person-detection-retail-0002"
-        }},
-        { "vehicle-detection-adas-0002", {
-            "deployment_tools/intel_models/vehicle-detection-adas-0002/FP32/vehicle-detection-adas-0002",
-            "deployment_tools/intel_models/vehicle-detection-adas-0002/FP16/vehicle-detection-adas-0002"
-        }}
-#else
+#if INF_ENGINE_RELEASE >= 2018050000
     // layout is defined by open_model_zoo/model_downloader
     // Downloaded using these parameters for Open Model Zoo downloader (2019R1):
     // ./downloader.py -o ${OPENCV_DNN_TEST_DATA_PATH}/omz_intel_models --cache_dir ${OPENCV_DNN_TEST_DATA_PATH}/.omz_cache/ \
@@ -118,7 +97,16 @@ static const std::map<std::string, OpenVINOModelTestCaseInfo>& getOpenVINOTestMo
         { "vehicle-detection-adas-0002", {
             "Transportation/object_detection/vehicle/mobilenet-reduced-ssd/dldt/vehicle-detection-adas-0002",
             "Transportation/object_detection/vehicle/mobilenet-reduced-ssd/dldt/vehicle-detection-adas-0002-fp16"
-        }}
+        }},
+#endif
+#if INF_ENGINE_RELEASE >= 2020010000
+        // Downloaded using these parameters for Open Model Zoo downloader (2020.1):
+        // ./downloader.py -o ${OPENCV_DNN_TEST_DATA_PATH}/omz_intel_models --cache_dir ${OPENCV_DNN_TEST_DATA_PATH}/.omz_cache/ \
+        //     --name person-detection-retail-0013
+        { "person-detection-retail-0013", {  // IRv10
+            "intel/person-detection-retail-0013/FP32/person-detection-retail-0013",
+            "intel/person-detection-retail-0013/FP16/person-detection-retail-0013"
+        }},
+#endif
     };
@@ -305,8 +293,8 @@ TEST_P(DNNTestOpenVINO, models)
     OpenVINOModelTestCaseInfo modelInfo = it->second;
     std::string modelPath = isFP16 ? modelInfo.modelPathFP16 : modelInfo.modelPathFP32;

-    std::string xmlPath = findDataFile(modelPath + ".xml");
-    std::string binPath = findDataFile(modelPath + ".bin");
+    std::string xmlPath = findDataFile(modelPath + ".xml", false);
+    std::string binPath = findDataFile(modelPath + ".bin", false);

     std::map<std::string, cv::Mat> inputsMap;
     std::map<std::string, cv::Mat> ieOutputsMap, cvOutputsMap;
@@ -316,13 +304,19 @@ TEST_P(DNNTestOpenVINO, models)
     runIE(targetId, xmlPath, binPath, inputsMap, ieOutputsMap);
     runCV(backendId, targetId, xmlPath, binPath, inputsMap, cvOutputsMap);

+    double eps = 0;
+#if INF_ENGINE_VER_MAJOR_GE(2020010000)
+    if (targetId == DNN_TARGET_CPU && checkHardwareSupport(CV_CPU_AVX_512F))
+        eps = 1e-5;
+#endif
+
     EXPECT_EQ(ieOutputsMap.size(), cvOutputsMap.size());
     for (auto& srcIt : ieOutputsMap)
     {
         auto dstIt = cvOutputsMap.find(srcIt.first);
         CV_Assert(dstIt != cvOutputsMap.end());
         double normInf = cvtest::norm(srcIt.second, dstIt->second, cv::NORM_INF);
-        EXPECT_EQ(normInf, 0);
+        EXPECT_LE(normInf, eps) << "output=" << srcIt.first;
     }
 }
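Note on the last hunk: with IE 2020.1 on AVX-512 CPUs, bitwise-identical outputs are apparently no longer guaranteed, so the strict equality becomes an L-infinity tolerance. A standalone sketch of that comparison pattern (mine):

    #include <opencv2/core.hpp>

    // NORM_INF is the max absolute elementwise difference; eps = 0 keeps the
    // old strict behaviour, eps = 1e-5 tolerates FP reassociation differences.
    bool outputsMatch(const cv::Mat& a, const cv::Mat& b, double eps)
    {
        return cv::norm(a, b, cv::NORM_INF) <= eps;
    }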
modules/dnn/test/test_onnx_importer.cpp
@@ -335,6 +335,9 @@ TEST_P(Test_ONNX_layers, Padding)
 TEST_P(Test_ONNX_layers, Resize)
 {
     testONNXModels("resize_nearest");
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    testONNXModels("resize_bilinear");
 }

 TEST_P(Test_ONNX_layers, MultyInputs)
@@ -411,6 +414,18 @@ TEST_P(Test_ONNX_layers, ReduceL2)
     testONNXModels("reduceL2");
 }

+TEST_P(Test_ONNX_layers, Split)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+    testONNXModels("split_1");
+    testONNXModels("split_2");
+    testONNXModels("split_3");
+    testONNXModels("split_4");
+}
+
 TEST_P(Test_ONNX_layers, Slice)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
modules/dnn/test/test_tf_importer.cpp
@@ -994,8 +994,16 @@ TEST(Test_TensorFlow, two_inputs)
     normAssert(out, firstInput + secondInput);
 }

-TEST(Test_TensorFlow, Mask_RCNN)
+TEST_P(Test_TensorFlow_nets, Mask_RCNN)
 {
+    static const double kMaskThreshold = 0.5;
+
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+
+    if (target == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
+
     applyTestTag(CV_TEST_TAG_MEMORY_1GB, CV_TEST_TAG_DEBUG_VERYLONG);
     Mat img = imread(findDataFile("dnn/street.png"));
     std::string proto = findDataFile("dnn/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt");
@@ -1006,7 +1014,8 @@ TEST_P(Test_TensorFlow_nets, Mask_RCNN)
     Mat refMasks = blobFromNPY(path("mask_rcnn_inception_v2_coco_2018_01_28.detection_masks.npy"));
     Mat blob = blobFromImage(img, 1.0f, Size(800, 800), Scalar(), true, false);

-    net.setPreferableBackend(DNN_BACKEND_OPENCV);
+    net.setPreferableBackend(backend);
+    net.setPreferableTarget(target);

     net.setInput(blob);
@@ -1020,7 +1029,10 @@ TEST_P(Test_TensorFlow_nets, Mask_RCNN)
     Mat outDetections = outs[0];
     Mat outMasks = outs[1];

-    normAssertDetections(refDetections, outDetections, "", /*threshold for zero confidence*/1e-5);
+    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.019 : 2e-5;
+    double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.018 : default_lInf;
+    normAssertDetections(refDetections, outDetections, "", /*threshold for zero confidence*/1e-5,
+                         scoreDiff, iouDiff);

     // Output size of masks is NxCxHxW where
     // N - number of detected boxes
@@ -1044,7 +1056,18 @@ TEST_P(Test_TensorFlow_nets, Mask_RCNN)
         outMasks(srcRanges).copyTo(masks(dstRanges));
     }
     cv::Range topRefMasks[] = {Range::all(), Range(0, numDetections), Range::all(), Range::all()};
-    normAssert(masks, refMasks(&topRefMasks[0]));
+    refMasks = refMasks(&topRefMasks[0]);
+
+    // make binary masks
+    cv::threshold(masks.reshape(1, 1), masks, kMaskThreshold, 1, THRESH_BINARY);
+    cv::threshold(refMasks.reshape(1, 1), refMasks, kMaskThreshold, 1, THRESH_BINARY);
+
+    double inter = cv::countNonZero(masks & refMasks);
+    double area = cv::countNonZero(masks | refMasks);
+    EXPECT_GE(inter / area, 0.99);
+
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        expectNoFallbacks(net);
 }
 }
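Note on the mask check: instead of exact per-pixel comparison, the test now binarizes both mask stacks and requires an intersection-over-union of at least 0.99. A standalone sketch of the technique (mine), assuming single-channel float masks:

    #include <opencv2/imgproc.hpp>

    // Binary-mask IoU: threshold both stacks, then intersection / union
    // via bitwise ops on 8-bit masks.
    double maskIoU(const cv::Mat& pred, const cv::Mat& ref, double thr)
    {
        cv::Mat a, b;
        cv::threshold(pred.reshape(1, 1), a, thr, 1, cv::THRESH_BINARY);
        cv::threshold(ref.reshape(1, 1), b, thr, 1, cv::THRESH_BINARY);
        a.convertTo(a, CV_8U);
        b.convertTo(b, CV_8U);
        double inter = cv::countNonZero(a & b);
        double uni = cv::countNonZero(a | b);
        return uni > 0 ? inter / uni : 1.0;
    }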
modules/highgui/src/window_cocoa.mm
@@ -112,11 +112,14 @@ static bool wasInitialized = false;
     BOOL autosize;
     BOOL firstContent;
     int status;
+    int x0, y0;
 }
 @property(assign) CvMouseCallback mouseCallback;
 @property(assign) void *mouseParam;
 @property(assign) BOOL autosize;
 @property(assign) BOOL firstContent;
+@property(assign) int x0;
+@property(assign) int y0;
 @property(retain) NSMutableDictionary *sliders;
 @property(readwrite) int status;
 - (CVView *)contentView;
@@ -252,6 +255,16 @@ CV_IMPL void cvShowImage( const char* name, const CvArr* arr)
             contentSize.height = scaledImageSize.height + [window contentView].sliderHeight;
             contentSize.width = std::max<int>(scaledImageSize.width, MIN_SLIDER_WIDTH);
             [window setContentSize:contentSize]; //adjust sliders to fit new window size
+            if([window firstContent])
+            {
+                int x = [window x0];
+                int y = [window y0];
+                if(x >= 0 && y >= 0)
+                {
+                    y = [[window screen] visibleFrame].size.height - y;
+                    [window setFrameTopLeftPoint:NSMakePoint(x, y)];
+                }
+            }
         }
     }
     [window setFirstContent:NO];
@@ -275,7 +288,6 @@ CV_IMPL void cvResizeWindow( const char* name, int width, int height)
 CV_IMPL void cvMoveWindow( const char* name, int x, int y)
 {
-
     CV_FUNCNAME("cvMoveWindow");
     __BEGIN__;
@@ -287,8 +299,14 @@ CV_IMPL void cvMoveWindow( const char* name, int x, int y)
     //cout << "cvMoveWindow"<< endl;
     window = cvGetWindow(name);
     if(window) {
-        y = [[window screen] frame].size.height - y;
-        [window setFrameTopLeftPoint:NSMakePoint(x, y)];
+        if([window firstContent]) {
+            [window setX0:x];
+            [window setY0:y];
+        }
+        else {
+            y = [[window screen] visibleFrame].size.height - y;
+            [window setFrameTopLeftPoint:NSMakePoint(x, y)];
+        }
     }
     [localpool1 drain];
@@ -557,6 +575,8 @@ CV_IMPL int cvNamedWindow( const char* name, int flags )
     [window setFrameTopLeftPoint:initContentRect.origin];

     [window setFirstContent:YES];
+    [window setX0:-1];
+    [window setY0:-1];

     [window setContentView:[[CVView alloc] init]];
@@ -819,6 +839,8 @@ static NSSize constrainAspectRatio(NSSize base, NSSize constraint) {
 @synthesize mouseParam;
 @synthesize autosize;
 @synthesize firstContent;
+@synthesize x0;
+@synthesize y0;
 @synthesize sliders;
 @synthesize status;
modules/imgproc/test/test_imgwarp.cpp
@@ -1413,6 +1413,21 @@ TEST(Resize, lanczos4_regression_16192)
     EXPECT_EQ(cvtest::norm(dst, expected, NORM_INF), 0) << dst(Rect(0, 0, 8, 8));
 }

+TEST(Resize, DISABLED_nearest_regression_15075)  // reverted https://github.com/opencv/opencv/pull/16497
+{
+    const int C = 5;
+    const int i1 = 5, j1 = 5;
+    Size src_size(12, 12);
+    Size dst_size(11, 11);
+
+    cv::Mat src = cv::Mat::zeros(src_size, CV_8UC(C)), dst;
+    for (int j = 0; j < C; j++)
+        src.col(i1).row(j1).data[j] = 1;
+
+    cv::resize(src, dst, dst_size, 0, 0, INTER_NEAREST);
+    EXPECT_EQ(C, cvtest::norm(dst, NORM_L1)) << src.size;
+}
+
 TEST(Imgproc_Warp, multichannel)
 {
     static const int inter_types[] = {INTER_NEAREST, INTER_AREA, INTER_CUBIC,
modules/python/package/template/config.py.in

import os

BINARIES_PATHS = [
    @CMAKE_PYTHON_BINARIES_PATH@
] + BINARIES_PATHS
modules/python/python_loader.cmake
@@ -58,7 +58,13 @@ if(NOT OpenCV_FOUND) # Ignore "standalone" builds of Python bindings
 else()
     list(APPEND CMAKE_PYTHON_BINARIES_INSTALL_PATH "os.path.join(${CMAKE_PYTHON_EXTENSION_INSTALL_PATH_BASE}, '${OPENCV_LIB_INSTALL_PATH}')")
 endif()
-string(REPLACE ";" ",\n    " CMAKE_PYTHON_BINARIES_PATH "${CMAKE_PYTHON_BINARIES_INSTALL_PATH}")
+set(CMAKE_PYTHON_BINARIES_PATH "${CMAKE_PYTHON_BINARIES_INSTALL_PATH}")
+if(WIN32 AND HAVE_CUDA)
+  if(DEFINED CUDA_TOOLKIT_ROOT_DIR)
+    list(APPEND CMAKE_PYTHON_BINARIES_PATH "os.path.join(os.getenv('CUDA_PATH', '${CUDA_TOOLKIT_ROOT_DIR}'), 'bin')")
+  endif()
+endif()
+string(REPLACE ";" ",\n    " CMAKE_PYTHON_BINARIES_PATH "${CMAKE_PYTHON_BINARIES_PATH}")
 configure_file("${PYTHON_SOURCE_DIR}/package/template/config.py.in" "${__python_loader_install_tmp_path}/cv2/config.py" @ONLY)
 install(FILES "${__python_loader_install_tmp_path}/cv2/config.py" DESTINATION "${OPENCV_PYTHON_INSTALL_PATH}/cv2/" COMPONENT python)
 endif()
samples/cpp/falsecolor.cpp
@@ -41,10 +41,10 @@ static Mat DrawMyImage(int thickness,int nbShape)
 {
     Mat img = Mat::zeros(500, 256 * thickness + 100, CV_8UC1);
     int offsetx = 50, offsety = 25;
-    int lineLenght = 50;
+    int lineLength = 50;

     for (int i = 0; i < 256; i++)
-        line(img, Point(thickness * i + offsetx, offsety), Point(thickness * i + offsetx, offsety + lineLenght), Scalar(i), thickness);
+        line(img, Point(thickness * i + offsetx, offsety), Point(thickness * i + offsetx, offsety + lineLength), Scalar(i), thickness);
     RNG r;
     Point center;
     int radius;
@@ -57,19 +57,19 @@ static Mat DrawMyImage(int thickness,int nbShape)
         int typeShape = r.uniform(MyCIRCLE, MyELLIPSE + 1);
         switch (typeShape) {
         case MyCIRCLE:
-            center = Point(r.uniform(offsetx, img.cols - offsetx), r.uniform(offsety + lineLenght, img.rows - offsety));
+            center = Point(r.uniform(offsetx, img.cols - offsetx), r.uniform(offsety + lineLength, img.rows - offsety));
             radius = r.uniform(1, min(offsetx, offsety));
             circle(img, center, radius, Scalar(i), -1);
             break;
         case MyRECTANGLE:
-            center = Point(r.uniform(offsetx, img.cols - offsetx), r.uniform(offsety + lineLenght, img.rows - offsety));
+            center = Point(r.uniform(offsetx, img.cols - offsetx), r.uniform(offsety + lineLength, img.rows - offsety));
             width = r.uniform(1, min(offsetx, offsety));
             height = r.uniform(1, min(offsetx, offsety));
             rc = Rect(center - Point(width, height) / 2, center + Point(width, height) / 2);
             rectangle(img, rc, Scalar(i), -1);
             break;
         case MyELLIPSE:
-            center = Point(r.uniform(offsetx, img.cols - offsetx), r.uniform(offsety + lineLenght, img.rows - offsety));
+            center = Point(r.uniform(offsetx, img.cols - offsetx), r.uniform(offsety + lineLength, img.rows - offsety));
             width = r.uniform(1, min(offsetx, offsety));
             height = r.uniform(1, min(offsetx, offsety));
             angle = r.uniform(0, 180);
samples/dnn/human_parsing.py
@@ -40,6 +40,7 @@ Follow these steps if you want to convert the original model yourself:
 '''

 import argparse
+import os.path
 import numpy as np
 import cv2 as cv
@@ -48,12 +49,11 @@ backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.
 targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)


-def preprocess(image_path):
+def preprocess(image):
     """
     Create 4-dimensional blob from image and flip image
-    :param image_path: path to input image
+    :param image: input image
     """
-    image = cv.imread(image_path)
     image_rev = np.flip(image, axis=1)
     input = cv.dnn.blobFromImages([image, image_rev], mean=(104.00698793, 116.66876762, 122.67891434))
     return input
@@ -137,15 +137,15 @@ def decode_labels(gray_image):
     return segm


-def parse_human(image_path, model_path, backend=cv.dnn.DNN_BACKEND_OPENCV, target=cv.dnn.DNN_TARGET_CPU):
+def parse_human(image, model_path, backend=cv.dnn.DNN_BACKEND_OPENCV, target=cv.dnn.DNN_TARGET_CPU):
     """
     Prepare input for execution, run net and postprocess output to parse human.
-    :param image_path: path to input image
+    :param image: input image
     :param model_path: path to JPPNet model
     :param backend: name of computation backend
     :param target: name of computation target
     """
-    input = preprocess(image_path)
+    input = preprocess(image)
     input_h, input_w = input.shape[2:]
     output = run_net(input, model_path, backend, target)
     grayscale_out = postprocess(output, (input_w, input_h))
@@ -157,7 +157,7 @@ if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='Use this script to run human parsing using JPPNet',
                                      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
     parser.add_argument('--input', '-i', required=True, help='Path to input image.')
-    parser.add_argument('--model', '-m', required=True, help='Path to pb model.')
+    parser.add_argument('--model', '-m', default='lip_jppnet_384.pb', help='Path to pb model.')
     parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int,
                         help="Choose one of computation backends: "
                              "%d: automatically (by default), "
@@ -171,7 +171,11 @@ if __name__ == '__main__':
                         '%d: VPU' % targets)
     args, _ = parser.parse_known_args()

-    output = parse_human(args.input, args.model, args.backend, args.target)
+    if not os.path.isfile(args.model):
+        raise OSError("Model not exist")
+
+    image = cv.imread(args.input)
+    output = parse_human(image, args.model, args.backend, args.target)
     winName = 'Deep learning human parsing in OpenCV'
     cv.namedWindow(winName, cv.WINDOW_AUTOSIZE)
     cv.imshow(winName, output)
samples/dnn/virtual_try_on.py (new file, mode 100644)
(diff collapsed on the source page; contents not shown)
samples/python/drawing.py (new file, mode 100644)

#!/usr/bin/env python

'''
This program demonstrates OpenCV drawing and text output functions by drawing different shapes and text strings

Usage :
    python3 drawing.py

Press any button to exit
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2 as cv

# Drawing Lines
def lines():
    for i in range(NUMBER*2):
        pt1, pt2 = [], []
        pt1.append(np.random.randint(x1, x2))
        pt1.append(np.random.randint(y1, y2))
        pt2.append(np.random.randint(x1, x2))
        pt2.append(np.random.randint(y1, y2))
        color = "%06x" % np.random.randint(0, 0xFFFFFF)
        color = tuple(int(color[i:i+2], 16) for i in (0, 2, 4))
        arrowed = np.random.randint(0, 6)
        if (arrowed < 3):
            cv.line(image, tuple(pt1), tuple(pt2), color, np.random.randint(1, 10), lineType)
        else:
            cv.arrowedLine(image, tuple(pt1), tuple(pt2), color, np.random.randint(1, 10), lineType)
        cv.imshow(wndname, image)
        if cv.waitKey(DELAY) >= 0:
            return

# Drawing Rectangle
def rectangle():
    for i in range(NUMBER*2):
        pt1, pt2 = [], []
        pt1.append(np.random.randint(x1, x2))
        pt1.append(np.random.randint(y1, y2))
        pt2.append(np.random.randint(x1, x2))
        pt2.append(np.random.randint(y1, y2))
        color = "%06x" % np.random.randint(0, 0xFFFFFF)
        color = tuple(int(color[i:i+2], 16) for i in (0, 2, 4))
        thickness = np.random.randint(-3, 10)
        marker = np.random.randint(0, 10)
        marker_size = np.random.randint(30, 80)
        if (marker > 5):
            cv.rectangle(image, tuple(pt1), tuple(pt2), color, max(thickness, -1), lineType)
        else:
            cv.drawMarker(image, tuple(pt1), color, marker, marker_size)
        cv.imshow(wndname, image)
        if cv.waitKey(DELAY) >= 0:
            return

# Drawing ellipse
def ellipse():
    for i in range(NUMBER*2):
        center = []
        center.append(np.random.randint(x1, x2))
        center.append(np.random.randint(x1, x2))
        axes = []
        axes.append(np.random.randint(0, 200))
        axes.append(np.random.randint(0, 200))
        angle = np.random.randint(0, 180)
        color = "%06x" % np.random.randint(0, 0xFFFFFF)
        color = tuple(int(color[i:i+2], 16) for i in (0, 2, 4))
        thickness = np.random.randint(-1, 9)
        cv.ellipse(image, tuple(center), tuple(axes), angle, angle - 100, angle + 200, color, thickness, lineType)
        cv.imshow(wndname, image)
        if cv.waitKey(DELAY) >= 0:
            return

# Drawing Polygonal Curves
def polygonal():
    for i in range(NUMBER):
        pt = [(0, 0)]*6
        pt = np.resize(pt, (2, 3, 2))
        pt[0][0][0] = np.random.randint(x1, x2)
        pt[0][0][1] = np.random.randint(y1, y2)
        pt[0][1][0] = np.random.randint(x1, x2)
        pt[0][1][1] = np.random.randint(y1, y2)
        pt[0][2][0] = np.random.randint(x1, x2)
        pt[0][2][1] = np.random.randint(y1, y2)
        pt[1][0][0] = np.random.randint(x1, x2)
        pt[1][0][1] = np.random.randint(y1, y2)
        pt[1][1][0] = np.random.randint(x1, x2)
        pt[1][1][1] = np.random.randint(y1, y2)
        pt[1][2][0] = np.random.randint(x1, x2)
        pt[1][2][1] = np.random.randint(y1, y2)
        color = "%06x" % np.random.randint(0, 0xFFFFFF)
        color = tuple(int(color[i:i+2], 16) for i in (0, 2, 4))
        alist = []
        for k in pt[0]:
            alist.append(k)
        for k in pt[1]:
            alist.append(k)
        ppt = np.array(alist)
        cv.polylines(image, [ppt], True, color, thickness=np.random.randint(1, 10), lineType=lineType)
        cv.imshow(wndname, image)
        if cv.waitKey(DELAY) >= 0:
            return

# fills an area bounded by several polygonal contours
def fill():
    for i in range(NUMBER):
        pt = [(0, 0)]*6
        pt = np.resize(pt, (2, 3, 2))
        pt[0][0][0] = np.random.randint(x1, x2)
        pt[0][0][1] = np.random.randint(y1, y2)
        pt[0][1][0] = np.random.randint(x1, x2)
        pt[0][1][1] = np.random.randint(y1, y2)
        pt[0][2][0] = np.random.randint(x1, x2)
        pt[0][2][1] = np.random.randint(y1, y2)
        pt[1][0][0] = np.random.randint(x1, x2)
        pt[1][0][1] = np.random.randint(y1, y2)
        pt[1][1][0] = np.random.randint(x1, x2)
        pt[1][1][1] = np.random.randint(y1, y2)
        pt[1][2][0] = np.random.randint(x1, x2)
        pt[1][2][1] = np.random.randint(y1, y2)
        color = "%06x" % np.random.randint(0, 0xFFFFFF)
        color = tuple(int(color[i:i+2], 16) for i in (0, 2, 4))
        alist = []
        for k in pt[0]:
            alist.append(k)
        for k in pt[1]:
            alist.append(k)
        ppt = np.array(alist)
        cv.fillPoly(image, [ppt], color, lineType)
        cv.imshow(wndname, image)
        if cv.waitKey(DELAY) >= 0:
            return

# Drawing Circles
def circles():
    for i in range(NUMBER):
        center = []
        center.append(np.random.randint(x1, x2))
        center.append(np.random.randint(x1, x2))
        color = "%06x" % np.random.randint(0, 0xFFFFFF)
        color = tuple(int(color[i:i+2], 16) for i in (0, 2, 4))
        cv.circle(image, tuple(center), np.random.randint(0, 300), color, np.random.randint(-1, 9), lineType)
        cv.imshow(wndname, image)
        if cv.waitKey(DELAY) >= 0:
            return

# Draws a text string
def string():
    for i in range(NUMBER):
        org = []
        org.append(np.random.randint(x1, x2))
        org.append(np.random.randint(x1, x2))
        color = "%06x" % np.random.randint(0, 0xFFFFFF)
        color = tuple(int(color[i:i+2], 16) for i in (0, 2, 4))
        cv.putText(image, "Testing text rendering", tuple(org), np.random.randint(0, 8),
                   np.random.randint(0, 100)*0.05 + 0.1, color, np.random.randint(1, 10), lineType)
        cv.imshow(wndname, image)
        if cv.waitKey(DELAY) >= 0:
            return

def string1():
    textsize = cv.getTextSize("OpenCV forever!", cv.FONT_HERSHEY_COMPLEX, 3, 5)
    org = (int((width - textsize[0][0])/2), int((height - textsize[0][1])/2))
    for i in range(0, 255, 2):
        image2 = np.array(image) - i
        cv.putText(image2, "OpenCV forever!", org, cv.FONT_HERSHEY_COMPLEX, 3, (i, i, 255), 5, lineType)
        cv.imshow(wndname, image2)
        if cv.waitKey(DELAY) >= 0:
            return

if __name__ == '__main__':
    print(__doc__)
    wndname = "Drawing Demo"
    NUMBER = 100
    DELAY = 5
    width, height = 1000, 700
    lineType = cv.LINE_AA  # change it to LINE_8 to see non-antialiased graphics
    x1, x2, y1, y2 = -width/2, width*3/2, -height/2, height*3/2
    image = np.zeros((height, width, 3), dtype=np.uint8)
    cv.imshow(wndname, image)
    cv.waitKey(DELAY)
    lines()
    rectangle()
    ellipse()
    polygonal()
    fill()
    circles()
    string()
    string1()

    cv.waitKey(0)
    cv.destroyAllWindows()
\ No newline at end of file
samples/python/grabcut.py
@@ -11,10 +11,10 @@ USAGE:
 README FIRST:
     Two windows will show up, one for input and one for output.

-    At first, in input window, draw a rectangle around the object using mouse right
-    button. Then press 'n' to segment the object (once or a few times)
+    At first, in input window, draw a rectangle around the object using the
+    right mouse button. Then press 'n' to segment the object (once or a few times)
 For any finer touch-ups, you can press any of the keys below and draw lines on
-the areas you want. Then again press 'n' for updating the output.
+the areas you want. Then again press 'n' to update the output.

 Key '0' - To select areas of sure background
 Key '1' - To select areas of sure foreground
@@ -44,8 +44,8 @@ class App():
     DRAW_BG = {'color' : BLACK, 'val' : 0}
     DRAW_FG = {'color' : WHITE, 'val' : 1}
-    DRAW_PR_FG = {'color' : GREEN, 'val' : 3}
     DRAW_PR_BG = {'color' : RED, 'val' : 2}
+    DRAW_PR_FG = {'color' : GREEN, 'val' : 3}

     # setting up flags
     rect = (0, 0, 1, 1)
@@ -160,14 +160,12 @@ class App():
                 print(""" For finer touchups, mark foreground and background after pressing keys 0-3
                 and again press 'n' \n""")
                 try:
+                    bgdmodel = np.zeros((1, 65), np.float64)
+                    fgdmodel = np.zeros((1, 65), np.float64)
                     if (self.rect_or_mask == 0):         # grabcut with rect
-                        bgdmodel = np.zeros((1, 65), np.float64)
-                        fgdmodel = np.zeros((1, 65), np.float64)
                         cv.grabCut(self.img2, self.mask, self.rect, bgdmodel, fgdmodel, 1, cv.GC_INIT_WITH_RECT)
                         self.rect_or_mask = 1
-                    elif self.rect_or_mask == 1:         # grabcut with mask
-                        bgdmodel = np.zeros((1, 65), np.float64)
-                        fgdmodel = np.zeros((1, 65), np.float64)
+                    elif (self.rect_or_mask == 1):       # grabcut with mask
                         cv.grabCut(self.img2, self.mask, self.rect, bgdmodel, fgdmodel, 1, cv.GC_INIT_WITH_MASK)
                 except:
                     import traceback
samples/python/laplace.py (new file, mode 100644)

#!/usr/bin/env python

'''
This program demonstrates Laplace point/edge detection using
OpenCV function Laplacian()
It captures from the camera of your choice: 0, 1, ... default 0

Usage:
    python laplace.py <ddepth> <smoothType> <sigma>
    If no arguments given default arguments will be used.

Keyboard Shortcuts:
    Press space bar to exit the program.
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2 as cv
import sys


def main():
    # Declare the variables we are going to use
    ddepth = cv.CV_16S
    smoothType = "MedianBlur"
    sigma = 3

    if len(sys.argv) == 4:
        ddepth = sys.argv[1]
        smoothType = sys.argv[2]
        sigma = sys.argv[3]

    # Taking input from the camera
    cap = cv.VideoCapture(0)

    # Create Window and Trackbar
    cv.namedWindow("Laplace of Image", cv.WINDOW_AUTOSIZE)
    cv.createTrackbar("Kernel Size Bar", "Laplace of Image", sigma, 15, lambda x: x)

    # Printing frame width, height and FPS
    print("=="*40)
    print("Frame Width: ", cap.get(cv.CAP_PROP_FRAME_WIDTH), "Frame Height: ", cap.get(cv.CAP_PROP_FRAME_HEIGHT),
          "FPS: ", cap.get(cv.CAP_PROP_FPS))

    while True:
        # Reading input from the camera
        ret, frame = cap.read()
        if ret == False:
            print("Can't open camera/video stream")
            break

        # Taking input/position from the trackbar
        sigma = cv.getTrackbarPos("Kernel Size Bar", "Laplace of Image")

        # Setting kernel size
        ksize = (sigma*5)|1

        # Removing noise by blurring with a filter
        if smoothType == "GAUSSIAN":
            smoothed = cv.GaussianBlur(frame, (ksize, ksize), sigma, sigma)
        if smoothType == "BLUR":
            smoothed = cv.blur(frame, (ksize, ksize))
        if smoothType == "MedianBlur":
            smoothed = cv.medianBlur(frame, ksize)

        # Apply Laplace function
        laplace = cv.Laplacian(smoothed, ddepth, 5)

        # Converting back to uint8
        result = cv.convertScaleAbs(laplace, (sigma+1)*0.25)

        # Display Output
        cv.imshow("Laplace of Image", result)

        k = cv.waitKey(30)
        if k == 27:
            return


if __name__ == "__main__":
    print(__doc__)
    main()
    cv.destroyAllWindows()