opencv · Commits

Commit 665408e5
Authored Feb 01, 2019 by Alexander Alekhin
Merge remote-tracking branch 'upstream/3.4' into merge-3.4
Parents: a65ccc06, a42bbc97
Showing 20 changed files with 254 additions and 103 deletions (+254, -103)
modules/dnn/src/dnn.cpp                       +12   -6
modules/dnn/src/layers/blank_layer.cpp        +16   -5
modules/dnn/src/layers/convolution_layer.cpp  +20  -16
modules/dnn/src/layers/pooling_layer.cpp       +4   -4
modules/dnn/src/op_inf_engine.cpp             +25  -11
modules/dnn/src/op_inf_engine.hpp              +2   -2
modules/dnn/test/test_layers.cpp              +44  -46
modules/dnn/test/test_misc.cpp                +34   -0
modules/dnn/test/test_onnx_importer.cpp        +1   -1
modules/dnn/test/test_tf_importer.cpp          +2   -2
modules/imgproc/perf/perf_blur.cpp            +23   -0
modules/imgproc/src/blend.cpp                  +0   -0
modules/imgproc/src/median_blur.cpp            +0   -0
modules/imgproc/src/pyramids.cpp               +2   -1
modules/imgproc/src/spatialgradient.cpp        +0   -0
modules/imgproc/test/test_filter.cpp           +9   -0
modules/js/src/core_bindings.cpp               +9   -0
modules/ml/src/svm.cpp                         +4   -7
modules/ml/test/test_svmtrainauto.cpp         +45   -0
samples/dnn/tf_text_graph_common.py            +2   -2
modules/dnn/src/dnn.cpp
@@ -148,7 +148,13 @@ private:
 #else
     cv::dnn::Net net;
     cv::dnn::LayerParams lp;
-    net.addLayerToPrev("testLayer", "Identity", lp);
+    lp.set("kernel_size", 1);
+    lp.set("num_output", 1);
+    lp.set("bias_term", false);
+    lp.type = "Convolution";
+    lp.name = "testLayer";
+    lp.blobs.push_back(Mat({1, 2, 1, 1}, CV_32F, Scalar(1)));
+    net.addLayerToPrev(lp.name, lp.type, lp);
     net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
     net.setPreferableTarget(target);
     static int inpDims[] = {1, 2, 3, 4};
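The hunk above swaps the Identity probe layer for a tiny 1x1 convolution, so that checking an Inference Engine target actually compiles a compute kernel. A self-contained sketch of the same probing pattern (the probeConvTarget helper and the try/catch around forward() are illustrative assumptions, not code from this commit):

    #include <opencv2/dnn.hpp>

    // Hypothetical helper mirroring the checkIETarget pattern above.
    static bool probeConvTarget(int backend, int target)
    {
        cv::dnn::Net net;
        cv::dnn::LayerParams lp;
        lp.set("kernel_size", 1);
        lp.set("num_output", 1);
        lp.set("bias_term", false);
        lp.type = "Convolution";
        lp.name = "testLayer";
        lp.blobs.push_back(cv::Mat({1, 2, 1, 1}, CV_32F, cv::Scalar(1)));
        net.addLayerToPrev(lp.name, lp.type, lp);
        net.setPreferableBackend(backend);
        net.setPreferableTarget(target);
        static int inpDims[] = {1, 2, 3, 4};                 // NCHW, as in the diff
        net.setInput(cv::Mat(4, inpDims, CV_32F, cv::Scalar(0)));
        try { net.forward(); } catch (...) { return false; } // plugin missing/unusable
        return true;
    }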
@@ -2676,7 +2682,7 @@ Net Net::readFromModelOptimizer(const String& xml, const String& bin)
     backendNode->net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet(ieNet));
     for (auto& it : ieNet.getOutputsInfo())
     {
-        Ptr<Layer> cvLayer(new InfEngineBackendLayer(it.second));
+        Ptr<Layer> cvLayer(new InfEngineBackendLayer(ieNet));
         InferenceEngine::CNNLayerPtr ieLayer = ieNet.getLayerByName(it.first.c_str());
         CV_Assert(ieLayer);
@@ -2871,8 +2877,7 @@ void Net::forward(std::vector<std::vector<Mat> >& outputBlobs,
     std::vector<LayerPin> pins;
     for (int i = 0; i < outBlobNames.size(); i++)
     {
-        std::vector<LayerPin> lp = impl->getLayerOutPins(outBlobNames[i]);
-        pins.insert(pins.end(), lp.begin(), lp.end());
+        pins.push_back(impl->getPinByAlias(outBlobNames[i]));
     }

     impl->setUpNet(pins);
@@ -2885,9 +2890,10 @@ void Net::forward(std::vector<std::vector<Mat> >& outputBlobs,
     for (int i = 0; i < outBlobNames.size(); i++)
     {
         std::vector<LayerPin> lp = impl->getLayerOutPins(outBlobNames[i]);
-        for (int i = 0; i < lp.size(); i++)
+        outputBlobs[i].resize(lp.size());
+        for (int j = 0; j < lp.size(); j++)
         {
-            outputBlobs[i].push_back(impl->getBlob(lp[i]));
+            outputBlobs[i][j] = impl->getBlob(lp[j]);
         }
     }
 }
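Together these two hunks repair the multi-output Net::forward overload: the set-up loop registers one pin per requested name, and the retrieval loop no longer shadows the outer index or leaves outputBlobs[i] unsized. A usage sketch (assumes a cv::dnn::Net net is already loaded; the new test added in modules/dnn/test/test_misc.cpp below exercises the same path):

    std::vector<cv::String> outNames;
    outNames.push_back("testLayer");             // a layer with several output pins
    std::vector<std::vector<cv::Mat> > outBlobs;
    net.forward(outBlobs, outNames);             // outBlobs[0][j] is the j-th output of "testLayer"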
modules/dnn/src/layers/blank_layer.cpp
@@ -110,14 +110,25 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
+        CV_Assert(!input->dims.empty());
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
-        InferenceEngine::Builder::SplitLayer ieLayer(name);
-        ieLayer.setOutputPorts({InferenceEngine::Port()});
+        InferenceEngine::Builder::Layer ieLayer(name);
+        ieLayer.setName(name);
+        if (preferableTarget == DNN_TARGET_MYRIAD)
+        {
+            ieLayer.setType("Copy");
+        }
+        else
+        {
+            ieLayer.setType("Split");
+            ieLayer.getParameters()["axis"] = input->dims.size() - 1;
+            ieLayer.getParameters()["out_sizes"] = input->dims[0];
+        }
+        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
+        ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
 #else
-        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-        CV_Assert(!input->dims.empty());
         InferenceEngine::LayerParams lp;
         lp.name = name;
         lp.type = "Split";
modules/dnn/src/layers/convolution_layer.cpp
@@ -281,7 +281,7 @@ public:
         const int outCn = blobs[0].size[0];
         // prepare weightsMat where each row is aligned and has enough zero padding on the right to
         // use vectorized (i.e. with intrinsics) loops without tail processing
-        Mat wm = blobs[0].reshape(1, outCn).clone();
+        Mat wm = blobs[0].reshape(1, outCn);
         if( wm.step1() % VEC_ALIGN != 0 )
         {
             int newcols = (int)alignSize(wm.step1(), VEC_ALIGN);
@@ -374,6 +374,10 @@ public:
         if( !w.empty() )
         {
+            // Keep origin weights unchanged.
+            if (weightsMat.data == blobs[0].data)
+                weightsMat = weightsMat.clone();
+            Mat originWeights = blobs[0].reshape(1, outCn);
             for (int i = 0; i < outCn; ++i)
             {
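The guard above matters because Mat::reshape returns a header over the same buffer, so fusing weights in place would otherwise corrupt the network's stored blobs[0]. An illustrative stand-alone sketch of that aliasing (not from the commit):

    #include <opencv2/core.hpp>

    int main()
    {
        cv::Mat blob = cv::Mat::ones(4, 6, CV_32F);
        cv::Mat wm = blob.reshape(1, 4);      // a view: no data is copied
        CV_Assert(wm.data == blob.data);      // same buffer, like weightsMat above
        wm = wm.clone();                      // detach before mutating in place
        wm.at<float>(0, 0) = 42.0f;           // blob is no longer affected
        CV_Assert(blob.at<float>(0, 0) == 1.0f);
        return 0;
    }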
@@ -551,13 +555,13 @@ public:
         InferenceEngine::Builder::ConvolutionLayer ieLayer(name);

-        ieLayer.setKernel({kernel.height, kernel.width});
-        ieLayer.setStrides({stride.height, stride.width});
-        ieLayer.setDilation({dilation.height, dilation.width});
-        ieLayer.setPaddingsBegin({pad.height, pad.width});
-        ieLayer.setPaddingsEnd({pad.height, pad.width});
-        ieLayer.setGroup(group);
-        ieLayer.setOutDepth(outCn);
+        ieLayer.setKernel({(size_t)kernel.height, (size_t)kernel.width});
+        ieLayer.setStrides({(size_t)stride.height, (size_t)stride.width});
+        ieLayer.setDilation({(size_t)dilation.height, (size_t)dilation.width});
+        ieLayer.setPaddingsBegin({(size_t)pad.height, (size_t)pad.width});
+        ieLayer.setPaddingsEnd({(size_t)pad.height, (size_t)pad.width});
+        ieLayer.setGroup((size_t)group);
+        ieLayer.setOutDepth((size_t)outCn);

         ieLayer.setWeights(ieWeights);
         if (ieBiases)
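This hunk, like the matching ones in the deconvolution and pooling layers below, only adds explicit size_t casts: the Inference Engine builder setters take std::vector<size_t>, and list-initializing one from int expressions is a narrowing conversion that conforming compilers reject. A minimal reproduction of the language rule (plain C++, no Inference Engine dependency):

    #include <cstddef>
    #include <vector>

    int main()
    {
        int h = 3, w = 3;
        // std::vector<std::size_t> bad{h, w};  // ill-formed: narrowing int -> size_t in list-init
        std::vector<std::size_t> ok{(std::size_t)h, (std::size_t)w};  // the diff's fix
        return (int)ok.size();
    }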
@@ -1220,7 +1224,7 @@ public:
 #ifdef HAVE_INF_ENGINE
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
         {
-            if (INF_ENGINE_RELEASE == 2018050000 && (adjustPad.height || adjustPad.width))
+            if (INF_ENGINE_RELEASE >= 2018050000 && (adjustPad.height || adjustPad.width))
                 return false;

             const int outGroupCn = blobs[0].size[1];
             // Weights are in IOHW layout
@@ -1783,13 +1787,13 @@ public:
         InferenceEngine::Builder::DeconvolutionLayer ieLayer(name);

-        ieLayer.setKernel({kernel.height, kernel.width});
-        ieLayer.setStrides({stride.height, stride.width});
-        ieLayer.setDilation({dilation.height, dilation.width});
-        ieLayer.setPaddingsBegin({pad.height, pad.width});
-        ieLayer.setPaddingsEnd({pad.height, pad.width});
-        ieLayer.setGroup(group);
-        ieLayer.setOutDepth(numOutput);
+        ieLayer.setKernel({(size_t)kernel.height, (size_t)kernel.width});
+        ieLayer.setStrides({(size_t)stride.height, (size_t)stride.width});
+        ieLayer.setDilation({(size_t)dilation.height, (size_t)dilation.width});
+        ieLayer.setPaddingsBegin({(size_t)pad.height, (size_t)pad.width});
+        ieLayer.setPaddingsEnd({(size_t)pad.height, (size_t)pad.width});
+        ieLayer.setGroup((size_t)group);
+        ieLayer.setOutDepth((size_t)numOutput);

         ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW));
         if (hasBias())
modules/dnn/src/layers/pooling_layer.cpp
@@ -299,10 +299,10 @@ public:
         if (type == MAX || type == AVE)
         {
             InferenceEngine::Builder::PoolingLayer ieLayer(name);
-            ieLayer.setKernel({kernel.height, kernel.width});
-            ieLayer.setStrides({stride.height, stride.width});
-            ieLayer.setPaddingsBegin({pad_t, pad_l});
-            ieLayer.setPaddingsEnd({pad_b, pad_r});
+            ieLayer.setKernel({(size_t)kernel.height, (size_t)kernel.width});
+            ieLayer.setStrides({(size_t)stride.height, (size_t)stride.width});
+            ieLayer.setPaddingsBegin({(size_t)pad_t, (size_t)pad_l});
+            ieLayer.setPaddingsEnd({(size_t)pad_b, (size_t)pad_r});
             ieLayer.setPoolingType(type == MAX ?
                                    InferenceEngine::Builder::PoolingLayer::PoolingType::MAX :
                                    InferenceEngine::Builder::PoolingLayer::PoolingType::AVG);
modules/dnn/src/op_inf_engine.cpp
@@ -82,7 +82,7 @@ void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& input
     CV_Assert(it != layers.end());

     const int layerId = it->second;
-    for (int i = 0; i < inpWrappers.size(); ++i)
+    for (size_t i = 0; i < inpWrappers.size(); ++i)
     {
         const auto& inp = inpWrappers[i];
         const std::string& inpName = inp->dataPtr->name;
@@ -103,7 +103,7 @@ void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& input
         else
             inpId = it->second;

-        netBuilder.connect(inpId, {layerId, i});
+        netBuilder.connect((size_t)inpId, {(size_t)layerId, i});
         unconnectedLayersIds.erase(inpId);
     }
     CV_Assert(!outputs.empty());
@@ -119,7 +119,7 @@ void InfEngineBackendNet::init(int targetId)
         for (int id : unconnectedLayersIds)
         {
             InferenceEngine::Builder::OutputLayer outLayer("myconv1");
-            netBuilder.addLayer({id}, outLayer);
+            netBuilder.addLayer({InferenceEngine::PortInfo(id)}, outLayer);
         }
         cnn = InferenceEngine::CNNNetwork(InferenceEngine::Builder::convertToICNNNetwork(netBuilder.build()));
     }
@@ -718,19 +718,33 @@ Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
     return Mat(size, CV_32F, (void*)blob->buffer());
 }

-InfEngineBackendLayer::InfEngineBackendLayer(const InferenceEngine::DataPtr& output_)
-{
-    output = output_;
-}
-
 bool InfEngineBackendLayer::getMemoryShapes(const std::vector<MatShape> &inputs,
                                             const int requiredOutputs,
                                             std::vector<MatShape> &outputs,
                                             std::vector<MatShape> &internals) const
 {
-    std::vector<size_t> dims = output->dims;
-    std::vector<int> shape(dims.rbegin(), dims.rend());
-    outputs.assign(1, shape);
+    InferenceEngine::ICNNNetwork::InputShapes inShapes = t_net.getInputShapes();
+    InferenceEngine::ICNNNetwork::InputShapes::iterator itr;
+    bool equal_flag = true;
+    size_t i = 0;
+    for (itr = inShapes.begin(); itr != inShapes.end(); ++itr)
+    {
+        InferenceEngine::SizeVector currentInShape(inputs[i].begin(), inputs[i].end());
+        if (itr->second != currentInShape)
+        {
+            itr->second = currentInShape;
+            equal_flag = false;
+        }
+        i++;
+    }

+    if (!equal_flag)
+    {
+        InferenceEngine::CNNNetwork curr_t_net(t_net);
+        curr_t_net.reshape(inShapes);
+    }
+    std::vector<size_t> dims = t_net.getOutputsInfo()[name]->getDims();
+    outputs.push_back(MatShape(dims.begin(), dims.end()));
     return false;
 }
modules/dnn/src/op_inf_engine.hpp
@@ -260,7 +260,7 @@ InferenceEngine::TBlob<int16_t>::Ptr convertFp16(const InferenceEngine::Blob::Pt
 class InfEngineBackendLayer : public Layer
 {
 public:
-    InfEngineBackendLayer(const InferenceEngine::DataPtr& output);
+    InfEngineBackendLayer(const InferenceEngine::CNNNetwork &t_net_) : t_net(t_net_) {};

     virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                  const int requiredOutputs,
@@ -273,7 +273,7 @@ public:
     virtual bool supportBackend(int backendId) CV_OVERRIDE;

 private:
-    InferenceEngine::DataPtr output;
+    InferenceEngine::CNNNetwork t_net;
 };

 #endif  // HAVE_INF_ENGINE
modules/dnn/test/test_layers.cpp
@@ -236,6 +236,10 @@ TEST_P(Test_Caffe_layers, Dropout)
 TEST_P(Test_Caffe_layers, Concat)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE > 2018050000
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+        throw SkipTestException("");
+#endif
     testLayerUsingCaffeModels("layer_concat");
     testLayerUsingCaffeModels("layer_concat_optim", true, false);
     testLayerUsingCaffeModels("layer_concat_shared_input", true, false);
@@ -923,8 +927,9 @@ TEST_P(Layer_Test_Convolution_DLDT, Accuracy)
 {
     Target targetId = GetParam();
+    std::string suffix = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? "_fp16" : "";
     Net netDefault = readNet(_tf("layer_convolution.caffemodel"), _tf("layer_convolution.prototxt"));
-    Net net = readNet(_tf("layer_convolution.xml"), _tf("layer_convolution.bin"));
+    Net net = readNet(_tf("layer_convolution" + suffix + ".xml"), _tf("layer_convolution" + suffix + ".bin"));

     Mat inp = blobFromNPY(_tf("blob.npy"));
@@ -935,22 +940,15 @@ TEST_P(Layer_Test_Convolution_DLDT, Accuracy)
     net.setInput(inp);
     net.setPreferableTarget(targetId);
-    if (targetId != DNN_TARGET_MYRIAD)
-    {
-        Mat out = net.forward();
+    Mat out = net.forward();

-        normAssert(outDefault, out);
+    double l1 = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 1.4e-3 : 1e-5;
+    double lInf = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 1.8e-2 : 1e-4;
+    normAssert(outDefault, out, "", l1, lInf);

-        std::vector<int> outLayers = net.getUnconnectedOutLayers();
-        ASSERT_EQ(net.getLayer(outLayers[0])->name, "output_merge");
-        ASSERT_EQ(net.getLayer(outLayers[0])->type, "Concat");
-    }
-    else
-    {
-        // An assertion is expected because the model is in FP32 format but
-        // Myriad plugin supports only FP16 models.
-        ASSERT_ANY_THROW(net.forward());
-    }
+    std::vector<int> outLayers = net.getUnconnectedOutLayers();
+    ASSERT_EQ(net.getLayer(outLayers[0])->name, "output");
+    ASSERT_EQ(net.getLayer(outLayers[0])->type, "Convolution");
 }

 TEST_P(Layer_Test_Convolution_DLDT, setInput_uint8)
@@ -962,23 +960,16 @@ TEST_P(Layer_Test_Convolution_DLDT, setInput_uint8)
     randu(inputs[0], 0, 255);
     inputs[0].convertTo(inputs[1], CV_32F);

+    std::string suffix = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? "_fp16" : "";
     Mat outs[2];
     for (int i = 0; i < 2; ++i)
     {
-        Net net = readNet(_tf("layer_convolution.xml"), _tf("layer_convolution.bin"));
+        Net net = readNet(_tf("layer_convolution" + suffix + ".xml"), _tf("layer_convolution" + suffix + ".bin"));
         net.setPreferableTarget(targetId);
         net.setInput(inputs[i]);
-        if (targetId != DNN_TARGET_MYRIAD)
-        {
-            outs[i] = net.forward();
-            ASSERT_EQ(outs[i].type(), CV_32F);
-        }
-        else
-        {
-            // An assertion is expected because the model is in FP32 format but
-            // Myriad plugin supports only FP16 models.
-            ASSERT_ANY_THROW(net.forward());
-        }
+        outs[i] = net.forward();
+        ASSERT_EQ(outs[i].type(), CV_32F);
     }
+    if (targetId != DNN_TARGET_MYRIAD)
         normAssert(outs[0], outs[1]);
@@ -1008,8 +999,8 @@ INSTANTIATE_TEST_CASE_P(/**/, Layer_Test_Convolution_DLDT,
 // net.save('/path/to/caffemodel')
 //
 // 3. Convert using ModelOptimizer.
-typedef testing::TestWithParam<tuple<int, int, Target> > Test_DLDT_two_inputs;
-TEST_P(Test_DLDT_two_inputs, as_IR)
+typedef testing::TestWithParam<tuple<int, int, Target, std::vector<int> > > Test_DLDT_two_inputs_3dim;
+TEST_P(Test_DLDT_two_inputs_3dim, as_IR)
 {
     int firstInpType = get<0>(GetParam());
     int secondInpType = get<1>(GetParam());
@@ -1020,32 +1011,39 @@ TEST_P(Test_DLDT_two_inputs, as_IR)
         throw SkipTestException("Test is enabled starts from OpenVINO 2018R4");
 #endif

-    Net net = readNet(_tf("net_two_inputs.xml"), _tf("net_two_inputs.bin"));
-    int inpSize[] = {1, 2, 3};
-    Mat firstInp(3, &inpSize[0], firstInpType);
-    Mat secondInp(3, &inpSize[0], secondInpType);
+    std::string suffix = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? "_fp16" : "";
+    Net net = readNet(_tf("net_two_inputs" + suffix + ".xml"), _tf("net_two_inputs.bin"));
+    std::vector<int> inpSize = get<3>(GetParam());
+    Mat firstInp(3, inpSize.data(), firstInpType);
+    Mat secondInp(3, inpSize.data(), secondInpType);
     randu(firstInp, 0, 255);
     randu(secondInp, 0, 255);

     net.setInput(firstInp, "data");
     net.setInput(secondInp, "second_input");
     net.setPreferableTarget(targetId);

-    if (targetId != DNN_TARGET_MYRIAD)
-    {
-        Mat out = net.forward();
-        Mat ref;
-        cv::add(firstInp, secondInp, ref, Mat(), CV_32F);
-        normAssert(out, ref);
-    }
-    else
-    {
-        // An assertion is expected because the model is in FP32 format but
-        // Myriad plugin supports only FP16 models.
-        ASSERT_ANY_THROW(net.forward());
-    }
+    double l1 = ((targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) && (firstInpType == CV_32F || secondInpType == CV_32F)) ? 0.06 : 0.0;
+    double lInf = ((targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) && (firstInpType == CV_32F || secondInpType == CV_32F)) ? 0.23 : 0.0;
+
+    Mat out = net.forward();
+
+    Mat ref;
+    cv::add(firstInp, secondInp, ref, Mat(), CV_32F);
+
+    normAssert(out, ref, "", l1, lInf);
 }

+std::vector<std::vector<int> > list_sizes{ {1, 2, 3}, {3, 2, 1}, {5, 5, 5}, {13, 7, 11} };
+
+INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_DLDT_two_inputs_3dim, Combine(
+  Values(CV_8U, CV_32F), Values(CV_8U, CV_32F),
+  testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE)),
+  testing::ValuesIn(list_sizes)
+));
+
+typedef testing::TestWithParam<tuple<int, int, Target> > Test_DLDT_two_inputs;
 TEST_P(Test_DLDT_two_inputs, as_backend)
 {
     static const float kScale = 0.5f;
modules/dnn/test/test_misc.cpp
@@ -308,4 +308,38 @@ TEST_P(DeprecatedForward, CustomLayerWithFallback)

 INSTANTIATE_TEST_CASE_P(/**/, DeprecatedForward, dnnBackendsAndTargets());

+TEST(Net, forwardAndRetrieve)
+{
+    std::string prototxt =
+        "input: \"data\"\n"
+        "layer {\n"
+        "  name: \"testLayer\"\n"
+        "  type: \"Slice\"\n"
+        "  bottom: \"data\"\n"
+        "  top: \"firstCopy\"\n"
+        "  top: \"secondCopy\"\n"
+        "  slice_param {\n"
+        "    axis: 0\n"
+        "    slice_point: 2\n"
+        "  }\n"
+        "}";
+    Net net = readNetFromCaffe(&prototxt[0], prototxt.size());
+    net.setPreferableBackend(DNN_BACKEND_OPENCV);
+
+    Mat inp(4, 5, CV_32F);
+    randu(inp, -1, 1);
+    net.setInput(inp);
+
+    std::vector<String> outNames;
+    outNames.push_back("testLayer");
+    std::vector<std::vector<Mat> > outBlobs;
+
+    net.forward(outBlobs, outNames);
+
+    EXPECT_EQ(outBlobs.size(), 1);
+    EXPECT_EQ(outBlobs[0].size(), 2);
+    normAssert(outBlobs[0][0], inp.rowRange(0, 2), "first part");
+    normAssert(outBlobs[0][1], inp.rowRange(2, 4), "second part");
+}
+
 }} // namespace
modules/dnn/test/test_onnx_importer.cpp
@@ -395,7 +395,7 @@ TEST_P(Test_ONNX_nets, DenseNet121)
 TEST_P(Test_ONNX_nets, Inception_v1)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018050000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
         throw SkipTestException("Test is disabled for OpenVINO 2018R5");
 #endif
modules/dnn/test/test_tf_importer.cpp
@@ -241,7 +241,7 @@ TEST_P(Test_TensorFlow_layers, unfused_flatten)
 TEST_P(Test_TensorFlow_layers, leaky_relu)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018050000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL)
         throw SkipTestException("");
 #endif
@@ -388,7 +388,7 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
 TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD_PPN)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018050000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
         throw SkipTestException("Unstable test case");
 #endif
modules/imgproc/perf/perf_blur.cpp
@@ -230,4 +230,27 @@ PERF_TEST_P(Size_MatType_BorderType, blur5x5,
     SANITY_CHECK(dst, 1);
 }

+///////////// BlendLinear ////////////////////////
+PERF_TEST_P(Size_MatType, BlendLinear,
+            testing::Combine(
+                testing::Values(szVGA, sz720p, sz1080p, sz2160p),
+                testing::Values(CV_8UC1, CV_32FC1, CV_8UC3, CV_32FC3, CV_8UC4, CV_32FC4)
+            )
+)
+{
+    const Size srcSize = get<0>(GetParam());
+    const int srcType = get<1>(GetParam());
+
+    Mat src1(srcSize, srcType), src2(srcSize, srcType), dst(srcSize, srcType);
+    Mat weights1(srcSize, CV_32FC1), weights2(srcSize, CV_32FC1);
+
+    declare.in(src1, src2, WARMUP_RNG).in(weights1, weights2, WARMUP_READ).out(dst);
+    randu(weights1, 0, 1);
+    randu(weights2, 0, 1);
+
+    TEST_CYCLE() blendLinear(src1, src2, weights1, weights2, dst);
+
+    SANITY_CHECK_NOTHING();
+}
+
 } // namespace
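The new perf test covers cv::blendLinear, which computes the per-pixel weighted mean dst = (src1*w1 + src2*w2) / (w1 + w2 + eps). A small usage sketch (sizes and weight values are arbitrary):

    #include <opencv2/imgproc.hpp>

    int main()
    {
        cv::Mat a(480, 640, CV_8UC3, cv::Scalar::all(255));
        cv::Mat b(480, 640, CV_8UC3, cv::Scalar::all(0));
        cv::Mat w1(480, 640, CV_32FC1, cv::Scalar(0.25));
        cv::Mat w2(480, 640, CV_32FC1, cv::Scalar(0.75));
        cv::Mat dst;
        cv::blendLinear(a, b, w1, w2, dst);  // every pixel ~= 0.25*a + 0.75*b
        return 0;
    }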
modules/imgproc/src/blend.cpp
(diff collapsed; +0, -0)
modules/imgproc/src/median_blur.cpp
(diff collapsed; +0, -0)
modules/imgproc/src/pyramids.cpp
@@ -112,6 +112,7 @@ struct PyrDownVec_32s8u
             v_rshr_pack_store<8>(dst + x, t0);
             x += v_uint16::nlanes;
         }
+        typedef int CV_DECL_ALIGNED(1) unaligned_int;
         for ( ; x <= width - v_int32x4::nlanes; x += v_int32x4::nlanes)
         {
             v_int32x4 r0, r1, r2, r3, r4, t0;
@@ -122,7 +123,7 @@ struct PyrDownVec_32s8u
             r4 = v_load(row4 + x);
             t0 = r0 + r4 + (r2 + r2) + ((r1 + r3 + r2) << 2);
-            *(int*)(dst + x) = v_reinterpret_as_s32(v_rshr_pack<8>(v_pack_u(t0, t0), v_setzero_u16())).get0();
+            *((unaligned_int*)(dst + x)) = v_reinterpret_as_s32(v_rshr_pack<8>(v_pack_u(t0, t0), v_setzero_u16())).get0();
         }

         return x;
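Here dst is a row of uchar, so dst + x carries no 4-byte alignment guarantee, and the old plain int* store was undefined behaviour that could trap on strict-alignment hardware (the regression exercised by the new Imgproc_Pyrdown.issue_12961 test below). A stand-alone sketch of the typedef trick, assuming a GCC/Clang-style aligned attribute (OpenCV's CV_DECL_ALIGNED macro wraps the same mechanism):

    #include <cstring>

    #if defined(__GNUC__)
    #  define DECL_ALIGNED(n) __attribute__((aligned(n)))
    #else
    #  define DECL_ALIGNED(n)  /* fallback: prefer the memcpy variant below */
    #endif

    typedef int DECL_ALIGNED(1) unaligned_int;   // "this int may live at any address"

    void store_int(unsigned char* row, int offset, int value)
    {
        // *(int*)(row + offset) = value;            // UB when row + offset is misaligned
        *((unaligned_int*)(row + offset)) = value;   // compiler emits an unaligned-safe store
        // Fully portable alternative:
        // std::memcpy(row + offset, &value, sizeof(value));
    }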
modules/imgproc/src/spatialgradient.cpp
(diff collapsed; +0, -0)
modules/imgproc/test/test_filter.cpp
@@ -2235,4 +2235,13 @@ TEST(Imgproc_Sobel, s16_regression_13506)
     Sobel(src, dst, CV_16S, 0, 1, 5);
     ASSERT_EQ(0.0, cvtest::norm(dst, ref, NORM_INF));
 }

+TEST(Imgproc_Pyrdown, issue_12961)
+{
+    Mat src(9, 9, CV_8UC1, Scalar::all(0));
+    Mat dst;
+    cv::pyrDown(src, dst);
+    ASSERT_EQ(0.0, cv::norm(dst));
+}
+
 }} // namespace
modules/js/src/core_bindings.cpp
@@ -341,6 +341,9 @@ EMSCRIPTEN_BINDINGS(binding_utils)
     register_vector<cv::Mat>("MatVector");
     register_vector<cv::Rect>("RectVector");
     register_vector<cv::KeyPoint>("KeyPointVector");
+    register_vector<cv::DMatch>("DMatchVector");
+    register_vector<std::vector<cv::DMatch>>("DMatchVectorVector");

     emscripten::class_<cv::Mat>("Mat")
         .constructor<>()
@@ -494,6 +497,12 @@ EMSCRIPTEN_BINDINGS(binding_utils)
         .field("response", &cv::KeyPoint::response)
         .field("size", &cv::KeyPoint::size);

+    emscripten::value_object<cv::DMatch>("DMatch")
+        .field("queryIdx", &cv::DMatch::queryIdx)
+        .field("trainIdx", &cv::DMatch::trainIdx)
+        .field("imgIdx", &cv::DMatch::imgIdx)
+        .field("distance", &cv::DMatch::distance);
+
     emscripten::value_array<cv::Scalar_<double>>("Scalar")
         .element(index<0>())
         .element(index<1>())
modules/ml/src/svm.cpp
@@ -200,20 +200,19 @@ public:
     {
         int j;
         calc_non_rbf_base( vcount, var_count, vecs, another, results,
-                           -2*params.gamma, -2*params.coef0 );
+                           2*params.gamma, 2*params.coef0 );
         // TODO: speedup this
         for( j = 0; j < vcount; j++ )
         {
             Qfloat t = results[j];
-            Qfloat e = std::exp(-std::abs(t));
+            Qfloat e = std::exp(std::abs(t));
             if( t > 0 )
-                results[j] = (Qfloat)((1. - e)/(1. + e));
-            else
                 results[j] = (Qfloat)((e - 1.)/(e + 1.));
+            else
+                results[j] = (Qfloat)((1. - e)/(1. + e));
         }
     }

     void calc_rbf( int vcount, int var_count, const float* vecs,
                    const float* another, Qfloat* results )
     {
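With t = 2*gamma*<u,v> + 2*coef0 coming from calc_non_rbf_base, the branchy expression above is exactly tanh(t/2) = tanh(gamma*<u,v> + coef0); the old code effectively computed tanh of the negated argument, which is what the new trainauto_sigmoid test below catches. A quick numeric check of the identity (illustrative only; double stands in for Qfloat, and gamma/coef0 mirror the new test's values):

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const double gamma = 10.0, coef0 = -10.0, dot = 0.75;  // sample values
        const double t = 2*gamma*dot + 2*coef0;  // what calc_non_rbf_base now produces
        const double e = std::exp(std::abs(t));
        const double r = (t > 0) ? (e - 1.)/(e + 1.) : (1. - e)/(1. + e);
        // Both print -0.986614: the two branches implement tanh(t/2).
        std::printf("%f %f\n", r, std::tanh(gamma*dot + coef0));
        return 0;
    }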
@@ -1310,8 +1309,6 @@ public:
         if( kernelType != SIGMOID && kernelType != POLY )
             params.coef0 = 0;
-        else if( params.coef0 < 0 )
-            CV_Error( CV_StsOutOfRange, "The kernel parameter <coef0> must be positive or zero" );

         if( kernelType != POLY )
             params.degree = 0;
modules/ml/test/test_svmtrainauto.cpp
@@ -88,6 +88,51 @@ void CV_SVMTrainAutoTest::run( int /*start_from*/ )
 TEST(ML_SVM, trainauto)
 {
     CV_SVMTrainAutoTest test;
     test.safe_run();
 }

+TEST(ML_SVM, trainauto_sigmoid)
+{
+    const int datasize = 100;
+    cv::Mat samples = cv::Mat::zeros( datasize, 2, CV_32FC1 );
+    cv::Mat responses = cv::Mat::zeros( datasize, 1, CV_32S );
+
+    const float scale_factor = 0.5;
+    const float radius = 2.0;
+
+    // Populate samples with data that can be split into two concentric circles
+    for (int i = 0; i < datasize; i += 2)
+    {
+        const float pi = 3.14159f;
+        const float angle_rads = (i / datasize) * pi;
+        const float x = radius * cos(angle_rads);
+        const float y = radius * cos(angle_rads);
+
+        // Larger circle
+        samples.at<float>( i, 0 ) = x;
+        samples.at<float>( i, 1 ) = y;
+        responses.at<int>( i, 0 ) = 0;
+
+        // Smaller circle
+        samples.at<float>( i + 1, 0 ) = x * scale_factor;
+        samples.at<float>( i + 1, 1 ) = y * scale_factor;
+        responses.at<int>( i + 1, 0 ) = 1;
+    }
+
+    cv::Ptr<TrainData> data = TrainData::create( samples, cv::ml::ROW_SAMPLE, responses );
+    cv::Ptr<SVM> svm = SVM::create();
+    svm->setKernel(SVM::SIGMOID);
+    svm->setGamma(10.0);
+    svm->setCoef0(-10.0);
+    svm->trainAuto(data, 10);  // 2-fold cross validation.
+
+    float test_data0[2] = { radius, radius };
+    cv::Mat test_point0 = cv::Mat( 1, 2, CV_32FC1, test_data0 );
+    ASSERT_EQ( 0, svm->predict( test_point0 ) );
+
+    float test_data1[2] = { scale_factor * radius, scale_factor * radius };
+    cv::Mat test_point1 = cv::Mat( 1, 2, CV_32FC1, test_data1 );
+    ASSERT_EQ( 1, svm->predict( test_point1 ) );
+}
+
 TEST(ML_SVM, trainAuto_regression_5369)
 {
samples/dnn/tf_text_graph_common.py
@@ -323,7 +323,7 @@ def writeTextGraph(modelPath, outputPath, outNodes):
     for node in graph_def.node:
         if node.op == 'Const':
-            if 'value' in node.attr:
-                del node.attr['value']
+            if 'value' in node.attr and node.attr['value'].tensor.tensor_content:
+                node.attr['value'].tensor.tensor_content = ''

     tf.train.write_graph(graph_def, "", outputPath, as_text=True)