opencv / Commits / f0ddf302

Commit f0ddf302 authored Jan 14, 2019 by Dmitry Kurtaev
Move Inference Engine to new API
parent 4ced27e1
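The "new API" referenced here is Inference Engine's Builder API, available from the 2018 R5 release. As a hedged sketch (not part of this diff), the contrast between the two styles the commit switches between looks roughly like this, using only calls that appear in the changes below:

    // Old API (pre-2018R5): untyped LayerParams plus CNNLayer params surgery.
    InferenceEngine::LayerParams lp;
    lp.name = "relu1";
    lp.type = "ReLU";
    lp.precision = InferenceEngine::Precision::FP32;
    std::shared_ptr<InferenceEngine::CNNLayer> oldLayer(new InferenceEngine::CNNLayer(lp));
    oldLayer->params["negative_slope"] = "0.1";

    // New API (2018R5+): typed builder objects with chainable setters.
    InferenceEngine::Builder::ReLULayer newLayer("relu1");
    newLayer.setNegativeSlope(0.1f);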
Showing 34 changed files with 852 additions and 80 deletions.
modules/dnn/perf/perf_net.cpp                       +7   -6
modules/dnn/src/dnn.cpp                             +62  -12
modules/dnn/src/layers/batch_norm_layer.cpp         +9   -0
modules/dnn/src/layers/blank_layer.cpp              +6   -0
modules/dnn/src/layers/concat_layer.cpp             +9   -0
modules/dnn/src/layers/convolution_layer.cpp        +77  -25
modules/dnn/src/layers/crop_layer.cpp               +11  -4
modules/dnn/src/layers/detection_output_layer.cpp   +20  -0
modules/dnn/src/layers/elementwise_layers.cpp       +74  -0
modules/dnn/src/layers/eltwise_layer.cpp            +23  -2
modules/dnn/src/layers/flatten_layer.cpp            +12  -1
modules/dnn/src/layers/fully_connected_layer.cpp    +13  -0
modules/dnn/src/layers/lrn_layer.cpp                +12  -0
modules/dnn/src/layers/mvn_layer.cpp                +8   -0
modules/dnn/src/layers/normalize_bbox_layer.cpp     +44  -0
modules/dnn/src/layers/permute_layer.cpp            +6   -0
modules/dnn/src/layers/pooling_layer.cpp            +43  -0
modules/dnn/src/layers/prior_box_layer.cpp          +53  -0
modules/dnn/src/layers/proposal_layer.cpp           +23  -0
modules/dnn/src/layers/reorg_layer.cpp              +6   -0
modules/dnn/src/layers/reshape_layer.cpp            +22  -2
modules/dnn/src/layers/resize_layer.cpp             +41  -0
modules/dnn/src/layers/scale_layer.cpp              +24  -0
modules/dnn/src/layers/slice_layer.cpp              +14  -5
modules/dnn/src/layers/softmax_layer.cpp            +8   -0
modules/dnn/src/op_inf_engine.cpp                   +136 -1
modules/dnn/src/op_inf_engine.hpp                   +65  -1
modules/dnn/test/test_backends.cpp                  +2   -2
modules/dnn/test/test_darknet_importer.cpp          +1   -1
modules/dnn/test/test_halide_layers.cpp             +2   -1
modules/dnn/test/test_layers.cpp                    +0   -4
modules/dnn/test/test_onnx_importer.cpp             +9   -1
modules/dnn/test/test_tf_importer.cpp               +5   -8
modules/dnn/test/test_torch_importer.cpp            +5   -4
modules/dnn/perf/perf_net.cpp
@@ -157,8 +157,7 @@ PERF_TEST_P_(DNNTestNetwork, MobileNet_SSD_v2_TensorFlow)
 PERF_TEST_P_(DNNTestNetwork, DenseNet_121)
 {
-    if (backend == DNN_BACKEND_HALIDE ||
-        (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL_FP16 ||
-                                                     target == DNN_TARGET_MYRIAD)))
+    if (backend == DNN_BACKEND_HALIDE)
         throw SkipTestException("");
     processNet("dnn/DenseNet_121.caffemodel", "dnn/DenseNet_121.prototxt", "", Mat(cv::Size(224, 224), CV_32FC3));

@@ -211,8 +210,7 @@ PERF_TEST_P_(DNNTestNetwork, Inception_v2_SSD_TensorFlow)
 PERF_TEST_P_(DNNTestNetwork, YOLOv3)
 {
-    if (backend == DNN_BACKEND_HALIDE ||
-        (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
+    if (backend == DNN_BACKEND_HALIDE)
         throw SkipTestException("");
     Mat sample = imread(findDataFile("dnn/dog416.png", false));
     Mat inp;

@@ -222,8 +220,11 @@ PERF_TEST_P_(DNNTestNetwork, YOLOv3)
 PERF_TEST_P_(DNNTestNetwork, EAST_text_detection)
 {
-    if (backend == DNN_BACKEND_HALIDE ||
-        (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
+    if (backend == DNN_BACKEND_HALIDE
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
+        || (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+#endif
+    )
         throw SkipTestException("");
     processNet("dnn/frozen_east_text_detection.pb", "", "", Mat(cv::Size(320, 320), CV_32FC3));
 }
modules/dnn/src/dnn.cpp
@@ -701,12 +701,6 @@ struct DataLayer : public Layer
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
-        InferenceEngine::LayerParams lp;
-        lp.name = name;
-        lp.type = "ScaleShift";
-        lp.precision = InferenceEngine::Precision::FP32;
-        std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
-
         CV_CheckEQ(inputsData.size(), (size_t)1, "");
         CV_CheckEQ(inputsData[0].dims, 4, "");
         const size_t numChannels = inputsData[0].size[1];

@@ -717,7 +711,6 @@ struct DataLayer : public Layer
                                                                 {numChannels});
         weights->allocate();
         weights->set(std::vector<float>(numChannels, scaleFactors[0]));
-        ieLayer->_weights = weights;
 
         // Mean subtraction
         auto biases = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,

@@ -729,8 +722,21 @@ struct DataLayer : public Layer
             biasesVec[i] = -means[0][i] * scaleFactors[0];
         }
         biases->set(biasesVec);
-        ieLayer->_biases = biases;
 
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::ScaleShiftLayer ieLayer(name);
+        ieLayer.setWeights(weights);
+        ieLayer.setBiases(biases);
+#else
+        InferenceEngine::LayerParams lp;
+        lp.name = name;
+        lp.type = "ScaleShift";
+        lp.precision = InferenceEngine::Precision::FP32;
+        std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
+        ieLayer->_weights = weights;
+        ieLayer->_biases = biases;
+#endif
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();

@@ -1451,7 +1457,11 @@ struct Net::Impl
             if (layerNet != ieInpNode->net)
             {
                 // layerNet is empty or nodes are from different graphs.
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+                ieInpNode->net->addOutput(ieInpNode->layer.getName());
+#else
                 ieInpNode->net->addOutput(ieInpNode->layer->name);
+#endif
             }
         }
     }

@@ -1527,7 +1537,7 @@ struct Net::Impl
         // Build Inference Engine networks from sets of layers that support this
         // backend. Split a whole model on several Inference Engine networks if
-        // some of layers is not implemented.
+        // some of layers are not implemented.
 
         // Set of all input and output blobs wrappers for current network.
         std::map<LayerPin, Ptr<BackendWrapper> > netBlobsWrappers;

@@ -1543,7 +1553,7 @@ struct Net::Impl
             {
                 addInfEngineNetOutputs(ld);
                 net = Ptr<InfEngineBackendNet>();
-                netBlobsWrappers.clear();
+                netBlobsWrappers.clear();  // Is not used for R5 release but we don't wrap it to #ifdef.
                 layer->preferableTarget = DNN_TARGET_CPU;
                 continue;
             }

@@ -1561,12 +1571,13 @@ struct Net::Impl
                     if (ieInpNode->net != net)
                     {
                         net = Ptr<InfEngineBackendNet>();
-                        netBlobsWrappers.clear();
+                        netBlobsWrappers.clear();  // Is not used for R5 release but we don't wrap it to #ifdef.
                         break;
                     }
                 }
             }
 
+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5)
             // The same blobs wrappers cannot be shared between two Inference Engine
             // networks because of explicit references between layers and blobs.
             // So we need to rewrap all the external blobs.

@@ -1583,6 +1594,7 @@ struct Net::Impl
                 ld.inputBlobsWrappers[i] = it->second;
             }
             netBlobsWrappers[LayerPin(ld.id, 0)] = ld.outputBlobsWrappers[0];
+#endif  // IE < R5
 
             Ptr<BackendNode> node;
             if (!net.empty())

@@ -1613,6 +1625,40 @@ struct Net::Impl
             CV_Assert(!ieNode.empty());
             ieNode->net = net;
 
+            // Convert weights in FP16 for specific targets.
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+            if ((preferableTarget == DNN_TARGET_OPENCL_FP16 ||
+                 preferableTarget == DNN_TARGET_MYRIAD ||
+                 preferableTarget == DNN_TARGET_FPGA) && !fused)
+            {
+                auto& blobs = ieNode->layer.getConstantData();
+                if (blobs.empty())
+                {
+                    // In case of non weightable layer we have to specify
+                    // it's precision adding dummy blob.
+                    auto blob = InferenceEngine::make_shared_blob<int16_t>(
+                                    InferenceEngine::Precision::FP16,
+                                    InferenceEngine::Layout::C, {1});
+                    blob->allocate();
+                    blobs[""] = blob;
+                }
+                else
+                {
+                    for (auto& it : blobs)
+                        it.second = convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(it.second));
+                }
+            }
+
+            if (!fused)
+                net->addLayer(ieNode->layer);
+
+            net->connect(ld.inputBlobsWrappers, ld.outputBlobsWrappers, ieNode->layer.getName());
+            net->addBlobs(ld.inputBlobsWrappers);
+            net->addBlobs(ld.outputBlobsWrappers);
+            addInfEngineNetOutputs(ld);
+#else  // IE >= R5
             auto weightableLayer = std::dynamic_pointer_cast<InferenceEngine::WeightableLayer>(ieNode->layer);
             if ((preferableTarget == DNN_TARGET_OPENCL_FP16 ||
                  preferableTarget == DNN_TARGET_MYRIAD ||

@@ -1650,10 +1696,10 @@ struct Net::Impl
             if (!fused)
                 net->addLayer(ieNode->layer);
             addInfEngineNetOutputs(ld);
+#endif  // IE >= R5
         }
 
         // Initialize all networks.
         std::set<InfEngineBackendNet> initializedNets;
         for (MapIdToLayerData::reverse_iterator it = layers.rbegin(); it != layers.rend(); ++it)
         {
             LayerData &ld = it->second;

@@ -2546,7 +2592,11 @@ Net Net::readFromModelOptimizer(const String& xml, const String& bin)
     Net cvNet;
     cvNet.setInputsNames(inputsNames);
 
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+    Ptr<InfEngineBackendNode> backendNode(new InfEngineBackendNode(InferenceEngine::Builder::Layer("")));
+#else
     Ptr<InfEngineBackendNode> backendNode(new InfEngineBackendNode(0));
+#endif
     backendNode->net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet(ieNet));
     for (auto& it : ieNet.getOutputsInfo())
     {
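Condensed from the hunks above, the R5 code path assembles the backend network through a small, explicit protocol instead of mutating a CNNNetwork directly; a minimal sketch with names taken from the diff (the SoftMax layer is an illustrative stand-in for whatever a layer's initInfEngine() returns):

    // Per-layer assembly flow on the 2018R5 path (sketch, not new code).
    InferenceEngine::Builder::SoftMaxLayer ieLayer("prob");
    Ptr<InfEngineBackendNode> ieNode(new InfEngineBackendNode(ieLayer));
    ieNode->net = net;                                           // shared InfEngineBackendNet

    net->addLayer(ieNode->layer);                                // register with the Builder::Network
    net->connect(ld.inputBlobsWrappers, ld.outputBlobsWrappers,  // wire ports by blob name
                 ieNode->layer.getName());
    net->addBlobs(ld.inputBlobsWrappers);                        // expose blobs for later I/O binding
    net->addBlobs(ld.outputBlobsWrappers);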
modules/dnn/src/layers/batch_norm_layer.cpp
@@ -349,6 +349,14 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::ScaleShiftLayer ieLayer(name);
+
+        const size_t numChannels = weights_.total();
+        ieLayer.setWeights(wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C));
+        ieLayer.setBiases(wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C));
+
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#else
         InferenceEngine::LayerParams lp;
         lp.name = name;
         lp.type = "ScaleShift";

@@ -360,6 +368,7 @@ public:
         ieLayer->_biases = wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C);
 
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
modules/dnn/src/layers/blank_layer.cpp
@@ -110,6 +110,11 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::SplitLayer ieLayer(name);
+        ieLayer.setOutputPorts({InferenceEngine::Port()});
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#else
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
         CV_Assert(!input->dims.empty());

@@ -123,6 +128,7 @@ public:
         ieLayer->params["out_sizes"] = format("%d", (int)input->dims[0]);
 #endif
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
modules/dnn/src/layers/concat_layer.cpp
@@ -301,6 +301,14 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
+
+        InferenceEngine::Builder::ConcatLayer ieLayer(name);
+        ieLayer.setAxis(clamp(axis, input->dims.size()));
+        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#else
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
         InferenceEngine::LayerParams lp;
         lp.name = name;

@@ -309,6 +317,7 @@ public:
         std::shared_ptr<InferenceEngine::ConcatLayer> ieLayer(new InferenceEngine::ConcatLayer(lp));
         ieLayer->_axis = clamp(axis, input->dims.size());
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
modules/dnn/src/layers/convolution_layer.cpp
@@ -451,6 +451,54 @@ public:
         const int inpGroupCn = blobs[0].size[1];
         const int group = inpCn / inpGroupCn;
 
+        auto ieWeights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW);
+        if (newWeightAndBias)
+        {
+            if (weightsMat.isContinuous())
+            {
+                Mat fusedWeights = weightsMat.reshape(1, blobs[0].dims, blobs[0].size);
+                ieWeights = wrapToInfEngineBlob(fusedWeights, InferenceEngine::Layout::OIHW);
+            }
+            else
+            {
+                ieWeights = InferenceEngine::make_shared_blob<float>(
+                                    InferenceEngine::Precision::FP32, InferenceEngine::Layout::OIHW,
+                                    ieWeights->dims());
+                ieWeights->allocate();
+
+                Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, outCn);
+                Mat fusedWeights = weightsMat.colRange(0, newWeights.cols);
+                fusedWeights.copyTo(newWeights);
+            }
+        }
+        InferenceEngine::Blob::Ptr ieBiases;
+        if (hasBias() || fusedBias)
+        {
+            Mat biasesMat({outCn}, CV_32F, &biasvec[0]);
+            ieBiases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C);
+        }
+
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::ConvolutionLayer ieLayer(name);
+
+        ieLayer.setKernel({kernel.height, kernel.width});
+        ieLayer.setStrides({stride.height, stride.width});
+        ieLayer.setDilation({dilation.height, dilation.width});
+        ieLayer.setPaddingsBegin({pad.height, pad.width});
+        ieLayer.setPaddingsEnd({pad.height, pad.width});
+        ieLayer.setGroup(group);
+        ieLayer.setOutDepth(outCn);
+
+        ieLayer.setWeights(ieWeights);
+        if (ieBiases)
+            ieLayer.setBiases(ieBiases);
+
+        InferenceEngine::Builder::Layer l = ieLayer;
+        if (!padMode.empty())
+            l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
+
+        return Ptr<BackendNode>(new InfEngineBackendNode(l));
+#else
         InferenceEngine::LayerParams lp;
         lp.name = name;
         lp.type = "Convolution";

@@ -487,32 +535,11 @@ public:
         ieLayer->_out_depth = outCn;
         ieLayer->_group = group;
 
-        ieLayer->_weights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW);
-        if (newWeightAndBias)
-        {
-            if (weightsMat.isContinuous())
-            {
-                Mat fusedWeights = weightsMat.reshape(1, blobs[0].dims, blobs[0].size);
-                ieLayer->_weights = wrapToInfEngineBlob(fusedWeights, InferenceEngine::Layout::OIHW);
-            }
-            else
-            {
-                ieLayer->_weights = InferenceEngine::make_shared_blob<float>(
-                                    InferenceEngine::Precision::FP32, InferenceEngine::Layout::OIHW,
-                                    ieLayer->_weights->dims());
-                ieLayer->_weights->allocate();
-
-                Mat newWeights = infEngineBlobToMat(ieLayer->_weights).reshape(1, outCn);
-                Mat fusedWeights = weightsMat.colRange(0, newWeights.cols);
-                fusedWeights.copyTo(newWeights);
-            }
-        }
-        if (hasBias() || fusedBias)
-        {
-            Mat biasesMat({outCn}, CV_32F, &biasvec[0]);
-            ieLayer->_biases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C);
-        }
+        ieLayer->_weights = ieWeights;
+        if (ieBiases)
+            ieLayer->_biases = ieBiases;
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }

@@ -1123,6 +1150,9 @@ public:
 #ifdef HAVE_INF_ENGINE
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
         {
+            if (INF_ENGINE_RELEASE == 2018050000 && (adjustPad.height || adjustPad.width))
+                return false;
+
             const int outGroupCn = blobs[0].size[1];  // Weights are in IOHW layout
             const int group = numOutput / outGroupCn;
             if (group != 1)

@@ -1677,6 +1707,27 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        const int outGroupCn = blobs[0].size[1];  // Weights are in IOHW layout
+        const int group = numOutput / outGroupCn;
+
+        InferenceEngine::Builder::DeconvolutionLayer ieLayer(name);
+
+        ieLayer.setKernel({kernel.height, kernel.width});
+        ieLayer.setStrides({stride.height, stride.width});
+        ieLayer.setDilation({dilation.height, dilation.width});
+        ieLayer.setPaddingsBegin({pad.height, pad.width});
+        ieLayer.setPaddingsEnd({pad.height, pad.width});
+        ieLayer.setGroup(group);
+        ieLayer.setOutDepth(numOutput);
+
+        ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW));
+        if (hasBias())
+        {
+            ieLayer.setBiases(wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C));
+        }
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#else
         const int outGroupCn = blobs[0].size[1];  // Weights are in IOHW layout
         const int group = numOutput / outGroupCn;

@@ -1716,6 +1767,7 @@ public:
             ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C);
         }
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
modules/dnn/src/layers/crop_layer.cpp
@@ -67,8 +67,12 @@ public:
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
-        return backendId == DNN_BACKEND_OPENCV ||
-               (backendId == DNN_BACKEND_INFERENCE_ENGINE && crop_ranges.size() == 4);
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+            return INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5) && crop_ranges.size() == 4;
+        else
+#endif
+            return backendId == DNN_BACKEND_OPENCV;
     }
 
     bool getMemoryShapes(const std::vector<MatShape> &inputs,

@@ -145,9 +149,10 @@ public:
         input(&crop_ranges[0]).copyTo(outputs[0]);
     }
 
+#ifdef HAVE_INF_ENGINE
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
-#ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5)
         InferenceEngine::LayerParams lp;
         lp.name = name;
         lp.type = "Crop";

@@ -181,9 +186,11 @@ public:
         ieLayer->dim.push_back(crop_ranges[3].end - crop_ranges[3].start);
 #endif
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-#endif  // HAVE_INF_ENGINE
+#else
         return Ptr<BackendNode>();
+#endif  // IE < R5
     }
+#endif
 
     std::vector<Range> crop_ranges;
 };
modules/dnn/src/layers/detection_output_layer.cpp
@@ -939,6 +939,25 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::DetectionOutputLayer ieLayer(name);
+
+        ieLayer.setNumClasses(_numClasses);
+        ieLayer.setShareLocation(_shareLocation);
+        ieLayer.setBackgroudLabelId(_backgroundLabelId);
+        ieLayer.setNMSThreshold(_nmsThreshold);
+        ieLayer.setTopK(_topK);
+        ieLayer.setKeepTopK(_keepTopK);
+        ieLayer.setConfidenceThreshold(_confidenceThreshold);
+        ieLayer.setVariantEncodedInTarget(_varianceEncodedInTarget);
+        ieLayer.setCodeType("caffe.PriorBoxParameter." + _codeType);
+        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(3));
+
+        InferenceEngine::Builder::Layer l = ieLayer;
+        l.getParameters()["eta"] = std::string("1.0");
+
+        return Ptr<BackendNode>(new InfEngineBackendNode(l));
+#else
         InferenceEngine::LayerParams lp;
         lp.name = name;
         lp.type = "DetectionOutput";

@@ -956,6 +975,7 @@ public:
         ieLayer->params["variance_encoded_in_target"] = _varianceEncodedInTarget ? "1" : "0";
         ieLayer->params["code_type"] = "caffe.PriorBoxParameter." + _codeType;
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
modules/dnn/src/layers/elementwise_layers.cpp
@@ -152,10 +152,16 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::Layer ieLayer = func.initInfEngineBuilderAPI();
+        ieLayer.setName(this->name);
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#else
         InferenceEngine::LayerParams lp;
         lp.name = this->name;
         lp.precision = InferenceEngine::Precision::FP32;
         return Ptr<BackendNode>(new InfEngineBackendNode(func.initInfEngine(lp)));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }

@@ -345,6 +351,12 @@ struct ReLUFunctor
 #endif  // HAVE_HALIDE
 
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+    InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
+    {
+        return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(slope);
+    }
+#else
     InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
     {
         lp.type = "ReLU";

@@ -353,6 +365,7 @@ struct ReLUFunctor
         ieLayer->params["negative_slope"] = format("%f", slope);
         return ieLayer;
     }
+#endif
 #endif  // HAVE_INF_ENGINE
 
     bool tryFuse(Ptr<dnn::Layer>&) { return false; }

@@ -452,6 +465,12 @@ struct ReLU6Functor
 #endif  // HAVE_HALIDE
 
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+    InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
+    {
+        return InferenceEngine::Builder::ClampLayer("").setMinValue(minValue).setMaxValue(maxValue);
+    }
+#else
     InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
     {
         lp.type = "Clamp";

@@ -462,6 +481,7 @@ struct ReLU6Functor
         ieLayer->params["max"] = format("%f", maxValue);
         return ieLayer;
     }
+#endif
 #endif  // HAVE_INF_ENGINE
 
     bool tryFuse(Ptr<dnn::Layer>&) { return false; }

@@ -530,12 +550,19 @@ struct TanHFunctor
 #endif  // HAVE_HALIDE
 
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+    InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
+    {
+        return InferenceEngine::Builder::TanHLayer("");
+    }
+#else
     InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
     {
         lp.type = "TanH";
         std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
         return ieLayer;
     }
+#endif
 #endif  // HAVE_INF_ENGINE
 
     bool tryFuse(Ptr<dnn::Layer>&) { return false; }

@@ -604,12 +631,19 @@ struct SigmoidFunctor
 #endif  // HAVE_HALIDE
 
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+    InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
+    {
+        return InferenceEngine::Builder::SigmoidLayer("");
+    }
+#else
     InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
     {
         lp.type = "Sigmoid";
         std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
         return ieLayer;
     }
+#endif
 #endif  // HAVE_INF_ENGINE
 
     bool tryFuse(Ptr<dnn::Layer>&) { return false; }

@@ -680,11 +714,18 @@ struct ELUFunctor
 #endif  // HAVE_HALIDE
 
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+    InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
+    {
+        return InferenceEngine::Builder::ELULayer("");
+    }
+#else
     InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
     {
         lp.type = "ELU";
         return InferenceEngine::CNNLayerPtr(new InferenceEngine::CNNLayer(lp));
     }
+#endif
 #endif  // HAVE_INF_ENGINE
 
     bool tryFuse(Ptr<dnn::Layer>&) { return false; }

@@ -753,6 +794,12 @@ struct AbsValFunctor
 #endif  // HAVE_HALIDE
 
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+    InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
+    {
+        return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(-1);
+    }
+#else
     InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
     {
         lp.type = "ReLU";

@@ -761,6 +808,7 @@ struct AbsValFunctor
         ieLayer->params["negative_slope"] = "-1.0";
         return ieLayer;
     }
+#endif
 #endif  // HAVE_INF_ENGINE
 
     bool tryFuse(Ptr<dnn::Layer>&) { return false; }

@@ -808,11 +856,18 @@ struct BNLLFunctor
 #endif  // HAVE_HALIDE
 
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+    InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
+    {
+        CV_Error(Error::StsNotImplemented, "");
+    }
+#else
     InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
     {
         CV_Error(Error::StsNotImplemented, "BNLL");
         return InferenceEngine::CNNLayerPtr();
     }
+#endif
 #endif  // HAVE_INF_ENGINE
 
     bool tryFuse(Ptr<dnn::Layer>&) { return false; }

@@ -917,6 +972,14 @@ struct PowerFunctor
 #endif  // HAVE_HALIDE
 
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+    InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
+    {
+        return InferenceEngine::Builder::PowerLayer("").setPower(power)
+                                                       .setScale(scale)
+                                                       .setShift(shift);
+    }
+#else
     InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
     {
         if (power == 1.0f && scale == 1.0f && shift == 0.0f)

@@ -936,6 +999,7 @@ struct PowerFunctor
             return ieLayer;
         }
     }
+#endif
 #endif  // HAVE_INF_ENGINE
 
     bool tryFuse(Ptr<dnn::Layer>& top)

@@ -1067,6 +1131,15 @@ struct ChannelsPReLUFunctor
 #endif  // HAVE_HALIDE
 
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+    InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
+    {
+        InferenceEngine::Builder::PReLULayer ieLayer("");
+        const size_t numChannels = scale.total();
+        ieLayer.setWeights(wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C));
+        return ieLayer;
+    }
+#else
     InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
     {
         lp.type = "PReLU";

@@ -1075,6 +1148,7 @@ struct ChannelsPReLUFunctor
         ieLayer->_weights = wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C);
         return ieLayer;
     }
+#endif
 #endif  // HAVE_INF_ENGINE
 
     bool tryFuse(Ptr<dnn::Layer>&) { return false; }
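Each activation functor above now keeps both construction paths behind one version check: initInfEngineBuilderAPI() for 2018R5+ and the old initInfEngine(LayerParams&) otherwise. As a hedged sketch of how a further functor would slot into this scheme (the "Swish" type is hypothetical and is not added by this commit):

    // Hypothetical functor following the dual-path pattern above.
    struct SwishFunctor
    {
    #ifdef HAVE_INF_ENGINE
    #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
        InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
        {
            CV_Error(Error::StsNotImplemented, "");  // no dedicated builder class assumed
        }
    #else
        InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
        {
            lp.type = "Swish";  // hypothetical IE layer type, for illustration only
            return InferenceEngine::CNNLayerPtr(new InferenceEngine::CNNLayer(lp));
        }
    #endif
    #endif  // HAVE_INF_ENGINE
        bool tryFuse(Ptr<dnn::Layer>&) { return false; }
    };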
modules/dnn/src/layers/eltwise_layer.cpp
@@ -99,7 +99,7 @@ public:
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_HALIDE ||
                (backendId == DNN_BACKEND_INFERENCE_ENGINE &&
-                (preferableTarget != DNN_TARGET_MYRIAD || coeffs.empty()));
+                (preferableTarget != DNN_TARGET_OPENCL || coeffs.empty()));
     }
 
     bool getMemoryShapes(const std::vector<MatShape> &inputs,

@@ -420,9 +420,29 @@ public:
         return Ptr<BackendNode>();
     }
 
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
+    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::EltwiseLayer ieLayer(name);
+
+        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
+
+        if (op == SUM)
+            ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::SUM);
+        else if (op == PROD)
+            ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MUL);
+        else if (op == MAX)
+            ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MAX);
+        else
+            CV_Error(Error::StsNotImplemented, "Unsupported eltwise operation");
+
+        InferenceEngine::Builder::Layer l = ieLayer;
+        if (!coeffs.empty())
+            l.getParameters()["coeff"] = coeffs;
+
+        return Ptr<BackendNode>(new InfEngineBackendNode(l));
+#else
         InferenceEngine::LayerParams lp;
         lp.name = name;
         lp.type = "Eltwise";

@@ -438,6 +458,7 @@ public:
         else
             CV_Error(Error::StsNotImplemented, "Unsupported eltwise operation");
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
modules/dnn/src/layers/flatten_layer.cpp
@@ -152,9 +152,19 @@ public:
         }
     }
 
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
+    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::Layer ieLayer(name);
+        ieLayer.setName(name);
+        ieLayer.setType("Flatten");
+        ieLayer.getParameters()["axis"] = _startAxis;
+        ieLayer.getParameters()["end_axis"] = _endAxis;
+        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
+        ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#else
         InferenceEngine::LayerParams lp;
         lp.name = name;
         lp.type = "Flatten";

@@ -163,6 +173,7 @@ public:
         ieLayer->params["axis"] = format("%d", _startAxis);
         ieLayer->params["end_axis"] = format("%d", _endAxis);
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
modules/dnn/src/layers/fully_connected_layer.cpp
@@ -442,6 +442,18 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::FullyConnectedLayer ieLayer(name);
+
+        const int outNum = blobs[0].size[0];
+        ieLayer.setOutputNum(outNum);
+
+        ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW));
+        if (blobs.size() > 1)
+            ieLayer.setBiases(wrapToInfEngineBlob(blobs[1], {(size_t)outNum}, InferenceEngine::Layout::C));
+
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#else
         InferenceEngine::LayerParams lp;
         lp.name = name;
         lp.type = "FullyConnected";

@@ -456,6 +468,7 @@ public:
         if (blobs.size() > 1)
             ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {(size_t)ieLayer->_out_num}, InferenceEngine::Layout::C);
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
modules/dnn/src/layers/lrn_layer.cpp
@@ -382,6 +382,17 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::NormLayer ieLayer(name);
+        ieLayer.setSize(size);
+        ieLayer.setAlpha(alpha);
+        ieLayer.setBeta(beta);
+        ieLayer.setAcrossMaps(type == CHANNEL_NRM);
+
+        InferenceEngine::Builder::Layer l = ieLayer;
+        l.getParameters()["k"] = bias;
+
+        return Ptr<BackendNode>(new InfEngineBackendNode(l));
+#else
         InferenceEngine::LayerParams lp;
         lp.name = name;
         lp.type = "Norm";

@@ -394,6 +405,7 @@ public:
         ieLayer->_alpha = alpha;
         ieLayer->_isAcrossMaps = (type == CHANNEL_NRM);
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
modules/dnn/src/layers/mvn_layer.cpp
@@ -371,6 +371,13 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::MVNLayer ieLayer(name);
+        ieLayer.setAcrossChannels(acrossChannels);
+        ieLayer.setNormalize(normVariance);
+        ieLayer.setEpsilon(eps);
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#else
         InferenceEngine::LayerParams lp;
         lp.name = name;
         lp.type = "MVN";

@@ -380,6 +387,7 @@ public:
         ieLayer->params["normalize_variance"] = normVariance ? "1" : "0";
         ieLayer->params["eps"] = format("%f", eps);
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
modules/dnn/src/layers/normalize_bbox_layer.cpp
@@ -264,6 +264,49 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
+        if (input->dims.size() == 4)
+        {
+            InferenceEngine::Builder::NormalizeLayer ieLayer(name);
+
+            ieLayer.setChannelShared(false);
+            ieLayer.setAcrossMaps(acrossSpatial);
+            ieLayer.setEpsilon(epsilon);
+
+            InferenceEngine::Builder::Layer l = ieLayer;
+            const int numChannels = input->dims[2];  // NOTE: input->dims are reversed (whcn)
+            if (blobs.empty())
+            {
+                auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
+                                                                        InferenceEngine::Layout::C,
+                                                                        {(size_t)numChannels});
+                weights->allocate();
+                std::vector<float> ones(numChannels, 1);
+                weights->set(ones);
+                l.addConstantData("weights", weights);
+                l.getParameters()["channel_shared"] = false;
+            }
+            else
+            {
+                CV_Assert(numChannels == blobs[0].total());
+                l.addConstantData("weights", wrapToInfEngineBlob(blobs[0], {(size_t)numChannels}, InferenceEngine::Layout::C));
+                l.getParameters()["channel_shared"] = blobs[0].total() == 1;
+            }
+            l.getParameters()["across_spatial"] = acrossSpatial;
+            return Ptr<BackendNode>(new InfEngineBackendNode(l));
+        }
+        else
+        {
+            InferenceEngine::Builder::GRNLayer ieLayer(name);
+            ieLayer.setBeta(epsilon);
+
+            InferenceEngine::Builder::Layer l = ieLayer;
+            l.getParameters()["bias"] = epsilon;
+
+            return Ptr<BackendNode>(new InfEngineBackendNode(l));
+        }
+#else
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
         InferenceEngine::LayerParams lp;

@@ -307,6 +350,7 @@ public:
             ieLayer->params["bias"] = format("%f", epsilon);
             return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
         }
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
modules/dnn/src/layers/permute_layer.cpp
@@ -373,6 +373,11 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::PermuteLayer ieLayer(name);
+        ieLayer.setOrder(_order);
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#else
         InferenceEngine::LayerParams lp;
         lp.name = name;
         lp.type = "Permute";

@@ -385,6 +390,7 @@ public:
             ieLayer->params["order"] += format(",%d", _order[i]);
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
modules/dnn/src/layers/pooling_layer.cpp
@@ -257,6 +257,48 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        if (type == MAX || type == AVE)
+        {
+            InferenceEngine::Builder::PoolingLayer ieLayer(name);
+            ieLayer.setKernel({kernel.height, kernel.width});
+            ieLayer.setStrides({stride.height, stride.width});
+            ieLayer.setPaddingsBegin({pad_t, pad_l});
+            ieLayer.setPaddingsEnd({pad_b, pad_r});
+            ieLayer.setPoolingType(type == MAX ?
+                                   InferenceEngine::Builder::PoolingLayer::PoolingType::MAX :
+                                   InferenceEngine::Builder::PoolingLayer::PoolingType::AVG);
+            ieLayer.setRoundingType(ceilMode ?
+                                    InferenceEngine::Builder::PoolingLayer::RoundingType::CEIL :
+                                    InferenceEngine::Builder::PoolingLayer::RoundingType::FLOOR);
+            ieLayer.setExcludePad(type == AVE && padMode == "SAME");
+
+            InferenceEngine::Builder::Layer l = ieLayer;
+            if (!padMode.empty())
+                l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
+            return Ptr<BackendNode>(new InfEngineBackendNode(l));
+        }
+        else if (type == ROI)
+        {
+            InferenceEngine::Builder::ROIPoolingLayer ieLayer(name);
+            ieLayer.setSpatialScale(spatialScale);
+            ieLayer.setPooled({pooledSize.height, pooledSize.width});
+            ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(2));
+            return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+        }
+        else if (type == PSROI)
+        {
+            InferenceEngine::Builder::PSROIPoolingLayer ieLayer(name);
+            ieLayer.setSpatialScale(spatialScale);
+            ieLayer.setOutputDim(psRoiOutChannels);
+            ieLayer.setGroupSize(pooledSize.width);
+            ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(2));
+            return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+        }
+        else
+            CV_Error(Error::StsNotImplemented, "Unsupported pooling type");
+        return Ptr<BackendNode>();
+#else
         InferenceEngine::LayerParams lp;
         lp.name = name;
         lp.precision = InferenceEngine::Precision::FP32;

@@ -315,6 +357,7 @@ public:
             CV_Error(Error::StsNotImplemented, "Unsupported pooling type");
 
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
modules/dnn/src/layers/prior_box_layer.cpp
@@ -483,6 +483,58 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        if (_explicitSizes)
+        {
+            InferenceEngine::Builder::PriorBoxClusteredLayer ieLayer(name);
+
+            CV_Assert(_stepX == _stepY);
+            ieLayer.setStep(_stepX);
+
+            CV_CheckEQ(_offsetsX.size(), (size_t)1, "");
+            CV_CheckEQ(_offsetsY.size(), (size_t)1, "");
+            CV_CheckEQ(_offsetsX[0], _offsetsY[0], "");
+            ieLayer.setOffset(_offsetsX[0]);
+
+            ieLayer.setClip(_clip);
+            ieLayer.setFlip(false);  // We already flipped aspect ratios.
+
+            InferenceEngine::Builder::Layer l = ieLayer;
+
+            CV_Assert_N(!_boxWidths.empty(), !_boxHeights.empty(), !_variance.empty());
+            CV_Assert(_boxWidths.size() == _boxHeights.size());
+            l.getParameters()["width"] = _boxWidths;
+            l.getParameters()["height"] = _boxHeights;
+            l.getParameters()["variance"] = _variance;
+            return Ptr<BackendNode>(new InfEngineBackendNode(l));
+        }
+        else
+        {
+            InferenceEngine::Builder::PriorBoxLayer ieLayer(name);
+
+            CV_Assert(!_explicitSizes);
+            ieLayer.setMinSize(_minSize);
+            if (_maxSize > 0)
+                ieLayer.setMaxSize(_maxSize);
+
+            CV_Assert(_stepX == _stepY);
+            ieLayer.setStep(_stepX);
+
+            CV_CheckEQ(_offsetsX.size(), (size_t)1, "");
+            CV_CheckEQ(_offsetsY.size(), (size_t)1, "");
+            CV_CheckEQ(_offsetsX[0], _offsetsY[0], "");
+            ieLayer.setOffset(_offsetsX[0]);
+
+            ieLayer.setClip(_clip);
+            ieLayer.setFlip(false);  // We already flipped aspect ratios.
+
+            InferenceEngine::Builder::Layer l = ieLayer;
+            if (!_aspectRatios.empty())
+            {
+                l.getParameters()["aspect_ratio"] = _aspectRatios;
+            }
+            CV_Assert(!_variance.empty());
+            l.getParameters()["variance"] = _variance;
+            return Ptr<BackendNode>(new InfEngineBackendNode(l));
+        }
+#else
         InferenceEngine::LayerParams lp;
         lp.name = name;
         lp.type = _explicitSizes ? "PriorBoxClustered" : "PriorBox";

@@ -538,6 +590,7 @@ public:
         ieLayer->params["offset"] = format("%f", _offsetsX[0]);
 
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
modules/dnn/src/layers/proposal_layer.cpp
@@ -328,6 +328,28 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::ProposalLayer ieLayer(name);
+
+        ieLayer.setBaseSize(baseSize);
+        ieLayer.setFeatStride(featStride);
+        ieLayer.setMinSize(16);
+        ieLayer.setNMSThresh(nmsThreshold);
+        ieLayer.setPostNMSTopN(keepTopAfterNMS);
+        ieLayer.setPreNMSTopN(keepTopBeforeNMS);
+
+        std::vector<float> scalesVec(scales.size());
+        for (int i = 0; i < scales.size(); ++i)
+            scalesVec[i] = scales.get<float>(i);
+        ieLayer.setScale(scalesVec);
+
+        std::vector<float> ratiosVec(ratios.size());
+        for (int i = 0; i < ratios.size(); ++i)
+            ratiosVec[i] = ratios.get<float>(i);
+        ieLayer.setRatio(ratiosVec);
+
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#else
         InferenceEngine::LayerParams lp;
         lp.name = name;
         lp.type = "Proposal";

@@ -353,6 +375,7 @@ public:
             ieLayer->params["scale"] += format(",%f", scales.get<float>(i));
         }
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
modules/dnn/src/layers/reorg_layer.cpp
@@ -181,6 +181,11 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::ReorgYoloLayer ieLayer(name);
+        ieLayer.setStride(reorgStride);
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#else
         InferenceEngine::LayerParams lp;
         lp.name = name;
         lp.type = "ReorgYolo";

@@ -188,6 +193,7 @@ public:
         std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
         ieLayer->params["stride"] = format("%d", reorgStride);
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
modules/dnn/src/layers/reshape_layer.cpp
@@ -203,6 +203,17 @@ public:
         return true;
     }
 
+    void finalize(InputArrayOfArrays, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
+    {
+        std::vector<Mat> outputs;
+        outputs_arr.getMatVector(outputs);
+
+        CV_Assert(!outputs.empty());
+        outShapes.resize(outputs.size());
+        for (int i = 0; i < outputs.size(); ++i)
+            outShapes[i] = shape(outputs[i]);
+    }
+
     bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
     {
         std::vector<UMat> inputs;

@@ -218,8 +229,7 @@ public:
             void* dst_handle = outputs[i].handle(ACCESS_WRITE);
             if (src_handle != dst_handle)
             {
-                MatShape outShape = shape(outputs[i]);
-                UMat umat = srcBlob.reshape(1, (int)outShape.size(), &outShape[0]);
+                UMat umat = srcBlob.reshape(1, (int)outShapes[i].size(), &outShapes[i][0]);
                 umat.copyTo(outputs[i]);
             }
         }

@@ -250,6 +260,12 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::ReshapeLayer ieLayer(name);
+        CV_Assert(outShapes.size() == 1);
+        ieLayer.setDims(outShapes[0]);
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#else
         InferenceEngine::LayerParams lp;
         lp.name = name;
         lp.type = "Reshape";

@@ -265,9 +281,13 @@ public:
             ieLayer->shape = std::vector<int>(shapeSrc->dims.rbegin(), shapeSrc->dims.rend());
         }
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
+
+private:
+    std::vector<MatShape> outShapes;
 };
 
 Ptr<ReshapeLayer> ReshapeLayer::create(const LayerParams& params)
modules/dnn/src/layers/resize_layer.cpp
@@ -163,6 +163,33 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::Layer ieLayer(name);
+        ieLayer.setName(name);
+        if (interpolation == "nearest")
+        {
+            ieLayer.setType("Resample");
+            ieLayer.getParameters()["type"] = std::string("caffe.ResampleParameter.NEAREST");
+            ieLayer.getParameters()["antialias"] = false;
+            if (scaleWidth != scaleHeight)
+                CV_Error(Error::StsNotImplemented, "resample with sw != sh");
+            ieLayer.getParameters()["factor"] = 1.0 / scaleWidth;
+        }
+        else if (interpolation == "bilinear")
+        {
+            ieLayer.setType("Interp");
+            ieLayer.getParameters()["pad_beg"] = 0;
+            ieLayer.getParameters()["pad_end"] = 0;
+            ieLayer.getParameters()["align_corners"] = false;
+        }
+        else
+            CV_Error(Error::StsNotImplemented, "Unsupported interpolation: " + interpolation);
+        ieLayer.getParameters()["width"] = outWidth;
+        ieLayer.getParameters()["height"] = outHeight;
+        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
+        ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#else
         InferenceEngine::LayerParams lp;
         lp.name = name;
         lp.precision = InferenceEngine::Precision::FP32;

@@ -187,6 +214,7 @@ public:
         ieLayer->params["width"] = cv::format("%d", outWidth);
         ieLayer->params["height"] = cv::format("%d", outHeight);
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }

@@ -247,6 +275,18 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::Layer ieLayer(name);
+        ieLayer.setName(name);
+        ieLayer.setType("Interp");
+        ieLayer.getParameters()["pad_beg"] = 0;
+        ieLayer.getParameters()["pad_end"] = 0;
+        ieLayer.getParameters()["width"] = outWidth;
+        ieLayer.getParameters()["height"] = outHeight;
+        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
+        ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#else
         InferenceEngine::LayerParams lp;
         lp.name = name;
         lp.type = "Interp";

@@ -256,6 +296,7 @@ public:
         ieLayer->params["pad_beg"] = "0";
         ieLayer->params["pad_end"] = "0";
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
modules/dnn/src/layers/scale_layer.cpp
@@ -197,6 +197,29 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::ScaleShiftLayer ieLayer(name);
+
+        CV_Assert(!blobs.empty());
+        const size_t numChannels = blobs[0].total();
+        if (hasWeights)
+        {
+            ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C));
+        }
+        else
+        {
+            auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
+                                                                    {numChannels});
+            weights->allocate();
+
+            std::vector<float> ones(numChannels, 1);
+            weights->set(ones);
+            ieLayer.setWeights(weights);
+        }
+        if (hasBias)
+            ieLayer.setBiases(wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C));
+
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#else
         InferenceEngine::LayerParams lp;
         lp.name = name;
         lp.type = "ScaleShift";

@@ -223,6 +246,7 @@ public:
             ieLayer->_biases = wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C);
 
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
modules/dnn/src/layers/slice_layer.cpp
@@ -110,8 +110,15 @@ public:
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
-        return backendId == DNN_BACKEND_OPENCV ||
-               (backendId == DNN_BACKEND_INFERENCE_ENGINE && sliceRanges.size() == 1 && sliceRanges[0].size() == 4);
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+        {
+            return INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5) &&
+                   sliceRanges.size() == 1 && sliceRanges[0].size() == 4;
+        }
+        else
+#endif
+            return backendId == DNN_BACKEND_OPENCV;
     }
 
     bool getMemoryShapes(const std::vector<MatShape> &inputs,

@@ -254,9 +261,10 @@ public:
         }
     }
 
+#ifdef HAVE_INF_ENGINE
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
-#ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5)
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
         InferenceEngine::LayerParams lp;
         lp.name = name;

@@ -286,10 +294,11 @@ public:
             ieLayer->dim.push_back(sliceRanges[0][i].end - sliceRanges[0][i].start);
         }
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-#endif  // HAVE_INF_ENGINE
+#else
         return Ptr<BackendNode>();
+#endif  // IE < R5
     }
+#endif
 };
 
 Ptr<SliceLayer> SliceLayer::create(const LayerParams& params)
modules/dnn/src/layers/softmax_layer.cpp
@@ -312,6 +312,13 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
+
+        InferenceEngine::Builder::SoftMaxLayer ieLayer(name);
+        ieLayer.setAxis(clamp(axisRaw, input->dims.size()));
+
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#else
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
         InferenceEngine::LayerParams lp;

@@ -321,6 +328,7 @@ public:
         std::shared_ptr<InferenceEngine::SoftMaxLayer> ieLayer(new InferenceEngine::SoftMaxLayer(lp));
         ieLayer->axis = clamp(axisRaw, input->dims.size());
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
     }
modules/dnn/src/op_inf_engine.cpp
View file @
f0ddf302
...
...
@@ -18,6 +18,10 @@ namespace cv { namespace dnn {
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InfEngineBackendNode
::
InfEngineBackendNode
(
const
InferenceEngine
::
Builder
::
Layer
&
_layer
)
:
BackendNode
(
DNN_BACKEND_INFERENCE_ENGINE
),
layer
(
_layer
)
{}
#else
InfEngineBackendNode
::
InfEngineBackendNode
(
const
InferenceEngine
::
CNNLayerPtr
&
_layer
)
:
BackendNode
(
DNN_BACKEND_INFERENCE_ENGINE
),
layer
(
_layer
)
{}
...
...
@@ -40,6 +44,7 @@ void InfEngineBackendNode::connect(std::vector<Ptr<BackendWrapper> >& inputs,
layer
->
outData
[
0
]
=
dataPtr
;
dataPtr
->
creatorLayer
=
InferenceEngine
::
CNNLayerWeakPtr
(
layer
);
}
#endif
static
std
::
vector
<
Ptr
<
InfEngineBackendWrapper
>
>
infEngineWrappers
(
const
std
::
vector
<
Ptr
<
BackendWrapper
>
>&
ptrs
)
...
...
@@ -54,6 +59,129 @@ infEngineWrappers(const std::vector<Ptr<BackendWrapper> >& ptrs)
return
wrappers
;
}
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InfEngineBackendNet
::
InfEngineBackendNet
()
:
netBuilder
(
""
)
{
hasNetOwner
=
false
;
targetDevice
=
InferenceEngine
::
TargetDevice
::
eCPU
;
}
InfEngineBackendNet
::
InfEngineBackendNet
(
InferenceEngine
::
CNNNetwork
&
net
)
:
netBuilder
(
""
),
cnn
(
net
)
{
hasNetOwner
=
true
;
targetDevice
=
InferenceEngine
::
TargetDevice
::
eCPU
;
}
void
InfEngineBackendNet
::
connect
(
const
std
::
vector
<
Ptr
<
BackendWrapper
>
>&
inputs
,
const
std
::
vector
<
Ptr
<
BackendWrapper
>
>&
outputs
,
const
std
::
string
&
layerName
)
{
std
::
vector
<
Ptr
<
InfEngineBackendWrapper
>
>
inpWrappers
=
infEngineWrappers
(
inputs
);
std
::
map
<
std
::
string
,
int
>::
iterator
it
=
layers
.
find
(
layerName
);
CV_Assert
(
it
!=
layers
.
end
());
const
int
layerId
=
it
->
second
;
for
(
int
i
=
0
;
i
<
inpWrappers
.
size
();
++
i
)
{
const
auto
&
inp
=
inpWrappers
[
i
];
const
std
::
string
&
inpName
=
inp
->
dataPtr
->
name
;
int
inpId
;
it
=
layers
.
find
(
inpName
);
if
(
it
==
layers
.
end
())
{
InferenceEngine
::
Builder
::
InputLayer
inpLayer
(
inpName
);
std
::
vector
<
size_t
>
shape
(
inp
->
blob
->
dims
());
std
::
reverse
(
shape
.
begin
(),
shape
.
end
());
inpLayer
.
setPort
(
InferenceEngine
::
Port
(
shape
));
inpId
=
netBuilder
.
addLayer
(
inpLayer
);
layers
.
insert
({
inpName
,
inpId
});
}
else
inpId
=
it
->
second
;
netBuilder
.
connect
(
inpId
,
{
layerId
,
i
});
unconnectedLayersIds
.
erase
(
inpId
);
}
CV_Assert
(
!
outputs
.
empty
());
InferenceEngine
::
DataPtr
dataPtr
=
infEngineDataNode
(
outputs
[
0
]);
dataPtr
->
name
=
layerName
;
}
void
InfEngineBackendNet
::
init
(
int
targetId
)
{
if
(
!
hasNetOwner
)
{
CV_Assert
(
!
unconnectedLayersIds
.
empty
());
for
(
int
id
:
unconnectedLayersIds
)
{
InferenceEngine
::
Builder
::
OutputLayer
outLayer
(
"myconv1"
);
netBuilder
.
addLayer
({
id
},
outLayer
);
}
cnn
=
InferenceEngine
::
CNNNetwork
(
InferenceEngine
::
Builder
::
convertToICNNNetwork
(
netBuilder
.
build
()));
}
switch
(
targetId
)
{
case
DNN_TARGET_CPU
:
targetDevice
=
InferenceEngine
::
TargetDevice
::
eCPU
;
break
;
case
DNN_TARGET_OPENCL
:
case
DNN_TARGET_OPENCL_FP16
:
targetDevice
=
InferenceEngine
::
TargetDevice
::
eGPU
;
break
;
case
DNN_TARGET_MYRIAD
:
targetDevice
=
InferenceEngine
::
TargetDevice
::
eMYRIAD
;
break
;
case
DNN_TARGET_FPGA
:
targetDevice
=
InferenceEngine
::
TargetDevice
::
eFPGA
;
break
;
default
:
CV_Error
(
Error
::
StsError
,
format
(
"Unknown target identifier: %d"
,
targetId
));
}
for
(
const
auto
&
name
:
requestedOutputs
)
{
cnn
.
addOutput
(
name
);
}
for
(
const
auto
&
it
:
cnn
.
getInputsInfo
())
{
const
std
::
string
&
name
=
it
.
first
;
auto
blobIt
=
allBlobs
.
find
(
name
);
CV_Assert
(
blobIt
!=
allBlobs
.
end
());
inpBlobs
[
name
]
=
blobIt
->
second
;
it
.
second
->
setPrecision
(
blobIt
->
second
->
precision
());
}
for
(
const
auto
&
it
:
cnn
.
getOutputsInfo
())
{
const
std
::
string
&
name
=
it
.
first
;
auto
blobIt
=
allBlobs
.
find
(
name
);
CV_Assert
(
blobIt
!=
allBlobs
.
end
());
outBlobs
[
name
]
=
blobIt
->
second
;
it
.
second
->
setPrecision
(
blobIt
->
second
->
precision
());
// Should be always FP32
}
initPlugin
(
cnn
);
}
void InfEngineBackendNet::addLayer(const InferenceEngine::Builder::Layer& layer)
{
    int id = netBuilder.addLayer(layer);
    const std::string& layerName = layer.getName();

    CV_Assert(layers.insert({layerName, id}).second);
    unconnectedLayersIds.insert(id);
}
void InfEngineBackendNet::addOutput(const std::string& name)
{
    requestedOutputs.push_back(name);
}
#endif // IE >= R5
static InferenceEngine::Layout estimateLayout(const Mat& m)
{
    if (m.dims == 4)
...
@@ -148,6 +276,7 @@ void InfEngineBackendWrapper::setHostDirty()
}
#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5)
InfEngineBackendNet::InfEngineBackendNet()
{
    targetDevice = InferenceEngine::TargetDevice::eCPU;
...
@@ -491,6 +620,8 @@ void InfEngineBackendNet::init(int targetId)
    initPlugin(*this);
}
#endif // IE < R5
static std::map<InferenceEngine::TargetDevice, InferenceEngine::InferenceEnginePluginPtr> sharedPlugins;

void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
...
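sharedPlugins caches one plugin handle per target device, so loading a second network on the same device skips the costly plugin load. The body of initPlugin() is collapsed in this diff; the lookup presumably follows the pattern below (the PluginDispatcher usage is an editorial assumption, not quoted from the patch):

// Assumed sketch of the per-device plugin cache consulted by initPlugin().
InferenceEngine::InferenceEnginePluginPtr getSharedPlugin(InferenceEngine::TargetDevice device)
{
    auto it = sharedPlugins.find(device);
    if (it == sharedPlugins.end())
    {
        // getSuitablePlugin picks e.g. the CPU plugin for eCPU, the GPU plugin for eGPU.
        auto plugin = InferenceEngine::PluginDispatcher({""}).getSuitablePlugin(device);
        it = sharedPlugins.insert({device, plugin}).first;
    }
    return it->second;
}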
@@ -566,7 +697,11 @@ void InfEngineBackendNet::addBlobs(const std::vector<Ptr<BackendWrapper> >& ptrs
    auto wrappers = infEngineWrappers(ptrs);
    for (const auto& wrapper : wrappers)
    {
-       allBlobs.insert({wrapper->dataPtr->name, wrapper->blob});
+       std::string name = wrapper->dataPtr->name;
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+       name = name.empty() ? "id1" : name;  // TODO: drop the magic input name.
+#endif
+       allBlobs.insert({name, wrapper->blob});
    }
}
...
modules/dnn/src/op_inf_engine.hpp View file @ f0ddf302
...
@@ -35,6 +35,11 @@
#define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) < ((ver) / 10000))
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
#include <ie_builders.hpp>
#endif
#endif // HAVE_INF_ENGINE
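The version macros compare only the year/release part of INF_ENGINE_RELEASE: dividing by 10000 strips the trailing patch digits, so 2018050000 becomes 201805. An illustrative check (values written out for the sake of example, not taken from the headers):

// Example only: how the guard above selects the Builder headers.
// 2018050000 / 10000 == 201805 (2018 R5); 2018040000 / 10000 == 201804 (2018 R4).
static_assert(2018050000 / 10000 >= 2018050000 / 10000, "R5 satisfies GE(2018R5)");
static_assert(!(2018040000 / 10000 >= 2018050000 / 10000), "R4 does not satisfy GE(2018R5)");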
...
@@ -42,6 +47,7 @@ namespace cv { namespace dnn {
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5)
class InfEngineBackendNet : public InferenceEngine::ICNNNetwork
{
public:
...
@@ -146,17 +152,75 @@ private:
    void initPlugin(InferenceEngine::ICNNNetwork& net);
};
#else  // IE < R5

class InfEngineBackendNet
{
public:
    InfEngineBackendNet();

    InfEngineBackendNet(InferenceEngine::CNNNetwork& net);

    void addLayer(const InferenceEngine::Builder::Layer& layer);

    void addOutput(const std::string& name);

    void connect(const std::vector<Ptr<BackendWrapper> >& inputs,
                 const std::vector<Ptr<BackendWrapper> >& outputs,
                 const std::string& layerName);

    bool isInitialized();

    void init(int targetId);

    void forward();

    void initPlugin(InferenceEngine::ICNNNetwork& net);

    void addBlobs(const std::vector<Ptr<BackendWrapper> >& ptrs);

private:
    InferenceEngine::Builder::Network netBuilder;

    InferenceEngine::InferenceEnginePluginPtr enginePtr;
    InferenceEngine::InferencePlugin plugin;
    InferenceEngine::ExecutableNetwork netExec;
    InferenceEngine::InferRequest infRequest;
    InferenceEngine::BlobMap allBlobs;
    InferenceEngine::BlobMap inpBlobs;
    InferenceEngine::BlobMap outBlobs;
    InferenceEngine::TargetDevice targetDevice;

    InferenceEngine::CNNNetwork cnn;
    bool hasNetOwner;

    std::map<std::string, int> layers;
    std::vector<std::string> requestedOutputs;
    std::set<int> unconnectedLayersIds;
};
#endif  // IE < R5
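Taken together, the new class is driven in roughly this order by the DNN backend. A sketch using only the methods declared above; the actual driving code lives in dnn.cpp and is not shown in this hunk:

// Editorial sketch of the intended call sequence for the R5+ InfEngineBackendNet.
void runOnce(InfEngineBackendNet& net,
             const std::vector<Ptr<BackendWrapper> >& inputs,
             const std::vector<Ptr<BackendWrapper> >& outputs)
{
    net.addBlobs(inputs);          // register input blobs by name
    net.addBlobs(outputs);         // register output blobs by name
    // ... addLayer()/connect() are called per imported layer before this point ...
    if (!net.isInitialized())
        net.init(DNN_TARGET_CPU);  // pick a device, build the graph, load the plugin
    net.forward();                 // results land in the registered output blobs
}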
class InfEngineBackendNode : public BackendNode
{
public:
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
    InfEngineBackendNode(const InferenceEngine::Builder::Layer& layer);
#else
    InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& layer);
#endif

    void connect(std::vector<Ptr<BackendWrapper> >& inputs,
                 std::vector<Ptr<BackendWrapper> >& outputs);

-   InferenceEngine::CNNLayerPtr layer;
    // Inference Engine network object that allows to obtain the outputs of this layer.
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
    InferenceEngine::Builder::Layer layer;
    Ptr<InfEngineBackendNet> net;
#else
    InferenceEngine::CNNLayerPtr layer;
    Ptr<InfEngineBackendNet> net;
#endif
};
class InfEngineBackendWrapper : public BackendWrapper
...
modules/dnn/test/test_backends.cpp View file @ f0ddf302
...
@@ -180,7 +180,7 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_v2_TensorFlow)
        throw SkipTestException("");
    Mat sample = imread(findDataFile("dnn/street.png", false));
    Mat inp = blobFromImage(sample, 1.0f, Size(300, 300), Scalar(), false);
-   float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.013 : 0.0;
+   float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.013 : 2e-5;
    float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.062 : 0.0;
    processNet("dnn/ssd_mobilenet_v2_coco_2018_03_29.pb", "dnn/ssd_mobilenet_v2_coco_2018_03_29.pbtxt",
               inp, "detection_out", "", l1, lInf, 0.25);
...
@@ -288,7 +288,7 @@ TEST_P(DNNTestNetwork, FastNeuralStyle_eccv16)
    Mat inp = blobFromImage(img, 1.0, Size(320, 240), Scalar(103.939, 116.779, 123.68), false, false);
    // Output image has values in range [-143.526, 148.539].
    float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.3 : 4e-5;
-   float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 7.0 : 2e-3;
+   float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 7.28 : 2e-3;
    processNet("dnn/fast_neural_style_eccv16_starry_night.t7", "", inp, "", "", l1, lInf);
}
...
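For context on the tolerance changes in these tests: l1-style thresholds bound the mean absolute difference against the reference output, and lInf-style thresholds bound the worst single element, which is why the FP16 and Myriad targets get much looser budgets than the FP32 ones. A standalone editorial illustration, not the test harness itself:

// Illustration of the two norms the thresholds above are checked against.
void checkWithinTolerance(const cv::Mat& ref, const cv::Mat& out, double l1, double lInf)
{
    double meanAbs = cv::norm(ref, out, cv::NORM_L1) / ref.total();  // average |diff|
    double maxAbs  = cv::norm(ref, out, cv::NORM_INF);               // largest |diff|
    CV_Assert(meanAbs <= l1 && maxAbs <= lInf);
}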
modules/dnn/test/test_darknet_importer.cpp View file @ f0ddf302
...
@@ -306,7 +306,7 @@ TEST_P(Test_Darknet_nets, TinyYoloVoc)
    // batch size 1
    testDarknetModel(config_file, weights_file, ref.rowRange(0, 2), scoreDiff, iouDiff);
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018040000
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_MYRIAD)
#endif
    // batch size 2
...
modules/dnn/test/test_halide_layers.cpp View file @ f0ddf302
...
@@ -163,7 +163,7 @@ TEST_P(Deconvolution, Accuracy)
    bool hasBias = get<6>(GetParam());
    Backend backendId = get<0>(get<7>(GetParam()));
    Target targetId = get<1>(get<7>(GetParam()));
-   if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_CPU &&
+   if (backendId == DNN_BACKEND_INFERENCE_ENGINE && (targetId == DNN_TARGET_CPU || targetId == DNN_TARGET_MYRIAD) &&
        dilation.width == 2 && dilation.height == 2)
        throw SkipTestException("");
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
...
@@ -466,6 +466,7 @@ void testInPlaceActivation(LayerParams& lp, Backend backendId, Target targetId)
    pool.set("stride_w", 2);
    pool.set("stride_h", 2);
    pool.type = "Pooling";
    pool.name = "ave_pool";

    Net net;
    int poolId = net.addLayer(pool.name, pool.type, pool);
...
modules/dnn/test/test_layers.cpp View file @ f0ddf302
...
@@ -295,10 +295,6 @@ TEST_P(Test_Caffe_layers, Eltwise)
{
-   if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-       throw SkipTestException("");
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL)
        throw SkipTestException("Test is disabled for OpenVINO 2018R5");
#endif
    testLayerUsingCaffeModels("layer_eltwise");
}
...
modules/dnn/test/test_onnx_importer.cpp View file @ f0ddf302
...
@@ -351,6 +351,10 @@ TEST_P(Test_ONNX_nets, LResNet100E_IR)
        l1 = 0.009;
        lInf = 0.035;
    }
+   else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_CPU)
+   {
+       l1 = 4.5e-5;
+       lInf = 1.9e-4;
+   }
    testONNXModels("LResNet100E_IR", pb, l1, lInf);
}
...
@@ -366,6 +370,10 @@ TEST_P(Test_ONNX_nets, Emotion_ferplus)
        l1 = 0.021;
        lInf = 0.034;
    }
+   else if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_CPU || target == DNN_TARGET_OPENCL))
+   {
+       l1 = 2.4e-4;
+       lInf = 6e-4;
+   }
    testONNXModels("emotion_ferplus", pb, l1, lInf);
}
...
@@ -389,7 +397,7 @@ TEST_P(Test_ONNX_nets, Inception_v1)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-       throw SkipTestException("");
+       throw SkipTestException("Test is disabled for OpenVINO 2018R5");
#endif
    testONNXModels("inception_v1", pb);
}
...
modules/dnn/test/test_tf_importer.cpp View file @ f0ddf302
...
@@ -351,8 +351,8 @@ TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD)
    Mat out = net.forward();
    Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/ssd_mobilenet_v1_coco_2017_11_17.detection_out.npy"));
-   float scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 7e-3 : 1e-5;
-   float iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0098 : 1e-3;
+   float scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 7e-3 : 1.5e-5;
+   float iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.012 : 1e-3;
    normAssertDetections(ref, out, "", 0.3, scoreDiff, iouDiff);
}
...
@@ -366,6 +366,7 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
        (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
        throw SkipTestException("");
+   double scoresDiff = backend == DNN_BACKEND_INFERENCE_ENGINE ? 2.9e-5 : 1e-5;
    for (int i = 0; i < 2; ++i)
    {
        std::string proto = findDataFile("dnn/" + names[i] + ".pbtxt", false);
...
@@ -381,7 +382,7 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
        Mat out = net.forward();
        Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/" + names[i] + ".detection_out.npy"));
-       normAssertDetections(ref, out, names[i].c_str(), 0.3);
+       normAssertDetections(ref, out, names[i].c_str(), 0.3, scoresDiff);
    }
}
...
@@ -406,7 +407,7 @@ TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD_PPN)
    net.setInput(blob);
    Mat out = net.forward();

-   double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.011 : default_l1;
+   double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.011 : 1.1e-5;
    double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.021 : default_lInf;
    normAssertDetections(ref, out, "", 0.4, scoreDiff, iouDiff);
}
...
@@ -568,10 +569,6 @@ TEST_P(Test_TensorFlow_layers, slice)
-   if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
-       (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
-       throw SkipTestException("");
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        throw SkipTestException("");
#endif
    runTensorFlowNet("slice_4d");
}
...
modules/dnn/test/test_torch_importer.cpp View file @ f0ddf302
...
@@ -260,6 +260,11 @@ TEST_P(Test_Torch_layers, run_paralel)
TEST_P(Test_Torch_layers, net_residual)
{
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
+   if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL ||
+                                                   target == DNN_TARGET_OPENCL_FP16))
+       throw SkipTestException("Test is disabled for OpenVINO 2018R5");
+#endif
    runTorchNet("net_residual", "", false, true);
}
...
@@ -390,10 +395,6 @@ TEST_P(Test_Torch_nets, ENet_accuracy)
// -model models/instance_norm/feathers.t7
TEST_P(Test_Torch_nets, FastNeuralStyle_accuracy)
{
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
-   if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-       throw SkipTestException("");
-#endif
    checkBackend();
    std::string models[] = {"dnn/fast_neural_style_eccv16_starry_night.t7",
                            "dnn/fast_neural_style_instance_norm_feathers.t7"};
...