opencv / Commits / 523b6f32

Commit 523b6f32 authored Jul 06, 2018 by Vadim Pisarevsky

Merge pull request #11867 from dkurt:dnn_ie_layers

Parents: 3b01777c 019c2f21
Showing 13 changed files with 761 additions and 518 deletions (+761 / -518):

  modules/dnn/src/dnn.cpp                        +3    -1
  modules/dnn/src/layers/convolution_layer.cpp   +15   -1
  modules/dnn/src/layers/eltwise_layer.cpp       +2    -2
  modules/dnn/src/layers/reorg_layer.cpp         +20   -1
  modules/dnn/src/layers/resize_layer.cpp        +21   -0
  modules/dnn/src/layers/slice_layer.cpp         +15   -1
  modules/dnn/test/test_backends.cpp             +5    -36
  modules/dnn/test/test_darknet_importer.cpp     +94   -106
  modules/dnn/test/test_halide_layers.cpp        +207  -123
  modules/dnn/test/test_layers.cpp               +128  -116
  modules/dnn/test/test_precomp.hpp              +87   -0
  modules/dnn/test/test_tf_importer.cpp          +164  -130
  modules/dnn/test/test_torch_importer.cpp       +0    -1
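The bulk of this change teaches more dnn layers (Reorg, Interp/Resize, Slice, Eltwise, Convolution/Deconvolution) to run on the Intel Inference Engine backend and reworks the tests to iterate over backend/target pairs. As context for the diffs below, here is a minimal sketch of how a caller selects that backend on an ordinary network; the `yolo-voc` files and the 416x416 input size mirror the test data used later and are illustrative assumptions, not part of the patch:

    #include <opencv2/dnn.hpp>
    #include <opencv2/imgcodecs.hpp>

    using namespace cv;
    using namespace cv::dnn;

    int main()
    {
        // Load any supported model; the Darknet YOLO files are just an example.
        Net net = readNet("yolo-voc.cfg", "yolo-voc.weights");

        // Request the Inference Engine backend; the per-layer supportBackend()
        // overrides added in this commit decide whether a layer can honor it.
        net.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
        net.setPreferableTarget(DNN_TARGET_CPU);  // or OPENCL, OPENCL_FP16, MYRIAD

        Mat img = imread("dog416.png");
        Mat blob = blobFromImage(img, 1.0 / 255, Size(416, 416), Scalar(), true, false);
        net.setInput(blob);
        Mat out = net.forward();  // unsupported layers fall back to the OpenCV backend
        return 0;
    }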
modules/dnn/src/dnn.cpp

@@ -2730,9 +2730,9 @@ void Layer::applyHalideScheduler(Ptr<BackendNode>& node, const std::vector<Mat*>
     }
     else if (targetId == DNN_TARGET_OPENCL)
     {
-        int c_split = outC > 8 ? (outC > 16 ? 8 : 4) : outC;
         if (outW == 1 && outH == 1)
         {
+            int c_split = outC > 8 ? (outC > 16 ? 8 : 4) : outC;
             top.split(c, co, ci, c_split)
                .fuse(x, y, tile).fuse(co, tile, tile).fuse(n, tile, tile)
                .gpu_blocks(tile)
@@ -2742,6 +2742,8 @@ void Layer::applyHalideScheduler(Ptr<BackendNode>& node, const std::vector<Mat*>
         {
             int x_split = outW > 8 ? (outW >= 32 ? 16 : 8) : outW;
             int y_split = outH > 8 ? (outH >= 32 ? 16 : 8) : outH;
+            // Supported vectorization widths: 2, 3, 4, 8, 16
+            int c_split = outC > 8 ? (outC > 16 ? 8 : 4) : std::min(4, outC);
             top.split(x, xo, xi, x_split).split(y, yo, yi, y_split)
                .split(c, co, ci, c_split)
                .gpu_blocks(xo, yo, co)
modules/dnn/src/layers/convolution_layer.cpp
View file @
523b6f32
...
...
@@ -82,7 +82,21 @@ public:
virtual
bool
supportBackend
(
int
backendId
)
CV_OVERRIDE
{
if
(
backendId
==
DNN_BACKEND_INFERENCE_ENGINE
)
return
preferableTarget
!=
DNN_TARGET_MYRIAD
||
type
!=
"Deconvolution"
||
adjustPad
==
Size
();
{
if
(
type
==
"Convolution"
)
return
preferableTarget
!=
DNN_TARGET_MYRIAD
||
dilation
.
width
==
dilation
.
height
;
else
{
CV_Assert
(
type
==
"Deconvolution"
);
const
int
outGroupCn
=
blobs
[
0
].
size
[
1
];
// Weights are in IOHW layout
const
int
group
=
numOutput
/
outGroupCn
;
if
(
group
!=
1
)
return
false
;
if
(
preferableTarget
==
DNN_TARGET_OPENCL
||
preferableTarget
==
DNN_TARGET_OPENCL_FP16
)
return
dilation
.
width
==
1
&&
dilation
.
height
==
1
;
return
true
;
}
}
else
return
backendId
==
DNN_BACKEND_OPENCV
||
backendId
==
DNN_BACKEND_HALIDE
;
}
...
...
modules/dnn/src/layers/eltwise_layer.cpp
View file @
523b6f32
...
...
@@ -97,8 +97,8 @@ public:
virtual
bool
supportBackend
(
int
backendId
)
CV_OVERRIDE
{
return
backendId
==
DNN_BACKEND_OPENCV
||
backendId
==
DNN_BACKEND_HALIDE
&&
haveHalide
()
||
backendId
==
DNN_BACKEND_INFERENCE_ENGINE
&&
haveInfEngine
(
);
backendId
==
DNN_BACKEND_HALIDE
||
backendId
==
DNN_BACKEND_INFERENCE_ENGINE
&&
(
op
!=
SUM
||
coeffs
.
empty
()
);
}
bool
getMemoryShapes
(
const
std
::
vector
<
MatShape
>
&
inputs
,
...
...
modules/dnn/src/layers/reorg_layer.cpp
View file @
523b6f32
...
...
@@ -41,9 +41,9 @@
//M*/
#include "../precomp.hpp"
#include "../op_inf_engine.hpp"
#include <opencv2/dnn/shape_utils.hpp>
#include <opencv2/dnn/all_layers.hpp>
#include <iostream>
#ifdef HAVE_OPENCL
#include "opencl_kernels_dnn.hpp"
...
...
@@ -85,6 +85,11 @@ public:
return
false
;
}
virtual
bool
supportBackend
(
int
backendId
)
CV_OVERRIDE
{
return
backendId
==
DNN_BACKEND_OPENCV
||
backendId
==
DNN_BACKEND_INFERENCE_ENGINE
;
}
#ifdef HAVE_OPENCL
bool
forward_ocl
(
InputArrayOfArrays
inps
,
OutputArrayOfArrays
outs
,
OutputArrayOfArrays
internals
)
{
...
...
@@ -169,6 +174,20 @@ public:
}
}
virtual
Ptr
<
BackendNode
>
initInfEngine
(
const
std
::
vector
<
Ptr
<
BackendWrapper
>
>&
)
CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine
::
LayerParams
lp
;
lp
.
name
=
name
;
lp
.
type
=
"ReorgYolo"
;
lp
.
precision
=
InferenceEngine
::
Precision
::
FP32
;
std
::
shared_ptr
<
InferenceEngine
::
CNNLayer
>
ieLayer
(
new
InferenceEngine
::
CNNLayer
(
lp
));
ieLayer
->
params
[
"stride"
]
=
format
(
"%d"
,
reorgStride
);
return
Ptr
<
BackendNode
>
(
new
InfEngineBackendNode
(
ieLayer
));
#endif // HAVE_INF_ENGINE
return
Ptr
<
BackendNode
>
();
}
virtual
int64
getFLOPS
(
const
std
::
vector
<
MatShape
>
&
inputs
,
const
std
::
vector
<
MatShape
>
&
outputs
)
const
CV_OVERRIDE
{
...
...
modules/dnn/src/layers/resize_layer.cpp

@@ -192,6 +192,11 @@ public:
         return (outputs[0][2] == inputs[0][2]) && (outputs[0][3] == inputs[0][3]);
     }

+    virtual bool supportBackend(int backendId) CV_OVERRIDE
+    {
+        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_INFERENCE_ENGINE;
+    }
+
     virtual void finalize(const std::vector<Mat*>& inputs, std::vector<Mat> &outputs) CV_OVERRIDE
     {
         if (!outWidth && !outHeight)
@@ -204,6 +209,22 @@ public:
         scaleHeight = (outHeight > 1) ? (static_cast<float>(inpHeight - 1) / (outHeight - 1)) : 0.f;
         scaleWidth = (outWidth > 1) ? (static_cast<float>(inpWidth - 1) / (outWidth - 1)) : 0.f;
     }

+    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
+    {
+#ifdef HAVE_INF_ENGINE
+        InferenceEngine::LayerParams lp;
+        lp.name = name;
+        lp.type = "Interp";
+        lp.precision = InferenceEngine::Precision::FP32;
+        std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
+        ieLayer->params["pad_beg"] = "0";
+        ieLayer->params["pad_end"] = "0";
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif  // HAVE_INF_ENGINE
+        return Ptr<BackendNode>();
+    }
 };

 Ptr<Layer> InterpLayer::create(const LayerParams& params)
modules/dnn/src/layers/slice_layer.cpp

@@ -266,7 +266,21 @@ public:
     std::shared_ptr<InferenceEngine::CropLayer> ieLayer(new InferenceEngine::CropLayer(lp));

     CV_Assert(sliceRanges.size() == 1);
-    for (int i = sliceRanges[0].size() - 1; i >= 0; --i)
+    int from, to, step;
+    if (preferableTarget == DNN_TARGET_MYRIAD)
+    {
+        from = 1;
+        to = sliceRanges[0].size() + 1;
+        step = 1;
+    }
+    else
+    {
+        from = sliceRanges[0].size() - 1;
+        to = -1;
+        step = -1;
+    }
+    for (int i = from; i != to; i += step)
     {
         ieLayer->axis.push_back(i);
         ieLayer->offset.push_back(sliceRanges[0][i].start);
modules/dnn/test/test_backends.cpp

@@ -10,18 +10,9 @@
 namespace opencv_test { namespace {

-class DNNTestNetwork : public TestWithParam<tuple<DNNBackend, DNNTarget> >
+class DNNTestNetwork : public DNNTestLayer
 {
 public:
-    dnn::Backend backend;
-    dnn::Target target;
-
-    DNNTestNetwork()
-    {
-        backend = (dnn::Backend)(int)get<0>(GetParam());
-        target = (dnn::Target)(int)get<1>(GetParam());
-    }
-
     void processNet(const std::string& weights, const std::string& proto,
                     Size inpSize, const std::string& outputLayer = "",
                     const std::string& halideScheduler = "",
@@ -40,32 +31,10 @@ public:
                     std::string halideScheduler = "",
                     double l1 = 0.0, double lInf = 0.0, double detectionConfThresh = 0.2)
     {
-        if (backend == DNN_BACKEND_OPENCV && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
-        {
-#ifdef HAVE_OPENCL
-            if (!cv::ocl::useOpenCL())
-#endif
-            {
-                throw SkipTestException("OpenCL is not available/disabled in OpenCV");
-            }
-        }
-        if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        {
-            if (!checkMyriadTarget())
-            {
-                throw SkipTestException("Myriad is not available/disabled in OpenCV");
-            }
-        }
-        if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
-        {
-            l1 = l1 == 0.0 ? 4e-3 : l1;
-            lInf = lInf == 0.0 ? 2e-2 : lInf;
-        }
-        else
-        {
-            l1 = l1 == 0.0 ? 1e-5 : l1;
-            lInf = lInf == 0.0 ? 1e-4 : lInf;
-        }
+        checkBackend();
+        l1 = l1 ? l1 : default_l1;
+        lInf = lInf ? lInf : default_lInf;

         weights = findDataFile(weights, false);
         if (!proto.empty())
             proto = findDataFile(proto, false);
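Several of the rewritten tests above and below derive from DNNTestLayer and use backend, target, default_l1, default_lInf, checkBackend() and dnnBackendsAndTargets(). Those helpers live in modules/dnn/test/test_precomp.hpp (+87 lines in this commit), whose hunk is not reproduced on this page. What follows is only a rough, hypothetical sketch of the shape such a fixture takes, reconstructed from how the tests call it and from the thresholds removed from test_backends.cpp, not from the actual header:

    // Hypothetical sketch only; the real definition is in test_precomp.hpp (not shown here).
    class DNNTestLayer : public testing::TestWithParam<tuple<DNNBackend, DNNTarget> >
    {
    public:
        dnn::Backend backend;
        dnn::Target target;
        double default_l1, default_lInf;

        DNNTestLayer()
        {
            backend = (dnn::Backend)(int)get<0>(GetParam());
            target = (dnn::Target)(int)get<1>(GetParam());
            getDefaultThresholds(backend, target, &default_l1, &default_lInf);
        }

        static void getDefaultThresholds(int backend, int target, double* l1, double* lInf)
        {
            // Looser tolerances for half-precision targets (values taken from the
            // per-test logic this commit deletes from test_backends.cpp).
            if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
            {
                *l1 = 4e-3;
                *lInf = 2e-2;
            }
            else
            {
                *l1 = 1e-5;
                *lInf = 1e-4;
            }
        }

        void checkBackend(Mat* inp = 0, Mat* ref = 0)
        {
            // Skips the test when the requested backend/target pair is unavailable,
            // e.g. OpenCL disabled at runtime or no Myriad device present.
        }
    };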
modules/dnn/test/test_darknet_importer.cpp

@@ -65,76 +65,84 @@ TEST(Test_Darknet, read_yolo_voc)
     ASSERT_FALSE(net.empty());
 }

-// Test object detection network from Darknet framework.
-static void testDarknetModel(const std::string& cfg, const std::string& weights,
-                             const std::vector<cv::String>& outNames,
-                             const std::vector<int>& refClassIds,
-                             const std::vector<float>& refConfidences,
-                             const std::vector<Rect2d>& refBoxes,
-                             int backendId, int targetId, float scoreDiff = 0.0,
-                             float iouDiff = 0.0, float confThreshold = 0.24)
-{
-    if (backendId == DNN_BACKEND_OPENCV && targetId == DNN_TARGET_OPENCL)
-    {
-#ifdef HAVE_OPENCL
-        if (!cv::ocl::useOpenCL())
-#endif
-        {
-            throw SkipTestException("OpenCL is not available/disabled in OpenCV");
-        }
-    }
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD)
-    {
-        if (!checkMyriadTarget())
-        {
-            throw SkipTestException("Myriad is not available/disabled in OpenCV");
-        }
-    }
+class Test_Darknet_layers : public DNNTestLayer
+{
+public:
+    void testDarknetLayer(const std::string& name, bool hasWeights = false)
+    {
+        std::string cfg = findDataFile("dnn/darknet/" + name + ".cfg", false);
+        std::string model = "";
+        if (hasWeights)
+            model = findDataFile("dnn/darknet/" + name + ".weights", false);
+        Mat inp = blobFromNPY(findDataFile("dnn/darknet/" + name + "_in.npy", false));
+        Mat ref = blobFromNPY(findDataFile("dnn/darknet/" + name + "_out.npy", false));
+        checkBackend(&inp, &ref);
+
+        Net net = readNet(cfg, model);
+        net.setPreferableBackend(backend);
+        net.setPreferableTarget(target);
+        net.setInput(inp);
+        Mat out = net.forward();
+        normAssert(out, ref, "", default_l1, default_lInf);
+    }
+};
+
+class Test_Darknet_nets : public DNNTestLayer
+{
+public:
+    // Test object detection network from Darknet framework.
+    void testDarknetModel(const std::string& cfg, const std::string& weights,
+                          const std::vector<cv::String>& outNames,
+                          const std::vector<int>& refClassIds,
+                          const std::vector<float>& refConfidences,
+                          const std::vector<Rect2d>& refBoxes,
+                          double scoreDiff, double iouDiff, float confThreshold = 0.24)
+    {
+        checkBackend();
         Mat sample = imread(_tf("dog416.png"));
         Mat inp = blobFromImage(sample, 1.0/255, Size(416, 416), Scalar(), true, false);
         Net net = readNet(findDataFile("dnn/" + cfg, false),
                           findDataFile("dnn/" + weights, false));
-        net.setPreferableBackend(backendId);
-        net.setPreferableTarget(targetId);
+        net.setPreferableBackend(backend);
+        net.setPreferableTarget(target);
         net.setInput(inp);
         std::vector<Mat> outs;
         net.forward(outs, outNames);

         std::vector<int> classIds;
         std::vector<float> confidences;
         std::vector<Rect2d> boxes;
         for (int i = 0; i < outs.size(); ++i)
         {
             Mat& out = outs[i];
             for (int j = 0; j < out.rows; ++j)
             {
                 Mat scores = out.row(j).colRange(5, out.cols);
                 double confidence;
                 Point maxLoc;
                 minMaxLoc(scores, 0, &confidence, 0, &maxLoc);

                 float* detection = out.ptr<float>(j);
                 double centerX = detection[0];
                 double centerY = detection[1];
                 double width = detection[2];
                 double height = detection[3];
                 boxes.push_back(Rect2d(centerX - 0.5 * width, centerY - 0.5 * height,
                                        width, height));
                 confidences.push_back(confidence);
                 classIds.push_back(maxLoc.x);
             }
         }
         normAssertDetections(refClassIds, refConfidences, refBoxes, classIds,
                              confidences, boxes, "", confThreshold, scoreDiff, iouDiff);
     }
-}
-
-typedef testing::TestWithParam<tuple<DNNBackend, DNNTarget> > Test_Darknet_nets;
+};

 TEST_P(Test_Darknet_nets, YoloVoc)
 {
-    int backendId = get<0>(GetParam());
-    int targetId = get<1>(GetParam());
     std::vector<cv::String> outNames(1, "detection_out");

     std::vector<int> classIds(3);
@@ -143,34 +151,28 @@ TEST_P(Test_Darknet_nets, YoloVoc)
     classIds[0] = 6;  confidences[0] = 0.750469f; boxes[0] = Rect2d(0.577374, 0.127391, 0.325575, 0.173418);  // a car
     classIds[1] = 1;  confidences[1] = 0.780879f; boxes[1] = Rect2d(0.270762, 0.264102, 0.461713, 0.48131);   // a bicycle
     classIds[2] = 11; confidences[2] = 0.901615f; boxes[2] = Rect2d(0.1386, 0.338509, 0.282737, 0.60028);     // a dog
-    double scoreDiff = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 1e-2 : 8e-5;
-    double iouDiff = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 0.013 : 3e-5;
+    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 1e-2 : 8e-5;
+    double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.013 : 3e-5;
     testDarknetModel("yolo-voc.cfg", "yolo-voc.weights", outNames,
-                     classIds, confidences, boxes, backendId, targetId, scoreDiff, iouDiff);
+                     classIds, confidences, boxes, scoreDiff, iouDiff);
 }

 TEST_P(Test_Darknet_nets, TinyYoloVoc)
 {
-    int backendId = get<0>(GetParam());
-    int targetId = get<1>(GetParam());
     std::vector<cv::String> outNames(1, "detection_out");

     std::vector<int> classIds(2);
     std::vector<float> confidences(2);
     std::vector<Rect2d> boxes(2);
     classIds[0] = 6;  confidences[0] = 0.761967f; boxes[0] = Rect2d(0.579042, 0.159161, 0.31544, 0.160779);   // a car
     classIds[1] = 11; confidences[1] = 0.780595f; boxes[1] = Rect2d(0.129696, 0.386467, 0.315579, 0.534527);  // a dog
-    double scoreDiff = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 8e-3 : 8e-5;
-    double iouDiff = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 8e-3 : 3e-5;
+    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 8e-3 : 8e-5;
+    double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 8e-3 : 3e-5;
     testDarknetModel("tiny-yolo-voc.cfg", "tiny-yolo-voc.weights", outNames,
-                     classIds, confidences, boxes, backendId, targetId, scoreDiff, iouDiff);
+                     classIds, confidences, boxes, scoreDiff, iouDiff);
 }

 TEST_P(Test_Darknet_nets, YOLOv3)
 {
-    int backendId = get<0>(GetParam());
-    int targetId = get<1>(GetParam());
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
         throw SkipTestException("");
     std::vector<cv::String> outNames(3);
     outNames[0] = "yolo_82";
     outNames[1] = "yolo_94";
@@ -182,55 +184,41 @@ TEST_P(Test_Darknet_nets, YOLOv3)
     classIds[0] = 7;  confidences[0] = 0.952983f; boxes[0] = Rect2d(0.614622, 0.150257, 0.286747, 0.138994);  // a truck
     classIds[1] = 1;  confidences[1] = 0.987908f; boxes[1] = Rect2d(0.150913, 0.221933, 0.591342, 0.524327);  // a bicycle
     classIds[2] = 16; confidences[2] = 0.998836f; boxes[2] = Rect2d(0.160024, 0.389964, 0.257861, 0.553752);  // a dog (COCO)
-    double scoreDiff = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 4e-3 : 8e-5;
-    double iouDiff = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 0.011 : 3e-5;
+    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 4e-3 : 8e-5;
+    double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.011 : 3e-5;
     testDarknetModel("yolov3.cfg", "yolov3.weights", outNames,
-                     classIds, confidences, boxes, backendId, targetId, scoreDiff, iouDiff);
+                     classIds, confidences, boxes, scoreDiff, iouDiff);
 }

-const tuple<DNNBackend, DNNTarget> testCases[] = {
-#ifdef HAVE_INF_ENGINE
-    tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_CPU),
-    tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL),
-    tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL_FP16),
-    tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_MYRIAD),
-#endif
-    tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_CPU),
-    tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL),
-    tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL_FP16)
-};
-
-INSTANTIATE_TEST_CASE_P(/**/, Test_Darknet_nets, testing::ValuesIn(testCases));
+INSTANTIATE_TEST_CASE_P(/**/, Test_Darknet_nets, dnnBackendsAndTargets());

-static void testDarknetLayer(const std::string& name, bool hasWeights = false)
+TEST_P(Test_Darknet_layers, shortcut)
 {
-    std::string cfg = findDataFile("dnn/darknet/" + name + ".cfg", false);
-    std::string model = "";
-    if (hasWeights)
-        model = findDataFile("dnn/darknet/" + name + ".weights", false);
-    Mat inp = blobFromNPY(findDataFile("dnn/darknet/" + name + "_in.npy", false));
-    Mat ref = blobFromNPY(findDataFile("dnn/darknet/" + name + "_out.npy", false));
-
-    Net net = readNet(cfg, model);
-    net.setPreferableBackend(DNN_BACKEND_OPENCV);
-    net.setInput(inp);
-    Mat out = net.forward();
-    normAssert(out, ref);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_CPU)
+        throw SkipTestException("");
+    testDarknetLayer("shortcut");
 }

-TEST(Test_Darknet, shortcut)
+TEST_P(Test_Darknet_layers, upsample)
 {
-    testDarknetLayer("shortcut");
+    testDarknetLayer("upsample");
 }

-TEST(Test_Darknet, upsample)
+TEST_P(Test_Darknet_layers, avgpool_softmax)
 {
-    testDarknetLayer("upsample");
+    testDarknetLayer("avgpool_softmax");
 }

-TEST(Test_Darknet, avgpool_softmax)
+TEST_P(Test_Darknet_layers, region)
 {
-    testDarknetLayer("avgpool_softmax");
+    testDarknetLayer("region");
 }

+TEST_P(Test_Darknet_layers, reorg)
+{
+    testDarknetLayer("reorg");
+}
+
+INSTANTIATE_TEST_CASE_P(/**/, Test_Darknet_layers, dnnBackendsAndTargets());

 }} // namespace
modules/dnn/test/test_halide_layers.cpp

@@ -12,32 +12,60 @@
 namespace opencv_test { namespace {

-#ifdef HAVE_HALIDE
 using namespace cv;
 using namespace cv::dnn;
 using namespace testing;

-static void test(LayerParams& params, Mat& input)
+static void test(Mat& input, Net& net, int backendId, int targetId)
 {
+    DNNTestLayer::checkBackend(backendId, targetId);
     randu(input, -1.0f, 1.0f);
-
-    Net net;
-    int lid = net.addLayer(params.name, params.type, params);
-    net.connect(0, 0, lid, 0);

     net.setInput(input);
     net.setPreferableBackend(DNN_BACKEND_OPENCV);
-    Mat outputDefault = net.forward(params.name).clone();
+    Mat outputDefault = net.forward().clone();

-    net.setPreferableBackend(DNN_BACKEND_HALIDE);
-    Mat outputHalide = net.forward(params.name).clone();
-    normAssert(outputDefault, outputHalide);
+    net.setPreferableBackend(backendId);
+    net.setPreferableTarget(targetId);
+    Mat outputHalide = net.forward().clone();
+
+    double l1, lInf;
+    DNNTestLayer::getDefaultThresholds(backendId, targetId, &l1, &lInf);
+    normAssert(outputDefault, outputHalide, "", l1, lInf);
+}
+
+static void test(LayerParams& params, Mat& input, int backendId, int targetId)
+{
+    Net net;
+    net.addLayerToPrev(params.name, params.type, params);
+    test(input, net, backendId, targetId);
+}
+
+static testing::internal::ParamGenerator<tuple<DNNBackend, DNNTarget> > dnnBackendsAndTargetsWithHalide()
+{
+    static const tuple<DNNBackend, DNNTarget> testCases[] = {
+#ifdef HAVE_HALIDE
+        tuple<DNNBackend, DNNTarget>(DNN_BACKEND_HALIDE, DNN_TARGET_CPU),
+        tuple<DNNBackend, DNNTarget>(DNN_BACKEND_HALIDE, DNN_TARGET_OPENCL),
+#endif
+#ifdef HAVE_INF_ENGINE
+        tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_CPU),
+        tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL),
+        tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL_FP16),
+        tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_MYRIAD),
+#endif
+        tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL),
+        tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL_FP16)
+    };
+    return testing::ValuesIn(testCases);
 }

+class Test_Halide_layers : public DNNTestLayer {};
+
 ////////////////////////////////////////////////////////////////////////////////
 // Padding
 ////////////////////////////////////////////////////////////////////////////////
-TEST(Padding_Halide, Accuracy)
+TEST_P(Test_Halide_layers, Padding)
 {
     static const int kNumRuns = 10;
     std::vector<int> paddings(8);
@@ -52,15 +80,16 @@ TEST(Padding_Halide, Accuracy)
     lp.type = "Padding";
     lp.name = "testLayer";

-        Mat input({1 + rng(10), 1 + rng(10), 1 + rng(10), 1 + rng(10)}, CV_32F);
-        test(lp, input);
+        int sz[] = {1 + (int)rng(10), 1 + (int)rng(10), 1 + (int)rng(10), 1 + (int)rng(10)};
+        Mat input(4, &sz[0], CV_32F);
+        test(lp, input, backend, target);
     }
 }

 ////////////////////////////////////////////////////////////////////////////////
 // Convolution
 ////////////////////////////////////////////////////////////////////////////////
-typedef TestWithParam<tuple<Vec3i, Size, Size, Size, Size, Size, bool> > Convolution;
+typedef TestWithParam<tuple<Vec3i, Size, Size, Size, Size, Size, bool, tuple<DNNBackend, DNNTarget> > > Convolution;
 TEST_P(Convolution, Accuracy)
 {
     int inChannels = get<0>(GetParam())[0];
@@ -72,8 +101,15 @@ TEST_P(Convolution, Accuracy)
     Size pad = get<4>(GetParam());
     Size dilation = get<5>(GetParam());
     bool hasBias = get<6>(GetParam());
+    int backendId = get<0>(get<7>(GetParam()));
+    int targetId = get<1>(get<7>(GetParam()));
+    if ((backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD) ||
+        (backendId == DNN_BACKEND_OPENCV && targetId == DNN_TARGET_OPENCL_FP16))
+        throw SkipTestException("");

-    Mat weights({outChannels, inChannels / group, kernel.height, kernel.width}, CV_32F);
+    int sz[] = {outChannels, inChannels / group, kernel.height, kernel.width};
+    Mat weights(4, &sz[0], CV_32F);
     randu(weights, -1.0f, 1.0f);

     LayerParams lp;
@@ -93,12 +129,13 @@ TEST_P(Convolution, Accuracy)
     lp.blobs.push_back(weights);
     if (hasBias)
     {
-        Mat bias({outChannels}, CV_32F);
+        Mat bias(1, outChannels, CV_32F);
         randu(bias, -1.0f, 1.0f);
         lp.blobs.push_back(bias);
     }
-    Mat input({1, inChannels, inSize.height, inSize.width}, CV_32F);
-    test(lp, input);
+    int inpSz[] = {1, inChannels, inSize.height, inSize.width};
+    Mat input(4, &inpSz[0], CV_32F);
+    test(lp, input, backendId, targetId);
 }

 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Convolution, Combine(
@@ -110,13 +147,14 @@ INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Convolution, Combine(
 /*stride*/    Values(Size(1, 1), Size(2, 2)),
 /*pad*/       Values(Size(1, 0), Size(0, 1)),
 /*dilation*/  Values(Size(1, 1), Size(2, 2)),
-/*has bias*/  Bool()
+/*has bias*/  Bool(),
+              dnnBackendsAndTargetsWithHalide()
 ));

 ////////////////////////////////////////////////////////////////////////////////
 // Deconvolution
 ////////////////////////////////////////////////////////////////////////////////
-typedef TestWithParam<tuple<Vec3i, Size, Size, Size, Size, Vec4i, bool> > Deconvolution;
+typedef TestWithParam<tuple<Vec3i, Size, Size, Size, Size, Vec4i, bool, tuple<DNNBackend, DNNTarget> > > Deconvolution;
 TEST_P(Deconvolution, Accuracy)
 {
     int inChannels = get<0>(GetParam())[0];
@@ -129,8 +167,14 @@ TEST_P(Deconvolution, Accuracy)
     Size stride = Size(get<5>(GetParam())[0], get<5>(GetParam())[1]);
     Size adjPad = Size(get<5>(GetParam())[2], get<5>(GetParam())[3]);
     bool hasBias = get<6>(GetParam());
+    int backendId = get<0>(get<7>(GetParam()));
+    int targetId = get<1>(get<7>(GetParam()));
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_CPU &&
+        dilation.width == 2 && dilation.height == 2)
+        throw SkipTestException("");

-    Mat weights({inChannels, outChannels / group, kernel.height, kernel.width}, CV_32F);
+    int sz[] = {inChannels, outChannels / group, kernel.height, kernel.width};
+    Mat weights(4, &sz[0], CV_32F);
     randu(weights, -1.0f, 1.0f);

     LayerParams lp;
@@ -152,12 +196,13 @@ TEST_P(Deconvolution, Accuracy)
     lp.blobs.push_back(weights);
     if (hasBias)
     {
-        Mat bias({outChannels}, CV_32F);
+        Mat bias(1, outChannels, CV_32F);
         randu(bias, -1.0f, 1.0f);
         lp.blobs.push_back(bias);
     }
-    Mat input({1, inChannels, inSize.height, inSize.width}, CV_32F);
-    test(lp, input);
+    int inpSz[] = {1, inChannels, inSize.height, inSize.width};
+    Mat input(4, &inpSz[0], CV_32F);
+    test(lp, input, backendId, targetId);
 }

 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Deconvolution, Combine(
@@ -168,13 +213,14 @@ INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Deconvolution, Combine(
 /*pad*/       Values(Size(1, 0), Size(0, 1)),
 /*dilation*/  Values(Size(1, 1), Size(2, 2)),
 /*stride, adj. pad*/ Values(Vec4i(1,1, 0,0), Vec4i(2,2, 1,0), Vec4i(1,2, 0,1)),
-/*has bias*/  Bool()
+/*has bias*/  Bool(),
+              dnnBackendsAndTargetsWithHalide()
 ));

 ////////////////////////////////////////////////////////////////////////////////
 // LRN
 ////////////////////////////////////////////////////////////////////////////////
-typedef TestWithParam<tuple<Vec3i, int, Vec3f, bool, std::string> > LRN;
+typedef TestWithParam<tuple<Vec3i, int, Vec3f, bool, std::string, tuple<DNNBackend, DNNTarget> > > LRN;
 TEST_P(LRN, Accuracy)
 {
     int inChannels = get<0>(GetParam())[0];
@@ -185,6 +231,10 @@ TEST_P(LRN, Accuracy)
     float bias = get<2>(GetParam())[2];
     bool normBySize = get<3>(GetParam());
     std::string nrmType = get<4>(GetParam());
+    int backendId = get<0>(get<5>(GetParam()));
+    int targetId = get<1>(get<5>(GetParam()));
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+        throw SkipTestException("");

     LayerParams lp;
     lp.set("norm_region", nrmType);
@@ -196,8 +246,9 @@ TEST_P(LRN, Accuracy)
     lp.type = "LRN";
     lp.name = "testLayer";

-    Mat input({1, inChannels, inSize.height, inSize.width}, CV_32F);
-    test(lp, input);
+    int sz[] = {1, inChannels, inSize.height, inSize.width};
+    Mat input(4, &sz[0], CV_32F);
+    test(lp, input, backendId, targetId);
 }

 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, LRN, Combine(
@@ -207,19 +258,24 @@ INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, LRN, Combine(
 /*alpha, beta,*/ Vec3f(1.0f, 0.9f, 1.1f), Vec3f(1.0f, 1.1f, 0.9f),
 /*bias */        Vec3f(1.1f, 0.9f, 1.0f), Vec3f(1.1f, 1.0f, 0.9f)),
 /*norm_by_size*/ Bool(),
-/*norm_type*/    Values("ACROSS_CHANNELS", "WITHIN_CHANNEL")
+/*norm_type*/    Values("ACROSS_CHANNELS", "WITHIN_CHANNEL"),
+                 dnnBackendsAndTargetsWithHalide()
 ));

 ////////////////////////////////////////////////////////////////////////////////
 // Average pooling
 ////////////////////////////////////////////////////////////////////////////////
-typedef TestWithParam<tuple<int, Size, Size, Size> > AvePooling;
+typedef TestWithParam<tuple<int, Size, Size, Size, tuple<DNNBackend, DNNTarget> > > AvePooling;
 TEST_P(AvePooling, Accuracy)
 {
     int inChannels = get<0>(GetParam());
     Size outSize = get<1>(GetParam());;  // Input size will be computed from parameters.
     Size kernel = get<2>(GetParam());
     Size stride = get<3>(GetParam());
+    int backendId = get<0>(get<4>(GetParam()));
+    int targetId = get<1>(get<4>(GetParam()));
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD)
+        throw SkipTestException("");

     const int inWidth = (outSize.width - 1) * stride.width + kernel.width;
     const int inHeight = (outSize.height - 1) * stride.height + kernel.height;
@@ -233,21 +289,23 @@ TEST_P(AvePooling, Accuracy)
     lp.type = "Pooling";
     lp.name = "testLayer";

-    Mat input({1, inChannels, inHeight, inWidth}, CV_32F);
-    test(lp, input);
+    int sz[] = {1, inChannels, inHeight, inWidth};
+    Mat input(4, &sz[0], CV_32F);
+    test(lp, input, backendId, targetId);
 }

 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, AvePooling, Combine(
 /*in channels*/ Values(3, 4),
 /*out size*/    Values(Size(1, 1), Size(2, 2), Size(3, 2), Size(4, 7)),
 /*kernel*/      Values(Size(1, 1), Size(2, 2), Size(3, 3), Size(3, 2)),
-/*stride*/      Values(Size(1, 1), Size(2, 2), Size(3, 2))
+/*stride*/      Values(Size(1, 1), Size(2, 2), Size(3, 2)),
+                dnnBackendsAndTargetsWithHalide()
 ));

 ////////////////////////////////////////////////////////////////////////////////
 // Maximum pooling
 ////////////////////////////////////////////////////////////////////////////////
-typedef TestWithParam<tuple<int, Size, Size, Size, Size> > MaxPooling;
+typedef TestWithParam<tuple<int, Size, Size, Size, Size, tuple<DNNBackend, DNNTarget> > > MaxPooling;
 TEST_P(MaxPooling, Accuracy)
 {
     int inChannels = get<0>(GetParam());
@@ -255,6 +313,8 @@ TEST_P(MaxPooling, Accuracy)
     Size kernel = get<2>(GetParam());
     Size stride = get<3>(GetParam());
     Size pad = get<4>(GetParam());
+    int backendId = get<0>(get<5>(GetParam()));
+    int targetId = get<1>(get<5>(GetParam()));

     LayerParams lp;
     lp.set("pool", "max");
@@ -267,8 +327,9 @@ TEST_P(MaxPooling, Accuracy)
     lp.type = "Pooling";
     lp.name = "testLayer";

-    Mat input({1, inChannels, inSize.height, inSize.width}, CV_32F);
-    test(lp, input);
+    int sz[] = {1, inChannels, inSize.height, inSize.width};
+    Mat input(4, &sz[0], CV_32F);
+    test(lp, input, backendId, targetId);
 }

 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, MaxPooling, Combine(
@@ -276,19 +337,25 @@ INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, MaxPooling, Combine(
 /*in size*/ Values(Size(5, 5), Size(7, 6)),
 /*kernel*/  Values(Size(2, 2), Size(3, 3), Size(3, 2)),
 /*stride*/  Values(Size(1, 1), Size(2, 2), Size(3, 2)),
-/*pad*/     Values(Size(0, 0), Size(1, 1), Size(0, 1))
+/*pad*/     Values(Size(0, 0), Size(1, 1), Size(0, 1)),
+            dnnBackendsAndTargetsWithHalide()
 ));

 ////////////////////////////////////////////////////////////////////////////////
 // Fully-connected
 ////////////////////////////////////////////////////////////////////////////////
-typedef TestWithParam<tuple<int, Size, int, bool> > FullyConnected;
+typedef TestWithParam<tuple<int, Size, int, bool, tuple<DNNBackend, DNNTarget> > > FullyConnected;
 TEST_P(FullyConnected, Accuracy)
 {
     int inChannels = get<0>(GetParam());
     Size inSize = get<1>(GetParam());
     int outChannels = get<2>(GetParam());
     bool hasBias = get<3>(GetParam());
+    int backendId = get<0>(get<4>(GetParam()));
+    int targetId = get<1>(get<4>(GetParam()));
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE ||
+        (backendId == DNN_BACKEND_OPENCV && targetId == DNN_TARGET_OPENCL_FP16))
+        throw SkipTestException("");

     Mat weights(outChannels, inChannels * inSize.height * inSize.width, CV_32F);
     randu(weights, -1.0f, 1.0f);
@@ -304,39 +371,50 @@ TEST_P(FullyConnected, Accuracy)
     lp.type = "InnerProduct";
     lp.name = "testLayer";

-    Mat input({1, inChannels, inSize.height, inSize.width}, CV_32F);
-    test(lp, input);
+    int sz[] = {1, inChannels, inSize.height, inSize.width};
+    Mat input(4, &sz[0], CV_32F);
+    test(lp, input, backendId, targetId);
 }

 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, FullyConnected, Combine(
 /*in channels*/  Values(3, 4),
 /*in size*/      Values(Size(5, 4), Size(4, 5), Size(1, 1)),
 /*out channels*/ Values(3, 4),
-/*has bias*/     Bool()
+/*has bias*/     Bool(),
+                 dnnBackendsAndTargetsWithHalide()
 ));

 ////////////////////////////////////////////////////////////////////////////////
 // SoftMax
 ////////////////////////////////////////////////////////////////////////////////
-typedef TestWithParam<tuple<int> > SoftMax;
+typedef TestWithParam<tuple<int, tuple<DNNBackend, DNNTarget> > > SoftMax;
 TEST_P(SoftMax, Accuracy)
 {
     int inChannels = get<0>(GetParam());
+    int backendId = get<0>(get<1>(GetParam()));
+    int targetId = get<1>(get<1>(GetParam()));
     LayerParams lp;
     lp.type = "SoftMax";
     lp.name = "testLayer";

-    Mat input({1, inChannels, 1, 1}, CV_32F);
-    test(lp, input);
+    int sz[] = {1, inChannels, 1, 1};
+    Mat input(4, &sz[0], CV_32F);
+    test(lp, input, backendId, targetId);
 }

-INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, SoftMax, Values(3, 4, 5, 1024));
+INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, SoftMax, Combine(
+    Values(3, 4, 5, 1024),
+    dnnBackendsAndTargetsWithHalide()
+));

 //////////////////////////////////////////////////////////////////////////////
 // Max pooling - unpooling
 //////////////////////////////////////////////////////////////////////////////
-TEST(MaxPoolUnpool_Halide, Accuracy)
+TEST_P(Test_Halide_layers, MaxPoolUnpool)
 {
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+        throw SkipTestException("");
+
     LayerParams pool;
     pool.set("pool", "max");
     pool.set("kernel_w", 2);
@@ -366,16 +444,9 @@ TEST(MaxPoolUnpool_Halide, Accuracy)
     net.connect(poolId, 0, unpoolId, 0);
     net.connect(poolId, 1, unpoolId, 1);

-    Mat input({1, 1, 4, 4}, CV_32F);
-    randu(input, -1.0f, 1.0f);
-    net.setInput(input);
-    net.setPreferableBackend(DNN_BACKEND_OPENCV);
-    Mat outputDefault = net.forward("testUnpool").clone();
-
-    net.setPreferableBackend(DNN_BACKEND_HALIDE);
-    net.setInput(input);
-    Mat outputHalide = net.forward("testUnpool").clone();
-    normAssert(outputDefault, outputHalide);
+    int sz[] = {1, 1, 4, 4};
+    Mat input(4, &sz[0], CV_32F);
+    test(input, net, backend, target);
 }

 ////////////////////////////////////////////////////////////////////////////////
@@ -383,7 +454,7 @@ TEST(MaxPoolUnpool_Halide, Accuracy)
 ////////////////////////////////////////////////////////////////////////////////
 static const int kNumChannels = 3;

-void testInPlaceActivation(LayerParams& lp)
+void testInPlaceActivation(LayerParams& lp, int backendId, int targetId)
 {
     EXPECT_FALSE(lp.name.empty());
@@ -400,24 +471,19 @@ void testInPlaceActivation(LayerParams& lp)
     net.connect(0, 0, poolId, 0);
     net.addLayerToPrev(lp.name, lp.type, lp);

-    Mat input({1, kNumChannels, 10, 10}, CV_32F);
-    randu(input, -1.0f, 1.0f);
-    net.setInput(input);
-    net.setPreferableBackend(DNN_BACKEND_OPENCV);
-    Mat outputDefault = net.forward(lp.name).clone();
-
-    net.setInput(input);
-    net.setPreferableBackend(DNN_BACKEND_HALIDE);
-    Mat outputHalide = net.forward(lp.name).clone();
-    normAssert(outputDefault, outputHalide);
+    int sz[] = {1, kNumChannels, 10, 10};
+    Mat input(4, &sz[0], CV_32F);
+    test(input, net, backendId, targetId);
 }

-typedef TestWithParam<tuple<bool, bool, float> > BatchNorm;
+typedef TestWithParam<tuple<bool, bool, float, tuple<DNNBackend, DNNTarget> > > BatchNorm;
 TEST_P(BatchNorm, Accuracy)
 {
     bool hasWeights = get<0>(GetParam());
     bool hasBias = get<1>(GetParam());
     float epsilon = get<2>(GetParam());
+    int backendId = get<0>(get<3>(GetParam()));
+    int targetId = get<1>(get<3>(GetParam()));

     LayerParams lp;
     lp.set("has_weight", hasWeights);
@@ -428,56 +494,66 @@ TEST_P(BatchNorm, Accuracy)
     lp.blobs.reserve(4);
     for (int i = 0; i < 3; ++i)
-        lp.blobs.push_back(Mat({kNumChannels}, CV_32F));
+        lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
     if (hasBias || hasWeights)
-        lp.blobs.push_back(Mat({kNumChannels}, CV_32F));
-    for (Mat& m : lp.blobs)
-        randu(m, 0.0f, 1.0f);
-    testInPlaceActivation(lp);
+        lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
+    for (int i = 0; i < lp.blobs.size(); ++i)
+        randu(lp.blobs[i], 0.0f, 1.0f);
+    testInPlaceActivation(lp, backendId, targetId);
 }

 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, BatchNorm, Combine(
 /*has weights*/ Bool(),
 /*has bias*/    Bool(),
-/*epsilon*/     Values(1e-3f, 1e-5f)
+/*epsilon*/     Values(1e-3f, 1e-5f),
+                dnnBackendsAndTargetsWithHalide()
 ));

-typedef TestWithParam<tuple<float> > ReLU;
+typedef TestWithParam<tuple<float, tuple<DNNBackend, DNNTarget> > > ReLU;
 TEST_P(ReLU, Accuracy)
 {
     float negativeSlope = get<0>(GetParam());
+    int backendId = get<0>(get<1>(GetParam()));
+    int targetId = get<1>(get<1>(GetParam()));

     LayerParams lp;
     lp.set("negative_slope", negativeSlope);
     lp.type = "ReLU";
     lp.name = "testLayer";
-    testInPlaceActivation(lp);
+    testInPlaceActivation(lp, backendId, targetId);
 }

-INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, ReLU, Values(
-/*negative slope*/ 2.0f, 0.3f, -0.1f, 0.0f
+INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, ReLU, Combine(
+/*negative slope*/ Values(2.0f, 0.3f, -0.1f, 0.0f),
+                   dnnBackendsAndTargetsWithHalide()
 ));

-typedef TestWithParam<tuple<std::string> > NoParamActivation;
+typedef TestWithParam<tuple<std::string, tuple<DNNBackend, DNNTarget> > > NoParamActivation;
 TEST_P(NoParamActivation, Accuracy)
 {
+    int backendId = get<0>(get<1>(GetParam()));
+    int targetId = get<1>(get<1>(GetParam()));
     LayerParams lp;
     lp.type = get<0>(GetParam());
     lp.name = "testLayer";
-    testInPlaceActivation(lp);
+    testInPlaceActivation(lp, backendId, targetId);
 }

-INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, NoParamActivation, Values(
-/*type*/ "TanH", "Sigmoid", "AbsVal", "BNLL"
+INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, NoParamActivation, Combine(
+/*type*/ Values("TanH", "Sigmoid", "AbsVal", "BNLL"),
+         dnnBackendsAndTargetsWithHalide()
 ));

-typedef TestWithParam<tuple<Vec3f> > Power;
+typedef TestWithParam<tuple<Vec3f, tuple<DNNBackend, DNNTarget> > > Power;
 TEST_P(Power, Accuracy)
 {
     float power = get<0>(GetParam())[0];
     float scale = get<0>(GetParam())[1];
     float shift = get<0>(GetParam())[2];
+    int backendId = get<0>(get<1>(GetParam()));
+    int targetId = get<1>(get<1>(GetParam()));

     LayerParams lp;
     lp.set("power", power);
@@ -485,46 +561,52 @@ TEST_P(Power, Accuracy)
     lp.set("shift", shift);
     lp.type = "Power";
     lp.name = "testLayer";
-    testInPlaceActivation(lp);
+    testInPlaceActivation(lp, backendId, targetId);
 }

-INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Power,
+INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Power, Combine(
 /*power, scale, shift*/ Values(Vec3f(0.9f, 1.0f, 1.1f), Vec3f(0.9f, 1.1f, 1.0f),
                                Vec3f(1.0f, 0.9f, 1.1f), Vec3f(1.0f, 1.1f, 0.9f),
-                               Vec3f(1.1f, 0.9f, 1.0f), Vec3f(1.1f, 1.0f, 0.9f))
-);
+                               Vec3f(1.1f, 0.9f, 1.0f), Vec3f(1.1f, 1.0f, 0.9f)),
+                        dnnBackendsAndTargetsWithHalide()
+));

-TEST(ChannelsPReLU, Accuracy)
+TEST_P(Test_Halide_layers, ChannelsPReLU)
 {
     LayerParams lp;
     lp.type = "ChannelsPReLU";
     lp.name = "testLayer";
-    lp.blobs.push_back(Mat({kNumChannels}, CV_32F));
+    lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
     randu(lp.blobs[0], -1.0f, 1.0f);

-    testInPlaceActivation(lp);
+    testInPlaceActivation(lp, backend, target);
 }

-typedef TestWithParam<tuple<bool> > Scale;
+typedef TestWithParam<tuple<bool, tuple<DNNBackend, DNNTarget> > > Scale;
 TEST_P(Scale, Accuracy)
 {
     bool hasBias = get<0>(GetParam());
+    int backendId = get<0>(get<1>(GetParam()));
+    int targetId = get<1>(get<1>(GetParam()));

     LayerParams lp;
     lp.set("bias_term", hasBias);
     lp.type = "Scale";
     lp.name = "testLayer";
-    lp.blobs.push_back(Mat({kNumChannels}, CV_32F));
+    lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
     randu(lp.blobs[0], -1.0f, 1.0f);
     if (hasBias)
     {
-        lp.blobs.push_back(Mat({kNumChannels}, CV_32F));
+        lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
         randu(lp.blobs[1], -1.0f, 1.0f);
     }
-    testInPlaceActivation(lp);
+    testInPlaceActivation(lp, backendId, targetId);
 }
-INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Scale, Values(true, false));
+INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Scale, Combine(
+    Bool(),
+    dnnBackendsAndTargetsWithHalide()
+));

 ////////////////////////////////////////////////////////////////////////////////
 // Concat layer
@@ -534,11 +616,13 @@ INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Scale, Values(true, false));
 // `--- conv ----^ ^ ^
 // `---- ... ------' '
 // `-----------------'
-typedef TestWithParam<tuple<Vec3i, Vec3i> > Concat;
+typedef TestWithParam<tuple<Vec3i, Vec3i, tuple<DNNBackend, DNNTarget> > > Concat;
 TEST_P(Concat, Accuracy)
 {
     Vec3i inSize = get<0>(GetParam());
     Vec3i numChannels = get<1>(GetParam());
+    int backendId = get<0>(get<2>(GetParam()));
+    int targetId = get<1>(get<2>(GetParam()));

     Net net;
@@ -549,7 +633,8 @@ TEST_P(Concat, Accuracy)
         if (!numChannels[i])
             break;

-        Mat weights({numChannels[i], inSize[0], 1, 1}, CV_32F);
+        int sz[] = {numChannels[i], inSize[0], 1, 1};
+        Mat weights(4, &sz[0], CV_32F);
         randu(weights, -1.0f, 1.0f);

         LayerParams convParam;
@@ -578,21 +663,15 @@ TEST_P(Concat, Accuracy)
         net.connect(convLayerIds[i], 0, concatId, i + 1);
     }

-    Mat input({1, inSize[0], inSize[1], inSize[2]}, CV_32F);
-    randu(input, -1.0f, 1.0f);
-
-    net.setInput(input);
-    net.setPreferableBackend(DNN_BACKEND_OPENCV);
-    Mat outputDefault = net.forward(concatParam.name).clone();
-
-    net.setPreferableBackend(DNN_BACKEND_HALIDE);
-    Mat outputHalide = net.forward(concatParam.name).clone();
-    normAssert(outputDefault, outputHalide);
+    int sz[] = {1, inSize[0], inSize[1], inSize[2]};
+    Mat input(4, &sz[0], CV_32F);
+    test(input, net, backendId, targetId);
 }

 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Concat, Combine(
 /*input size*/ Values(Vec3i(1, 4, 5), Vec3i(2, 8, 6)),
-/*channels*/   Values(Vec3i(2, 0, 0), Vec3i(3, 4, 0), Vec3i(1, 6, 2))
+/*channels*/   Values(Vec3i(2, 0, 0), Vec3i(3, 4, 0), Vec3i(1, 6, 2)),
+               dnnBackendsAndTargetsWithHalide()
 ));

 ////////////////////////////////////////////////////////////////////////////////
@@ -603,20 +682,27 @@ INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Concat, Combine(
 // `--- conv ----^ ^ ^
 // `---- ... ------' '
 // `-----------------'
-typedef TestWithParam<tuple<Vec3i, std::string, int, bool> > Eltwise;
+typedef TestWithParam<tuple<Vec3i, std::string, int, bool, tuple<DNNBackend, DNNTarget> > > Eltwise;
 TEST_P(Eltwise, Accuracy)
 {
     Vec3i inSize = get<0>(GetParam());
     std::string op = get<1>(GetParam());
     int numConv = get<2>(GetParam());
     bool weighted = get<3>(GetParam());
+    int backendId = get<0>(get<4>(GetParam()));
+    int targetId = get<1>(get<4>(GetParam()));
+    if (backendId == DNN_BACKEND_OPENCV &&
+        (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
+        throw SkipTestException("");

     Net net;

     std::vector<int> convLayerIds(numConv);
     for (int i = 0; i < numConv; ++i)
     {
-        Mat weights({inSize[0], inSize[0], 1, 1}, CV_32F);
+        int sz[] = {inSize[0], inSize[0], 1, 1};
+        Mat weights(4, &sz[0], CV_32F);
         randu(weights, -1.0f, 1.0f);

         LayerParams convParam;
@@ -655,28 +741,23 @@ TEST_P(Eltwise, Accuracy)
         net.connect(convLayerIds[i], 0, eltwiseId, i + 1);
     }

-    Mat input({1, inSize[0], inSize[1], inSize[2]}, CV_32F);
-    randu(input, -1.0f, 1.0f);
-
-    net.setInput(input);
-    net.setPreferableBackend(DNN_BACKEND_OPENCV);
-    Mat outputDefault = net.forward(eltwiseParam.name).clone();
-
-    net.setPreferableBackend(DNN_BACKEND_HALIDE);
-    Mat outputHalide = net.forward(eltwiseParam.name).clone();
-    normAssert(outputDefault, outputHalide);
+    int sz[] = {1, inSize[0], inSize[1], inSize[2]};
+    Mat input(4, &sz[0], CV_32F);
+    test(input, net, backendId, targetId);
 }

 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Eltwise, Combine(
 /*input size*/ Values(Vec3i(1, 4, 5), Vec3i(2, 8, 6)),
 /*operation*/  Values("prod", "sum", "max"),
 /*num convs*/  Values(1, 2, 3),
-/*weighted(for sum only)*/ Bool()
+/*weighted(for sum only)*/ Bool(),
+               dnnBackendsAndTargetsWithHalide()
 ));

 ////////////////////////////////////////////////////////////////////////////
 // Mixed backends
 ////////////////////////////////////////////////////////////////////////////
 #ifdef HAVE_HALIDE
 TEST(MixedBackends_Halide_Default_Halide, Accuracy)
 {
     // Just a layer that supports Halide backend.
@@ -700,7 +781,8 @@ TEST(MixedBackends_Halide_Default_Halide, Accuracy)
     net.addLayerToPrev(mvn.name, mvn.type, mvn);
     net.addLayerToPrev(lrn2.name, lrn2.type, lrn2);

-    Mat input({4, 3, 5, 6}, CV_32F);
+    int sz[] = {4, 3, 5, 6};
+    Mat input(4, &sz[0], CV_32F);
     randu(input, -1.0f, 1.0f);
     net.setInput(input);
     net.setPreferableBackend(DNN_BACKEND_OPENCV);
@@ -718,4 +800,6 @@ TEST(MixedBackends_Halide_Default_Halide, Accuracy)
 }
 #endif  // HAVE_HALIDE

+INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_Halide_layers, dnnBackendsAndTargetsWithHalide());
+
 }} // namespace
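The pattern above, a Test_Halide_layers fixture derived from DNNTestLayer plus dnnBackendsAndTargetsWithHalide() feeding INSTANTIATE_TEST_CASE_P, is what every per-layer check in this file now follows. As a minimal sketch of how one more layer test would plug into it (the "Flatten" layer choice and the blob shape are illustrative assumptions, not part of this patch):

    // Hypothetical extra test following the fixture introduced in this file.
    TEST_P(Test_Halide_layers, Flatten)
    {
        LayerParams lp;
        lp.type = "Flatten";
        lp.name = "testLayer";

        int sz[] = {1, 3, 5, 4};           // arbitrary NCHW input shape
        Mat input(4, &sz[0], CV_32F);
        test(lp, input, backend, target);  // compares against the default OpenCV backend
    }
    // The existing INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_Halide_layers, ...) already
    // expands such a test over every backend/target pair returned by the generator.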
modules/dnn/test/test_layers.cpp
View file @
523b6f32
...
...
@@ -92,75 +92,84 @@ void runLayer(Ptr<Layer> layer, std::vector<Mat> &inpBlobs, std::vector<Mat> &ou
outBlobs
[
i
]
=
outp
[
i
];
}
void
testLayerUsingCaffeModels
(
String
basename
,
int
targetId
=
DNN_TARGET_CPU
,
bool
useCaffeModel
=
false
,
bool
useCommonInputBlob
=
true
)
class
Test_Caffe_layers
:
public
DNNTestLayer
{
String
prototxt
=
_tf
(
basename
+
".prototxt"
);
String
caffemodel
=
_tf
(
basename
+
".caffemodel"
);
public
:
void
testLayerUsingCaffeModels
(
const
String
&
basename
,
bool
useCaffeModel
=
false
,
bool
useCommonInputBlob
=
true
,
double
l1
=
0.0
,
double
lInf
=
0.0
)
{
String
prototxt
=
_tf
(
basename
+
".prototxt"
);
String
caffemodel
=
_tf
(
basename
+
".caffemodel"
);
String
inpfile
=
(
useCommonInputBlob
)
?
_tf
(
"blob.npy"
)
:
_tf
(
basename
+
".input.npy"
);
String
outfile
=
_tf
(
basename
+
".npy"
);
String
inpfile
=
(
useCommonInputBlob
)
?
_tf
(
"blob.npy"
)
:
_tf
(
basename
+
".input.npy"
);
String
outfile
=
_tf
(
basename
+
".npy"
);
Net
net
=
readNetFromCaffe
(
prototxt
,
(
useCaffeModel
)
?
caffemodel
:
String
());
ASSERT_FALSE
(
net
.
empty
());
Mat
inp
=
blobFromNPY
(
inpfile
);
Mat
ref
=
blobFromNPY
(
outfile
);
checkBackend
(
&
inp
,
&
ref
);
net
.
setPreferableBackend
(
DNN_BACKEND_OPENCV
);
net
.
setPreferableTarget
(
targetId
);
Net
net
=
readNetFromCaffe
(
prototxt
,
(
useCaffeModel
)
?
caffemodel
:
String
()
);
ASSERT_FALSE
(
net
.
empty
()
);
Mat
inp
=
blobFromNPY
(
inpfile
);
Mat
ref
=
blobFromNPY
(
outfile
);
net
.
setPreferableBackend
(
backend
);
net
.
setPreferableTarget
(
target
);
net
.
setInput
(
inp
,
"input"
);
Mat
out
=
net
.
forward
(
"output"
);
net
.
setInput
(
inp
,
"input"
);
Mat
out
=
net
.
forward
(
"output"
);
normAssert
(
ref
,
out
);
}
normAssert
(
ref
,
out
,
""
,
l1
?
l1
:
default_l1
,
lInf
?
lInf
:
default_lInf
);
}
};
typedef
testing
::
TestWithParam
<
DNNTarget
>
Test_Caffe_layers
;
TEST_P
(
Test_Caffe_layers
,
Softmax
)
{
testLayerUsingCaffeModels
(
"layer_softmax"
,
GetParam
()
);
testLayerUsingCaffeModels
(
"layer_softmax"
);
}
TEST_P
(
Test_Caffe_layers
,
LRN_spatial
)
{
testLayerUsingCaffeModels
(
"layer_lrn_spatial"
,
GetParam
());
if
(
backend
==
DNN_BACKEND_INFERENCE_ENGINE
&&
target
==
DNN_TARGET_MYRIAD
)
throw
SkipTestException
(
""
);
testLayerUsingCaffeModels
(
"layer_lrn_spatial"
);
}
TEST_P
(
Test_Caffe_layers
,
LRN_channels
)
{
testLayerUsingCaffeModels
(
"layer_lrn_channels"
,
GetParam
()
);
testLayerUsingCaffeModels
(
"layer_lrn_channels"
);
}
TEST_P
(
Test_Caffe_layers
,
Convolution
)
{
testLayerUsingCaffeModels
(
"layer_convolution"
,
GetParam
(),
true
);
testLayerUsingCaffeModels
(
"layer_convolution"
,
true
);
}
TEST_P
(
Test_Caffe_layers
,
DeConvolution
)
{
testLayerUsingCaffeModels
(
"layer_deconvolution"
,
GetParam
(),
true
,
false
);
testLayerUsingCaffeModels
(
"layer_deconvolution"
,
true
,
false
);
}
TEST_P
(
Test_Caffe_layers
,
InnerProduct
)
{
testLayerUsingCaffeModels
(
"layer_inner_product"
,
GetParam
(),
true
);
if
(
backend
==
DNN_BACKEND_INFERENCE_ENGINE
||
(
backend
==
DNN_BACKEND_OPENCV
&&
target
==
DNN_TARGET_OPENCL_FP16
))
throw
SkipTestException
(
""
);
testLayerUsingCaffeModels
(
"layer_inner_product"
,
true
);
}
TEST_P
(
Test_Caffe_layers
,
Pooling_max
)
{
testLayerUsingCaffeModels
(
"layer_pooling_max"
,
GetParam
()
);
testLayerUsingCaffeModels
(
"layer_pooling_max"
);
}
TEST_P
(
Test_Caffe_layers
,
Pooling_ave
)
{
testLayerUsingCaffeModels
(
"layer_pooling_ave"
,
GetParam
()
);
testLayerUsingCaffeModels
(
"layer_pooling_ave"
);
}
TEST_P
(
Test_Caffe_layers
,
MVN
)
{
testLayerUsingCaffeModels
(
"layer_mvn"
,
GetParam
()
);
testLayerUsingCaffeModels
(
"layer_mvn"
);
}
void
testReshape
(
const
MatShape
&
inputShape
,
const
MatShape
&
targetShape
,
...
...
@@ -210,33 +219,38 @@ TEST(Layer_Test_Reshape, Accuracy)
}
}
TEST
(
Layer_Test_BatchNorm
,
Accuracy
)
{
testLayerUsingCaffeModels
(
"layer_batch_norm"
,
DNN_TARGET_CPU
,
true
);
}
TEST
(
Layer_Test_BatchNorm
,
local_stats
)
TEST_P
(
Test_Caffe_layers
,
BatchNorm
)
{
testLayerUsingCaffeModels
(
"layer_batch_norm_local_stats"
,
DNN_TARGET_CPU
,
true
,
false
);
if
(
backend
==
DNN_BACKEND_INFERENCE_ENGINE
)
throw
SkipTestException
(
""
);
testLayerUsingCaffeModels
(
"layer_batch_norm"
,
true
);
testLayerUsingCaffeModels
(
"layer_batch_norm_local_stats"
,
true
,
false
);
}
TEST_P
(
Test_Caffe_layers
,
ReLU
)
{
testLayerUsingCaffeModels
(
"layer_relu"
,
GetParam
()
);
testLayerUsingCaffeModels
(
"layer_relu"
);
}
TEST
(
Layer_Test_Dropout
,
Accuracy
)
TEST
_P
(
Test_Caffe_layers
,
Dropout
)
{
testLayerUsingCaffeModels
(
"layer_dropout"
);
}
TEST_P
(
Test_Caffe_layers
,
Concat
)
{
testLayerUsingCaffeModels
(
"layer_concat"
,
GetParam
());
testLayerUsingCaffeModels
(
"layer_concat"
);
testLayerUsingCaffeModels
(
"layer_concat_optim"
,
true
,
false
);
testLayerUsingCaffeModels
(
"layer_concat_shared_input"
,
true
,
false
);
}
TEST
(
Layer_Test_Fused_Concat
,
Accuracy
)
TEST
_P
(
Test_Caffe_layers
,
Fused_Concat
)
{
if
((
backend
==
DNN_BACKEND_INFERENCE_ENGINE
&&
target
==
DNN_TARGET_CPU
)
||
(
backend
==
DNN_BACKEND_INFERENCE_ENGINE
&&
target
==
DNN_TARGET_OPENCL
))
throw
SkipTestException
(
""
);
checkBackend
();
// Test case
// input
// |
...
...
@@ -267,28 +281,32 @@ TEST(Layer_Test_Fused_Concat, Accuracy)
randu
(
input
,
0.0
f
,
1.0
f
);
// [0, 1] to make AbsVal an identity transformation.
net
.
setInput
(
input
);
net
.
setPreferableBackend
(
DNN_BACKEND_OPENCV
);
net
.
setPreferableBackend
(
backend
);
net
.
setPreferableTarget
(
target
);
Mat
out
=
net
.
forward
();
normAssert
(
slice
(
out
,
Range
::
all
(),
Range
(
0
,
2
),
Range
::
all
(),
Range
::
all
()),
input
);
normAssert
(
slice
(
out
,
Range
::
all
(),
Range
(
2
,
4
),
Range
::
all
(),
Range
::
all
()),
input
);
//
testLayerUsingCaffeModels
(
"layer_concat_optim"
,
DNN_TARGET_CPU
,
true
,
false
);
testLayerUsingCaffeModels
(
"layer_concat_shared_input"
,
DNN_TARGET_CPU
,
true
,
false
);
normAssert
(
slice
(
out
,
Range
::
all
(),
Range
(
0
,
2
),
Range
::
all
(),
Range
::
all
()),
input
,
""
,
default_l1
,
default_lInf
);
normAssert
(
slice
(
out
,
Range
::
all
(),
Range
(
2
,
4
),
Range
::
all
(),
Range
::
all
()),
input
,
""
,
default_l1
,
default_lInf
);
}
TEST_P
(
Test_Caffe_layers
,
Eltwise
)
{
testLayerUsingCaffeModels
(
"layer_eltwise"
,
GetParam
());
if
(
backend
==
DNN_BACKEND_INFERENCE_ENGINE
)
throw
SkipTestException
(
""
);
testLayerUsingCaffeModels
(
"layer_eltwise"
);
}
TEST_P
(
Test_Caffe_layers
,
PReLU
)
{
int
targetId
=
GetParam
();
testLayerUsingCaffeModels
(
"layer_prelu"
,
targetId
,
true
);
testLayerUsingCaffeModels
(
"layer_prelu_fc"
,
targetId
,
true
,
false
);
testLayerUsingCaffeModels
(
"layer_prelu"
,
true
);
}
// TODO: fix an unstable test case
TEST_P
(
Test_Caffe_layers
,
layer_prelu_fc
)
{
if
(
backend
==
DNN_BACKEND_OPENCV
&&
target
==
DNN_TARGET_OPENCL_FP16
)
throw
SkipTestException
(
""
);
testLayerUsingCaffeModels
(
"layer_prelu_fc"
,
true
,
false
);
}
//template<typename XMat>
...
...
@@ -311,13 +329,16 @@ TEST_P(Test_Caffe_layers, PReLU)
// );
//}
static
void
test_Reshape_Split_Slice_layers
(
int
targetId
)
TEST_P
(
Test_Caffe_layers
,
Reshape_Split_Slice
)
{
if
(
backend
==
DNN_BACKEND_INFERENCE_ENGINE
)
throw
SkipTestException
(
""
);
Net
net
=
readNetFromCaffe
(
_tf
(
"reshape_and_slice_routines.prototxt"
));
ASSERT_FALSE
(
net
.
empty
());
net
.
setPreferableBackend
(
DNN_BACKEND_OPENCV
);
net
.
setPreferableTarget
(
target
Id
);
net
.
setPreferableBackend
(
backend
);
net
.
setPreferableTarget
(
target
);
Mat
input
(
6
,
12
,
CV_32F
);
RNG
rng
(
0
);
...
...
@@ -326,15 +347,10 @@ static void test_Reshape_Split_Slice_layers(int targetId)
net
.
setInput
(
input
,
"input"
);
Mat
output
=
net
.
forward
(
"output"
);
normAssert
(
input
,
output
);
normAssert
(
input
,
output
,
""
,
default_l1
,
default_lInf
);
}
TEST_P
(
Test_Caffe_layers
,
Reshape_Split_Slice
)
{
test_Reshape_Split_Slice_layers
(
GetParam
());
}
TEST
(
Layer_Conv_Elu
,
Accuracy
)
TEST_P
(
Test_Caffe_layers
,
Conv_Elu
)
{
Net
net
=
readNetFromTensorflow
(
_tf
(
"layer_elu_model.pb"
));
ASSERT_FALSE
(
net
.
empty
());
...
...
@@ -343,10 +359,11 @@ TEST(Layer_Conv_Elu, Accuracy)
    Mat ref = blobFromNPY(_tf("layer_elu_out.npy"));
    net.setInput(inp, "input");
    net.setPreferableBackend(DNN_BACKEND_OPENCV);
    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);
    Mat out = net.forward();
    normAssert(ref, out);
    normAssert(ref, out, "", default_l1, default_lInf);
}
class Layer_LSTM_Test : public ::testing::Test
...
...
@@ -496,37 +513,6 @@ TEST_F(Layer_RNN_Test, get_set_test)
    EXPECT_EQ(shape(outputs[1]), shape(nT, nS, nH));
}
void testLayerUsingDarknetModels(String basename, bool useDarknetModel = false, bool useCommonInputBlob = true)
{
    String cfg = _tf(basename + ".cfg");
    String weights = _tf(basename + ".weights");
    String inpfile = (useCommonInputBlob) ? _tf("blob.npy") : _tf(basename + ".input.npy");
    String outfile = _tf(basename + ".npy");
    Net net = readNetFromDarknet(cfg, (useDarknetModel) ? weights : String());
    ASSERT_FALSE(net.empty());
    Mat inp = blobFromNPY(inpfile);
    Mat ref = blobFromNPY(outfile);
    net.setInput(inp, "data");
    net.setPreferableBackend(DNN_BACKEND_OPENCV);
    Mat out = net.forward();
    normAssert(ref, out);
}
TEST(Layer_Test_Region, Accuracy)
{
    testLayerUsingDarknetModels("region", false, false);
}
TEST(Layer_Test_Reorg, Accuracy)
{
    testLayerUsingDarknetModels("reorg", false, false);
}
TEST(Layer_Test_ROIPooling, Accuracy)
{
    Net net = readNetFromCaffe(_tf("net_roi_pooling.prototxt"));
...
...
@@ -546,8 +532,10 @@ TEST(Layer_Test_ROIPooling, Accuracy)
TEST_P(Test_Caffe_layers, FasterRCNN_Proposal)
{
    if ((backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ||
        backend == DNN_BACKEND_INFERENCE_ENGINE)
        throw SkipTestException("");
    Net net = readNetFromCaffe(_tf("net_faster_rcnn_proposal.prototxt"));
    net.setPreferableTarget(GetParam());
    Mat scores = blobFromNPY(_tf("net_faster_rcnn_proposal.scores.npy"));
    Mat deltas = blobFromNPY(_tf("net_faster_rcnn_proposal.deltas.npy"));
...
...
@@ -558,7 +546,8 @@ TEST_P(Test_Caffe_layers, FasterRCNN_Proposal)
    net.setInput(imInfo, "im_info");
    std::vector<Mat> outs;
    net.setPreferableBackend(DNN_BACKEND_OPENCV);
    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);
    net.forward(outs, "output");
    for (int i = 0; i < 2; ++i)
...
...
@@ -573,7 +562,6 @@ TEST_P(Test_Caffe_layers, FasterRCNN_Proposal)
        EXPECT_EQ(countNonZero(outs[i].rowRange(numDets, outs[i].size[0])), 0);
    }
}
INSTANTIATE_TEST_CASE_P(/**/, Test_Caffe_layers, availableDnnTargets());
typedef testing::TestWithParam<tuple<Vec4i, Vec2i, bool> > Scale_untrainable;
TEST_P(Scale_untrainable, Accuracy)
...
...
@@ -739,8 +727,10 @@ INSTANTIATE_TEST_CASE_P(Layer_Test, Crop, Combine(
// Check that by default average pooling layer should not count zero padded values
// into the normalization area.
TEST(Layer_Test_Average_pooling_kernel_area, Accuracy)
TEST_P(Test_Caffe_layers, Average_pooling_kernel_area)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        throw SkipTestException("");
    LayerParams lp;
    lp.name = "testAvePool";
    lp.type = "Pooling";
...
...
@@ -755,17 +745,21 @@ TEST(Layer_Test_Average_pooling_kernel_area, Accuracy)
    // ----+--
    // 7 8 | 9
    Mat inp = (Mat_<float>(3, 3) << 1, 2, 3, 4, 5, 6, 7, 8, 9);
    Mat target = (Mat_<float>(2, 2) << (1 + 2 + 4 + 5) / 4.f, (3 + 6) / 2.f, (7 + 8) / 2.f, 9);
    Mat ref = (Mat_<float>(2, 2) << (1 + 2 + 4 + 5) / 4.f, (3 + 6) / 2.f, (7 + 8) / 2.f, 9);
    Mat tmp = blobFromImage(inp);
    net.setInput(blobFromImage(inp));
    net.setPreferableBackend(DNN_BACKEND_OPENCV);
    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);
    Mat out = net.forward();
    normAssert(out, blobFromImage(target));
    normAssert(out, blobFromImage(ref));
}
// Test PriorBoxLayer in case of no aspect ratios (just squared proposals).
TEST(Layer_PriorBox, squares)
TEST_P(Test_Caffe_layers, PriorBox_squares)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE ||
        (backend == DNN_BACKEND_OPENCV && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)))
        throw SkipTestException("");
    LayerParams lp;
    lp.name = "testPriorBox";
    lp.type = "PriorBox";
...
...
@@ -783,14 +777,15 @@ TEST(Layer_PriorBox, squares)
    Mat inp(1, 2, CV_32F);
    randu(inp, -1, 1);
    net.setInput(blobFromImage(inp));
    net.setPreferableBackend(DNN_BACKEND_OPENCV);
    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);
    Mat out = net.forward();
    Mat target = (Mat_<float>(4, 4) << 0.0, 0.0, 0.75, 1.0,
    Mat ref = (Mat_<float>(4, 4) << 0.0, 0.0, 0.75, 1.0,
                                      0.25, 0.0, 1.0, 1.0,
                                      0.1f, 0.1f, 0.2f, 0.2f,
                                      0.1f, 0.1f, 0.2f, 0.2f);
    normAssert(out.reshape(1, 4), target);
    normAssert(out.reshape(1, 4), ref);
}
typedef TestWithParam<tuple<int, int> > Layer_Test_DWconv_Prelu;
...
...
@@ -1056,19 +1051,19 @@ TEST(Test_DLDT, multiple_networks)
#endif  // HAVE_INF_ENGINE
// Test a custom layer.
class InterpLayer CV_FINAL : public Layer
class CustomInterpLayer CV_FINAL : public Layer
{
public:
    InterpLayer(const LayerParams& params) : Layer(params)
    CustomInterpLayer(const LayerParams& params) : Layer(params)
    {
        zoomFactor = params.get<int>("zoom_factor", 0);
        outWidth = params.get<int>("width", 0);
        outHeight = params.get<int>("height", 0);
    }
    static Ptr<InterpLayer> create(LayerParams& params)
    static Ptr<Layer> create(LayerParams& params)
    {
        return Ptr<InterpLayer>(new InterpLayer(params));
        return Ptr<Layer>(new CustomInterpLayer(params));
    }
    virtual bool getMemoryShapes(const std::vector<std::vector<int> > &inputs,
...
...
@@ -1142,24 +1137,41 @@ public:
        }
    }
    virtual void forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE {}
    void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs, OutputArrayOfArrays internals) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());
        Layer::forward_fallback(inputs, outputs, internals);
    }
private:
    int outWidth, outHeight, zoomFactor;
};
TEST(Layer_Test_Interp_custom, Accuracy)
TEST_P(Test_Caffe_layers, Interp)
{
    CV_DNN_REGISTER_LAYER_CLASS(Interp, InterpLayer);
    testLayerUsingCaffeModels("layer_interp", DNN_TARGET_CPU, false, false);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        throw SkipTestException("");
    // Test a cusom layer.
    CV_DNN_REGISTER_LAYER_CLASS(Interp, CustomInterpLayer);
    try
    {
        testLayerUsingCaffeModels("layer_interp", false, false);
    }
    catch (...)
    {
        LayerFactory::unregisterLayer("Interp");
        throw;
    }
    LayerFactory::unregisterLayer("Interp");
}
TEST(Layer_Test_Interp, Accuracy)
{
    testLayerUsingCaffeModels("layer_interp", DNN_TARGET_CPU, false, false);
    // Test an implemented layer.
    testLayerUsingCaffeModels("layer_interp", false, false);
}
INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_Caffe_layers, dnnBackendsAndTargets());
TEST(Layer_Test_PoolingIndices, Accuracy)
{
    Net net;
...
...
modules/dnn/test/test_precomp.hpp View file @ 523b6f32
...
...
@@ -69,6 +69,93 @@ static testing::internal::ParamGenerator<DNNTarget> availableDnnTargets()
    return testing::ValuesIn(targets);
}

static testing::internal::ParamGenerator<tuple<DNNBackend, DNNTarget> > dnnBackendsAndTargets()
{
    static const tuple<DNNBackend, DNNTarget> testCases[] = {
    #ifdef HAVE_INF_ENGINE
        tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_CPU),
        tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL),
        tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL_FP16),
        tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_MYRIAD),
    #endif
        tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_CPU),
        tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL),
        tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL_FP16)
    };
    return testing::ValuesIn(testCases);
}

class DNNTestLayer : public TestWithParam<tuple<DNNBackend, DNNTarget> >
{
public:
    dnn::Backend backend;
    dnn::Target target;
    double default_l1, default_lInf;

    DNNTestLayer()
    {
        backend = (dnn::Backend)(int)get<0>(GetParam());
        target = (dnn::Target)(int)get<1>(GetParam());
        getDefaultThresholds(backend, target, &default_l1, &default_lInf);
    }

    static void getDefaultThresholds(int backend, int target, double* l1, double* lInf)
    {
        if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
        {
            *l1 = 4e-3;
            *lInf = 2e-2;
        }
        else
        {
            *l1 = 1e-5;
            *lInf = 1e-4;
        }
    }

    static void checkBackend(int backend, int target, Mat* inp = 0, Mat* ref = 0)
    {
        if (backend == DNN_BACKEND_OPENCV && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
        {
#ifdef HAVE_OPENCL
            if (!cv::ocl::useOpenCL())
#endif
            {
                throw SkipTestException("OpenCL is not available/disabled in OpenCV");
            }
        }
        if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        {
            if (!checkMyriadTarget())
            {
                throw SkipTestException("Myriad is not available/disabled in OpenCV");
            }
            if (inp && ref && inp->size[0] != 1)
            {
                // Myriad plugin supports only batch size 1. Slice a single sample.
                if (inp->size[0] == ref->size[0])
                {
                    std::vector<cv::Range> range(inp->dims, Range::all());
                    range[0] = Range(0, 1);
                    *inp = inp->operator()(range);

                    range = std::vector<cv::Range>(ref->dims, Range::all());
                    range[0] = Range(0, 1);
                    *ref = ref->operator()(range);
                }
                else
                    throw SkipTestException("Myriad plugin supports only batch size 1");
            }
        }
    }

protected:
    void checkBackend(Mat* inp = 0, Mat* ref = 0)
    {
        checkBackend(backend, target, inp, ref);
    }
};

}}
#endif
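Note (illustrative only, not part of this commit): a test built on the new DNNTestLayer fixture is meant to be parameterized over dnnBackendsAndTargets(); the sketch below assumes hypothetical names (MyLayerTest, some_net.prototxt, some_in.npy, some_out.npy).
// Sketch of intended fixture usage; all names here are hypothetical.
class MyLayerTest : public DNNTestLayer {};
TEST_P(MyLayerTest, Accuracy)
{
    Mat inp = blobFromNPY(_tf("some_in.npy"));
    Mat ref = blobFromNPY(_tf("some_out.npy"));
    checkBackend(&inp, &ref);  // skips unavailable targets, trims batches for Myriad
    Net net = readNetFromCaffe(_tf("some_net.prototxt"));
    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);
    net.setInput(inp);
    normAssert(ref, net.forward(), "", default_l1, default_lInf);
}
INSTANTIATE_TEST_CASE_P(/**/, MyLayerTest, dnnBackendsAndTargets());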
modules/dnn/test/test_tf_importer.cpp View file @ 523b6f32
...
...
@@ -78,141 +78,170 @@ static std::string path(const std::string& file)
    return findDataFile("dnn/tensorflow/" + file, false);
}
static void runTensorFlowNet(const std::string& prefix, int targetId = DNN_TARGET_CPU, bool hasText = false,
                             double l1 = 1e-5, double lInf = 1e-4, bool memoryLoad = false)
class Test_TensorFlow_layers : public DNNTestLayer
{
    std::string netPath = path(prefix + "_net.pb");
    std::string netConfig = (hasText ? path(prefix + "_net.pbtxt") : "");
    std::string inpPath = path(prefix + "_in.npy");
    std::string outPath = path(prefix + "_out.npy");
    Net net;
    if (memoryLoad)
public:
    void runTensorFlowNet(const std::string& prefix, bool hasText = false,
                          double l1 = 0.0, double lInf = 0.0, bool memoryLoad = false)
    {
        // Load files into a memory buffers
        string dataModel;
        ASSERT_TRUE(readFileInMemory(netPath, dataModel));
        std::string netPath = path(prefix + "_net.pb");
        std::string netConfig = (hasText ? path(prefix + "_net.pbtxt") : "");
        std::string inpPath = path(prefix + "_in.npy");
        std::string outPath = path(prefix + "_out.npy");
        cv::Mat input = blobFromNPY(inpPath);
        cv::Mat ref = blobFromNPY(outPath);
        checkBackend(&input, &ref);
        Net net;
        if (memoryLoad)
        {
            // Load files into a memory buffers
            string dataModel;
            ASSERT_TRUE(readFileInMemory(netPath, dataModel));
            string dataConfig;
            if (hasText)
                ASSERT_TRUE(readFileInMemory(netConfig, dataConfig));
            net = readNetFromTensorflow(dataModel.c_str(), dataModel.size(), dataConfig.c_str(), dataConfig.size());
        }
        else
            net = readNetFromTensorflow(netPath, netConfig);
    string dataConfig;
    if (hasText)
        ASSERT_TRUE(readFileInMemory(netConfig, dataConfig));
        ASSERT_FALSE(net.empty());
    net = readNetFromTensorflow(dataModel.c_str(), dataModel.size(), dataConfig.c_str(), dataConfig.size());
        net.setPreferableBackend(backend);
        net.setPreferableTarget(target);
        net.setInput(input);
        cv::Mat output = net.forward();
        normAssert(ref, output, "", l1 ? l1 : default_l1, lInf ? lInf : default_lInf);
    }
    else
        net = readNetFromTensorflow(netPath, netConfig);
    ASSERT_FALSE(net.empty());
    net.setPreferableBackend(DNN_BACKEND_OPENCV);
    net.setPreferableTarget(targetId);
    cv::Mat input = blobFromNPY(inpPath);
    cv::Mat target = blobFromNPY(outPath);
    net.setInput(input);
    cv::Mat output = net.forward();
    normAssert(target, output, "", l1, lInf);
}
typedef testing::TestWithParam<DNNTarget> Test_TensorFlow_layers;
};
TEST_P(Test_TensorFlow_layers, conv)
{
    int targetId = GetParam();
    runTensorFlowNet("single_conv", targetId);
    runTensorFlowNet("atrous_conv2d_valid", targetId);
    runTensorFlowNet("atrous_conv2d_same", targetId);
    runTensorFlowNet("depthwise_conv2d", targetId);
    runTensorFlowNet("keras_atrous_conv2d_same", targetId);
    runTensorFlowNet("conv_pool_nchw", targetId);
    runTensorFlowNet("single_conv");
    runTensorFlowNet("atrous_conv2d_valid");
    runTensorFlowNet("atrous_conv2d_same");
    runTensorFlowNet("depthwise_conv2d");
    runTensorFlowNet("keras_atrous_conv2d_same");
    runTensorFlowNet("conv_pool_nchw");
}
TEST_P(Test_TensorFlow_layers, padding)
{
    int targetId = GetParam();
    runTensorFlowNet("padding_same", targetId);
    runTensorFlowNet("padding_valid", targetId);
    runTensorFlowNet("spatial_padding", targetId);
    runTensorFlowNet("padding_same");
    runTensorFlowNet("padding_valid");
    runTensorFlowNet("spatial_padding");
}
TEST_P(Test_TensorFlow_layers, eltwise_add_mul)
{
    runTensorFlowNet("eltwise_add_mul", GetParam());
    runTensorFlowNet("eltwise_add_mul");
}
TEST_P(Test_TensorFlow_layers, pad_and_concat)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        throw SkipTestException("");
    runTensorFlowNet("pad_and_concat");
}
TEST_P(Test_TensorFlow_layers, concat)
TEST_P(Test_TensorFlow_layers, concat_axis_1)
{
    runTensorFlowNet("pad_and_concat", GetParam());
    runTensorFlowNet("concat_axis_1", GetParam());
    runTensorFlowNet("concat_axis_1");
}
TEST_P(Test_TensorFlow_layers, batch_norm)
{
    int targetId = GetParam();
    runTensorFlowNet("batch_norm", targetId);
    runTensorFlowNet("fused_batch_norm", targetId);
    runTensorFlowNet("batch_norm_text", targetId, true);
    runTensorFlowNet("mvn_batch_norm", targetId);
    runTensorFlowNet("mvn_batch_norm_1x1", targetId);
    runTensorFlowNet("unfused_batch_norm", targetId);
    runTensorFlowNet("fused_batch_norm_no_gamma", targetId);
    runTensorFlowNet("unfused_batch_norm_no_gamma", targetId);
    runTensorFlowNet("batch_norm");
    runTensorFlowNet("batch_norm", false, 0.0, 0.0, true);
    runTensorFlowNet("fused_batch_norm");
    runTensorFlowNet("fused_batch_norm", false, 0.0, 0.0, true);
    runTensorFlowNet("batch_norm_text", true);
    runTensorFlowNet("batch_norm_text", true, 0.0, 0.0, true);
    runTensorFlowNet("unfused_batch_norm");
    runTensorFlowNet("fused_batch_norm_no_gamma");
    runTensorFlowNet("unfused_batch_norm_no_gamma");
}
TEST_P(Test_TensorFlow_layers, mvn_batch_norm)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        throw SkipTestException("");
    runTensorFlowNet("mvn_batch_norm");
    runTensorFlowNet("mvn_batch_norm_1x1");
}
TEST_P(Test_TensorFlow_layers, pooling)
{
    int targetId = GetParam();
    cv::ocl::Device d = cv::ocl::Device::getDefault();
    bool loosenFlag = targetId == DNN_TARGET_OPENCL && d.isIntel() && d.type() == cv::ocl::Device::TYPE_CPU;
    runTensorFlowNet("max_pool_even", targetId);
    runTensorFlowNet("max_pool_odd_valid", targetId);
    runTensorFlowNet("ave_pool_same", targetId);
    runTensorFlowNet("max_pool_odd_same", targetId, false, loosenFlag ? 3e-5 : 1e-5, loosenFlag ? 3e-4 : 1e-4);
    runTensorFlowNet("reduce_mean", targetId);  // an average pooling over all spatial dimensions.
    runTensorFlowNet("max_pool_even");
    runTensorFlowNet("max_pool_odd_valid");
    runTensorFlowNet("max_pool_odd_same");
    runTensorFlowNet("reduce_mean");  // an average pooling over all spatial dimensions.
}
// TODO: fix tests and replace to pooling
TEST_P(Test_TensorFlow_layers, ave_pool_same)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        throw SkipTestException("");
    runTensorFlowNet("ave_pool_same");
}
TEST_P(Test_TensorFlow_layers, deconvolution)
{
    int targetId = GetParam();
    runTensorFlowNet("deconvolution", targetId);
    runTensorFlowNet("deconvolution_same", targetId);
    runTensorFlowNet("deconvolution_stride_2_same", targetId);
    runTensorFlowNet("deconvolution_adj_pad_valid", targetId);
    runTensorFlowNet("deconvolution_adj_pad_same", targetId);
    runTensorFlowNet("keras_deconv_valid", targetId);
    runTensorFlowNet("keras_deconv_same", targetId);
    runTensorFlowNet("deconvolution");
    runTensorFlowNet("deconvolution_same");
    runTensorFlowNet("deconvolution_stride_2_same");
    runTensorFlowNet("deconvolution_adj_pad_valid");
    runTensorFlowNet("deconvolution_adj_pad_same");
    runTensorFlowNet("keras_deconv_valid");
    runTensorFlowNet("keras_deconv_same");
}
TEST_P(Test_TensorFlow_layers, matmul)
{
    int targetId = GetParam();
    runTensorFlowNet("matmul", targetId);
    runTensorFlowNet("nhwc_reshape_matmul", targetId);
    runTensorFlowNet("nhwc_transpose_reshape_matmul", targetId);
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        throw SkipTestException("");
    runTensorFlowNet("matmul");
    runTensorFlowNet("nhwc_reshape_matmul");
    runTensorFlowNet("nhwc_transpose_reshape_matmul");
}
TEST_P(Test_TensorFlow_layers, reshape)
{
    int targetId = GetParam();
    runTensorFlowNet("shift_reshape_no_reorder", targetId);
    runTensorFlowNet("reshape_no_reorder", targetId);
    runTensorFlowNet("reshape_reduce", targetId);
    runTensorFlowNet("flatten", targetId, true);
    runTensorFlowNet("unfused_flatten", targetId);
    runTensorFlowNet("unfused_flatten_unknown_batch", targetId);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        throw SkipTestException("");
    runTensorFlowNet("shift_reshape_no_reorder");
    runTensorFlowNet("reshape_no_reorder");
    runTensorFlowNet("reshape_reduce");
}
TEST_P(Test_TensorFlow_layers, flatten)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
        (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
        throw SkipTestException("");
    runTensorFlowNet("flatten", true);
    runTensorFlowNet("unfused_flatten");
    runTensorFlowNet("unfused_flatten_unknown_batch");
}
TEST_P(Test_TensorFlow_layers, l2_normalize)
{
    int targetId = GetParam();
    runTensorFlowNet("l2_normalize", targetId);
    runTensorFlowNet("l2_normalize_3d", targetId);
    runTensorFlowNet("l2_normalize");
}
INSTANTIATE_TEST_CASE_P(/**/, Test_TensorFlow_layers, availableDnnTargets());
// TODO: fix it and add to l2_normalize
TEST_P(Test_TensorFlow_layers, l2_normalize_3d)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        throw SkipTestException("");
    runTensorFlowNet("l2_normalize_3d");
}
typedef testing::TestWithParam<DNNTarget> Test_TensorFlow_nets;
...
...
@@ -359,91 +388,96 @@ TEST_P(Test_TensorFlow_nets, EAST_text_detection)
INSTANTIATE_TEST_CASE_P(/**/, Test_TensorFlow_nets, availableDnnTargets());
typedef testing::TestWithParam<DNNTarget> Test_TensorFlow_fp16;
TEST_P(Test_TensorFlow_fp16, tests)
TEST_P(Test_TensorFlow_layers, fp16_weights)
{
    int targetId = GetParam();
    const float l1 = 7e-4;
    const float lInf = 1e-2;
    runTensorFlowNet("fp16_single_conv", targetId, false, l1, lInf);
    runTensorFlowNet("fp16_deconvolution", targetId, false, l1, lInf);
    runTensorFlowNet("fp16_max_pool_odd_same", targetId, false, l1, lInf);
    runTensorFlowNet("fp16_padding_valid", targetId, false, l1, lInf);
    runTensorFlowNet("fp16_eltwise_add_mul", targetId, false, l1, lInf);
    runTensorFlowNet("fp16_max_pool_odd_valid", targetId, false, l1, lInf);
    runTensorFlowNet("fp16_pad_and_concat", targetId, false, l1, lInf);
    runTensorFlowNet("fp16_max_pool_even", targetId, false, l1, lInf);
    runTensorFlowNet("fp16_padding_same", targetId, false, l1, lInf);
    const float l1 = 0.00071;
    const float lInf = 0.012;
    runTensorFlowNet("fp16_single_conv", false, l1, lInf);
    runTensorFlowNet("fp16_deconvolution", false, l1, lInf);
    runTensorFlowNet("fp16_max_pool_odd_same", false, l1, lInf);
    runTensorFlowNet("fp16_padding_valid", false, l1, lInf);
    runTensorFlowNet("fp16_eltwise_add_mul", false, l1, lInf);
    runTensorFlowNet("fp16_max_pool_odd_valid", false, l1, lInf);
    runTensorFlowNet("fp16_max_pool_even", false, l1, lInf);
    runTensorFlowNet("fp16_padding_same", false, l1, lInf);
}
INSTANTIATE_TEST_CASE_P(/**/, Test_TensorFlow_fp16, Values(DNN_TARGET_CPU, DNN_TARGET_OPENCL, DNN_TARGET_OPENCL_FP16));
// TODO: fix pad_and_concat and add this test case to fp16_weights
TEST_P(Test_TensorFlow_layers, fp16_pad_and_concat)
{
    const float l1 = 0.00071;
    const float lInf = 0.012;
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        throw SkipTestException("");
    runTensorFlowNet("fp16_pad_and_concat", false, l1, lInf);
}
TEST(Test_TensorFlow, defun)
TEST_P(Test_TensorFlow_layers, defun)
{
    runTensorFlowNet("defun_dropout");
}
TEST(Test_TensorFlow, quantized)
TEST_P(Test_TensorFlow_layers, quantized)
{
    runTensorFlowNet("uint8_single_conv");
}
TEST(Test_TensorFlow, lstm)
TEST_P(Test_TensorFlow_layers, lstm)
{
    runTensorFlowNet("lstm", DNN_TARGET_CPU, true);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE ||
        (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
        throw SkipTestException("");
    runTensorFlowNet("lstm", true);
    runTensorFlowNet("lstm", true, 0.0, 0.0, true);
}
TEST(Test_TensorFlow, split)
TEST_P(Test_TensorFlow_layers, split)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        throw SkipTestException("");
    runTensorFlowNet("split_equals");
}
TEST(Test_TensorFlow, resize_nearest_neighbor)
TEST_P(Test_TensorFlow_layers, resize_nearest_neighbor)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_MYRIAD)
        throw SkipTestException("");
    runTensorFlowNet("resize_nearest_neighbor");
    runTensorFlowNet("keras_upsampling2d");
}
TEST(Test_TensorFlow, slice)
TEST_P(Test_TensorFlow_layers, slice)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
        (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
        throw SkipTestException("");
    runTensorFlowNet("slice_4d");
}
TEST(Test_TensorFlow, softmax)
TEST_P(Test_TensorFlow_layers, softmax)
{
    runTensorFlowNet("keras_softmax");
}
TEST(Test_TensorFlow, relu6)
TEST_P(Test_TensorFlow_layers, relu6)
{
    runTensorFlowNet("keras_relu6");
    runTensorFlowNet("keras_relu6", DNN_TARGET_CPU, /*hasText*/ true);
    runTensorFlowNet("keras_relu6", /*hasText*/ true);
}
TEST(Test_TensorFlow, keras_mobilenet_head)
TEST_P(Test_TensorFlow_layers, keras_mobilenet_head)
{
    runTensorFlowNet("keras_mobilenet_head");
}
TEST(Test_TensorFlow, memory_read)
{
    double l1 = 1e-5;
    double lInf = 1e-4;
    runTensorFlowNet("lstm", DNN_TARGET_CPU, true, l1, lInf, true);
    runTensorFlowNet("batch_norm", DNN_TARGET_CPU, false, l1, lInf, true);
    runTensorFlowNet("fused_batch_norm", DNN_TARGET_CPU, false, l1, lInf, true);
    runTensorFlowNet("batch_norm_text", DNN_TARGET_CPU, true, l1, lInf, true);
}
TEST(Test_TensorFlow, resize_bilinear)
TEST_P(Test_TensorFlow_layers, resize_bilinear)
{
    runTensorFlowNet("resize_bilinear");
    runTensorFlowNet("resize_bilinear_factor");
}
INSTANTIATE_TEST_CASE_P(/**/, Test_TensorFlow_layers, dnnBackendsAndTargets());
TEST(Test_TensorFlow, two_inputs)
{
    Net net = readNet(path("two_inputs_net.pbtxt"));
...
...
modules/dnn/test/test_torch_importer.cpp View file @ 523b6f32
...
...
@@ -296,7 +296,6 @@ TEST_P(Test_Torch_nets, FastNeuralStyle_accuracy)
    Mat inputBlob = blobFromImage(img, 1.0, Size(), Scalar(103.939, 116.779, 123.68), false);
    net.setInput(inputBlob);
    net.setPreferableBackend(DNN_BACKEND_OPENCV);
    Mat out = net.forward();
    // Deprocessing.
...
...