opencv / Commits / e1c32375

Commit e1c32375, authored Mar 05, 2018 by Dmitry Kurtaev
Parametric OpenCL deep learning tests
parent 667f5b65
Showing 4 changed files with 270 additions and 480 deletions.
    modules/dnn/src/layers/softmax_layer.cpp    +12  -12
    modules/dnn/test/test_caffe_importer.cpp    +65  -120
    modules/dnn/test/test_tf_importer.cpp       +111 -165
    modules/dnn/test/test_torch_importer.cpp    +82  -183
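The commit replaces the duplicated plain/OCL_TEST variants in the DNN test suites with GoogleTest value-parameterized tests that run once per available target. A condensed sketch of the pattern, assembled from the hunks below (CV_ENUM, the DNN_TARGET_* constants and runTensorFlowNet come from the OpenCV test framework and these test files; the snippet illustrates the pattern rather than defining any new API):

CV_ENUM(DNNTarget, DNN_TARGET_CPU, DNN_TARGET_OPENCL)

// Enumerate the targets usable on this machine: CPU always, OpenCL only when a device is reported.
static testing::internal::ParamGenerator<DNNTarget> availableBackends()
{
    static std::vector<DNNTarget> targets;
    if (targets.empty())
    {
        targets.push_back(DNN_TARGET_CPU);
#ifdef HAVE_OPENCL
        if (cv::ocl::useOpenCL())
            targets.push_back(DNN_TARGET_OPENCL);
#endif
    }
    return testing::ValuesIn(targets);
}

// Each test body reads the target with GetParam() and forwards it to the net runner.
typedef testing::TestWithParam<DNNTarget> Test_TensorFlow_layers;
TEST_P(Test_TensorFlow_layers, conv)
{
    runTensorFlowNet("single_conv", GetParam());
}

// Instantiate the whole suite once per available target.
INSTANTIATE_TEST_CASE_P(/**/, Test_TensorFlow_layers, availableBackends());

A single parameter combination can still be selected with googletest's standard --gtest_filter option; parameterized tests carry a trailing /N index in their names.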
modules/dnn/src/layers/softmax_layer.cpp

@@ -93,6 +93,18 @@ public:
     }

 #ifdef HAVE_OPENCL
+    virtual void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    {
+        OCL4DNNSoftmaxConfig config;
+
+        config.in_shape = shape(*inputs[0]);
+        config.axis = axisRaw;
+        config.channels = inputs[0]->size[axisRaw];
+        config.logsoftmax = logSoftMax;
+
+        softmaxOp = Ptr<OCL4DNNSoftmax<float> >(new OCL4DNNSoftmax<float>(config));
+    }
+
     bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays itns)
     {
         std::vector<UMat> inputs;
@@ -103,18 +115,6 @@ public:
         outs.getUMatVector(outputs);
         itns.getUMatVector(internals);

-        if (softmaxOp.empty())
-        {
-            OCL4DNNSoftmaxConfig config;
-
-            config.in_shape = shape(inputs[0]);
-            config.axis = axisRaw;
-            config.channels = inputs[0].size[axisRaw];
-            config.logsoftmax = logSoftMax;
-
-            softmaxOp = Ptr<OCL4DNNSoftmax<float> >(new OCL4DNNSoftmax<float>(config));
-        }
-
         UMat& src = inputs[0];
         UMat& dstMat = outputs[0];
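In softmax_layer.cpp the OCL4DNNSoftmax primitive is now configured eagerly in finalize(), the hook cv::dnn calls once input shapes are known, instead of lazily behind the softmaxOp.empty() check on every forward_ocl() call. A minimal, library-free sketch of the two initialization styles (Primitive, LazyLayer and EagerLayer are illustrative names, not OpenCV types):

#include <memory>

struct Primitive
{
    explicit Primitive(int channels) : channels(channels) {}
    int channels;
};

// Old style: build the primitive on the first forward() call.
struct LazyLayer
{
    std::unique_ptr<Primitive> op;
    void forward(int channels)
    {
        if (!op)                        // checked on every call
            op.reset(new Primitive(channels));
        // ... run op ...
    }
};

// New style: build the primitive once shapes are fixed, before any forward().
struct EagerLayer
{
    std::unique_ptr<Primitive> op;
    void finalize(int channels)         // analogue of Layer::finalize in the hunk above
    {
        op.reset(new Primitive(channels));
    }
    void forward()
    {
        // op already exists here; no emptiness check is needed
    }
};

int main()
{
    EagerLayer layer;
    layer.finalize(64);
    layer.forward();
    return 0;
}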
modules/dnn/test/test_caffe_importer.cpp
@@ -47,6 +47,21 @@
 namespace opencv_test { namespace {

+CV_ENUM(DNNTarget, DNN_TARGET_CPU, DNN_TARGET_OPENCL)
+static testing::internal::ParamGenerator<DNNTarget> availableBackends()
+{
+    static std::vector<DNNTarget> targets;
+    if (targets.empty())
+    {
+        targets.push_back(DNN_TARGET_CPU);
+#ifdef HAVE_OPENCL
+        if (cv::ocl::useOpenCL())
+            targets.push_back(DNN_TARGET_OPENCL);
+#endif
+    }
+    return testing::ValuesIn(targets);
+}
+
 template<typename TString>
 static std::string _tf(TString filename)
 {
@@ -83,44 +98,10 @@ TEST(Test_Caffe, read_googlenet)
     ASSERT_FALSE(net.empty());
 }

-typedef testing::TestWithParam<bool> Reproducibility_AlexNet;
+typedef testing::TestWithParam<tuple<bool, DNNTarget> > Reproducibility_AlexNet;
 TEST_P(Reproducibility_AlexNet, Accuracy)
 {
-    bool readFromMemory = GetParam();
-    Net net;
-    {
-        const string proto = findDataFile("dnn/bvlc_alexnet.prototxt", false);
-        const string model = findDataFile("dnn/bvlc_alexnet.caffemodel", false);
-        if (readFromMemory)
-        {
-            string dataProto;
-            ASSERT_TRUE(readFileInMemory(proto, dataProto));
-            string dataModel;
-            ASSERT_TRUE(readFileInMemory(model, dataModel));
-            net = readNetFromCaffe(dataProto.c_str(), dataProto.size(),
-                                   dataModel.c_str(), dataModel.size());
-        }
-        else
-            net = readNetFromCaffe(proto, model);
-        ASSERT_FALSE(net.empty());
-    }
-
-    Mat sample = imread(_tf("grace_hopper_227.png"));
-    ASSERT_TRUE(!sample.empty());
-
-    net.setInput(blobFromImage(sample, 1.0f, Size(227, 227), Scalar(), false), "data");
-    Mat out = net.forward("prob");
-    Mat ref = blobFromNPY(_tf("caffe_alexnet_prob.npy"));
-    normAssert(ref, out);
-}
-INSTANTIATE_TEST_CASE_P(Test_Caffe, Reproducibility_AlexNet, testing::Bool());
-
-typedef testing::TestWithParam<bool> Reproducibility_OCL_AlexNet;
-OCL_TEST_P(Reproducibility_OCL_AlexNet, Accuracy)
-{
-    bool readFromMemory = GetParam();
+    bool readFromMemory = get<0>(GetParam());
     Net net;
     {
         const string proto = findDataFile("dnn/bvlc_alexnet.prototxt", false);
@@ -140,8 +121,7 @@ OCL_TEST_P(Reproducibility_OCL_AlexNet, Accuracy)
         ASSERT_FALSE(net.empty());
     }
-    net.setPreferableBackend(DNN_BACKEND_DEFAULT);
-    net.setPreferableTarget(DNN_TARGET_OPENCL);
+    net.setPreferableTarget(get<1>(GetParam()));

     Mat sample = imread(_tf("grace_hopper_227.png"));
     ASSERT_TRUE(!sample.empty());
@@ -152,7 +132,7 @@ OCL_TEST_P(Reproducibility_OCL_AlexNet, Accuracy)
     normAssert(ref, out);
 }
-OCL_INSTANTIATE_TEST_CASE_P(Test_Caffe, Reproducibility_OCL_AlexNet, testing::Bool());
+INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_AlexNet, Combine(testing::Bool(), availableBackends()));

 #if !defined(_WIN32) || defined(_WIN64)
 TEST(Reproducibility_FCN, Accuracy)
@@ -207,43 +187,14 @@ TEST(Reproducibility_SSD, Accuracy)
     normAssert(ref, out);
 }

-TEST(Reproducibility_MobileNet_SSD, Accuracy)
-{
-    const string proto = findDataFile("dnn/MobileNetSSD_deploy.prototxt", false);
-    const string model = findDataFile("dnn/MobileNetSSD_deploy.caffemodel", false);
-    Net net = readNetFromCaffe(proto, model);
-
-    Mat sample = imread(_tf("street.png"));
-
-    Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 300), Scalar(127.5, 127.5, 127.5), false);
-    net.setInput(inp);
-    Mat out = net.forward();
-
-    Mat ref = blobFromNPY(_tf("mobilenet_ssd_caffe_out.npy"));
-    normAssert(ref, out);
-
-    // Check that detections aren't preserved.
-    inp.setTo(0.0f);
-    net.setInput(inp);
-    out = net.forward();
-
-    const int numDetections = out.size[2];
-    ASSERT_NE(numDetections, 0);
-    for (int i = 0; i < numDetections; ++i)
-    {
-        float confidence = out.ptr<float>(0, 0, i)[2];
-        ASSERT_EQ(confidence, 0);
-    }
-}
-
-OCL_TEST(Reproducibility_MobileNet_SSD, Accuracy)
+typedef testing::TestWithParam<DNNTarget> Reproducibility_MobileNet_SSD;
+TEST_P(Reproducibility_MobileNet_SSD, Accuracy)
 {
     const string proto = findDataFile("dnn/MobileNetSSD_deploy.prototxt", false);
     const string model = findDataFile("dnn/MobileNetSSD_deploy.caffemodel", false);
     Net net = readNetFromCaffe(proto, model);
-    net.setPreferableBackend(DNN_BACKEND_DEFAULT);
-    net.setPreferableTarget(DNN_TARGET_OPENCL);
+    net.setPreferableTarget(GetParam());

     Mat sample = imread(_tf("street.png"));
@@ -258,38 +209,39 @@ OCL_TEST(Reproducibility_MobileNet_SSD, Accuracy)
     inp.setTo(0.0f);
     net.setInput(inp);
     out = net.forward();
+    out = out.reshape(1, out.total() / 7);

-    const int numDetections = out.size[2];
+    const int numDetections = out.rows;
     ASSERT_NE(numDetections, 0);
     for (int i = 0; i < numDetections; ++i)
     {
-        float confidence = out.ptr<float>(0, 0, i)[2];
+        float confidence = out.ptr<float>(i)[2];
         ASSERT_EQ(confidence, 0);
     }
-}
-
-TEST(Reproducibility_ResNet50, Accuracy)
-{
-    Net net = readNetFromCaffe(findDataFile("dnn/ResNet-50-deploy.prototxt", false),
-                               findDataFile("dnn/ResNet-50-model.caffemodel", false));
-    Mat input = blobFromImage(imread(_tf("googlenet_0.png")), 1.0f, Size(224, 224), Scalar(), false);
-    ASSERT_TRUE(!input.empty());
-
-    net.setInput(input);
-    Mat out = net.forward();
-
-    Mat ref = blobFromNPY(_tf("resnet50_prob.npy"));
-    normAssert(ref, out);
+
+    // Check batching mode.
+    ref = ref.reshape(1, numDetections);
+    inp = blobFromImages(std::vector<Mat>(2, sample), 1.0f / 127.5, Size(300, 300),
+                         Scalar(127.5, 127.5, 127.5), false);
+    net.setInput(inp);
+    Mat outBatch = net.forward();
+
+    // Output blob has a shape 1x1x2Nx7 where N is a number of detection for
+    // a single sample in batch. The first numbers of detection vectors are batch id.
+    outBatch = outBatch.reshape(1, outBatch.total() / 7);
+    EXPECT_EQ(outBatch.rows, 2 * numDetections);
+    normAssert(outBatch.rowRange(0, numDetections), ref);
+    normAssert(outBatch.rowRange(numDetections, 2 * numDetections).colRange(1, 7), ref.colRange(1, 7));
 }
+INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_MobileNet_SSD, availableBackends());

-OCL_TEST(Reproducibility_ResNet50, Accuracy)
+typedef testing::TestWithParam<DNNTarget> Reproducibility_ResNet50;
+TEST_P(Reproducibility_ResNet50, Accuracy)
 {
     Net net = readNetFromCaffe(findDataFile("dnn/ResNet-50-deploy.prototxt", false),
                                findDataFile("dnn/ResNet-50-model.caffemodel", false));
-    net.setPreferableBackend(DNN_BACKEND_DEFAULT);
-    net.setPreferableTarget(DNN_TARGET_OPENCL);
+    int targetId = GetParam();
+    net.setPreferableTarget(targetId);

     Mat input = blobFromImage(imread(_tf("googlenet_0.png")), 1.0f, Size(224, 224), Scalar(), false);
     ASSERT_TRUE(!input.empty());
@@ -300,52 +252,46 @@ OCL_TEST(Reproducibility_ResNet50, Accuracy)
     Mat ref = blobFromNPY(_tf("resnet50_prob.npy"));
     normAssert(ref, out);

-    UMat out_umat;
-    net.forward(out_umat);
-    normAssert(ref, out_umat, "out_umat");
-
-    std::vector<UMat> out_umats;
-    net.forward(out_umats);
-    normAssert(ref, out_umats[0], "out_umat_vector");
-}
-
-TEST(Reproducibility_SqueezeNet_v1_1, Accuracy)
-{
-    Net net = readNetFromCaffe(findDataFile("dnn/squeezenet_v1.1.prototxt", false),
-                               findDataFile("dnn/squeezenet_v1.1.caffemodel", false));
-    Mat input = blobFromImage(imread(_tf("googlenet_0.png")), 1.0f, Size(227, 227), Scalar(), false);
-    ASSERT_TRUE(!input.empty());
-
-    net.setInput(input);
-    Mat out = net.forward();
-
-    Mat ref = blobFromNPY(_tf("squeezenet_v1.1_prob.npy"));
-    normAssert(ref, out);
+    if (targetId == DNN_TARGET_OPENCL)
+    {
+        UMat out_umat;
+        net.forward(out_umat);
+        normAssert(ref, out_umat, "out_umat");
+
+        std::vector<UMat> out_umats;
+        net.forward(out_umats);
+        normAssert(ref, out_umats[0], "out_umat_vector");
+    }
 }
+INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_ResNet50, availableBackends());

-OCL_TEST(Reproducibility_SqueezeNet_v1_1, Accuracy)
+typedef testing::TestWithParam<DNNTarget> Reproducibility_SqueezeNet_v1_1;
+TEST_P(Reproducibility_SqueezeNet_v1_1, Accuracy)
 {
     Net net = readNetFromCaffe(findDataFile("dnn/squeezenet_v1.1.prototxt", false),
                                findDataFile("dnn/squeezenet_v1.1.caffemodel", false));
-    net.setPreferableBackend(DNN_BACKEND_DEFAULT);
-    net.setPreferableTarget(DNN_TARGET_OPENCL);
+    int targetId = GetParam();
+    net.setPreferableTarget(targetId);
     Mat input = blobFromImage(imread(_tf("googlenet_0.png")), 1.0f, Size(227, 227), Scalar(), false);
     ASSERT_TRUE(!input.empty());

-    // Firstly set a wrong input blob and run the model to receive a wrong output.
-    net.setInput(input * 2.0f);
-    Mat out = net.forward();
-
-    // Then set a correct input blob to check CPU->GPU synchronization is working well.
+    Mat out;
+    if (targetId == DNN_TARGET_OPENCL)
+    {
+        // Firstly set a wrong input blob and run the model to receive a wrong output.
+        // Then set a correct input blob to check CPU->GPU synchronization is working well.
+        net.setInput(input * 2.0f);
+        out = net.forward();
+    }
     net.setInput(input);
     out = net.forward();

     Mat ref = blobFromNPY(_tf("squeezenet_v1.1_prob.npy"));
     normAssert(ref, out);
 }
+INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_SqueezeNet_v1_1, availableBackends());

 TEST(Reproducibility_AlexNet_fp16, Accuracy)
 {
@@ -456,7 +402,6 @@ TEST(Test_Caffe, multiple_inputs)
     normAssert(out, first_image + second_image);
 }

-CV_ENUM(DNNTarget, DNN_TARGET_CPU, DNN_TARGET_OPENCL)
 typedef testing::TestWithParam<tuple<std::string, DNNTarget> > opencv_face_detector;
 TEST_P(opencv_face_detector, Accuracy)
 {
modules/dnn/test/test_tf_importer.cpp
@@ -26,6 +26,21 @@ static std::string _tf(TString filename)
     return (getOpenCVExtraDir() + "/dnn/") + filename;
 }

+CV_ENUM(DNNTarget, DNN_TARGET_CPU, DNN_TARGET_OPENCL)
+static testing::internal::ParamGenerator<DNNTarget> availableBackends()
+{
+    static std::vector<DNNTarget> targets;
+    if (targets.empty())
+    {
+        targets.push_back(DNN_TARGET_CPU);
+#ifdef HAVE_OPENCL
+        if (cv::ocl::useOpenCL())
+            targets.push_back(DNN_TARGET_OPENCL);
+#endif
+    }
+    return testing::ValuesIn(targets);
+}
+
 TEST(Test_TensorFlow, read_inception)
 {
     Net net;
@@ -115,118 +130,85 @@ static void runTensorFlowNet(const std::string& prefix, int targetId = DNN_TARGE
     normAssert(target, output, "", l1, lInf);
 }

-TEST(Test_TensorFlow, conv)
-{
-    runTensorFlowNet("single_conv");
-    runTensorFlowNet("atrous_conv2d_valid");
-    runTensorFlowNet("atrous_conv2d_same");
-    runTensorFlowNet("depthwise_conv2d");
-}
-
-TEST(Test_TensorFlow, padding)
-{
-    runTensorFlowNet("padding_same");
-    runTensorFlowNet("padding_valid");
-    runTensorFlowNet("spatial_padding");
-}
+typedef testing::TestWithParam<DNNTarget> Test_TensorFlow_layers;

-TEST(Test_TensorFlow, eltwise_add_mul)
+TEST_P(Test_TensorFlow_layers, conv)
 {
-    runTensorFlowNet("eltwise_add_mul");
+    int targetId = GetParam();
+    runTensorFlowNet("single_conv", targetId);
+    runTensorFlowNet("atrous_conv2d_valid", targetId);
+    runTensorFlowNet("atrous_conv2d_same", targetId);
+    runTensorFlowNet("depthwise_conv2d", targetId);
 }

-OCL_TEST(Test_TensorFlow, eltwise_add_mul)
+TEST_P(Test_TensorFlow_layers, padding)
 {
-    runTensorFlowNet("eltwise_add_mul", DNN_TARGET_OPENCL);
+    int targetId = GetParam();
+    runTensorFlowNet("padding_same", targetId);
+    runTensorFlowNet("padding_valid", targetId);
+    runTensorFlowNet("spatial_padding", targetId);
 }

-TEST(Test_TensorFlow, pad_and_concat)
+TEST_P(Test_TensorFlow_layers, eltwise_add_mul)
 {
-    runTensorFlowNet("pad_and_concat");
+    runTensorFlowNet("eltwise_add_mul", GetParam());
 }

-TEST(Test_TensorFlow, batch_norm)
+TEST_P(Test_TensorFlow_layers, pad_and_concat)
 {
-    runTensorFlowNet("batch_norm");
-    runTensorFlowNet("fused_batch_norm");
-    runTensorFlowNet("batch_norm_text", DNN_TARGET_CPU, true);
-    runTensorFlowNet("mvn_batch_norm");
-    runTensorFlowNet("mvn_batch_norm_1x1");
+    runTensorFlowNet("pad_and_concat", GetParam());
 }

-OCL_TEST(Test_TensorFlow, batch_norm)
+TEST_P(Test_TensorFlow_layers, batch_norm)
 {
-    runTensorFlowNet("batch_norm", DNN_TARGET_OPENCL);
-    runTensorFlowNet("fused_batch_norm", DNN_TARGET_OPENCL);
-    runTensorFlowNet("batch_norm_text", DNN_TARGET_OPENCL, true);
+    int targetId = GetParam();
+    runTensorFlowNet("batch_norm", targetId);
+    runTensorFlowNet("fused_batch_norm", targetId);
+    runTensorFlowNet("batch_norm_text", targetId, true);
+    runTensorFlowNet("mvn_batch_norm", targetId);
+    runTensorFlowNet("mvn_batch_norm_1x1", targetId);
 }

-TEST(Test_TensorFlow, pooling)
+TEST_P(Test_TensorFlow_layers, pooling)
 {
-    runTensorFlowNet("max_pool_even");
-    runTensorFlowNet("max_pool_odd_valid");
-    runTensorFlowNet("max_pool_odd_same");
-    runTensorFlowNet("ave_pool_same");
+    int targetId = GetParam();
+    runTensorFlowNet("max_pool_even", targetId);
+    runTensorFlowNet("max_pool_odd_valid", targetId);
+    runTensorFlowNet("ave_pool_same", targetId);
+    runTensorFlowNet("max_pool_odd_same", targetId);
 }

-TEST(Test_TensorFlow, deconvolution)
+TEST_P(Test_TensorFlow_layers, deconvolution)
 {
-    runTensorFlowNet("deconvolution");
-    runTensorFlowNet("deconvolution_same");
-    runTensorFlowNet("deconvolution_stride_2_same");
-    runTensorFlowNet("deconvolution_adj_pad_valid");
-    runTensorFlowNet("deconvolution_adj_pad_same");
+    int targetId = GetParam();
+    runTensorFlowNet("deconvolution", targetId);
+    runTensorFlowNet("deconvolution_same", targetId);
+    runTensorFlowNet("deconvolution_stride_2_same", targetId);
+    runTensorFlowNet("deconvolution_adj_pad_valid", targetId);
+    runTensorFlowNet("deconvolution_adj_pad_same", targetId);
 }

-OCL_TEST(Test_TensorFlow, deconvolution)
+TEST_P(Test_TensorFlow_layers, matmul)
 {
-    runTensorFlowNet("deconvolution", DNN_TARGET_OPENCL);
-    runTensorFlowNet("deconvolution_same", DNN_TARGET_OPENCL);
-    runTensorFlowNet("deconvolution_stride_2_same", DNN_TARGET_OPENCL);
-    runTensorFlowNet("deconvolution_adj_pad_valid", DNN_TARGET_OPENCL);
-    runTensorFlowNet("deconvolution_adj_pad_same", DNN_TARGET_OPENCL);
+    int targetId = GetParam();
+    runTensorFlowNet("matmul", targetId);
+    runTensorFlowNet("nhwc_reshape_matmul", targetId);
+    runTensorFlowNet("nhwc_transpose_reshape_matmul", targetId);
 }

-TEST(Test_TensorFlow, matmul)
+TEST_P(Test_TensorFlow_layers, reshape)
 {
-    runTensorFlowNet("matmul");
-    runTensorFlowNet("nhwc_reshape_matmul");
-    runTensorFlowNet("nhwc_transpose_reshape_matmul");
+    int targetId = GetParam();
+    runTensorFlowNet("shift_reshape_no_reorder", targetId);
+    runTensorFlowNet("reshape_reduce", targetId);
+    runTensorFlowNet("flatten", targetId, true);
 }

-TEST(Test_TensorFlow, defun)
-{
-    runTensorFlowNet("defun_dropout");
-}
+INSTANTIATE_TEST_CASE_P(/**/, Test_TensorFlow_layers, availableBackends());

-TEST(Test_TensorFlow, reshape)
-{
-    runTensorFlowNet("shift_reshape_no_reorder");
-    runTensorFlowNet("reshape_reduce");
-    runTensorFlowNet("flatten", DNN_TARGET_CPU, true);
-}
+typedef testing::TestWithParam<DNNTarget> Test_TensorFlow_nets;

-TEST(Test_TensorFlow, fp16)
-{
-    const float l1 = 1e-3;
-    const float lInf = 1e-2;
-    runTensorFlowNet("fp16_single_conv", DNN_TARGET_CPU, false, l1, lInf);
-    runTensorFlowNet("fp16_deconvolution", DNN_TARGET_CPU, false, l1, lInf);
-    runTensorFlowNet("fp16_max_pool_odd_same", DNN_TARGET_CPU, false, l1, lInf);
-    runTensorFlowNet("fp16_padding_valid", DNN_TARGET_CPU, false, l1, lInf);
-    runTensorFlowNet("fp16_eltwise_add_mul", DNN_TARGET_CPU, false, l1, lInf);
-    runTensorFlowNet("fp16_max_pool_odd_valid", DNN_TARGET_CPU, false, l1, lInf);
-    runTensorFlowNet("fp16_pad_and_concat", DNN_TARGET_CPU, false, l1, lInf);
-    runTensorFlowNet("fp16_max_pool_even", DNN_TARGET_CPU, false, l1, lInf);
-    runTensorFlowNet("fp16_padding_same", DNN_TARGET_CPU, false, l1, lInf);
-}
-
-TEST(Test_TensorFlow, quantized)
-{
-    runTensorFlowNet("uint8_single_conv");
-}
-
-TEST(Test_TensorFlow, MobileNet_SSD)
+TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
 {
     std::string netPath = findDataFile("dnn/ssd_mobilenet_v1_coco.pb", false);
     std::string netConfig = findDataFile("dnn/ssd_mobilenet_v1_coco.pbtxt", false);
@@ -249,17 +231,20 @@ TEST(Test_TensorFlow, MobileNet_SSD)
     }

     Net net = readNetFromTensorflow(netPath, netConfig);
+    net.setPreferableTarget(GetParam());
     net.setInput(inp);

     std::vector<Mat> output;
     net.forward(output, outNames);

-    normAssert(target[0].reshape(1, 1), output[0].reshape(1, 1));
+    normAssert(target[0].reshape(1, 1), output[0].reshape(1, 1), "", 1e-5, 1.5e-4);
     normAssert(target[1].reshape(1, 1), output[1].reshape(1, 1), "", 1e-5, 3e-4);
     normAssert(target[2].reshape(1, 1), output[2].reshape(1, 1), "", 4e-5, 1e-2);
 }

-TEST(Test_TensorFlow, Inception_v2_SSD)
+TEST_P(Test_TensorFlow_nets, Inception_v2_SSD)
 {
     std::string proto = findDataFile("dnn/ssd_inception_v2_coco_2017_11_17.pbtxt", false);
     std::string model = findDataFile("dnn/ssd_inception_v2_coco_2017_11_17.pb", false);
@@ -268,6 +253,8 @@ TEST(Test_TensorFlow, Inception_v2_SSD)
     Mat img = imread(findDataFile("dnn/street.png", false));
     Mat blob = blobFromImage(img, 1.0f / 127.5, Size(300, 300), Scalar(127.5, 127.5, 127.5), true, false);

+    net.setPreferableTarget(GetParam());
+
     net.setInput(blob);
     // Output has shape 1x1xNx7 where N - number of detections.
     // An every detection is a vector of values [id, classId, confidence, left, top, right, bottom]
@@ -289,74 +276,57 @@ TEST(Test_TensorFlow, Inception_v2_SSD)
     normAssert(detections, ref);
 }

-OCL_TEST(Test_TensorFlow, MobileNet_SSD)
-{
-    std::string netPath = findDataFile("dnn/ssd_mobilenet_v1_coco.pb", false);
-    std::string netConfig = findDataFile("dnn/ssd_mobilenet_v1_coco.pbtxt", false);
-    std::string imgPath = findDataFile("dnn/street.png", false);
-
-    Mat inp;
-    resize(imread(imgPath), inp, Size(300, 300));
-    inp = blobFromImage(inp, 1.0f / 127.5, Size(), Scalar(127.5, 127.5, 127.5), true);
-
-    std::vector<String> outNames(3);
-    outNames[0] = "concat";
-    outNames[1] = "concat_1";
-    outNames[2] = "detection_out";
-
-    std::vector<Mat> target(outNames.size());
-    for (int i = 0; i < outNames.size(); ++i)
-    {
-        std::string path = findDataFile("dnn/tensorflow/ssd_mobilenet_v1_coco." + outNames[i] + ".npy", false);
-        target[i] = blobFromNPY(path);
-    }
-
-    Net net = readNetFromTensorflow(netPath, netConfig);
-
-    net.setPreferableBackend(DNN_BACKEND_DEFAULT);
-    net.setPreferableTarget(DNN_TARGET_OPENCL);
-
-    net.setInput(inp);
-
-    std::vector<Mat> output;
-    net.forward(output, outNames);
-
-    normAssert(target[0].reshape(1, 1), output[0].reshape(1, 1), "", 1e-5, 1.5e-4);
-    normAssert(target[1].reshape(1, 1), output[1].reshape(1, 1), "", 1e-5, 3e-4);
-    normAssert(target[2].reshape(1, 1), output[2].reshape(1, 1), "", 4e-5, 1e-2);
-}
-
-OCL_TEST(Test_TensorFlow, Inception_v2_SSD)
+TEST_P(Test_TensorFlow_nets, opencv_face_detector_uint8)
 {
-    std::string proto = findDataFile("dnn/ssd_inception_v2_coco_2017_11_17.pbtxt", false);
-    std::string model = findDataFile("dnn/ssd_inception_v2_coco_2017_11_17.pb", false);
+    std::string proto = findDataFile("dnn/opencv_face_detector.pbtxt", false);
+    std::string model = findDataFile("dnn/opencv_face_detector_uint8.pb", false);

     Net net = readNetFromTensorflow(model, proto);
-    Mat img = imread(findDataFile("dnn/street.png", false));
-    Mat blob = blobFromImage(img, 1.0f / 127.5, Size(300, 300), Scalar(127.5, 127.5, 127.5), true, false);
+    Mat img = imread(findDataFile("gpu/lbpcascade/er.png", false));
+    Mat blob = blobFromImage(img, 1.0, Size(), Scalar(104.0, 177.0, 123.0), false, false);

-    net.setPreferableBackend(DNN_BACKEND_DEFAULT);
-    net.setPreferableTarget(DNN_TARGET_OPENCL);
+    net.setPreferableTarget(GetParam());

     net.setInput(blob);
     // Output has shape 1x1xNx7 where N - number of detections.
     // An every detection is a vector of values [id, classId, confidence, left, top, right, bottom]
     Mat out = net.forward();

-    out = out.reshape(1, out.total() / 7);
-    Mat detections;
-    for (int i = 0; i < out.rows; ++i)
-    {
-        if (out.at<float>(i, 2) > 0.5)
-            detections.push_back(out.row(i).colRange(1, 7));
-    }
-
-    Mat ref = (Mat_<float>(5, 6) << 1, 0.90176028, 0.19872092, 0.36311883, 0.26461923, 0.63498729,
-                                    3, 0.93569964, 0.64865261, 0.45906419, 0.80675775, 0.65708131,
-                                    3, 0.75838411, 0.44668293, 0.45907149, 0.49459291, 0.52197015,
-                                    10, 0.95932811, 0.38349164, 0.32528657, 0.40387636, 0.39165527,
-                                    10, 0.93973452, 0.66561931, 0.37841269, 0.68074018, 0.42907384);
-    normAssert(detections, ref);
+    // References are from test for Caffe model.
+    Mat ref = (Mat_<float>(6, 5) << 0.99520785, 0.80997437, 0.16379407, 0.87996572, 0.26685631,
+                                    0.9934696, 0.2831718, 0.50738752, 0.345781, 0.5985168,
+                                    0.99096733, 0.13629119, 0.24892329, 0.19756334, 0.3310290,
+                                    0.98977017, 0.23901358, 0.09084064, 0.29902688, 0.1769477,
+                                    0.97203469, 0.67965847, 0.06876482, 0.73999709, 0.1513494,
+                                    0.95097077, 0.51901293, 0.45863652, 0.5777427, 0.5347801);
+    normAssert(out.reshape(1, out.total() / 7).rowRange(0, 6).colRange(2, 7), ref, "", 2.8e-4, 3.4e-3);
 }
+INSTANTIATE_TEST_CASE_P(/**/, Test_TensorFlow_nets, availableBackends());

+TEST(Test_TensorFlow, defun)
+{
+    runTensorFlowNet("defun_dropout");
+}
+
+TEST(Test_TensorFlow, fp16)
+{
+    const float l1 = 1e-3;
+    const float lInf = 1e-2;
+    runTensorFlowNet("fp16_single_conv", DNN_TARGET_CPU, false, l1, lInf);
+    runTensorFlowNet("fp16_deconvolution", DNN_TARGET_CPU, false, l1, lInf);
+    runTensorFlowNet("fp16_max_pool_odd_same", DNN_TARGET_CPU, false, l1, lInf);
+    runTensorFlowNet("fp16_padding_valid", DNN_TARGET_CPU, false, l1, lInf);
+    runTensorFlowNet("fp16_eltwise_add_mul", DNN_TARGET_CPU, false, l1, lInf);
+    runTensorFlowNet("fp16_max_pool_odd_valid", DNN_TARGET_CPU, false, l1, lInf);
+    runTensorFlowNet("fp16_pad_and_concat", DNN_TARGET_CPU, false, l1, lInf);
+    runTensorFlowNet("fp16_max_pool_even", DNN_TARGET_CPU, false, l1, lInf);
+    runTensorFlowNet("fp16_padding_same", DNN_TARGET_CPU, false, l1, lInf);
+}
+
+TEST(Test_TensorFlow, quantized)
+{
+    runTensorFlowNet("uint8_single_conv");
+}

 TEST(Test_TensorFlow, lstm)
@@ -390,28 +360,4 @@ TEST(Test_TensorFlow, memory_read)
     runTensorFlowNet("batch_norm_text", DNN_TARGET_CPU, true, l1, lInf, true);
 }

-TEST(Test_TensorFlow, opencv_face_detector_uint8)
-{
-    std::string proto = findDataFile("dnn/opencv_face_detector.pbtxt", false);
-    std::string model = findDataFile("dnn/opencv_face_detector_uint8.pb", false);
-
-    Net net = readNetFromTensorflow(model, proto);
-    Mat img = imread(findDataFile("gpu/lbpcascade/er.png", false));
-    Mat blob = blobFromImage(img, 1.0, Size(), Scalar(104.0, 177.0, 123.0), false, false);
-
-    net.setInput(blob);
-    // Output has shape 1x1xNx7 where N - number of detections.
-    // An every detection is a vector of values [id, classId, confidence, left, top, right, bottom]
-    Mat out = net.forward();
-
-    // References are from test for Caffe model.
-    Mat ref = (Mat_<float>(6, 5) << 0.99520785, 0.80997437, 0.16379407, 0.87996572, 0.26685631,
-                                    0.9934696, 0.2831718, 0.50738752, 0.345781, 0.5985168,
-                                    0.99096733, 0.13629119, 0.24892329, 0.19756334, 0.3310290,
-                                    0.98977017, 0.23901358, 0.09084064, 0.29902688, 0.1769477,
-                                    0.97203469, 0.67965847, 0.06876482, 0.73999709, 0.1513494,
-                                    0.95097077, 0.51901293, 0.45863652, 0.5777427, 0.5347801);
-    normAssert(out.reshape(1, out.total() / 7).rowRange(0, 6).colRange(2, 7), ref, "", 2.8e-4, 3.4e-3);
-}
-
 }
modules/dnn/test/test_torch_importer.cpp
@@ -62,6 +62,21 @@ static std::string _tf(TStr filename, bool inTorchDir = true)
     return findDataFile(path, false);
 }

+CV_ENUM(DNNTarget, DNN_TARGET_CPU, DNN_TARGET_OPENCL)
+static testing::internal::ParamGenerator<DNNTarget> availableBackends()
+{
+    static std::vector<DNNTarget> targets;
+    if (targets.empty())
+    {
+        targets.push_back(DNN_TARGET_CPU);
+#ifdef HAVE_OPENCL
+        if (cv::ocl::useOpenCL())
+            targets.push_back(DNN_TARGET_OPENCL);
+#endif
+    }
+    return testing::ValuesIn(targets);
+}
+
 TEST(Torch_Importer, simple_read)
 {
     Net net;
@@ -100,219 +115,122 @@ static void runTorchNet(String prefix, int targetId = DNN_TARGET_CPU, String out
     }
 }

-TEST(Torch_Importer, run_convolution)
-{
-    runTorchNet("net_conv");
-}
-
-OCL_TEST(Torch_Importer, run_convolution)
-{
-    runTorchNet("net_conv", DNN_TARGET_OPENCL);
-}
-
-TEST(Torch_Importer, run_pool_max)
-{
-    runTorchNet("net_pool_max", DNN_TARGET_CPU, "", true);
-}
-
-OCL_TEST(Torch_Importer, run_pool_max)
-{
-    runTorchNet("net_pool_max", DNN_TARGET_OPENCL, "", true);
-}
-
-TEST(Torch_Importer, run_pool_ave)
-{
-    runTorchNet("net_pool_ave");
-}
-
-OCL_TEST(Torch_Importer, run_pool_ave)
-{
-    runTorchNet("net_pool_ave", DNN_TARGET_OPENCL);
-}
+typedef testing::TestWithParam<DNNTarget> Test_Torch_layers;

-TEST(Torch_Importer, run_reshape)
+TEST_P(Test_Torch_layers, run_convolution)
 {
-    runTorchNet("net_reshape");
-    runTorchNet("net_reshape_batch");
-    runTorchNet("net_reshape_single_sample");
-    runTorchNet("net_reshape_channels", DNN_TARGET_CPU, "", false, true);
-}
-
-TEST(Torch_Importer, run_linear)
-{
-    runTorchNet("net_linear_2d");
-}
-
-TEST(Torch_Importer, run_paralel)
-{
-    runTorchNet("net_parallel", DNN_TARGET_CPU, "l5_torchMerge");
+    runTorchNet("net_conv", GetParam());
 }

-TEST(Torch_Importer, run_concat)
+TEST_P(Test_Torch_layers, run_pool_max)
 {
-    runTorchNet("net_concat", DNN_TARGET_CPU, "l5_torchMerge");
-    runTorchNet("net_depth_concat", DNN_TARGET_CPU, "", false, true);
+    runTorchNet("net_pool_max", GetParam(), "", true);
 }

-OCL_TEST(Torch_Importer, run_concat)
+TEST_P(Test_Torch_layers, run_pool_ave)
 {
-    runTorchNet("net_concat", DNN_TARGET_OPENCL, "l5_torchMerge");
-    runTorchNet("net_depth_concat", DNN_TARGET_OPENCL, "", false, true);
+    runTorchNet("net_pool_ave", GetParam());
 }

-TEST(Torch_Importer, run_deconv)
+TEST_P(Test_Torch_layers, run_reshape)
 {
-    runTorchNet("net_deconv");
+    int targetId = GetParam();
+    runTorchNet("net_reshape", targetId);
+    runTorchNet("net_reshape_batch", targetId);
+    runTorchNet("net_reshape_single_sample", targetId);
+    runTorchNet("net_reshape_channels", targetId, "", false, true);
 }

-OCL_TEST(Torch_Importer, run_deconv)
+TEST_P(Test_Torch_layers, run_linear)
 {
-    runTorchNet("net_deconv", DNN_TARGET_OPENCL);
+    runTorchNet("net_linear_2d", GetParam());
 }

-TEST(Torch_Importer, run_batch_norm)
+TEST_P(Test_Torch_layers, run_concat)
 {
-    runTorchNet("net_batch_norm", DNN_TARGET_CPU, "", false, true);
+    int targetId = GetParam();
+    runTorchNet("net_concat", targetId, "l5_torchMerge");
+    runTorchNet("net_depth_concat", targetId, "", false, true);
 }

-OCL_TEST(Torch_Importer, run_batch_norm)
+TEST_P(Test_Torch_layers, run_deconv)
 {
-    runTorchNet("net_batch_norm", DNN_TARGET_OPENCL, "", false, true);
+    runTorchNet("net_deconv", GetParam());
 }

-TEST(Torch_Importer, net_prelu)
+TEST_P(Test_Torch_layers, run_batch_norm)
 {
-    runTorchNet("net_prelu");
+    runTorchNet("net_batch_norm", GetParam(), "", false, true);
 }

-TEST(Torch_Importer, net_cadd_table)
+TEST_P(Test_Torch_layers, net_prelu)
 {
-    runTorchNet("net_cadd_table");
+    runTorchNet("net_prelu", GetParam());
 }

-TEST(Torch_Importer, net_softmax)
+TEST_P(Test_Torch_layers, net_cadd_table)
 {
-    runTorchNet("net_softmax");
-    runTorchNet("net_softmax_spatial");
+    runTorchNet("net_cadd_table", GetParam());
 }

-OCL_TEST(Torch_Importer, net_softmax)
+TEST_P(Test_Torch_layers, net_softmax)
 {
-    runTorchNet("net_softmax", DNN_TARGET_OPENCL);
-    runTorchNet("net_softmax_spatial", DNN_TARGET_OPENCL);
+    int targetId = GetParam();
+    runTorchNet("net_softmax", targetId);
+    runTorchNet("net_softmax_spatial", targetId);
 }

-TEST(Torch_Importer, net_logsoftmax)
+TEST_P(Test_Torch_layers, net_logsoftmax)
 {
     runTorchNet("net_logsoftmax");
     runTorchNet("net_logsoftmax_spatial");
 }

-OCL_TEST(Torch_Importer, net_logsoftmax)
+TEST_P(Test_Torch_layers, net_lp_pooling)
 {
-    runTorchNet("net_logsoftmax", DNN_TARGET_OPENCL);
-    runTorchNet("net_logsoftmax_spatial", DNN_TARGET_OPENCL);
+    int targetId = GetParam();
+    runTorchNet("net_lp_pooling_square", targetId, "", false, true);
+    runTorchNet("net_lp_pooling_power", targetId, "", false, true);
 }

-TEST(Torch_Importer, net_lp_pooling)
+TEST_P(Test_Torch_layers, net_conv_gemm_lrn)
 {
-    runTorchNet("net_lp_pooling_square", DNN_TARGET_CPU, "", false, true);
-    runTorchNet("net_lp_pooling_power", DNN_TARGET_CPU, "", false, true);
+    runTorchNet("net_conv_gemm_lrn", GetParam(), "", false, true);
 }

-TEST(Torch_Importer, net_conv_gemm_lrn)
+TEST_P(Test_Torch_layers, net_inception_block)
 {
-    runTorchNet("net_conv_gemm_lrn", DNN_TARGET_CPU, "", false, true);
+    runTorchNet("net_inception_block", GetParam(), "", false, true);
 }

-TEST(Torch_Importer, net_inception_block)
+TEST_P(Test_Torch_layers, net_normalize)
 {
-    runTorchNet("net_inception_block", DNN_TARGET_CPU, "", false, true);
+    runTorchNet("net_normalize", GetParam(), "", false, true);
 }

-TEST(Torch_Importer, net_normalize)
+TEST_P(Test_Torch_layers, net_padding)
 {
-    runTorchNet("net_normalize", DNN_TARGET_CPU, "", false, true);
+    int targetId = GetParam();
+    runTorchNet("net_padding", targetId, "", false, true);
+    runTorchNet("net_spatial_zero_padding", targetId, "", false, true);
+    runTorchNet("net_spatial_reflection_padding", targetId, "", false, true);
 }

-OCL_TEST(Torch_Importer, net_normalize)
+TEST_P(Test_Torch_layers, net_non_spatial)
 {
-    runTorchNet("net_normalize", DNN_TARGET_OPENCL, "", false, true);
+    runTorchNet("net_non_spatial", GetParam(), "", false, true);
 }

-TEST(Torch_Importer, net_padding)
-{
-    runTorchNet("net_padding", DNN_TARGET_CPU, "", false, true);
-    runTorchNet("net_spatial_zero_padding", DNN_TARGET_CPU, "", false, true);
-    runTorchNet("net_spatial_reflection_padding", DNN_TARGET_CPU, "", false, true);
-}
-
-TEST(Torch_Importer, net_non_spatial)
-{
-    runTorchNet("net_non_spatial", DNN_TARGET_CPU, "", false, true);
-}
+INSTANTIATE_TEST_CASE_P(/**/, Test_Torch_layers, availableBackends());

-OCL_TEST(Torch_Importer, net_non_spatial)
-{
-    runTorchNet("net_non_spatial", DNN_TARGET_OPENCL, "", false, true);
-}
+typedef testing::TestWithParam<DNNTarget> Test_Torch_nets;

-TEST(Torch_Importer, ENet_accuracy)
-{
-    Net net;
-    {
-        const string model = findDataFile("dnn/Enet-model-best.net", false);
-        net = readNetFromTorch(model, true);
-        ASSERT_FALSE(net.empty());
-    }
-
-    Mat sample = imread(_tf("street.png", false));
-    Mat inputBlob = blobFromImage(sample, 1./255);
-
-    net.setInput(inputBlob, "");
-    Mat out = net.forward();
-    Mat ref = blobFromNPY(_tf("torch_enet_prob.npy", false));
-    // Due to numerical instability in Pooling-Unpooling layers (indexes jittering)
-    // thresholds for ENet must be changed. Accuracy of resuults was checked on
-    // Cityscapes dataset and difference in mIOU with Torch is 10E-4%
-    normAssert(ref, out, "", 0.00044, 0.44);
-
-    const int N = 3;
-    for (int i = 0; i < N; i++)
-    {
-        net.setInput(inputBlob, "");
-        Mat out = net.forward();
-        normAssert(ref, out, "", 0.00044, 0.44);
-    }
-}
-
-TEST(Torch_Importer, OpenFace_accuracy)
+TEST_P(Test_Torch_nets, OpenFace_accuracy)
 {
     const string model = findDataFile("dnn/openface_nn4.small2.v1.t7", false);
     Net net = readNetFromTorch(model);

-    Mat sample = imread(findDataFile("cv/shared/lena.png", false));
-    Mat sampleF32(sample.size(), CV_32FC3);
-    sample.convertTo(sampleF32, sampleF32.type());
-    sampleF32 /= 255;
-    resize(sampleF32, sampleF32, Size(96, 96), 0, 0, INTER_NEAREST);
-
-    Mat inputBlob = blobFromImage(sampleF32);
-
-    net.setInput(inputBlob);
-    Mat out = net.forward();
-
-    Mat outRef = readTorchBlob(_tf("net_openface_output.dat"), true);
-    normAssert(out, outRef);
-}
-
-OCL_TEST(Torch_Importer, OpenFace_accuracy)
-{
-    const string model = findDataFile("dnn/openface_nn4.small2.v1.t7", false);
-    Net net = readNetFromTorch(model);
-
-    net.setPreferableBackend(DNN_BACKEND_DEFAULT);
-    net.setPreferableTarget(DNN_TARGET_OPENCL);
+    net.setPreferableTarget(GetParam());

     Mat sample = imread(findDataFile("cv/shared/lena.png", false));
     Mat sampleF32(sample.size(), CV_32FC3);
@@ -329,7 +247,7 @@ OCL_TEST(Torch_Importer, OpenFace_accuracy)
     normAssert(out, outRef);
 }

-OCL_TEST(Torch_Importer, ENet_accuracy)
+TEST_P(Test_Torch_nets, ENet_accuracy)
 {
     Net net;
     {
@@ -338,8 +256,7 @@ OCL_TEST(Torch_Importer, ENet_accuracy)
         ASSERT_TRUE(!net.empty());
     }

-    net.setPreferableBackend(DNN_BACKEND_DEFAULT);
-    net.setPreferableTarget(DNN_TARGET_OPENCL);
+    net.setPreferableTarget(GetParam());

     Mat sample = imread(_tf("street.png", false));
     Mat inputBlob = blobFromImage(sample, 1./255);
@@ -374,7 +291,7 @@ OCL_TEST(Torch_Importer, ENet_accuracy)
 // -median_filter 0 \
 // -image_size 0 \
 // -model models/instance_norm/feathers.t7
-TEST(Torch_Importer, FastNeuralStyle_accuracy)
+TEST_P(Test_Torch_nets, FastNeuralStyle_accuracy)
 {
     std::string models[] = {"dnn/fast_neural_style_eccv16_starry_night.t7",
                             "dnn/fast_neural_style_instance_norm_feathers.t7"};
@@ -385,6 +302,8 @@ TEST(Torch_Importer, FastNeuralStyle_accuracy)
         const string model = findDataFile(models[i], false);
         Net net = readNetFromTorch(model);

+        net.setPreferableTarget(GetParam());
+
         Mat img = imread(findDataFile("dnn/googlenet_1.png", false));
         Mat inputBlob = blobFromImage(img, 1.0, Size(), Scalar(103.939, 116.779, 123.68), false);
@@ -404,37 +323,17 @@ TEST(Torch_Importer, FastNeuralStyle_accuracy)
     }
 }

-OCL_TEST(Torch_Importer, FastNeuralStyle_accuracy)
-{
-    std::string models[] = {"dnn/fast_neural_style_eccv16_starry_night.t7",
-                            "dnn/fast_neural_style_instance_norm_feathers.t7"};
-    std::string targets[] = {"dnn/lena_starry_night.png", "dnn/lena_feathers.png"};
-
-    for (int i = 0; i < 2; ++i)
-    {
-        const string model = findDataFile(models[i], false);
-        Net net = readNetFromTorch(model);
-
-        net.setPreferableBackend(DNN_BACKEND_DEFAULT);
-        net.setPreferableTarget(DNN_TARGET_OPENCL);
-
-        Mat img = imread(findDataFile("dnn/googlenet_1.png", false));
-        Mat inputBlob = blobFromImage(img, 1.0, Size(), Scalar(103.939, 116.779, 123.68), false);
-
-        net.setInput(inputBlob);
-        Mat out = net.forward();
-
-        // Deprocessing.
-        getPlane(out, 0, 0) += 103.939;
-        getPlane(out, 0, 1) += 116.779;
-        getPlane(out, 0, 2) += 123.68;
-        out = cv::min(cv::max(0, out), 255);
-
-        Mat ref = imread(findDataFile(targets[i]));
-        Mat refBlob = blobFromImage(ref, 1.0, Size(), Scalar(), false);
-
-        normAssert(out, refBlob, "", 0.5, 1.1);
-    }
-}
+INSTANTIATE_TEST_CASE_P(/**/, Test_Torch_nets, availableBackends());
+
+// TODO: fix OpenCL and add to the rest of tests
+TEST(Torch_Importer, run_paralel)
+{
+    runTorchNet("net_parallel", DNN_TARGET_CPU, "l5_torchMerge");
+}
+
+TEST(Torch_Importer, DISABLED_run_paralel)
+{
+    runTorchNet("net_parallel", DNN_TARGET_OPENCL, "l5_torchMerge");
+}

 }