opencv / Commits / d891e9b1

Commit d891e9b1, authored 7 years ago by Dmitry Kurtaev

Layers for MobileNet from TensorFlow

This commit adds what the TensorFlow importer needs for MobileNet: a ReLU6 activation layer (declared, registered, and implemented with SIMD and Halide paths), DepthwiseConv2dNative support, broader constant-tensor parsing in getTensorContent, scalar-versus-vector handling for Mul nodes, and extended tests.

parent 6bf8fe81

Showing 5 changed files with 148 additions and 16 deletions:

- modules/dnn/include/opencv2/dnn/all_layers.hpp (+6, -0)
- modules/dnn/src/init.cpp (+1, -0)
- modules/dnn/src/layers/elementwise_layers.cpp (+65, -0)
- modules/dnn/src/tensorflow/tf_importer.cpp (+67, -14)
- modules/dnn/test/test_tf_importer.cpp (+9, -2)
modules/dnn/include/opencv2/dnn/all_layers.hpp

```diff
@@ -359,6 +359,12 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
         static Ptr<ReLULayer> create(const LayerParams& params);
     };
 
+    class CV_EXPORTS ReLU6Layer : public ActivationLayer
+    {
+    public:
+        static Ptr<ReLU6Layer> create(const LayerParams& params);
+    };
+
     class CV_EXPORTS ChannelsPReLULayer : public ActivationLayer
     {
     public:
```
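ReLU6 is the clamped activation MobileNet relies on: f(x) = min(max(x, 0), 6). A minimal sketch of driving the new layer once it is registered (see init.cpp below); the manual net construction and sample values here are illustrative, not part of the commit:

```cpp
#include <opencv2/dnn.hpp>
#include <iostream>

int main()
{
    // Build a one-layer net around the new activation.
    cv::dnn::LayerParams lp;
    lp.name = "relu6";
    lp.type = "ReLU6";
    lp.set("min_value", 0.0f);  // defaults shown explicitly
    lp.set("max_value", 6.0f);

    cv::dnn::Net net;
    int id = net.addLayer(lp.name, lp.type, lp);
    net.connect(0, 0, id, 0);   // wire the net input into the layer

    float data[] = {-2.0f, 3.0f, 9.0f};
    int shape[] = {1, 3, 1, 1};
    cv::Mat input(4, shape, CV_32F, data);
    net.setInput(input);

    cv::Mat out = net.forward();
    // Expect [0, 3, 6]: values are clamped into [min_value, max_value].
    std::cout << out.reshape(1, 1) << std::endl;
    return 0;
}
```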
modules/dnn/src/init.cpp

```diff
@@ -94,6 +94,7 @@ void initializeLayerFactory()
     CV_DNN_REGISTER_LAYER_CLASS(LPNormalize,    LPNormalizeLayer);
     CV_DNN_REGISTER_LAYER_CLASS(ReLU,           ReLULayer);
+    CV_DNN_REGISTER_LAYER_CLASS(ReLU6,          ReLU6Layer);
     CV_DNN_REGISTER_LAYER_CLASS(ChannelsPReLU,  ChannelsPReLULayer);
     CV_DNN_REGISTER_LAYER_CLASS(Sigmoid,        SigmoidLayer);
     CV_DNN_REGISTER_LAYER_CLASS(TanH,           TanHLayer);
```
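The macro binds the string type name "ReLU6" to ReLU6Layer::create, which is how importers instantiate layers by name. A quick runtime check of that mapping, assuming a build that contains this commit:

```cpp
#include <opencv2/dnn.hpp>
#include <iostream>

int main()
{
    // If the registration above took effect, the factory can build the
    // layer from its string name alone.
    cv::dnn::LayerParams lp;
    cv::Ptr<cv::dnn::Layer> layer =
        cv::dnn::LayerFactory::createLayerInstance("ReLU6", lp);
    std::cout << (layer.empty() ? "ReLU6: not registered"
                                : "ReLU6: registered") << std::endl;
    return 0;
}
```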
modules/dnn/src/layers/elementwise_layers.cpp

```diff
@@ -248,6 +248,62 @@ struct ReLUFunctor
     int64 getFLOPSPerElement() const { return 1; }
 };
 
+struct ReLU6Functor
+{
+    typedef ReLU6Layer Layer;
+    float minValue, maxValue;
+
+    ReLU6Functor(float minValue_ = 0.0f, float maxValue_ = 6.0f)
+        : minValue(minValue_), maxValue(maxValue_)
+    {
+        CV_Assert(minValue <= maxValue);
+    }
+
+    void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
+    {
+        for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
+        {
+            int i = 0;
+#if CV_SIMD128
+            v_float32x4 minV = v_setall_f32(minValue), maxV = v_setall_f32(maxValue);
+            for( ; i <= len - 16; i += 16 )
+            {
+                v_float32x4 x0 = v_load(srcptr + i);
+                v_float32x4 x1 = v_load(srcptr + i + 4);
+                v_float32x4 x2 = v_load(srcptr + i + 8);
+                v_float32x4 x3 = v_load(srcptr + i + 12);
+                x0 = v_min(v_max(minV, x0), maxV);
+                x1 = v_min(v_max(minV, x1), maxV);
+                x2 = v_min(v_max(minV, x2), maxV);
+                x3 = v_min(v_max(minV, x3), maxV);
+                v_store(dstptr + i, x0);
+                v_store(dstptr + i + 4, x1);
+                v_store(dstptr + i + 8, x2);
+                v_store(dstptr + i + 12, x3);
+            }
+#endif
+            for( ; i < len; i++ )
+            {
+                float x = srcptr[i];
+                if (x >= minValue)
+                    dstptr[i] = x <= maxValue ? x : maxValue;
+                else
+                    dstptr[i] = minValue;
+            }
+        }
+    }
+
+#ifdef HAVE_HALIDE
+    void attachHalide(const Halide::Expr& input, Halide::Func& top)
+    {
+        Halide::Var x("x"), y("y"), c("c"), n("n");
+        top(x, y, c, n) = clamp(input, minValue, maxValue);
+    }
+#endif  // HAVE_HALIDE
+
+    int64 getFLOPSPerElement() const { return 2; }
+};
+
 struct TanHFunctor
 {
     typedef TanHLayer Layer;
@@ -517,6 +573,15 @@ Ptr<ReLULayer> ReLULayer::create(const LayerParams& params)
     return l;
 }
 
+Ptr<ReLU6Layer> ReLU6Layer::create(const LayerParams& params)
+{
+    float minValue = params.get<float>("min_value", 0.0f);
+    float maxValue = params.get<float>("max_value", 6.0f);
+    Ptr<ReLU6Layer> l(new ElementWiseLayer<ReLU6Functor>(ReLU6Functor(minValue, maxValue)));
+    l->setParamsFrom(params);
+
+    return l;
+}
+
 Ptr<TanHLayer> TanHLayer::create(const LayerParams& params)
 {
     Ptr<TanHLayer> l(new ElementWiseLayer<TanHFunctor>());
```
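Both the SIMD path (16 floats per iteration) and the scalar tail compute the same clamp, and getFLOPSPerElement() reports 2 for the min/max pair. A dependency-free reference of that semantics:

```cpp
#include <algorithm>
#include <cassert>

// Scalar reference of what both loops above compute:
// ReLU6(x) = min(max(x, minValue), maxValue), with MobileNet's [0, 6] range.
static float relu6_ref(float x, float minValue = 0.0f, float maxValue = 6.0f)
{
    return std::min(std::max(x, minValue), maxValue);
}

int main()
{
    assert(relu6_ref(-1.5f) == 0.0f);  // below the range -> minValue
    assert(relu6_ref(3.0f)  == 3.0f);  // inside the range -> unchanged
    assert(relu6_ref(7.25f) == 6.0f);  // above the range -> maxValue
    return 0;
}
```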
modules/dnn/src/tensorflow/tf_importer.cpp

```diff
@@ -85,11 +85,38 @@ static Mat getTensorContent(const tensorflow::TensorProto &tensor)
     switch (tensor.dtype())
     {
         case tensorflow::DT_FLOAT:
-            return Mat(1, content.size() / sizeof(float), CV_32FC1, (void*)content.c_str()).clone();
+        {
+            if (!content.empty())
+                return Mat(1, content.size() / sizeof(float), CV_32FC1, (void*)content.c_str()).clone();
+            else
+            {
+                const RepeatedField<float>& field = tensor.float_val();
+                CV_Assert(!field.empty());
+                return Mat(1, field.size(), CV_32FC1, (void*)field.data()).clone();
+            }
+        }
         case tensorflow::DT_DOUBLE:
-            return Mat(1, content.size() / sizeof(double), CV_64FC1, (void*)content.c_str()).clone();
+        {
+            if (!content.empty())
+                return Mat(1, content.size() / sizeof(double), CV_64FC1, (void*)content.c_str()).clone();
+            else
+            {
+                const RepeatedField<double>& field = tensor.double_val();
+                CV_Assert(!field.empty());
+                return Mat(1, field.size(), CV_64FC1, (void*)field.data()).clone();
+            }
+        }
+        case tensorflow::DT_INT32:
+        {
+            if (!content.empty())
+                return Mat(1, content.size() / sizeof(int32_t), CV_32SC1, (void*)content.c_str()).clone();
+            else
+            {
+                const RepeatedField<int32_t>& field = tensor.int_val();
+                CV_Assert(!field.empty());
+                return Mat(1, field.size(), CV_32SC1, (void*)field.data()).clone();
+            }
+        }
         case tensorflow::DT_HALF:
         {
             Mat halfs;
```
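A TensorFlow TensorProto stores data either packed in tensor_content bytes or in a typed repeated field such as float_val; each case above now handles both. The wrap-then-clone idiom the branches rely on, in isolation (sample values are illustrative):

```cpp
#include <opencv2/core.hpp>
#include <iostream>
#include <string>

int main()
{
    // Stand-in for TensorProto::tensor_content(): raw little-endian floats.
    const float values[] = {1.f, 2.f, 3.f};
    std::string content(reinterpret_cast<const char*>(values), sizeof(values));

    // Same idiom as the DT_FLOAT branch: build a Mat header over the bytes
    // (no copy), then clone() so the Mat owns its data after `content` dies.
    cv::Mat m = cv::Mat(1, (int)(content.size() / sizeof(float)), CV_32FC1,
                        (void*)content.c_str()).clone();
    std::cout << m << std::endl;  // [1, 2, 3]
    return 0;
}
```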
```diff
@@ -573,7 +600,7 @@ void TFImporter::populateNet(Net dstNet)
         if (layers_to_ignore.find(li) != layers_to_ignore.end())
             continue;
 
-        if (type == "Conv2D" || type == "SpaceToBatchND")
+        if (type == "Conv2D" || type == "SpaceToBatchND" || type == "DepthwiseConv2dNative")
         {
             // The first node of dilated convolution subgraph.
             // Extract input node, dilation rate and paddings.
```
```diff
@@ -621,7 +648,28 @@ void TFImporter::populateNet(Net dstNet)
             }
 
             kernelFromTensor(getConstBlob(layer, value_id), layerParams.blobs[0]);
-            const int* kshape = layerParams.blobs[0].size.p;
+            int* kshape = layerParams.blobs[0].size.p;
+            if (type == "DepthwiseConv2dNative")
+            {
+                const int chMultiplier = kshape[0];
+                const int inCh = kshape[1];
+                const int height = kshape[2];
+                const int width = kshape[3];
+
+                Mat copy = layerParams.blobs[0].clone();
+                float* src = (float*)copy.data;
+                float* dst = (float*)layerParams.blobs[0].data;
+                for (int i = 0; i < chMultiplier; ++i)
+                    for (int j = 0; j < inCh; ++j)
+                        for (int s = 0; s < height * width; ++s)
+                        {
+                            int src_i = (i * inCh + j) * height * width + s;
+                            int dst_i = (j * chMultiplier + i) * height * width + s;
+                            dst[dst_i] = src[src_i];
+                        }
+                kshape[0] = inCh * chMultiplier;
+                kshape[1] = 1;
+            }
             layerParams.set("kernel_h", kshape[2]);
             layerParams.set("kernel_w", kshape[3]);
             layerParams.set("num_output", kshape[0]);
```
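After kernelFromTensor, a depthwise kernel blob is laid out as [chMultiplier, inCh, H, W]; the loop above regroups it to [inCh * chMultiplier, 1, H, W], so every multiplier plane of one input channel becomes its own output channel. A toy-sized illustration of the same index remapping:

```cpp
#include <iostream>
#include <vector>

int main()
{
    // Toy version of the permutation above: chMultiplier=2, inCh=3, H*W=1.
    const int chMultiplier = 2, inCh = 3, spatial = 1;
    std::vector<int> src(chMultiplier * inCh * spatial);
    for (size_t k = 0; k < src.size(); ++k) src[k] = (int)k;

    std::vector<int> dst(src.size());
    for (int i = 0; i < chMultiplier; ++i)
        for (int j = 0; j < inCh; ++j)
            for (int s = 0; s < spatial; ++s)
                dst[(j * chMultiplier + i) * spatial + s] =
                    src[(i * inCh + j) * spatial + s];

    // Prints "0 3 1 4 2 5": the two multiplier planes of each input channel
    // are now adjacent, matching the [inCh * chMultiplier, 1, H, W] layout.
    for (int v : dst) std::cout << v << ' ';
    std::cout << std::endl;
    return 0;
}
```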
```diff
@@ -689,6 +737,10 @@ void TFImporter::populateNet(Net dstNet)
             layerParams.blobs.resize(1);
 
             StrIntVector next_layers = getNextLayers(net, name, "BiasAdd");
+            if (next_layers.empty())
+            {
+                next_layers = getNextLayers(net, name, "Add");
+            }
             if (next_layers.size() == 1)
             {
                 layerParams.set("bias_term", true);
                 layerParams.blobs.resize(2);
```
```diff
@@ -840,20 +892,20 @@ void TFImporter::populateNet(Net dstNet)
         {
             // Multiplication by constant.
             CV_Assert(layer.input_size() == 2);
+            Mat scaleMat = getTensorContent(getConstBlob(layer, value_id));
+            CV_Assert(scaleMat.type() == CV_32FC1);
 
-            float scale;
-            if (!getConstBlob(layer, value_id).float_val().empty())
-                scale = getConstBlob(layer, value_id).float_val()[0];
-            else
+            int id;
+            if (scaleMat.total() == 1)  // is a scalar.
             {
-                Mat scaleMat;
-                blobFromTensor(getConstBlob(layer, value_id), scaleMat);
-                CV_Assert(scaleMat.total() == 1 && scaleMat.type() == CV_32FC1);
-                scale = scaleMat.at<float>(0, 0);
+                layerParams.set("scale", scaleMat.at<float>(0));
+                id = dstNet.addLayer(name, "Power", layerParams);
             }
-            layerParams.set("scale", scale);
-
-            int id = dstNet.addLayer(name, "Power", layerParams);
+            else  // is a vector
+            {
+                layerParams.blobs.resize(1, scaleMat);
+                id = dstNet.addLayer(name, "Scale", layerParams);
+            }
             layer_id[name] = id;
 
             Pin inp0 = parsePin(layer.input(0));
```
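A Mul by a single scalar now becomes a Power layer (which computes y = (scale*x + shift)^power, here with shift = 0 and power = 1), while a per-channel vector becomes a Scale layer carrying the vector as a blob. A hedged sketch of the scalar path; the layer name and values are illustrative:

```cpp
#include <opencv2/dnn.hpp>
#include <iostream>

int main()
{
    // Scalar path: "Power" with scale=2 multiplies every element by 2.
    cv::dnn::LayerParams lp;
    lp.name = "mul_by_two";
    lp.type = "Power";
    lp.set("scale", 2.0f);

    cv::dnn::Net net;
    int id = net.addLayer(lp.name, lp.type, lp);
    net.connect(0, 0, id, 0);

    int shape[] = {1, 1, 1, 3};
    cv::Mat input(4, shape, CV_32F, cv::Scalar(3.0f));
    net.setInput(input);
    std::cout << net.forward().reshape(1, 1) << std::endl;  // [6, 6, 6]
    return 0;
}
```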
```diff
@@ -1006,12 +1058,13 @@ void TFImporter::populateNet(Net dstNet)
         }
         else if (type == "Abs" || type == "Tanh" || type == "Sigmoid" ||
                  type == "Relu" || type == "Elu" || type == "Softmax" ||
-                 type == "Identity")
+                 type == "Identity" || type == "Relu6")
         {
             std::string dnnType = type;
             if (type == "Abs") dnnType = "AbsVal";
             else if (type == "Tanh") dnnType = "TanH";
             else if (type == "Relu") dnnType = "ReLU";
+            else if (type == "Relu6") dnnType = "ReLU6";
             else if (type == "Elu") dnnType = "ELU";
 
             int id = dstNet.addLayer(name, dnnType, layerParams);
```
modules/dnn/test/test_tf_importer.cpp

```diff
@@ -93,11 +93,12 @@ static void runTensorFlowNet(const std::string& prefix,
     normAssert(target, output, "", l1, lInf);
 }
 
-TEST(Test_TensorFlow, single_conv)
+TEST(Test_TensorFlow, conv)
 {
     runTensorFlowNet("single_conv");
     runTensorFlowNet("atrous_conv2d_valid");
     runTensorFlowNet("atrous_conv2d_same");
+    runTensorFlowNet("depthwise_conv2d");
 }
 
 TEST(Test_TensorFlow, padding)
@@ -116,8 +117,9 @@ TEST(Test_TensorFlow, pad_and_concat)
     runTensorFlowNet("pad_and_concat");
 }
 
-TEST(Test_TensorFlow, fused_batch_norm)
+TEST(Test_TensorFlow, batch_norm)
 {
+    runTensorFlowNet("batch_norm");
     runTensorFlowNet("fused_batch_norm");
 }
@@ -133,6 +135,11 @@ TEST(Test_TensorFlow, deconvolution)
     runTensorFlowNet("deconvolution");
 }
 
+TEST(Test_TensorFlow, matmul)
+{
+    runTensorFlowNet("matmul");
+}
+
 TEST(Test_TensorFlow, fp16)
 {
     const float l1 = 1e-3;
```
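With ReLU6, DepthwiseConv2dNative, and the extended constant handling in place, a frozen TensorFlow MobileNet graph should import end to end. A hedged sketch, assuming hypothetical file names and the common [-1, 1] MobileNet preprocessing:

```cpp
#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>

int main()
{
    // File names are illustrative; preprocessing depends on how the model
    // was exported.
    cv::dnn::Net net = cv::dnn::readNetFromTensorflow("mobilenet_frozen.pb");

    cv::Mat img = cv::imread("example.jpg");
    // Maps pixels to [-1, 1]: (pixel - 127.5) / 127.5.
    cv::Mat blob = cv::dnn::blobFromImage(img, 1.0 / 127.5, cv::Size(224, 224),
                                          cv::Scalar(127.5, 127.5, 127.5),
                                          /*swapRB=*/true);
    net.setInput(blob);
    cv::Mat prob = net.forward();

    cv::Point classId;
    double confidence;
    cv::minMaxLoc(prob.reshape(1, 1), 0, &confidence, 0, &classId);
    std::cout << "class " << classId.x << " (" << confidence << ")\n";
    return 0;
}
```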