opencv / Commits / 1c092a18

Commit 1c092a18, authored 5 years ago by Alexander Alekhin

Merge pull request #14454 from dkurt:dnn_tf_subgraph_fusion

Parents: ddfaa41f 9408c3e6
Showing 3 changed files with 63 additions and 28 deletions (+63 -28):

    modules/dnn/src/tensorflow/tf_graph_simplifier.cpp   +44 -21
    modules/dnn/src/tensorflow/tf_importer.cpp            +12 -7
    modules/dnn/test/test_tf_importer.cpp                 +7 -0
modules/dnn/src/tensorflow/tf_graph_simplifier.cpp
@@ -79,7 +79,7 @@ public:
         }
     }
 
-    static const tensorflow::NodeDef& getInputNode(const tensorflow::GraphDef& net,
+    static int getInputNodeId(const tensorflow::GraphDef& net,
                                                     const tensorflow::NodeDef& node,
                                                     int inpId)
     {
@@ -92,7 +92,7 @@ public:
         for (int i = 0; i < numNodes; ++i)
         {
             if (net.node(i).name() == name)
-                return net.node(i);
+                return i;
         }
         CV_Error(Error::StsParseError, "Input node with name " + name + " not found");
     }
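Aside: returning a node index here (rather than a const reference) is what later lets callers fetch a mutable pointer with net.mutable_node(...). A minimal sketch of that lookup pattern, using hypothetical stand-in types instead of the real tensorflow protobufs:

// Toy illustration of index-based lookup; Node and Graph are hypothetical
// stand-ins, not the tensorflow::NodeDef / tensorflow::GraphDef types.
#include <stdexcept>
#include <string>
#include <vector>

struct Node { std::string name, op; };
struct Graph { std::vector<Node> nodes; };

// An index works for both read-only access (g.nodes[id]) and
// mutation (&g.nodes[id]); a const reference would not.
static int getNodeIdByName(const Graph& g, const std::string& name)
{
    for (int i = 0; i < (int)g.nodes.size(); ++i)
        if (g.nodes[i].name == name)
            return i;
    throw std::runtime_error("Input node with name " + name + " not found");
}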
@@ -104,36 +104,46 @@ public:
         matchedNodesIds.clear();
         matchedNodesIds.reserve(nodesToFuse.size());
 
-        int numNodes = net.node_size();
-        for (int i = 0; i < nodesToFuse.size(); ++i)
+        std::queue<int> nodesToMatch;
+        std::queue<int> targetNodes;
+        nodesToMatch.push(nodeId);
+        targetNodes.push(nodesToFuse.back());
+        while (!nodesToMatch.empty())
         {
-            while (nodeId < numNodes && net.node(nodeId).op() == "Const")
-            {
-                nodeId += 1;
-            }
-            if (nodeId > numNodes - 1)
-                return false;
+            int nodeToMatch = nodesToMatch.front();
+            int targetNodeId = targetNodes.front();
+            nodesToMatch.pop();
+            targetNodes.pop();
 
-            const tensorflow::NodeDef& node = net.node(nodeId);
+            if (std::find(matchedNodesIds.begin(), matchedNodesIds.end(), nodeToMatch) != matchedNodesIds.end())
+                continue;
 
-            if (node.op() != nodes[nodesToFuse[i]])
+            const tensorflow::NodeDef& node = net.node(nodeToMatch);
+            if (node.op() != nodes[targetNodeId])
                 return false;
 
-            std::vector<int>& inputNodes = inputs[nodesToFuse[i]];
+            std::vector<int>& inputNodes = inputs[targetNodeId];
             if (inputNodes.size() != node.input_size())
                 return false;
 
             for (int j = 0; j < inputNodes.size(); ++j)
             {
                 if (nodes[inputNodes[j]].empty())  // Unknown input node type.
                     continue;
-                const tensorflow::NodeDef& inpNode = getInputNode(net, node, j);
-                if (inpNode.op() != nodes[inputNodes[j]])
+                nodeId = getInputNodeId(net, node, j);
+                const tensorflow::NodeDef& inpNode = net.node(nodeId);
+                if (inpNode.op() != "Const")
+                {
+                    nodesToMatch.push(nodeId);
+                    targetNodes.push(inputNodes[j]);
+                }
+                else if (nodes[inputNodes[j]] != "Const")
                     return false;
             }
 
-            matchedNodesIds.push_back(nodeId);
-            nodeId += 1;
+            matchedNodesIds.push_back(nodeToMatch);
         }
+
+        std::sort(matchedNodesIds.begin(), matchedNodesIds.end());
         return true;
     }
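For orientation, the rewritten match() above replaces the old assumption that pattern nodes occupy consecutive graph ids with a breadth-first walk: two queues advance in lockstep, one holding graph node ids and the other the pattern node each must match, starting from the pattern's output. A simplified, self-contained sketch of that traversal (toy Node type, Const and unknown-input special cases omitted):

// Simplified queue-based subgraph matching; assumptions: Node is a toy type,
// patternOps/patternInputs mirror the nodes/inputs tables of the real Subgraph.
#include <algorithm>
#include <queue>
#include <string>
#include <vector>

struct Node { std::string op; std::vector<int> inputs; };  // inputs are node ids

static bool matchFromOutput(const std::vector<Node>& graph, int outputId,
                            const std::vector<std::string>& patternOps,
                            const std::vector<std::vector<int> >& patternInputs,
                            std::vector<int>& matchedIds)
{
    std::queue<int> nodesToMatch, targetNodes;
    nodesToMatch.push(outputId);
    targetNodes.push((int)patternOps.size() - 1);  // start from the pattern's output node
    while (!nodesToMatch.empty())
    {
        int nodeId = nodesToMatch.front(); nodesToMatch.pop();
        int target = targetNodes.front();  targetNodes.pop();
        if (std::find(matchedIds.begin(), matchedIds.end(), nodeId) != matchedIds.end())
            continue;  // already reached through another path
        const Node& node = graph[nodeId];
        if (node.op != patternOps[target] ||
            node.inputs.size() != patternInputs[target].size())
            return false;
        for (size_t j = 0; j < node.inputs.size(); ++j)
        {
            nodesToMatch.push(node.inputs[j]);          // follow the graph edge
            targetNodes.push(patternInputs[target][j]); // and the matching pattern edge
        }
        matchedIds.push_back(nodeId);
    }
    std::sort(matchedIds.begin(), matchedIds.end());
    return true;
}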
@@ -181,7 +191,7 @@ public:
         std::vector<tensorflow::NodeDef*> inputNodes(inputsNames.size());
         for (int i = 0; i < inputsNames.size(); ++i)
         {
-            inputNodes[i] = (tensorflow::NodeDef*)&getInputNode(net, *node, i);
+            inputNodes[i] = net.mutable_node(getInputNodeId(net, *node, i));
         }
         finalize(net, node, inputNodes);
     }
@@ -354,7 +364,7 @@ public:
     {
         if (!Subgraph::match(net, nodeId, matchedNodesIds))
             return false;
-        Mat maxValue = getTensorContent(net.node(nodeId + 1).attr().at("value").tensor());
+        Mat maxValue = getTensorContent(net.node(matchedNodesIds.front() + 1).attr().at("value").tensor());
         return maxValue.type() == CV_32FC1 && maxValue.total() == 1 && maxValue.at<float>(0) == 6;
     }
 };
@@ -384,6 +394,17 @@ public:
         setFusedNode("Reshape", ids);
     }
 
+    virtual bool match(const tensorflow::GraphDef& net, int nodeId, std::vector<int>& matchedNodesIds) CV_OVERRIDE
+    {
+        const tensorflow::NodeDef& node = net.node(nodeId);
+        if (node.input_size() == 0)
+            return false;
+
+        inpName = node.input(0);
+        return Subgraph::match(net, nodeId, matchedNodesIds);
+    }
+
     virtual void finalize(tensorflow::GraphDef&, tensorflow::NodeDef* fusedNode,
                           std::vector<tensorflow::NodeDef*>& inputNodes) CV_OVERRIDE
     {
@@ -395,6 +416,7 @@ public:
         }
 
         tensorflow::TensorProto* shapeTensor = inputNodes[1]->mutable_attr()->at("value").mutable_tensor();
         fusedNode->mutable_input()->DeleteSubrange(2, numOutDims - 1);
+        fusedNode->set_input(0, inpName);
         shapeTensor->clear_int_val();
         for (int i = 0; i < shape.size(); ++i)
@@ -405,6 +427,7 @@ public:
 private:
     int numOutDims;
+    std::string inpName;
 };
 
 class L2NormalizeSubgraph : public Subgraph
@@ -685,9 +708,9 @@ void simplifySubgraphs(tensorflow::GraphDef& net)
     subgraphs.push_back(Ptr<Subgraph>(new DeconvolutionSameKerasSubgraph()));
     subgraphs.push_back(Ptr<Subgraph>(new ResizeBilinearSubgraph()));
     subgraphs.push_back(Ptr<Subgraph>(new UpsamplingKerasSubgraph()));
-    subgraphs.push_back(Ptr<Subgraph>(new ReshapeAsShapeSubgraph()));
     subgraphs.push_back(Ptr<Subgraph>(new SoftMaxSlimSubgraph()));
     subgraphs.push_back(Ptr<Subgraph>(new SoftMaxSlimV2Subgraph()));
+    subgraphs.push_back(Ptr<Subgraph>(new ReshapeAsShapeSubgraph()));
 
     int numNodes = net.node_size();
     std::vector<int> matchedNodesIds;
modules/dnn/src/tensorflow/tf_importer.cpp
@@ -1126,7 +1126,15 @@ void TFImporter::populateNet(Net dstNet)
         {
             Mat newShape = getTensorContent(getConstBlob(layer, value_id, 1));
-            if (newShape.total() != 4 && inpLayout == DATA_LAYOUT_NHWC)
+            if (inpLayout == DATA_LAYOUT_NHWC)
             {
+                if (newShape.total() == 4)
+                {
+                    // NHWC->NCHW
+                    std::swap(*newShape.ptr<int32_t>(0, 2), *newShape.ptr<int32_t>(0, 3));
+                    std::swap(*newShape.ptr<int32_t>(0, 1), *newShape.ptr<int32_t>(0, 2));
+                }
+                if (newShape.total() != 4 || newShape.at<int>(1) == 1)
+                {
                     LayerParams permLP;
                     int order[] = {0, 2, 3, 1};  // From OpenCV's NCHW to NHWC.
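The pair of std::swap calls in the new branch reorders a 4-element NHWC shape into OpenCV's NCHW order: first swap W and C, then swap H with the value that just moved into position 2. A tiny standalone check of that index shuffle with plain ints:

// NHWC -> NCHW via two adjacent swaps, mirroring the swaps above.
#include <algorithm>
#include <cstdio>

int main()
{
    int shape[4] = {1, 224, 224, 3};   // N, H, W, C
    std::swap(shape[2], shape[3]);     // -> N, H, C, W
    std::swap(shape[1], shape[2]);     // -> N, C, H, W
    std::printf("%d %d %d %d\n", shape[0], shape[1], shape[2], shape[3]);  // prints: 1 3 224 224
    return 0;
}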
@@ -1140,11 +1148,6 @@ void TFImporter::populateNet(Net dstNet)
                 inpId = Pin(permName);
                 inpLayout = DATA_LAYOUT_NCHW;
             }
-            else if (newShape.total() == 4 && inpLayout == DATA_LAYOUT_NHWC)
-            {
-                // NHWC->NCHW
-                std::swap(*newShape.ptr<int32_t>(0, 2), *newShape.ptr<int32_t>(0, 3));
-                std::swap(*newShape.ptr<int32_t>(0, 1), *newShape.ptr<int32_t>(0, 2));
-            }
             layerParams.set("dim", DictValue::arrayInt<int*>(newShape.ptr<int>(), newShape.total()));
@@ -1381,7 +1384,9 @@ void TFImporter::populateNet(Net dstNet)
             // num_split
             // 1st blob is dims tensor
             int axis = getConstBlob(layer, value_id, 0).int_val().Get(0);
-            layerParams.set("axis", toNCHW(axis));
+            if (getDataLayout(name, data_layouts) == DATA_LAYOUT_NHWC)
+                axis = toNCHW(axis);
+            layerParams.set("axis", axis);
 
             int id = dstNet.addLayer(name, "Slice", layerParams);
             layer_id[name] = id;
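The Split change above converts the axis only when the layer's input is in NHWC layout, using the importer's toNCHW helper. Assuming toNCHW maps non-negative NHWC axes as N->0, H->2, W->3, C->1, an equivalent standalone remap looks like this (a sketch, not the importer's actual helper):

// Hedged sketch of NHWC -> NCHW axis remapping for non-negative axes.
#include <cassert>

static int nhwcAxisToNchw(int axis)
{
    assert(0 <= axis && axis < 4);
    static const int table[4] = {0, 2, 3, 1};  // N, H, W, C -> their NCHW slots
    return table[axis];
}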
modules/dnn/test/test_tf_importer.cpp
@@ -675,6 +675,13 @@ TEST_P(Test_TensorFlow_layers, relu6)
     runTensorFlowNet("keras_relu6", /*hasText*/ true);
 }
 
+TEST_P(Test_TensorFlow_layers, subpixel)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+        throw SkipTestException("");
+    runTensorFlowNet("subpixel");
+}
+
 TEST_P(Test_TensorFlow_layers, keras_mobilenet_head)
 {
     runTensorFlowNet("keras_mobilenet_head");
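The tests above drive these graphs through the TensorFlow importer with the test helper runTensorFlowNet. Outside the test harness, the same import path is reached through the public dnn API; a minimal, hedged usage sketch (file names are placeholders, not files from this repository):

// Minimal end-user sketch of the TensorFlow importer.
#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    // "graph.pb" / "graph.pbtxt" and "input.jpg" are placeholder paths.
    cv::dnn::Net net = cv::dnn::readNetFromTensorflow("graph.pb", "graph.pbtxt");
    cv::Mat img = cv::imread("input.jpg");
    cv::Mat blob = cv::dnn::blobFromImage(img, 1.0, cv::Size(224, 224));  // packs the image as an NCHW blob
    net.setInput(blob);
    cv::Mat out = net.forward();
    return out.empty() ? 1 : 0;
}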