ngraph · Commit 0d693fc3
Authored Oct 24, 2018 by Nick Korovaiko; committed by Robert Kimball, Oct 24, 2018
fix Klockwork warnings CPU part 1 (#1902)

* fix Klockwork warnings CPU part 1
* fix spelling error
* fix a typo
Parent: 92c1d504
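The whole commit is one pattern of fix. Klocwork flags the result of a std::dynamic_pointer_cast being dereferenced without a null check. Where the pattern matcher has already guaranteed the node's type, the cast is downgraded to std::static_pointer_cast, which cannot produce null from a non-null input; where the type is genuinely unchecked, the dynamic cast stays and the result is verified first, either with NGRAPH_ASSERT or with an early return that logs via NGRAPH_DEBUG. A minimal standalone sketch of the before/after follows — Node and Broadcast here are stand-in types for illustration, not the real nGraph classes:

    #include <cassert>
    #include <memory>

    struct Node { virtual ~Node() = default; };
    struct Broadcast : Node { int axes = 0; };

    // Before: Klocwork warns -- dynamic_pointer_cast may return null,
    // and the raw pointer is dereferenced unconditionally.
    int before(std::shared_ptr<Node> n)
    {
        auto node = std::dynamic_pointer_cast<Broadcast>(n).get();
        return node->axes; // possible null dereference
    }

    // Fix 1: the caller has already matched the node's type, so a
    // static_pointer_cast is valid and never null for non-null n.
    int fixed_static(std::shared_ptr<Node> n)
    {
        auto node = std::static_pointer_cast<Broadcast>(n).get();
        return node->axes;
    }

    // Fix 2: the type is not guaranteed, so keep the dynamic cast and
    // check the result before use (NGRAPH_ASSERT in the commit itself).
    int fixed_checked(std::shared_ptr<Node> n)
    {
        auto node = std::dynamic_pointer_cast<Broadcast>(n);
        assert(node);
        return node->axes;
    }

For the epsilon and pad-value constants in cpu_fusion.cpp, the commit takes the early-return route instead: if the matched node is not an op::Constant, the fusion callback logs with NGRAPH_DEBUG and returns false rather than asserting.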
Showing 3 changed files with 45 additions and 23 deletions:

* src/ngraph/runtime/cpu/pass/cpu_collapse_dims.cpp (+2 -2)
* src/ngraph/runtime/cpu/pass/cpu_concat_inputs.cpp (+5 -4)
* src/ngraph/runtime/cpu/pass/cpu_fusion.cpp (+38 -17)
src/ngraph/runtime/cpu/pass/cpu_collapse_dims.cpp

@@ -98,7 +98,7 @@ static void collapse_dims(std::vector<size_t>& shape,
 static bool collapse_broadcast(std::shared_ptr<Node> n)
 {
     bool replaced = false;
-    auto node = std::dynamic_pointer_cast<op::Broadcast>(n).get();
+    auto node = std::static_pointer_cast<op::Broadcast>(n).get();
     auto input_shape = node->get_argument(0)->get_shape();
     auto output_shape = node->get_shape();
     auto operated_axes = node->get_broadcast_axes();

@@ -147,7 +147,7 @@ template <typename T>
 static bool collapse_reduction(std::shared_ptr<Node> n)
 {
     bool replaced = false;
-    auto node = std::dynamic_pointer_cast<T>(n).get();
+    auto node = std::static_pointer_cast<T>(n).get();
     auto input_shape = node->get_argument(0)->get_shape();
     auto output_shape = node->get_shape();
     auto operated_axes = node->get_reduction_axes();
src/ngraph/runtime/cpu/pass/cpu_concat_inputs.cpp

@@ -82,10 +82,10 @@ void ngraph::runtime::cpu::pass::ConcatInputs::concat_lstm_inputs()
 std::shared_ptr<Node> bias = std::make_shared<op::Add>(pattern_map[bias1], pattern_map[bias2]);
-auto lstm_node = pattern_map[lstm_node_label]->get_arguments()[0];
-auto batch_size = std::dynamic_pointer_cast<op::Lstm>(lstm_node)->get_batch_size();
-auto feature_size = std::dynamic_pointer_cast<op::Lstm>(lstm_node)->get_src_iter_feature_size();
+auto lstm_node = std::static_pointer_cast<op::Lstm>(pattern_map[lstm_node_label]->get_arguments()[0]);
+auto batch_size = lstm_node->get_batch_size();
+auto feature_size = lstm_node->get_src_iter_feature_size();
 auto lstm_mkldnn_node = std::make_shared<op::Lstm>(src_layer, src_iter, pattern_map[weights_i2h], pattern_map[weights_h2h], bias);

@@ -105,6 +105,7 @@ void ngraph::runtime::cpu::pass::ConcatInputs::concat_lstm_inputs()
 for (auto& goes : lstm_node->get_outputs().at(0).get_inputs())
 {
     auto goe_node = std::dynamic_pointer_cast<op::GetOutputElement>(goes->get_node());
+    NGRAPH_ASSERT(goe_node);
     lstm_outputs.insert(goes->get_node());
     // first output node of lstm
     if (goe_node->get_n() == 0)
src/ngraph/runtime/cpu/pass/cpu_fusion.cpp
@@ -309,6 +309,11 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_fprop_bn()
 // get epsilon value
 auto eps_ptr = std::dynamic_pointer_cast<op::Constant>(pattern_map[eps_label]);
+if (!eps_ptr)
+{
+    NGRAPH_DEBUG << "Eps must be a constant";
+    return false;
+}
 double epsilon = *(reinterpret_cast<const double*>(eps_ptr->get_data_ptr()));
 auto bn_node = std::make_shared<op::BatchNormTraining>(
     epsilon, pattern_map[gamma_label], pattern_map[beta_label], pattern_map[input]);
@@ -408,12 +413,17 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_zero_padded_reshaped_conv()
 auto pattern_map = m.get_pattern_map();

 auto pad_value_op = std::dynamic_pointer_cast<op::Constant>(pattern_map[pad_value]);
+if (!pad_value_op)
+{
+    NGRAPH_DEBUG << "Pad value must be a constant";
+    return false;
+}

-const auto& matched_conv = std::dynamic_pointer_cast<op::Convolution>(pattern_map[conv_label]);
-const auto& matched_pad = std::dynamic_pointer_cast<op::Pad>(pattern_map[pad_label]);
-const auto& matched_reshape = std::dynamic_pointer_cast<op::Reshape>(pattern_map[reshape_label]);
+const auto& matched_conv = std::static_pointer_cast<op::Convolution>(pattern_map[conv_label]);
+const auto& matched_pad = std::static_pointer_cast<op::Pad>(pattern_map[pad_label]);
+const auto& matched_reshape = std::static_pointer_cast<op::Reshape>(pattern_map[reshape_label]);

 const auto& input_order = matched_reshape->get_input_order();
 auto hoisted_reshape_output_shape =
@@ -485,10 +495,15 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_zero_padded_conv()
 auto pattern_map = m.get_pattern_map();

 auto pad_value_op = std::dynamic_pointer_cast<op::Constant>(pattern_map[pad_value]);
+if (!pad_value_op)
+{
+    NGRAPH_DEBUG << "Pad value must be a constant";
+    return false;
+}

-const auto& matched_conv = std::dynamic_pointer_cast<op::Convolution>(pattern_map[conv_label]);
-const auto& matched_pad = std::dynamic_pointer_cast<op::Pad>(pattern_map[pad_label]);
+const auto& matched_conv = std::static_pointer_cast<op::Convolution>(pattern_map[conv_label]);
+const auto& matched_pad = std::static_pointer_cast<op::Pad>(pattern_map[pad_label]);

 if (!zero_padded_conv_consistency_check(m.get_match_root(),
                                         pad_value_op,
@@ -550,10 +565,15 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_zero_padded_conv_backprop_filters()
 auto pattern_map = m.get_pattern_map();

 auto pad_value_op = std::dynamic_pointer_cast<op::Constant>(pattern_map[pad_value]);
+if (!pad_value_op)
+{
+    NGRAPH_DEBUG << "Pad value must be a constant";
+    return false;
+}

-const auto& matched_conv = std::dynamic_pointer_cast<op::ConvolutionBackpropFilters>(pattern_map[conv_label]);
-const auto& matched_pad = std::dynamic_pointer_cast<op::Pad>(pattern_map[pad_label]);
+const auto& matched_conv = std::static_pointer_cast<op::ConvolutionBackpropFilters>(pattern_map[conv_label]);
+const auto& matched_pad = std::static_pointer_cast<op::Pad>(pattern_map[pad_label]);

 if (!zero_padded_conv_consistency_check(m.get_match_root(),
                                         pad_value_op,
@@ -615,7 +635,7 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_conv_bias()
     << m.get_match_root()->get_name();
 auto pattern_map = m.get_pattern_map();

-auto conv = std::dynamic_pointer_cast<op::Convolution>(m.get_match_root()->get_argument(0));
+auto conv = std::static_pointer_cast<op::Convolution>(m.get_match_root()->get_argument(0));

 if (conv->get_input_shape(0).size() == 4)
 {
     auto bias = m.get_match_root()->get_argument(1)->get_argument(0);
@@ -668,7 +688,7 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_conv_bias_bprop()
 auto pattern_map = m.get_pattern_map();

-auto conv_bprop = std::dynamic_pointer_cast<op::ConvolutionBackpropFilters>(m.get_match_root());
+auto conv_bprop = std::static_pointer_cast<op::ConvolutionBackpropFilters>(m.get_match_root());

 if (conv_bprop->get_input_shape(0).size() == 4 &&
     conv_bprop->get_input_shape(1).size() == 4 &&
@@ -750,7 +770,7 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_batch_norm_relu()
     << m.get_match_root()->get_name();
 auto pattern_map = m.get_pattern_map();

-auto m_bn = std::dynamic_pointer_cast<op::BatchNormTraining>(
+auto m_bn = std::static_pointer_cast<op::BatchNormTraining>(
     m.get_match_root()->get_argument(0)->get_inputs().at(0).get_output().get_node());

 // as of now, only MKLDNN supports this fusion
@@ -766,6 +786,7 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_batch_norm_relu()
 for (auto bn_in : m_bn->get_output_inputs(0))
 {
     auto mgoe = std::dynamic_pointer_cast<op::GetOutputElement>(bn_in->get_node());
+    NGRAPH_ASSERT(mgoe);
     mgoes[mgoe->get_n()] = mgoe;
 }
@@ -881,7 +902,7 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_conv_relu()
 NGRAPH_DEBUG << "In a callback for construct_conv_relu against "
              << m.get_match_root()->get_name();

-auto conv = std::dynamic_pointer_cast<op::Convolution>(m.get_match_root()->get_argument(0));
+auto conv = std::static_pointer_cast<op::Convolution>(m.get_match_root()->get_argument(0));

 // These checks are to make sure a MKLDNN Convolution kernel can be used.
 bool data_dilated = false;
@@ -949,7 +970,7 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_conv_bias_relu()
     << m.get_match_root()->get_name();

-auto conv = std::dynamic_pointer_cast<op::ConvolutionBias>(m.get_match_root()->get_argument(0));
+auto conv = std::static_pointer_cast<op::ConvolutionBias>(m.get_match_root()->get_argument(0));

 // These checks are to make sure a MKLDNN Convolution kernel can be used.
 bool data_dilated = false;
@@ -1113,7 +1134,7 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_conv_add_relu()
     << m.get_match_root()->get_name();

-auto conv_m = std::dynamic_pointer_cast<op::ConvolutionAdd>(m.get_match_root()->get_argument(0));
+auto conv_m = std::static_pointer_cast<op::ConvolutionAdd>(m.get_match_root()->get_argument(0));

 if (conv_m->get_users().size() > 1)
 {
     NGRAPH_DEBUG << "Convolution has more than one user";
@@ -1255,7 +1276,7 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_conv_bias_add_relu()
     << m.get_match_root()->get_name();

-auto conv_m = std::dynamic_pointer_cast<op::ConvolutionBiasAdd>(m.get_match_root()->get_argument(0));
+auto conv_m = std::static_pointer_cast<op::ConvolutionBiasAdd>(m.get_match_root()->get_argument(0));

 if (conv_m->get_users().size() > 1)
 {
     NGRAPH_DEBUG << "Convolution has more than one user";
@@ -1395,7 +1416,7 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_bounded_relu()
     return false;
 }

-auto alpha_const_op = std::dynamic_pointer_cast<op::Constant>(pattern_map[alpha]);
+auto alpha_const_op = std::static_pointer_cast<op::Constant>(pattern_map[alpha]);
 float alpha_val = *(static_cast<float const*>(alpha_const_op->get_data_ptr()));
 NGRAPH_DEBUG << "relu_input: " << pattern_map[relu_input] << " min_val: "
              << *(static_cast<float const*>(alpha_const_op->get_data_ptr()));
@@ -1437,8 +1458,8 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_conv_bias_folded_batch_norm()
     << m.get_match_root()->get_name();
 auto pattern_map = m.get_pattern_map();

-auto m_bn = std::dynamic_pointer_cast<op::BatchNormInference>(m.get_match_root());
-auto m_conv = std::dynamic_pointer_cast<op::ConvolutionBias>(m_bn->get_argument(2));
+auto m_bn = std::static_pointer_cast<op::BatchNormInference>(m.get_match_root());
+auto m_conv = std::static_pointer_cast<op::ConvolutionBias>(m_bn->get_argument(2));

 if (m_conv->get_users().size() > 1)
 {