Commit 3d21f6ed (Unverified), authored Oct 05, 2018 by Robert Kimball, committed by GitHub on Oct 05, 2018
address klocwork number overflow issue (#1751)

* address klocwork number overflow issue
* one more issue
Parent: be0a9f03
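Note on the change, for context: the diff switches the Lstm op's dimension members and getters from int to size_t and removes the static_cast wrappers those int members required. Below is a minimal sketch of the pattern, assuming (as the diff suggests) that the Klocwork overflow warning came from narrowing size_t shape dimensions into int. The Shape alias and LstmDims struct here are hypothetical stand-ins, not nGraph types.

#include <cstddef>
#include <vector>

// Hypothetical stand-in for ngraph::Shape, assumed to hold size_t dimensions.
using Shape = std::vector<std::size_t>;

// Hypothetical struct mirroring the kind of members changed in lstm.hpp.
struct LstmDims
{
    // Before the fix: an int member forces a narrowing cast at every
    // assignment, which static analyzers such as Klocwork can flag as a
    // potential overflow/truncation when the dimension exceeds INT_MAX.
    //   int m_batch_size = static_cast<int>(shape[0]);

    // After the fix: the member matches the dimension's type, so no cast
    // and no possible truncation.
    std::size_t m_batch_size = 0;
};

int main()
{
    Shape shape{32, 256};
    LstmDims dims;
    dims.m_batch_size = shape[0]; // size_t assigned to size_t, no narrowing
    return dims.m_batch_size == 32 ? 0 : 1;
}

With the members widened, the getters in lstm.hpp change signature accordingly, which accounts for most of the +19/-19 in that file.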
Showing 3 changed files with 32 additions and 39 deletions:

src/ngraph/runtime/cpu/op/lstm.cpp (+8, -10)
src/ngraph/runtime/cpu/op/lstm.hpp (+19, -19)
src/ngraph/runtime/cpu/pass/cpu_concat_inputs.cpp (+5, -10)
src/ngraph/runtime/cpu/op/lstm.cpp
@@ -68,8 +68,8 @@ op::Lstm::Lstm(std::shared_ptr<Node> input_xt_1,
     , m_num_timesteps(1)
     , m_num_gates_per_cell(4)
     , m_src_sequence_length(1)
-    , m_src_layer_feature_size(static_cast<int>(input_xt_1->get_shape()[1]))
-    , m_src_iter_feature_size(static_cast<int>(hidden_state_ht_1->get_shape()[1]))
+    , m_src_layer_feature_size(input_xt_1->get_shape()[1])
+    , m_src_iter_feature_size(hidden_state_ht_1->get_shape()[1])
     , m_num_cell_states(2)
     , m_direction(1)
     , m_num_fused_layers(1)
@@ -89,7 +89,7 @@ op::Lstm::Lstm(std::shared_ptr<Node> input_xt_1,
     if (input_xt_1->get_shape().size() == 2)
     {
-        m_batch_size = static_cast<int>(input_xt_1->get_shape()[0]);
+        m_batch_size = input_xt_1->get_shape()[0];
     }
     else
     {
@@ -132,8 +132,8 @@ op::Lstm::Lstm(std::shared_ptr<Node> src_layer,
     , m_num_timesteps(1)
     , m_num_gates_per_cell(4)
     , m_src_sequence_length(1)
-    , m_src_layer_feature_size(static_cast<int>(src_layer->get_shape()[1]))
-    , m_src_iter_feature_size(static_cast<int>(src_iter->get_shape()[1]))
+    , m_src_layer_feature_size(src_layer->get_shape()[1])
+    , m_src_iter_feature_size(src_iter->get_shape()[1])
     , m_num_cell_states(2)
     , m_direction(1)
     , m_num_fused_layers(1)
@@ -153,7 +153,7 @@ op::Lstm::Lstm(std::shared_ptr<Node> src_layer,
     if (src_layer->get_shape().size() == 2)
     {
-        m_batch_size = static_cast<int>(src_layer->get_shape()[0] / m_num_timesteps);
+        m_batch_size = src_layer->get_shape()[0] / m_num_timesteps;
     }
     else
     {
@@ -184,10 +184,8 @@ op::Lstm::Lstm(std::shared_ptr<Node> src_layer,
     set_output_size(2);
     set_output_type(0,
                     src_layer->get_element_type(),
-                    Shape{static_cast<unsigned long>(m_num_timesteps * m_batch_size),
-                          static_cast<unsigned long>(m_src_iter_feature_size)});
+                    Shape{(m_num_timesteps * m_batch_size), m_src_iter_feature_size});
     set_output_type(1,
                     src_layer->get_element_type(),
-                    Shape{static_cast<unsigned long>(m_num_cell_states * m_batch_size),
-                          static_cast<unsigned long>(m_src_iter_feature_size)});
+                    Shape{(m_num_cell_states * m_batch_size), m_src_iter_feature_size});
 }
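A short side note on the last hunk above: once m_num_timesteps, m_batch_size, and m_src_iter_feature_size are size_t, the brace-initializer can take them directly, assuming ngraph::Shape stores size_t dimensions. The Shape alias below is a hypothetical stand-in used only to keep the sketch self-contained.

#include <cstddef>
#include <vector>

using Shape = std::vector<std::size_t>; // hypothetical stand-in for ngraph::Shape

int main()
{
    std::size_t m_num_timesteps = 1;
    std::size_t m_batch_size = 32;
    std::size_t m_src_iter_feature_size = 100;

    // The product is computed in size_t, so the initializer list needs no casts.
    Shape out{(m_num_timesteps * m_batch_size), m_src_iter_feature_size};

    return out.size() == 2 ? 0 : 1;
}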
src/ngraph/runtime/cpu/op/lstm.hpp
@@ -69,31 +69,31 @@ namespace ngraph
                      std::shared_ptr<Node> bias);
             Shape get_output_tensor_shape() const { return m_output_tensor_shape; }
             Shape get_output_cell_shape() const { return m_output_cell_shape; }
-            int get_num_timesteps() const { return m_num_timesteps; }
-            int get_src_sequence_length() const { return m_src_sequence_length; }
-            int get_gates_per_cell() const { return m_num_gates_per_cell; }
-            int get_batch_size() const { return m_batch_size; }
-            int get_src_layer_feature_size() const { return m_src_layer_feature_size; }
-            int get_src_iter_feature_size() const { return m_src_iter_feature_size; }
-            int get_num_cell_states() const { return m_num_cell_states; }
-            int get_direction() const { return m_direction; }
-            int get_num_fused_layers() const { return m_num_fused_layers; }
-            int get_fused_inputs() const { return m_fused_inputs; }
+            size_t get_num_timesteps() const { return m_num_timesteps; }
+            size_t get_src_sequence_length() const { return m_src_sequence_length; }
+            size_t get_gates_per_cell() const { return m_num_gates_per_cell; }
+            size_t get_batch_size() const { return m_batch_size; }
+            size_t get_src_layer_feature_size() const { return m_src_layer_feature_size; }
+            size_t get_src_iter_feature_size() const { return m_src_iter_feature_size; }
+            size_t get_num_cell_states() const { return m_num_cell_states; }
+            size_t get_direction() const { return m_direction; }
+            size_t get_num_fused_layers() const { return m_num_fused_layers; }
+            size_t get_fused_inputs() const { return m_fused_inputs; }
             virtual std::shared_ptr<Node>
                 copy_with_new_args(const NodeVector& new_args) const override;

         private:
             Shape m_output_tensor_shape;
             Shape m_output_cell_shape;
-            int m_num_timesteps;
-            int m_num_gates_per_cell;
-            int m_src_sequence_length;
-            int m_batch_size;
-            int m_src_layer_feature_size;
-            int m_src_iter_feature_size;
-            int m_num_cell_states;
-            int m_direction;
-            int m_num_fused_layers;
+            size_t m_num_timesteps;
+            size_t m_num_gates_per_cell;
+            size_t m_src_sequence_length;
+            size_t m_batch_size;
+            size_t m_src_layer_feature_size;
+            size_t m_src_iter_feature_size;
+            size_t m_num_cell_states;
+            size_t m_direction;
+            size_t m_num_fused_layers;
             bool m_fused_inputs; // True if node gets fused inputs/weights
         };
     }
src/ngraph/runtime/cpu/pass/cpu_concat_inputs.cpp
@@ -94,16 +94,11 @@ void ngraph::runtime::cpu::pass::ConcatInputs::concat_lstm_inputs()
         // dst_iter of lstm mkldnn output holds the results of both recurrent state
         // tensor outputs. we need to slice the ct.
-        auto ht_slice = std::make_shared<op::Slice>(
-            lstm_ht_ct_out,
-            Coordinate{0, 0},
-            Coordinate{static_cast<unsigned long>(batch_size),
-                       static_cast<unsigned long>(feature_size)});
-        auto ct_slice = std::make_shared<op::Slice>(
-            lstm_ht_ct_out,
-            Coordinate{static_cast<unsigned long>(batch_size), 0},
-            Coordinate{static_cast<unsigned long>(2 * batch_size),
-                       static_cast<unsigned long>(feature_size)});
+        auto ht_slice = std::make_shared<op::Slice>(
+            lstm_ht_ct_out, Coordinate{0, 0}, Coordinate{batch_size, feature_size});
+        auto ct_slice = std::make_shared<op::Slice>(
+            lstm_ht_ct_out, Coordinate{batch_size, 0}, Coordinate{(2 * batch_size), feature_size});

         // now go through the GOE'sand replace the slices(ht)
         std::set<std::shared_ptr<ngraph::Node>> lstm_outputs;