Unverified Commit 3d21f6ed authored by Robert Kimball's avatar Robert Kimball Committed by GitHub

address klocwork number overflow issue (#1751)

* address klocwork number overflow issue

* one more issue
parent be0a9f03
...@@ -68,8 +68,8 @@ op::Lstm::Lstm(std::shared_ptr<Node> input_xt_1, ...@@ -68,8 +68,8 @@ op::Lstm::Lstm(std::shared_ptr<Node> input_xt_1,
, m_num_timesteps(1) , m_num_timesteps(1)
, m_num_gates_per_cell(4) , m_num_gates_per_cell(4)
, m_src_sequence_length(1) , m_src_sequence_length(1)
, m_src_layer_feature_size(static_cast<int>(input_xt_1->get_shape()[1])) , m_src_layer_feature_size(input_xt_1->get_shape()[1])
, m_src_iter_feature_size(static_cast<int>(hidden_state_ht_1->get_shape()[1])) , m_src_iter_feature_size(hidden_state_ht_1->get_shape()[1])
, m_num_cell_states(2) , m_num_cell_states(2)
, m_direction(1) , m_direction(1)
, m_num_fused_layers(1) , m_num_fused_layers(1)
...@@ -89,7 +89,7 @@ op::Lstm::Lstm(std::shared_ptr<Node> input_xt_1, ...@@ -89,7 +89,7 @@ op::Lstm::Lstm(std::shared_ptr<Node> input_xt_1,
if (input_xt_1->get_shape().size() == 2) if (input_xt_1->get_shape().size() == 2)
{ {
m_batch_size = static_cast<int>(input_xt_1->get_shape()[0]); m_batch_size = input_xt_1->get_shape()[0];
} }
else else
{ {
...@@ -132,8 +132,8 @@ op::Lstm::Lstm(std::shared_ptr<Node> src_layer, ...@@ -132,8 +132,8 @@ op::Lstm::Lstm(std::shared_ptr<Node> src_layer,
, m_num_timesteps(1) , m_num_timesteps(1)
, m_num_gates_per_cell(4) , m_num_gates_per_cell(4)
, m_src_sequence_length(1) , m_src_sequence_length(1)
, m_src_layer_feature_size(static_cast<int>(src_layer->get_shape()[1])) , m_src_layer_feature_size(src_layer->get_shape()[1])
, m_src_iter_feature_size(static_cast<int>(src_iter->get_shape()[1])) , m_src_iter_feature_size(src_iter->get_shape()[1])
, m_num_cell_states(2) , m_num_cell_states(2)
, m_direction(1) , m_direction(1)
, m_num_fused_layers(1) , m_num_fused_layers(1)
...@@ -153,7 +153,7 @@ op::Lstm::Lstm(std::shared_ptr<Node> src_layer, ...@@ -153,7 +153,7 @@ op::Lstm::Lstm(std::shared_ptr<Node> src_layer,
if (src_layer->get_shape().size() == 2) if (src_layer->get_shape().size() == 2)
{ {
m_batch_size = static_cast<int>(src_layer->get_shape()[0] / m_num_timesteps); m_batch_size = src_layer->get_shape()[0] / m_num_timesteps;
} }
else else
{ {
...@@ -184,10 +184,8 @@ op::Lstm::Lstm(std::shared_ptr<Node> src_layer, ...@@ -184,10 +184,8 @@ op::Lstm::Lstm(std::shared_ptr<Node> src_layer,
set_output_size(2); set_output_size(2);
set_output_type(0, set_output_type(0,
src_layer->get_element_type(), src_layer->get_element_type(),
Shape{static_cast<unsigned long>(m_num_timesteps * m_batch_size), Shape{(m_num_timesteps * m_batch_size), m_src_iter_feature_size});
static_cast<unsigned long>(m_src_iter_feature_size)});
set_output_type(1, set_output_type(1,
src_layer->get_element_type(), src_layer->get_element_type(),
Shape{static_cast<unsigned long>(m_num_cell_states * m_batch_size), Shape{(m_num_cell_states * m_batch_size), m_src_iter_feature_size});
static_cast<unsigned long>(m_src_iter_feature_size)});
} }
...@@ -69,31 +69,31 @@ namespace ngraph ...@@ -69,31 +69,31 @@ namespace ngraph
std::shared_ptr<Node> bias); std::shared_ptr<Node> bias);
Shape get_output_tensor_shape() const { return m_output_tensor_shape; } Shape get_output_tensor_shape() const { return m_output_tensor_shape; }
Shape get_output_cell_shape() const { return m_output_cell_shape; } Shape get_output_cell_shape() const { return m_output_cell_shape; }
int get_num_timesteps() const { return m_num_timesteps; } size_t get_num_timesteps() const { return m_num_timesteps; }
int get_src_sequence_length() const { return m_src_sequence_length; } size_t get_src_sequence_length() const { return m_src_sequence_length; }
int get_gates_per_cell() const { return m_num_gates_per_cell; } size_t get_gates_per_cell() const { return m_num_gates_per_cell; }
int get_batch_size() const { return m_batch_size; } size_t get_batch_size() const { return m_batch_size; }
int get_src_layer_feature_size() const { return m_src_layer_feature_size; } size_t get_src_layer_feature_size() const { return m_src_layer_feature_size; }
int get_src_iter_feature_size() const { return m_src_iter_feature_size; } size_t get_src_iter_feature_size() const { return m_src_iter_feature_size; }
int get_num_cell_states() const { return m_num_cell_states; } size_t get_num_cell_states() const { return m_num_cell_states; }
int get_direction() const { return m_direction; } size_t get_direction() const { return m_direction; }
int get_num_fused_layers() const { return m_num_fused_layers; } size_t get_num_fused_layers() const { return m_num_fused_layers; }
int get_fused_inputs() const { return m_fused_inputs; } size_t get_fused_inputs() const { return m_fused_inputs; }
virtual std::shared_ptr<Node> virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override; copy_with_new_args(const NodeVector& new_args) const override;
private: private:
Shape m_output_tensor_shape; Shape m_output_tensor_shape;
Shape m_output_cell_shape; Shape m_output_cell_shape;
int m_num_timesteps; size_t m_num_timesteps;
int m_num_gates_per_cell; size_t m_num_gates_per_cell;
int m_src_sequence_length; size_t m_src_sequence_length;
int m_batch_size; size_t m_batch_size;
int m_src_layer_feature_size; size_t m_src_layer_feature_size;
int m_src_iter_feature_size; size_t m_src_iter_feature_size;
int m_num_cell_states; size_t m_num_cell_states;
int m_direction; size_t m_direction;
int m_num_fused_layers; size_t m_num_fused_layers;
bool m_fused_inputs; // True if node gets fused inputs/weights bool m_fused_inputs; // True if node gets fused inputs/weights
}; };
} }
......
...@@ -94,16 +94,11 @@ void ngraph::runtime::cpu::pass::ConcatInputs::concat_lstm_inputs() ...@@ -94,16 +94,11 @@ void ngraph::runtime::cpu::pass::ConcatInputs::concat_lstm_inputs()
// dst_iter of lstm mkldnn output holds the results of both recurrent state // dst_iter of lstm mkldnn output holds the results of both recurrent state
// tensor outputs. we need to slice the ct. // tensor outputs. we need to slice the ct.
auto ht_slice = auto ht_slice = std::make_shared<op::Slice>(
std::make_shared<op::Slice>(lstm_ht_ct_out, lstm_ht_ct_out, Coordinate{0, 0}, Coordinate{batch_size, feature_size});
Coordinate{0, 0}, auto ct_slice = std::make_shared<op::Slice>(lstm_ht_ct_out,
Coordinate{static_cast<unsigned long>(batch_size), Coordinate{batch_size, 0},
static_cast<unsigned long>(feature_size)}); Coordinate{(2 * batch_size), feature_size});
auto ct_slice =
std::make_shared<op::Slice>(lstm_ht_ct_out,
Coordinate{static_cast<unsigned long>(batch_size), 0},
Coordinate{static_cast<unsigned long>(2 * batch_size),
static_cast<unsigned long>(feature_size)});
// now go through the GOE's and replace the slices(ht) // now go through the GOE's and replace the slices(ht)
std::set<std::shared_ptr<ngraph::Node>> lstm_outputs; std::set<std::shared_ptr<ngraph::Node>> lstm_outputs;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment