Commit f9caac99 authored by Robert Kimball, committed by Scott Cyphers

address klocwork issues (#2555)

parent ca5476b3
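The recurring pattern in these Klocwork fixes is defensive handling of values that may be null or empty before they are used: version strings initialized to "0" instead of empty, std::localtime checked for a null return, and compiled executables asserted non-null before use. As a minimal self-contained sketch of the std::localtime guard applied in the get_timestamp() hunk below (the function name safe_timestamp and the includes are illustrative additions, not part of the commit):

#include <chrono>
#include <ctime>
#include <sstream>
#include <string>

// Illustrative sketch only: mirrors the guarded strftime pattern in the diff below.
std::string safe_timestamp()
{
    auto now = std::chrono::system_clock::now();
    auto timer = std::chrono::system_clock::to_time_t(now);

    char buffer[256];
    buffer[0] = 0; // keep the buffer a valid (empty) C string if localtime fails
    std::tm* bt = std::localtime(&timer);
    if (bt) // std::localtime may return nullptr, so only format when it succeeds
    {
        std::strftime(buffer, sizeof(buffer), "%H:%M:%S", bt);
    }

    std::ostringstream timestamp;
    timestamp << buffer;
    return timestamp.str();
}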
@@ -615,7 +615,7 @@ int codegen::CompilerCore::full_version_number(const std::string& path, const st
     }
     // create full version number and return
-    std::string full_version = {};
+    std::string full_version = "0";
     // Assume versioning like X.Y.Z
     int padding = 3 - tokens.size();
     for (std::string s : tokens)
@@ -633,7 +633,7 @@ std::string codegen::CompilerCore::find_header_version(const std::string& path)
 {
     // Step 1: find highest g++ version
     std::string gpp_prefix = file_util::path_join(path, "bin/g++-");
-    std::string gpp_ver = {};
+    std::string gpp_ver = "0";
     for (auto i : {"8", "7", "6", "5", "4.9", "4.8"})
     {
         if (file_util::exists(gpp_prefix + i))
@@ -84,10 +84,13 @@ std::string ngraph::get_timestamp()
     auto timer = std::chrono::system_clock::to_time_t(now);
     // convert to broken time
-    std::tm* bt = std::localtime(&timer);
     char buffer[256];
-    strftime(buffer, sizeof(buffer), "%H:%M:%S", bt);
+    buffer[0] = 0;
+    std::tm* bt = std::localtime(&timer);
+    if (bt)
+    {
+        strftime(buffer, sizeof(buffer), "%H:%M:%S", bt);
+    }
     std::ostringstream timestamp;
     timestamp << buffer;
@@ -804,8 +804,8 @@ void ngraph::runtime::cpu::pass::BiDirectionalRnn::construct_bidirectional_rnn()
                       rnn_right_to_left](pattern::Matcher& m) {
         auto pattern_map = m.get_pattern_map();
-        auto rnn_ltor_node = std::dynamic_pointer_cast<op::Rnn>(pattern_map[rnn_left_to_right]);
-        auto rnn_rtol_node = std::dynamic_pointer_cast<op::Rnn>(pattern_map[rnn_right_to_left]);
+        auto rnn_ltor_node = std::static_pointer_cast<op::Rnn>(pattern_map[rnn_left_to_right]);
+        auto rnn_rtol_node = std::static_pointer_cast<op::Rnn>(pattern_map[rnn_right_to_left]);
         if (rnn_ltor_node->get_src_sequence_length() != rnn_rtol_node->get_src_sequence_length())
         {
@@ -161,6 +161,10 @@ void runtime::hybrid::rewrite_function(const shared_ptr<Function>& f,
         if (cluster.size() > 0)
         {
             shared_ptr<Node> tmp_node = *cluster.begin();
+            if (tmp_node == nullptr)
+            {
+                throw runtime_error("cluster contains nullptr instead of nodes");
+            }
             auto placement = tmp_node->get_placement_index();
             if (placement != 0)
             {
@@ -775,6 +775,7 @@ TEST(cpu_test, memory_reuse_destructive_oi_relu)
     shared_ptr<runtime::Executable> handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b, c});
+    ASSERT_NE(handle, nullptr);
     EXPECT_EQ(read_vector<float>(result), expected);
 }
@@ -802,6 +803,7 @@ TEST(cpu_test, memory_reuse_cacheable_no_destructive_oi_relu)
     vector<float> expected{0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
     shared_ptr<runtime::Executable> handle = backend->compile(f);
+    ASSERT_NE(handle, nullptr);
     handle->call_with_validate({result}, {a, b, c});
     EXPECT_EQ(read_vector<float>(result), expected);
@@ -864,6 +866,7 @@ TEST(cpu_test, memory_reuse_in_place_slice_after_in_place_concat)
     auto result = backend->create_tensor(element::f32, shape_r);
     shared_ptr<runtime::Executable> handle = backend->compile(f);
+    ASSERT_NE(handle, nullptr);
     handle->call_with_validate({result}, {a, b, c, d});
     EXPECT_EQ((vector<float>{3, 7}), read_vector<float>(result));
 }