Commit 599fb3e7 authored by Robert Kimball, committed by Scott Cyphers

Klocwork issues addressed (#2079)

* fix klocwork issues

* more klocwork issues addressed
parent 880594ba
@@ -69,9 +69,12 @@ LogHelper::LogHelper(LOG_TYPE type,
     time_t tt = chrono::system_clock::to_time_t(chrono::system_clock::now());
     auto tm = gmtime(&tt);
-    char buffer[256];
-    strftime(buffer, sizeof(buffer), "%Y-%m-%dT%H:%M:%Sz", tm);
-    m_stream << buffer << " ";
+    if (tm)
+    {
+        char buffer[256];
+        strftime(buffer, sizeof(buffer), "%Y-%m-%dT%H:%M:%Sz", tm);
+        m_stream << buffer << " ";
+    }
     m_stream << file;
     m_stream << " " << line;
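Note on the hunk above: gmtime() may return a null pointer when the conversion fails, which is presumably the possible null dereference Klocwork flagged before the pointer reached strftime(). A minimal standalone sketch of the checked pattern (illustrative, not nGraph code):

    #include <chrono>
    #include <ctime>
    #include <iostream>
    #include <string>

    // Format the current UTC time; return an empty string if gmtime() fails.
    std::string utc_timestamp()
    {
        std::time_t tt = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
        std::tm* tm = std::gmtime(&tt); // may return nullptr if the time cannot be converted
        if (!tm)
        {
            return std::string();
        }
        char buffer[256];
        if (std::strftime(buffer, sizeof(buffer), "%Y-%m-%dT%H:%M:%SZ", tm) == 0)
        {
            return std::string();
        }
        return std::string(buffer);
    }

    int main()
    {
        std::cout << utc_timestamp() << '\n';
    }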
@@ -33,6 +33,10 @@ pass::MemoryLayout::MemoryLayout(size_t alignment, bool disable_memory_sharing)
     : m_alignment(alignment)
     , m_disable_memory_sharing(disable_memory_sharing)
 {
+    if (m_alignment == 0)
+    {
+        throw invalid_argument("Memory alignment must be > 0");
+    }
 }

 bool pass::MemoryLayout::run_on_function(shared_ptr<ngraph::Function> function)
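The new guard rejects a zero alignment at construction time. The alignment is presumably used later to round tensor offsets up to a multiple of m_alignment, where a zero divisor would be undefined behaviour. A hypothetical rounding helper (not the pass's actual code) shows why the check matters:

    #include <cstddef>
    #include <stdexcept>

    // Hypothetical helper: round 'size' up to the next multiple of 'alignment'.
    // With alignment == 0 the modulo below is undefined behaviour, hence the
    // constructor-level check above.
    static std::size_t align_up(std::size_t size, std::size_t alignment)
    {
        if (alignment == 0)
        {
            throw std::invalid_argument("Memory alignment must be > 0");
        }
        std::size_t remainder = size % alignment;
        return remainder == 0 ? size : size + (alignment - remainder);
    }

    // e.g. align_up(10, 8) == 16, align_up(16, 8) == 16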
@@ -24,19 +24,19 @@ static int GetNumCores()
     const auto ngraph_intra_op_parallelism = std::getenv("NGRAPH_INTRA_OP_PARALLELISM");
     int count = 0;
-    if (omp_num_threads && (count = std::atoi(omp_num_threads)))
+    if (omp_num_threads)
     {
-        return count;
+        count = std::atoi(omp_num_threads);
     }
-    else if (ngraph_intra_op_parallelism && (count = std::atoi(ngraph_intra_op_parallelism)))
+    else if (ngraph_intra_op_parallelism)
     {
-        return count;
+        count = std::atoi(ngraph_intra_op_parallelism);
     }
     else
     {
-        count = std::thread::hardware_concurrency() >> 1;
+        count = std::thread::hardware_concurrency() / 2;
     }
-    return count ? count : 1;
+    return count < 1 ? 1 : count;
 }

 static int GetNumThreadPools()
@@ -44,12 +44,12 @@ static int GetNumThreadPools()
     const auto ngraph_inter_op_parallelism = std::getenv("NGRAPH_INTER_OP_PARALLELISM");
     int count = 0;
-    if (ngraph_inter_op_parallelism && (count = std::atoi(ngraph_inter_op_parallelism)))
+    if (ngraph_inter_op_parallelism)
     {
-        return count;
+        count = std::atoi(ngraph_inter_op_parallelism);
     }
-    return 1;
+    return count < 1 ? 1 : count;
 }

 namespace ngraph
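The rewritten functions above drop the assignments embedded in the if conditions and always clamp the result to at least 1, which also covers std::atoi returning 0 or a negative value for a malformed variable and std::thread::hardware_concurrency() returning 0 when the core count is unknown. A minimal sketch of the same pattern (the helper name and variable name are illustrative):

    #include <cstdlib>
    #include <thread>

    // Read a thread count from an environment variable, fall back to half the
    // reported hardware concurrency, and never return less than 1.
    static int thread_count_from_env(const char* var_name)
    {
        int count = 0;
        if (const char* value = std::getenv(var_name))
        {
            count = std::atoi(value); // 0 if the string is not a number
        }
        else
        {
            count = static_cast<int>(std::thread::hardware_concurrency()) / 2; // may be 0
        }
        return count < 1 ? 1 : count;
    }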
@@ -403,7 +403,7 @@ mkldnn::memory::desc runtime::cpu::mkldnn_utils::create_blocked_mkldnn_md(
 // MKLDNN kernel selection sometimes relies on named layouts like "mkldnn_nchw"
 // Try and convert a blocked layout into a named layout
-memory::desc runtime::cpu::mkldnn_utils::try_get_named_md(mkldnn_memory_desc_t md)
+memory::desc runtime::cpu::mkldnn_utils::try_get_named_md(const mkldnn_memory_desc_t& md)
 {
     auto out_md = memory::desc(md);
@@ -54,7 +54,7 @@ namespace ngraph
            mkldnn::memory::desc create_blocked_mkldnn_md(const Shape& dims,
                                                          const Strides& strides,
                                                          const ngraph::element::Type type);
-           mkldnn::memory::desc try_get_named_md(mkldnn_memory_desc_t md);
+           mkldnn::memory::desc try_get_named_md(const mkldnn_memory_desc_t& md);
            mkldnn::memory::desc rotate_blocked_md(const mkldnn::memory::desc& in,
                                                   const AxisVector& axis_order);
            mkldnn::memory::desc squeeze_blocked_md(const mkldnn::memory::desc& in,
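Both the definition and the declaration of try_get_named_md() switch from taking mkldnn_memory_desc_t by value to taking it by const reference; the descriptor is a sizeable C struct, so the by-value copy is presumably what Klocwork flagged. The general pattern, with an illustrative struct rather than the real MKL-DNN type:

    // Illustrative only: a large POD-style struct, similar in spirit to a
    // memory descriptor holding dimension and stride arrays.
    struct big_descriptor
    {
        int ndims;
        long dims[12];
        long strides[12];
    };

    // By value the whole struct is copied on every call:
    //     long sum_dims(big_descriptor d);
    // By const reference there is no copy, and the callee still cannot
    // modify the caller's object.
    long sum_dims(const big_descriptor& d)
    {
        long total = 0;
        for (int i = 0; i < d.ndims; ++i)
        {
            total += d.dims[i];
        }
        return total;
    }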
@@ -1586,7 +1586,7 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_groupconv_batchnorm_global
                      << m.get_match_root()->get_name();
         auto pattern_map = m.get_pattern_map();
-        auto m_bn = std::dynamic_pointer_cast<op::BatchNormInference>(m.get_match_root());
+        auto m_bn = std::static_pointer_cast<op::BatchNormInference>(m.get_match_root());
         auto conv_m = std::static_pointer_cast<op::GroupConvolution>(pattern_map[conv_label]);
         if (conv_m->get_users().size() > 1)
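Here dynamic_pointer_cast becomes static_pointer_cast: the match root in this callback is produced by a pattern that only matches a BatchNormInference node (the neighbouring conv_m cast already relies on the same kind of guarantee), so the downcast cannot fail, and dropping the dynamic cast removes the possibly-null result that the analyzer would otherwise want checked before use. A generic sketch of the two casts (class names are illustrative):

    #include <memory>

    struct Node { virtual ~Node() = default; };
    struct BatchNorm : Node { double epsilon = 1e-5; };

    double read_epsilon(const std::shared_ptr<Node>& n)
    {
        // Dynamic type not guaranteed: dynamic_pointer_cast may yield nullptr
        // and must be checked before the pointer is used.
        if (auto bn = std::dynamic_pointer_cast<BatchNorm>(n))
        {
            return bn->epsilon;
        }
        return 0.0;
    }

    double read_epsilon_known(const std::shared_ptr<Node>& n)
    {
        // The caller guarantees n is a BatchNorm (e.g. the pattern matcher only
        // matched that type): static_pointer_cast performs no runtime check and
        // never yields nullptr for a non-null input.
        auto bn = std::static_pointer_cast<BatchNorm>(n);
        return bn->epsilon;
    }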
@@ -92,17 +92,17 @@ shared_ptr<Node> runtime::cpu::pass::CPULayout::insert_input_conversions(
         const auto& output = input.get_output();
         auto tv = output.get_tensor_ptr();
         auto tvl = dynamic_pointer_cast<runtime::cpu::LayoutDescriptor>(tv->get_tensor_layout());
-        if (input.get_shape() == Shape{})
-        {
-            tvl->set_mkldnn_md(required_mds[index]);
-        }
         if (!tvl)
         {
             throw ngraph_error(
                 "In insert_input_conversions: Expecting Layout descriptor to be already set on " +
                 output.get_node()->get_name());
         }
+        if (input.get_shape() == Shape{})
+        {
+            tvl->set_mkldnn_md(required_mds[index]);
+        }
         if (!tvl->is_mkldnn_layout())
         {
             throw ngraph_error(
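The reordering above moves the null check on tvl ahead of its first use: previously, on the scalar-shape path tvl->set_mkldnn_md(...) could run before the result of the dynamic_pointer_cast had been verified. The underlying rule, in a minimal form (names are illustrative, not nGraph's API):

    #include <memory>
    #include <stdexcept>

    struct Layout
    {
        void set_format(int fmt) { format = fmt; }
        int format = 0;
    };

    void apply_format(const std::shared_ptr<Layout>& layout, int fmt)
    {
        // Validate first: any dereference before this check is a potential
        // null-pointer dereference, which is what the static analyzer reports.
        if (!layout)
        {
            throw std::runtime_error("layout descriptor not set");
        }
        layout->set_format(fmt);
    }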