Commit dad2585b authored by Scott Cyphers, committed by Robert Kimball

Fix Klocwork issues (#2395)

* Fix Klocwork issues

* style
parent 4261a230
@@ -231,7 +231,9 @@ namespace ngraph
 if (old_pops.kind(i) == mkldnn::primitive::kind::eltwise)
 {
     mkldnn::algorithm alg;
-    float scale, alpha, beta;
+    float scale = 0;
+    float alpha = 0;
+    float beta = 0;
     old_pops.get_params_eltwise(i, scale, alg, alpha, beta);
     new_pops.append_eltwise(scale, alg, alpha, beta);
 }
@@ -303,7 +305,9 @@ namespace ngraph
 if (old_pops.kind(i) == mkldnn::primitive::kind::eltwise)
 {
     mkldnn::algorithm alg;
-    float scale, alpha, beta;
+    float scale = 0;
+    float alpha = 0;
+    float beta = 0;
     old_pops.get_params_eltwise(i, scale, alg, alpha, beta);
     new_pops.append_eltwise(scale, alg, alpha, beta);
 }
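Note on the two hunks above: Klocwork flags scale, alpha, and beta as possibly uninitialized, since they are only assigned inside get_params_eltwise. Zero-initializing them keeps the values defined even if the call leaves an output untouched on some path. A minimal sketch of the pattern, with a hypothetical get_params standing in for the MKL-DNN call:

    #include <iostream>

    // Sketch only: get_params is a hypothetical out-parameter API standing in
    // for mkldnn's get_params_eltwise.
    void get_params(bool ok, float& scale, float& alpha, float& beta)
    {
        if (!ok)
        {
            return; // early exit leaves the outputs untouched
        }
        scale = 1.0f;
        alpha = 0.5f;
        beta = 0.0f;
    }

    int main()
    {
        // Zero-initialize so the values are defined on every path.
        float scale = 0;
        float alpha = 0;
        float beta = 0;
        get_params(false, scale, alpha, beta);
        std::cout << scale << " " << alpha << " " << beta << "\n";
    }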
@@ -1974,12 +1974,12 @@ void runtime::cpu::CPU_ExternalFunction::build()
 for (size_t i = 0; i < functors.size(); i++)
 {
     ss << op_names.at(i) << " will be executed with the following inputs:\n";
-    for (auto is : this->m_op_attrs.at(i).Inputs)
+    for (auto& is : this->m_op_attrs.at(i).Inputs)
     {
         ss << "\t" << is << " = " << this->get_tensor_data(is) << std::endl;
     }
     ss << "and outputs :\n";
-    for (auto os : this->m_op_attrs.at(i).Outputs)
+    for (auto& os : this->m_op_attrs.at(i).Outputs)
     {
         ss << "\t" << os << " = " << this->get_tensor_data(os) << std::endl;
     }
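The switch from auto to auto& in these loops binds each element by reference instead of copying it on every iteration, which is what the analyzer objects to for non-trivial element types. A small illustration with hypothetical std::string elements:

    #include <string>
    #include <vector>

    int main()
    {
        std::vector<std::string> names{"Convolution", "Relu", "MaxPool"};
        for (auto n : names) // by value: copies every string
        {
            (void)n;
        }
        for (auto& n : names) // by reference: no copies, matching the fix above
        {
            (void)n;
        }
    }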
@@ -302,9 +302,10 @@ void set_layouts_binaryeltwise(ngraph::runtime::cpu::CPU_ExternalFunction* exter
 vector<memory::desc> i_mds;
 vector<memory::desc> o_mds;
 int select = 0;
-if (std::getenv("NGRAPH_PASS_CPU_LAYOUT_ELTWISE") != nullptr)
+char* ngraph_pass_cpu_layout_eltwise = std::getenv("NGRAPH_PASS_CPU_LAYOUT_ELTWISE");
+if (ngraph_pass_cpu_layout_eltwise != nullptr)
 {
-    const int user_select = std::atoi(std::getenv("NGRAPH_PASS_CPU_LAYOUT_ELTWISE"));
+    const int user_select = std::atoi(ngraph_pass_cpu_layout_eltwise);
     select = (user_select == 0 || user_select == 1) ? user_select : select;
 }
 i_mds.push_back(arg_mds[select]);
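This hunk reads the environment variable once instead of calling std::getenv twice; besides the redundant lookup, the second call is a latent null-pointer risk if its result is not re-checked. The fixed pattern, as a self-contained sketch:

    #include <cstdlib>

    int main()
    {
        int select = 0;
        // Fetch once; getenv may return nullptr, so test before std::atoi.
        const char* layout_eltwise = std::getenv("NGRAPH_PASS_CPU_LAYOUT_ELTWISE");
        if (layout_eltwise != nullptr)
        {
            const int user_select = std::atoi(layout_eltwise);
            select = (user_select == 0 || user_select == 1) ? user_select : select;
        }
        return select;
    }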
@@ -41,6 +41,7 @@ shared_ptr<runtime::Tensor>
     const Shape& shape)
 {
     auto it = m_backend_list.begin();
+    NGRAPH_ASSERT(it != m_backend_list.end());
     return (*it)->create_tensor(element_type, shape);
 }
@@ -48,6 +49,7 @@ shared_ptr<runtime::Tensor> runtime::hybrid::HybridBackend::create_tensor(
     const element::Type& element_type, const Shape& shape, void* memory_pointer)
 {
     auto it = m_backend_list.begin();
+    NGRAPH_ASSERT(it != m_backend_list.end());
     return (*it)->create_tensor(element_type, shape, memory_pointer);
 }
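Both hunks guard the dereference of begin(): on an empty m_backend_list, *it would be undefined behavior. The added NGRAPH_ASSERT makes the non-empty precondition explicit. A generic sketch using the standard assert in place of the nGraph macro:

    #include <cassert>
    #include <vector>

    int front_of(const std::vector<int>& v)
    {
        auto it = v.begin();
        assert(it != v.end() && "backend list must not be empty"); // check before *it
        return *it;
    }

    int main()
    {
        std::vector<int> backends{42};
        return front_of(backends);
    }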
@@ -113,7 +113,7 @@ namespace ngraph
     static bool merge(element::Type& dst, const element::Type& t1, const element::Type& t2);
 private:
-    Type_t m_type;
+    Type_t m_type{Type_t::undefined};
 };
 extern NGRAPH_API const Type dynamic;
@@ -180,8 +180,8 @@ protected:
         }
     }
-    float expected;
-    int tolerance_bits;
+    float expected{0};
+    int tolerance_bits{0};
     float upper_bound;
     float lower_bound;
     float past_upper_bound;
@@ -299,8 +299,8 @@ protected:
         }
     }
-    double expected;
-    int tolerance_bits;
+    double expected{0};
+    int tolerance_bits{0};
     double upper_bound;
     double lower_bound;
     double past_upper_bound;
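The last three hunks apply the same remedy: C++11 default member initializers, which give m_type, expected, and tolerance_bits a defined value no matter which constructor runs. A minimal sketch with a hypothetical class:

    // Hypothetical type, illustrating default member initializers.
    struct Tolerance
    {
        float expected{0};     // used whenever no constructor assigns the field
        int tolerance_bits{0};

        Tolerance() = default;                                        // both fields zero
        Tolerance(float e, int t) : expected(e), tolerance_bits(t) {} // overrides both
    };

    int main()
    {
        Tolerance t;
        return t.tolerance_bits;
    }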