Unverified Commit 9b9b184a authored by Scott Cyphers, committed by GitHub

Fix compiler warnings. (#3513)

parent 7b711340
......
@@ -46,8 +46,6 @@ namespace ngraph
external_function->get_buffer_index(args[2].get_name()); // scale
auto out0_buffer_index = external_function->get_buffer_index(out[0].get_name());
auto scales_size = shape_size(args[2].get_shape());
if (runtime::cpu::mkldnn_utils::use_mkldnn_kernel(node))
{
auto& mkldnn_emitter = external_function->get_mkldnn_emitter();
......
@@ -62,7 +60,6 @@ namespace ngraph
auto& deps = mkldnn_emitter->get_primitive_deps(ip_index);
auto functor = [&,
scales_size,
ip_desc,
ip_attr,
deps,
......
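The hunks in this first file only shrink (8 lines to 6 and 7 to 6), which is consistent with dropping locals or lambda captures that trigger unused-variable style warnings; the collapsed view does not show exactly which lines were removed. Purely as an illustration (the functor below is invented, not the ngraph builder code), this is the kind of Clang -Wunused-lambda-capture diagnostic such removals typically silence:

```cpp
#include <cstddef>
#include <vector>

// Clang reports -Wunused-lambda-capture when an explicitly captured variable
// never appears in the lambda body; under -Werror this fails the build.
void build_functor(const std::vector<float>& scales)
{
    std::size_t scales_size = scales.size();

    // warning: lambda capture 'scales_size' is not used
    auto warns = [scales_size] { /* body does not read scales_size */ };

    // Typical fix: drop the unused capture (and the now-unneeded local,
    // if nothing else reads it).
    auto fixed = [] { /* same body */ };

    warns();
    fixed();
}
```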
......
@@ -132,7 +132,7 @@ void ngraph::runtime::cpu::pass::LSTMFusion::construct_onnx_lstmcell_fprop()
auto weights_ifco = std::make_shared<ngraph::op::Concat>(
NodeVector{gate_slices[0], gate_slices[2], gate_slices[3], gate_slices[1]}, 0);
- return weights_ifco;
+ return std::move(weights_ifco);
};
auto get_bias_ifco_gate_order =
......
@@ -157,7 +157,7 @@ void ngraph::runtime::cpu::pass::LSTMFusion::construct_onnx_lstmcell_fprop()
auto new_bias = std::make_shared<ngraph::op::Concat>(
NodeVector{gate_slices[0], gate_slices[2], gate_slices[3], gate_slices[1]}, 0);
- return new_bias;
+ return std::move(new_bias);
};
auto W_iofc = pattern_map[W];
......
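The two return-statement edits above appear to add std::move where a local is returned by name but its type differs from the lambda's declared return type (for instance a std::shared_ptr<ngraph::op::Concat> returned as std::shared_ptr<Node>). Under pre-C++20 rules that return copies the shared_ptr, and Clang 7+ flags it with -Wreturn-std-move. A minimal stand-alone sketch of the same situation, using made-up types rather than the ngraph ones:

```cpp
#include <memory>
#include <utility>

struct Node { virtual ~Node() = default; };
struct Concat : Node {};

// Returning a local whose type differs from the declared return type: before
// C++20 the implicit move does not apply to this converting case, so the
// shared_ptr is copied (an extra atomic ref-count round trip) and Clang warns:
// "local variable 'concat' will be copied despite being returned by name".
std::shared_ptr<Node> make_node_copying()
{
    auto concat = std::make_shared<Concat>();
    return concat;
}

// The pattern used in the diff: request the move explicitly.
std::shared_ptr<Node> make_node_moving()
{
    auto concat = std::make_shared<Concat>();
    return std::move(concat);
}
```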
......
@@ -542,7 +542,7 @@ ngraph::runtime::plaidml::builder::UnaryContraction
{
case OpType::AvgPool: agg_op = "+"; break;
case OpType::MaxPool: agg_op = ">"; break;
default: throw std::runtime_error("Asked for pool contraction for non-pool op");
case OpType::Conv: throw std::runtime_error("Asked for pool contraction for non-pool op");
}
return builder::UnaryContraction{agg_op}
.set((m_op == OpType::AvgPool && m_deriv == DerivType::Data) ? I_out_body() : O_out_body())
......
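Here, and again in the log-severity and PLAIDML_DATA switches below, a default: label is replaced by the remaining enumerators spelled out explicitly. Keeping enum switches exhaustive this way lets -Wswitch / -Wswitch-enum flag any enumerator added later instead of letting it fall silently into a default branch. A small sketch with an invented enum (not the actual ngraph OpType):

```cpp
#include <stdexcept>
#include <string>

enum class PoolKind { Avg, Max, Conv };

std::string agg_op(PoolKind kind)
{
    switch (kind)
    {
    case PoolKind::Avg: return "+";
    case PoolKind::Max: return ">";
    // Listing the last enumerator instead of writing `default:` keeps the
    // switch fully covered; if a fourth PoolKind is added later, the compiler
    // warns about the missing case rather than silently taking a default.
    case PoolKind::Conv: throw std::runtime_error("pool contraction requested for non-pool op");
    }
    // Some toolchains still warn that control can reach the end of a
    // non-void function, so keep an unreachable guard after the switch.
    throw std::logic_error("unreachable switch fall-through");
}
```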
......
@@ -43,7 +43,7 @@ namespace
return;
case VAI_LOG_SEVERITY_WARNING: NGRAPH_WARN << message; return;
case VAI_LOG_SEVERITY_ERROR:
- default: NGRAPH_ERR << message; return;
+ case VAI_LOG_SEVERITY_FATAL: NGRAPH_ERR << message; return;
}
}
}
......
......
@@ -163,7 +163,9 @@ void ngraph::runtime::plaidml::ImplConstant::Apply()
case PLAIDML_DATA_FLOAT64:
set_output(static_cast<double>(*static_cast<const double*>(op().get_data_ptr())));
return;
- default: break;
+ case PLAIDML_DATA_INVALID:
+ case PLAIDML_DATA_PRNG:
+ case PLAIDML_DATA_INT128: return;
}
}
......
......
@@ -40,7 +40,6 @@ void ngraph::runtime::plaidml::ImplGroupConvolution::Apply()
const auto& image = op_input(0);
const auto& filter = op_input(1);
auto rank = op().get_input_shape(0).size() - 2;
const auto& groups = op().get_groups();
const auto& padding_above = op().get_padding_above();
const auto& padding_below = op().get_padding_below();
......
......
@@ -117,7 +117,9 @@ std::string ngraph::runtime::plaidml::tile_converter(const std::string& tensor_n
case PLAIDML_DATA_FLOAT16: return "as_float(" + tensor_name + ", 16)";
case PLAIDML_DATA_FLOAT32: return "as_float(" + tensor_name + ", 32)";
case PLAIDML_DATA_FLOAT64: return "as_float(" + tensor_name + ", 64)";
- default: throw std::runtime_error{"Unsupported type conversion"};
+ case PLAIDML_DATA_INVALID:
+ case PLAIDML_DATA_INT128:
+ case PLAIDML_DATA_PRNG: throw std::runtime_error{"Unsupported type conversion"};
}
}
......