Unverified commit b94a042d authored by Scott Cyphers, committed by GitHub

Fix clang compiler warnings (#2898)

* Fix clang compiler warnings

* Remove unintended file.

* style

* Not part of PR

* Another extra closure ref

* More warnings from merges

* Lambda arg was used
parent d93ced6c
@@ -104,7 +104,7 @@ namespace ngraph
 output_zero_point->get_element_type(),
 axes,
 op::Quantize::RoundMode::ROUND_NEAREST_TOWARD_EVEN);
-return q_convolution;
+return move(q_convolution);
 }
 shared_ptr<Node> QuantizedLinearConvolutionBias(const shared_ptr<Node>& input,
......
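The return-statement hunks above and below all follow one pattern: a named local whose type differs from the function's return type (for example a shared_ptr to a derived node returned as shared_ptr<Node>) gets wrapped in std::move. This is most likely clang's -Wreturn-std-move, which warns that such a return copies instead of moving. A minimal standalone sketch; Node, Quantize, and make_quantize here are stand-ins, not the real ngraph declarations:

    #include <memory>
    #include <utility>

    struct Node { virtual ~Node() = default; };
    struct Quantize : Node {};  // hypothetical stand-in for an ngraph op class

    std::shared_ptr<Node> make_quantize()
    {
        auto q = std::make_shared<Quantize>();  // shared_ptr<Quantize>, not shared_ptr<Node>
        // Because the local's type differs from the return type, the implicit
        // move on return may not apply; clang suggests an explicit std::move.
        return std::move(q);
    }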
@@ -49,7 +49,7 @@ shared_ptr<Node> op::Result::copy_with_new_args(const NodeVector& new_args) cons
 {
 res->set_needs_default_layout(m_needs_default_layout);
 }
-return res;
+return std::move(res);
 }
 void op::Result::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
......
@@ -97,7 +97,7 @@ shared_ptr<Node> op::ReverseSequence::copy_with_new_args(const NodeVector& new_a
 check_new_args_count(this, new_args);
 auto res =
 make_shared<ReverseSequence>(new_args.at(0), new_args.at(1), m_batch_axis, m_seq_axis);
-return res;
+return move(res);
 }
 void op::ReverseSequence::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
......
@@ -160,7 +160,7 @@ std::shared_ptr<Node> fuse_group_convolution(const std::shared_ptr<Node>& n)
 sconv->get_data_dilation_strides(),
 n->get_arguments().size());
-return new_conv;
+return move(new_conv);
 }
 bool ngraph::pass::BatchFusion::run_on_function(std::shared_ptr<Function> func)
......
@@ -410,7 +410,7 @@ static std::shared_ptr<Node> reduce_broadcast(std::shared_ptr<Node> broadcast)
 std::make_shared<op::Broadcast>(matched_broadcast_w1->get_argument(0),
 shape_w1,
 matched_broadcast_w1->get_broadcast_axes());
-return new_broadcast_w1;
+return move(new_broadcast_w1);
 }
 static size_t shape_to_index(Shape shape)
......
@@ -38,8 +38,8 @@ namespace ngraph
 size_t n = goe->get_n();
 auto arg_buffer_index = external_function->get_buffer_index(args[n].get_name());
 auto out_buffer_index = external_function->get_buffer_index(out[0].get_name());
-auto functor = [&, n, arg_buffer_index, out_buffer_index](
-    CPURuntimeContext* ctx, CPUExecutionContext* ectx) {
+auto functor = [&, arg_buffer_index, out_buffer_index](CPURuntimeContext* ctx,
+                                                       CPUExecutionContext* ectx) {
 if (ctx->buffer_data[arg_buffer_index] != ctx->buffer_data[out_buffer_index])
 {
 throw ngraph_error("GOE's input and out must be equal");
......
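This hunk drops n from the lambda's explicit capture list; with the [&] capture-default already present, an explicit by-value capture that the body never reads trips clang's -Wunused-lambda-capture. A small self-contained example of the same fix, with illustrative names only:

    #include <cstdio>

    int main()
    {
        int n = 3;
        int idx = n + 4;

        // Before: [&, n, idx] -- 'n' is captured by value but never used in the
        // body, so clang's -Wunused-lambda-capture fires on it.
        // After: explicitly capture only what the body actually reads.
        auto print_index = [&, idx]() { std::printf("index = %d\n", idx); };
        print_index();
        return 0;
    }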
@@ -30,7 +30,7 @@ namespace ngraph
 []() -> std::shared_ptr<ngraph::op::util::OpAnnotations> {
 auto op_annotations =
 std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
-return op_annotations;
+return std::move(op_annotations);
 };
 return func;
 }
......
@@ -73,7 +73,7 @@ namespace ngraph
 // TODO (nbpatel) Templatize the return type when we have double scales
 template <typename OP>
-static std::vector<float> extract_scale_value(const ngraph::Node* node, int index)
+inline std::vector<float> extract_scale_value(const ngraph::Node* node, int index)
 {
 auto qc = static_cast<const OP*>(node);
 std::vector<float> scale_val = {1.0f};
......
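Here a helper defined in a header loses its static keyword in favour of inline. This is presumably aimed at clang diagnostics such as -Wunused-function or -Wunneeded-internal-declaration, which fire when a translation unit includes the header but never calls the internal-linkage helper; inline lets the definition appear in every TU without the warning. A non-template illustration under that assumption (scale_util.hpp and default_scale are made-up names, not ngraph code):

    // scale_util.hpp -- hypothetical header, not the real ngraph file
    #pragma once
    #include <vector>

    // 'static' here would give each including TU its own internal-linkage copy,
    // and TUs that never call it can get an unused-function style warning.
    // 'inline' keeps a single logical definition that may appear in many TUs.
    inline std::vector<float> default_scale()
    {
        return {1.0f};
    }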
@@ -1680,7 +1680,7 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_groupconv_batchnorm_global
 auto bn =
 std::make_shared<ngraph::op::BatchNormInference>(eps, gamma, beta, conv_label, mean, var);
-auto callback = [input, filters, conv_label, mean, var, gamma, beta, eps](pattern::Matcher& m) {
+auto callback = [input, filters, conv_label, mean, var, gamma, beta](pattern::Matcher& m) {
 NGRAPH_DEBUG << "In callback for groupconv BatchNorm folding against node = "
 << m.get_match_root()->get_name();
@@ -1838,7 +1838,7 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_deconvolution_affine_foldi
 double eps = 0.001;
 auto bn = std::make_shared<op::BatchNormInference>(eps, gamma, beta, conv_label, mean, var);
-auto callback = [data_label, filters, out_delta, conv_label, mean, var, gamma, beta, eps](
+auto callback = [data_label, filters, out_delta, conv_label, mean, var, gamma, beta](
 pattern::Matcher& m) {
 NGRAPH_DEBUG << "In callback for deconv affine folding against node = "
 << m.get_match_root()->get_name();
......
@@ -502,7 +502,7 @@ std::shared_ptr<Node> fuse_group_convolution(const std::shared_ptr<Node>& n)
 sconv->get_data_dilation_strides(),
 n->get_arguments().size());
-return new_conv;
+return move(new_conv);
 }
 std::shared_ptr<Node> fuse_batch_mat_mul_transpose(const std::shared_ptr<Node>& n)
......
@@ -31,7 +31,7 @@ namespace ngraph
 // Had to split out these two functions. They used to be lambda expressions but
 // MSVC had difficulty compiling. This way is more explicit.
 template <typename T, typename U>
-static bool compare_max(const std::tuple<T, U>& a, const std::tuple<T, U>& b)
+inline bool compare_max(const std::tuple<T, U>& a, const std::tuple<T, U>& b)
 {
 // this is intentional to be able to compare floats directly
 // without using relative or absolute tolerance
@@ -46,7 +46,7 @@ namespace ngraph
 return a > b;
 }
 template <typename T, typename U>
-static bool compare_min(const std::tuple<T, U>& a, const std::tuple<T, U>& b)
+inline bool compare_min(const std::tuple<T, U>& a, const std::tuple<T, U>& b)
 {
 return a < b;
 }
......
@@ -228,7 +228,7 @@ static json write_partial_shape(const PartialShape& s)
 {
 vals[i] = write_dimension(s[i]);
 }
-return vals;
+return move(vals);
 }
 }
......
@@ -110,7 +110,7 @@ std::vector<const element::Type*> element::Type::get_known_types()
 element::Type::Type(
 size_t bitwidth, bool is_real, bool is_signed, bool is_quantized, const std::string& cname)
 {
-for (const pair<element::Type_t, TypeInfo>& t : get_type_info_map())
+for (auto& t : get_type_info_map())
 {
 const TypeInfo& info = t.second;
 if (bitwidth == info.m_bitwidth && is_real == info.m_is_real &&
......
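The for-loop hunks above and below all switch the loop variable from an explicitly spelled const pair<Key, Value>& to auto&. For a std::map the value_type is pair<const Key, Value>, so the hand-written reference type cannot bind directly and clang's range-loop analysis warns that every element is silently copied through a temporary; auto& deduces the exact element type and binds without a copy. A compilable sketch of the difference, with invented container contents:

    #include <cstdio>
    #include <map>
    #include <string>

    int main()
    {
        std::map<std::string, size_t> timing = {{"Add", 12}, {"Dot", 40}};

        // Warns under clang's range-loop analysis: value_type is
        // pair<const string, size_t>, so this reference binds to a per-element
        // temporary copy.
        // for (const std::pair<std::string, size_t>& t : timing) { ... }

        // auto& deduces pair<const string, size_t> and binds each element directly.
        for (auto& t : timing)
        {
            std::printf("%s: %zu\n", t.first.c_str(), t.second);
        }
        return 0;
    }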
@@ -85,7 +85,7 @@ multimap<size_t, string> aggregate_timing_details(const vector<PerfShape>& perf_
 }
 multimap<size_t, string> rc;
-for (const pair<string, size_t>& t : timing)
+for (auto& t : timing)
 {
 rc.insert({t.second, t.first + to_string(count[t.first])});
 }
@@ -103,7 +103,7 @@ multimap<size_t, string> aggregate_timing(const vector<PerfShape>& perf_data)
 }
 multimap<size_t, string> rc;
-for (const pair<string, size_t>& t : timing)
+for (auto& t : timing)
 {
 rc.insert({t.second, t.first});
 }
@@ -115,7 +115,7 @@ void print_times(const multimap<size_t, string>& timing)
 // set the column widths
 int name_width = 0;
 int time_width = 0;
-for (const pair<size_t, string>& p : timing)
+for (auto& p : timing)
 {
 name_width = max(name_width, static_cast<int>(p.second.size()));
 time_width = max(time_width, static_cast<int>(locale_string(p.first).size()));
@@ -411,7 +411,7 @@ OPTIONS
 cout << " " << type << "\n";
 }
 cout << "--\n";
-for (const pair<string, size_t>& op_info : op_list)
+for (auto& op_info : op_list)
 {
 cout << op_info.first << ": " << op_info.second << " ops" << endl;
 }
......
@@ -50,7 +50,7 @@ public:
 virtual std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override
 {
 auto clone = make_shared<ControlDependencyOp>(new_args, std::set<std::shared_ptr<Node>>{});
-return clone;
+return move(clone);
 }
 ControlDependencyOp(const NodeVector& args, const std::set<std::shared_ptr<Node>>& deps)
......
@@ -3206,7 +3206,7 @@ static std::shared_ptr<Function>
 auto dot = std::make_shared<op::Dot>(data_param_reshape, W_reshape);
 auto bias_broadcast = make_shared<op::Broadcast>(bias, dot->get_shape(), AxisSet{0});
 auto add_bias = std::make_shared<op::Add>(dot, bias_broadcast);
-return add_bias;
+return move(add_bias);
 };
......