Commit 706e705e authored by Ashok Emani, committed by Scott Cyphers

resolve -wall warnings with clang-8 (#3602)

* resolve -wall warnings with clang-8

* remove -Wno-extra-semi-stmt
parent b0373e3f
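
Most of the hunks below make one of two mechanical changes. The larger group removes a redundant ';' that clang-8 flags as an empty statement under -Wextra-semi-stmt: either a '};' after an if/while block, or a trailing ';' after a statement-like macro such as SELECT_KERNEL, REGISTER_KNOBBED_PASS, CHECK_RANK, or CANONICALIZE_MD. The commit fixes the code rather than keeping the -Wno-extra-semi-stmt suppression. The remaining hunks drop const qualifiers from data members and remove two defaulted constructors; see the note after the m_alpha hunk below. A minimal, illustrative reproduction of the extra-semicolon diagnostic and the fix (not code from this repository):

// Illustrative reproduction only -- not code from this repository.
// Compile with: clang++ -std=c++11 -Wextra-semi-stmt extra_semi_demo.cpp
#include <cstdio>

// A statement-like macro that expands to a full if/else statement,
// similar in spirit to the SELECT_KERNEL family below (hypothetical macro).
#define PRINT_SIGN(v) \
    if ((v) < 0) \
    { \
        std::puts("negative"); \
    } \
    else \
    { \
        std::puts("non-negative"); \
    }

int classify(int v)
{
    if (v == 0)
    {
        return 0;
    };             // warning: empty expression statement ("};" is what the diff turns into "}")
    PRINT_SIGN(v); // warning: the macro already expands to a complete statement
    PRINT_SIGN(v)  // fixed form used throughout this commit
    return 1;
}

int main()
{
    return classify(7) == 1 ? 0 : 1;
}
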
......@@ -756,7 +756,7 @@ bool ngraph::check_for_cycles(const ngraph::Function* func,
{
is_bkwd_cycle = true;
return true;
};
}
}
for (auto param : func->get_parameters())
......@@ -768,7 +768,7 @@ bool ngraph::check_for_cycles(const ngraph::Function* func,
{
is_bkwd_cycle = false;
return true;
};
}
}
// no cycles
return false;
......
......@@ -48,7 +48,7 @@ namespace ngraph
double get_alpha() const { return m_alpha; }
private:
const double m_alpha;
double m_alpha;
};
} // namespace op
} // namespace ngraph
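
This hunk drops const from a private data member, and several later hunks do the same for other members (m_transpose_a/b, m_eps, m_groups, m_src_id, m_dest_id), while the next two hunks remove GRUCell() = default; and LSTMCell() = default;. The commit message does not say which warning these address; the most likely candidate is clang's -Wdefaulted-function-deleted (new around clang-8), since a const non-static data member with no initializer makes a defaulted default constructor implicitly deleted (and also deletes the copy/move assignment operators). A minimal sketch of that diagnostic, using an assumed class rather than the real op definitions:

// Illustrative sketch; the class here is an assumption, not the real op definition.
// Compile with: clang++ -std=c++11 defaulted_deleted_demo.cpp
class EluLike
{
public:
    explicit EluLike(double alpha)
        : m_alpha(alpha)
    {
    }
    // clang-8, -Wdefaulted-function-deleted: this explicitly defaulted default
    // constructor is implicitly deleted, because the const member below has
    // no initializer.
    EluLike() = default;
    double get_alpha() const { return m_alpha; }

private:
    const double m_alpha; // the diff's fix: drop 'const' here (or, as in the
                          // GRUCell/LSTMCell hunks below, drop '= default')
};

int main()
{
    EluLike e(0.5);
    return e.get_alpha() == 0.5 ? 0 : 1;
}
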
......@@ -44,7 +44,6 @@ namespace ngraph
NGRAPH_API
static constexpr NodeTypeInfo type_info{"GRUCell", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
GRUCell() = default;
///
/// \brief Constructs GRUCell node.
///
......
......@@ -45,7 +45,6 @@ namespace ngraph
NGRAPH_API
static constexpr NodeTypeInfo type_info{"LSTMCell", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
LSTMCell() = default;
///
/// \brief Constructs LSTMCell node.
///
......
......@@ -51,8 +51,8 @@ namespace ngraph
bool get_transpose_a() const { return m_transpose_a; }
bool get_transpose_b() const { return m_transpose_b; }
private:
const bool m_transpose_a;
const bool m_transpose_b;
bool m_transpose_a;
bool m_transpose_b;
};
} // namespace op
} // namespace ngraph
......@@ -70,9 +70,9 @@ namespace ngraph
bool get_normalize_variance() const { return m_normalize_variance; }
AxisSet get_reduction_axes() const { return m_reduction_axes; }
private:
const double m_eps;
const bool m_across_channels;
const bool m_normalize_variance;
double m_eps;
bool m_across_channels;
bool m_normalize_variance;
AxisSet m_reduction_axes;
};
} // namespace op
......
......@@ -64,7 +64,7 @@ namespace ngraph
Shape get_pre_shuffle_shape(const Shape& data_shape) const;
int m_axis;
const size_t m_groups;
size_t m_groups;
};
}
}
......@@ -45,7 +45,7 @@ namespace ngraph
int get_src_id() const;
private:
const int m_src_id;
int m_src_id;
};
}
}
......@@ -45,7 +45,7 @@ namespace ngraph
int get_dest_id() const;
private:
const int m_dest_id;
int m_dest_id;
};
}
}
......@@ -37,7 +37,7 @@ static bool broadcast_and_replace(std::shared_ptr<ngraph::Node>& node)
op->input(i).replace_source_output(new_args[i]->output(0));
}
return true;
};
}
}
return false;
}
......
......@@ -58,7 +58,7 @@ namespace ngraph
kernel;
SELECT_RANK2(
kernel, float, int64_t, in_shape.size(), runtime::cpu::kernel::argmax);
kernel, float, int64_t, in_shape.size(), runtime::cpu::kernel::argmax)
functor = [&,
kernel,
......@@ -81,7 +81,7 @@ namespace ngraph
std::function<decltype(runtime::cpu::kernel::argmax<float, int, 1>)> kernel;
SELECT_RANK2(
kernel, float, int, in_shape.size(), runtime::cpu::kernel::argmax);
kernel, float, int, in_shape.size(), runtime::cpu::kernel::argmax)
functor = [&,
kernel,
......@@ -108,7 +108,7 @@ namespace ngraph
kernel;
SELECT_RANK2(
kernel, double, int64_t, in_shape.size(), runtime::cpu::kernel::argmax);
kernel, double, int64_t, in_shape.size(), runtime::cpu::kernel::argmax)
functor = [&,
kernel,
......@@ -132,7 +132,7 @@ namespace ngraph
kernel;
SELECT_RANK2(
kernel, double, int, in_shape.size(), runtime::cpu::kernel::argmax);
kernel, double, int, in_shape.size(), runtime::cpu::kernel::argmax)
functor = [&,
kernel,
......@@ -159,7 +159,7 @@ namespace ngraph
kernel;
SELECT_RANK2(
kernel, int, int64_t, in_shape.size(), runtime::cpu::kernel::argmax);
kernel, int, int64_t, in_shape.size(), runtime::cpu::kernel::argmax)
functor = [&,
kernel,
......@@ -182,7 +182,7 @@ namespace ngraph
std::function<decltype(runtime::cpu::kernel::argmax<int, int, 1>)> kernel;
SELECT_RANK2(
kernel, int, int, in_shape.size(), runtime::cpu::kernel::argmax);
kernel, int, int, in_shape.size(), runtime::cpu::kernel::argmax)
functor = [&,
kernel,
......
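
The CPU builder changes above, and the argmin, avg_pool, batch_norm, convert, convolution, dot, and similar hunks that follow, all drop the ';' after SELECT_KERNEL, SELECT_RANK2, SELECT_KERNEL_BY_RANK, and related dispatch macros. These macros are used as complete statements, presumably expanding to an if/else-if chain over element types and ranks, so the extra semicolon at the call site is exactly the empty statement -Wextra-semi-stmt complains about. A heavily simplified sketch of that dispatch shape (the macro, element-type strings, and kernel below are assumptions, not nGraph's real SELECT_KERNEL):

// Heavily simplified sketch; not nGraph's real SELECT_KERNEL implementation.
#include <cstddef>
#include <functional>
#include <stdexcept>
#include <string>

namespace kernel
{
    template <typename T>
    void relu(const void* in, void* out, std::size_t count)
    {
        const T* src = static_cast<const T*>(in);
        T* dst = static_cast<T*>(out);
        for (std::size_t i = 0; i < count; ++i)
        {
            dst[i] = src[i] > T(0) ? src[i] : T(0);
        }
    }
}

// Expands to a complete if/else-if statement, so call sites must not append ';'.
#define SELECT_KERNEL_SKETCH(KV, ET, K) \
    if ((ET) == "f32") \
    { \
        KV = K<float>; \
    } \
    else if ((ET) == "f64") \
    { \
        KV = K<double>; \
    } \
    else \
    { \
        throw std::runtime_error("unsupported element type: " + (ET)); \
    }

std::function<void(const void*, void*, std::size_t)> build(const std::string& element_type)
{
    std::function<void(const void*, void*, std::size_t)> kernel_fn;
    SELECT_KERNEL_SKETCH(kernel_fn, element_type, kernel::relu)
    // ^ no trailing ';' -- with one, clang-8 reports -Wextra-semi-stmt here
    return kernel_fn;
}

int main()
{
    float in[3] = {-1.0f, 0.0f, 2.0f};
    float out[3] = {0.0f, 0.0f, 0.0f};
    build("f32")(in, out, 3);
    return out[2] == 2.0f ? 0 : 1;
}
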
......@@ -58,7 +58,7 @@ namespace ngraph
kernel;
SELECT_RANK2(
kernel, float, int64_t, in_shape.size(), runtime::cpu::kernel::argmin);
kernel, float, int64_t, in_shape.size(), runtime::cpu::kernel::argmin)
functor = [&,
kernel,
......@@ -81,7 +81,7 @@ namespace ngraph
std::function<decltype(runtime::cpu::kernel::argmin<float, int, 1>)> kernel;
SELECT_RANK2(
kernel, float, int, in_shape.size(), runtime::cpu::kernel::argmin);
kernel, float, int, in_shape.size(), runtime::cpu::kernel::argmin)
functor = [&,
kernel,
......@@ -108,7 +108,7 @@ namespace ngraph
kernel;
SELECT_RANK2(
kernel, double, int64_t, in_shape.size(), runtime::cpu::kernel::argmin);
kernel, double, int64_t, in_shape.size(), runtime::cpu::kernel::argmin)
functor = [&,
kernel,
......@@ -132,7 +132,7 @@ namespace ngraph
kernel;
SELECT_RANK2(
kernel, double, int, in_shape.size(), runtime::cpu::kernel::argmin);
kernel, double, int, in_shape.size(), runtime::cpu::kernel::argmin)
functor = [&,
kernel,
......@@ -159,7 +159,7 @@ namespace ngraph
kernel;
SELECT_RANK2(
kernel, int, int64_t, in_shape.size(), runtime::cpu::kernel::argmin);
kernel, int, int64_t, in_shape.size(), runtime::cpu::kernel::argmin)
functor = [&,
kernel,
......@@ -182,7 +182,7 @@ namespace ngraph
std::function<decltype(runtime::cpu::kernel::argmin<int, int, 1>)> kernel;
SELECT_RANK2(
kernel, int, int, in_shape.size(), runtime::cpu::kernel::argmin);
kernel, int, int, in_shape.size(), runtime::cpu::kernel::argmin)
functor = [&,
kernel,
......
......@@ -87,8 +87,7 @@ namespace ngraph
{
std::function<decltype(runtime::cpu::kernel::avg_pool<float>)> kernel;
SELECT_KERNEL(
kernel, out[0].get_element_type(), runtime::cpu::kernel::avg_pool);
SELECT_KERNEL(kernel, out[0].get_element_type(), runtime::cpu::kernel::avg_pool)
auto functor = [&,
kernel,
......@@ -182,7 +181,7 @@ namespace ngraph
{
std::function<decltype(runtime::cpu::kernel::avg_pool_backprop<float>)> kernel;
SELECT_KERNEL(
kernel, out[0].get_element_type(), runtime::cpu::kernel::avg_pool_backprop);
kernel, out[0].get_element_type(), runtime::cpu::kernel::avg_pool_backprop)
auto functor = [&,
kernel,
......
......@@ -234,7 +234,7 @@ namespace ngraph
SELECT_KERNEL(kernel,
args[0].get_element_type(),
runtime::cpu::kernel::batch_norm_training);
runtime::cpu::kernel::batch_norm_training)
auto arg2_shape = args[2].get_shape();
auto arg0_buffer_index =
......@@ -283,7 +283,7 @@ namespace ngraph
SELECT_KERNEL(kernel,
args[0].get_element_type(),
runtime::cpu::kernel::batch_norm_inference);
runtime::cpu::kernel::batch_norm_inference)
auto arg2_shape = args[2].get_shape();
auto arg0_buffer_index =
......@@ -346,7 +346,7 @@ namespace ngraph
SELECT_KERNEL(kernel,
args[0].get_element_type(),
runtime::cpu::kernel::batch_norm_inference);
runtime::cpu::kernel::batch_norm_inference)
auto arg2_shape = args[2].get_shape();
auto arg0_buffer_index =
......
......@@ -80,7 +80,7 @@ namespace ngraph
std::function<decltype(runtime::cpu::kernel::bounded_relu<float>)> kernel;
SELECT_KERNEL(
kernel, out[0].get_element_type(), runtime::cpu::kernel::bounded_relu);
kernel, out[0].get_element_type(), runtime::cpu::kernel::bounded_relu)
auto functor = [&, kernel, alpha, count, input_buffer_index, out_buffer_index](
CPURuntimeContext* ctx, CPUExecutionContext* ectx) {
......
......@@ -158,7 +158,7 @@ namespace ngraph
SELECT_KERNEL_BY_RANK(kernel,
broadcast->get_input_element_type(0),
out_rank,
runtime::cpu::kernel::broadcast);
runtime::cpu::kernel::broadcast)
}
template <>
......
......@@ -152,7 +152,7 @@ namespace ngraph
SELECT_KERNEL_BY_RANK(kernel,
out[0].get_element_type(),
out[0].get_shape().size(),
runtime::cpu::kernel::concat);
runtime::cpu::kernel::concat)
auto functor = [&,
kernel,
......
......@@ -43,59 +43,59 @@ namespace ngraph
if (out[0].get_element_type() == element::boolean)
{
SELECT_KERNEL(
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_bool);
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_bool)
}
else if (out[0].get_element_type() == element::f32)
{
SELECT_KERNEL(kernel,
args[0].get_element_type(),
runtime::cpu::kernel::convert_to_float32);
runtime::cpu::kernel::convert_to_float32)
}
else if (out[0].get_element_type() == element::f64)
{
SELECT_KERNEL(kernel,
args[0].get_element_type(),
runtime::cpu::kernel::convert_to_float64);
runtime::cpu::kernel::convert_to_float64)
}
else if (out[0].get_element_type() == element::i8)
{
SELECT_KERNEL(
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_i8);
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_i8)
}
else if (out[0].get_element_type() == element::i16)
{
SELECT_KERNEL(
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_i16);
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_i16)
}
else if (out[0].get_element_type() == element::i32)
{
SELECT_KERNEL(
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_i32);
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_i32)
}
else if (out[0].get_element_type() == element::i64)
{
SELECT_KERNEL(
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_i64);
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_i64)
}
else if (out[0].get_element_type() == element::u8)
{
SELECT_KERNEL(
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_u8);
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_u8)
}
else if (out[0].get_element_type() == element::u16)
{
SELECT_KERNEL(
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_u16);
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_u16)
}
else if (out[0].get_element_type() == element::u32)
{
SELECT_KERNEL(
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_u32);
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_u32)
}
else if (out[0].get_element_type() == element::u64)
{
SELECT_KERNEL(
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_u64);
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_u64)
}
else
{
......
......@@ -98,7 +98,7 @@ namespace ngraph
kernel;
SELECT_KERNEL_3ARGS(
kernel, out[0].get_element_type(), runtime::cpu::kernel::convolution);
kernel, out[0].get_element_type(), runtime::cpu::kernel::convolution)
auto window_movement_strides = convolution->get_window_movement_strides();
auto window_dilation_strides = convolution->get_window_dilation_strides();
......@@ -485,7 +485,7 @@ namespace ngraph
SELECT_KERNEL(kernel,
out[0].get_element_type(),
runtime::cpu::kernel::convolution_backprop_in);
runtime::cpu::kernel::convolution_backprop_in)
auto& in_shape = convolution->get_data_batch_shape();
auto data_dilation_strides = convolution->get_data_dilation_strides_forward();
auto window_dilation_strides =
......@@ -599,7 +599,7 @@ namespace ngraph
SELECT_KERNEL(kernel,
out[0].get_element_type(),
runtime::cpu::kernel::convolution_backprop_filter);
runtime::cpu::kernel::convolution_backprop_filter)
auto& filters_shape = convolution->get_filters_shape();
auto window_dilation_strides =
......
......@@ -79,7 +79,7 @@ namespace ngraph
std::function<decltype(runtime::cpu::kernel::dot_scalar<float>)> kernel;
SELECT_KERNEL(
kernel, out[0].get_element_type(), runtime::cpu::kernel::dot_scalar);
kernel, out[0].get_element_type(), runtime::cpu::kernel::dot_scalar)
auto element_count = shape_size(second.get_shape());
......@@ -106,7 +106,7 @@ namespace ngraph
std::function<decltype(runtime::cpu::kernel::dot_1d_1d_1rd<float>)> kernel;
SELECT_KERNEL(
kernel, out[0].get_element_type(), runtime::cpu::kernel::dot_1d_1d_1rd);
kernel, out[0].get_element_type(), runtime::cpu::kernel::dot_1d_1d_1rd)
auto functor = [&,
kernel,
......@@ -135,7 +135,7 @@ namespace ngraph
std::function<decltype(runtime::cpu::kernel::dot_2d_1d_1rd<float>)> kernel;
SELECT_KERNEL(
kernel, out[0].get_element_type(), runtime::cpu::kernel::dot_2d_1d_1rd);
kernel, out[0].get_element_type(), runtime::cpu::kernel::dot_2d_1d_1rd)
auto functor = [&,
kernel,
......@@ -164,7 +164,7 @@ namespace ngraph
std::function<decltype(runtime::cpu::kernel::dot_1d_2d_1rd<float>)> kernel;
SELECT_KERNEL(
kernel, out[0].get_element_type(), runtime::cpu::kernel::dot_1d_2d_1rd);
kernel, out[0].get_element_type(), runtime::cpu::kernel::dot_1d_2d_1rd)
auto functor = [&,
kernel,
......@@ -234,7 +234,7 @@ namespace ngraph
std::function<decltype(runtime::cpu::kernel::dot_ref<float, float, float>)> kernel;
SELECT_KERNEL_3ARGS(
kernel, out[0].get_element_type(), runtime::cpu::kernel::dot_ref);
kernel, out[0].get_element_type(), runtime::cpu::kernel::dot_ref)
auto functor = [&,
kernel,
......
......@@ -61,7 +61,7 @@ namespace ngraph
{
std::function<decltype(runtime::cpu::kernel::reference_erf<float>)> kernel;
SELECT_KERNEL(
kernel, args[0].get_element_type(), runtime::cpu::kernel::reference_erf);
kernel, args[0].get_element_type(), runtime::cpu::kernel::reference_erf)
auto functor = [&, kernel, element_count, arg0_buffer_index, out0_buffer_index](
CPURuntimeContext* ctx, CPUExecutionContext* /* ectx */) {
kernel(ctx->buffer_data[arg0_buffer_index],
......
......@@ -66,7 +66,7 @@ namespace ngraph
args[0].get_element_type(),
params_shape.size(),
out_shape.size(),
runtime::cpu::kernel::gather_i64);
runtime::cpu::kernel::gather_i64)
return [&,
kernel,
......@@ -126,7 +126,7 @@ namespace ngraph
args[0].get_element_type(),
params_shape.size(),
out_shape.size(),
runtime::cpu::kernel::gather_i32);
runtime::cpu::kernel::gather_i32)
return [&,
kernel,
......
......@@ -80,7 +80,7 @@ namespace ngraph
std::function<decltype(runtime::cpu::kernel::leaky_relu<float>)> kernel;
SELECT_KERNEL(
kernel, out[0].get_element_type(), runtime::cpu::kernel::leaky_relu);
kernel, out[0].get_element_type(), runtime::cpu::kernel::leaky_relu)
auto functor = [&, kernel, alpha, count, input_buffer_index, out_buffer_index](
CPURuntimeContext* ctx, CPUExecutionContext* ectx) {
......
......@@ -86,8 +86,7 @@ namespace ngraph
{
std::function<decltype(runtime::cpu::kernel::max_pool<float>)> kernel;
SELECT_KERNEL(
kernel, out[0].get_element_type(), runtime::cpu::kernel::max_pool);
SELECT_KERNEL(kernel, out[0].get_element_type(), runtime::cpu::kernel::max_pool)
auto functor = [&,
kernel,
......@@ -220,7 +219,7 @@ namespace ngraph
std::function<decltype(runtime::cpu::kernel::max_pool_backprop<float>)> kernel;
SELECT_KERNEL(
kernel, out[0].get_element_type(), runtime::cpu::kernel::max_pool_backprop);
kernel, out[0].get_element_type(), runtime::cpu::kernel::max_pool_backprop)
auto functor = [&,
kernel,
......
......@@ -46,7 +46,7 @@ namespace ngraph
{
std::function<decltype(runtime::cpu::kernel::one_hot_rank_0<float>)> kernel;
SELECT_KERNEL(
kernel, out[0].get_element_type(), runtime::cpu::kernel::one_hot_rank_0);
kernel, out[0].get_element_type(), runtime::cpu::kernel::one_hot_rank_0)
auto functor =
[&, kernel, out_shape, one_hot_axis, arg_buffer_index, out_buffer_index](
CPURuntimeContext* ctx, CPUExecutionContext* ectx) {
......@@ -63,7 +63,7 @@ namespace ngraph
{
std::function<decltype(runtime::cpu::kernel::one_hot_rank_1<float>)> kernel;
SELECT_KERNEL(
kernel, out[0].get_element_type(), runtime::cpu::kernel::one_hot_rank_1);
kernel, out[0].get_element_type(), runtime::cpu::kernel::one_hot_rank_1)
auto functor = [&,
kernel,
arg_shape,
......@@ -88,7 +88,7 @@ namespace ngraph
kernel;
SELECT_KERNEL(kernel,
out[0].get_element_type(),
runtime::cpu::kernel::one_hot_rank_2_or_more);
runtime::cpu::kernel::one_hot_rank_2_or_more)
auto functor = [&,
kernel,
arg_shape,
......
......@@ -57,7 +57,7 @@ namespace ngraph
SELECT_KERNEL_BY_RANK(kernel,
args[0].get_element_type(),
arg_shape.size(),
runtime::cpu::kernel::pad_and_slice);
runtime::cpu::kernel::pad_and_slice)
auto functor = [&,
kernel,
......@@ -84,8 +84,7 @@ namespace ngraph
{
std::function<decltype(runtime::cpu::kernel::pad_ref<float>)> kernel;
SELECT_KERNEL(
kernel, args[0].get_element_type(), runtime::cpu::kernel::pad_ref);
SELECT_KERNEL(kernel, args[0].get_element_type(), runtime::cpu::kernel::pad_ref)
auto functor = [&,
kernel,
......@@ -130,7 +129,7 @@ namespace ngraph
SELECT_KERNEL_BY_RANK(kernel,
pad->get_input_element_type(0),
arg_shape.size(),
runtime::cpu::kernel::pad_and_slice);
runtime::cpu::kernel::pad_and_slice)
auto functor = [kernel, arg_shape, out_shape, padding_below, padding_above](
const std::vector<void*>& inputs, std::vector<void*>& outputs) {
......@@ -150,7 +149,7 @@ namespace ngraph
std::function<decltype(runtime::cpu::kernel::pad_ref<float>)> kernel;
SELECT_KERNEL(
kernel, pad->get_input_element_type(0), runtime::cpu::kernel::pad_ref);
kernel, pad->get_input_element_type(0), runtime::cpu::kernel::pad_ref)
auto functor =
[kernel, arg_shape, out_shape, padding_below, padding_above, pad_mode](
......
......@@ -186,4 +186,4 @@
reduction_axes, \
ectx->arena); \
}; \
functors.emplace_back(functor);
functors.emplace_back(functor)
......@@ -130,7 +130,7 @@ namespace ngraph
std::function<decltype(runtime::cpu::kernel::relu_backprop<float>)> kernel;
SELECT_KERNEL(
kernel, out[0].get_element_type(), runtime::cpu::kernel::relu_backprop);
kernel, out[0].get_element_type(), runtime::cpu::kernel::relu_backprop)
auto functor = [&,
kernel,
......
......@@ -79,7 +79,7 @@ namespace ngraph
SELECT_KERNEL_BY_RANK(kernel,
args[0].get_element_type(),
arg0_shape.size(),
runtime::cpu::kernel::strided_replace_slice);
runtime::cpu::kernel::strided_replace_slice)
auto functor = [&,
kernel,
......@@ -111,7 +111,7 @@ namespace ngraph
SELECT_KERNEL_BY_RANK(kernel,
args[0].get_element_type(),
arg0_shape.size(),
runtime::cpu::kernel::replace_slice);
runtime::cpu::kernel::replace_slice)
auto functor = [&,
kernel,
......
......@@ -84,27 +84,27 @@ namespace ngraph
if (arg_rank == 1)
{
SELECT_KERNEL_BY_RANK(
kernel, result_element_type, result_rank, runtime::cpu::kernel::reshape_1d);
kernel, result_element_type, result_rank, runtime::cpu::kernel::reshape_1d)
}
else if (arg_rank == 2)
{
SELECT_KERNEL_BY_RANK(
kernel, result_element_type, result_rank, runtime::cpu::kernel::reshape_2d);
kernel, result_element_type, result_rank, runtime::cpu::kernel::reshape_2d)
}
else if (arg_rank == 3)
{
SELECT_KERNEL_BY_RANK(
kernel, result_element_type, result_rank, runtime::cpu::kernel::reshape_3d);
kernel, result_element_type, result_rank, runtime::cpu::kernel::reshape_3d)
}
else if (arg_rank == 4)
{
SELECT_KERNEL_BY_RANK(
kernel, result_element_type, result_rank, runtime::cpu::kernel::reshape_4d);
kernel, result_element_type, result_rank, runtime::cpu::kernel::reshape_4d)
}
else
{
SELECT_KERNEL(
ref_kernel, result_element_type, runtime::cpu::kernel::reshape_ref);
ref_kernel, result_element_type, runtime::cpu::kernel::reshape_ref)
}
}
......
......@@ -43,7 +43,7 @@ namespace ngraph
std::function<decltype(runtime::cpu::kernel::reverse<float>)> kernel;
SELECT_KERNEL(kernel, out[0].get_element_type(), runtime::cpu::kernel::reverse);
SELECT_KERNEL(kernel, out[0].get_element_type(), runtime::cpu::kernel::reverse)
auto functor = [&,
kernel,
......
......@@ -50,7 +50,7 @@ namespace ngraph
SELECT_KERNEL_BY_RANK(kernel,
args[0].get_element_type(),
arg_shape.size(),
runtime::cpu::kernel::reverse_sequence_sli32);
runtime::cpu::kernel::reverse_sequence_sli32)
}
else
{
......
......@@ -72,7 +72,7 @@ namespace ngraph
args[0].get_element_type(),
inputs_shape.size(),
updates_shape.size(),
runtime::cpu::kernel::scatter_add_i64);
runtime::cpu::kernel::scatter_add_i64)
auto functor = [&,
kernel,
......@@ -111,7 +111,7 @@ namespace ngraph
args[0].get_element_type(),
inputs_shape.size(),
updates_shape.size(),
runtime::cpu::kernel::scatter_add_i32);
runtime::cpu::kernel::scatter_add_i32)
auto functor = [&,
kernel,
......
......@@ -43,7 +43,7 @@ namespace ngraph
std::function<decltype(runtime::cpu::kernel::select<float>)> kernel;
SELECT_KERNEL(kernel, out[0].get_element_type(), runtime::cpu::kernel::select);
SELECT_KERNEL(kernel, out[0].get_element_type(), runtime::cpu::kernel::select)
auto functor = [&,
kernel,
......
......@@ -141,7 +141,7 @@ namespace ngraph
SELECT_KERNEL_BY_RANK(kernel,
args[0].get_element_type(),
arg_shape.size(),
runtime::cpu::kernel::strided_slice);
runtime::cpu::kernel::strided_slice)
auto functor = [&,
kernel,
......@@ -171,7 +171,7 @@ namespace ngraph
SELECT_KERNEL_BY_RANK(kernel,
args[0].get_element_type(),
arg_shape.size(),
runtime::cpu::kernel::slice);
runtime::cpu::kernel::slice)
auto functor = [&,
kernel,
......
......@@ -85,7 +85,7 @@ namespace ngraph
PARTIAL_SELECT_KERNEL_BY_RANK(kernel,
args[0].get_element_type(),
args[0].get_shape().size(),
runtime::cpu::kernel::softmax_all);
runtime::cpu::kernel::softmax_all)
auto functor = [&, kernel, arg_shape, arg_buffer_index, out_buffer_index](
CPURuntimeContext* ctx, CPUExecutionContext* ectx) {
......@@ -108,7 +108,7 @@ namespace ngraph
kernel,
args[0].get_element_type(),
args[0].get_shape().size(),
runtime::cpu::kernel::softmax_innermost_1rd);
runtime::cpu::kernel::softmax_innermost_1rd)
auto functor =
[&, kernel, arg_shape, arg_buffer_index, out_buffer_index](
......@@ -128,7 +128,7 @@ namespace ngraph
PARTIAL_SELECT_KERNEL_BY_RANK(kernel,
args[0].get_element_type(),
args[0].get_shape().size(),
runtime::cpu::kernel::softmax_1rd);
runtime::cpu::kernel::softmax_1rd)
auto functor =
[&, kernel, arg_shape, axes, arg_buffer_index, out_buffer_index](
......@@ -148,7 +148,7 @@ namespace ngraph
SELECT_KERNEL(kernel,
args[0].get_element_type(),
runtime::cpu::kernel::softmax_3d_2rd);
runtime::cpu::kernel::softmax_3d_2rd)
auto functor =
[&, kernel, arg_shape, axes, arg_buffer_index, out_buffer_index](
......@@ -167,7 +167,7 @@ namespace ngraph
SELECT_KERNEL(kernel,
args[0].get_element_type(),
runtime::cpu::kernel::softmax_4d_3rd);
runtime::cpu::kernel::softmax_4d_3rd)
auto functor =
[&, kernel, arg_shape, axes, arg_buffer_index, out_buffer_index](
......
......@@ -47,7 +47,7 @@ namespace ngraph
size_t repeats = shape_size(out_shape);
std::function<decltype(runtime::cpu::kernel::tile_rank_0<float>)> kernel;
SELECT_KERNEL(
kernel, out[0].get_element_type(), runtime::cpu::kernel::tile_rank_0);
kernel, out[0].get_element_type(), runtime::cpu::kernel::tile_rank_0)
auto functor = [&, kernel, repeats, arg_buffer_index, out_buffer_index](
CPURuntimeContext* ctx, CPUExecutionContext* /* ectx */) {
kernel(ctx->buffer_data[arg_buffer_index],
......@@ -61,7 +61,7 @@ namespace ngraph
{
std::function<decltype(runtime::cpu::kernel::tile<float, 2>)> kernel;
SELECT_KERNEL_BY_RANK(
kernel, out[0].get_element_type(), arg_rank, runtime::cpu::kernel::tile);
kernel, out[0].get_element_type(), arg_rank, runtime::cpu::kernel::tile)
auto functor =
[&, kernel, arg_shape, out_shape, arg_buffer_index, out_buffer_index](
CPURuntimeContext* ctx, CPUExecutionContext* ectx) {
......
......@@ -69,7 +69,7 @@ namespace ngraph
SELECT_KERNEL_BY_RANK(kernel,
args[0].get_element_type(),
arg0_shape.size(),
runtime::cpu::kernel::strided_update_slice);
runtime::cpu::kernel::strided_update_slice)
auto functor = [&,
kernel,
......@@ -101,7 +101,7 @@ namespace ngraph
SELECT_KERNEL_BY_RANK(kernel,
args[0].get_element_type(),
arg0_shape.size(),
runtime::cpu::kernel::update_slice);
runtime::cpu::kernel::update_slice)
auto functor = [&,
kernel,
......
......@@ -142,7 +142,7 @@ namespace ngraph
auto& functors = external_function->get_functors();
const ngraph::op::Divide* divop = static_cast<const ngraph::op::Divide*>(node);
std::function<void(void*, void*, void*, size_t, bool, int)> kernel;
SELECT_KERNEL(kernel, args[0].get_element_type(), runtime::cpu::kernel::divide);
SELECT_KERNEL(kernel, args[0].get_element_type(), runtime::cpu::kernel::divide)
auto element_count = out[0].get_size();
auto arg0_buffer_index = external_function->get_buffer_index(args[0].get_name());
auto arg1_buffer_index = external_function->get_buffer_index(args[1].get_name());
......@@ -453,8 +453,7 @@ namespace ngraph
{
const ngraph::op::Divide* divop = static_cast<const ngraph::op::Divide*>(node);
std::function<void(void*, void*, void*, size_t, bool, int)> kernel;
SELECT_KERNEL(
kernel, node->get_input_element_type(0), runtime::cpu::kernel::divide);
SELECT_KERNEL(kernel, node->get_input_element_type(0), runtime::cpu::kernel::divide)
auto element_count = shape_size(node->get_shape());
bool pythondiv = divop->is_pythondiv();
auto functor = [&, kernel, element_count, pythondiv](
......
......@@ -324,7 +324,7 @@
element_count, \
ectx->arena); \
}; \
functors.emplace_back(functor);
functors.emplace_back(functor)
#define BUILD_BINARY_ELEMWISE_FUNCTOR(OP) \
(void)node; \
......@@ -347,7 +347,7 @@
element_count, \
ectx->arena); \
}; \
functors.emplace_back(functor);
functors.emplace_back(functor)
#define BUILD_UNARY_ELEMWISE_CF_FUNCTOR(OP) \
std::function<void(void*, void*, size_t, int)> kernel; \
......@@ -360,7 +360,7 @@
std::vector<void*>& outputs) { \
kernel(inputs[0], outputs[0], element_count, 0); \
}; \
return functor;
return functor
#define BUILD_BINARY_ELEMWISE_CF_FUNCTOR(OP) \
std::function<void(void*, void*, void*, size_t, int)> kernel; \
......@@ -373,7 +373,7 @@
std::vector<void*>& outputs) { \
kernel(inputs[0], inputs[1], outputs[0], element_count, 0); \
}; \
return functor;
return functor
#define REGISTER_OP_BUILDER(OP) \
GetGlobalBuildDispatcher().insert( \
......
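
The macro-definition hunks above (the reduction and element-wise builder macros) go in the opposite direction: instead of removing a semicolon at the call site, they drop the trailing ';' from the last statement the macro expands to (functors.emplace_back(functor), return functor). Presumably the existing call sites already end with ';', so after this change that semicolon terminates the final statement rather than adding an empty one. A small sketch of the convention (names are illustrative, not the real BUILD_*_ELEMWISE_FUNCTOR macros):

// Sketch of the call-site-semicolon convention; names are illustrative only.
#include <cstdio>
#include <functional>
#include <vector>

std::vector<std::function<void()>> functors;

// The last expanded statement deliberately has no ';': the semicolon written
// at the call site terminates it instead of becoming a separate empty statement.
#define BUILD_FUNCTOR_SKETCH(F) \
    auto functor = std::function<void()>(F); \
    functors.emplace_back(functor)

int main()
{
    BUILD_FUNCTOR_SKETCH([] { std::puts("functor ran"); }); // ';' supplied here
    for (auto& f : functors)
    {
        f();
    }
    return 0;
}
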
......@@ -1202,59 +1202,58 @@ void runtime::cpu::CPU_ExternalFunction::register_common_passes(
return true;
};
REGISTER_KNOBBED_PASS(LikeReplacement, true, ngraph::pass);
REGISTER_KNOBBED_PASS_WITH_ARGS(FusedOpDecomposition, true, ngraph::pass, is_supported);
REGISTER_KNOBBED_PASS(ImplicitBroadcastElimination, true, ngraph::pass);
REGISTER_KNOBBED_PASS(NopElimination, true, ngraph::pass);
REGISTER_KNOBBED_PASS(ZeroDimTensorElimination, true, ngraph::pass);
REGISTER_KNOBBED_PASS(LSTMFusion, true, runtime::cpu::pass);
REGISTER_KNOBBED_PASS(RNNFusion, true, runtime::cpu::pass);
REGISTER_KNOBBED_PASS(AlgebraicSimplification, true, ngraph::pass);
REGISTER_KNOBBED_PASS(MultiLayerRNNFusion, true, runtime::cpu::pass);
REGISTER_KNOBBED_PASS(BiDirectionalRnn, true, runtime::cpu::pass);
REGISTER_KNOBBED_PASS(CPURnnMatFusion, true, runtime::cpu::pass);
REGISTER_KNOBBED_PASS(BatchFusion, true, ngraph::pass);
REGISTER_KNOBBED_PASS(CPUBatchFusion, true, runtime::cpu::pass);
REGISTER_KNOBBED_PASS(ReshapeSinking, false, ngraph::pass);
REGISTER_KNOBBED_PASS(ReshapeElimination, true, ngraph::pass);
REGISTER_KNOBBED_PASS(RecurrentReshapeElimination, false, ngraph::pass);
REGISTER_KNOBBED_PASS(LikeReplacement, true, ngraph::pass)
REGISTER_KNOBBED_PASS_WITH_ARGS(FusedOpDecomposition, true, ngraph::pass, is_supported)
REGISTER_KNOBBED_PASS(ImplicitBroadcastElimination, true, ngraph::pass)
REGISTER_KNOBBED_PASS(NopElimination, true, ngraph::pass)
REGISTER_KNOBBED_PASS(ZeroDimTensorElimination, true, ngraph::pass)
REGISTER_KNOBBED_PASS(LSTMFusion, true, runtime::cpu::pass)
REGISTER_KNOBBED_PASS(RNNFusion, true, runtime::cpu::pass)
REGISTER_KNOBBED_PASS(AlgebraicSimplification, true, ngraph::pass)
REGISTER_KNOBBED_PASS(MultiLayerRNNFusion, true, runtime::cpu::pass)
REGISTER_KNOBBED_PASS(BiDirectionalRnn, true, runtime::cpu::pass)
REGISTER_KNOBBED_PASS(CPURnnMatFusion, true, runtime::cpu::pass)
REGISTER_KNOBBED_PASS(BatchFusion, true, ngraph::pass)
REGISTER_KNOBBED_PASS(CPUBatchFusion, true, runtime::cpu::pass)
REGISTER_KNOBBED_PASS(ReshapeSinking, false, ngraph::pass)
REGISTER_KNOBBED_PASS(ReshapeElimination, true, ngraph::pass)
REGISTER_KNOBBED_PASS(RecurrentReshapeElimination, false, ngraph::pass)
REGISTER_KNOBBED_PASS_WITH_ARGS(
CoreFusion, true, ngraph::pass, ngraph::pass::FusionType::ALL_FUSIONS);
REGISTER_KNOBBED_PASS(CPUPreFusion, true, runtime::cpu::pass);
CoreFusion, true, ngraph::pass, ngraph::pass::FusionType::ALL_FUSIONS)
REGISTER_KNOBBED_PASS(CPUPreFusion, true, runtime::cpu::pass)
// Disable CPUFusion if MLIR is enabled to preserve core ops.
if (std::getenv("NGRAPH_MLIR") == nullptr)
{
REGISTER_KNOBBED_PASS(CPUFusion, true, runtime::cpu::pass);
REGISTER_KNOBBED_PASS(CPUFusion, true, runtime::cpu::pass)
}
REGISTER_KNOBBED_PASS(CPUQuantFusion, true, runtime::cpu::pass);
REGISTER_KNOBBED_PASS(CPUHorizontalFusion, true, runtime::cpu::pass);
REGISTER_KNOBBED_PASS(CPUCollapseDims, true, runtime::cpu::pass);
REGISTER_KNOBBED_PASS(CPUQuantFusion, true, runtime::cpu::pass)
REGISTER_KNOBBED_PASS(CPUHorizontalFusion, true, runtime::cpu::pass)
REGISTER_KNOBBED_PASS(CPUCollapseDims, true, runtime::cpu::pass)
#if defined(NGRAPH_HALIDE)
REGISTER_KNOBBED_PASS(HalideSubgraphExtraction, true, ngraph::runtime::cpu::pass);
REGISTER_KNOBBED_PASS(HalideSubgraphExtraction, true, ngraph::runtime::cpu::pass)
#endif
#ifdef NGRAPH_MLIR_ENABLE
if (std::getenv("NGRAPH_MLIR") != nullptr)
{
REGISTER_KNOBBED_PASS(MLIRSubgraphExtractionPass, /*enable by default*/ true, ngraph::pass);
REGISTER_KNOBBED_PASS(MLIRSubgraphExtractionPass, /*enable by default*/ true, ngraph::pass)
}
#endif
NodeVector nv_cwi; // We dont need CPUWorkspaceInsertion to return list of indices
REGISTER_KNOBBED_PASS_WITH_ARGS(CPUWorkspaceInsertion, true, runtime::cpu::pass, nv_cwi, false);
REGISTER_KNOBBED_PASS_WITH_ARGS(CPUAssignment, true, runtime::cpu::pass, this);
REGISTER_KNOBBED_PASS_WITH_ARGS(CPUWorkspaceInsertion, true, runtime::cpu::pass, nv_cwi, false)
REGISTER_KNOBBED_PASS_WITH_ARGS(CPUAssignment, true, runtime::cpu::pass, this)
REGISTER_KNOBBED_PASS_WITH_ARGS(
ConstantFolding, true, ngraph::pass, GetGlobalCFDispatcherCPU());
REGISTER_KNOBBED_PASS_WITH_ARGS(CPULayout, true, runtime::cpu::pass, this);
REGISTER_KNOBBED_PASS_WITH_ARGS(ConstantFolding, true, ngraph::pass, GetGlobalCFDispatcherCPU())
REGISTER_KNOBBED_PASS_WITH_ARGS(CPULayout, true, runtime::cpu::pass, this)
REGISTER_KNOBBED_PASS_WITH_ARGS(
CommonSubexpressionElimination, true, ngraph::pass, runtime::cpu::get_cse_handlers_map());
CommonSubexpressionElimination, true, ngraph::pass, runtime::cpu::get_cse_handlers_map())
REGISTER_KNOBBED_PASS(CPUPostLayoutOptimizations, true, runtime::cpu::pass);
REGISTER_KNOBBED_PASS(CPUConvertLayoutConstantFolding, true, runtime::cpu::pass);
REGISTER_KNOBBED_PASS(CPUMemoryOptimization, true, runtime::cpu::pass);
REGISTER_KNOBBED_PASS(GetOutputElementElimination, false, ngraph::pass);
REGISTER_KNOBBED_PASS(CPUPostLayoutOptimizations, true, runtime::cpu::pass)
REGISTER_KNOBBED_PASS(CPUConvertLayoutConstantFolding, true, runtime::cpu::pass)
REGISTER_KNOBBED_PASS(CPUMemoryOptimization, true, runtime::cpu::pass)
REGISTER_KNOBBED_PASS(GetOutputElementElimination, false, ngraph::pass)
REGISTER_KNOBBED_PASS_WITH_ARGS(
PropagateCacheability, true, ngraph::pass, runtime::cpu::get_annotations_factory());
PropagateCacheability, true, ngraph::pass, runtime::cpu::get_annotations_factory())
bool reuse_memory = pass_config.get_pass_attribute("CPUMemoryAssignment::ReuseMemory") ||
pass_config.get_pass_attribute("ReuseMemory");
pass_manager.register_pass<runtime::cpu::pass::CPUMemoryAssignment>(
......
......@@ -510,22 +510,22 @@ memory::desc runtime::cpu::mkldnn_utils::try_get_named_md(const mkldnn_memory_de
return get_named_md(md, X);
switch (md.ndims)
{
case 1: CANONICALIZE_MD(mkldnn_x); break;
case 2: CANONICALIZE_MD(mkldnn_nc); break;
case 1: CANONICALIZE_MD(mkldnn_x) break;
case 2: CANONICALIZE_MD(mkldnn_nc) break;
case 3:
CANONICALIZE_MD(mkldnn_tnc);
CANONICALIZE_MD(mkldnn_ntc);
CANONICALIZE_MD(mkldnn_tnc)
CANONICALIZE_MD(mkldnn_ntc)
break;
case 4:
CANONICALIZE_MD(mkldnn_nchw);
CANONICALIZE_MD(mkldnn_nhwc);
CANONICALIZE_MD(mkldnn_nChw8c);
CANONICALIZE_MD(mkldnn_nChw16c);
CANONICALIZE_MD(mkldnn_nchw)
CANONICALIZE_MD(mkldnn_nhwc)
CANONICALIZE_MD(mkldnn_nChw8c)
CANONICALIZE_MD(mkldnn_nChw16c)
break;
case 5:
CANONICALIZE_MD(mkldnn_ncdhw);
CANONICALIZE_MD(mkldnn_ndhwc);
CANONICALIZE_MD(mkldnn_nCdhw16c);
CANONICALIZE_MD(mkldnn_ncdhw)
CANONICALIZE_MD(mkldnn_ndhwc)
CANONICALIZE_MD(mkldnn_nCdhw16c)
break;
default:;
}
......
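
In the mkldnn_utils switch above, the same null-statement issue appears between a macro invocation and break: assuming CANONICALIZE_MD expands to a complete statement, "CANONICALIZE_MD(mkldnn_x); break;" leaves an empty statement in front of the break, which clang-8 flags; the fix keeps the break and drops the ';'. A compact sketch of that pattern (the macro body here is an assumption, not the real definition):

// Sketch of the switch/CANONICALIZE_MD pattern; the macro body is assumed.
#include <string>

#define CANONICALIZE_SKETCH(fmt) \
    if (format == #fmt) \
    { \
        return #fmt; \
    }

std::string canonicalize(int ndims, const std::string& format)
{
    switch (ndims)
    {
    case 1: CANONICALIZE_SKETCH(x) break; // was "...(x); break;" -- the ';' was a null statement
    case 2: CANONICALIZE_SKETCH(nc) break;
    default: break;
    }
    return "unknown";
}

int main()
{
    return canonicalize(2, "nc") == "nc" ? 0 : 1;
}
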
......@@ -371,12 +371,12 @@ void ngraph::runtime::cpu::pass::LSTMFusion::construct_lstm_fprop()
return false;
}
CHECK_RANK(pattern_map[xt], 2);
CHECK_RANK(pattern_map[ht_1], 2);
CHECK_RANK(pattern_map[w_i2h], 2);
CHECK_RANK(pattern_map[w_h2h], 2);
CHECK_RANK(pattern_map[bias_i2h], 1);
CHECK_RANK(pattern_map[bias_h2h], 1);
CHECK_RANK(pattern_map[xt], 2)
CHECK_RANK(pattern_map[ht_1], 2)
CHECK_RANK(pattern_map[w_i2h], 2)
CHECK_RANK(pattern_map[w_h2h], 2)
CHECK_RANK(pattern_map[bias_i2h], 1)
CHECK_RANK(pattern_map[bias_h2h], 1)
auto weights_layer = pattern_map[w_i2h];
auto weights_iter = pattern_map[w_h2h];
......@@ -669,11 +669,11 @@ void ngraph::runtime::cpu::pass::RNNFusion::construct_rnn_lstm_fprop()
}
}
CHECK_RANK(rnn_src_layer, 2);
CHECK_RANK(rnn_src_iter, 2);
CHECK_RANK(rnn_weights_layer, 2);
CHECK_RANK(rnn_weights_iter, 2);
CHECK_RANK(rnn_bias, 1);
CHECK_RANK(rnn_src_layer, 2)
CHECK_RANK(rnn_src_iter, 2)
CHECK_RANK(rnn_weights_layer, 2)
CHECK_RANK(rnn_weights_iter, 2)
CHECK_RANK(rnn_bias, 1)
if (rnn_src_layer->get_element_type() != element::f32 ||
rnn_src_iter->get_element_type() != element::f32)
......
......@@ -228,7 +228,7 @@ TEST(debugger, MLIR_DISABLE_TEST(while_stepping))
dbg.add_breakpoint(add);
while (dbg.step())
{
};
}
ASSERT_EQ(*static_cast<int*>(dbg.inspect(add)), -777);
ASSERT_EQ(*static_cast<int*>(dbg.inspect(absn)), 777);
......