Unverified commit 76e144d3, authored by Robert Kimball, committed by GitHub

Merge branch 'master' into gauri/new_reshape_error_master

Parents: 30af51f2, a15d2ec5
Release notes (RST):

@@ -19,7 +19,9 @@ We are pleased to announce the release of version |version|.
 Core updates for |version|
 --------------------------
-Allow DLLs that link nGraph statically to load backends
++ Allow DLLs that link nGraph statically to load backends
++ Add rank id to trace file name.
++ Allow provenance merging to be disabled
 .. important:: Pre-releases (``-rc-0.*``) have newer features, and are less stable.
MLIR backend CMakeLists.txt:

@@ -94,3 +94,5 @@ ngraph_tablegen(ops.cpp.inc -gen-op-defs)
 add_public_tablegen_target(ngraph_ops_gen)
 add_dependencies(mlir_backend ngraph_ops_gen)
 target_include_directories(mlir_backend PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
+
+install(TARGETS mlir_backend DESTINATION ${NGRAPH_INSTALL_LIB})
normalize_l2.cpp:

@@ -42,27 +42,49 @@ op::NormalizeL2::NormalizeL2(const Output<Node>& data,
 void op::NormalizeL2::pre_validate_and_infer_types()
 {
-    const auto& data_pshape = get_input_partial_shape(0);
+    auto axes_node = input_value(1).get_node_shared_ptr();
+    const auto& input_pshape = get_input_partial_shape(0);
     const auto& axes_pshape = get_input_partial_shape(1);
+    const auto& input_rank = input_pshape.rank();
+    const auto& axes_rank = axes_pshape.rank();

-    NODE_VALIDATION_CHECK(this, data_pshape.is_static(), "Input data must be static.");
-    NODE_VALIDATION_CHECK(this, axes_pshape.is_static(), "Input axes must be static.");
+    NODE_VALIDATION_CHECK(this, axes_node->is_constant(), "Input axes must be Constant type");

-    const Shape data_shape{data_pshape.to_shape()};
-
-    // Input data must be 2, 3 or 4D tensor.
-    NODE_VALIDATION_CHECK(this,
-                          (data_shape.size() >= 2 && data_shape.size() <= 4),
-                          "Input tensor rank must be 2, 3 or 4 dimensional (actual input "
-                          "shape: ",
-                          data_shape,
-                          ").");
-
-    NODE_VALIDATION_CHECK(this,
-                          static_cast<size_t>(axes_pshape.rank()) == 1,
-                          "Input axes must have rank equals 1 (axes shape: ",
-                          axes_pshape,
-                          ").");
+    if (axes_rank.is_static())
+    {
+        NODE_VALIDATION_CHECK(this,
+                              static_cast<size_t>(axes_rank) == 1,
+                              "Input axes must have rank equals 1 (axes rank: ",
+                              axes_rank,
+                              ").");
+
+        if (input_rank.is_static())
+        {
+            const auto reduction_axes = get_reduction_axes();
+            for (auto axis : reduction_axes)
+            {
+                NODE_VALIDATION_CHECK(this,
+                                      axis < size_t(input_rank),
+                                      "Reduction axis (",
+                                      axis,
+                                      ") is out of bounds ",
+                                      "(argument shape: ",
+                                      input_pshape,
+                                      ")");
+            }
+        }
+    }
 }
+
+AxisSet op::NormalizeL2::get_reduction_axes() const
+{
+    AxisSet axes;
+    auto axes_input_node = input_value(1).get_node_shared_ptr();
+    if (auto const_op = dynamic_pointer_cast<op::Constant>(axes_input_node))
+    {
+        axes = const_op->get_axis_set_val();
+    }
+    return axes;
+}
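The practical effect of the relaxed validation, as a minimal sketch (it mirrors the new 5-D backend test further down; the snippet itself is not part of the diff): input rank is no longer restricted to 2-4, the axes input must be a Constant, and each axis is bounds-checked against the input rank only when both ranks are static.

// Sketch: a rank-5 input is now accepted; the old validator rejected
// anything outside rank 2-4. Axis 1 is in bounds because 1 < 5.
Shape data_shape{1, 2, 2, 2, 3};
auto data = make_shared<op::Parameter>(element::f32, data_shape);
auto axes = make_shared<op::Constant>(element::i64, Shape{1}, vector<int64_t>{1});
auto normalize = make_shared<op::NormalizeL2>(data, axes, 1e-6f, op::EpsMode::ADD);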
@@ -70,37 +92,14 @@ NodeVector op::NormalizeL2::decompose_op() const
 NodeVector op::NormalizeL2::decompose_op() const
 {
     Output<Node> data{input_value(0)};
     const Shape input_shape{data.get_shape()};

-    // Reshape to 4D tensor.
-    if (input_shape.size() != 4)
-    {
-        Shape data_shape(4 - input_shape.size(), 1);
-        copy(begin(input_shape), end(input_shape), back_inserter(data_shape));
-        data = builder::reshape(data, data_shape);
-    }
-
-    auto axes_node = input(1).get_source_output().get_node_shared_ptr();
-    NODE_VALIDATION_CHECK(this,
-                          axes_node->is_constant(),
-                          "doesn't support 'axes' input of other type than a Constant.");
-
-    // Calculate norm over axes indicated by axes input param
-    auto axes_constant = dynamic_pointer_cast<op::Constant>(axes_node);
-    auto axes_vector = axes_constant->get_vector<size_t>();
-    AxisSet reduction_axes{axes_vector};
+    AxisSet reduction_axes = get_reduction_axes();

     // Calculate l2 norm across axes determined by axes input
     auto builder_bias_mode =
         (m_eps_mode == EpsMode::MAX) ? builder::BiasMode::MAX : builder::BiasMode::ADD;
     Output<Node> norm = builder::l2_norm(data, reduction_axes, m_eps, builder_bias_mode);
-    norm = make_broadcast_node(norm, data.get_shape(), 0);

-    data = data / norm;
-
-    // get back original input tensor rank
-    if (input_shape.size() != 4)
-    {
-        data = builder::reshape(data, input_shape);
-    }
+    data = make_shared<op::Divide>(data, norm, AutoBroadcastSpec(AutoBroadcastType::NUMPY));

     return as_node_vector({data});
 }
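For reference, the decomposed graph computes, elementwise over the data tensor x (a sketch: the exact placement of the bias inside builder::l2_norm is assumed here, not shown in the diff):

\[
y = \frac{x}{\sqrt{\sum_{i \in \mathrm{axes}} x_i^2 + \varepsilon}} \;\; (\mathrm{EpsMode{::}ADD}),
\qquad
y = \frac{x}{\sqrt{\max\big(\sum_{i \in \mathrm{axes}} x_i^2,\, \varepsilon\big)}} \;\; (\mathrm{EpsMode{::}MAX})
\]

Two consequences are visible in the tests below: with an empty axes set nothing is reduced, so the denominator degenerates to \(\sqrt{x^2 + \varepsilon} \approx |x|\) and every positive element normalizes to roughly 1 (normalize_across_empty_axes_input); and because the Divide now uses NUMPY auto-broadcasting, the old reshape-to-4D round trip and explicit make_broadcast_node call are no longer needed.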
normalize_l2.hpp:

@@ -54,6 +54,7 @@ namespace ngraph
                 EpsMode get_eps_mode() const { return m_eps_mode; }

                 virtual NodeVector decompose_op() const override;
                 virtual void pre_validate_and_infer_types() override;
+                AxisSet get_reduction_axes() const;

                 virtual std::shared_ptr<Node>
                     copy_with_new_args(const NodeVector& new_args) const override;
Unit test manifest:

@@ -69,13 +69,14 @@ gather_4d_indices_no_axis_2d_input
 gemm
 gemm_broadcast_input_C
 normalize_across_chw_4d
+normalize_across_empty_axes_input
+normalize_across_h_4d
+normalize_across_1axis_5d
+normalize_across_123axes_5d
 normalize_across_chw_4d_max_bias
-normalize_across_chw_3d
-normalize_across_chw_2d
-normalize_across_hw_4d
-normalize_invalid_input_tensor_rank
+normalize_axes_input_not_constant
 normalize_invalid_axes_rank
-normalize_output_shape_across_chw
+normalize_axes_out_of_bounds
 hardsigmoid
 model_erf
 model_erf_int32
Backend tests:

@@ -582,7 +582,7 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_chw_4d)
 {
     Shape data_shape{1, 2, 3, 4};
     auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    const auto axes = make_shared<op::Constant>(element::u64, Shape{3}, vector<int64_t>{1, 2, 3});
+    const auto axes = make_shared<op::Constant>(element::i64, Shape{3}, vector<int64_t>{1, 2, 3});
     float eps{1e-6f};
     auto eps_mode = op::EpsMode::ADD;
@@ -605,11 +605,11 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_chw_4d)
     test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
 }

-NGRAPH_TEST(${BACKEND_NAME}, normalize_across_chw_3d)
+NGRAPH_TEST(${BACKEND_NAME}, normalize_across_empty_axes_input)
 {
-    Shape data_shape{2, 3, 4};
+    Shape data_shape{1, 2, 3, 4};
     auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    const auto axes = make_shared<op::Constant>(element::u64, Shape{3}, vector<int64_t>{1, 2, 3});
+    const auto axes = make_shared<op::Constant>(element::i64, Shape{0}, vector<int64_t>{});
     float eps{1e-6f};
     auto eps_mode = op::EpsMode::ADD;
@@ -623,20 +623,17 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_chw_3d)
     test_case.add_input<float>(input_data);

-    test_case.add_expected_output<float>(
-        data_shape, {0.01428571f, 0.02857143f, 0.04285714f, 0.05714286f, 0.07142857f, 0.08571429f,
-                     0.1f,        0.11428571f, 0.12857144f, 0.14285715f, 0.15714286f, 0.17142858f,
-                     0.18571429f, 0.2f,        0.21428572f, 0.22857143f, 0.24285714f, 0.25714287f,
-                     0.27142859f, 0.2857143f,  0.30000001f, 0.31428573f, 0.32857144f, 0.34285715f});
+    // output should be filled with 1f values
+    test_case.add_expected_output<float>(data_shape, vector<float>(shape_size(data_shape), 1));

     test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
 }

-NGRAPH_TEST(${BACKEND_NAME}, normalize_across_chw_2d)
+NGRAPH_TEST(${BACKEND_NAME}, normalize_across_h_4d)
 {
-    Shape data_shape{3, 4};
+    Shape data_shape{1, 2, 3, 4};
     auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    const auto axes = make_shared<op::Constant>(element::u64, Shape{3}, vector<int64_t>{1, 2, 3});
+    const auto axes = make_shared<op::Constant>(element::i64, Shape{1}, vector<int64_t>{1});
     float eps{1e-6f};
     auto eps_mode = op::EpsMode::ADD;
@@ -650,28 +647,19 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_chw_2d)
     test_case.add_input<float>(input_data);

-    test_case.add_expected_output<float>(data_shape,
-                                         {0.03922323f,
-                                          0.07844646f,
-                                          0.11766968f,
-                                          0.15689291f,
-                                          0.19611613f,
-                                          0.23533936f,
-                                          0.2745626f,
-                                          0.31378582f,
-                                          0.35300905f,
-                                          0.39223227f,
-                                          0.43145549f,
-                                          0.47067872f});
-    test_case.run();
+    test_case.add_expected_output<float>(
+        data_shape, {0.0766965f,  0.14142136f, 0.19611613f, 0.24253564f, 0.28216633f, 0.31622776f,
+                     0.34570536f, 0.37139067f, 0.39391932f, 0.41380295f, 0.43145549f, 0.44721359f,
+                     0.99705452f, 0.98994946f, 0.98058069f, 0.97014254f, 0.95936549f, 0.94868332f,
+                     0.93834311f, 0.92847669f, 0.91914505f, 0.91036648f, 0.90213418f, 0.89442718f});
+    test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
 }

-NGRAPH_TEST(${BACKEND_NAME}, normalize_across_empty_axes_input)
+NGRAPH_TEST(${BACKEND_NAME}, normalize_across_1axis_5d)
 {
-    Shape data_shape{1, 2, 3, 4};
+    Shape data_shape{1, 2, 2, 2, 3};
     auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    const auto axes = make_shared<op::Constant>(element::u64, Shape{0}, vector<int64_t>{});
+    const auto axes = make_shared<op::Constant>(element::i64, Shape{1}, vector<int64_t>{1});
     float eps{1e-6f};
     auto eps_mode = op::EpsMode::ADD;
@@ -685,17 +673,19 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_empty_axes_input)
     test_case.add_input<float>(input_data);

-    // output should be filled with 1f values
-    test_case.add_expected_output<float>(data_shape, vector<float>(shape_size(data_shape), 1));
+    test_case.add_expected_output<float>(
+        data_shape, {0.0766965f,  0.14142136f, 0.19611613f, 0.24253564f, 0.28216633f, 0.31622776f,
+                     0.34570536f, 0.37139067f, 0.39391932f, 0.41380295f, 0.43145549f, 0.44721359f,
+                     0.99705452f, 0.98994946f, 0.98058069f, 0.97014254f, 0.95936549f, 0.94868332f,
+                     0.93834311f, 0.92847669f, 0.91914505f, 0.91036648f, 0.90213418f, 0.89442718f});

     test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
 }

-NGRAPH_TEST(${BACKEND_NAME}, normalize_across_hw_4d)
+NGRAPH_TEST(${BACKEND_NAME}, normalize_across_123axes_5d)
 {
-    Shape data_shape{1, 2, 3, 4};
+    Shape data_shape{1, 2, 2, 2, 3};
     auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    const auto axes = make_shared<op::Constant>(element::u64, Shape{2}, vector<int64_t>{2, 3});
+    const auto axes = make_shared<op::Constant>(element::i64, Shape{3}, vector<int64_t>{1, 2, 3});
     float eps{1e-6f};
     auto eps_mode = op::EpsMode::ADD;
@@ -710,10 +700,10 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_hw_4d)
     test_case.add_input<float>(input_data);

     test_case.add_expected_output<float>(
-        data_shape, {0.03922323f, 0.07844646f, 0.11766968f, 0.15689291f, 0.19611613f, 0.23533936f,
-                     0.2745626f,  0.31378582f, 0.35300905f, 0.39223227f, 0.43145549f, 0.47067872f,
-                     0.1994109f,  0.2147502f,  0.2300895f,  0.2454288f,  0.26076809f, 0.2761074f,
-                     0.29144669f, 0.306786f,   0.32212529f, 0.3374646f,  0.35280389f, 0.3681432f});
+        data_shape, {0.02638899f, 0.04956816f, 0.070014f,   0.10555596f, 0.1239204f,  0.140028f,
+                     0.18472293f, 0.19827265f, 0.210042f,   0.26388991f, 0.27262488f, 0.280056f,
+                     0.34305686f, 0.34697714f, 0.35007f,    0.42222384f, 0.42132938f, 0.420084f,
+                     0.50139081f, 0.49568161f, 0.49009803f, 0.58055776f, 0.57003385f, 0.560112f});
     test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
 }
@@ -721,7 +711,7 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_chw_4d_max_bias)
 {
     Shape data_shape{1, 2, 3, 4};
     auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    const auto axes = make_shared<op::Constant>(element::u64, Shape{3}, vector<int64_t>{1, 2, 3});
+    const auto axes = make_shared<op::Constant>(element::i64, Shape{3}, vector<int64_t>{1, 2, 3});
     float eps{5000};
     auto eps_mode = op::EpsMode::MAX;
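A quick sanity check of the new expected values, assuming (the input rows are collapsed in this view) the test input is the sequence 1..24 in row-major order: in normalize_across_h_4d the shape is {1, 2, 3, 4} and the reduction axis is 1, so element (0,0,0,0) = 1 is normalized against element (0,1,0,0) = 13:

\[
\frac{1}{\sqrt{1^2 + 13^2}} = \frac{1}{\sqrt{170}} \approx 0.0766965,
\qquad
\frac{13}{\sqrt{170}} \approx 0.9970545
\]

which matches the first entry of each half of the expected output (0.0766965f and 0.99705452f).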
Type-prop tests:

@@ -20,11 +20,12 @@
 using namespace std;
 using namespace ngraph;

-TEST(type_prop, normalize_invalid_input_tensor_rank)
+TEST(type_prop, normalize_axes_input_not_constant)
 {
-    Shape data_shape{1, 2, 3, 4, 5};
+    Shape data_shape{1, 2, 3, 4};
     auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    auto axes = make_shared<op::Parameter>(element::u64, Shape{1, 2});
+    auto axes = make_shared<op::Parameter>(element::u64, Shape{1});
     float eps{1e-6f};
     auto eps_mode = op::EpsMode::ADD;
@@ -36,15 +37,21 @@ TEST(type_prop, normalize_invalid_input_tensor_rank)
     }
     catch (const NodeValidationFailure& error)
     {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             std::string("Input tensor rank must be 2, 3 or 4 dimensional"));
+        EXPECT_HAS_SUBSTRING(error.what(), std::string("Input axes must be Constant type"));
     }
     catch (...)
     {
         FAIL() << "Deduced type check failed for unexpected reason";
     }
+}

-    data = make_shared<op::Parameter>(element::f32, Shape{2});
+TEST(type_prop, normalize_invalid_axes_rank)
+{
+    Shape data_shape{1, 2, 3, 4};
+    auto data = make_shared<op::Parameter>(element::f32, data_shape);
+    const auto axes = make_shared<op::Constant>(element::i64, Shape{1, 2}, vector<int64_t>{1, 2});
+    float eps{1e-6f};
+    auto eps_mode = op::EpsMode::ADD;

     try
     {
@@ -54,8 +61,7 @@ TEST(type_prop, normalize_invalid_input_tensor_rank)
     }
     catch (const NodeValidationFailure& error)
     {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             std::string("Input tensor rank must be 2, 3 or 4 dimensional"));
+        EXPECT_HAS_SUBSTRING(error.what(), std::string("Input axes must have rank equals 1"));
     }
     catch (...)
     {
@@ -63,11 +69,11 @@
     }
 }

-TEST(type_prop, normalize_invalid_axes_rank)
+TEST(type_prop, normalize_axes_out_of_bounds)
 {
     Shape data_shape{1, 2, 3, 4};
     auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    auto axes = make_shared<op::Parameter>(element::u64, Shape{1, 2});
+    const auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{3, 4});
     float eps{1e-6f};
     auto eps_mode = op::EpsMode::ADD;
@@ -79,23 +85,10 @@ TEST(type_prop, normalize_invalid_axes_rank)
     }
     catch (const NodeValidationFailure& error)
     {
-        EXPECT_HAS_SUBSTRING(error.what(), std::string("Input axes must have rank equals 1"));
+        EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis ("));
     }
     catch (...)
     {
         FAIL() << "Deduced type check failed for unexpected reason";
     }
 }
-
-TEST(type_prop, normalize_output_shape_across_chw)
-{
-    Shape data_shape{2, 3, 4};
-    auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    const auto axes = make_shared<op::Constant>(element::u64, Shape{3}, vector<int64_t>{1, 2, 3});
-    float eps{1e-6f};
-    auto eps_mode = op::EpsMode::ADD;
-    auto normalize = make_shared<op::NormalizeL2>(data, axes, eps, eps_mode);
-
-    EXPECT_EQ(normalize->get_element_type(), element::f32);
-    EXPECT_EQ(normalize->get_shape(), (Shape{2, 3, 4}));
-}
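One removed test is worth a note: normalize_output_shape_across_chw passed axes {1, 2, 3} with a rank-3 input, which the new bounds check rejects (axis 3 is not < 3), so the test could not survive unchanged. The property it asserted still holds, since NormalizeL2 only rescales elements in place; a minimal sketch with in-bounds axes (an illustration, not part of the diff):

// Sketch: output element type and shape follow the data input unchanged.
Shape data_shape{2, 3, 4};
auto data = make_shared<op::Parameter>(element::f32, data_shape);
const auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
auto normalize = make_shared<op::NormalizeL2>(data, axes, 1e-6f, op::EpsMode::ADD);
EXPECT_EQ(normalize->get_element_type(), element::f32);
EXPECT_EQ(normalize->get_shape(), (Shape{2, 3, 4}));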