Commit c95ee1e3 authored by Adam Procter, committed by Scott Cyphers

Add execution tests for dynamic reduction ops; fix validation logic (#3061)

* Add execution tests for dynamic reduction ops; fix validation logic

* Add dynamic_GPU.all to manifest

* Be explicit about the dynamic_GPU prefix in manifest
parent e44ee3df
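In short, the validation fix makes ArithmeticReduction and LogicalReduction infer a static result shape only when the reduction-axes input is an op::Constant; when the axes arrive at runtime, the output shape is now left dynamic instead of validation attempting to read axes that are not yet known. A minimal sketch of the resulting behavior, using the same nGraph API the new tests below exercise (the main wrapper and the constant axes {0, 2} are illustrative assumptions, not part of the commit):

#include "ngraph/ngraph.hpp"

using namespace ngraph;

int main()
{
    // Constant axes: the result shape can be computed during validation.
    auto x = std::make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
    auto const_axes = op::Constant::create(element::i64, Shape{2}, {0, 2});
    auto sum_static = std::make_shared<op::Sum>(x, const_axes);
    // sum_static->get_output_shape(0) == Shape{4}

    // Non-constant axes: a Parameter is not an op::Constant, so after this
    // fix the output rank is reported as dynamic.
    auto dyn_axes = std::make_shared<op::Parameter>(element::i64, Shape{2});
    auto sum_dynamic = std::make_shared<op::Sum>(x, dyn_axes);
    // sum_dynamic->get_output_partial_shape(0).rank().is_dynamic() == true
}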
@@ -39,6 +39,11 @@ op::util::ArithmeticReduction::ArithmeticReduction(const Output<Node>& arg,
{
}
+bool op::util::ArithmeticReduction::reduction_axes_constant() const
+{
+    return dynamic_pointer_cast<op::Constant>(get_argument(1)) != nullptr;
+}
const AxisSet op::util::ArithmeticReduction::get_reduction_axes() const
{
AxisSet axes;
@@ -64,7 +69,7 @@ void op::util::ArithmeticReduction::validate_and_infer_types()
PartialShape result_shape{PartialShape::dynamic()};
-if (input_rank.is_static())
+if (input_rank.is_static() && reduction_axes_constant())
{
std::vector<Dimension> dims;
@@ -93,5 +98,7 @@ void op::util::ArithmeticReduction::validate_and_infer_types()
result_shape = PartialShape(dims);
}
+set_input_is_relevant_to_shape(1);
set_output_type(0, get_input_element_type(0), result_shape);
}
@@ -46,8 +46,14 @@ namespace ngraph
public:
void validate_and_infer_types() override;
/// \return true if reduction axes are constant else false.
bool reduction_axes_constant() const;
/// \return The axis positions (0-based) to be eliminated through reduction.
/// \throws CheckFailure if the reduction axes are not constant. (Use
/// reduction_axes_constant to check.)
const AxisSet get_reduction_axes() const;
/// \brief Change the reduction axes
void set_reduction_axes(const AxisSet& reduction_axes);
};
......
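The new reduction_axes_constant() accessor pairs with the strengthened contract on get_reduction_axes(): check before you read. A hedged usage sketch, where node is a hypothetical std::shared_ptr to an op derived from op::util::ArithmeticReduction (e.g. op::Sum):

if (node->reduction_axes_constant())
{
    // Safe: the axes input is an op::Constant, so the set is statically known.
    const AxisSet axes = node->get_reduction_axes();
    for (size_t axis : axes)
    {
        // ... handle each 0-based axis to be eliminated ...
    }
}
else
{
    // Axes are only known at runtime; per the doc comment above, calling
    // get_reduction_axes() here would raise CheckFailure.
}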
@@ -38,6 +38,11 @@ op::util::LogicalReduction::LogicalReduction(const Output<Node>& arg,
{
}
+bool op::util::LogicalReduction::reduction_axes_constant() const
+{
+    return dynamic_pointer_cast<op::Constant>(get_argument(1)) != nullptr;
+}
const AxisSet op::util::LogicalReduction::get_reduction_axes() const
{
AxisSet axes;
@@ -63,7 +68,7 @@ void op::util::LogicalReduction::validate_and_infer_types()
PartialShape result_shape{PartialShape::dynamic()};
-if (input_rank.is_static())
+if (input_rank.is_static() && reduction_axes_constant())
{
std::vector<Dimension> dims;
@@ -92,6 +97,8 @@ void op::util::LogicalReduction::validate_and_infer_types()
result_shape = PartialShape(dims);
}
+set_input_is_relevant_to_shape(1);
NODE_VALIDATION_CHECK(this,
get_input_element_type(0).compatible(element::boolean),
"Input element type must be boolean.");
......
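LogicalReduction gets the identical constant-axes guard, and its validation still enforces a boolean input via the NODE_VALIDATION_CHECK shown above. A hedged sketch of what that check rejects; the exception type NodeValidationFailure and the construction-time throw are assumptions about how NODE_VALIDATION_CHECK reports failures:

// Constructing op::All over a float input should fail validation with
// "Input element type must be boolean."
auto bad_input = std::make_shared<op::Parameter>(element::f32, Shape{2, 3});
auto axes = op::Constant::create(element::i64, Shape{1}, {0});
try
{
    auto all = std::make_shared<op::All>(bad_input, axes);
}
catch (const NodeValidationFailure& error)
{
    // error.what() is expected to mention the boolean requirement.
}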
@@ -45,7 +45,12 @@ namespace ngraph
public:
void validate_and_infer_types() override;
/// \return true if reduction axes are constant else false.
bool reduction_axes_constant() const;
/// \return The axis positions (0-based) to be eliminated through reduction.
/// \throws CheckFailure if the reduction axes are not constant. (Use
/// reduction_axes_constant to check.)
const AxisSet get_reduction_axes() const;
void set_reduction_axes(const AxisSet& reduction_axes);
};
......
@@ -93,6 +93,7 @@ all_2x2x3_eliminate_dims_0_1
all_2x2x3_eliminate_dims_0_2
all_2x2x3_eliminate_dims_1_2
all_2x2x3_eliminate_dims_0_1_2
+dynamic_GPU.all
# GPU backend uses floats to implement these ops for int32
floor_int32
......
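The manifest entry lines up with the test macro in the next file: NGRAPH_TEST(dynamic_${BACKEND_NAME}, all) is instantiated once per backend, so the GPU instantiation is named dynamic_GPU.all, and that explicit prefixed form is what the manifest needs in order to skip it. Roughly (a sketch of the naming convention, assuming gtest-style suite.test names):

// Test source:            NGRAPH_TEST(dynamic_${BACKEND_NAME}, all)
// Expanded for GPU:       suite "dynamic_GPU", test "all"
// Manifest entry to skip: dynamic_GPU.all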
@@ -159,3 +159,103 @@ NGRAPH_TEST(dynamic_${BACKEND_NAME}, transpose)
ASSERT_TRUE(test::all_close_f(results, expected_results[i], MIN_FLOAT_TOLERANCE_BITS));
}
}
NGRAPH_TEST(dynamic_${BACKEND_NAME}, sum)
{
// Create a graph for f(x,axes:int32) = Sum(x,Convert<int64>(axes)).
auto x = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto axes = make_shared<op::Parameter>(element::i32, PartialShape{Dimension::dynamic()});
auto axes_i64 = make_shared<op::Convert>(axes, element::i64);
auto sum = make_shared<op::Sum>(x, axes_i64);
ASSERT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic());
auto f = make_shared<Function>(NodeVector{sum}, ParameterVector{x, axes});
auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
auto ex = backend->compile(f);
auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
std::vector<Shape> x_shapes{
Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{5}, Shape{5}};
std::vector<std::vector<int32_t>> axeses{{}, {0}, {1}, {0, 1}, {}, {0}};
std::vector<std::vector<float>> inputs{{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5},
{1, 2, 3, 4, 5}};
std::vector<Shape> expected_result_shapes{
Shape{2, 3}, Shape{3}, Shape{2}, Shape{}, Shape{5}, Shape{}};
std::vector<std::vector<float>> expected_results{
{1, 2, 3, 4, 5, 6}, {5, 7, 9}, {6, 15}, {21}, {1, 2, 3, 4, 5}, {15}};
for (size_t i = 0; i < x_shapes.size(); i++)
{
auto t_x = backend->create_tensor(element::f32, x_shapes[i]);
auto t_axes = backend->create_tensor(element::i32, Shape{axeses[i].size()});
copy_data(t_x, inputs[i]);
copy_data(t_axes, axeses[i]);
ex->call_with_validate({t_r}, {t_x, t_axes});
ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]);
auto results = read_vector<float>(t_r);
ASSERT_TRUE(test::all_close_f(results, expected_results[i], MIN_FLOAT_TOLERANCE_BITS));
}
}
NGRAPH_TEST(dynamic_${BACKEND_NAME}, all)
{
// Create a graph for f(x,axes:int32) = All(x,Convert<int64>(axes)).
auto x = make_shared<op::Parameter>(element::boolean, PartialShape::dynamic());
auto axes = make_shared<op::Parameter>(element::i32, PartialShape{Dimension::dynamic()});
auto axes_i64 = make_shared<op::Convert>(axes, element::i64);
auto all = make_shared<op::All>(x, axes_i64);
ASSERT_TRUE(all->get_output_partial_shape(0).rank().is_dynamic());
auto f = make_shared<Function>(NodeVector{all}, ParameterVector{x, axes});
auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
auto ex = backend->compile(f);
auto t_r = backend->create_dynamic_tensor(element::boolean, PartialShape::dynamic());
std::vector<Shape> x_shapes{
Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{5}, Shape{5}};
std::vector<std::vector<int32_t>> axeses{{}, {0}, {1}, {0, 1}, {}, {0}};
std::vector<std::vector<char>> inputs{{1, 0, 1, 0, 1, 0},
{1, 0, 1, 0, 0, 1},
{1, 0, 1, 1, 1, 1},
{1, 0, 1, 0, 1, 0},
{1, 0, 1, 0, 1},
{1, 0, 1, 0, 1}};
std::vector<Shape> expected_result_shapes{
Shape{2, 3}, Shape{3}, Shape{2}, Shape{}, Shape{5}, Shape{}};
std::vector<std::vector<char>> expected_results{
{1, 0, 1, 0, 1, 0}, {0, 0, 1}, {0, 1}, {0}, {1, 0, 1, 0, 1}, {0}};
for (size_t i = 0; i < x_shapes.size(); i++)
{
auto t_x = backend->create_tensor(element::boolean, x_shapes[i]);
auto t_axes = backend->create_tensor(element::i32, Shape{axeses[i].size()});
copy_data(t_x, inputs[i]);
copy_data(t_axes, axeses[i]);
ex->call_with_validate({t_r}, {t_x, t_axes});
ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]);
auto results = read_vector<char>(t_r);
ASSERT_EQ(results, expected_results[i]);
}
}
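Both execution tests follow the same pattern: the function is compiled once against fully dynamic shapes, the executable is reused across six input shapes, and create_dynamic_tensor supplies an output tensor whose concrete shape is only pinned down at call time. Note the empty axes lists in the first and fifth iterations: reducing over no axes is the identity, which is why those expected results simply reproduce the inputs.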
@@ -9699,6 +9699,16 @@ TEST(type_prop, sum_axis_oob)
}
}
TEST(type_prop, sum_dynamic_axes)
{
auto param = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
auto summation_axes = make_shared<op::Parameter>(element::i64, Shape{2});
auto sum = make_shared<op::Sum>(param, summation_axes);
EXPECT_EQ(sum->get_output_element_type(0), element::f32);
EXPECT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic());
}
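This type_prop test pins down the fixed inference behavior directly: even with a fully static data shape, non-constant axes (a Parameter rather than a Constant) must leave the output rank dynamic, while the element type still propagates.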
TEST(type_prop, sum_partial_rank_dynamic)
{
auto param = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
......