Commit 97578988 authored by Adam Procter, committed by Scott Cyphers

Dynamic "flatten" builder (#3448)

* Dynamic version of builder::flatten

* Remove as_single_output_node (no longer needed)

* Temp fix for dyn elimination
parent a4a3031b
@@ -21,6 +21,12 @@
#include "ngraph/axis_vector.hpp"
#include "ngraph/builder/reshape.hpp"
#include "ngraph/op/concat.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/experimental/dyn_reshape.hpp"
#include "ngraph/op/experimental/dyn_slice.hpp"
#include "ngraph/op/experimental/shape_of.hpp"
#include "ngraph/op/product.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/util.hpp"
@@ -76,3 +82,41 @@ shared_ptr<Node> builder::flatten(const Output<Node>& value, int axis)
return make_shared<op::Reshape>(
value, get_default_order(data_shape.size()), Shape{first_dim_size, last_dim_size});
}
// Dynamic version of "flatten".
shared_ptr<Node> builder::flatten(const Output<Node>& value, const Output<Node>& axis)
{
// value_shape := ShapeOf(value)
auto value_shape = make_shared<op::ShapeOf>(value);
// value_shape_shape := ShapeOf(value_shape)
auto value_shape_shape = make_shared<op::ShapeOf>(value_shape);
// shape_1_vector := Constant(i64, Shape{1}, [1])
auto shape_1_vector = make_shared<op::Constant>(element::i64, Shape{1}, vector<int64_t>{1});
// unit_strides := Constant(i64, Shape{1}, [1])
auto unit_strides = make_shared<op::Constant>(element::i64, Shape{1}, vector<int64_t>{1});
// row_dims := value_shape[0:axis]
auto row_dims_slice_start =
make_shared<op::Constant>(element::i64, Shape{1}, vector<int64_t>{0});
auto row_dims_slice_end = make_shared<op::DynReshape>(axis, shape_1_vector);
auto row_dims = make_shared<op::DynSlice>(
value_shape, row_dims_slice_start, row_dims_slice_end, unit_strides);
// col_dims := value_shape[axis:ReshapeToScalar(value_shape_shape)]
auto col_dims =
make_shared<op::DynSlice>(value_shape, row_dims_slice_end, value_shape_shape, unit_strides);
// row_dims_prod := [Product(row_dims, axis=0)]
auto row_dims_prod = make_shared<op::Reshape>(
make_shared<op::Product>(row_dims, AxisSet{0}), AxisVector{}, Shape{1});
// col_dims_prod := [Product(col_dims, axis=0)]
auto col_dims_prod = make_shared<op::Reshape>(
make_shared<op::Product>(col_dims, AxisSet{0}), AxisVector{}, Shape{1});
// flattened_dims := Concat({row_dims_prod, col_dims_prod})
auto flattened_dims = make_shared<op::Concat>(NodeVector{row_dims_prod, col_dims_prod}, 0);
// result := DynReshape(value, flattened_dims)
return make_shared<op::DynReshape>(value, flattened_dims);
}
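As a usage sketch (not part of the commit), the new overload can be fed a runtime-supplied scalar axis, mirroring the backend test added later in this change; the helper name below is illustrative.

// Usage sketch for the dynamic overload, assuming the nGraph APIs shown in this diff.
// The helper name is illustrative; the pattern mirrors the builder_flatten test below.
#include "ngraph/builder/reshape.hpp"
#include "ngraph/op/parameter.hpp"

std::shared_ptr<ngraph::Node> example_dynamic_flatten()
{
    using namespace ngraph;
    // Input whose concrete shape is only known at call time (rank 3 here).
    auto value = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic(3));
    // The dividing axis arrives as a scalar i64 tensor; a scalar is reshapeable to (1,).
    auto axis = std::make_shared<op::Parameter>(element::i64, Shape{});
    // Result is a 2D matrix: {prod(dims before axis), prod(dims from axis onward)}.
    return builder::flatten(value, axis);
}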
@@ -52,12 +52,21 @@ namespace ngraph
/// \return: Value with reversed dimensions.
std::shared_ptr<Node> transpose(const Output<Node>& value);
/// \brief Flatten a value into a 2D matrix.
/// \brief Flatten a value into a 2D matrix, with a static dividing axis.
///
/// \param value The tensor to be flattened.
/// \param axis The axis dividing shape.
///
/// \return The new value will be a 2D matrix representing the flattened input node.
std::shared_ptr<Node> flatten(const Output<Node>& value, int axis);
/// \brief Flatten a value into a 2D matrix, with a dynamic dividing axis.
///
/// \param value The tensor to be flattened.
/// \param axis The tensor representing the axis dividing the shape. Must be reshapeable
/// to the shape (1,).
///
/// \return The new value will be a 2D matrix representing the flattened input node.
std::shared_ptr<Node> flatten(const Output<Node>& value, const Output<Node>& axis);
} // namespace builder
} // namespace ngraph
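For concreteness, a small shape-semantics sketch for the static overload (illustrative values, grounded in the tests added below; same headers as the sketch above):

// For an input of shape {2, 3, 4}: axis=0 -> {1, 24}, axis=2 -> {6, 4}, axis=3 -> {24, 1}.
std::shared_ptr<ngraph::Node> example_static_flatten()
{
    using namespace ngraph;
    auto value = std::make_shared<op::Parameter>(element::f32, Shape{2, 3, 4});
    // Row extent = 2 * 3 = 6 (dims before the axis); column extent = 4 (remaining dims).
    return builder::flatten(value, 2);
}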
@@ -65,6 +65,7 @@ namespace ngraph
#include "ngraph/builder/numpy_transpose.hpp"
#include "ngraph/builder/quantized_conv_builder.hpp"
#include "ngraph/builder/reduce_ops.hpp"
#include "ngraph/builder/reshape.hpp"
#include "ngraph/builder/tensor_mask.hpp"
#include "ngraph/coordinate_transform.hpp"
#include "ngraph/descriptor/input.hpp"
@@ -16,6 +16,12 @@
#include "ngraph/runtime/dynamic/dynamic_backend.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/experimental/dyn_broadcast.hpp"
#include "ngraph/op/experimental/dyn_replace_slice.hpp"
#include "ngraph/op/experimental/dyn_reshape.hpp"
#include "ngraph/op/experimental/dyn_slice.hpp"
#include "ngraph/op/experimental/range.hpp"
#include "ngraph/op/experimental/transpose.hpp"
#include "ngraph/pass/constant_folding.hpp"
#include "ngraph/pass/dyn_elimination.hpp"
#include "ngraph/pass/manager.hpp"
@@ -72,6 +78,26 @@ runtime::dynamic::DynamicExecutable::DynamicExecutable(shared_ptr<Function> wrap
set_parameters_and_results(*wrapped_function);
}
// Helper for a vile hack in DynamicExecutable::call. See body of that function for details.
static size_t count_dyn_nodes(const shared_ptr<ngraph::Function>& f)
{
size_t count = 0;
for (auto op : f->get_ops())
{
if (std::dynamic_pointer_cast<op::Transpose>(op) ||
std::dynamic_pointer_cast<op::DynBroadcast>(op) ||
std::dynamic_pointer_cast<op::DynReplaceSlice>(op) ||
std::dynamic_pointer_cast<op::DynSlice>(op) ||
std::dynamic_pointer_cast<op::DynReshape>(op) ||
std::dynamic_pointer_cast<op::Range>(op))
{
count++;
}
}
return count;
}
bool runtime::dynamic::DynamicExecutable::call(
const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
const std::vector<std::shared_ptr<runtime::Tensor>>& inputs)
@@ -146,13 +172,44 @@ bool runtime::dynamic::DynamicExecutable::call(
pass::Manager passes;
passes.register_pass<pass::ConstantFolding>();
passes.register_pass<pass::DynElimination>();
passes.run_passes(clone);
passes.set_per_pass_validation(false);
const ResultVector& results = clone->get_results();
NGRAPH_CHECK(results.size() == outputs.size());
// FIXME(amprocte): Vile, temporary hack: we need to do repeated rounds of
// ConstantFolding/DynElimination until everything that DynElimination is supposed to
// eliminate has actually been eliminated. We could do this by monitoring the return values
// of the passes (keep iterating until both CF and DE report no changes), but that did not
// seem to work so here we are. Probably a better fix is to somehow combine the matchers in CF
// and DE into one pass.
size_t num_dyn_nodes_last_pass = std::numeric_limits<size_t>::max();
while (num_dyn_nodes_last_pass != 0)
{
passes.run_passes(clone);
auto num_dyn_nodes_this_pass = count_dyn_nodes(clone);
NGRAPH_CHECK(num_dyn_nodes_this_pass < num_dyn_nodes_last_pass,
"Could not eliminate all Dyn nodes (",
num_dyn_nodes_this_pass,
" remaining)");
num_dyn_nodes_last_pass = num_dyn_nodes_this_pass;
}
pass::Manager pass_val;
pass_val.register_pass<pass::Validate>();
pass_val.run_passes(clone);
std::vector<std::shared_ptr<runtime::Tensor>> wrapped_outputs;
const ResultVector& results = clone->get_results();
for (auto& result : results)
{
NGRAPH_CHECK(result->get_output_partial_shape(0).is_static(),
"Shape staticization failed for result node ",
*result);
}
NGRAPH_CHECK(results.size() == outputs.size());
for (size_t i = 0; i < outputs.size(); i++)
{
if (auto dynamic_tensor =
......
@@ -232,6 +232,7 @@ set(MULTI_TEST_SRC
backend/batch_mat_mul.in.cpp
backend/batch_norm.in.cpp
backend/broadcast.in.cpp
backend/builder_flatten.in.cpp
backend/ceiling.in.cpp
backend/comparison.in.cpp
backend/computation_reuse.in.cpp
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
struct FlattenTestParams
{
// Shape of input tensor to feed to flatten.
Shape in_shape;
// Parallel arrays (lengths must be the same).
// - expected_out_shapes[i] is the expected shape of
// "flatten(some_tensor_of_in_shape, axis=in_axes[i])"
vector<size_t> in_axes;
vector<Shape> expected_out_shapes;
};
struct FlattenTest : ::testing::TestWithParam<FlattenTestParams>
{
};
NGRAPH_TEST_P(${BACKEND_NAME}, FlattenTest, flatten)
{
FlattenTestParams p = GetParam();
auto value = make_shared<op::Parameter>(element::i32, p.in_shape);
auto axis = make_shared<op::Parameter>(element::i64, Shape{});
auto flattened = builder::flatten(value, axis);
auto f = make_shared<Function>(NodeVector{flattened}, ParameterVector{value, axis});
auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
auto ex = backend->compile(f);
auto t_value = backend->create_tensor(element::i32, p.in_shape);
vector<int32_t> value_data(shape_size(p.in_shape));
std::iota(value_data.begin(), value_data.end(), 0);
copy_data(t_value, value_data);
auto t_axis = backend->create_tensor(element::i64, Shape{});
auto t_result = backend->create_dynamic_tensor(element::i32, PartialShape::dynamic(2));
ASSERT_EQ(p.in_axes.size(), p.expected_out_shapes.size());
for (size_t i = 0; i < p.in_axes.size(); i++)
{
copy_data(t_axis, vector<int64_t>{static_cast<int64_t>(p.in_axes[i])});
ex->call_with_validate({t_result}, {t_value, t_axis});
ASSERT_EQ(t_result->get_shape(), p.expected_out_shapes[i]);
ASSERT_EQ(read_vector<int32_t>(t_result), value_data);
}
}
NGRAPH_INSTANTIATE_TEST_CASE_P(
${BACKEND_NAME},
flatten,
FlattenTest,
(::testing::Values(
FlattenTestParams{
Shape{2, 3, 4}, {0, 1, 2, 3}, {Shape{1, 24}, Shape{2, 12}, Shape{6, 4}, Shape{24, 1}}},
FlattenTestParams{
Shape{38}, {0, 1, 2, 3}, {Shape{1, 38}, Shape{38, 1}, Shape{38, 1}, Shape{38, 1}}},
FlattenTestParams{Shape{0, 0, 0},
{0, 1, 2, 3, 4},
{Shape{1, 0}, Shape{0, 0}, Shape{0, 0}, Shape{0, 1}, Shape{0, 1}}},
FlattenTestParams{Shape{},
{0, 1, 2, 3, 4},
{Shape{1, 1}, Shape{1, 1}, Shape{1, 1}, Shape{1, 1}, Shape{1, 1}}})));