Unverified Commit 1dbef5d3 authored by baojun, committed by GitHub

Remove DynReshape, use v1::Reshape instead (#4203)

* remove dynreshape

* remove header

* use v1::Reshape for shape relevance

* remove return of bool due to function refactor

* remove gpu emit v0 DynReshape

* Merge cleanups

* throw exception for reshape node with dyn shape

* fix style
Co-authored-by: Scott Cyphers <diyessi@users.noreply.github.com>
Co-authored-by: Wenzhe Xue <wenzhe.xue@intel.com>
Co-authored-by: Scott Cyphers <scott.cyphers@intel.com>
Co-authored-by: Sang Ik Lee <sang.ik.lee@intel.com>
parent 1b034470
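In short, every construction site that used op::v0::DynReshape now constructs op::v1::Reshape instead; the third constructor argument, special_zero, plays the role of DynReshape's zero_flag. A minimal before/after sketch (the helper name make_reshape_graph is illustrative and not part of this diff):

#include "ngraph/ngraph.hpp"

using namespace ngraph;

std::shared_ptr<Function> make_reshape_graph()
{
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{2, 4, 6, 8});
    auto pattern = op::Constant::create(element::i64, Shape{3}, {0, 6, -1});

    // Before this change:
    //   auto r = std::make_shared<op::v0::DynReshape>(data, pattern, /*zero_flag=*/true);
    // After this change:
    auto r = std::make_shared<op::v1::Reshape>(data, pattern, /*special_zero=*/true);

    return std::make_shared<Function>(NodeVector{r}, ParameterVector{data});
}

With special_zero set, the pattern {0, 6, -1} keeps the first input dimension, fixes the second to 6, and infers the last, so the static output shape is {2, 6, 32}, matching the DynElimination test removed further down in this diff.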
@@ -186,8 +186,6 @@ set (SRC
    op/experimental/dyn_pad.hpp
    op/experimental/dyn_replace_slice.cpp
    op/experimental/dyn_replace_slice.hpp
-   op/experimental/dyn_reshape.cpp
-   op/experimental/dyn_reshape.hpp
    op/experimental/dyn_slice.cpp
    op/experimental/dyn_slice.hpp
    op/experimental/generate_mask.cpp
...
@@ -23,7 +23,6 @@
#include "ngraph/builder/reshape.hpp"
#include "ngraph/op/concat.hpp"
#include "ngraph/op/constant.hpp"
-#include "ngraph/op/experimental/dyn_reshape.hpp"
#include "ngraph/op/experimental/dyn_slice.hpp"
#include "ngraph/op/experimental/shape_of.hpp"
#include "ngraph/op/experimental/transpose.hpp"
@@ -104,7 +103,7 @@ shared_ptr<Node> builder::flatten(const Output<Node>& value, const Output<Node>&
    // row_dims := value_shape[0:axis]
    auto row_dims_slice_start =
        make_shared<op::Constant>(element::i64, Shape{1}, vector<int64_t>{0});
-   auto row_dims_slice_end = make_shared<op::DynReshape>(axis, shape_1_vector);
+   auto row_dims_slice_end = make_shared<op::v1::Reshape>(axis, shape_1_vector, true);
    auto row_dims = make_shared<op::DynSlice>(
        value_shape, row_dims_slice_start, row_dims_slice_end, unit_strides);
@@ -122,8 +121,7 @@ shared_ptr<Node> builder::flatten(const Output<Node>& value, const Output<Node>&
    // flattened_dims := Concat({row_dims_prod, col_dims_prod})
    auto flattened_dims = make_shared<op::Concat>(NodeVector{row_dims_prod, col_dims_prod}, 0);
-   // result := DynReshape(value, flattened_dims)
-   return make_shared<op::DynReshape>(value, flattened_dims)
+   return make_shared<op::v1::Reshape>(value, flattened_dims, true)
        ->add_provenance_group_members_above({value});
}
...
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <iostream>
#include "ngraph/op/constant.hpp"
#include "ngraph/op/experimental/dyn_reshape.hpp"
using namespace std;
using namespace ngraph;
constexpr NodeTypeInfo op::v0::DynReshape::type_info;
op::v0::DynReshape::DynReshape(const Output<Node>& arg, const Output<Node>& pattern, bool zero_flag)
: Op({arg, pattern})
, m_zero_flag(zero_flag)
{
constructor_validate_and_infer_types();
}
void op::v0::DynReshape::validate_and_infer_types()
{
auto pattern_et = get_input_element_type(1);
// check data types
NODE_VALIDATION_CHECK(
this, pattern_et.compatible(element::Type_t::i64), "Pattern must have element type i64.");
// check shapes
const PartialShape& pattern_shape = get_input_partial_shape(1);
NODE_VALIDATION_CHECK(this,
pattern_shape.rank().compatible(1),
"Pattern shape must have rank 1, got ",
pattern_shape.rank(),
".");
Rank output_rank = pattern_shape.rank().is_dynamic() ? Rank::dynamic() : pattern_shape[0];
set_input_is_relevant_to_shape(1);
if (auto const_shape = as_type_ptr<op::Constant>(input_value(1).get_node_shared_ptr()))
{
std::vector<int64_t> out_shape_val = const_shape->get_vector<int64_t>();
NODE_VALIDATION_CHECK(this,
std::none_of(out_shape_val.begin(),
out_shape_val.end(),
[](int64_t v) { return v < -1; }),
"Dim size cannot be less than -1 ");
int zero_dims = std::count_if(
out_shape_val.begin(), out_shape_val.end(), [](int64_t v) { return v == 0; });
int negative_dims = std::count_if(
out_shape_val.begin(), out_shape_val.end(), [](int64_t v) { return v == -1; });
NODE_VALIDATION_CHECK(this,
negative_dims <= 1,
"More than one dimension has size of -1 (",
negative_dims,
")");
if (!(zero_dims && m_zero_flag) && !negative_dims)
{
set_output_type(0, get_input_element_type(0), const_shape->get_shape_val());
}
else
{
std::vector<Dimension> partial_shape(output_rank.get_length());
// Replace zeros and negatives with Dynamic dimensions as needed
std::transform(out_shape_val.begin(),
out_shape_val.end(),
partial_shape.begin(),
[&](const int64_t& v) {
return (v < 0)
? Dimension()
: ((v == 0 && m_zero_flag) ? Dimension() : Dimension(v));
});
if (get_input_partial_shape(0).is_static())
{
size_t output_elements = 1;
int negative_dim = -1;
auto input_shape = get_input_partial_shape(0).to_shape();
size_t input_elements = shape_size(input_shape);
for (size_t i = 0; i < output_rank.get_length(); i++)
{
if (out_shape_val[i] == 0 && m_zero_flag)
{
// Copy input_shape[i] for zero values
NODE_VALIDATION_CHECK(
this, i < input_shape.size(), "'0' dimension is out of range");
partial_shape[i] = Dimension(input_shape[i]);
output_elements *= input_shape[i];
}
else if (out_shape_val[i] == -1)
{
negative_dim = i;
}
else
{
output_elements *= out_shape_val[i];
}
}
if (negative_dim != -1)
{
// Infer size such that number of output elements matches
// input elements
if (output_elements == 0)
{
// TODO(amprocte): Decide if this is desired behavior here. (NumPy seems
// to fail.)
NODE_VALIDATION_CHECK(this,
input_elements == 0,
"Cannot infer '-1' dimension with zero-size output "
"dimension unless at least one input dimension is "
"also zero-size");
partial_shape[negative_dim] = Dimension(0);
}
else
{
NODE_VALIDATION_CHECK(
this,
input_elements % output_elements == 0,
"Non-'-1' output dimensions do not evenly divide the input dimensions");
partial_shape[negative_dim] = Dimension(input_elements / output_elements);
}
}
}
set_output_type(0, get_input_element_type(0), PartialShape(partial_shape));
}
}
else
{
set_output_type(0, get_input_element_type(0), PartialShape::dynamic(output_rank));
}
}
shared_ptr<Node> op::v0::DynReshape::copy_with_new_args(const NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<v0::DynReshape>(new_args.at(0), new_args.at(1), m_zero_flag);
}
void op::v0::DynReshape::generate_adjoints(autodiff::Adjoints& /* adjoints */,
const OutputVector& /* deltas */)
{
throw ngraph_error("generate_adjoints not implemented for DynReshape");
}
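For reference, the output-shape rules implemented in validate_and_infer_types above (and preserved by op::v1::Reshape) can be restated as a small standalone sketch. This is plain C++ for illustration only, not nGraph code, and the function name is made up:

#include <cstdint>
#include <functional>
#include <numeric>
#include <stdexcept>
#include <vector>

// Output-shape inference for a static input shape and a constant pattern:
// with zero_flag/special_zero set, a 0 copies the corresponding input dimension,
// and a single -1 is inferred so that the element counts match.
std::vector<int64_t> infer_reshape_output_shape(const std::vector<int64_t>& input_shape,
                                                const std::vector<int64_t>& pattern,
                                                bool zero_flag)
{
    std::vector<int64_t> out(pattern.size());
    int64_t known_elements = 1;
    int negative_dim = -1;
    for (size_t i = 0; i < pattern.size(); ++i)
    {
        if (pattern[i] == 0 && zero_flag)
        {
            out[i] = input_shape.at(i); // copy the input dimension at the same index
        }
        else if (pattern[i] == -1)
        {
            negative_dim = static_cast<int>(i); // inferred below
            continue;
        }
        else
        {
            out[i] = pattern[i];
        }
        known_elements *= out[i];
    }
    if (negative_dim >= 0)
    {
        int64_t input_elements = std::accumulate(
            input_shape.begin(), input_shape.end(), int64_t{1}, std::multiplies<int64_t>());
        // The real op additionally allows zero-size inputs to match zero-size outputs;
        // that corner case is omitted here for brevity.
        if (known_elements == 0 || input_elements % known_elements != 0)
        {
            throw std::runtime_error("cannot infer the -1 dimension");
        }
        out[negative_dim] = input_elements / known_elements;
    }
    return out;
}

// Example: infer_reshape_output_shape({2, 4, 6, 8}, {0, 6, -1}, true) returns {2, 6, 32}.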
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/node.hpp"
#include "ngraph/op/op.hpp"
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Tensor dynamic reshape operation.
///
/// "Converts" an input tensor into a new shape with the same number of elements.
/// This op does not touch the actual data. If needed, use Transpose for that purpose.
///
class NGRAPH_API DynReshape : public Op
{
public:
static constexpr NodeTypeInfo type_info{"DynReshape", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
DynReshape() = default;
/// \brief Constructs a dynamic reshape operation. This operation does not perform
/// transpose.
///
/// \param arg The tensor to be reshaped.
/// \param pattern The node that defines output shape pattern.
/// If the input shape is \f$(a_0,\dots,a_{k-1})\f$ then the output shape
/// must
/// be of the form \f$(b_0,\dots,b_{j-1})\f$ where \f$\Pi(a_i) = \Pi(b_i)\f$.
/// A value of -1 is allowed for at most one dimension, in which case the
/// dimension size is inferred based on element count of input tensor.
/// \param zero_flag Treats zeros in `pattern` as wildcard flags indicating a copy
/// from
/// input shape at the same index.
DynReshape(const Output<Node>& arg,
const Output<Node>& pattern,
bool zero_flag = false);
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
bool get_zero_flag() const { return m_zero_flag; }
void set_zero_flag(bool zero_flag) { m_zero_flag = zero_flag; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
private:
bool m_zero_flag;
};
}
// default opset version
using v0::DynReshape;
}
}
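The zero_flag behaviour documented above carries over to op::v1::Reshape as special_zero. A short hedged example of the semantics, written in the style of the tests in this change (shapes chosen for illustration):

auto data = make_shared<op::Parameter>(element::f32, Shape{2, 3, 4});
auto pattern = op::Constant::create(element::i64, Shape{2}, {0, -1});
auto reshape = make_shared<op::v1::Reshape>(data, pattern, /*special_zero=*/true);
// Pattern entry 0 copies input dimension 0 (= 2); -1 is inferred from the element
// count (2 * 3 * 4 = 24), so the static output shape is {2, 12}:
// reshape->get_output_shape(0) == Shape{2, 12}
// With special_zero = false, the 0 would instead be taken literally as a
// zero-sized output dimension.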
@@ -20,7 +20,6 @@
#include "ngraph/op/concat.hpp"
#include "ngraph/op/dot.hpp"
#include "ngraph/op/experimental/batch_mat_mul.hpp"
-#include "ngraph/op/experimental/dyn_reshape.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/slice.hpp"
#include "ngraph/util.hpp"
...
@@ -88,7 +88,6 @@ NGRAPH_OP(Dot, ngraph::op::v0, 0)
NGRAPH_OP(DynBroadcast, ngraph::op::v0, 0)
NGRAPH_OP(DynPad, ngraph::op::v0, 0)
NGRAPH_OP(DynReplaceSlice, ngraph::op::v0, 0)
-NGRAPH_OP(DynReshape, ngraph::op::v0, 0)
NGRAPH_OP(DynSlice, ngraph::op::v0, 0)
NGRAPH_OP(Elu, ngraph::op::v0, 0)
NGRAPH_OP(EmbeddingLookup, ngraph::op::v0, 0)
...
@@ -59,7 +59,6 @@
#include "ngraph/op/experimental/dyn_broadcast.hpp"
#include "ngraph/op/experimental/dyn_pad.hpp"
#include "ngraph/op/experimental/dyn_replace_slice.hpp"
-#include "ngraph/op/experimental/dyn_reshape.hpp"
#include "ngraph/op/experimental/dyn_slice.hpp"
#include "ngraph/op/experimental/generate_mask.hpp"
#include "ngraph/op/experimental/layers/ctc_greedy_decoder.hpp"
...
@@ -96,7 +96,6 @@ NGRAPH_OP(Dot, ngraph::op)
NGRAPH_OP(DynBroadcast, ngraph::op)
NGRAPH_OP(DynPad, ngraph::op)
NGRAPH_OP(DynReplaceSlice, ngraph::op)
-NGRAPH_OP(DynReshape, ngraph::op)
NGRAPH_OP(DynSlice, ngraph::op)
NGRAPH_OP(Elu, ngraph::op)
NGRAPH_OP(EmbeddingLookup, ngraph::op)
...
@@ -17,7 +17,6 @@
#include <numeric>
#include "constant_folding.hpp"
-#include "ngraph/op/experimental/dyn_reshape.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/runtime/reference/reshape.hpp"
#include "ngraph/type/element_type.hpp"
@@ -103,8 +102,6 @@ void pass::ConstantFolding::construct_constant_dyn_reshape()
        make_shared<pattern::op::Label>(element::i64, Shape{1}, pattern::has_class<op::Constant>());
    auto reshape_v1 =
        make_shared<op::v1::Reshape>(constant_data_label, constant_shape_label, false);
-   auto dyn_reshape =
-       make_shared<op::v0::DynReshape>(constant_data_label, constant_shape_label, false);
    // Note: No need to capture or consider constant_shape_label, because
    // shape propagation will have transferred the info to dyn_reshape's
@@ -130,26 +127,4 @@ void pass::ConstantFolding::construct_constant_dyn_reshape()
        make_shared<pattern::Matcher>(reshape_v1, "ConstantFolding.ConstantReshapev1");
    this->add_matcher(
        reshape_v1_matcher, constant_reshape_v1_callback, PassProperty::CHANGE_DYNAMIC_STATE);
auto constant_dyn_reshape_callback = [constant_data_label](pattern::Matcher& m) {
NGRAPH_DEBUG << "In callback for constant_dyn_reshape_callback against node = "
<< m.get_match_root()->get_name();
auto pattern_map = m.get_pattern_map();
auto constant_data_match =
static_pointer_cast<op::Constant>(pattern_map[constant_data_label]);
auto match_root = m.get_match_root();
NGRAPH_CHECK(revalidate_and_ensure_static(match_root));
shared_ptr<Node> replacement;
replacement =
do_fold(static_pointer_cast<op::v0::DynReshape>(match_root), constant_data_match);
replace_node(m.get_match_root(), replacement);
return true;
};
auto dyn_reshape_matcher =
make_shared<pattern::Matcher>(dyn_reshape, "ConstantFolding.ConstantDynReshape");
this->add_matcher(
dyn_reshape_matcher, constant_dyn_reshape_callback, PassProperty::CHANGE_DYNAMIC_STATE);
}
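For context, the surviving v1::Reshape matcher folds a reshape of constant data with a constant pattern into a single Constant. A hedged sketch in the style of the tests elsewhere in this change (assertions shown as comments; exact node counts assumed):

auto data = op::Constant::create(element::f32, Shape{2, 3}, {1, 2, 3, 4, 5, 6});
auto pattern = op::Constant::create(element::i64, Shape{2}, {3, 2});
auto reshape = make_shared<op::v1::Reshape>(data, pattern, false);
auto f = make_shared<Function>(NodeVector{reshape}, ParameterVector{});

pass::Manager pass_manager;
pass_manager.register_pass<pass::ConstantFolding>();
pass_manager.run_passes(f);

// Expected result: the Reshape is folded away and a single Constant of Shape{3, 2} remains.
// ASSERT_EQ(count_ops_of_type<op::v1::Reshape>(f), 0);
// ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);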
@@ -20,7 +20,6 @@
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/experimental/dyn_broadcast.hpp"
#include "ngraph/op/experimental/dyn_replace_slice.hpp"
-#include "ngraph/op/experimental/dyn_reshape.hpp"
#include "ngraph/op/experimental/dyn_slice.hpp"
#include "ngraph/op/experimental/range.hpp"
#include "ngraph/op/experimental/transpose.hpp"
@@ -43,7 +42,6 @@ pass::DynElimination::DynElimination()
    construct_dyn_broadcast();
    construct_dyn_replace_slice();
    construct_dyn_slice();
-   construct_dyn_reshape();
    construct_range();
}
@@ -299,49 +297,6 @@ void pass::DynElimination::construct_dyn_replace_slice()
    add_matcher(dyn_replace_slice_matcher, dyn_replace_slice_callback, all_pass_property_off);
}
void pass::DynElimination::construct_dyn_reshape()
{
auto data_arg_label = make_shared<pattern::op::Label>(element::f32, Shape{1, 2, 3});
auto shape_arg_label =
make_shared<pattern::op::Label>(element::i64, Shape{3}, pattern::has_class<op::Constant>());
auto dyn_reshape = make_shared<op::DynReshape>(data_arg_label, shape_arg_label);
auto dyn_reshape_callback = [data_arg_label, shape_arg_label](pattern::Matcher& m) {
auto pattern_map = m.get_pattern_map();
auto data_arg = pattern_map[data_arg_label];
auto shape_arg = static_pointer_cast<op::Constant>(pattern_map[shape_arg_label]);
auto dyn_reshape_node = static_pointer_cast<op::DynReshape>(m.get_match_root());
// TODO(amprocte): Can't handle the case where data rank is dynamic even if we know the
// output shape, because static Reshape requires an axis permutation (here an identity) to
// be given. See if we can come up with a workaround.
if (data_arg->get_output_partial_shape(0).rank().is_dynamic())
{
return false;
}
if (dyn_reshape_node->get_output_partial_shape(0).is_dynamic())
{
return false;
}
auto& result_shape = dyn_reshape_node->get_output_shape(0);
AxisVector perm(data_arg->get_output_partial_shape(0).rank().get_length());
std::iota(perm.begin(), perm.end(), 0);
auto replacement = std::make_shared<op::Reshape>(data_arg, perm, result_shape);
replace_node(dyn_reshape_node, replacement);
return true;
};
auto dyn_reshape_matcher =
make_shared<pattern::Matcher>(dyn_reshape, "DynElimination.DynReshape");
add_matcher(dyn_reshape_matcher, dyn_reshape_callback, all_pass_property_off);
}
template <typename T>
std::shared_ptr<op::Constant> make_range_replacement(const element::Type& et,
                                                     const Shape& shape,
...
@@ -33,7 +33,6 @@ namespace ngraph
    void construct_dyn_broadcast();
    void construct_dyn_replace_slice();
    void construct_dyn_slice();
-   void construct_dyn_reshape();
    void construct_range();
};
}
...
@@ -319,8 +319,7 @@ namespace
}
else
{
-   replacement_node = make_shared<op::v0::DynReshape>(
-       node->input_value(0), node->input_value(1), node->get_special_zero());
+   NGRAPH_CHECK(replacement_node, "Unable to convert Reshape:v1 with dynamic shape.");
}
replace_node(node, replacement_node);
...
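Per the commit note "throw exception for reshape node with dyn shape", the downgrade pass no longer falls back to v0::DynReshape; when the output shape of a v1::Reshape cannot be made static, the NGRAPH_CHECK above fails. A hedged, test-style sketch of that expectation (the test name is illustrative and not part of this diff):

TEST(opset_transform, opset0_reshape_downgrade_dynamic_shape)
{
    const auto arg = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    const auto pattern = make_shared<op::Parameter>(element::i64, PartialShape::dynamic(1));
    const auto reshape = make_shared<op::v1::Reshape>(arg, pattern, true);
    auto f = make_shared<Function>(NodeVector{reshape}, ParameterVector{arg, pattern});

    pass::Manager pass_manager;
    pass_manager.register_pass<pass::Opset0Downgrade>();

    // Expected to throw, since the pass can no longer emit a v0::DynReshape here.
    EXPECT_ANY_THROW(pass_manager.run_passes(f));
}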
@@ -220,15 +220,6 @@ namespace
    return replacement_node;
}
-shared_ptr<Node> op_cast(shared_ptr<op::DynReshape> node)
-{
-   auto zero_flag = false;
-   auto replacement_node =
-       make_shared<op::v1::Reshape>(node->input_value(0), node->input_value(1), zero_flag);
-   replace_node(node, replacement_node);
-   return replacement_node;
-}
shared_ptr<Node> op_cast(shared_ptr<op::Equal> node)
{
    return op_cast_binary_elementwise_node<op::v0::Equal, op::v1::Equal>(node);
...
@@ -29,7 +29,7 @@ using namespace ngraph;
//     |       |
//     |       |
//     |       |
-//   N2[DynReshape]
+//   N2[v1::Reshape]
//
// N1 (but not N0) will be flagged as shape-relevant, because N1 feeds into the "shape" input
// of N2.
@@ -38,7 +38,7 @@ using namespace ngraph;
//     |       |
//     |   N2[ShapeOf]
//     |       |
-//   N3[DynReshape]
+//   N3[v1::Reshape]
//
// Neither N0 nor N1 will be flagged as shape-relevant. (N1 does feed into the "shape" input of N3,
// but only via the value-irrelevant input of ShapeOf.)
...
@@ -21,7 +21,6 @@
#include "ngraph/op/convolution.hpp"
#include "ngraph/op/experimental/dyn_broadcast.hpp"
#include "ngraph/op/experimental/dyn_replace_slice.hpp"
-#include "ngraph/op/experimental/dyn_reshape.hpp"
#include "ngraph/op/experimental/dyn_slice.hpp"
#include "ngraph/op/experimental/generate_mask.hpp"
#include "ngraph/op/experimental/range.hpp"
@@ -90,7 +89,7 @@ bool is_dynamic_op(const std::shared_ptr<Node>& op)
{
    return is_type<op::Transpose>(op) || is_type<op::DynBroadcast>(op) ||
           is_type<op::DynReplaceSlice>(op) || is_type<op::DynSlice>(op) ||
-          is_type<op::v1::Reshape>(op) || is_type<op::DynReshape>(op) || is_type<op::Range>(op) ||
+          is_type<op::v1::Reshape>(op) || is_type<op::Range>(op) ||
           is_type<op::v1::ConvolutionBackpropData>(op) ||
           is_type<op::v1::ConvolutionBackpropFilters>(op) ||
           is_type<op::v1::AvgPoolBackprop>(op) || is_type<op::v1::Broadcast>(op) ||
...
@@ -1286,11 +1286,6 @@ std::string runtime::gpu::GPU_Emitter::emit_v0_DynReplaceSlice(EMIT_ARGS)
    throw unsupported_op("Unsupported op '" + node->description() + "'");
}
-std::string runtime::gpu::GPU_Emitter::emit_v0_DynReshape(EMIT_ARGS)
-{
-   throw unsupported_op("Unsupported op '" + node->description() + "'");
-}
std::string runtime::gpu::GPU_Emitter::emit_v0_DynSlice(EMIT_ARGS)
{
    throw unsupported_op("Unsupported op '" + node->description() + "'");
...
@@ -772,11 +772,6 @@ protected:
                 dot->get_reduction_axes_count());
    break;
}
-case OP_TYPEID::DynReshape:
-{
-   throw unsupported_op("Unsupported op '" + node.description() + "'");
-   break;
-}
case OP_TYPEID::DynSlice:
{
    throw unsupported_op("Unsupported op '" + node.description() + "'");
...
@@ -1512,12 +1512,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
                     ellipsis_mask);
    break;
}
-case OP_TYPEID::DynReshape:
-{
-   const auto zero_flag = node_js.at("zero_flag").get<bool>();
-   node = make_shared<op::v0::DynReshape>(args[0], args[1], zero_flag);
-   break;
-}
case OP_TYPEID::Reshape_v1:
{
    const bool special_zero = node_js.at("special_zero").get<bool>();
@@ -3636,12 +3630,6 @@ json JSONSerializer::serialize_node(const Node& n)
    node["ellipsis_mask"] = tmp->get_ellipsis_mask();
    break;
}
-case OP_TYPEID::DynReshape:
-{
-   auto tmp = static_cast<const op::v0::DynReshape*>(&n);
-   node["zero_flag"] = tmp->get_zero_flag();
-   break;
-}
case OP_TYPEID::Reshape_v1:
{
    auto tmp = static_cast<const op::v1::Reshape*>(&n);
...
@@ -77,7 +77,6 @@ set(SRC
    opset_pass/binary_elementwise_opset_pass.cpp
    opset_pass/broadcast_opset_pass.cpp
    opset_pass/convolution_opset_pass.cpp
-   opset_pass/dyn_reshape_opset_pass.cpp
    opset_pass/logical_and_opset_pass.cpp
    opset_pass/logical_not_opset_pass.cpp
    opset_pass/logical_or_opset_pass.cpp
...
@@ -25,97 +25,6 @@ using namespace ngraph;
static string s_manifest = "${MANIFEST}";
NGRAPH_TEST(${BACKEND_NAME}, dyn_reshape)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
auto build_graph = [&backend](bool zero_flag) {
// Create a graph for f(x,shape) = DynReshape(x,shape,zero_flag=zero_flag).
auto x = make_shared<op::Parameter>(element::i32, PartialShape::dynamic());
auto shape = make_shared<op::Parameter>(element::i64, PartialShape::dynamic(1));
auto dyn_reshape = make_shared<op::DynReshape>(x, shape, zero_flag);
EXPECT_TRUE(dyn_reshape->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
auto f = make_shared<Function>(NodeVector{dyn_reshape}, ParameterVector{x, shape});
auto ex = backend->compile(f);
return ex;
};
auto t_r = backend->create_dynamic_tensor(element::i32, PartialShape::dynamic());
auto ex_flag_off = build_graph(false);
auto ex_flag_on = build_graph(true);
std::vector<std::tuple<bool, Shape, std::vector<int32_t>, std::vector<int64_t>, Shape>> tests;
tests.emplace_back(make_tuple(
false, Shape{2, 3}, vector<int32_t>{1, 2, 3, 4, 5, 6}, vector<int64_t>{6}, Shape{6}));
tests.emplace_back(make_tuple(
true, Shape{2, 3}, vector<int32_t>{1, 2, 3, 4, 5, 6}, vector<int64_t>{6}, Shape{6}));
tests.emplace_back(make_tuple(
false, Shape{2, 3}, vector<int32_t>{1, 2, 3, 4, 5, 6}, vector<int64_t>{-1}, Shape{6}));
tests.emplace_back(make_tuple(false,
Shape{2, 3},
vector<int32_t>{1, 2, 3, 4, 5, 6},
vector<int64_t>{2, -1},
Shape{2, 3}));
tests.emplace_back(make_tuple(false,
Shape{2, 3},
vector<int32_t>{1, 2, 3, 4, 5, 6},
vector<int64_t>{3, -1},
Shape{3, 2}));
tests.emplace_back(make_tuple(false,
Shape{2, 3},
vector<int32_t>{1, 2, 3, 4, 5, 6},
vector<int64_t>{3, 2, -1},
Shape{3, 2, 1}));
tests.emplace_back(make_tuple(true,
Shape{2, 3},
vector<int32_t>{1, 2, 3, 4, 5, 6},
vector<int64_t>{3, 2, -1},
Shape{3, 2, 1}));
tests.emplace_back(make_tuple(true,
Shape{2, 3},
vector<int32_t>{1, 2, 3, 4, 5, 6},
vector<int64_t>{0, 0, -1},
Shape{2, 3, 1}));
tests.emplace_back(make_tuple(true,
Shape{2, 3},
vector<int32_t>{1, 2, 3, 4, 5, 6},
vector<int64_t>{2, 0, -1},
Shape{2, 3, 1}));
tests.emplace_back(make_tuple(
true, Shape{0, 3, 4}, vector<int32_t>{}, vector<int64_t>{3, -1, 2}, Shape{3, 0, 2}));
for (auto& test : tests)
{
bool zero_flag = get<0>(test);
const Shape& in_shape = get<1>(test);
const std::vector<int32_t>& data = get<2>(test);
const std::vector<int64_t>& dims = get<3>(test);
const Shape& out_shape = get<4>(test);
auto t_x = backend->create_tensor(element::i32, in_shape);
auto t_shape = backend->create_tensor(element::i64, Shape{dims.size()});
copy_data(t_x, data);
copy_data(t_shape, dims);
auto ex = zero_flag ? ex_flag_on : ex_flag_off;
ex->call_with_validate({t_r}, {t_x, t_shape});
ASSERT_EQ(t_r->get_element_type(), element::i32);
ASSERT_EQ(t_r->get_shape(), out_shape);
auto results = read_vector<int32_t>(t_r);
ASSERT_EQ(results, data);
}
}
NGRAPH_TEST(${BACKEND_NAME}, reshape_v1)
{
    auto arg = std::make_shared<op::Parameter>(element::i64, PartialShape::dynamic());
...
@@ -181,7 +181,7 @@ static void to_vector_test(const PartialShape& input_pshape, const std::vector<S
    x_new_shape = make_shared<op::Product>(x_new_shape, AxisSet{0});
    x_new_shape = make_shared<op::Reshape>(x_new_shape, AxisVector{}, Shape{1});
-   auto x_reshaped = make_shared<op::DynReshape>(x, x_new_shape);
+   auto x_reshaped = make_shared<op::v1::Reshape>(x, x_new_shape, true);
    auto f = make_shared<Function>(NodeVector{x_reshaped}, ParameterVector{x});
    auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
@@ -240,7 +240,7 @@ static void reverse_shape_test(const PartialShape& input_pshape,
    shared_ptr<Node> x_new_shape = make_shared<op::ShapeOf>(x);
    x_new_shape = make_shared<op::Reverse>(x_new_shape, AxisSet{0});
-   auto x_reshaped = make_shared<op::DynReshape>(x, x_new_shape);
+   auto x_reshaped = make_shared<op::v1::Reshape>(x, x_new_shape, true);
    auto f = make_shared<Function>(NodeVector{x_reshaped}, ParameterVector{x});
    auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
...
@@ -188,7 +188,7 @@ TEST(build_graph, function_revalidate_and_infer)
    auto arg = make_shared<op::Parameter>(element::f32, Shape{2, 4, 6, 8});
    auto pattern = op::Constant::create(element::i64, Shape{6}, {1, 3, 16, 2, 2, 2});
-   auto r = make_shared<op::DynReshape>(arg, pattern);
+   auto r = make_shared<op::v1::Reshape>(arg, pattern, true);
    auto relu = make_shared<op::Relu>(r);
    auto f = make_shared<Function>(relu, ParameterVector{arg});
...
@@ -183,30 +183,6 @@ TEST(dyn_elimination, replace_slice)
    ASSERT_EQ(f->get_results().at(0)->get_shape(), (Shape{2, 4, 6, 8, 2, 2, 2}));
}
TEST(dyn_elimination, reshape)
{
auto input_arg = make_shared<op::Parameter>(element::f32, Shape{2, 4, 6, 8});
auto shape_arg = make_shared<op::Constant>(element::i64, Shape{3}, vector<int64_t>{0, 6, -1});
auto r = make_shared<op::DynReshape>(input_arg, shape_arg, true);
ASSERT_EQ(r->get_element_type(), element::f32);
ASSERT_EQ(r->get_shape(), (Shape{2, 6, 32}));
auto f = make_shared<Function>(r, ParameterVector{input_arg});
pass::Manager pass_manager;
pass_manager.register_pass<pass::DynElimination>();
pass_manager.run_passes(f);
ASSERT_EQ(count_ops_of_type<op::DynReshape>(f), 0);
ASSERT_EQ(count_ops_of_type<op::Constant>(f), 0);
ASSERT_EQ(count_ops_of_type<op::Reshape>(f), 1);
ASSERT_EQ(f->get_results().at(0)->get_element_type(), element::f32);
ASSERT_EQ(f->get_results().at(0)->get_shape(), (Shape{2, 6, 32}));
}
TEST(dyn_elimination, range)
{
    auto constant_start = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{0});
@@ -269,25 +245,3 @@ TEST(dyn_elimination, range_f64)
    ASSERT_TRUE(test::all_close_f(
        vals, vector<double>{-0.5, -0.25, 0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75}));
}
#ifndef NGRAPH_JSON_DISABLE
TEST(dyn_elimination, paddlepaddle_transpose)
{
string model = "paddlepaddle/transpose.json";
const string json_path = file_util::path_join(SERIALIZED_ZOO, model);
const string json_string = file_util::read_file_to_string(json_path);
shared_ptr<Function> f = ngraph::deserialize(json_string);
vector<element::Type> arg_element_types = {element::f64, element::f64};
vector<PartialShape> arg_shapes = {{3, 4}, {4, 3}};
std::vector<void*> arg_value_base_pointers = {nullptr, nullptr};
auto clone = specialize_function(f, arg_element_types, arg_shapes, arg_value_base_pointers);
pass::Manager passes;
passes.register_pass<pass::ConstantFolding>();
passes.register_pass<pass::DynElimination>();
passes.register_pass<pass::Opset0Downgrade>(); // Converts dynamic v1 variants to v0 ops
passes.set_per_pass_validation(false);
passes.run_passes(clone);
}
#endif
@@ -438,15 +438,6 @@ namespace
    EXPECT_FALSE(node.is_binary_elementwise_logical());
}
-void op_is_DynReshape()
-{
-   op::DynReshape node;
-   EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
-   EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
-   EXPECT_FALSE(node.is_binary_elementwise_comparison());
-   EXPECT_FALSE(node.is_binary_elementwise_logical());
-}
void op_is_DynSlice()
{
    op::DynSlice node;
...
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "ngraph/pass/opset1_upgrade.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
TEST(opset_transform, opset1_dyn_reshape_upgrade_pass)
{
const auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
const auto pattern = make_shared<op::Parameter>(element::i64, Shape{6});
const auto dyn_reshape_v0 = make_shared<op::v0::DynReshape>(arg, pattern, true);
const auto result = make_shared<op::Result>(dyn_reshape_v0);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg, pattern});
ngraph::pass::Manager pass_manager;
pass_manager.register_pass<pass::Opset1Upgrade>();
pass_manager.run_passes(f);
const auto pass_replacement_node = f->get_result()->input_value(0).get_node_shared_ptr();
EXPECT_TRUE(is_type<op::v1::Reshape>(pass_replacement_node));
}
TEST(opset_transform, opset1_reshape_downgrade_pass)
{
const auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
const auto pattern = make_shared<op::Parameter>(element::i64, Shape{6});
const auto dyn_reshape_v0 = make_shared<op::v1::Reshape>(arg, pattern, true);
const auto result = make_shared<op::Result>(dyn_reshape_v0);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg, pattern});
ngraph::pass::Manager pass_manager;
pass_manager.register_pass<pass::Opset0Downgrade>();
pass_manager.run_passes(f);
const auto pass_replacement_node = f->get_result()->input_value(0).get_node_shared_ptr();
const auto reshape_v1 = as_type_ptr<op::v0::DynReshape>(pass_replacement_node);
ASSERT_TRUE(reshape_v1);
EXPECT_EQ(reshape_v1->get_zero_flag(), true);
}
@@ -48,7 +48,7 @@ TEST(shape_relevance, param_direct)
{
    auto param0 = make_shared<op::Parameter>(element::f32, Shape{4, 6});
    auto param1 = make_shared<op::Parameter>(element::i64, Shape{4});
-   auto x = make_shared<op::DynReshape>(param0, param1);
+   auto x = make_shared<op::v1::Reshape>(param0, param1, true);
    auto f = make_shared<Function>(x, ParameterVector{param0, param1});
@@ -67,7 +67,7 @@ TEST(shape_relevance, param_indirect)
    auto param2 = make_shared<op::Parameter>(element::i64, Shape{2});
    auto c = make_shared<op::Concat>(NodeVector{param1, param2}, 0);
-   auto x = make_shared<op::DynReshape>(param0, c);
+   auto x = make_shared<op::v1::Reshape>(param0, c, true);
    auto f = make_shared<Function>(x, ParameterVector{param0, param1, param2});
@@ -84,7 +84,7 @@ TEST(shape_relevance, param_shape_of_direct)
{
    auto param0 = make_shared<op::Parameter>(element::f32, Shape{4, 6});
-   auto x = make_shared<op::DynReshape>(param0, make_shared<op::ShapeOf>(param0));
+   auto x = make_shared<op::v1::Reshape>(param0, make_shared<op::ShapeOf>(param0), true);
    auto f = make_shared<Function>(x, ParameterVector{param0});
@@ -101,7 +101,7 @@ TEST(shape_relevance, param_shape_of_indirect)
    auto s = make_shared<op::ShapeOf>(param0);
    auto r = make_shared<op::Reverse>(s, AxisSet{0});
-   auto x = make_shared<op::DynReshape>(param0, r);
+   auto x = make_shared<op::v1::Reshape>(param0, r, true);
    auto f = make_shared<Function>(x, ParameterVector{param0});
...