Commit 6e5b4cd0 authored by Nishant Patel, committed by Scott Cyphers

Shape as a node in ConvBpropData/Filters (v1) (#3711)

* Shape as a node in ConvBpropData (v1)

* Fix constant op creation

* ConvBpropFilters dynamic shape

* Avgpool bprop dynamic shape

* Add downgrade pass

* Downgrade pass Avgpoolbackprop

* Shape checks and shape relevancy

* Dyn avgpoolbprop test case

* Revert "Dyn avgpoolbprop test case"

This reverts commit b094f2f99d00023593c667ff16cf060a586ad16a.

* Revert "Avgpool bprop dynamic shape"

This reverts commit ef8a4d197577c0d277d634baa9d6d082adcddae0.

* Revert "Downgrade pass Avgpoolbackprop"

This reverts commit 3f31f0fea8fe6e79bd958cbaa7cb8495008a795f.

* Call shape relevancy

* Revert Avgpoolbprop changes

* Mark nodes as dynamic

* Add opset transform tests

* Change API

* Adjust strides

* Use size

* Merge PR 3776

* Add shape relevance

* Add convbprop_data test case

* Add convbprop_filter test case

* Use is_constant helper

* Remove opset pdpd test case

* Resolve conflict

* Add checks for validate and infer

* Type prop tests
parent 1cc36521
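
In practical terms, this change means the v1 constructors take the output shape as a third input node instead of a static Shape argument, so the shape can be a compile-time op::Constant or deferred to runtime through an op::Parameter. A minimal sketch of the new call pattern (shapes are illustrative, borrowed from the tests in this diff):

    // Hedged usage sketch of the revised v1 API (shapes are illustrative).
    auto filters = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto deltas = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    // Any node producing an i64 shape vector works as the third argument;
    // an op::Parameter defers the data batch shape to call time.
    auto data_batch_shape =
        make_shared<op::Parameter>(element::i64, PartialShape{Dimension::dynamic()});
    auto conv = make_shared<op::v1::ConvolutionBackpropData>(
        filters, deltas, data_batch_shape,
        Strides{1, 1}, Strides{1, 1}, CoordinateDiff{0, 0}, CoordinateDiff{0, 0});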
@@ -115,9 +115,9 @@ namespace ngraph
                 /// \param dilations The dilations from forward-prop.
                 /// \param pads_begin The padding-below sizes from forward-prop.
                 /// \param pads_end The padding-above sizes from forward-prop.
-                ConvolutionBackpropData(const Shape& data_batch_shape,
-                                        const Output<Node>& filters,
+                ConvolutionBackpropData(const Output<Node>& filters,
                                         const Output<Node>& output_delta,
+                                        const Output<Node>& data_batch_shape,
                                         const Strides& strides,
                                         const Strides& dilations,
                                         const CoordinateDiff& pads_begin,
@@ -132,11 +132,8 @@ namespace ngraph
                     copy_with_new_args(const NodeVector& new_args) const override;
                 /// \return The data batch shape.
-                const Shape& get_data_batch_shape() const { return m_data_batch_shape; }
-                void set_data_batch_shape(const Shape& data_batch_shape)
-                {
-                    m_data_batch_shape = data_batch_shape;
-                }
+                const Shape get_data_batch_shape() const;
+                void set_data_batch_shape(const Shape& data_batch_shape);
                 /// \return The strides from the forward prop.
                 const Strides& get_strides() const { return m_strides; }
                 void set_strides(const Strides& strides) { m_strides = strides; }
@@ -154,7 +151,6 @@ namespace ngraph
                 CoordinateDiff compute_backward_delta_out_pad_below() const;
             protected:
-                Shape m_data_batch_shape;
                 Strides m_strides;
                 Strides m_dilations;
                 CoordinateDiff m_pads_begin;
@@ -180,8 +176,8 @@ namespace ngraph
                 /// \param pads_begin The padding-below sizes from forward-prop.
                 /// \param pads_end The padding-above sizes from forward-prop.
                 ConvolutionBackpropFilters(const Output<Node>& data_batch,
-                                           const Shape& filters_shape,
                                            const Output<Node>& output_delta,
+                                           const Output<Node>& filters_shape,
                                            const Strides& strides,
                                            const Strides& dilations,
                                            const CoordinateDiff& pads_begin,
@@ -194,7 +190,7 @@ namespace ngraph
                     copy_with_new_args(const NodeVector& new_args) const override;
                 /// \return The filters tensor shape.
-                const Shape& get_filters_shape() const { return m_filters_shape; }
+                const Shape get_filters_shape() const;
                 /// \return The strides from the forward prop.
                 const Strides& get_strides() const { return m_strides; }
                 void set_strides(const Strides& strides) { m_strides = strides; }
@@ -211,7 +207,6 @@ namespace ngraph
                 CoordinateDiff compute_backward_in_pad_above() const;
             protected:
-                Shape m_filters_shape;
                 Strides m_strides;
                 Strides m_dilations;
                 CoordinateDiff m_pads_begin;
@@ -197,6 +197,10 @@ bool pass::Opset0Downgrade::run_on_node(shared_ptr<Node> node)
     case OP_TYPEID::ConvolutionBackpropData:
     {
         auto tmp = as_type_ptr<op::v1::ConvolutionBackpropData>(node);
+        NGRAPH_CHECK(node->input_value(2).get_node_shared_ptr()->is_constant());
+        auto data_batch_shape =
+            static_pointer_cast<op::Constant>(node->input_value(2).get_node_shared_ptr())
+                ->get_shape_val();
         const auto filters_arg = node->input(0).get_source_output();
         const auto delta_arg = node->input(1).get_source_output();
         const PartialShape& delta_arg_pshape = node->get_input_partial_shape(1);
@@ -206,7 +210,7 @@ bool pass::Opset0Downgrade::run_on_node(shared_ptr<Node> node)
             *node);
         const size_t num_spatial_dims = static_cast<size_t>(delta_arg_pshape.rank()) - 2;
         auto replacement_node =
-            make_shared<op::v0::ConvolutionBackpropData>(tmp->get_data_batch_shape(),
+            make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
                                                          filters_arg,
                                                          delta_arg,
                                                          tmp->get_strides(),
@@ -221,6 +225,10 @@ bool pass::Opset0Downgrade::run_on_node(shared_ptr<Node> node)
     case OP_TYPEID::ConvolutionBackpropFilters:
     {
         auto tmp = as_type_ptr<op::v1::ConvolutionBackpropFilters>(node);
+        NGRAPH_CHECK(node->input_value(2).get_node_shared_ptr()->is_constant());
+        auto filters_shape =
+            static_pointer_cast<op::Constant>(node->input_value(2).get_node_shared_ptr())
+                ->get_shape_val();
         const auto data_arg = node->input(0).get_source_output();
         const auto delta_arg = node->input(1).get_source_output();
         const PartialShape& data_arg_pshape = node->get_input_partial_shape(0);
@@ -231,7 +239,7 @@ bool pass::Opset0Downgrade::run_on_node(shared_ptr<Node> node)
         const size_t num_spatial_dims = static_cast<size_t>(data_arg_pshape.rank()) - 2;
         auto replacement_node =
             make_shared<op::v0::ConvolutionBackpropFilters>(data_arg,
-                                                            tmp->get_filters_shape(),
+                                                            filters_shape,
                                                             delta_arg,
                                                             tmp->get_strides(),
                                                             tmp->get_dilations(),
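
The two NGRAPH_CHECK guards above encode a real constraint: the v0 ops store the shape by value, so the downgrade can only succeed when input 2 is a compile-time op::Constant; a shape fed from an op::Parameter trips the check. A minimal sketch of a downgradable graph, assuming filters and delta are the f32 Parameters from the opset-pass tests further down:

    // Constant shape input: Opset0Downgrade can extract Shape{64, 3, 100}.
    auto shape_const =
        op::Constant::create<int64_t>(element::i64, Shape{3}, {64, 3, 100});
    auto bprop = make_shared<op::v1::ConvolutionBackpropData>(
        filters, delta, shape_const,
        Strides{1}, Strides{1}, CoordinateDiff{2}, CoordinateDiff{3});
    // After the pass, the v0 replacement reports
    // get_data_batch_shape() == Shape{64, 3, 100}.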
@@ -238,9 +238,9 @@ bool pass::Opset1Upgrade::run_on_node(shared_ptr<Node> node)
             *node);
         auto replacement_node =
-            make_shared<op::v1::ConvolutionBackpropData>(data_batch_shape,
-                                                         node->input(0).get_source_output(),
+            make_shared<op::v1::ConvolutionBackpropData>(node->input(0).get_source_output(),
                                                          node->input(1).get_source_output(),
+                                                         node->input(2).get_source_output(),
                                                          strides,
                                                          dilations,
                                                          pads_begin,
@@ -274,8 +274,8 @@ bool pass::Opset1Upgrade::run_on_node(shared_ptr<Node> node)
         auto replacement_node =
             make_shared<op::v1::ConvolutionBackpropFilters>(node->input(0).get_source_output(),
-                                                            filters_shape,
                                                             node->input(1).get_source_output(),
+                                                            node->input(2).get_source_output(),
                                                             strides,
                                                             dilations,
                                                             pads_begin,
@@ -18,6 +18,7 @@
 #include "ngraph/graph_util.hpp"
 #include "ngraph/op/avg_pool.hpp"
 #include "ngraph/op/broadcast.hpp"
+#include "ngraph/op/convolution.hpp"
 #include "ngraph/op/experimental/dyn_broadcast.hpp"
 #include "ngraph/op/experimental/dyn_replace_slice.hpp"
 #include "ngraph/op/experimental/dyn_reshape.hpp"
@@ -90,8 +91,10 @@ bool is_dynamic_op(const std::shared_ptr<Node>& op)
     return is_type<op::Transpose>(op) || is_type<op::DynBroadcast>(op) ||
            is_type<op::DynReplaceSlice>(op) || is_type<op::DynSlice>(op) ||
            is_type<op::v1::Reshape>(op) || is_type<op::DynReshape>(op) || is_type<op::Range>(op) ||
-           is_type<op::v1::GenerateMask>(op) || is_type<op::v1::AvgPoolBackprop>(op) ||
-           is_type<op::v1::Broadcast>(op);
+           is_type<op::v1::ConvolutionBackpropData>(op) ||
+           is_type<op::v1::ConvolutionBackpropFilters>(op) ||
+           is_type<op::v1::AvgPoolBackprop>(op) || is_type<op::v1::Broadcast>(op) ||
+           is_type<op::v1::GenerateMask>(op);
 }

 // Helper for a vile hack in DynamicExecutable::call. See body of that function for details.
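
The is_dynamic_op change above is what routes the two bprop ops through the dynamic wrapper: the wrapper uses this predicate to decide which functions need deferred, per-call compilation, so a graph whose shape input comes from a Parameter is shape-inferred only once concrete values arrive. A sketch of the call-site pattern, mirroring the backend tests in this diff (f is assumed to hold a v1::ConvolutionBackpropData fed by a Parameter shape):

    // The boolean argument asks Backend::create for dynamic-shape support.
    auto backend = runtime::Backend::create("INTERPRETER", true);
    auto handle = backend->compile(f);
    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());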
@@ -1088,13 +1088,12 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
             }
             if (op_version == 1)
             {
-                auto data_batch_shape = node_js.at("data_batch_shape").get<vector<size_t>>();
                 auto strides = node_js.at("strides").get<vector<size_t>>();
                 auto dilations = node_js.at("dilations").get<vector<size_t>>();
                 auto pads_begin = node_js.at("pads_begin").get<vector<std::ptrdiff_t>>();
                 auto pads_end = node_js.at("pads_end").get<vector<std::ptrdiff_t>>();
                 node = make_shared<op::v1::ConvolutionBackpropData>(
-                    data_batch_shape, args[0], args[1], strides, dilations, pads_begin, pads_end);
+                    args[0], args[1], args[2], strides, dilations, pads_begin, pads_end);
             }
             break;
         }
@@ -1131,7 +1130,7 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
                 auto pads_begin = node_js.at("pads_begin").get<vector<std::ptrdiff_t>>();
                 auto pads_end = node_js.at("pads_end").get<vector<std::ptrdiff_t>>();
                 node = make_shared<op::v1::ConvolutionBackpropFilters>(
-                    args[0], filters_shape, args[1], strides, dilations, pads_begin, pads_end);
+                    args[0], args[1], args[2], strides, dilations, pads_begin, pads_end);
             }
             break;
         }
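
On the serializer side, the v1 deserializer above no longer reads a data_batch_shape or filters_shape attribute; the shape now arrives as the op's third argument (args[2]). A hedged round-trip sketch, assuming f was built with the new three-input constructors:

    std::string js = serialize(f);                  // shape travels inside the graph
    std::shared_ptr<Function> g = deserialize(js);  // reconstructed from args[2]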
@@ -136,3 +136,103 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_simple_padding)
     handle->call_with_validate({result}, {a, b});
     EXPECT_TRUE(test::all_close_f(vector<float>{expected_result}, read_vector<float>(result)));
 }
+
+// The purpose of this test is to check if we can allow
+// data_batch_shape as a node rather than a static argument
+NGRAPH_TEST(${BACKEND_NAME}, dyn_convolution_backprop_data)
+{
+    Shape shape_filter{6, 3, 3, 3};
+    auto filters = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
+    Shape shape_delta{2, 6, 3, 3};
+    auto deltas = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
+    Shape shape_data_batch_shape{2, 3, 5, 5};
+    auto data_batch_shape =
+        make_shared<op::Parameter>(element::i64, PartialShape{Dimension::dynamic()});
+    auto strides = Strides{1, 1};
+    auto dilations = Strides{1, 1};
+    auto padding_begin = CoordinateDiff{0, 0};
+    auto padding_end = CoordinateDiff{0, 0};
+
+    auto conv1 = make_shared<op::v1::ConvolutionBackpropData>(
+        filters, deltas, data_batch_shape, strides, dilations, padding_begin, padding_end);
+
+    auto f = make_shared<Function>(conv1, ParameterVector{filters, deltas, data_batch_shape});
+
+    auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
+    auto handle = backend->compile(f);
+    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+
+    vector<float> filter, delta, expected_result;
+    for (int i = 0; i < 6 * 3 * 3 * 3; i++)
+        filter.emplace_back(i);
+    for (int i = 0; i < 2 * 6 * 3 * 3; i++)
+        delta.emplace_back(i);
+    for (int i = 0; i < 2 * 3 * 5 * 5; i++)
+        expected_result.emplace_back(i);
+    vector<int64_t> shapes = {2, 3, 5, 5};
+
+    // Create some tensors for input/output
+    auto a = backend->create_tensor(element::f32, shape_filter);
+    copy_data(a, filter);
+    auto b = backend->create_tensor(element::f32, shape_delta);
+    copy_data(b, delta);
+    auto c = backend->create_tensor(element::i64, Shape{shapes.size()}); // dynamic data batch shape
+    copy_data(c, shapes);
+
+    handle->call_with_validate({result}, {a, b, c});
+    EXPECT_FALSE(test::all_close_f(vector<float>{expected_result}, read_vector<float>(result)));
+}
+
+// The purpose of this test is to check if we can allow
+// filters_shape as a node rather than a static argument
+NGRAPH_TEST(${BACKEND_NAME}, dyn_convolution_backprop_filter)
+{
+    Shape shape_data{64, 3, 100};
+    auto data = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
+    Shape shape_delta{64, 128, 96};
+    auto deltas = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
+    auto filters_shape =
+        make_shared<op::Parameter>(element::i64, PartialShape{Dimension::dynamic()});
+    auto strides = Strides{1};
+    auto dilations = Strides{1};
+    auto padding_begin = CoordinateDiff{2};
+    auto padding_end = CoordinateDiff{3};
+
+    auto conv1 = make_shared<op::v1::ConvolutionBackpropFilters>(
+        data, deltas, filters_shape, strides, dilations, padding_begin, padding_end);
+
+    auto f = make_shared<Function>(conv1, ParameterVector{data, deltas, filters_shape});
+
+    auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
+    auto handle = backend->compile(f);
+    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+
+    vector<float> input, delta, expected_result;
+    for (int i = 0; i < 64 * 3 * 100; i++)
+        input.emplace_back(i);
+    for (int i = 0; i < 64 * 128 * 96; i++)
+        delta.emplace_back(i);
+    for (int i = 0; i < 128 * 3 * 10; i++)
+        expected_result.emplace_back(i);
+    vector<int64_t> shapes = {128, 3, 10};
+
+    // Create some tensors for input/output
+    auto a = backend->create_tensor(element::f32, shape_data);
+    copy_data(a, input);
+    auto b = backend->create_tensor(element::f32, shape_delta);
+    copy_data(b, delta);
+    auto c = backend->create_tensor(element::i64, Shape{shapes.size()}); // dynamic filters shape
+    copy_data(c, shapes);
+
+    handle->call_with_validate({result}, {a, b, c});
+    EXPECT_FALSE(test::all_close_f(vector<float>{expected_result}, read_vector<float>(result)));
+}
@@ -80,7 +80,7 @@ TEST(opset_transform, opset1_convolution_downgrade_pass)
 TEST(opset_transform, opset1_convolution_backprop_data_downgrade_pass)
 {
-    Shape data_batch_shape{64, 3, 100};
+    auto data_batch_shape = op::Constant::create<int64_t>(element::i64, Shape{3}, {64, 3, 100});
     auto filters = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10});
     auto delta = make_shared<op::Parameter>(element::f32, Shape{64, 128, 96});
     auto strides = Strides{1};
@@ -89,7 +89,7 @@ TEST(opset_transform, opset1_convolution_backprop_data_downgrade_pass)
     auto padding_end = CoordinateDiff{3};
     auto conv = make_shared<op::v1::ConvolutionBackpropData>(
-        data_batch_shape, filters, delta, strides, dilations, padding_begin, padding_end);
+        filters, delta, data_batch_shape, strides, dilations, padding_begin, padding_end);
     auto result = make_shared<op::Result>(conv);
     auto f = make_shared<Function>(ResultVector{result}, ParameterVector{filters, delta});
@@ -103,7 +103,7 @@ TEST(opset_transform, opset1_convolution_backprop_data_downgrade_pass)
     EXPECT_EQ(conv_v0_node->description(), "ConvolutionBackpropData");
     EXPECT_EQ(conv_v0_node->get_version(), 0);
-    EXPECT_EQ(conv_v0_node->get_data_batch_shape(), data_batch_shape);
+    EXPECT_EQ(conv_v0_node->get_data_batch_shape(), (Shape{64, 3, 100}));
     EXPECT_EQ(conv_v0_node->get_window_movement_strides_forward(), strides);
     EXPECT_EQ(conv_v0_node->get_window_dilation_strides_forward(), dilations);
     EXPECT_EQ(conv_v0_node->get_padding_below_forward(), padding_begin);
@@ -113,7 +113,7 @@ TEST(opset_transform, opset1_convolution_backprop_data_downgrade_pass)
 TEST(opset_transform, opset1_convolution_backprop_filters_downgrade_pass)
 {
-    Shape filters_shape{128, 3, 10};
+    auto filters_shape = op::Constant::create<int64_t>(element::i64, Shape{3}, {128, 3, 10});
     auto data = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100});
     auto delta = make_shared<op::Parameter>(element::f32, Shape{64, 128, 96});
     auto strides = Strides{1};
@@ -121,7 +121,7 @@ TEST(opset_transform, opset1_convolution_backprop_filters_downgrade_pass)
     auto padding_begin = CoordinateDiff{2};
     auto padding_end = CoordinateDiff{3};
     auto conv = make_shared<op::v1::ConvolutionBackpropFilters>(
-        data, filters_shape, delta, strides, dilations, padding_begin, padding_end);
+        data, delta, filters_shape, strides, dilations, padding_begin, padding_end);
     auto result = make_shared<op::Result>(conv);
     auto f = make_shared<Function>(ResultVector{result}, ParameterVector{data, delta});
@@ -135,7 +135,7 @@ TEST(opset_transform, opset1_convolution_backprop_filters_downgrade_pass)
     EXPECT_EQ(conv_v0_node->description(), "ConvolutionBackpropFilters");
     EXPECT_EQ(conv_v0_node->get_version(), 0);
-    EXPECT_EQ(conv_v0_node->get_filters_shape(), filters_shape);
+    EXPECT_EQ(conv_v0_node->get_filters_shape(), (Shape{128, 3, 10}));
     EXPECT_EQ(conv_v0_node->get_window_movement_strides_forward(), strides);
     EXPECT_EQ(conv_v0_node->get_window_dilation_strides_forward(), dilations);
     EXPECT_EQ(conv_v0_node->get_padding_below_forward(), padding_begin);
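
The elided middle of these tests presumably registers and runs the downgrade pass; a hypothetical sketch of that boilerplate (the variable name conv_v0_node follows the visible assertions, everything else is assumed):

    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<pass::Opset0Downgrade>();
    pass_manager.run_passes(f);
    auto conv_v0_node = static_pointer_cast<op::v0::ConvolutionBackpropData>(
        f->get_results().at(0)->input_value(0).get_node_shared_ptr());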
@@ -2804,3 +2804,39 @@ TEST(type_prop, conv_partial_dynamic_et)
     ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
         PartialShape{64, 100, 1, Dimension::dynamic()}));
 }
+
+TEST(type_prop, conv_bprop_filter_v1_output_partial_shape_dynamic)
+{
+    Shape shape_data{64, 3, 100};
+    auto data = make_shared<op::Parameter>(element::f32, shape_data);
+    Shape shape_delta{64, 128, 96};
+    auto deltas = make_shared<op::Parameter>(element::f32, shape_delta);
+    auto filters_shape = make_shared<op::Parameter>(element::i64, Shape{128, 3, 10});
+    auto strides = Strides{1};
+    auto dilations = Strides{1};
+    auto padding_begin = CoordinateDiff{2};
+    auto padding_end = CoordinateDiff{3};
+
+    auto conv1 = make_shared<op::v1::ConvolutionBackpropFilters>(
+        data, deltas, filters_shape, strides, dilations, padding_begin, padding_end);
+
+    ASSERT_TRUE(conv1->get_output_partial_shape(0).is_dynamic());
+}
+
+TEST(type_prop, conv_bprop_data_v1_output_partial_shape_dynamic)
+{
+    Shape shape_filter{6, 3, 3, 3};
+    auto filters = make_shared<op::Parameter>(element::f32, shape_filter);
+    Shape shape_delta{2, 6, 3, 3};
+    auto deltas = make_shared<op::Parameter>(element::f32, shape_delta);
+    Shape shape_data_batch_shape{2, 3, 5, 5};
+    auto data_batch_shape = make_shared<op::Parameter>(element::i64, Shape{2, 3, 5, 5});
+    auto strides = Strides{1, 1};
+    auto dilations = Strides{1, 1};
+    auto padding_begin = CoordinateDiff{0, 0};
+    auto padding_end = CoordinateDiff{0, 0};
+
+    auto conv1 = make_shared<op::v1::ConvolutionBackpropData>(
+        filters, deltas, data_batch_shape, strides, dilations, padding_begin, padding_end);
+
+    ASSERT_TRUE(conv1->get_output_partial_shape(0).is_dynamic());
+}