Commit 8f999289 authored by Tomasz Socha, committed by Scott Cyphers

[SPEC] Adjust ConvolutionBackpropData op. (#3935)

* [SPEC] Adjust ConvolutionBackpropData op.

```
inputs:
  1. filters-------+
  2. output_delta  |  -> 1. data
                   +---> 2. filters
  3. data_batch_shape -> 3. output_shape(+optional)

attributes:
  1. strides          -> 1. strides
  2. dilations-----+
  3. pads_begin    |  -> 2. pads_begin
  4. pads_end      |  -> 3. pads_end
                   +---> 4. dilations
                      -> 5. +auto_pad(optional)[PadType::EXPLICIT]
                      -> 6. +output_padding(optional)[zeros]
```
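
For callers, the reordering looks like this. A minimal before/after sketch, assuming the usual nGraph headers and `using namespace ngraph`; the shapes are made up for illustration and are not part of this change:

```cpp
// Hypothetical 2D case: delta produced by a forward conv, OIHW-style filters.
auto delta = std::make_shared<op::Parameter>(element::f32, Shape{64, 128, 96, 96});
auto filters = std::make_shared<op::Parameter>(element::f32, Shape{128, 3, 3, 3});
auto output_shape = op::Constant::create(element::i64, Shape{4}, {64, 3, 100, 100});

// Old v1 order: (filters, output_delta, data_batch_shape, strides, dilations, pads_begin, pads_end)
// New v1 order:
auto bprop = std::make_shared<op::v1::ConvolutionBackpropData>(
    delta,                // 1. data (previously 2. output_delta)
    filters,              // 2. filters (previously 1.)
    output_shape,         // 3. output_shape, now an optional input
    Strides{1, 1},        // strides
    CoordinateDiff{0, 0}, // pads_begin
    CoordinateDiff{0, 0}, // pads_end
    Strides{1, 1});       // dilations, moved after the pads
```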

* Review fix I
parent f3603647
@@ -103,22 +103,49 @@ namespace ngraph
             const NodeTypeInfo& get_type_info() const override { return type_info; }
             /// \brief Constructs a batched-convolution data batch-backprop operation.
             ConvolutionBackpropData() = default;
+            // clang-format off
             /// \brief Constructs a batched-convolution data batch-backprop operation.
             ///
-            /// \param data_batch_shape The shape of the data batch from forward-prop.
-            /// \param filters The node producing the filters from forward-prop.
-            /// \param output_delta The node producing output delta.
-            /// \param strides The strides from forward-prop.
-            /// \param dilations The dilations from forward-prop.
-            /// \param pads_begin The padding-below sizes from forward-prop.
-            /// \param pads_end The padding-above sizes from forward-prop.
-            ConvolutionBackpropData(const Output<Node>& filters,
-                                    const Output<Node>& output_delta,
-                                    const Output<Node>& data_batch_shape,
-                                    const Strides& strides,
-                                    const Strides& dilations,
-                                    const CoordinateDiff& pads_begin,
-                                    const CoordinateDiff& pads_end);
+            /// \param data The node producing data from forward-prop.
+            /// \param filters The node producing the filters from forward-prop.
+            /// \param output_shape The shape of the data batch from forward-prop.
+            /// \param strides The strides from forward-prop.
+            /// \param pads_begin The padding-below sizes from forward-prop.
+            /// \param pads_end The padding-above sizes from forward-prop.
+            /// \param dilations The dilations from forward-prop.
+            /// \param auto_pad The pad type for automatically computing padding sizes.
+            /// \param output_padding The output padding adds additional amount of paddings per each spatial axis in the output tensor.
+            // clang-format on
+            ConvolutionBackpropData(const Output<Node>& data,
+                                    const Output<Node>& filters,
+                                    const Output<Node>& output_shape,
+                                    const Strides& strides,
+                                    const CoordinateDiff& pads_begin,
+                                    const CoordinateDiff& pads_end,
+                                    const Strides& dilations,
+                                    const PadType& auto_pad = PadType::EXPLICIT,
+                                    const CoordinateDiff& output_padding = {});
+            // clang-format off
+            /// \brief Constructs a batched-convolution data batch-backprop operation.
+            ///
+            /// \param data The node producing data from forward-prop.
+            /// \param filters The node producing the filters from forward-prop.
+            /// \param strides The strides from forward-prop.
+            /// \param pads_begin The padding-below sizes from forward-prop.
+            /// \param pads_end The padding-above sizes from forward-prop.
+            /// \param dilations The dilations from forward-prop.
+            /// \param auto_pad The pad type for automatically computing padding sizes.
+            /// \param output_padding The output padding adds additional amount of paddings per each spatial axis in the output tensor.
+            // clang-format on
+            ConvolutionBackpropData(const Output<Node>& data,
+                                    const Output<Node>& filters,
+                                    const Strides& strides,
+                                    const CoordinateDiff& pads_begin,
+                                    const CoordinateDiff& pads_end,
+                                    const Strides& dilations,
+                                    const PadType& auto_pad = PadType::EXPLICIT,
+                                    const CoordinateDiff& output_padding = {});
             void validate_and_infer_types() override;
@@ -128,8 +155,8 @@ namespace ngraph
                 copy_with_new_args(const NodeVector& new_args) const override;
             /// \return The data batch shape.
-            const Shape get_data_batch_shape() const;
-            void set_data_batch_shape(const Shape& data_batch_shape);
+            const PartialShape get_output_shape() const;
+            void set_output_shape(const Shape& output_shape);
             /// \return The strides from the forward prop.
             const Strides& get_strides() const { return m_strides; }
             void set_strides(const Strides& strides) { m_strides = strides; }
@@ -142,15 +169,23 @@ namespace ngraph
             /// \return The padding-above sizes (possibly negative) from the forward prop.
             const CoordinateDiff& get_pads_end() const { return m_pads_end; }
             void set_pads_end(const CoordinateDiff& pads_end) { m_pads_end = pads_end; }
-            // Compute the pad_above values to be used if in a convolution
-            CoordinateDiff compute_backward_delta_out_pad_above() const;
-            CoordinateDiff compute_backward_delta_out_pad_below() const;
+            /// \return The auto pad.
+            const PadType& get_auto_pad() const { return m_auto_pad; }
+            void set_auto_pad(const PadType& auto_pad) { m_auto_pad = auto_pad; }
+            /// \return The output padding.
+            const CoordinateDiff& get_output_padding() const { return m_output_padding; }
+            void set_output_padding(const CoordinateDiff& output_padding)
+            {
+                m_output_padding = output_padding;
+            }
         protected:
             Strides m_strides;
             Strides m_dilations;
             CoordinateDiff m_pads_begin;
             CoordinateDiff m_pads_end;
+            PadType m_auto_pad;
+            CoordinateDiff m_output_padding;
         };
/// \brief Filters backprop for batched convolution operation.
@@ -398,7 +433,7 @@ namespace ngraph
             ///
             /// \param data_batch_shape The shape of the data batch from forward-prop.
             /// \param filters The node producing the filters from forward-prop.
-            /// \param output_delta The node producing output delta.
+            /// \param data The node producing output delta.
             /// \param window_movement_strides_forward The window movement strides from
             ///                                        forward-prop.
             /// \param window_dilation_strides_forward The window dilation strides from
@@ -409,7 +444,7 @@ namespace ngraph
             ///                                        forward-prop.
             ConvolutionBackpropData(const Shape& data_batch_shape,
                                     const Output<Node>& filters,
-                                    const Output<Node>& output_delta,
+                                    const Output<Node>& data,
                                     const Strides& window_movement_strides_forward,
                                     const Strides& window_dilation_strides_forward,
                                     const CoordinateDiff& padding_below_forward,
......
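
The second overload above omits the `output_shape` input altogether. A hedged sketch of that overload together with the new accessors, reusing the illustrative `delta` and `filters` from the earlier sketch:

```cpp
// With no output_shape input, the spatial output dimensions are derived from
// strides, pads, dilations, auto_pad, and output_padding instead.
auto bprop2 = std::make_shared<op::v1::ConvolutionBackpropData>(
    delta,
    filters,
    Strides{2, 2},         // strides
    CoordinateDiff{1, 1},  // pads_begin
    CoordinateDiff{1, 1},  // pads_end
    Strides{1, 1},         // dilations
    op::PadType::EXPLICIT, // auto_pad, the default
    CoordinateDiff{1, 1}); // output_padding, defaults to zeros when omitted

const op::PadType& pad_mode = bprop2->get_auto_pad();       // PadType::EXPLICIT
const CoordinateDiff& extra = bprop2->get_output_padding(); // {1, 1}
```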
@@ -137,22 +137,34 @@ namespace
     bool op_cast(shared_ptr<op::v1::ConvolutionBackpropData> node)
     {
-        NGRAPH_CHECK(node->input_value(2).get_node_shared_ptr()->is_constant());
-        auto data_batch_shape =
-            static_pointer_cast<op::Constant>(node->input_value(2).get_node_shared_ptr())
-                ->get_shape_val();
-        const auto filters_arg = node->input_value(0);
-        const auto delta_arg = node->input_value(1);
+        auto output_shape = as_type_ptr<op::Constant>(node->input_value(2).get_node_shared_ptr());
+        const auto data_arg = node->input(0).get_source_output();
+        const auto filters_arg = node->input(1).get_source_output();
         const PartialShape& delta_arg_pshape = node->get_input_partial_shape(1);
         NGRAPH_CHECK(delta_arg_pshape.rank().is_static(),
                      "Unable to convert ConvolutionBackpropData:v1 to ConvolutionBackpropData:v0 "
                      "if delta argument rank is dynamic. Node: ",
                      *node);
+        NGRAPH_CHECK(output_shape,
+                     "Unable to convert ConvolutionBackpropData:v1 to ConvolutionBackpropData:v0 "
+                     "if output_shape is not constant. Node: ",
+                     *node);
         const size_t num_spatial_dims = static_cast<size_t>(delta_arg_pshape.rank()) - 2;
+        auto output_padding = node->get_output_padding();
+        bool is_op_valid = all_of(
+            output_padding.begin(), output_padding.end(), [](size_t value) { return value == 0; });
+        NGRAPH_CHECK(is_op_valid,
+                     "Unable to convert ConvolutionBackpropData:v1 to ConvolutionBackpropData:v0 "
+                     "with output padding other than `0`. Node: ",
+                     *node);
         auto replacement_node =
-            make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
+            make_shared<op::v0::ConvolutionBackpropData>(output_shape->get_shape_val(),
                                                          filters_arg,
-                                                         delta_arg,
+                                                         data_arg,
                                                          node->get_strides(),
                                                          node->get_dilations(),
                                                          node->get_pads_begin(),
......
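
Taken together, the checks above mean the downgrade succeeds only when `output_shape` comes from a `Constant`, the delta rank is static, and `output_padding` is all zeros. A hedged sketch of a v1 node this pass would accept, with illustrative shapes and the `delta`/`filters` parameters from the earlier sketches:

```cpp
// Downgradeable: constant output_shape, statically ranked inputs, and
// output_padding left at its default, documented above as zeros.
auto out_shape = op::Constant::create(element::i64, Shape{4}, {64, 3, 100, 100});
auto downgradeable = std::make_shared<op::v1::ConvolutionBackpropData>(
    delta, filters, out_shape,
    Strides{1, 1},
    CoordinateDiff{0, 0},
    CoordinateDiff{0, 0},
    Strides{1, 1}); // auto_pad/output_padding omitted -> EXPLICIT / zeros
```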
@@ -179,13 +179,14 @@ namespace
                      "other than `1`. Node: ",
                      *node);
-        auto replacement_node = make_shared<op::v1::ConvolutionBackpropData>(node->input_value(0),
-                                                                             node->input_value(1),
-                                                                             node->input_value(2),
-                                                                             strides,
-                                                                             dilations,
-                                                                             pads_begin,
-                                                                             pads_end);
+        auto replacement_node = make_shared<op::v1::ConvolutionBackpropData>(
+            node->input_value(1), // data
+            node->input_value(0), // filters
+            op::Constant::create(element::i64, Shape{data_batch_shape.size()}, data_batch_shape),
+            strides,
+            pads_begin,
+            pads_end,
+            dilations);
         replace_node(node, replacement_node);
         return true;
     }
@@ -199,11 +200,9 @@ namespace
         auto pads_end = node->get_padding_above_forward();
         auto data_dilation_strides = node->get_data_dilation_strides_forward();
-        bool is_dds_valid = true;
-        for (auto value : data_dilation_strides)
-        {
-            is_dds_valid = is_dds_valid && (value == 1);
-        }
+        bool is_dds_valid = all_of(data_dilation_strides.begin(),
+                                   data_dilation_strides.end(),
+                                   [](size_t value) { return value == 1; });
         NGRAPH_CHECK(
             is_dds_valid,
......
@@ -1147,9 +1147,30 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
             auto dilations = node_js.at("dilations").get<vector<size_t>>();
             auto pads_begin = node_js.at("pads_begin").get<vector<std::ptrdiff_t>>();
             auto pads_end = node_js.at("pads_end").get<vector<std::ptrdiff_t>>();
-            node = make_shared<op::v1::ConvolutionBackpropData>(
-                args[0], args[1], args[2], strides, dilations, pads_begin, pads_end);
+            auto output_padding = node_js.at("output_padding").get<vector<std::ptrdiff_t>>();
+            if (args.size() == 3)
+            {
+                node = make_shared<op::v1::ConvolutionBackpropData>(args[0],
+                                                                    args[1],
+                                                                    args[2],
+                                                                    strides,
+                                                                    pads_begin,
+                                                                    pads_end,
+                                                                    dilations,
+                                                                    read_pad_type(node_js),
+                                                                    output_padding);
+            }
+            else
+            {
+                node = make_shared<op::v1::ConvolutionBackpropData>(args[0],
+                                                                    args[1],
+                                                                    strides,
+                                                                    pads_begin,
+                                                                    pads_end,
+                                                                    dilations,
+                                                                    read_pad_type(node_js),
+                                                                    output_padding);
+            }
             break;
         }
         case OP_TYPEID::ConvolutionBackpropFilters:
@@ -3149,11 +3170,12 @@ json JSONSerializer::serialize_node(const Node& n)
     case OP_TYPEID::ConvolutionBackpropData_v1:
     {
         auto tmp = static_cast<const op::v1::ConvolutionBackpropData*>(&n);
-        node["data_batch_shape"] = tmp->get_data_batch_shape();
         node["strides"] = tmp->get_strides();
         node["dilations"] = tmp->get_dilations();
         node["pads_begin"] = tmp->get_pads_begin();
         node["pads_end"] = tmp->get_pads_end();
+        node["auto_pad"] = tmp->get_auto_pad();
+        node["output_padding"] = tmp->get_output_padding();
         break;
     }
     case OP_TYPEID::ConvolutionBackpropFilters:
......
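
A hedged round-trip sketch for the serializer changes, assuming the `serialize`/`deserialize` entry points from `ngraph/serializer.hpp` and reusing the illustrative `bprop` node built earlier:

```cpp
#include "ngraph/serializer.hpp"

// The JSON now carries "auto_pad" and "output_padding" next to the existing
// attributes; on the way back in, the deserializer picks the 2- or 3-input
// constructor based on args.size().
auto f = std::make_shared<Function>(bprop, ParameterVector{delta, filters});
std::string js = serialize(f);
std::shared_ptr<Function> f2 = deserialize(js);
```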
@@ -154,9 +154,9 @@ NGRAPH_TEST(${BACKEND_NAME}, dyn_convolution_backprop_data)
     auto padding_end = CoordinateDiff{0, 0};
     auto conv1 = make_shared<op::v1::ConvolutionBackpropData>(
-        filters, deltas, data_batch_shape, strides, dilations, padding_begin, padding_end);
+        deltas, filters, data_batch_shape, strides, padding_begin, padding_end, dilations);
-    auto f = make_shared<Function>(conv1, ParameterVector{filters, deltas, data_batch_shape});
+    auto f = make_shared<Function>(conv1, ParameterVector{deltas, filters, data_batch_shape});
     auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
@@ -178,10 +178,10 @@ NGRAPH_TEST(${BACKEND_NAME}, dyn_convolution_backprop_data)
     vector<int64_t> shapes = {2, 3, 5, 5};
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_filter);
-    copy_data(a, filter);
-    auto b = backend->create_tensor(element::f32, shape_delta);
-    copy_data(b, delta);
+    auto a = backend->create_tensor(element::f32, shape_delta);
+    copy_data(a, delta);
+    auto b = backend->create_tensor(element::f32, shape_filter);
+    copy_data(b, filter);
     auto c = backend->create_tensor(element::i64, Shape{shapes.size()}); // dynamic data batch shape
     copy_data(c, shapes);
     handle->call_with_validate({result}, {a, b, c});
......
@@ -87,7 +87,7 @@ TEST(opset_transform, opset1_convolution_backprop_data_downgrade_pass)
     auto padding_end = CoordinateDiff{3};
     auto conv = make_shared<op::v1::ConvolutionBackpropData>(
-        filters, delta, data_batch_shape, strides, dilations, padding_begin, padding_end);
+        delta, filters, data_batch_shape, strides, padding_begin, padding_end, dilations);
     auto result = make_shared<op::Result>(conv);
     auto f = make_shared<Function>(ResultVector{result}, ParameterVector{filters, delta});
......
@@ -2836,7 +2836,7 @@ TEST(type_prop, conv_bprop_data_v1_output_partial_shape_dynamic)
     auto padding_end = CoordinateDiff{0, 0};
     auto conv1 = make_shared<op::v1::ConvolutionBackpropData>(
-        filters, deltas, data_batch_shape, strides, dilations, padding_begin, padding_end);
+        deltas, filters, data_batch_shape, strides, padding_begin, padding_end, dilations);
     ASSERT_TRUE(conv1->get_output_partial_shape(0).is_dynamic());
 }
......