Commit 8f999289 authored by Tomasz Socha, committed by Scott Cyphers

[SPEC] Adjust ConvolutionBackpropData op. (#3935)

* [SPEC] Adjust ConvolutionBackpropData op.

```
inputs:
  1. filters-------+
  2. output_delta  |  -> 1. data
                   +---> 2. filters
  3. data_batch_shape -> 3. output_shape (+optional)

attributes:
  1. strides          -> 1. strides
  2. dilations-----+
  3. pads_begin    |  -> 2. pads_begin
  4. pads_end      |  -> 3. pads_end
                   +---> 4. dilations
                      -> 5. +auto_pad (optional) [PadType::EXPLICIT]
                      -> 6. +output_padding (optional) [zeros]
```

* Review fix I
parent f3603647
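For readers migrating call sites, here is a minimal sketch of the constructor change summarized in the mapping above. The helper name and the unit strides/pads are illustrative, not from the commit; the argument order and the `Constant` output-shape input are taken from the diff below.

```
#include <ngraph/ngraph.hpp>

using namespace ngraph;

// Old v1 order: filters, output_delta, data_batch_shape, strides, dilations, pads_begin, pads_end.
// New v1 order: data (the delta), filters, optional output_shape,
//               strides, pads_begin, pads_end, dilations,
//               auto_pad (defaults to PadType::EXPLICIT), output_padding (defaults to zeros).
std::shared_ptr<Node> make_bprop_data(const Output<Node>& delta,
                                      const Output<Node>& filters,
                                      const Shape& data_batch_shape)
{
    return std::make_shared<op::v1::ConvolutionBackpropData>(
        delta,
        filters,
        op::Constant::create(element::i64, Shape{data_batch_shape.size()}, data_batch_shape),
        Strides{1, 1},
        CoordinateDiff{0, 0},
        CoordinateDiff{0, 0},
        Strides{1, 1});
}
```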
@@ -138,11 +138,24 @@ void op::v1::Convolution::generate_adjoints(autodiff::Adjoints& adjoints, const
     adjoints.add_delta(x,
                        make_shared<op::v1::ConvolutionBackpropData>(
-                           f, delta, x, m_strides, m_dilations, m_pads_begin, m_pads_end));
+                           delta,
+                           f,
+                           op::Constant::create(element::i64, Shape{x_shape.size()}, x_shape),
+                           m_strides,
+                           m_pads_begin,
+                           m_pads_end,
+                           m_dilations,
+                           m_auto_pad));
     adjoints.add_delta(f,
                        make_shared<op::v1::ConvolutionBackpropFilters>(
-                           x, delta, f, m_strides, m_dilations, m_pads_begin, m_pads_end));
+                           x,
+                           delta,
+                           op::Constant::create(element::i64, Shape{x_shape.size()}, f_shape),
+                           m_strides,
+                           m_dilations,
+                           m_pads_begin,
+                           m_pads_end));
 }

 constexpr NodeTypeInfo op::v1::ConvolutionBackpropData::type_info;
@@ -151,141 +164,171 @@ shared_ptr<Node> op::v1::Convolution::get_default_value() const
     return ngraph::make_constant_from_string("0", get_element_type(), get_shape());
 }

-op::v1::ConvolutionBackpropData::ConvolutionBackpropData(const Output<Node>& filters,
-                                                         const Output<Node>& output_delta,
-                                                         const Output<Node>& data_batch_shape,
+op::v1::ConvolutionBackpropData::ConvolutionBackpropData(const Output<Node>& data,
+                                                         const Output<Node>& filters,
+                                                         const Output<Node>& output_shape,
                                                          const Strides& strides,
-                                                         const Strides& dilations,
                                                          const CoordinateDiff& pads_begin,
-                                                         const CoordinateDiff& pads_end)
-    : Op({filters, output_delta, data_batch_shape})
+                                                         const CoordinateDiff& pads_end,
+                                                         const Strides& dilations,
+                                                         const PadType& auto_pad,
+                                                         const CoordinateDiff& output_padding)
+    : Op({data, filters, output_shape})
+    , m_strides(strides)
+    , m_dilations(dilations)
+    , m_pads_begin(pads_begin)
+    , m_pads_end(pads_end)
+    , m_auto_pad(auto_pad)
+    , m_output_padding(output_padding)
+{
+    constructor_validate_and_infer_types();
+}
+
+op::v1::ConvolutionBackpropData::ConvolutionBackpropData(const Output<Node>& data,
+                                                         const Output<Node>& filters,
+                                                         const Strides& strides,
+                                                         const CoordinateDiff& pads_begin,
+                                                         const CoordinateDiff& pads_end,
+                                                         const Strides& dilations,
+                                                         const PadType& auto_pad,
+                                                         const CoordinateDiff& output_padding)
+    : Op({data, filters})
     , m_strides(strides)
     , m_dilations(dilations)
     , m_pads_begin(pads_begin)
     , m_pads_end(pads_end)
+    , m_auto_pad(auto_pad)
+    , m_output_padding(output_padding)
 {
     constructor_validate_and_infer_types();
 }

-const Shape op::v1::ConvolutionBackpropData::get_data_batch_shape() const
+const PartialShape op::v1::ConvolutionBackpropData::get_output_shape() const
 {
-    Shape shape;
-    if (auto const_op = as_type<op::Constant>(input_value(2).get_node()))
+    PartialShape shape{PartialShape::dynamic()};
+    bool is_output_shape_present = get_inputs().size() == 3;
+    if (is_output_shape_present)
     {
-        shape = const_op->get_shape_val();
+        if (auto const_op = as_type<op::Constant>(input_value(2).get_node()))
+        {
+            shape = const_op->get_shape_val();
+        }
     }
     return shape;
 }

-void op::v1::ConvolutionBackpropData::set_data_batch_shape(const Shape& shape)
+void op::v1::ConvolutionBackpropData::set_output_shape(const Shape& shape)
 {
     this->input(2).replace_source_output(
         op::Constant::create(element::i64, Shape{shape.size()}, shape)->output(0));
 }

 void op::v1::ConvolutionBackpropData::validate_and_infer_types()
 {
-    // Backprop to data is itself convolution, with inputs/outputs/attributes transmogrified as
-    // follows.
-    //
-    //                          Forward   Backward
-    // "N" axis for data batch  0         0
-    // "C" axis for data batch  1         1
-    // "Co" axis for filters    0         0
-    // "Ci" axis for filters    1         1
-    // "N" axis for output      0         0
-    // "C" axis for output      1         1
-    // Data batch               x         delta
-    // Data batch shape         S_x       S_o
-    // Filters                  f         reverse(f) [on spatial axes]
-    // Filters shape            S_f       S_f
-    // Window movement strides  q_x       p_x
-    // Window dilation strides  p_f       p_f
-    // Padding below            a_x       (S_f - 1)p_f - a_x
-    // Padding above            b_x       (S_f - 1)p_f +
-    //                                      + ((a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f)
-    //                                         % q_x)
-    //                                      - b_x
-    // Output shape             S_o       S_x
-    //
-    // To _validate_, we simply need to check/infer the output shape of the forward convolution,
-    // then check to make sure that the incoming delta has the same shape as the forward output.
-    const PartialShape& filters_shape = get_input_partial_shape(0);
-    element::Type filters_et = get_input_element_type(0);
-    const PartialShape& delta_shape = get_input_partial_shape(1);
-    element::Type delta_et = get_input_element_type(1);
-
-    element::Type forward_result_et;
-    PartialShape forward_result_shape;
-    PartialShape data_batch_shape{PartialShape::dynamic()};
-    if (input_value(2).get_node_shared_ptr()->is_constant())
-    {
-        data_batch_shape = get_data_batch_shape();
-    }
+    auto data_pshape = get_input_partial_shape(0);
+    element::Type delta_et = get_input_element_type(0);
+    const PartialShape& filters_pshape = get_input_partial_shape(1);
+    element::Type filters_et = get_input_element_type(1);

+    bool is_output_shape_present = get_inputs().size() == 3;
+    PartialShape output_pshape = get_output_shape();
+
+    element::Type result_et;
     NODE_VALIDATION_CHECK(
         this,
-        element::Type::merge(forward_result_et, delta_et, filters_et),
+        element::Type::merge(result_et, delta_et, filters_et),
         "Element types for data batch and filters do not match (data batch element type: ",
         delta_et,
         ", filters element type: ",
         filters_et,
         ").");

-    if (input_value(2).get_node_shared_ptr()->is_constant())
+    if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
     {
-        forward_result_shape = infer_convolution_forward(
-            this,
-            data_batch_shape,
-            Strides(static_cast<size_t>(get_data_batch_shape().size()) - 2, 1),
-            m_pads_begin,
-            m_pads_end,
-            filters_shape,
-            m_strides,
-            m_dilations);
+        NODE_VALIDATION_CHECK(this,
+                              is_output_shape_present,
+                              "Selected Pad type: ",
+                              m_auto_pad,
+                              " requires an output_shape input which is missing.");
+        if (output_pshape.is_static() && filters_pshape.is_static())
+        {
+            m_pads_begin.clear();
+            m_pads_end.clear();
+            auto filter_shape = filters_pshape.to_shape();
+            filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I}
+            infer_auto_padding(output_pshape.to_shape(),
+                               filter_shape,
+                               m_strides,
+                               m_dilations,
+                               m_auto_pad,
+                               m_pads_end,
+                               m_pads_begin);
+        }
+    }

-        NODE_VALIDATION_CHECK(this,
-                              forward_result_shape.compatible(delta_shape),
-                              "Inferred forward output shape (",
-                              forward_result_shape,
-                              ") does not match shape of ",
-                              "delta (",
-                              delta_shape,
-                              ").");
+    PartialShape result_shape;
+    if (is_output_shape_present)
+    {
+        set_input_is_relevant_to_shape(2);
+        if (output_pshape.is_static() && data_pshape.is_static())
+        {
+            auto data_shape = data_pshape.to_shape();
+            auto output_shape = output_pshape.to_shape();
+            output_shape.insert(output_shape.begin(), data_shape.begin(), data_shape.begin() + 1);
+            output_pshape = output_shape;
+        }
+    }
+    else
+    {
+        if (filters_pshape.is_static() && data_pshape.is_static())
+        {
+            auto filters_shape = filters_pshape.to_shape();
+            auto data_shape = data_pshape.to_shape();
+
+            Shape output_shape;
+            auto data_spatial_rank = data_shape.size() - 2;
+            auto output_padding = get_output_padding();
+            if (output_padding.size() == 0)
+            {
+                output_padding.insert(output_padding.begin(), data_spatial_rank, 0);
+            }
+            for (size_t i = 0; i < data_spatial_rank; ++i)
+            {
+                size_t tmp = m_strides[i] * (data_shape[i + 2] - 1) +
+                             ((filters_shape[i + 2] - 1) * m_dilations[i] + 1) - m_pads_begin[i] -
+                             m_pads_end[i] + output_padding[i];
+                output_shape.push_back(tmp);
+            }
+            output_shape.insert(output_shape.begin(), data_shape.begin(), data_shape.begin() + 1);
+            output_pshape = output_shape;
+        }
     }

     set_input_is_relevant_to_shape(0);
     set_input_is_relevant_to_shape(1);
-    set_input_is_relevant_to_shape(2);
-    set_output_type(0, forward_result_et, data_batch_shape);
+    set_output_type(0, result_et, output_pshape);
 }

 void op::v1::ConvolutionBackpropData::generate_adjoints(autodiff::Adjoints& adjoints,
                                                         const NodeVector& deltas)
 {
+    if (input_value(2).get_node_shared_ptr()->is_constant())
+    {
+    }
+    else
+    {
+        throw ngraph_error("Autodiff not supported with dynamic shapes");
+    }
     auto delta = deltas.at(0);

-    auto x = input_value(1);
+    auto x = input_value(0);
     const auto x_shape = x.get_shape();

-    auto f = input_value(0);
+    auto f = input_value(1);
     const auto f_shape = f.get_shape();

     auto data_conv = make_shared<op::v1::Convolution>(
-        delta, f, m_strides, m_pads_begin, m_pads_end, m_dilations);
+        delta, f, m_strides, m_pads_begin, m_pads_end, m_dilations, m_auto_pad);

     adjoints.add_delta(x, data_conv);

     Strides strides = m_dilations;
     CoordinateDiff pads_begin;
     CoordinateDiff pads_end;
-    const Shape& filters_shape = get_input_shape(0);
+    const Shape& filters_shape = get_input_shape(1);
     for (size_t i = 0; i < f_shape.size() - 2; i++)
     {
         ptrdiff_t pads_begin_backward =
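When no output_shape input is supplied, the new validate_and_infer_types() derives each spatial dimension from the standard transposed-convolution relation used in the loop above. A standalone restatement of that arithmetic, as a sketch for reference only (the function name is hypothetical, not part of the commit):

```
#include <cstddef>

// out = stride * (in - 1) + (filter - 1) * dilation + 1
//       - pads_begin - pads_end + output_padding
std::ptrdiff_t deconv_spatial_dim(std::ptrdiff_t in, std::ptrdiff_t filter,
                                  std::ptrdiff_t stride, std::ptrdiff_t dilation,
                                  std::ptrdiff_t pad_begin, std::ptrdiff_t pad_end,
                                  std::ptrdiff_t out_pad)
{
    return stride * (in - 1) + (filter - 1) * dilation + 1 - pad_begin - pad_end + out_pad;
}
// Example: in = 5, filter = 3, stride = 2, dilation = 1, zero pads
//          -> 2 * 4 + 2 * 1 + 1 = 11
```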
@@ -294,8 +337,9 @@ void op::v1::ConvolutionBackpropData::generate_adjoints(autodiff::Adjoints& adjo
         ptrdiff_t pads_end_backward =
             (static_cast<ptrdiff_t>(filters_shape[i + 2]) - 1) * m_dilations[i] +
-            ((m_pads_begin[i] + ((get_data_batch_shape()[i + 2]) - 1) * m_strides[i] +
-              m_pads_end[i] - (static_cast<ptrdiff_t>(filters_shape[i + 2]) - 1) * m_dilations[i]) %
+            ((m_pads_begin[i] +
+              (static_cast<size_t>(get_output_shape()[i + 2]) - 1) * m_strides[i] + m_pads_end[i] -
+              (static_cast<ptrdiff_t>(filters_shape[i + 2]) - 1) * m_dilations[i]) %
              m_strides[i]) -
             m_pads_end[i];
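The pads_end_backward expression above packs several terms into one statement. An equivalent standalone form, as a hypothetical helper for readability only (not in the commit):

```
#include <cstddef>

std::ptrdiff_t pads_end_backward(std::ptrdiff_t filter, std::ptrdiff_t dilation,
                                 std::ptrdiff_t stride, std::ptrdiff_t out_dim,
                                 std::ptrdiff_t pad_begin, std::ptrdiff_t pad_end)
{
    const std::ptrdiff_t dilated_filter = (filter - 1) * dilation;
    // Dilated filter extent, plus the stride-alignment remainder of the
    // forward output, minus the forward pads_end.
    return dilated_filter +
           ((pad_begin + (out_dim - 1) * stride + pad_end - dilated_filter) % stride) - pad_end;
}
```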
@@ -321,7 +365,7 @@ void op::v1::ConvolutionBackpropData::generate_adjoints(autodiff::Adjoints& adjo
     x = swap_NC(x);

     shared_ptr<Node> filter_deconv_bprop = make_shared<op::v1::Convolution>(
-        x, delta, strides, pads_begin, pads_end, Strides(x.get_shape().size() - 2, 1));
+        x, delta, strides, pads_begin, pads_end, Strides(x.get_shape().size() - 2, 1), m_auto_pad);
     AxisSet axes;
     for (size_t i = 2; i < filter_deconv_bprop->get_shape().size(); ++i)
     {
@@ -335,58 +379,29 @@ shared_ptr<Node>
     op::v1::ConvolutionBackpropData::copy_with_new_args(const NodeVector& new_args) const
 {
     check_new_args_count(this, new_args);
-    return make_shared<v1::ConvolutionBackpropData>(new_args.at(0),
-                                                    new_args.at(1),
-                                                    new_args.at(2),
-                                                    m_strides,
-                                                    m_dilations,
-                                                    m_pads_begin,
-                                                    m_pads_end);
+    if (new_args.size() == 3)
+    {
+        return make_shared<v1::ConvolutionBackpropData>(new_args.at(0),
+                                                        new_args.at(1),
+                                                        new_args.at(2),
+                                                        m_strides,
+                                                        m_pads_begin,
+                                                        m_pads_end,
+                                                        m_dilations,
+                                                        m_auto_pad,
+                                                        m_output_padding);
+    }
+    else
+    {
+        return make_shared<v1::ConvolutionBackpropData>(new_args.at(0),
+                                                        new_args.at(1),
+                                                        m_strides,
+                                                        m_pads_begin,
+                                                        m_pads_end,
+                                                        m_dilations,
+                                                        m_auto_pad,
+                                                        m_output_padding);
+    }
 }
-
-CoordinateDiff op::v1::ConvolutionBackpropData::compute_backward_delta_out_pad_below() const
-{
-    auto& in_shape = get_data_batch_shape();
-    auto& filter_dilation = get_dilations();
-    auto& filter_shape = get_input_shape(0);
-    auto& in_pad_below = get_pads_begin();
-    size_t spatial_dim_count = static_cast<size_t>(in_shape.size()) - 2;
-
-    CoordinateDiff backward_delta_out_pad_below;
-    backward_delta_out_pad_below.resize(spatial_dim_count);
-
-    for (size_t i = 0; i < spatial_dim_count; i++)
-    {
-        backward_delta_out_pad_below[i] =
-            (static_cast<ptrdiff_t>(filter_shape[i + 2]) - 1) * filter_dilation[i] -
-            in_pad_below[i];
-    }
-    return backward_delta_out_pad_below;
-}
-
-CoordinateDiff op::v1::ConvolutionBackpropData::compute_backward_delta_out_pad_above() const
-{
-    auto& in_shape = get_data_batch_shape();
-    auto& filter_dilation = get_dilations();
-    auto& filter_shape = get_input_shape(0);
-    auto& in_pad_below = get_pads_begin();
-    auto& in_pad_above = get_pads_end();
-    auto& stride = get_strides();
-    size_t spatial_dim_count = static_cast<size_t>(in_shape.size()) - 2;
-
-    CoordinateDiff backward_delta_out_pad_above;
-    backward_delta_out_pad_above.resize(spatial_dim_count);
-
-    for (size_t i = 0; i < spatial_dim_count; i++)
-    {
-        backward_delta_out_pad_above[i] =
-            (static_cast<ptrdiff_t>(filter_shape[i + 2]) - 1) * filter_dilation[i] +
-            ((in_pad_below[i] + ((in_shape[i + 2]) - 1) + in_pad_above[i] -
-              (static_cast<ptrdiff_t>(filter_shape[i + 2]) - 1) * filter_dilation[i]) %
-             stride[i]) -
-            in_pad_above[i];
-    }
-    return backward_delta_out_pad_above;
-}

 constexpr NodeTypeInfo op::v1::ConvolutionBackpropFilters::type_info;
......
@@ -103,22 +103,49 @@ namespace ngraph
             const NodeTypeInfo& get_type_info() const override { return type_info; }
             /// \brief Constructs a batched-convolution data batch-backprop operation.
             ConvolutionBackpropData() = default;
+            // clang-format off
             /// \brief Constructs a batched-convolution data batch-backprop operation.
             ///
-            /// \param data_batch_shape The shape of the data batch from forward-prop.
+            /// \param data The node producing data from forward-prop.
             /// \param filters The node producing the filters from forward-prop.
-            /// \param output_delta The node producing output delta.
+            /// \param output_shape The shape of the data batch from forward-prop.
             /// \param strides The strides from forward-prop.
-            /// \param dilations The dilations from forward-prop.
             /// \param pads_begin The padding-below sizes from forward-prop.
             /// \param pads_end The padding-above sizes from forward-prop.
-            ConvolutionBackpropData(const Output<Node>& filters,
-                                    const Output<Node>& output_delta,
-                                    const Output<Node>& data_batch_shape,
+            /// \param dilations The dilations from forward-prop.
+            /// \param auto_pad The pad type for automatically computing padding sizes.
+            /// \param output_padding The output padding adds an additional amount of padding per each spatial axis in the output tensor.
+            // clang-format on
+            ConvolutionBackpropData(const Output<Node>& data,
+                                    const Output<Node>& filters,
+                                    const Output<Node>& output_shape,
                                     const Strides& strides,
+                                    const CoordinateDiff& pads_begin,
+                                    const CoordinateDiff& pads_end,
                                     const Strides& dilations,
+                                    const PadType& auto_pad = PadType::EXPLICIT,
+                                    const CoordinateDiff& output_padding = {});
+
+            // clang-format off
+            /// \brief Constructs a batched-convolution data batch-backprop operation.
+            ///
+            /// \param data The node producing data from forward-prop.
+            /// \param filters The node producing the filters from forward-prop.
+            /// \param strides The strides from forward-prop.
+            /// \param pads_begin The padding-below sizes from forward-prop.
+            /// \param pads_end The padding-above sizes from forward-prop.
+            /// \param dilations The dilations from forward-prop.
+            /// \param auto_pad The pad type for automatically computing padding sizes.
+            /// \param output_padding The output padding adds an additional amount of padding per each spatial axis in the output tensor.
+            // clang-format on
+            ConvolutionBackpropData(const Output<Node>& data,
+                                    const Output<Node>& filters,
+                                    const Strides& strides,
                                     const CoordinateDiff& pads_begin,
-                                    const CoordinateDiff& pads_end);
+                                    const CoordinateDiff& pads_end,
+                                    const Strides& dilations,
+                                    const PadType& auto_pad = PadType::EXPLICIT,
+                                    const CoordinateDiff& output_padding = {});

             void validate_and_infer_types() override;
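A usage sketch of the two declarations above, loosely mirroring the updated tests further down; parameter shapes are illustrative assumptions, and the output shape is given here as a constant rather than the tests' dynamic parameter:

```
#include <ngraph/ngraph.hpp>

using namespace ngraph;

auto deltas = std::make_shared<op::Parameter>(element::f32, Shape{2, 16, 2, 2});
auto filters = std::make_shared<op::Parameter>(element::f32, Shape{16, 3, 3, 3});

// Two-input form: no output_shape input; spatial sizes are inferred.
auto bprop_inferred = std::make_shared<op::v1::ConvolutionBackpropData>(
    deltas, filters, Strides{2, 2}, CoordinateDiff{0, 0}, CoordinateDiff{0, 0}, Strides{1, 1});

// Three-input form: output shape supplied explicitly, as the dynamic-shape test does.
auto data_batch_shape =
    op::Constant::create(element::i64, Shape{4}, std::vector<int64_t>{2, 3, 5, 5});
auto bprop_explicit = std::make_shared<op::v1::ConvolutionBackpropData>(
    deltas, filters, data_batch_shape, Strides{2, 2}, CoordinateDiff{0, 0},
    CoordinateDiff{0, 0}, Strides{1, 1});
```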
@@ -128,8 +155,8 @@ namespace ngraph
                 copy_with_new_args(const NodeVector& new_args) const override;

             /// \return The data batch shape.
-            const Shape get_data_batch_shape() const;
-            void set_data_batch_shape(const Shape& data_batch_shape);
+            const PartialShape get_output_shape() const;
+            void set_output_shape(const Shape& output_shape);
             /// \return The strides from the forward prop.
             const Strides& get_strides() const { return m_strides; }
             void set_strides(const Strides& strides) { m_strides = strides; }
@@ -142,15 +169,23 @@ namespace ngraph
             /// \return The padding-above sizes (possibly negative) from the forward prop.
             const CoordinateDiff& get_pads_end() const { return m_pads_end; }
             void set_pads_end(const CoordinateDiff& pads_end) { m_pads_end = pads_end; }
-            // Compute the pad_above values to be used if in a convolution
-            CoordinateDiff compute_backward_delta_out_pad_above() const;
-            CoordinateDiff compute_backward_delta_out_pad_below() const;
+            /// \return The auto pad.
+            const PadType& get_auto_pad() const { return m_auto_pad; }
+            void set_auto_pad(const PadType& auto_pad) { m_auto_pad = auto_pad; }
+            /// \return The output padding.
+            const CoordinateDiff& get_output_padding() const { return m_output_padding; }
+            void set_output_padding(const CoordinateDiff& output_padding)
+            {
+                m_output_padding = output_padding;
+            }

         protected:
             Strides m_strides;
             Strides m_dilations;
             CoordinateDiff m_pads_begin;
             CoordinateDiff m_pads_end;
+            PadType m_auto_pad;
+            CoordinateDiff m_output_padding;
         };

         /// \brief Filters backprop for batched convolution operation.
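The new accessors follow the class's existing getter/setter pattern; for instance (illustrative, assuming `bprop` is a `std::shared_ptr<op::v1::ConvolutionBackpropData>`):

```
// Hypothetical snippet exercising the two new attributes.
bprop->set_auto_pad(op::PadType::SAME_UPPER);
bprop->set_output_padding(CoordinateDiff{0, 0});
CoordinateDiff out_pad = bprop->get_output_padding(); // {0, 0}
```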
@@ -398,7 +433,7 @@ namespace ngraph
             ///
             /// \param data_batch_shape The shape of the data batch from forward-prop.
             /// \param filters The node producing the filters from forward-prop.
-            /// \param output_delta The node producing output delta.
+            /// \param data The node producing output delta.
             /// \param window_movement_strides_forward The window movement strides from
             ///                                        forward-prop.
             /// \param window_dilation_strides_forward The window dilation strides from
@@ -409,7 +444,7 @@ namespace ngraph
             ///                                        forward-prop.
             ConvolutionBackpropData(const Shape& data_batch_shape,
                                     const Output<Node>& filters,
-                                    const Output<Node>& output_delta,
+                                    const Output<Node>& data,
                                     const Strides& window_movement_strides_forward,
                                     const Strides& window_dilation_strides_forward,
                                     const CoordinateDiff& padding_below_forward,
......
@@ -137,22 +137,34 @@ namespace
         bool op_cast(shared_ptr<op::v1::ConvolutionBackpropData> node)
         {
-            NGRAPH_CHECK(node->input_value(2).get_node_shared_ptr()->is_constant());
-            auto data_batch_shape =
-                static_pointer_cast<op::Constant>(node->input_value(2).get_node_shared_ptr())
-                    ->get_shape_val();
-            const auto filters_arg = node->input_value(0);
-            const auto delta_arg = node->input_value(1);
+            auto output_shape = as_type_ptr<op::Constant>(node->input_value(2).get_node_shared_ptr());
+            const auto data_arg = node->input(0).get_source_output();
+            const auto filters_arg = node->input(1).get_source_output();
             const PartialShape& delta_arg_pshape = node->get_input_partial_shape(1);
             NGRAPH_CHECK(delta_arg_pshape.rank().is_static(),
                          "Unable to convert ConvolutionBackpropData:v1 to ConvolutionBackpropData:v0 "
                          "if delta argument rank is dynamic. Node: ",
                          *node);
+            NGRAPH_CHECK(output_shape,
+                         "Unable to convert ConvolutionBackpropData:v1 to ConvolutionBackpropData:v0 "
+                         "if output_shape is not constant. Node: ",
+                         *node);
             const size_t num_spatial_dims = static_cast<size_t>(delta_arg_pshape.rank()) - 2;
+
+            auto output_padding = node->get_output_padding();
+            bool is_op_valid = all_of(
+                output_padding.begin(), output_padding.end(), [](size_t value) { return value == 0; });
+            NGRAPH_CHECK(is_op_valid,
+                         "Unable to convert ConvolutionBackpropData:v1 to ConvolutionBackpropData:v0 "
+                         "with output padding other than `0`. Node: ",
+                         *node);
+
             auto replacement_node =
-                make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
+                make_shared<op::v0::ConvolutionBackpropData>(output_shape->get_shape_val(),
                                                              filters_arg,
-                                                             delta_arg,
+                                                             data_arg,
                                                              node->get_strides(),
                                                              node->get_dilations(),
                                                              node->get_pads_begin(),
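This op_cast runs as part of the opset downgrade pass. A brief invocation sketch, assuming nGraph's pass manager API of this period (the function wrapper is illustrative):

```
#include <ngraph/pass/manager.hpp>
#include <ngraph/pass/opset0_downgrade.hpp>

void downgrade(std::shared_ptr<ngraph::Function> f)
{
    // Rewrites v1 ops (including ConvolutionBackpropData:v1) to their v0 forms.
    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<ngraph::pass::Opset0Downgrade>();
    pass_manager.run_passes(f);
}
```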
......
@@ -179,13 +179,14 @@ namespace
                          "other than `1`. Node: ",
                          *node);

-            auto replacement_node = make_shared<op::v1::ConvolutionBackpropData>(node->input_value(0),
-                                                                                 node->input_value(1),
-                                                                                 node->input_value(2),
-                                                                                 strides,
-                                                                                 dilations,
-                                                                                 pads_begin,
-                                                                                 pads_end);
+            auto replacement_node = make_shared<op::v1::ConvolutionBackpropData>(
+                node->input_value(1), // data
+                node->input_value(0), // filters
+                op::Constant::create(element::i64, Shape{data_batch_shape.size()}, data_batch_shape),
+                strides,
+                pads_begin,
+                pads_end,
+                dilations);
             replace_node(node, replacement_node);
             return true;
         }
@@ -199,11 +200,9 @@ namespace
             auto pads_end = node->get_padding_above_forward();
             auto data_dilation_strides = node->get_data_dilation_strides_forward();

-            bool is_dds_valid = true;
-            for (auto value : data_dilation_strides)
-            {
-                is_dds_valid = is_dds_valid && (value == 1);
-            }
+            bool is_dds_valid = all_of(data_dilation_strides.begin(),
+                                       data_dilation_strides.end(),
+                                       [](size_t value) { return value == 1; });

             NGRAPH_CHECK(
                 is_dds_valid,
......
@@ -1147,9 +1147,30 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
             auto dilations = node_js.at("dilations").get<vector<size_t>>();
             auto pads_begin = node_js.at("pads_begin").get<vector<std::ptrdiff_t>>();
             auto pads_end = node_js.at("pads_end").get<vector<std::ptrdiff_t>>();
-            node = make_shared<op::v1::ConvolutionBackpropData>(
-                args[0], args[1], args[2], strides, dilations, pads_begin, pads_end);
+            auto output_padding = node_js.at("output_padding").get<vector<std::ptrdiff_t>>();
+            if (args.size() == 3)
+            {
+                node = make_shared<op::v1::ConvolutionBackpropData>(args[0],
+                                                                    args[1],
+                                                                    args[2],
+                                                                    strides,
+                                                                    pads_begin,
+                                                                    pads_end,
+                                                                    dilations,
+                                                                    read_pad_type(node_js),
+                                                                    output_padding);
+            }
+            else
+            {
+                node = make_shared<op::v1::ConvolutionBackpropData>(args[0],
+                                                                    args[1],
+                                                                    strides,
+                                                                    pads_begin,
+                                                                    pads_end,
+                                                                    dilations,
+                                                                    read_pad_type(node_js),
+                                                                    output_padding);
+            }
             break;
         }
         case OP_TYPEID::ConvolutionBackpropFilters:
@@ -3149,11 +3170,12 @@ json JSONSerializer::serialize_node(const Node& n)
     case OP_TYPEID::ConvolutionBackpropData_v1:
     {
         auto tmp = static_cast<const op::v1::ConvolutionBackpropData*>(&n);
-        node["data_batch_shape"] = tmp->get_data_batch_shape();
         node["strides"] = tmp->get_strides();
         node["dilations"] = tmp->get_dilations();
         node["pads_begin"] = tmp->get_pads_begin();
         node["pads_end"] = tmp->get_pads_end();
+        node["auto_pad"] = tmp->get_auto_pad();
+        node["output_padding"] = tmp->get_output_padding();
         break;
     }
     case OP_TYPEID::ConvolutionBackpropFilters:
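Because deserialization now branches on `args.size()`, a round trip through the serializer preserves both the two-input and three-input forms. A brief sketch using nGraph's serialize/deserialize helpers (wrapper function is illustrative):

```
#include <ngraph/serializer.hpp>

std::shared_ptr<ngraph::Function> round_trip(std::shared_ptr<ngraph::Function> f)
{
    // serialize_node() above writes "auto_pad" and "output_padding";
    // deserialize_node() reads them back and picks the 2- or 3-input constructor.
    const std::string js = ngraph::serialize(f);
    return ngraph::deserialize(js);
}
```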
......
@@ -154,9 +154,9 @@ NGRAPH_TEST(${BACKEND_NAME}, dyn_convolution_backprop_data)
     auto padding_end = CoordinateDiff{0, 0};

     auto conv1 = make_shared<op::v1::ConvolutionBackpropData>(
-        filters, deltas, data_batch_shape, strides, dilations, padding_begin, padding_end);
+        deltas, filters, data_batch_shape, strides, padding_begin, padding_end, dilations);

-    auto f = make_shared<Function>(conv1, ParameterVector{filters, deltas, data_batch_shape});
+    auto f = make_shared<Function>(conv1, ParameterVector{deltas, filters, data_batch_shape});

     auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
@@ -178,10 +178,10 @@ NGRAPH_TEST(${BACKEND_NAME}, dyn_convolution_backprop_data)
     vector<int64_t> shapes = {2, 3, 5, 5};

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_filter);
-    copy_data(a, filter);
-    auto b = backend->create_tensor(element::f32, shape_delta);
-    copy_data(b, delta);
+    auto a = backend->create_tensor(element::f32, shape_delta);
+    copy_data(a, delta);
+    auto b = backend->create_tensor(element::f32, shape_filter);
+    copy_data(b, filter);
     auto c = backend->create_tensor(element::i64, Shape{shapes.size()}); // dynamic data batch shape
     copy_data(c, shapes);

     handle->call_with_validate({result}, {a, b, c});
......
@@ -87,7 +87,7 @@ TEST(opset_transform, opset1_convolution_backprop_data_downgrade_pass)
     auto padding_end = CoordinateDiff{3};

     auto conv = make_shared<op::v1::ConvolutionBackpropData>(
-        filters, delta, data_batch_shape, strides, dilations, padding_begin, padding_end);
+        delta, filters, data_batch_shape, strides, padding_begin, padding_end, dilations);
     auto result = make_shared<op::Result>(conv);
     auto f = make_shared<Function>(ResultVector{result}, ParameterVector{filters, delta});
......
@@ -2836,7 +2836,7 @@ TEST(type_prop, conv_bprop_data_v1_output_partial_shape_dynamic)
     auto padding_end = CoordinateDiff{0, 0};

     auto conv1 = make_shared<op::v1::ConvolutionBackpropData>(
-        filters, deltas, data_batch_shape, strides, dilations, padding_begin, padding_end);
+        deltas, filters, data_batch_shape, strides, padding_begin, padding_end, dilations);

     ASSERT_TRUE(conv1->get_output_partial_shape(0).is_dynamic());
 }
......