Commit 599f0f21 authored by Adam Rogowiec, committed by Scott Cyphers

[ONNX] Use nGraph auto padding in ONNX operators. (#3175)

* Helper function converting ONNX auto_pad into nGraph PadType.

Separate auto_pad support from explicitly provided paddings.

* Add support for more PadType values for GroupConvolutionTranspose.

* Pass auto_pad attribute value to nGraph operator.

* Helper class for generating ONNX pooling operators.

* Pass auto pad type to nGraph Convolution operator.

* Use pooling factory.

* Helper function calculating pads taking into account auto_pad attribute.

* Fix attribute type in UT ONNX models.

* Take auto_pad attribute value into account.

* Rename helper function and update doc.

* Retain old API for GroupConvolutionTranspose.

* Remove PadType::INVALID and use another approach to validate the auto_pad
value.

* Apply style.

* Fix clang error on documentation style.

* Refactor get_auto_pad method.

* Fix segfaults on CentOS 7.

When using a const reference, the node member m_attributes held invalid
data.
parent 7ad4d5c1
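As an illustration of the CentOS 7 fix described above, here is a minimal standalone sketch (my reconstruction, not the actual nGraph code) of the const-reference pitfall: a member bound to a temporary's data dangles once the temporary is destroyed, while storing the node by value — as PoolingFactory now does with m_onnx_node — is safe:

```cpp
#include <iostream>
#include <string>

struct Node { std::string m_attributes = "auto_pad: SAME_UPPER"; };

// Dangerous: keeps only a reference; dangles if constructed from a temporary.
struct RefFactory
{
    explicit RefFactory(const Node& node) : m_node{node} {}
    const Node& m_node;
};

// Safe: owns a copy of the node, as PoolingFactory does after this commit.
struct ValueFactory
{
    explicit ValueFactory(const Node& node) : m_node{node} {}
    Node m_node;
};

int main()
{
    ValueFactory safe{Node{}}; // the temporary dies here, but we own a copy
    std::cout << safe.m_node.m_attributes << '\n';
    // RefFactory unsafe{Node{}}; // m_node would dangle past this statement
}
```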
......@@ -204,6 +204,8 @@ add_library(onnx_import STATIC
utils/convpool.hpp
utils/matmul_factory.cpp
utils/matmul_factory.hpp
utils/pooling_factory.cpp
utils/pooling_factory.hpp
utils/reduction.cpp
utils/reduction.hpp
utils/reshape.cpp
......
......@@ -16,7 +16,7 @@
#include "ngraph/node.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "utils/convpool.hpp"
#include "utils/pooling_factory.hpp"
namespace ngraph
{
......@@ -28,7 +28,7 @@ namespace ngraph
{
NodeVector average_pool(const Node& node)
{
return convpool::make_ng_pool<ngraph::op::AvgPool>(node);
return pooling::PoolingFactory(node).make_pooling_op<ngraph::op::AvgPool>();
}
} // namespace set_1
......
......@@ -26,6 +26,7 @@
#include "ngraph/op/concat.hpp"
#include "ngraph/op/convolution.hpp"
#include "ngraph/op/slice.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/op/util/broadcasting.hpp"
#include "op/conv.hpp"
......@@ -46,7 +47,8 @@ namespace ngraph
const ngraph::Strides& dilations,
const ngraph::CoordinateDiff& padding_below,
const ngraph::CoordinateDiff& padding_above,
int groups)
int groups,
const ngraph::op::PadType& auto_pad)
{
if (groups > 1)
{
......@@ -85,7 +87,9 @@ namespace ngraph
strides,
dilations,
padding_below,
padding_above));
padding_above,
Strides{},
auto_pad));
}
std::size_t concatenation_axis = 1;
return std::make_shared<ngraph::op::Concat>(convolution_nodes,
......@@ -93,8 +97,14 @@ namespace ngraph
}
else
{
return std::make_shared<ngraph::op::Convolution>(
data, filters, strides, dilations, padding_below, padding_above);
return std::make_shared<ngraph::op::Convolution>(data,
filters,
strides,
dilations,
padding_below,
padding_above,
Strides{},
auto_pad);
}
}
......@@ -126,11 +136,18 @@ namespace ngraph
auto strides = convpool::get_strides(node);
auto dilations = convpool::get_dilations(node);
auto paddings = convpool::get_pads(node);
ngraph::op::PadType auto_pad_type = convpool::get_auto_pad(node);
const auto& padding_below = paddings.first;
const auto& padding_above = paddings.second;
auto conv_node = make_ng_convolution(
data, filters, strides, dilations, padding_below, padding_above, groups);
auto conv_node = make_ng_convolution(data,
filters,
strides,
dilations,
padding_below,
padding_above,
groups,
auto_pad_type);
// no bias param
if (inputs.size() < 3)
......
......@@ -19,6 +19,7 @@
#include "ngraph/frontend/onnx_import/exceptions.hpp"
#include "ngraph/frontend/onnx_import/utils/convpool.hpp"
#include "ngraph/op/quantized_convolution.hpp"
#include "ngraph/op/util/attr_types.hpp"
using namespace ngraph::builder;
......@@ -45,8 +46,17 @@ namespace ngraph
auto window_movement_strides = convpool::get_strides(node);
auto window_dilation_strides = convpool::get_dilations(node);
auto paddings = convpool::get_pads(node);
const auto& padding_below = paddings.first;
const auto& padding_above = paddings.second;
ngraph::op::PadType auto_pad_type = convpool::get_auto_pad(node);
auto& padding_below = paddings.first;
auto& padding_above = paddings.second;
convpool::calculate_auto_pads(input->get_shape(),
filters->get_shape(),
window_movement_strides,
window_dilation_strides,
auto_pad_type,
padding_below,
padding_above);
const Strides default_data_dilation_strides(input->get_shape().size() - 2, 1);
auto scale_one = make_constant(ngraph::element::f32, Shape{}, 1);
auto input_zero_point = make_constant(input->get_element_type(), Shape{}, 0);
......
......@@ -26,6 +26,7 @@
#include "ngraph/frontend/onnx_import/utils/convpool.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/fused/group_conv_transpose.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/op/util/broadcasting.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/strides.hpp"
......@@ -51,6 +52,7 @@ namespace ngraph
auto strides = convpool::get_strides(node);
auto dilations = convpool::get_dilations(node);
auto paddings = convpool::get_pads(node);
ngraph::op::PadType auto_pad_type = convpool::get_auto_pad(node);
CoordinateDiff padding_below = paddings.first;
CoordinateDiff padding_above = paddings.second;
......@@ -100,7 +102,8 @@ namespace ngraph
padding_below,
padding_above,
CoordinateDiff(std::begin(output_padding), std::end(output_padding)),
groups);
groups,
auto_pad_type);
}
// no bias param
......
......@@ -14,10 +14,9 @@
// limitations under the License.
//*****************************************************************************
#include "ngraph/node.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "utils/convpool.hpp"
#include "utils/pooling_factory.hpp"
namespace ngraph
{
......@@ -29,7 +28,8 @@ namespace ngraph
{
NodeVector global_average_pool(const Node& node)
{
return convpool::make_ng_pool<ngraph::op::AvgPool>(node);
return pooling::GlobalPoolingFactory(node)
.make_pooling_op<ngraph::op::AvgPool>();
}
} // namespace set_1
......
......@@ -14,10 +14,9 @@
// limitations under the License.
//*****************************************************************************
#include "ngraph/node.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/max_pool.hpp"
#include "utils/convpool.hpp"
#include "utils/pooling_factory.hpp"
namespace ngraph
{
......@@ -29,7 +28,8 @@ namespace ngraph
{
NodeVector global_max_pool(const Node& node)
{
return convpool::make_ng_pool<ngraph::op::MaxPool>(node);
return pooling::GlobalPoolingFactory(node)
.make_pooling_op<ngraph::op::MaxPool>();
}
} // namespace set_1
......
......@@ -17,7 +17,7 @@
#include "ngraph/op/max_pool.hpp"
#include "core/null_node.hpp"
#include "ngraph/node.hpp"
#include "utils/convpool.hpp"
#include "utils/pooling_factory.hpp"
namespace ngraph
{
......@@ -29,7 +29,8 @@ namespace ngraph
{
NodeVector max_pool(const Node& node)
{
auto max_pool = convpool::make_ng_pool<ngraph::op::MaxPool>(node);
auto max_pool =
pooling::PoolingFactory(node).make_pooling_op<ngraph::op::MaxPool>();
max_pool.emplace_back(std::make_shared<NullNode>()); // Indices (optional)
return max_pool;
}
......
......@@ -30,6 +30,7 @@
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/quantized_convolution.hpp"
#include "ngraph/op/slice.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/op/util/broadcasting.hpp"
#include "ngraph/strides.hpp"
#include "quant_conv.hpp"
......@@ -224,8 +225,16 @@ namespace ngraph
Strides filter_dilations = convpool::get_dilations(node);
Strides data_dilations = Strides(convpool::get_kernel_shape(node).size(), 1UL);
auto paddings = convpool::get_pads(node);
const CoordinateDiff& padding_below = paddings.first;
const CoordinateDiff& padding_above = paddings.second;
ngraph::op::PadType auto_pad_type = convpool::get_auto_pad(node);
CoordinateDiff& padding_below = paddings.first;
CoordinateDiff& padding_above = paddings.second;
convpool::calculate_auto_pads(data->get_shape(),
filters->get_shape(),
strides,
filter_dilations,
auto_pad_type,
padding_below,
padding_above);
std::shared_ptr<ngraph::Node> conv_node = nullptr;
......
......@@ -15,12 +15,16 @@
//*****************************************************************************
#include <cmath>
#include <string>
#include <unordered_map>
#include "convpool.hpp"
#include "core/attribute.hpp"
#include "core/node.hpp"
#include "exceptions.hpp"
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/validation_util.hpp"
namespace ngraph
{
......@@ -61,119 +65,79 @@ namespace ngraph
return detail::get_strides_helper(node, "dilations", get_kernel_shape(node));
}
namespace
ngraph::op::PadType get_auto_pad(const Node& node)
{
Shape get_output_data_shape(const Shape& input, const Strides& strides)
// Default value means use explicitly provided padding values.
ngraph::op::PadType pad_type{ngraph::op::PadType::NOTSET};
if (node.has_attribute("auto_pad"))
{
Shape output;
for (std::size_t idx = 0; idx < input.size(); ++idx)
{
output.emplace_back(std::ceil(static_cast<float>(input.at(idx)) /
static_cast<float>(strides.at(idx))));
}
return output;
}
static std::unordered_multimap<std::string, ngraph::op::PadType>
auto_pad_values{
{"VALID", ngraph::op::PadType::VALID},
{"SAME_UPPER", ngraph::op::PadType::SAME_UPPER},
{"SAME_LOWER", ngraph::op::PadType::SAME_LOWER},
{"NOTSET", ngraph::op::PadType::NOTSET},
{"", ngraph::op::PadType::NOTSET},
};
Shape get_pad_shape(const Shape& input,
const Shape& kernel,
const Shape& strides,
const Shape& output)
{
Shape pad_shape;
for (std::size_t idx = 0; idx < input.size(); ++idx)
{
// for `SAME` padding the formula is: max((output - 1) * strides[idx] + kernel - input, 0)
// Element type of shape is unsigned long.
// During pad computation we can get a negative value as the result.
// During max computation unsigned long(-1) is greater than 0,
// so std::max won't work correctly without casting.
pad_shape.emplace_back(
std::max(static_cast<long>((output.at(idx) - 1) * strides.at(idx) +
kernel.at(idx) - input.at(idx)),
0L));
}
return pad_shape;
}
CoordinateDiff get_auto_pads(const Shape& input_shape,
const Shape& kernel_shape,
const Strides& strides,
const std::string& auto_pad)
{
if (auto_pad == "VALID")
{
return CoordinateDiff(input_shape.size());
}
CoordinateDiff pads_begin;
CoordinateDiff pads_end;
// Omit {N,C} axes
Shape input_spatial_shape{std::next(std::begin(input_shape), 2),
std::end(input_shape)};
// Assume that {input_spatial_shape, kernel_shape, strides} all have
// the same size.
const Shape& output_spatial_shape =
get_output_data_shape(input_spatial_shape, strides);
const Shape& pad_shape = get_pad_shape(
input_spatial_shape, kernel_shape, strides, output_spatial_shape);
if (auto_pad == "SAME_UPPER")
{
for (size_t pad : pad_shape)
{
// Integer division
pads_begin.emplace_back(pad / 2);
pads_end.emplace_back(pad - pads_begin.back());
}
}
else if (auto_pad == "SAME_LOWER")
{
for (size_t pad : pad_shape)
{
// Integer division
pads_end.emplace_back(pad / 2);
pads_begin.emplace_back(pad - pads_end.back());
}
}
CoordinateDiff pads{pads_begin};
pads.insert(std::end(pads), std::begin(pads_end), std::end(pads_end));
return pads;
const std::string& pad_str{node.get_attribute_value<std::string>("auto_pad")};
const auto pad_val_it = auto_pad_values.find(pad_str);
CHECK_VALID_NODE(node,
pad_val_it != auto_pad_values.end(),
"Provided `auto_pad` attribute value: '",
pad_str,
"' is invalid.");
pad_type = pad_val_it->second;
}
} // namespace
return pad_type;
}
std::pair<CoordinateDiff, CoordinateDiff> get_pads(const Node& node,
const Shape& kernel_shape)
{
CoordinateDiff pads;
try
CoordinateDiff pads(kernel_shape.size(), 0);
if (node.has_attribute("pads"))
{
auto pads_int64 = node.get_attribute_value<std::vector<int64_t>>("pads");
pads = CoordinateDiff{std::begin(pads_int64), std::end(pads_int64)};
}
catch (const error::node::UnknownAttribute&)
{
std::string auto_pad{node.get_attribute_value<std::string>("auto_pad", "")};
if (!auto_pad.empty())
{
pads = get_auto_pads(node.get_ng_inputs().at(0)->get_shape(),
kernel_shape,
get_strides(node),
auto_pad);
}
}
if (pads.empty())
if (pads.size() == kernel_shape.size() * 2)
{
pads = CoordinateDiff(static_cast<std::ptrdiff_t>(kernel_shape.size()), 0UL);
return {{std::begin(pads), std::begin(pads) + pads.size() / 2},
{std::begin(pads) + pads.size() / 2, std::end(pads)}};
}
if (pads.size() != kernel_shape.size() * 2)
else
{
// Paddings specified in (H, W, C) format.
// No padding values provided, or values provided for one side only, which means the same
// padding at both the begin and end of each axis.
return {pads, pads};
}
else
}
void calculate_auto_pads(const Shape& data_shape,
const Shape& filter_shape,
const Strides& strides,
const Strides& dilations,
const ngraph::op::PadType& pad_type,
CoordinateDiff& padding_below,
CoordinateDiff& padding_above)
{
if (pad_type == ngraph::op::PadType::SAME_UPPER ||
pad_type == ngraph::op::PadType::SAME_LOWER)
{
return {{std::begin(pads), std::begin(pads) + pads.size() / 2},
{std::begin(pads) + pads.size() / 2, std::end(pads)}};
padding_below.clear();
padding_above.clear();
// Extract kernel shape - remove (N,C) channels
Shape kernel_shape(std::next(std::begin(filter_shape), 2),
std::end(filter_shape));
ngraph::infer_auto_padding(data_shape,
kernel_shape,
strides,
dilations,
pad_type,
padding_above,
padding_below);
}
}
......
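The hunk above replaces the hand-rolled SAME-padding computation with ngraph::infer_auto_padding. As a worked example of the arithmetic, here is a standalone sketch assuming the standard ONNX formula (the dilation handling is my assumption about infer_auto_padding's internals):

```cpp
#include <algorithm>
#include <iostream>

int main()
{
    const long input = 5, kernel = 3, stride = 2, dilation = 1;
    const long output = (input + stride - 1) / stride;   // ceil(5 / 2) = 3
    const long eff_kernel = (kernel - 1) * dilation + 1; // dilated kernel extent
    const long total =
        std::max((output - 1) * stride + eff_kernel - input, 0L); // 2
    // SAME_UPPER puts the extra unit of an odd total after the data,
    // SAME_LOWER puts it before.
    std::cout << "SAME_UPPER: " << total / 2 << ", " << total - total / 2 << '\n'; // 1, 1
    std::cout << "SAME_LOWER: " << total - total / 2 << ", " << total / 2 << '\n'; // 1, 1
}
```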
......@@ -22,7 +22,9 @@
#include "core/node.hpp"
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/strides.hpp"
namespace ngraph
{
......@@ -57,8 +59,7 @@ namespace ngraph
Strides get_dilations(const Node& node);
/// \brief Get padding values for the operation described by an ONNX node.
/// \details If `auto_pad` attribute is specified as SAME_UPPER or SAME_LOWER, or VALID
/// values are calculated. Otherwise values are taken from the `pads` attribute.
/// \details Values are taken from the `pads` attribute.
///
/// `pads` value should follow [x1_begin, x2_begin..., x1_end, x2_end,...].
///
......@@ -71,8 +72,7 @@ namespace ngraph
const Shape& kernel_shape);
/// \brief Get padding values for the operation described by an ONNX node.
/// \details If `auto_pad` attribute is specified as SAME_UPPER or SAME_LOWER, or VALID
/// values are calculated. Otherwise values are taken from the `pads` attribute.
/// \details Values are taken from the `pads` attribute.
///
/// `pads` value should follow [x1_begin, x2_begin..., x1_end, x2_end,...].
///
......@@ -86,57 +86,33 @@ namespace ngraph
return get_pads(node, get_kernel_shape(node));
}
/// \brief Create an nGraph pooling operation based on an ONNX pooling op.
///
/// \tparam T Class of an nGraph pooling operation (e.g. AveragePool, MaxPool)
/// \param node incoming ONNX operation
/// \return nGraph node equivalent of the ONNX operation
template <class T>
inline NodeVector make_ng_pool(const Node& node)
{
// Fetch input node for the pooling operation
auto data = node.get_ng_inputs().at(0);
// Parse ONNX op attributes
Shape kernel_shape;
if (node.op_type().find("Global") != std::string::npos)
{
kernel_shape = node.get_ng_inputs()[0]->get_shape();
// Remove N and C dimensions and leave only spatial dims.
kernel_shape.erase(std::begin(kernel_shape),
std::next(std::begin(kernel_shape), 2));
}
else
{
kernel_shape = convpool::get_kernel_shape(node);
}
auto strides = convpool::get_strides(node);
auto dilations = convpool::get_dilations(node);
auto paddings = convpool::get_pads(node);
bool count_include_pad = node.get_attribute_value<int64_t>("count_include_pad", 0);
// Convert padding from CoordinateDiff to Shape objects
const CoordinateDiff& padding_above{paddings.second};
const CoordinateDiff& padding_below{paddings.first};
Shape padding_below_shape{std::begin(padding_below), std::end(padding_below)};
Shape padding_above_shape{std::begin(padding_above), std::end(padding_above)};
if (count_include_pad)
{
return {std::make_shared<ngraph::op::AvgPool>(data,
kernel_shape,
strides,
padding_below_shape,
padding_above_shape,
count_include_pad)};
}
else
{
return {std::make_shared<T>(
data, kernel_shape, strides, padding_below_shape, padding_above_shape)};
}
}
/// \brief Calculate paddings with respect to auto_pad value.
///
/// \param[in] data_shape The input data tensor shape.
/// \param[in] filter_shape The input filters tensor shape.
/// \param[in] strides The data strides.
/// \param[in] dilations The data dilations.
/// \param[in] pad_type The value of auto_pad attribute.
/// \param[in,out] padding_below The paddings below axis.
/// \param[in,out] padding_above The paddings above axis.
///
/// \see ngraph::op::PadType
void calculate_auto_pads(const Shape& data_shape,
const Shape& filter_shape,
const Strides& strides,
const Strides& dilations,
const ngraph::op::PadType& pad_type,
CoordinateDiff& padding_below,
CoordinateDiff& padding_above);
/// \brief Gets the 'auto_pad' attribute value.
///
/// \param[in] node The ONNX node we query for attribute.
///
/// \return The nGraph PadType object representing 'auto_pad' attribute value.
///
ngraph::op::PadType get_auto_pad(const Node& node);
} // namespace convpool
......
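A self-contained sketch of the [x1_begin, x2_begin, ..., x1_end, x2_end, ...] splitting convention that get_pads documents above (illustrative types, not nGraph's CoordinateDiff):

```cpp
#include <iostream>
#include <utility>
#include <vector>

using Pads = std::vector<long>;

// Split ONNX's flat pads vector into a (below, above) pair, one value per axis.
std::pair<Pads, Pads> split_pads(const Pads& pads)
{
    const auto half = pads.begin() + pads.size() / 2;
    return {{pads.begin(), half}, {half, pads.end()}};
}

int main()
{
    // Two spatial axes: begin pads {1, 2}, end pads {3, 4}.
    const auto result = split_pads({1, 2, 3, 4});
    std::cout << result.first[0] << result.first[1] << ' '
              << result.second[0] << result.second[1] << '\n'; // 12 34
}
```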
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <iterator>
#include "ngraph/coordinate_diff.hpp"
#include "utils/convpool.hpp"
#include "utils/pooling_factory.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace pooling
{
PoolingFactory::PoolingFactory(const Node& node)
: m_onnx_node{node}
, m_inputs{node.get_ng_inputs()}
, m_kernel_shape{convpool::get_kernel_shape(node)}
, m_strides{convpool::get_strides(node)}
, m_dilations{convpool::get_dilations(node)}
, m_auto_pad{convpool::get_auto_pad(node)}
{
auto paddings = convpool::get_pads(node);
const CoordinateDiff& padding_above{paddings.second};
const CoordinateDiff& padding_below{paddings.first};
m_padding_below = Shape{std::begin(padding_below), std::end(padding_below)};
m_padding_above = Shape{std::begin(padding_above), std::end(padding_above)};
}
template <>
NodeVector PoolingFactory::make_pooling_op<ngraph::op::AvgPool>() const
{
bool count_include_pad =
m_onnx_node.get_attribute_value<std::int64_t>("count_include_pad", 0);
return {std::make_shared<ngraph::op::AvgPool>(m_inputs.at(0),
m_kernel_shape,
m_strides,
m_padding_below,
m_padding_above,
count_include_pad,
m_auto_pad)};
}
GlobalPoolingFactory::GlobalPoolingFactory(const Node& node)
: PoolingFactory(node)
{
// Correct the kernel shape.
const Shape& data_shape{m_inputs.at(0)->get_shape()};
// Set shape to all but {N,C} axes.
m_kernel_shape = Shape{std::next(std::begin(data_shape), 2), std::end(data_shape)};
}
} // namespace pooling
} // namespace onnx_import
} // namespace ngraph
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <memory>
#include <type_traits>
#include "core/node.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/strides.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace pooling
{
///
/// \brief Factory class which generates sub-graphs for ONNX 'regular' pooling operators.
///
/// \note This factory is intended for creating pooling operations like:
/// - AveragePool
/// - MaxPool
///
/// This base class holds all common attributes like strides, dilations,
/// paddings, kernel shape and auto_pad type.
///
/// \see GlobalPoolingFactory
class PoolingFactory
{
public:
explicit PoolingFactory(const Node& node);
virtual ~PoolingFactory() = default;
///
/// \brief Creates a sub-graph representing the appropriate ONNX operation.
///
/// \tparam NgraphOperator nGraph operator class type used to build the ONNX operation.
///
/// \return Vector of output nodes.
///
template <typename NgraphOperator>
NodeVector make_pooling_op() const
{
return {std::make_shared<NgraphOperator>(m_inputs.at(0),
m_kernel_shape,
m_strides,
m_padding_below,
m_padding_above,
m_auto_pad)};
}
protected:
Node m_onnx_node;
const NodeVector m_inputs;
Shape m_kernel_shape;
Strides m_strides;
Strides m_dilations;
Shape m_padding_below;
Shape m_padding_above;
ngraph::op::PadType m_auto_pad;
};
// AvgPool accepts some additional parameters, thus we have a specialization for it.
template <>
NodeVector PoolingFactory::make_pooling_op<ngraph::op::AvgPool>() const;
///
/// \brief Factory class which generates sub-graphs for ONNX 'global' pooling operators.
///
class GlobalPoolingFactory : public PoolingFactory
{
public:
explicit GlobalPoolingFactory(const Node& node);
virtual ~GlobalPoolingFactory() = default;
};
} // namespace pooling
} // namespace onnx_import
} // namespace ngraph
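To illustrate the design, a self-contained sketch of the pattern used by PoolingFactory (simplified stand-in types, not the nGraph API): a template member builds the common case, and a full specialization covers the one operator that takes extra arguments:

```cpp
#include <iostream>
#include <string>

struct MaxPool { static constexpr const char* name = "MaxPool"; };
struct AvgPool { static constexpr const char* name = "AvgPool"; };

class PoolingFactory
{
public:
    explicit PoolingFactory(std::string kernel) : m_kernel{std::move(kernel)} {}

    // Common case: every pooling op is built from the shared attributes.
    template <typename Op>
    std::string make_pooling_op() const
    {
        return std::string{Op::name} + "(" + m_kernel + ")";
    }

private:
    std::string m_kernel;
};

// AvgPool takes an extra parameter (count_include_pad), hence a specialization.
template <>
std::string PoolingFactory::make_pooling_op<AvgPool>() const
{
    return std::string{AvgPool::name} + "(" + m_kernel + ", count_include_pad)";
}

int main()
{
    const PoolingFactory factory{"3x3"};
    std::cout << factory.make_pooling_op<MaxPool>() << '\n'; // MaxPool(3x3)
    std::cout << factory.make_pooling_op<AvgPool>() << '\n'; // AvgPool(3x3, count_include_pad)
}
```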
......@@ -128,9 +128,6 @@ void op::GroupConvolutionTranspose::pre_validate_and_infer_types()
NODE_VALIDATION_CHECK(this,
n_data_channels % m_groups == 0,
"Number of data channels not a multiple of group size.");
// padding type
NODE_VALIDATION_CHECK(
this, m_pad_type == PadType::EXPLICIT, "Currently only explicit pad type is supported.");
if (m_padding_begin.size() == 0)
{
......@@ -192,6 +189,30 @@ void op::GroupConvolutionTranspose::pre_validate_and_infer_types()
}
}
void op::GroupConvolutionTranspose::post_validate_and_infer_types()
{
auto data_shape = get_input_partial_shape(0);
auto filters_shape = get_input_partial_shape(1);
if (data_shape.is_static() && filters_shape.is_static())
{
if (m_pad_type == PadType::SAME_UPPER || m_pad_type == PadType::SAME_LOWER)
{
m_padding_begin.clear();
m_padding_end.clear();
auto filter_shape = filters_shape.to_shape();
// Extract kernel shape
filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2);
infer_auto_padding(data_shape.to_shape(),
filter_shape,
m_strides,
m_dilations,
m_pad_type,
m_padding_end,
m_padding_begin);
}
}
}
shared_ptr<Node> op::GroupConvolutionTranspose::copy_with_new_args(const NodeVector& new_args) const
{
check_new_args_count(this, new_args);
......
......@@ -65,10 +65,10 @@ namespace ngraph
///
/// \brief Constructs GroupConvolutionTranspose operation.
///
/// \param[in] data The node producing input data.
/// \param[in] filters The node producing filters data.
/// \param[in] groups The number of groups the input channels and output channels
/// are divided into.
/// \param[in] data The node producing input data.
/// \param[in] filters The node producing filters data.
/// \param[in] groups The number of groups the input channels and output channels
/// are divided into.
///
GroupConvolutionTranspose(const std::shared_ptr<Node>& data,
const std::shared_ptr<Node>& filters,
......@@ -110,8 +110,8 @@ namespace ngraph
const Shape& output_shape,
const std::size_t groups = 1UL);
std::shared_ptr<Node> get_filters() { return get_argument(1); }
std::shared_ptr<Node> get_data() { return get_argument(0); }
std::shared_ptr<Node> get_filters() { return get_argument(1); }
const Strides& get_strides() const { return m_strides; }
const Strides& get_dilations() const { return m_dilations; }
const CoordinateDiff& get_padding_begin() const { return m_padding_begin; }
......@@ -121,6 +121,7 @@ namespace ngraph
const PadType& get_pad_type() const { return m_pad_type; }
const Shape& get_output_shape() const { return m_output_shape; }
virtual void pre_validate_and_infer_types() override;
virtual void post_validate_and_infer_types() override;
virtual NodeVector decompose_op() const override;
virtual std::shared_ptr<Node>
......
......@@ -50,7 +50,7 @@ namespace ngraph
SAME_UPPER,
VALID,
AUTO = SAME_UPPER,
NOTSET = EXPLICIT
NOTSET = EXPLICIT,
};
/// \brief Specifies the algorithm to use for implicit broadcasting of a tensor
......
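A small sketch of the aliasing used in this enum (the enumerator order before the hunk is my assumption): NOTSET and EXPLICIT, as well as AUTO and SAME_UPPER, compare equal, which is what lets get_auto_pad return NOTSET while the rest of nGraph checks for EXPLICIT:

```cpp
#include <iostream>

enum class PadType
{
    EXPLICIT = 0,
    SAME_LOWER,
    SAME_UPPER,
    VALID,
    AUTO = SAME_UPPER,  // alias
    NOTSET = EXPLICIT,  // alias
};

int main()
{
    std::cout << (PadType::NOTSET == PadType::EXPLICIT) << '\n'; // 1
    std::cout << (PadType::AUTO == PadType::SAME_UPPER) << '\n'; // 1
}
```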
......@@ -8,7 +8,7 @@ graph {
op_type: "Conv"
attribute {
name: "auto_pad"
type: FLOATS
type: STRING
}
attribute {
name: "dilations"
......
......@@ -9,7 +9,7 @@ graph {
op_type: "Conv"
attribute {
name: "auto_pad"
type: FLOATS
type: STRING
}
attribute {
name: "dilations"
......
......@@ -131,26 +131,6 @@ TEST(type_prop, group_conv_transpose_invalid_params)
std::string("data channels"));
}
try
{
const auto gct = make_shared<op::GroupConvolutionTranspose>(data,
weights,
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{2, 2},
CoordinateDiff{2, 2},
CoordinateDiff{0, 0},
4,
op::PadType::SAME_UPPER);
EXPECT_FALSE(gct.get()) << "GroupConvolutionTranspose validation did not work. "
"Node was created with incorrect params.";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Currently only explicit pad type is supported."));
}
try
{
const auto gct = make_shared<op::GroupConvolutionTranspose>(data,
......