Commit cca14ae1 authored by baojun, committed by Jayaram Bobba

Add groupconvolution bprop (#3940)

* add placeholder for conv bprop

* add constructor, api, serializer and can compile

* implement decompose_op

* fix arg num

* fix and update

* address comment, clean up and add ut placeholder

* update ut

* address comment on groups
parent a06b896c
@@ -67,7 +67,7 @@ namespace ngraph
                 const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
                 Output<Node> get_filters() { return input_value(1); }
                 Output<Node> get_data_batch() { return input_value(0); }
-                size_t get_groups() const;
+                size_t get_groups() const { return m_groups; }
                 const PadType& get_pad_type() const { return m_pad_type; }
                 virtual std::shared_ptr<Node>
                     copy_with_new_args(const NodeVector& new_args) const override;
@@ -86,13 +86,100 @@ namespace ngraph
                 CoordinateDiff m_padding_below;
                 CoordinateDiff m_padding_above;
                 Strides m_data_dilation_strides;
-                Dimension m_groups;
+                size_t m_groups;
                 PadType m_pad_type{PadType::NOTSET};

             private:
                 bool has_groups_in_filters_shape() const;
             };
+            /// \brief Group Convolution data batch backprop
+            class NGRAPH_API GroupConvolutionBackpropData : public ngraph::op::util::FusedOp
+            {
+            public:
+                static constexpr NodeTypeInfo type_info{"GroupConvolutionBackpropData", 0};
+                const NodeTypeInfo& get_type_info() const override { return type_info; }
+                GroupConvolutionBackpropData() = default;
+                GroupConvolutionBackpropData(const Output<Node>& data_batch,
+                                             const Output<Node>& filters,
+                                             const Output<Node>& output_delta,
+                                             const Strides& window_movement_strides,
+                                             const Strides& window_dilation_strides,
+                                             const CoordinateDiff& padding_below,
+                                             const CoordinateDiff& padding_above,
+                                             const size_t groups);
+
+                const Strides& get_window_movement_strides() const
+                {
+                    return m_window_movement_strides;
+                }
+                const Strides& get_window_dilation_strides() const
+                {
+                    return m_window_dilation_strides;
+                }
+                const CoordinateDiff& get_padding_below() const { return m_padding_below; }
+                const CoordinateDiff& get_padding_above() const { return m_padding_above; }
+                size_t get_groups() const { return m_groups; }
+                virtual std::shared_ptr<Node>
+                    copy_with_new_args(const NodeVector& new_args) const override;
+
+                virtual NodeVector decompose_op() const override;
+                virtual void pre_validate_and_infer_types() override;
+
+            protected:
+                Strides m_window_movement_strides;
+                Strides m_window_dilation_strides;
+                CoordinateDiff m_padding_below;
+                CoordinateDiff m_padding_above;
+                size_t m_groups;
+            };
+            /// \brief Group Convolution filters backprop
+            class NGRAPH_API GroupConvolutionBackpropFilters : public ngraph::op::util::FusedOp
+            {
+            public:
+                static constexpr NodeTypeInfo type_info{"GroupConvolutionBackpropFilters", 0};
+                const NodeTypeInfo& get_type_info() const override { return type_info; }
+                GroupConvolutionBackpropFilters() = default;
+                GroupConvolutionBackpropFilters(const Output<Node>& data_batch,
+                                                const Output<Node>& filters,
+                                                const Output<Node>& output_delta,
+                                                const Strides& window_movement_strides,
+                                                const Strides& window_dilation_strides,
+                                                const CoordinateDiff& padding_below,
+                                                const CoordinateDiff& padding_above,
+                                                const size_t groups);
+
+                const Strides& get_window_movement_strides() const
+                {
+                    return m_window_movement_strides;
+                }
+                const Strides& get_window_dilation_strides() const
+                {
+                    return m_window_dilation_strides;
+                }
+                const CoordinateDiff& get_padding_below() const { return m_padding_below; }
+                const CoordinateDiff& get_padding_above() const { return m_padding_above; }
+                size_t get_groups() const { return m_groups; }
+                virtual std::shared_ptr<Node>
+                    copy_with_new_args(const NodeVector& new_args) const override;
+
+                virtual NodeVector decompose_op() const override;
+                virtual void pre_validate_and_infer_types() override;
+
+            protected:
+                Strides m_window_movement_strides;
+                Strides m_window_dilation_strides;
+                CoordinateDiff m_padding_below;
+                CoordinateDiff m_padding_above;
+                size_t m_groups;
+            };
         }

         using v0::GroupConvolution;
+        using v0::GroupConvolutionBackpropData;
+        using v0::GroupConvolutionBackpropFilters;
     }
 }
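Both new ops are FusedOps, so their actual semantics live in decompose_op(), which the header only declares. As a reading aid, the sketch below shows what a per-group decomposition of GroupConvolutionBackpropData amounts to in v0 terms: slice the filters and the output delta per group along their channel axes, emit a plain ConvolutionBackpropData for each group, and concatenate the per-group data gradients back along the channel axis. This is illustrative only, not the committed implementation; it assumes 4-D NCHW data with OIHW filters, and the helper name is made up.

// Illustrative sketch only -- not the committed decompose_op().
// Assumes 4-D NCHW data and OIHW filters; uses v0 Slice/Concat/ConvolutionBackpropData.
NodeVector group_conv_bprop_data_sketch(const Output<Node>& data_batch,
                                        const Output<Node>& filters,
                                        const Output<Node>& output_delta,
                                        const Strides& strides,
                                        const Strides& dilations,
                                        const CoordinateDiff& padding_below,
                                        const CoordinateDiff& padding_above,
                                        size_t groups)
{
    Shape data_shape = data_batch.get_shape();    // {N, C, H, W}
    Shape filters_shape = filters.get_shape();    // {Co, C/g, kH, kW}
    Shape delta_shape = output_delta.get_shape(); // {N, Co, Ho, Wo}

    size_t co_per_group = filters_shape.at(0) / groups;
    Shape group_data_shape = data_shape;
    group_data_shape.at(1) /= groups; // each group reconstructs C/g input channels

    NodeVector per_group;
    for (size_t g = 0; g < groups; ++g)
    {
        // Filters belonging to group g: slice along the output-channel axis (0).
        auto f_slice = std::make_shared<op::Slice>(
            filters,
            Coordinate{g * co_per_group, 0, 0, 0},
            Coordinate{(g + 1) * co_per_group,
                       filters_shape.at(1),
                       filters_shape.at(2),
                       filters_shape.at(3)});
        // Matching channels of the output delta: slice along axis 1.
        auto d_slice = std::make_shared<op::Slice>(
            output_delta,
            Coordinate{0, g * co_per_group, 0, 0},
            Coordinate{delta_shape.at(0),
                       (g + 1) * co_per_group,
                       delta_shape.at(2),
                       delta_shape.at(3)});
        // Ordinary (non-grouped) data backprop for this group.
        per_group.push_back(
            std::make_shared<op::ConvolutionBackpropData>(group_data_shape,
                                                          f_slice,
                                                          d_slice,
                                                          strides,
                                                          dilations,
                                                          padding_below,
                                                          padding_above,
                                                          Strides{1, 1}));
    }
    // Stitch the per-group data gradients back together along the channel axis.
    return {std::make_shared<op::Concat>(per_group, 1)};
}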
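GroupConvolutionBackpropFilters decomposes along the same lines, except that the data batch and the delta are the tensors sliced per group, and the per-group filter gradients are concatenated along axis 0 (the filters' output-channel axis). Again a sketch under the same assumptions, not the committed code:

// Illustrative sketch only -- not the committed decompose_op().
NodeVector group_conv_bprop_filters_sketch(const Output<Node>& data_batch,
                                           const Output<Node>& filters,
                                           const Output<Node>& output_delta,
                                           const Strides& strides,
                                           const Strides& dilations,
                                           const CoordinateDiff& padding_below,
                                           const CoordinateDiff& padding_above,
                                           size_t groups)
{
    Shape data_shape = data_batch.get_shape();    // {N, C, H, W}
    Shape filters_shape = filters.get_shape();    // {Co, C/g, kH, kW}
    Shape delta_shape = output_delta.get_shape(); // {N, Co, Ho, Wo}

    size_t c_per_group = data_shape.at(1) / groups;
    size_t co_per_group = delta_shape.at(1) / groups;
    Shape group_filters_shape = filters_shape;
    group_filters_shape.at(0) /= groups; // filter gradient for a single group

    NodeVector per_group;
    for (size_t g = 0; g < groups; ++g)
    {
        // Input channels belonging to group g.
        auto x_slice = std::make_shared<op::Slice>(
            data_batch,
            Coordinate{0, g * c_per_group, 0, 0},
            Coordinate{data_shape.at(0),
                       (g + 1) * c_per_group,
                       data_shape.at(2),
                       data_shape.at(3)});
        // Output channels produced by group g.
        auto d_slice = std::make_shared<op::Slice>(
            output_delta,
            Coordinate{0, g * co_per_group, 0, 0},
            Coordinate{delta_shape.at(0),
                       (g + 1) * co_per_group,
                       delta_shape.at(2),
                       delta_shape.at(3)});
        // Ordinary (non-grouped) filters backprop for this group.
        per_group.push_back(
            std::make_shared<op::ConvolutionBackpropFilters>(x_slice,
                                                             group_filters_shape,
                                                             d_slice,
                                                             strides,
                                                             dilations,
                                                             padding_below,
                                                             padding_above,
                                                             Strides{1, 1}));
    }
    // Per-group filter gradients stack along the output-channel axis.
    return {std::make_shared<op::Concat>(per_group, 0)};
}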
@@ -112,6 +112,8 @@ NGRAPH_OP(Greater, ngraph::op::v1, 1)
 NGRAPH_OP(GreaterEq, ngraph::op::v0, 0)
 NGRAPH_OP(GreaterEqual, ngraph::op::v1, 1)
 NGRAPH_OP(GroupConvolution, ngraph::op::v0, 0)
+NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v0, 0)
+NGRAPH_OP(GroupConvolutionBackpropFilters, ngraph::op::v0, 0)
 NGRAPH_OP(GroupConvolutionTranspose, ngraph::op::v0, 0)
 NGRAPH_OP(HardSigmoid, ngraph::op::v0, 0)
 NGRAPH_OP(Interpolate, ngraph::op::v0, 0)
@@ -117,6 +117,8 @@ NGRAPH_OP(GetOutputElement, ngraph::op)
 NGRAPH_OP(Greater, ngraph::op)
 NGRAPH_OP(GreaterEq, ngraph::op)
 NGRAPH_OP(GroupConvolution, ngraph::op)
+NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op)
+NGRAPH_OP(GroupConvolutionBackpropFilters, ngraph::op)
 NGRAPH_OP(GroupConvolutionTranspose, ngraph::op)
 NGRAPH_OP(HardSigmoid, ngraph::op)
 NGRAPH_OP(Interpolate, ngraph::op)
@@ -1858,6 +1858,8 @@ private:
     case OP_TYPEID::Elu:
     case OP_TYPEID::FakeQuantize:
     case OP_TYPEID::GroupConvolution:
+    case OP_TYPEID::GroupConvolutionBackpropData:
+    case OP_TYPEID::GroupConvolutionBackpropFilters:
     case OP_TYPEID::GRN:
     case OP_TYPEID::GRUCell:
     case OP_TYPEID::Gelu:
@@ -1609,6 +1609,46 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
                                                 pad_type);
         break;
     }
+    case OP_TYPEID::GroupConvolutionBackpropData:
+    {
+        auto window_movement_strides =
+            node_js.at("window_movement_strides").get<vector<size_t>>();
+        auto window_dilation_strides =
+            node_js.at("window_dilation_strides").get<vector<size_t>>();
+        auto padding_below = node_js.at("padding_below").get<vector<std::ptrdiff_t>>();
+        auto padding_above = node_js.at("padding_above").get<vector<std::ptrdiff_t>>();
+        auto groups = node_js.at("groups").get<size_t>();
+        node = make_shared<op::GroupConvolutionBackpropData>(args[0],
+                                                             args[1],
+                                                             args[2],
+                                                             window_movement_strides,
+                                                             window_dilation_strides,
+                                                             padding_below,
+                                                             padding_above,
+                                                             groups);
+        break;
+    }
+    case OP_TYPEID::GroupConvolutionBackpropFilters:
+    {
+        auto window_movement_strides =
+            node_js.at("window_movement_strides").get<vector<size_t>>();
+        auto window_dilation_strides =
+            node_js.at("window_dilation_strides").get<vector<size_t>>();
+        auto padding_below = node_js.at("padding_below").get<vector<std::ptrdiff_t>>();
+        auto padding_above = node_js.at("padding_above").get<vector<std::ptrdiff_t>>();
+        auto groups = node_js.at("groups").get<size_t>();
+        node = make_shared<op::GroupConvolutionBackpropFilters>(args[0],
                                                                args[1],
                                                                args[2],
                                                                window_movement_strides,
                                                                window_dilation_strides,
                                                                padding_below,
                                                                padding_above,
                                                                groups);
+        break;
+    }
     case OP_TYPEID::GroupConvolutionTranspose:
     {
         auto strides = node_js.at("strides").get<vector<size_t>>();
@@ -3538,6 +3578,26 @@ json JSONSerializer::serialize_node(const Node& n)
         node["pad_type"] = tmp->get_pad_type();
         break;
     }
+    case OP_TYPEID::GroupConvolutionBackpropData:
+    {
+        auto tmp = static_cast<const op::GroupConvolutionBackpropData*>(&n);
+        node["window_movement_strides"] = tmp->get_window_movement_strides();
+        node["window_dilation_strides"] = tmp->get_window_dilation_strides();
+        node["padding_below"] = tmp->get_padding_below();
+        node["padding_above"] = tmp->get_padding_above();
+        node["groups"] = tmp->get_groups();
+        break;
+    }
+    case OP_TYPEID::GroupConvolutionBackpropFilters:
+    {
+        auto tmp = static_cast<const op::GroupConvolutionBackpropFilters*>(&n);
+        node["window_movement_strides"] = tmp->get_window_movement_strides();
+        node["window_dilation_strides"] = tmp->get_window_dilation_strides();
+        node["padding_below"] = tmp->get_padding_below();
+        node["padding_above"] = tmp->get_padding_above();
+        node["groups"] = tmp->get_groups();
+        break;
+    }
     case OP_TYPEID::GroupConvolutionTranspose:
     {
         auto tmp = static_cast<const op::GroupConvolutionTranspose*>(&n);
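For reference, the op-specific fields these two cases read and write would look roughly like this inside a serialized node (field names taken from the code above; values illustrative, generic node fields such as name and inputs omitted):

    "window_movement_strides": [1, 1],
    "window_dilation_strides": [1, 1],
    "padding_below": [0, 0],
    "padding_above": [0, 0],
    "groups": 3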
@@ -308,6 +308,7 @@ set(MULTI_TEST_SRC
     backend/gather.in.cpp
     backend/gelu.in.cpp
     backend/generate_mask.in.cpp
+    backend/group_convolution.in.cpp
     backend/layer_norm.in.cpp
     backend/log.in.cpp
     backend/logical_and.in.cpp
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/known_element_types.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
NGRAPH_TEST(${BACKEND_NAME}, dyn_group_convolution_backprop_data)
{
    Shape shape_filter{6, 1, 3, 3};
    auto filters = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    Shape shape_delta{2, 6, 3, 3};
    auto deltas = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    Shape shape_data_batch{2, 3, 5, 5};
    auto data_batch = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto strides = Strides{1, 1};
    auto dilations = Strides{1, 1};
    auto padding_begin = CoordinateDiff{0, 0};
    auto padding_end = CoordinateDiff{0, 0};
    size_t groups = 3;

    auto conv_bprop_data = make_shared<op::GroupConvolutionBackpropData>(
        data_batch, filters, deltas, strides, dilations, padding_begin, padding_end, groups);

    auto f = make_shared<Function>(conv_bprop_data, ParameterVector{data_batch, filters, deltas});

    auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
    auto handle = backend->compile(f);

    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());

    vector<float> filter, delta, data, expected_result;

    for (int i = 0; i < 6 * 1 * 3 * 3; i++)
        filter.emplace_back(i);

    for (int i = 0; i < 2 * 6 * 3 * 3; i++)
        delta.emplace_back(i);

    for (int i = 0; i < 2 * 3 * 5 * 5; i++)
        data.emplace_back(i);

    for (int i = 0; i < 2 * 3 * 5 * 5; i++)
        expected_result.emplace_back(i);

    auto a = backend->create_tensor(element::f32, shape_data_batch);
    copy_data(a, data);
    auto b = backend->create_tensor(element::f32, shape_filter);
    copy_data(b, filter);
    auto c = backend->create_tensor(element::f32, shape_delta);
    copy_data(c, delta);

    handle->call_with_validate({result}, {a, b, c});
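    // Placeholder check (per the commit message's "add ut placeholder"):
    // expected_result holds dummy values, so the computed gradient is only
    // asserted to differ from it rather than matched against a reference.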
    EXPECT_FALSE(test::all_close_f(vector<float>{expected_result}, read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, dyn_group_convolution_backprop_filters)
{
    Shape shape_filter{6, 1, 3, 3};
    auto filters = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    Shape shape_delta{2, 6, 3, 3};
    auto deltas = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    Shape shape_data_batch{2, 3, 5, 5};
    auto data_batch = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto strides = Strides{1, 1};
    auto dilations = Strides{1, 1};
    auto padding_begin = CoordinateDiff{0, 0};
    auto padding_end = CoordinateDiff{0, 0};
    size_t groups = 3;

    auto conv_bprop_filters = make_shared<op::GroupConvolutionBackpropFilters>(
        data_batch, filters, deltas, strides, dilations, padding_begin, padding_end, groups);

    auto f =
        make_shared<Function>(conv_bprop_filters, ParameterVector{data_batch, filters, deltas});

    auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
    auto handle = backend->compile(f);

    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());

    vector<float> filter, delta, data, expected_result;

    for (int i = 0; i < 6 * 1 * 3 * 3; i++)
        filter.emplace_back(i);

    for (int i = 0; i < 2 * 6 * 3 * 3; i++)
        delta.emplace_back(i);

    for (int i = 0; i < 2 * 3 * 5 * 5; i++)
        data.emplace_back(i);

    for (int i = 0; i < 6 * 1 * 3 * 3; i++)
        expected_result.emplace_back(i);

    auto a = backend->create_tensor(element::f32, shape_data_batch);
    copy_data(a, data);
    auto b = backend->create_tensor(element::f32, shape_filter);
    copy_data(b, filter);
    auto c = backend->create_tensor(element::f32, shape_delta);
    copy_data(c, delta);

    handle->call_with_validate({result}, {a, b, c});
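    // Same placeholder pattern as the data-backprop test above: dummy
    // expected_result, so the assertion is "not close" rather than "close".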
    EXPECT_FALSE(test::all_close_f(vector<float>{expected_result}, read_vector<float>(result)));
}