Commit 4fb4be5e authored by Adam Rogowiec, committed by Scott Cyphers

[Fused Op] GRN (#2905)

* Extend lp-norm functions to take bias.

* Move lp-norm utilities to nGraph core op/util.

* Move norm files to builder directory.

* Apply clang-format.

* Skeleton for GRN operation.

* Add GRN implementation.

* Fix reshape utility function.

* Address review comments.

* Add using namespace std.

* Review comments.

* Few fixes in grn implementation.

* Clang format.

* Basic UT.

* Fix expected data.

* Add more UT and skip them on IGPU.

* Review comments: const correctness and remove using namespace std statement.

* Unblock GRN on IGPU.

* Get back GRN op case to switch.

* Fix merge error.
parent bf869655
@@ -296,6 +296,8 @@ set (SRC
op/fused/elu.hpp
op/fused/gemm.cpp
op/fused/gemm.hpp
op/fused/grn.cpp
op/fused/grn.hpp
op/fused/group_conv.hpp
op/fused/group_conv.cpp
op/fused/mvn.cpp
......
@@ -100,6 +100,7 @@
#include "ngraph/op/fused/depth_to_space.hpp"
#include "ngraph/op/fused/elu.hpp"
#include "ngraph/op/fused/gemm.hpp"
#include "ngraph/op/fused/grn.hpp"
#include "ngraph/op/fused/group_conv.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/fused/mvn.hpp"
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <iterator>

#include "grn.hpp"
#include "ngraph/axis_set.hpp"
#include "ngraph/builder/norm.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/divide.hpp"
#include "ngraph/op/util/reshape.hpp"
#include "ngraph/shape.hpp"

using namespace std;
using namespace ngraph;

op::GRN::GRN(const shared_ptr<Node>& data, float bias)
    : FusedOp("GRN", {data})
    , m_bias(bias)
{
    constructor_validate_and_infer_types();
}

void op::GRN::pre_validate_and_infer_types()
{
    const auto& data_pshape = get_input_partial_shape(0);

    if (data_pshape.is_static())
    {
        const Shape& data_shape{data_pshape.to_shape()};

        // Input data must be a 2D, 3D or 4D tensor.
        NODE_VALIDATION_CHECK(this,
                              (data_shape.size() >= 2 && data_shape.size() <= 4),
                              "Input tensor rank must be 2, 3 or 4 dimensional (actual input "
                              "shape: ",
                              data_shape,
                              ").");
    }
}

NodeVector op::GRN::decompose_op() const
{
    shared_ptr<Node> data{get_argument(0)};
    const Shape& input_shape{data->get_shape()};

    // Reshape to a 4D tensor by prepending size-1 dimensions.
    if (input_shape.size() != 4)
    {
        Shape data_shape(4 - input_shape.size(), 1);
        copy(begin(input_shape), end(input_shape), back_inserter(data_shape));
        data = util::reshape(data, data_shape);
    }

    // Calculate the L2 norm across the channel axis.
    shared_ptr<Node> norm = builder::l2_norm(data, AxisSet{1}, m_bias);
    // Get back the reduced axis.
    norm = std::make_shared<Broadcast>(norm, data->get_shape(), AxisSet{1});
    data = data / norm;

    // Get back the original input tensor rank.
    if (input_shape.size() != 4)
    {
        data = util::reshape(data, input_shape);
    }

    return {data};
}

shared_ptr<Node> op::GRN::copy_with_new_args(const NodeVector& new_args) const
{
    if (new_args.size() != 1)
    {
        throw ngraph_error("Incorrect number of new arguments");
    }
    return make_shared<GRN>(new_args.at(0), m_bias);
}
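In formula form, decompose_op normalizes each spatial position across the channel axis. Assuming builder::l2_norm(data, AxisSet{1}, bias) computes the square root of the channel-wise sum of squares plus the bias (as the "Extend lp-norm functions to take bias" entry in the commit message suggests), the output is:

\[
\mathrm{output}_{n,c,h,w} = \frac{\mathrm{data}_{n,c,h,w}}{\sqrt{\sum_{c'} \mathrm{data}_{n,c',h,w}^{2} + \mathrm{bias}}}
\]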
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once

#include <memory>

#include "ngraph/node.hpp"
#include "ngraph/op/util/fused_op.hpp"

namespace ngraph
{
    namespace op
    {
        /// \brief Global Response Normalization with L2 norm (across channels only).
        ///
        class GRN : public ngraph::op::util::FusedOp
        {
        public:
            /// \brief Constructs a GRN operation.
            ///
            /// \param data - Node producing the input tensor
            /// \param bias - The bias added to the sum of squared channel values
            ///               before the square root is taken.
            ///
            GRN(const std::shared_ptr<ngraph::Node>& data, float bias);

            float get_bias() const { return m_bias; }
            virtual void pre_validate_and_infer_types() override;
            virtual NodeVector decompose_op() const override;

            virtual std::shared_ptr<Node>
                copy_with_new_args(const NodeVector& new_args) const override;

        protected:
            float m_bias = 1.0f;
        };
    }
}
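For orientation, a minimal usage sketch follows. This is hypothetical driver code, not part of the commit; it assumes the headers above are on the include path and uses only the Parameter/Function API seen elsewhere in this diff.

#include <memory>

#include "ngraph/function.hpp"
#include "ngraph/op/fused/grn.hpp"
#include "ngraph/op/parameter.hpp"

using namespace ngraph;

// Hypothetical usage sketch (not from the commit): build a graph around the new fused op.
std::shared_ptr<Function> make_grn_graph()
{
    // NCHW input; 2D and 3D inputs are also accepted and padded to 4D internally.
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
    // The bias is added under the square root of the channel-wise sum of squares.
    auto grn = std::make_shared<op::GRN>(data, 1e-6f);
    return std::make_shared<Function>(NodeVector{grn}, ParameterVector{data});
}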
@@ -23,6 +23,7 @@ NGRAPH_OP(ConvolutionBiasAdd, ngraph::op)
NGRAPH_OP(ConvolutionBiasBackpropFiltersBias, ngraph::op)
NGRAPH_OP(DepthToSpace, ngraph::op)
NGRAPH_OP(Elu, ngraph::op)
NGRAPH_OP(GRN, ngraph::op)
NGRAPH_OP(Gemm, ngraph::op)
NGRAPH_OP(GroupConvolution, ngraph::op)
NGRAPH_OP(HardSigmoid, ngraph::op)
......
@@ -82,6 +82,7 @@
#include "ngraph/op/fused/depth_to_space.hpp"
#include "ngraph/op/fused/elu.hpp"
#include "ngraph/op/fused/gemm.hpp"
#include "ngraph/op/fused/grn.hpp"
#include "ngraph/op/fused/group_conv.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/fused/mvn.hpp"
@@ -2052,6 +2053,7 @@ shared_ptr<runtime::Executable>
case OP_TYPEID::Gather:
case OP_TYPEID::GatherND:
case OP_TYPEID::GenerateMask:
case OP_TYPEID::GRN:
case OP_TYPEID::HardSigmoid:
case OP_TYPEID::MVN:
case OP_TYPEID::Normalize:
@@ -2163,6 +2165,7 @@ bool runtime::intelgpu::IntelGPUBackend::is_supported_impl(const Node& node)
case OP_TYPEID::DepthToSpace:
case OP_TYPEID::Elu:
case OP_TYPEID::Gemm:
case OP_TYPEID::GRN:
case OP_TYPEID::MVN:
case OP_TYPEID::Normalize:
case OP_TYPEID::PRelu:
......
@@ -71,6 +71,7 @@
#include "ngraph/op/fused/depth_to_space.hpp"
#include "ngraph/op/fused/elu.hpp"
#include "ngraph/op/fused/gemm.hpp"
#include "ngraph/op/fused/grn.hpp"
#include "ngraph/op/fused/group_conv.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/fused/mvn.hpp"
@@ -985,6 +986,12 @@ static shared_ptr<ngraph::Function>
node = make_shared<op::GreaterEq>(args[0], args[1]);
break;
}
case OP_TYPEID::GRN:
{
auto bias = node_js.at("bias").get<float>();
node = make_shared<op::GRN>(args[0], bias);
break;
}
case OP_TYPEID::HardSigmoid:
{
auto alpha = node_js.at("alpha").get<float>();
@@ -1906,6 +1913,12 @@ static json write(const Node& n, bool binary_constant_data)
}
case OP_TYPEID::GreaterEq: { break;
}
case OP_TYPEID::GRN:
{
auto tmp = dynamic_cast<const op::GRN*>(&n);
node["bias"] = tmp->get_bias();
break;
}
case OP_TYPEID::HardSigmoid:
{
auto tmp = dynamic_cast<const op::HardSigmoid*>(&n);
......
@@ -717,6 +717,62 @@ NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_variance_normalization_split_channels)
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, grn_4d)
{
    const Shape data_shape{1, 2, 3, 4};
    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    float bias{1e-6f};

    const auto grn = make_shared<op::GRN>(data, bias);
    const auto function = make_shared<Function>(NodeVector{grn}, ParameterVector{data});
    auto test_case = ngraph::test::NgraphTestCase(function, "${BACKEND_NAME}");

    vector<float> input_data(shape_size(data_shape));
    iota(begin(input_data), end(input_data), 1);
    test_case.add_input<float>(input_data);

    test_case.add_expected_output<float>(
        data_shape, {0.0766965f,  0.14142136f, 0.19611613f, 0.24253564f, 0.28216633f, 0.31622776f,
                     0.34570536f, 0.37139067f, 0.39391932f, 0.41380295f, 0.4314555f,  0.4472136f,
                     0.9970545f,  0.98994946f, 0.9805807f,  0.97014254f, 0.9593655f,  0.9486833f,
                     0.9383431f,  0.9284767f,  0.91914505f, 0.9103665f,  0.9021342f,  0.8944272f});
    test_case.run();
}
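As a cross-check on the expected data: with shape {1, 2, 3, 4} the channel axis has size 2, so input element i (first channel) pairs with element i + 12 (second channel). A standalone sanity-check sketch, outside the test harness and not part of the commit, reproduces the table:

#include <cmath>
#include <cstdio>

// Standalone sanity check (not part of the commit): recompute grn_4d's expected values.
int main()
{
    const float bias = 1e-6f;
    for (int i = 1; i <= 12; ++i)
    {
        // L2 norm across the two channels at one spatial position.
        const float norm = std::sqrt(static_cast<float>(i * i + (i + 12) * (i + 12)) + bias);
        std::printf("%.7f %.7f\n", i / norm, (i + 12) / norm);
    }
    // i = 1: 1 / sqrt(170) ~= 0.0766965 and 13 / sqrt(170) ~= 0.9970545,
    // matching the first entries of the two halves of the expected output.
    return 0;
}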
NGRAPH_TEST(${BACKEND_NAME}, grn_2d_with_bias)
{
    const Shape data_shape{3, 4};
    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    float bias{2.25f};

    const auto grn = make_shared<op::GRN>(data, bias);
    const auto function = make_shared<Function>(NodeVector{grn}, ParameterVector{data});
    auto test_case = ngraph::test::NgraphTestCase(function, "${BACKEND_NAME}");

    vector<float> input_data(shape_size(data_shape));
    iota(begin(input_data), end(input_data), 1);
    test_case.add_input<float>(input_data);

    test_case.add_expected_output<float>(data_shape,
                                         {0.5547002f,
                                          0.8f,
                                          0.8944272f,
                                          0.9363292f,
                                          0.95782626f,
                                          0.9701425f,
                                          0.9778024f,
                                          0.98287225f,
                                          0.9863939f,
                                          0.9889363f,
                                          0.9908301f,
                                          0.99227786f});
    test_case.run();
}
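Here the 2D input is padded to shape {1, 1, 3, 4}, so the channel axis has size 1 and each element is normalized only against itself plus the bias, i.e. y = x / sqrt(x^2 + bias). A standalone sketch (again outside the test harness, not part of the commit) reproduces the expected column:

#include <cmath>
#include <cstdio>

// Standalone sanity check (not part of the commit): recompute grn_2d_with_bias's expected values.
int main()
{
    const float bias = 2.25f;
    for (int x = 1; x <= 12; ++x)
    {
        std::printf("%.7f\n", x / std::sqrt(static_cast<float>(x * x) + bias));
    }
    // x = 1: 1 / sqrt(3.25) ~= 0.5547002; x = 2: 2 / sqrt(6.25) = 0.8.
    return 0;
}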
NGRAPH_TEST(${BACKEND_NAME}, scale_shift_no_broadcast)
{
auto data = make_shared<op::Parameter>(element::f64, Shape{3, 6});
......
@@ -14415,6 +14415,57 @@ TEST(type_prop, gemm_broadcast_input_C)
EXPECT_EQ(gemm_func->get_shape(), (Shape{3, 4}));
}
TEST(type_prop, grn)
{
    float bias = 1.25f;
    Shape data_shape{2, 3, 4, 5};
    auto A = make_shared<op::Parameter>(element::f32, data_shape);
    auto grn = make_shared<op::GRN>(A, bias);

    ASSERT_EQ(grn->get_element_type(), element::f32);
    ASSERT_EQ(grn->get_shape(), data_shape);
}

TEST(type_prop, grn_invalid_data_rank)
{
    float bias = 1.25f;
    auto A = make_shared<op::Parameter>(element::f32, Shape{4});

    try
    {
        auto grn = make_shared<op::GRN>(A, bias);
        // Should have thrown, so fail if it didn't
        FAIL() << "Invalid input tensor rank.";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             std::string("Input tensor rank must be 2, 3 or 4 dimensional"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }

    A = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4, 5});

    try
    {
        auto grn = make_shared<op::GRN>(A, bias);
        // Should have thrown, so fail if it didn't
        FAIL() << "Invalid input tensor rank.";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             std::string("Input tensor rank must be 2, 3 or 4 dimensional"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}
TEST(type_prop, mvn)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6});
......