Commit fb0ae59c authored by Adam Rogowiec, committed by Robert Kimball

Add HardSigmoid to fused ops. (#2824)

* Move HardSigmoid to nGraph fused operators.

* UT for HardSigmoid fused operator.

* Add type_prop UT.

* Reorder operations in implementation.

* Fix unit tests.

* Fix typo.

* Apply style-check.

* Switch to single-precision parameters.

* Disable unit test for IGPU.
parent 86a99474
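For context, HardSigmoid is a bounded, piecewise-linear approximation of the logistic sigmoid, applied elementwise: y = min(max(alpha * x + beta, 0), 1). A minimal scalar sketch in C++ (not from this commit; the default attribute values are the ONNX defaults used by the importer below):

#include <algorithm>

// Scalar reference semantics of HardSigmoid; the fused op applies this
// elementwise across the input tensor.
float hard_sigmoid(float x, float alpha = 0.2f, float beta = 0.5f)
{
    return std::min(std::max(alpha * x + beta, 0.0f), 1.0f);
}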
@@ -276,6 +276,8 @@ set (SRC
op/topk.hpp
op/fused/conv_fused.cpp
op/fused/conv_fused.hpp
op/fused/hard_sigmoid.cpp
op/fused/hard_sigmoid.hpp
op/fused/depth_to_space.cpp
op/fused/depth_to_space.hpp
op/fused/elu.cpp
......
@@ -16,15 +16,8 @@
#include <memory>
#include "core/node.hpp"
#include "hard_sigmoid.hpp"
- #include "ngraph/node.hpp"
- #include "ngraph/op/add.hpp"
- #include "ngraph/op/constant.hpp"
- #include "ngraph/op/maximum.hpp"
- #include "ngraph/op/minimum.hpp"
- #include "ngraph/op/multiply.hpp"
- #include "ngraph/op/util/broadcasting.hpp"
+ #include "ngraph/op/fused/hard_sigmoid.hpp"
using namespace ngraph::op;
@@ -43,29 +36,7 @@ namespace ngraph
double alpha = node.get_attribute_value<double>("alpha", 0.2);
double beta = node.get_attribute_value<double>("beta", 0.5);
- std::shared_ptr<ngraph::Node> alpha_node =
-     std::make_shared<ngraph::op::Constant>(
-         data->get_element_type(), ngraph::Shape{}, std::vector<double>{alpha});
- alpha_node = make_broadcast_node(alpha_node, data->get_shape());
- std::shared_ptr<ngraph::Node> beta_node =
-     std::make_shared<ngraph::op::Constant>(
-         data->get_element_type(), ngraph::Shape{}, std::vector<double>{beta});
- beta_node = make_broadcast_node(beta_node, data->get_shape());
- std::shared_ptr<ngraph::Node> one_node = std::make_shared<ngraph::op::Constant>(
-     data->get_element_type(), Shape{}, std::vector<double>{1});
- one_node = make_broadcast_node(one_node, data->get_shape());
- std::shared_ptr<ngraph::Node> zero_node =
-     std::make_shared<ngraph::op::Constant>(
-         data->get_element_type(), Shape{}, std::vector<double>{0});
- zero_node = make_broadcast_node(zero_node, data->get_shape());
- return {std::make_shared<ngraph::op::Maximum>(
-     zero_node,
-     std::make_shared<ngraph::op::Minimum>(one_node,
-         alpha_node * data + beta_node))};
+ return {std::make_shared<ngraph::op::HardSigmoid>(data, alpha, beta)};
}
} // namespace set_1
......
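With the fused op in place, the importer above shrinks to a single HardSigmoid node fed by the ONNX attributes (defaults alpha = 0.2, beta = 0.5), instead of hand-assembling the broadcast/multiply/add/min/max subgraph. A minimal sketch of the equivalent direct construction in nGraph (shape illustrative; the fused-op header is the one added by this commit):

#include <memory>

#include "ngraph/node.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/parameter.hpp"

// Builds x -> HardSigmoid(x; alpha = 0.2, beta = 0.5) over a small 2-D input.
std::shared_ptr<ngraph::Node> make_hard_sigmoid()
{
    auto x = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32,
                                                     ngraph::Shape{2, 7});
    return std::make_shared<ngraph::op::HardSigmoid>(x, 0.2f, 0.5f);
}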
@@ -100,6 +100,7 @@
#include "ngraph/op/fused/elu.hpp"
#include "ngraph/op/fused/gemm.hpp"
#include "ngraph/op/fused/group_conv.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/fused/prelu.hpp"
#include "ngraph/op/fused/space_to_depth.hpp"
#include "ngraph/op/gather.hpp"
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <memory>
#include "ngraph/op/add.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/maximum.hpp"
#include "ngraph/op/minimum.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/shape.hpp"
using namespace std;
using namespace ngraph;
op::HardSigmoid::HardSigmoid(const shared_ptr<Node>& data, float alpha, float beta)
: FusedOp("HardSigmoid", {data})
, m_alpha(alpha)
, m_beta(beta)
{
constructor_validate_and_infer_types();
}
NodeVector op::HardSigmoid::decompose_op() const
{
auto data = get_argument(0);
auto data_shape = data->get_shape();
size_t elem_count = shape_size(data_shape);
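// Materialize alpha, beta and the 0/1 clamp bounds as constants with the same
// shape as the input, so the elementwise ops below need no broadcasting step.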
std::shared_ptr<ngraph::Node> alpha_node = ngraph::op::Constant::create<float>(
data->get_element_type(), data_shape, std::vector<float>(elem_count, m_alpha));
std::shared_ptr<ngraph::Node> beta_node = ngraph::op::Constant::create<float>(
data->get_element_type(), data_shape, std::vector<float>(elem_count, m_beta));
std::shared_ptr<ngraph::Node> one_node = ngraph::op::Constant::create<float>(
data->get_element_type(), data_shape, std::vector<float>(elem_count, 1.0f));
std::shared_ptr<ngraph::Node> zero_node = ngraph::op::Constant::create<float>(
data->get_element_type(), data_shape, std::vector<float>(elem_count, 0.0f));
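// Decomposition: y = min(max(alpha * x + beta, 0), 1), built from core ops.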
return {std::make_shared<op::Minimum>(
std::make_shared<op::Maximum>(alpha_node * data + beta_node, zero_node), one_node)};
}
shared_ptr<Node> op::HardSigmoid::copy_with_new_args(const NodeVector& new_args) const
{
if (new_args.size() != 1)
{
throw ngraph_error("Incorrect number of new arguments");
}
return make_shared<HardSigmoid>(new_args.at(0), m_alpha, m_beta);
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/autodiff/adjoints.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/util/fused_op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Parameterized, bounded, sigmoid-like, piecewise-linear
/// function: y = min(max(alpha * x + beta, 0), 1).
///
class HardSigmoid : public ngraph::op::util::FusedOp
{
public:
/// \brief Constructs a HardSigmoid operation.
///
/// \param[in] data Input tensor.
/// \param[in] alpha The alpha parameter.
/// \param[in] beta The beta parameter.
///
HardSigmoid(const std::shared_ptr<ngraph::Node>& data, float alpha, float beta);
virtual NodeVector decompose_op() const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
float get_alpha() const { return m_alpha; }
float get_beta() const { return m_beta; }
private:
float m_alpha;
float m_beta;
};
}
}
@@ -23,6 +23,7 @@ NGRAPH_OP(PRelu, ngraph::op)
NGRAPH_OP(ConvolutionBias, ngraph::op)
NGRAPH_OP(ConvolutionBiasAdd, ngraph::op)
NGRAPH_OP(ConvolutionBiasBackpropFiltersBias, ngraph::op)
NGRAPH_OP(HardSigmoid, ngraph::op)
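// Registering HardSigmoid here generates OP_TYPEID::HardSigmoid, which the
// backend and serializer switch statements below match on.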
NGRAPH_OP(DepthToSpace, ngraph::op)
NGRAPH_OP(SpaceToDepth, ngraph::op)
NGRAPH_OP(GroupConvolution, ngraph::op)
@@ -82,6 +82,7 @@
#include "ngraph/op/fused/elu.hpp"
#include "ngraph/op/fused/gemm.hpp"
#include "ngraph/op/fused/group_conv.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/fused/space_to_depth.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/op/greater.hpp"
@@ -1988,6 +1989,7 @@ shared_ptr<runtime::Executable>
case OP_TYPEID::GatherND:
case OP_TYPEID::Gemm:
case OP_TYPEID::GenerateMask:
case OP_TYPEID::HardSigmoid:
case OP_TYPEID::PRelu:
case OP_TYPEID::Passthrough:
case OP_TYPEID::QuantizedAvgPool:
......
@@ -66,3 +66,4 @@ gather_scalar_indices
gather_nd_single_indices
gemm
gemm_broadcast_input_C
hardsigmoid
@@ -71,6 +71,7 @@
#include "ngraph/op/fused/elu.hpp"
#include "ngraph/op/fused/gemm.hpp"
#include "ngraph/op/fused/group_conv.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/fused/prelu.hpp"
#include "ngraph/op/fused/space_to_depth.hpp"
#include "ngraph/op/gather.hpp"
@@ -970,6 +971,13 @@ static shared_ptr<ngraph::Function>
node = make_shared<op::GreaterEq>(args[0], args[1]);
break;
}
case OP_TYPEID::HardSigmoid:
{
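// Restore the attributes emitted by the matching HardSigmoid case in write().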
auto alpha = node_js.at("alpha").get<float>();
auto beta = node_js.at("beta").get<float>();
node = make_shared<op::HardSigmoid>(args[0], alpha, beta);
break;
}
case OP_TYPEID::GroupConvolution:
{
auto window_movement_strides =
@@ -1840,6 +1848,13 @@ static json write(const Node& n, bool binary_constant_data)
}
case OP_TYPEID::GreaterEq: { break;
}
case OP_TYPEID::HardSigmoid:
{
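// Persist alpha/beta so that read() above can reconstruct the op.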
auto tmp = dynamic_cast<const op::HardSigmoid*>(&n);
node["alpha"] = tmp->get_alpha();
node["beta"] = tmp->get_beta();
break;
}
case OP_TYPEID::GroupConvolution:
{
auto tmp = dynamic_cast<const op::GroupConvolution*>(&n);
......
@@ -18,6 +18,8 @@
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <random>
#include <string>
@@ -89,6 +91,47 @@ NGRAPH_TEST(${BACKEND_NAME}, prelu)
EXPECT_EQ(expected, read_vector<float>(result0));
}
NGRAPH_TEST(${BACKEND_NAME}, hardsigmoid)
{
Shape shape{2, 7};
float alpha = 0.125f;
float beta = 0.642f;
auto A = make_shared<op::Parameter>(element::f32, shape);
auto hardsigmoid = make_shared<op::HardSigmoid>(A, alpha, beta);
auto f0 = make_shared<Function>(NodeVector{hardsigmoid}, ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Prepare input and expected output data
vector<float> input_data{-1.f,
0.f,
1.f,
-100.f,
100.f,
-3.1234567f,
5.876543f,
7.13245364f,
numeric_limits<float>::max(),
numeric_limits<float>::lowest(),
numeric_limits<float>::min(),
numeric_limits<float>::infinity(),
numeric_limits<float>::min() / 16.f,
-numeric_limits<float>::min() / 16.f};
auto impl = [alpha, beta](float val) { return min(max(alpha * val + beta, 0.f), 1.f); };
vector<float> expected_output;
transform(begin(input_data), end(input_data), back_inserter(expected_output), impl);
auto a = backend->create_tensor(element::f32, shape);
copy_data(a, input_data);
auto result0 = backend->create_tensor(element::f32, shape);
auto handle = backend->compile(f0);
handle->call_with_validate({result0}, {a});
EXPECT_TRUE(test::all_close_f(expected_output, read_vector<float>(result0)));
}
NGRAPH_TEST(${BACKEND_NAME}, prelu_shared_slope)
{
Shape shape{3, 2};
......
@@ -13751,6 +13751,17 @@ TEST(type_prop, conv_bias_bprop_2d_deduce)
EXPECT_EQ(conv->get_output_shape(1), bias->get_shape());
}
TEST(type_prop, hardsigmoid)
{
Shape data_shape{3, 5};
float alpha = 0.1f;
float beta = 1.2f;
auto P = make_shared<op::Parameter>(element::f32, data_shape);
auto H = make_shared<op::HardSigmoid>(P, alpha, beta);
ASSERT_EQ(H->get_element_type(), element::f32);
ASSERT_EQ(H->get_shape(), data_shape);
}
TEST(type_prop, group_conv)
{
// Deduce type
......