Commit 1039ea1a authored by Tomasz Socha's avatar Tomasz Socha Committed by Scott Cyphers

[FUSED] Add new Selu fused op. (#3844)

* [FUSED] Add new Selu fused op.

* Add Selu to serializer

* Add Selu to ngraph.hpp
parent f543849e
......@@ -363,6 +363,8 @@ set (SRC
op/fused/rnn_cell.hpp
op/fused/scale_shift.cpp
op/fused/scale_shift.hpp
op/fused/selu.cpp
op/fused/selu.hpp
op/fused/shuffle_channels.cpp
op/fused/shuffle_channels.hpp
op/fused/softmax_crossentropy.cpp
......
......@@ -17,15 +17,8 @@
#include <memory>
#include <vector>
#include "ngraph/op/add.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/exp.hpp"
#include "ngraph/op/maximum.hpp"
#include "ngraph/op/minimum.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/subtract.hpp"
#include "ngraph/op/util/broadcasting.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/op/fused/selu.hpp"
#include "selu.hpp"
using namespace ngraph::op;
......@@ -41,31 +34,18 @@ namespace ngraph
NodeVector selu(const Node& node)
{
auto data = node.get_ng_inputs().at(0);
double alpha =
auto alpha =
node.get_attribute_value<double>("alpha", 1.67326319217681884765625);
double gamma =
auto gamma =
node.get_attribute_value<double>("gamma", 1.05070102214813232421875);
std::shared_ptr<ngraph::Node> alpha_node =
std::make_shared<ngraph::op::Constant>(
data->get_element_type(), ngraph::Shape{}, std::vector<double>{alpha});
alpha_node = make_broadcast_node(alpha_node, data->get_shape());
auto alpha_node = std::make_shared<ngraph::op::Constant>(
data->get_element_type(), data->get_shape(), std::vector<double>{alpha});
std::shared_ptr<ngraph::Node> gamma_node =
std::make_shared<ngraph::op::Constant>(
data->get_element_type(), ngraph::Shape{}, std::vector<double>{gamma});
gamma_node = make_broadcast_node(gamma_node, data->get_shape());
auto gamma_node = std::make_shared<ngraph::op::Constant>(
data->get_element_type(), data->get_shape(), std::vector<double>{gamma});
std::shared_ptr<ngraph::Node> zero_node =
std::make_shared<ngraph::op::Constant>(
data->get_element_type(), ngraph::Shape{}, std::vector<double>{0});
zero_node = make_broadcast_node(zero_node, data->get_shape());
return {gamma_node * (std::make_shared<ngraph::op::Maximum>(data, zero_node) +
alpha_node * std::make_shared<ngraph::op::Exp>(
std::make_shared<ngraph::op::Minimum>(
data, zero_node)) -
alpha_node)};
return {std::make_shared<ngraph::op::v1::Selu>(data, alpha_node, gamma_node)};
}
} // namespace set_1
......
......@@ -147,6 +147,7 @@ namespace ngraph
#include "ngraph/op/fused/prelu.hpp"
#include "ngraph/op/fused/rnn_cell.hpp"
#include "ngraph/op/fused/scale_shift.hpp"
#include "ngraph/op/fused/selu.hpp"
#include "ngraph/op/fused/shuffle_channels.hpp"
#include "ngraph/op/fused/softmax_crossentropy.hpp"
#include "ngraph/op/fused/space_to_depth.hpp"
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/fused/selu.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/exp.hpp"
#include "ngraph/op/maximum.hpp"
#include "ngraph/op/minimum.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/subtract.hpp"
using namespace std;
using namespace ngraph;
// Out-of-line definition required for the in-class constexpr static (pre-C++17 ODR rule).
constexpr NodeTypeInfo op::v1::Selu::type_info;
// Constructs a Selu fused op.
// data   - node producing the input tensor
// alpha  - node producing the SELU alpha coefficient
// lambda - node producing the SELU lambda (scale) coefficient
// The coefficients are graph inputs rather than attributes, so they may be
// supplied as constants of any element type compatible with `data`.
op::v1::Selu::Selu(const Output<Node>& data, const Output<Node>& alpha, const Output<Node>& lambda)
: FusedOp({data, alpha, lambda})
{
// Standard nGraph op hook: validates inputs and infers the output type/shape.
constructor_validate_and_infer_types();
}
NodeVector op::v1::Selu::decompose_op() const
{
    // Fetch the three graph inputs in the order fixed by the constructor.
    const auto x = input_value(0);
    const auto alpha_coeff = input_value(1);
    const auto lambda_coeff = input_value(2);

    // Zero tensor with the same element type and shape as the input, so the
    // elementwise Maximum/Minimum below see matching shapes.
    // NOTE(review): x.get_shape() assumes a static shape — confirm dynamic
    // shapes are not expected to reach this decomposition.
    const auto zero = std::make_shared<ngraph::op::Constant>(
        x.get_element_type(), x.get_shape(), std::vector<double>{0});

    // SELU(x) = lambda * (max(x, 0) + alpha * exp(min(x, 0)) - alpha)
    const auto positive_part = std::make_shared<op::Maximum>(x, zero);
    const auto negative_part =
        alpha_coeff * std::make_shared<op::Exp>(std::make_shared<op::Minimum>(x, zero));

    return {lambda_coeff * (positive_part + negative_part - alpha_coeff)};
}
shared_ptr<Node> op::v1::Selu::copy_with_new_args(const NodeVector& new_args) const
{
    // Clone hook: reject any argument count other than this op's three inputs.
    check_new_args_count(this, new_args);
    // Rebuild with the same input order as the constructor: data, alpha, lambda.
    const auto& data = new_args.at(0);
    const auto& alpha = new_args.at(1);
    const auto& lambda = new_args.at(2);
    return make_shared<v1::Selu>(data, alpha, lambda);
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/node.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/fused_op.hpp"
namespace ngraph
{
namespace op
{
namespace v1
{
/// \brief Performs a SELU activation function on all elements of the input node
///
/// Decomposes (see selu.cpp) into:
///   SELU(x) = lambda * (max(x, 0) + alpha * exp(min(x, 0)) - alpha)
/// Alpha and lambda are supplied as graph inputs, not attributes.
class Selu : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
// Type identity: op name "Selu", opset version 1.
static constexpr NodeTypeInfo type_info{"Selu", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a Selu node.
///
/// \param data - Node producing the input tensor
/// \param alpha - Alpha coefficient of SELU operation
/// \param lambda - Lambda coefficient of SELU operation
Selu(const Output<Node>& data,
const Output<Node>& alpha,
const Output<Node>& lambda);
/// \brief Expands this fused op into its elementwise sub-graph.
virtual NodeVector decompose_op() const override;
/// \brief Clone hook; expects exactly three new arguments (data, alpha, lambda).
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
}
// Re-export so callers may refer to op::Selu without the version namespace.
using v1::Selu;
} // namespace op
} // namespace ngraph
......@@ -49,6 +49,7 @@ NGRAPH_OP(PartialSliceBackprop, ngraph::op)
NGRAPH_OP(PRelu, ngraph::op)
NGRAPH_OP(RNNCell, ngraph::op)
NGRAPH_OP(ScaleShift, ngraph::op)
NGRAPH_OP(Selu, ngraph::op)
NGRAPH_OP(ShuffleChannels, ngraph::op)
NGRAPH_OP(SpaceToDepth, ngraph::op)
NGRAPH_OP(Split, ngraph::op)
......
......@@ -91,6 +91,7 @@
#include "ngraph/op/fused/prelu.hpp"
#include "ngraph/op/fused/rnn_cell.hpp"
#include "ngraph/op/fused/scale_shift.hpp"
#include "ngraph/op/fused/selu.hpp"
#include "ngraph/op/fused/shuffle_channels.hpp"
#include "ngraph/op/fused/softmax_crossentropy.hpp"
#include "ngraph/op/fused/space_to_depth.hpp"
......@@ -2155,6 +2156,11 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
node = make_shared<op::Select>(args[0], args[1], args[2]);
break;
}
case OP_TYPEID::Selu:
{
node = make_shared<op::Selu>(args[0], args[1], args[2]);
break;
}
case OP_TYPEID::Send:
{
auto dest_id = node_js.at("dest_id").get<size_t>();
......@@ -3535,6 +3541,8 @@ json JSONSerializer::serialize_node(const Node& n)
}
case OP_TYPEID::Select: { break;
}
case OP_TYPEID::Selu: { break;
}
case OP_TYPEID::Send:
{
auto tmp = static_cast<const op::Send*>(&n);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment