Commit bf511013 authored by Mateusz Bencer, committed by Michał Karzyński

[Fused] Remove fused LeakyRelu (#3333)

parent 6e6c23ff
......@@ -330,8 +330,6 @@ set (SRC
op/fused/group_conv_transpose.cpp
op/fused/gru_cell.cpp
op/fused/gru_cell.hpp
op/fused/leaky_relu.cpp
op/fused/leaky_relu.hpp
op/fused/lstm_cell.cpp
op/fused/lstm_cell.hpp
op/fused/mvn.cpp
......
......@@ -16,13 +16,19 @@
#include <memory>
#include "core/node.hpp"
#include "exceptions.hpp"
#include "ngraph/node.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/fused/leaky_relu.hpp"
#include "ngraph/op/maximum.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/shape.hpp"
#include "core/node.hpp"
#include "leaky_relu.hpp"
#include "ngraph/op/util/broadcasting.hpp"
namespace ngraph
{
namespace onnx_import
......@@ -42,8 +48,8 @@ namespace ngraph
std::shared_ptr<ngraph::Node> alpha_node =
std::make_shared<ngraph::op::Constant>(
data->get_element_type(), Shape{}, std::vector<double>{alpha});
return {std::make_shared<ngraph::op::LeakyRelu>(data, alpha_node)};
alpha_node = ngraph::op::make_broadcast_node(alpha_node, data->get_shape());
return {std::make_shared<ngraph::op::Maximum>(data * alpha_node, data)};
}
} // namespace set_1
......
......@@ -111,7 +111,6 @@
#include "ngraph/op/fused/group_conv_transpose.hpp"
#include "ngraph/op/fused/gru_cell.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/fused/leaky_relu.hpp"
#include "ngraph/op/fused/lstm_cell.hpp"
#include "ngraph/op/fused/mvn.hpp"
#include "ngraph/op/fused/normalize.hpp"
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/fused/leaky_relu.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/maximum.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/util/broadcasting.hpp"
using namespace std;
using namespace ngraph;
const string op::LeakyRelu::type_name{"LeakyRelu"};
op::LeakyRelu::LeakyRelu(const Output<Node>& data, const Output<Node>& alpha)
: FusedOp({data, alpha})
{
constructor_validate_and_infer_types();
}
NodeVector op::LeakyRelu::decompose_op() const
{
auto data = input(0).get_source_output();
auto alpha_node = input(1).get_source_output();
alpha_node = ngraph::op::numpy_style_broadcast(alpha_node, data.get_shape());
return {std::make_shared<ngraph::op::Maximum>(data * alpha_node, data)};
}
shared_ptr<Node> op::LeakyRelu::copy_with_new_args(const NodeVector& new_args) const
{
if (new_args.size() != 2)
{
throw ngraph_error("Incorrect number of new arguments");
}
return make_shared<LeakyRelu>(new_args.at(0), new_args.at(1));
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <memory>
#include "ngraph/axis_vector.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/fused_op.hpp"
namespace ngraph
{
namespace op
{
class LeakyRelu : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
LeakyRelu() = default;
LeakyRelu(const Output<Node>& data, const Output<Node>& alpha);
virtual NodeVector decompose_op() const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
}
}
......@@ -36,7 +36,6 @@ NGRAPH_OP(GroupConvolution, ngraph::op)
NGRAPH_OP(GroupConvolutionTranspose, ngraph::op)
NGRAPH_OP(GRUCell, ngraph::op)
NGRAPH_OP(HardSigmoid, ngraph::op)
NGRAPH_OP(LeakyRelu, ngraph::op)
NGRAPH_OP(LSTMCell, ngraph::op)
NGRAPH_OP(MVN, ngraph::op)
NGRAPH_OP(Normalize, ngraph::op)
......
......@@ -88,8 +88,6 @@
#include "ngraph/op/fused/group_conv.hpp"
#include "ngraph/op/fused/group_conv_transpose.hpp"
#include "ngraph/op/fused/gru_cell.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/fused/leaky_relu.hpp"
#include "ngraph/op/fused/lstm_cell.hpp"
#include "ngraph/op/fused/mvn.hpp"
#include "ngraph/op/fused/normalize.hpp"
......@@ -2072,7 +2070,6 @@ shared_ptr<runtime::Executable>
case OP_TYPEID::GroupConvolutionTranspose:
case OP_TYPEID::GRUCell:
case OP_TYPEID::HardSigmoid:
case OP_TYPEID::LeakyRelu:
case OP_TYPEID::LSTMCell:
case OP_TYPEID::MVN:
case OP_TYPEID::Normalize:
......@@ -2200,7 +2197,6 @@ bool runtime::intelgpu::IntelGPUBackend::is_supported_impl(const Node& node)
case OP_TYPEID::GRN:
case OP_TYPEID::GroupConvolutionTranspose:
case OP_TYPEID::GRUCell:
case OP_TYPEID::LeakyRelu:
case OP_TYPEID::LSTMCell:
case OP_TYPEID::MVN:
case OP_TYPEID::Normalize:
......
......@@ -82,7 +82,6 @@
#include "ngraph/op/fused/group_conv_transpose.hpp"
#include "ngraph/op/fused/gru_cell.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/fused/leaky_relu.hpp"
#include "ngraph/op/fused/lstm_cell.hpp"
#include "ngraph/op/fused/mvn.hpp"
#include "ngraph/op/fused/normalize.hpp"
......@@ -1343,11 +1342,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
break;
}
case OP_TYPEID::LeakyRelu:
{
node = make_shared<op::LeakyRelu>(args[0], args[1]);
break;
}
case OP_TYPEID::Less:
{
node = make_shared<op::Less>(args[0], args[1], read_auto_broadcast(node_js, "autob"));
......@@ -2503,8 +2497,6 @@ json JSONSerializer::serialize_node(const Node& n)
node["beta"] = tmp->get_beta();
break;
}
case OP_TYPEID::LeakyRelu: { break;
}
case OP_TYPEID::Less:
{
auto tmp = dynamic_cast<const op::Less*>(&n);
......
......@@ -110,7 +110,6 @@ set(SRC
type_prop/gru_cell.cpp
type_prop/hard_sigmoid.cpp
type_prop/index_reduction.cpp
type_prop/leaky_relu.cpp
type_prop/lstm_cell.cpp
type_prop/max_pool.cpp
type_prop/mvn.cpp
......
......@@ -773,43 +773,6 @@ NGRAPH_TEST(${BACKEND_NAME}, grn_2d_with_bias)
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, leaky_relu)
{
auto data_node = make_shared<op::Parameter>(element::f64, Shape{3, 4});
auto alpha_node = make_shared<op::Constant>(element::f64, Shape{}, vector<double>{0.1});
auto leaky_relu = make_shared<op::LeakyRelu>(data_node, alpha_node);
auto function = make_shared<Function>(NodeVector{leaky_relu}, ParameterVector{data_node});
auto test_case = test::NgraphTestCase(function, "${BACKEND_NAME}");
test_case.add_input<double>({numeric_limits<double>::max(),
-numeric_limits<double>::infinity(),
numeric_limits<double>::infinity(),
-8.0,
-6.66667,
-5.5,
-0.0000001,
0,
0.0000001,
4.25,
6.66667,
1000});
test_case.add_expected_output<double>(Shape{3, 4},
{numeric_limits<double>::max(),
-numeric_limits<double>::infinity(),
numeric_limits<double>::infinity(),
-0.8,
-0.666667,
-0.55,
-0.00000001,
0,
0.0000001,
4.25,
6.66667,
1000});
}
NGRAPH_TEST(${BACKEND_NAME}, unsqueeze)
{
auto data_node = make_shared<op::Parameter>(element::f32, Shape{4, 2});
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
TEST(type_prop, leaky_relu)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{3, 6});
auto alpha = make_shared<op::Parameter>(element::f32, Shape{});
auto leaky_relu_func = make_shared<op::LeakyRelu>(data, alpha);
EXPECT_EQ(leaky_relu_func->get_element_type(), element::f32);
EXPECT_EQ(leaky_relu_func->get_shape(), (Shape{3, 6}));
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment