Commit aa52081c authored by Sevin F. Varoglu, committed by Sang Ik Lee

Add erf op, interpreter kernel and unit tests (#2650)

* Add erf op, interpreter kernel and unit tests

* Add review feedback and fix unit tests

* Add unsupported op to GPU and IntelGPU

* Add one more test
parent 62055ab7
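Before the diff itself, a minimal sketch of what the new op enables end to end, mirroring the unit test added below. This is an illustrative example, not code from the commit; note that only the INTERPRETER backend gets a kernel here, while GPU and IntelGPU list the op as unsupported.

#include "ngraph/ngraph.hpp"

using namespace ngraph;

void erf_example()
{
    // Build a one-node graph: f(A) = erf(A).
    auto A = std::make_shared<op::Parameter>(element::f32, Shape{4});
    auto f = std::make_shared<Function>(std::make_shared<op::Erf>(A), ParameterVector{A});

    // Only INTERPRETER has an Erf kernel in this commit;
    // GPU and IntelGPU report the op as unsupported.
    auto backend = runtime::Backend::create("INTERPRETER");
    auto a = backend->create_tensor(element::f32, Shape{4});
    auto result = backend->create_tensor(element::f32, Shape{4});

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a}); // result[i] = std::erf(a[i])
}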
@@ -126,6 +126,8 @@ set (SRC
op/embedding_lookup.hpp
op/equal.cpp
op/equal.hpp
op/erf.cpp
op/erf.hpp
op/exp.cpp
op/exp.hpp
op/experimental/dyn_broadcast.cpp
...
@@ -85,6 +85,7 @@
#include "ngraph/op/dot.hpp" #include "ngraph/op/dot.hpp"
#include "ngraph/op/embedding_lookup.hpp" #include "ngraph/op/embedding_lookup.hpp"
#include "ngraph/op/equal.hpp" #include "ngraph/op/equal.hpp"
#include "ngraph/op/erf.hpp"
#include "ngraph/op/exp.hpp" #include "ngraph/op/exp.hpp"
#include "ngraph/op/experimental/dyn_broadcast.hpp" #include "ngraph/op/experimental/dyn_broadcast.hpp"
#include "ngraph/op/experimental/dyn_pad.hpp" #include "ngraph/op/experimental/dyn_pad.hpp"
...
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/erf.hpp"
#include "ngraph/log.hpp"
#include "ngraph/util.hpp"
using namespace std;
using namespace ngraph;
shared_ptr<Node> op::Erf::copy_with_new_args(const NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<Erf>(new_args.at(0));
}
op::Erf::Erf(shared_ptr<Node> arg)
: UnaryElementwiseArithmetic("Erf", arg)
{
constructor_validate_and_infer_types();
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once

#include "ngraph/op/op.hpp"
#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
#include "ngraph/util.hpp"

namespace ngraph
{
    namespace op
    {
        /// \brief Elementwise error function (erf) operation.
        class Erf : public util::UnaryElementwiseArithmetic
        {
        public:
            /// \param arg Node that produces the input tensor.
            Erf(std::shared_ptr<Node> arg);

            virtual std::shared_ptr<Node>
                copy_with_new_args(const NodeVector& new_args) const override;
        };
    }
}
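A note on the override above: copy_with_new_args is the hook nGraph's graph machinery uses to clone a node onto fresh inputs, so every concrete op must implement it. A hedged sketch of a caller; the variable names are illustrative, not from this commit:

// Given an existing Erf node `n` and a replacement input `new_in`,
// this produces a new Erf node computing erf(new_in).
std::shared_ptr<ngraph::Node> clone = n->copy_with_new_args(ngraph::NodeVector{new_in});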
@@ -85,6 +85,7 @@ NGRAPH_OP(DynPad, ngraph::op)
NGRAPH_OP(DynReshape, ngraph::op)
NGRAPH_OP(DynSlice, ngraph::op)
NGRAPH_OP(Equal, ngraph::op)
NGRAPH_OP(Erf, ngraph::op)
NGRAPH_OP(Exp, ngraph::op)
NGRAPH_OP(Floor, ngraph::op)
NGRAPH_OP(GenerateMask, ngraph::op)
...
@@ -15,3 +15,7 @@ argmax_4D_axis_3_i64_in_i32
# Even after AnyAllReplacement, these trigger an "Unsupported Reduce" error.
any_2x2x3_eliminate_dims_0_1_2
all_2x2x3_eliminate_dims_0_1_2

# Not implemented
erf
zero_sized_erf
@@ -56,6 +56,7 @@
#include "ngraph/op/dot.hpp" #include "ngraph/op/dot.hpp"
#include "ngraph/op/embedding_lookup.hpp" #include "ngraph/op/embedding_lookup.hpp"
#include "ngraph/op/equal.hpp" #include "ngraph/op/equal.hpp"
#include "ngraph/op/erf.hpp"
#include "ngraph/op/exp.hpp" #include "ngraph/op/exp.hpp"
#include "ngraph/op/experimental/dyn_broadcast.hpp" #include "ngraph/op/experimental/dyn_broadcast.hpp"
#include "ngraph/op/experimental/dyn_pad.hpp" #include "ngraph/op/experimental/dyn_pad.hpp"
@@ -611,6 +612,11 @@ std::string runtime::gpu::GPU_Emitter::emit_Equal(EMIT_ARGS)
    return emit_elementwise<ngraph::op::Equal>(compiled_function, function_name, node, args, out);
}

std::string runtime::gpu::GPU_Emitter::emit_Erf(EMIT_ARGS)
{
    throw unsupported_op("Unsupported op '" + node->description() + "'");
}

std::string runtime::gpu::GPU_Emitter::emit_Exp(EMIT_ARGS)
{
    return emit_elementwise<ngraph::op::Exp>(compiled_function, function_name, node, args, out);
...
@@ -119,3 +119,7 @@ model_quant_conv_linear
# This should be implemented
create_tensor_2

# Not implemented
erf
zero_sized_erf
@@ -75,6 +75,7 @@
#include "ngraph/op/dequantize.hpp" #include "ngraph/op/dequantize.hpp"
#include "ngraph/op/dot.hpp" #include "ngraph/op/dot.hpp"
#include "ngraph/op/embedding_lookup.hpp" #include "ngraph/op/embedding_lookup.hpp"
#include "ngraph/op/erf.hpp"
#include "ngraph/op/get_output_element.hpp" #include "ngraph/op/get_output_element.hpp"
#include "ngraph/op/lrn.hpp" #include "ngraph/op/lrn.hpp"
#include "ngraph/op/max.hpp" #include "ngraph/op/max.hpp"
@@ -1997,6 +1998,7 @@ shared_ptr<runtime::Executable>
        case OP_TYPEID::BroadcastLike:
        case OP_TYPEID::DynReshape:
        case OP_TYPEID::DynSlice:
        case OP_TYPEID::Erf:
        case OP_TYPEID::QuantizedAvgPool:
        case OP_TYPEID::QuantizedConvolutionBias:
        case OP_TYPEID::QuantizedConvolutionBiasAdd:
...
@@ -62,3 +62,7 @@ pad_reflect_1d_bottom_neg_bigger_than_tensor
pad_reflect_1d_multi_reflect
pad_reflect_2d
pad_reflect_2d_with_neg

# Not implemented
erf
zero_sized_erf
@@ -90,6 +90,7 @@
#include "ngraph/runtime/reference/dot.hpp" #include "ngraph/runtime/reference/dot.hpp"
#include "ngraph/runtime/reference/embedding_lookup.hpp" #include "ngraph/runtime/reference/embedding_lookup.hpp"
#include "ngraph/runtime/reference/equal.hpp" #include "ngraph/runtime/reference/equal.hpp"
#include "ngraph/runtime/reference/erf.hpp"
#include "ngraph/runtime/reference/exp.hpp" #include "ngraph/runtime/reference/exp.hpp"
#include "ngraph/runtime/reference/floor.hpp" #include "ngraph/runtime/reference/floor.hpp"
#include "ngraph/runtime/reference/generate_mask.hpp" #include "ngraph/runtime/reference/generate_mask.hpp"
@@ -728,6 +729,13 @@ private:
                element_count);
            break;
        }
        case OP_TYPEID::Erf:
        {
            size_t element_count = shape_size(node.get_output_shape(0));
            reference::erf<T>(
                args[0]->get_data_ptr<const T>(), out[0]->get_data_ptr<T>(), element_count);
            break;
        }
        case OP_TYPEID::Exp:
        {
            size_t element_count = shape_size(node.get_output_shape(0));
...
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once

#include <cmath>
#include <cstddef>

namespace ngraph
{
    namespace runtime
    {
        namespace reference
        {
            // Naive reference kernel: apply std::erf elementwise.
            template <typename T>
            void erf(const T* arg, T* out, size_t count)
            {
                for (size_t i = 0; i < count; i++)
                {
                    out[i] = std::erf(arg[i]);
                }
            }
        }
    }
}
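Since the reference kernel is just a loop over std::erf, it can be smoke-tested in isolation, without building a graph or a backend. A minimal hypothetical check, not part of this commit:

#include <cassert>
#include "ngraph/runtime/reference/erf.hpp"

int main()
{
    const float in[3] = {-1.0f, 0.0f, 1.0f};
    float out[3];
    ngraph::runtime::reference::erf<float>(in, out, 3);
    assert(out[1] == 0.0f);    // erf(0) == 0
    assert(out[0] == -out[2]); // erf is an odd function
    return 0;
}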
@@ -46,6 +46,7 @@
#include "ngraph/op/dot.hpp" #include "ngraph/op/dot.hpp"
#include "ngraph/op/embedding_lookup.hpp" #include "ngraph/op/embedding_lookup.hpp"
#include "ngraph/op/equal.hpp" #include "ngraph/op/equal.hpp"
#include "ngraph/op/erf.hpp"
#include "ngraph/op/exp.hpp" #include "ngraph/op/exp.hpp"
#include "ngraph/op/experimental/dyn_broadcast.hpp" #include "ngraph/op/experimental/dyn_broadcast.hpp"
#include "ngraph/op/experimental/dyn_pad.hpp" #include "ngraph/op/experimental/dyn_pad.hpp"
@@ -781,6 +782,11 @@ static shared_ptr<ngraph::Function>
            node = make_shared<op::Equal>(args[0], args[1]);
            break;
        }
        case OP_TYPEID::Erf:
        {
            node = make_shared<op::Erf>(args[0]);
            break;
        }
        case OP_TYPEID::Exp:
        {
            node = make_shared<op::Exp>(args[0]);
@@ -1536,6 +1542,8 @@ static json write(const Node& n, bool binary_constant_data)
        }
        case OP_TYPEID::Equal: { break;
        }
        case OP_TYPEID::Erf: { break;
        }
        case OP_TYPEID::Exp: { break;
        }
        case OP_TYPEID::Floor: { break;
...
@@ -2687,6 +2687,11 @@ NGRAPH_TEST(${BACKEND_NAME}, zero_sized_ceiling)
    make_unary_empty_test<op::Ceiling>("${BACKEND_NAME}");
}

NGRAPH_TEST(${BACKEND_NAME}, zero_sized_erf)
{
    make_unary_empty_test<op::Erf>("${BACKEND_NAME}");
}

NGRAPH_TEST(${BACKEND_NAME}, zero_sized_exp)
{
    make_unary_empty_test<op::Exp>("${BACKEND_NAME}");
...
@@ -211,6 +211,32 @@ NGRAPH_TEST(${BACKEND_NAME}, cosh)
    EXPECT_TRUE(test::all_close_f(input, read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, erf)
{
    Shape shape{8};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Erf>(A), ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{-4.0f, -3.0f, -2.0f, -1.0f, 0.0f, 1.0f, 2.0f, 3.0f});
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f(vector<float>{erf(-4.0f),
                                                erf(-3.0f),
                                                erf(-2.0f),
                                                erf(-1.0f),
                                                erf(0.0f),
                                                erf(1.0f),
                                                erf(2.0f),
                                                erf(3.0f)},
                                  read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, exp)
{
    Shape shape{8};
...