Commit 3b30799a authored by Mateusz Bencer, committed by Sang Ik Lee

Add Round op (#4124)

* Added round op

* Add CPU support, unit tests

* Disable UT for PlaidML

* Update year
Co-authored-by: Scott Cyphers <diyessi@users.noreply.github.com>
parent 2347d9e7
......@@ -291,6 +291,8 @@ set (SRC
op/reduce_mean.hpp
op/reduce_sum.cpp
op/reduce_sum.hpp
op/round.cpp
op/round.hpp
op/quantize.cpp
op/quantize.hpp
op/quantized_convolution.cpp
......
......@@ -205,6 +205,7 @@ NGRAPH_OP(Result, ngraph::op::v0, 0)
NGRAPH_OP(Reverse, ngraph::op::v0, 0)
NGRAPH_OP(Reverse, ngraph::op::v1, 1)
NGRAPH_OP(ReverseSequence, ngraph::op::v0, 0)
NGRAPH_OP(Round, ngraph::op::v0, 0)
NGRAPH_OP(ScalarConstantLike, ngraph::op::v0, 0)
NGRAPH_OP(ScaleShift, ngraph::op::v0, 0)
NGRAPH_OP(ScatterAdd, ngraph::op::v0, 0)
......
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/round.hpp"
using namespace std;
using namespace ngraph;
constexpr NodeTypeInfo op::Round::type_info;
// Constructs a Round node over `arg` (single-input unary elementwise op) and
// immediately runs shape/type inference, per the standard nGraph op pattern.
op::Round::Round(const Output<Node>& arg)
    : UnaryElementwiseArithmetic(arg)
{
    constructor_validate_and_infer_types();
}
// Clones this node, substituting the provided input. Used by graph passes
// that rebuild subgraphs; the argument count is validated first.
shared_ptr<Node> op::Round::copy_with_new_args(const NodeVector& new_args) const
{
    check_new_args_count(this, new_args);
    const auto& replacement_input = new_args.at(0);
    return make_shared<Round>(replacement_input);
}
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
namespace ngraph
{
    namespace op
    {
        namespace v0
        {
            /// \brief Elementwise round operation: each element of the input
            ///        tensor is rounded to the nearest integer; halfway cases
            ///        go to the nearest even integer (banker's rounding).
            class NGRAPH_API Round : public util::UnaryElementwiseArithmetic
            {
            public:
                static constexpr NodeTypeInfo type_info{"Round", 0};
                const NodeTypeInfo& get_type_info() const override { return type_info; }
                /// \brief Constructs a round operation.
                Round() = default;
                /// \brief Constructs a round operation. The output is round to the nearest integer
                /// for each value. In case of halfs, the rule is to round them to the nearest even
                /// integer.
                ///
                /// \param arg Node that produces the input tensor.
                Round(const Output<Node>& arg);
                /// \brief Clone with a replacement input; see Node::copy_with_new_args.
                virtual std::shared_ptr<Node>
                    copy_with_new_args(const NodeVector& new_args) const override;
            };
        }
        using v0::Round;
    }
}
......@@ -161,6 +161,7 @@
#include "ngraph/op/result.hpp"
#include "ngraph/op/reverse.hpp"
#include "ngraph/op/reverse_sequence.hpp"
#include "ngraph/op/round.hpp"
#include "ngraph/op/scatter_add.hpp"
#include "ngraph/op/scatter_nd_add.hpp"
#include "ngraph/op/select.hpp"
......
......@@ -173,6 +173,7 @@ NGRAPH_OP(Result, ngraph::op)
NGRAPH_OP(Reverse, ngraph::op)
NGRAPH_OP(ReverseSequence, ngraph::op)
NGRAPH_OP(RNNCell, ngraph::op)
NGRAPH_OP(Round, ngraph::op)
NGRAPH_OP(ScalarConstantLike, ngraph::op)
NGRAPH_OP(ScaleShift, ngraph::op)
NGRAPH_OP(ScatterAdd, ngraph::op)
......
......@@ -23,6 +23,7 @@
#include "ngraph/op/negative.hpp"
#include "ngraph/op/not.hpp"
#include "ngraph/op/relu.hpp"
#include "ngraph/op/round.hpp"
#include "ngraph/op/sign.hpp"
#include "ngraph/op/sqrt.hpp"
#include "ngraph/runtime/reference/abs.hpp"
......@@ -32,6 +33,7 @@
#include "ngraph/runtime/reference/negate.hpp"
#include "ngraph/runtime/reference/not.hpp"
#include "ngraph/runtime/reference/relu.hpp"
#include "ngraph/runtime/reference/round.hpp"
#include "ngraph/runtime/reference/sign.hpp"
#include "ngraph/runtime/reference/sqrt.hpp"
......@@ -42,7 +44,7 @@ bool is_supported_unary_op(std::shared_ptr<Node> n)
{
return is_type<op::Abs>(n) || is_type<op::Ceiling>(n) || is_type<op::Floor>(n) ||
is_type<op::Negative>(n) || is_type<op::Not>(n) || is_type<op::Relu>(n) ||
is_type<op::Sign>(n) || is_type<op::Sqrt>(n);
is_type<op::Round>(n) || is_type<op::Sign>(n) || is_type<op::Sqrt>(n);
}
template <class T>
......@@ -109,6 +111,11 @@ shared_ptr<op::Constant> fold_constant_unary(shared_ptr<op::Constant> constant,
runtime::reference::relu<T>(
constant->get_data_ptr<T>(), buffer.get_ptr<T>(), shape_size(out_shape));
}
else if (is_type<op::Round>(unary))
{
runtime::reference::round<T>(
constant->get_data_ptr<T>(), buffer.get_ptr<T>(), shape_size(out_shape));
}
else if (is_type<op::Sign>(unary))
{
runtime::reference::sign<T>(
......
......@@ -59,6 +59,7 @@
#include "ngraph/op/power.hpp"
#include "ngraph/op/relu.hpp"
#include "ngraph/op/result.hpp"
#include "ngraph/op/round.hpp"
#include "ngraph/op/sign.hpp"
#include "ngraph/op/sin.hpp"
#include "ngraph/op/sinh.hpp"
......@@ -100,6 +101,7 @@
#include "ngraph/runtime/cpu/kernel/or.hpp"
#include "ngraph/runtime/cpu/kernel/relu.hpp"
#include "ngraph/runtime/cpu/kernel/result.hpp"
#include "ngraph/runtime/cpu/kernel/round.hpp"
#include "ngraph/runtime/cpu/kernel/sign.hpp"
#include "ngraph/runtime/cpu/kernel/sin.hpp"
#include "ngraph/runtime/cpu/kernel/sinh.hpp"
......@@ -366,6 +368,12 @@ namespace ngraph
BUILD_UNARY_ELEMWISE_FUNCTOR(runtime::cpu::kernel::floor);
}
// DEX builder for Round: registers the generic unary elementwise functor,
// dispatching to runtime::cpu::kernel::round for the op's element type.
template <>
void Builder::BUILDER_DECL(ngraph::op::Round)
{
    BUILD_UNARY_ELEMWISE_FUNCTOR(runtime::cpu::kernel::round);
}
template <>
void Builder::BUILDER_DECL(ngraph::op::Negative)
{
......@@ -566,6 +574,12 @@ namespace ngraph
BUILD_UNARY_ELEMWISE_CF_FUNCTOR(runtime::cpu::kernel::floor);
}
// Constant-folding builder for Round: same kernel as the runtime builder,
// but wrapped as a NodeExecutorTy so constants can be folded at compile time.
template <>
NodeExecutorTy Builder::BUILDER_CF_DECL(ngraph::op::Round)
{
    BUILD_UNARY_ELEMWISE_CF_FUNCTOR(runtime::cpu::kernel::round);
}
template <>
NodeExecutorTy Builder::BUILDER_CF_DECL(ngraph::op::Ceiling)
{
......@@ -699,6 +713,7 @@ namespace ngraph
REGISTER_OP_BUILDER(Negative);
REGISTER_OP_BUILDER(Exp);
REGISTER_OP_BUILDER(Log);
REGISTER_OP_BUILDER(Round);
REGISTER_OP_BUILDER(Sqrt);
REGISTER_OP_BUILDER(Sign);
REGISTER_OP_BUILDER(Sin);
......@@ -740,6 +755,7 @@ namespace ngraph
REGISTER_CF_BUILDER(And);
REGISTER_CF_BUILDER(Or);
REGISTER_CF_BUILDER(Xor);
REGISTER_CF_BUILDER(Round);
REGISTER_CF_BUILDER(Sign);
REGISTER_CF_BUILDER(Not);
REGISTER_CF_BUILDER(Power);
......
......@@ -2330,6 +2330,21 @@ namespace ngraph
writer.block_end();
}
// Codegen emitter for Round: writes an OpenMP-parallel elementwise loop.
// NOTE: the generated code must match the reference/DEX kernels, which use
// round-half-to-even. C's round() rounds halves AWAY from zero (2.5 -> 3),
// which would diverge from the reference kernel and the backend unit tests
// (e.g. 2.5 -> 2, -4.5 -> -4). nearbyint() honors the current FP rounding
// mode, which defaults to FE_TONEAREST (ties-to-even), giving the intended
// semantics.
template <>
void CPU_Emitter::EMITTER_DECL(ngraph::op::Round)
{
    (void)external_function;
    (void)node;
    writer.block_begin();
    size_t element_count = out[0].get_size();
    writer << "#pragma omp parallel for\n";
    writer << "for (size_t i = 0; i < " << element_count << "; i++)\n";
    writer.block_begin();
    // nearbyint: ties-to-even under the default rounding mode (see note above).
    writer << out[0].get_name() << "[i] = nearbyint(" << args[0].get_name() << "[i]);\n";
    writer.block_end();
    writer.block_end();
}
template <>
void CPU_Emitter::EMITTER_DECL(ngraph::op::Sqrt)
{
......
......@@ -454,7 +454,7 @@ static const runtime::cpu::OpMap dispatcher{
{TI(ngraph::op::Tile), &runtime::cpu::CPU_Emitter::emit<op::Tile>},
{TI(ngraph::op::Gelu), &runtime::cpu::CPU_Emitter::emit<op::Gelu>},
{TI(ngraph::op::GeluBackprop), &runtime::cpu::CPU_Emitter::emit<op::GeluBackprop>},
};
{TI(ngraph::op::Round), &runtime::cpu::CPU_Emitter::emit<op::Round>}};
static void
generate_isnan_isinf_check(CodeWriter& writer,
......
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/runtime/reference/round.hpp"
namespace ngraph
{
namespace runtime
{
namespace cpu
{
namespace kernel
{
// Thin DEX adapter: casts the untyped buffers to the element type and
// forwards to the reference round-half-to-even kernel. `arena` is unused
// but required by the unary elementwise functor signature.
template <typename ElementType>
void round(void* input, void* output, size_t count, int arena)
{
    (void)arena; // unused; present to satisfy the functor signature
    const ElementType* in = static_cast<const ElementType*>(input);
    ElementType* out = static_cast<ElementType*>(output);
    reference::round<ElementType>(in, out, count);
}
}
}
}
}
......@@ -96,6 +96,7 @@
#include "ngraph/runtime/reference/result.hpp"
#include "ngraph/runtime/reference/reverse.hpp"
#include "ngraph/runtime/reference/reverse_sequence.hpp"
#include "ngraph/runtime/reference/round.hpp"
#include "ngraph/runtime/reference/scatter_add.hpp"
#include "ngraph/runtime/reference/scatter_nd_add.hpp"
#include "ngraph/runtime/reference/select.hpp"
......@@ -1590,6 +1591,13 @@ protected:
}
break;
}
case OP_TYPEID::Round:
{
size_t element_count = shape_size(node.get_output_shape(0));
reference::round<T>(
args[0]->get_data_ptr<const T>(), out[0]->get_data_ptr<T>(), element_count);
break;
}
case OP_TYPEID::ScatterAdd:
{
if (node.get_input_element_type(1) == element::i64)
......
......@@ -253,6 +253,10 @@ model_cum_sum_2d_axis_input
model_cum_sum_2d_dynamic_axis_input
model_cum_sum_3d_exclusive_reverse
# unsupported op: `Round`
round
round_2D
# onnx tests
model_quant_conv_linear_2d
model_quant_conv_linear_3d
......
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <cmath>
namespace ngraph
{
namespace runtime
{
namespace reference
{
// Rounds `arg` to the nearest integer; ties (fractional part exactly 0.5)
// are rounded to the nearest EVEN integer (banker's rounding / IEEE-754
// roundTiesToEven), matching the ONNX Round semantics this op implements.
template <typename T>
T round_to_nearest_even(const T arg)
{
    const auto floor_arg = std::floor(arg);
    const auto diff = arg - floor_arg;
    // Parity test via fmod: the naive static_cast<int>(floor_arg) % 2 is
    // undefined behavior when floor_arg exceeds int's range (e.g. large
    // doubles that still carry a .5 fraction); fmod is exact for these.
    if (diff < 0.5 || (diff == 0.5 && std::fmod(floor_arg, 2) == 0))
    {
        return floor_arg;
    }
    else
    {
        return floor_arg + 1;
    }
}
// Elementwise round-half-to-even: writes `count` rounded values from `arg`
// into `out`. Buffers must not be null and must hold at least `count` items.
template <typename T>
void round(const T* arg, T* out, size_t count)
{
    for (size_t i = 0; i < count; ++i)
    {
        out[i] = round_to_nearest_even(arg[i]);
    }
}
}
}
}
......@@ -2654,7 +2654,11 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
}
case OP_TYPEID::ReorgYolo: { break;
}
case OP_TYPEID::Round:
{
node = make_shared<op::Round>(args[0]);
break;
}
case OP_TYPEID::ScalarConstantLike:
{
double value = node_js.at("value").get<double>();
......@@ -3553,6 +3557,8 @@ json JSONSerializer::serialize_node(const Node& n)
}
case OP_TYPEID::ReorgYolo: { break;
}
case OP_TYPEID::Round: { break;
}
case OP_TYPEID::DeformableConvolution_v1:
{
const auto tmp = static_cast<const op::v1::DeformableConvolution*>(&n);
......
......@@ -350,6 +350,7 @@ set(MULTI_TEST_SRC
backend/reshape.in.cpp
backend/reverse_sequence.in.cpp
backend/reverse.in.cpp
backend/round.in.cpp
backend/scatter.in.cpp
backend/select.in.cpp
backend/shape_of.in.cpp
......
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
// 1-D f32 smoke test for Round. The 2.5 -> 2 and -4.5 -> -4 cases pin down
// round-half-to-even (banker's rounding) semantics, distinguishing it from
// C round()'s half-away-from-zero behavior.
NGRAPH_TEST(${BACKEND_NAME}, round)
{
    Shape shape{5};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Round>(A), ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{0.9f, 2.5f, 2.3f, 1.5f, -4.5f});
    auto result = backend->create_tensor(element::f32, shape);
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f((vector<float>{1.0f, 2.0f, 2.0f, 2.0f, -4.0f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));
}
// 2-D f32 test for Round covering positive/negative values and multiple
// half-way ties (0.5, 1.5, 2.5, -1.5, -2.5), all expected to round to the
// nearest even integer.
NGRAPH_TEST(${BACKEND_NAME}, round_2D)
{
    Shape shape{3, 5};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Round>(A), ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a,
              vector<float>{0.1f,
                            0.5f,
                            0.9f,
                            1.2f,
                            1.5f,
                            1.8f,
                            2.3f,
                            2.5f,
                            2.7f,
                            -1.1f,
                            -1.5f,
                            -1.9f,
                            -2.2f,
                            -2.5f,
                            -2.8f});
    auto result = backend->create_tensor(element::f32, shape);
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{
            0.f, 0.f, 1.f, 1.f, 2.f, 2.f, 2.f, 2.f, 3.f, -1.f, -2.f, -2.f, -2.f, -2.f, -3.f}),
        read_vector<float>(result),
        MIN_FLOAT_TOLERANCE_BITS));
}
......@@ -1131,6 +1131,15 @@ namespace
EXPECT_FALSE(node.is_binary_elementwise_logical());
}
// Trait check: a default-constructed Round must classify as unary
// elementwise arithmetic and as none of the binary elementwise categories.
void op_is_Round()
{
    op::Round node;
    EXPECT_TRUE(node.is_unary_elementwise_arithmetic());
    EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
    EXPECT_FALSE(node.is_binary_elementwise_comparison());
    EXPECT_FALSE(node.is_binary_elementwise_logical());
}
void op_is_ScalarConstantLike()
{
op::ScalarConstantLike node;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment