Unverified Commit 41a44f92 authored by Scott Cyphers, committed by GitHub

Atan2 op (#3859) (#3861)

* Atan2 op
parent 4a25881e
......@@ -114,6 +114,8 @@ set (SRC
op/asin.hpp
op/atan.cpp
op/atan.hpp
op/atan2.cpp
op/atan2.hpp
op/avg_pool.cpp
op/avg_pool.hpp
op/batch_norm.cpp
......
......@@ -94,6 +94,7 @@ namespace ngraph
#include "ngraph/op/argmin.hpp"
#include "ngraph/op/asin.hpp"
#include "ngraph/op/atan.hpp"
#include "ngraph/op/atan2.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "ngraph/op/batch_norm.hpp"
#include "ngraph/op/binary_convolution.hpp"
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/atan2.hpp"
using namespace std;
using namespace ngraph;
constexpr NodeTypeInfo op::Atan2::type_info;
op::Atan2::Atan2(const Output<Node>& y, const Output<Node>& x, const AutoBroadcastSpec& autob)
: BinaryElementwiseArithmetic(y, x, autob)
{
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::Atan2::copy_with_new_args(const NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<Atan2>(new_args.at(0), new_args.at(1), this->get_autob());
}
void op::Atan2::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
{
if (get_autob().m_type != op::AutoBroadcastType::NONE)
{
throw ngraph_error("Autodiff not supported with auto broadcasting");
}
throw ngraph_error("Autodiff not supported for Atan2");
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <memory>
#include "ngraph/op/util/binary_elementwise_arithmetic.hpp"
namespace ngraph
{
namespace op
{
/// \brief Elementwise full (two-argument) arctangent operation, i.e. atan2.
class Atan2 : public util::BinaryElementwiseArithmetic
{
public:
NGRAPH_API
static constexpr NodeTypeInfo type_info{"Atan2", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Atan2() = default;
/// \brief atan2(y, x) is the angle, measured from the positive x-axis, to the
/// point (x, y). Note the reversed argument order, matching std::atan2.
///
/// \param y Node producing the y-coordinate input
/// \param x Node producing the x-coordinate input
Atan2(const Output<Node>& y,
const Output<Node>& x,
const AutoBroadcastSpec& autob = AutoBroadcastSpec());
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
}
}
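Not part of the commit: a minimal usage sketch showing how the new op plugs into a Function, mirroring the backend test later in this diff (the helper name is illustrative).

#include "ngraph/ngraph.hpp"
using namespace ngraph;
// Build a Function computing the elementwise angle of points (x, y).
std::shared_ptr<Function> make_atan2_function(const Shape& shape)
{
    auto x = std::make_shared<op::Parameter>(element::f32, shape);
    auto y = std::make_shared<op::Parameter>(element::f32, shape);
    // Note the (y, x) argument order, matching std::atan2.
    auto angle = std::make_shared<op::Atan2>(y, x);
    return std::make_shared<Function>(angle, ParameterVector{x, y});
}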
......@@ -61,6 +61,7 @@ NGRAPH_OP(ArgMax, ngraph::op)
NGRAPH_OP(ArgMin, ngraph::op)
NGRAPH_OP(Asin, ngraph::op)
NGRAPH_OP(Atan, ngraph::op)
NGRAPH_OP(Atan2, ngraph::op)
NGRAPH_OP(AvgPool, ngraph::op)
NGRAPH_OP(AvgPoolBackprop, ngraph::op)
NGRAPH_OP(BatchMatMul, ngraph::op)
......
......@@ -29,6 +29,7 @@
#include "ngraph/op/add.hpp"
#include "ngraph/op/asin.hpp"
#include "ngraph/op/atan.hpp"
#include "ngraph/op/atan2.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/ceiling.hpp"
#include "ngraph/op/constant.hpp"
......@@ -151,6 +152,7 @@ static unordered_map<type_index, function<bool(shared_ptr<Node>, shared_ptr<Node
{TI(op::Acos), cse_unarywise},
{TI(op::Asin), cse_unarywise},
{TI(op::Atan), cse_unarywise},
{TI(op::Atan2), cse_binarywise},
{TI(op::Ceiling), cse_unarywise},
{TI(op::Constant), cse_constant},
{TI(op::Cos), cse_unarywise},
......
......@@ -31,6 +31,7 @@
#include "ngraph/op/and.hpp"
#include "ngraph/op/asin.hpp"
#include "ngraph/op/atan.hpp"
#include "ngraph/op/atan2.hpp"
#include "ngraph/op/ceiling.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/cos.hpp"
......@@ -75,6 +76,7 @@
#include "ngraph/runtime/cpu/kernel/and.hpp"
#include "ngraph/runtime/cpu/kernel/asin.hpp"
#include "ngraph/runtime/cpu/kernel/atan.hpp"
#include "ngraph/runtime/cpu/kernel/atan2.hpp"
#include "ngraph/runtime/cpu/kernel/broadcast.hpp"
#include "ngraph/runtime/cpu/kernel/ceil.hpp"
#include "ngraph/runtime/cpu/kernel/cos.hpp"
......@@ -334,6 +336,12 @@ namespace ngraph
BUILD_UNARY_ELEMWISE_FUNCTOR(runtime::cpu::kernel::atan);
}
template <>
void Builder::BUILDER_DECL(ngraph::op::Atan2)
{
BUILD_BINARY_ELEMWISE_FUNCTOR(runtime::cpu::kernel::atan2);
}
template <>
void Builder::BUILDER_DECL(ngraph::op::Ceiling)
{
......@@ -683,6 +691,7 @@ namespace ngraph
REGISTER_OP_BUILDER(Acos);
REGISTER_OP_BUILDER(Asin);
REGISTER_OP_BUILDER(Atan);
REGISTER_OP_BUILDER(Atan2);
REGISTER_OP_BUILDER(Ceiling);
REGISTER_OP_BUILDER(Cos);
REGISTER_OP_BUILDER(Cosh);
......
......@@ -33,6 +33,7 @@
#include "ngraph/op/argmin.hpp"
#include "ngraph/op/asin.hpp"
#include "ngraph/op/atan.hpp"
#include "ngraph/op/atan2.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "ngraph/op/batch_norm.hpp"
#include "ngraph/op/broadcast.hpp"
......@@ -1892,6 +1893,21 @@ namespace ngraph
writer.block_end();
}
template <>
void CPU_Emitter::EMITTER_DECL(ngraph::op::Atan2)
{
(void)external_function;
(void)node;
writer.block_begin();
writer << "#pragma omp parallel for\n";
writer << "for (size_t i = 0; i < " << out[0].get_size() << "; i++)\n";
writer.block_begin();
writer << out[0].get_name() << "[i] = atan2(" << args[0].get_name() << "[i], "
<< args[1].get_name() << "[i]);\n";
writer.block_end();
writer.block_end();
}
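For reference, the C code the emitter above generates for an output of size n looks roughly like this (the buffer names are illustrative):

#pragma omp parallel for
for (size_t i = 0; i < n; i++)
{
    out0[i] = atan2(arg0[i], arg1[i]);
}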
static void emitArgMinArgMax(const std::vector<TensorViewWrapper>& args,
const std::vector<TensorViewWrapper>& out,
size_t reduction_axis,
......
......@@ -56,6 +56,7 @@
#include "ngraph/op/argmin.hpp"
#include "ngraph/op/asin.hpp"
#include "ngraph/op/atan.hpp"
#include "ngraph/op/atan2.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "ngraph/op/batch_norm.hpp"
#include "ngraph/op/broadcast.hpp"
......@@ -368,6 +369,7 @@ static const runtime::cpu::OpMap dispatcher{
{TI(ngraph::op::ArgMax), &runtime::cpu::CPU_Emitter::emit<op::ArgMax>},
{TI(ngraph::op::Acos), &runtime::cpu::CPU_Emitter::emit<op::Acos>},
{TI(ngraph::op::Atan), &runtime::cpu::CPU_Emitter::emit<op::Atan>},
{TI(ngraph::op::Atan2), &runtime::cpu::CPU_Emitter::emit<op::Atan2>},
{TI(ngraph::op::ReplaceSlice), &runtime::cpu::CPU_Emitter::emit<op::ReplaceSlice>},
{TI(ngraph::op::UpdateSlice), &runtime::cpu::CPU_Emitter::emit<op::UpdateSlice>},
{TI(ngraph::op::OneHot), &runtime::cpu::CPU_Emitter::emit<op::OneHot>},
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <cmath>
#define EIGEN_USE_THREADS
#include <unsupported/Eigen/CXX11/Tensor>
#include "ngraph/runtime/cpu/cpu_executor.hpp"
namespace ngraph
{
namespace runtime
{
namespace cpu
{
namespace kernel
{
template <typename ElementType>
void atan2(void* input0, void* input1, void* output, size_t count, int arena)
{
Eigen::array<Eigen::Index, 1> out_dims, in_dims;
out_dims[0] = in_dims[0] = count;
Eigen::TensorMap<Eigen::Tensor<ElementType, 1, Eigen::RowMajor>> out(
static_cast<ElementType*>(output), out_dims);
Eigen::TensorMap<Eigen::Tensor<ElementType, 1, Eigen::RowMajor>> in0(
static_cast<ElementType*>(input0), in_dims);
Eigen::TensorMap<Eigen::Tensor<ElementType, 1, Eigen::RowMajor>> in1(
static_cast<ElementType*>(input1), in_dims);
out.device(ngraph::runtime::cpu::executor::GetCPUExecutor().get_device(arena)) =
in0.binaryExpr(in1, [](ElementType y, ElementType x) {
return static_cast<ElementType>(std::atan2(y, x));
});
}
}
}
}
}
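A standalone sketch of the Eigen idiom used above, for readers unfamiliar with it: binaryExpr applies a lambda pairwise over two mapped buffers. This version (hypothetical, not part of the commit) evaluates on Eigen's default device rather than the executor's arena device.

#include <cmath>
#include <cstddef>
#include <unsupported/Eigen/CXX11/Tensor>
// Elementwise atan2 over raw buffers via Eigen TensorMap + binaryExpr.
void atan2_sketch(const float* y, const float* x, float* out, size_t count)
{
    Eigen::array<Eigen::Index, 1> dims;
    dims[0] = static_cast<Eigen::Index>(count);
    Eigen::TensorMap<Eigen::Tensor<const float, 1, Eigen::RowMajor>> in0(y, dims);
    Eigen::TensorMap<Eigen::Tensor<const float, 1, Eigen::RowMajor>> in1(x, dims);
    Eigen::TensorMap<Eigen::Tensor<float, 1, Eigen::RowMajor>> result(out, dims);
    // Assigning without a device evaluates immediately on the calling thread.
    result = in0.binaryExpr(in1, [](float a, float b) { return std::atan2(a, b); });
}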
......@@ -87,6 +87,7 @@
#include "ngraph/runtime/reference/argmin.hpp"
#include "ngraph/runtime/reference/asin.hpp"
#include "ngraph/runtime/reference/atan.hpp"
#include "ngraph/runtime/reference/atan2.hpp"
#include "ngraph/runtime/reference/avg_pool.hpp"
#include "ngraph/runtime/reference/batch_mat_mul.hpp"
#include "ngraph/runtime/reference/batch_norm.hpp"
......@@ -357,6 +358,15 @@ private:
args[0]->get_data_ptr<const T>(), out[0]->get_data_ptr<T>(), element_count);
break;
}
case OP_TYPEID::Atan2:
{
size_t element_count = shape_size(node.get_output_shape(0));
reference::atan2<T>(args[0]->get_data_ptr<const T>(),
args[1]->get_data_ptr<const T>(),
out[0]->get_data_ptr<T>(),
element_count);
break;
}
case OP_TYPEID::AvgPool:
{
const op::AvgPool* avg_pool = static_cast<const op::AvgPool*>(&node);
......
......@@ -103,6 +103,7 @@
#include "ngraph/runtime/reference/argmin.hpp"
#include "ngraph/runtime/reference/asin.hpp"
#include "ngraph/runtime/reference/atan.hpp"
#include "ngraph/runtime/reference/atan2.hpp"
#include "ngraph/runtime/reference/avg_pool.hpp"
#include "ngraph/runtime/reference/batch_mat_mul.hpp"
#include "ngraph/runtime/reference/batch_norm.hpp"
......@@ -401,6 +402,15 @@ private:
args[0]->get_data_ptr<const T>(), out[0]->get_data_ptr<T>(), element_count);
break;
}
case OP_TYPEID::Atan2:
{
size_t element_count = shape_size(node.get_output_shape(0));
reference::atan2<T>(args[0]->get_data_ptr<const T>(),
args[1]->get_data_ptr<const T>(),
out[0]->get_data_ptr<T>(),
element_count);
break;
}
case OP_TYPEID::AvgPool:
{
const op::AvgPool* avg_pool = static_cast<const op::AvgPool*>(&node);
......
......@@ -52,6 +52,9 @@ top_k_opset_10 # No plans to implement TopK
top_k_opset_10_const_k # No plans to implement TopK
top_k_opset_11_const_k_smallest # No plans to implement TopK
# unsupported op: `Atan2`
atan2
# unsupported op: `Erf`
erf
gelu_f32
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <cmath>
#include <cstddef>
namespace ngraph
{
namespace runtime
{
namespace reference
{
template <typename X, typename Y, typename Z>
void atan2(const X* py, const Y* px, Z* pout, size_t count)
{
for (size_t i = 0; i < count; i++)
{
*pout++ = static_cast<Z>(std::atan2(*py++, *px++));
}
}
}
}
}
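A quick sanity check of the reference kernel at the quadrant boundaries (a hypothetical standalone snippet, not part of the commit):

#include <cstdio>
#include "ngraph/runtime/reference/atan2.hpp"
int main()
{
    const float y[4] = {0.0f, 1.0f, 0.0f, -1.0f};
    const float x[4] = {1.0f, 0.0f, -1.0f, 0.0f};
    float z[4];
    ngraph::runtime::reference::atan2(y, x, z, 4);
    // Expected: 0, pi/2, pi, -pi/2, following std::atan2 conventions.
    for (int i = 0; i < 4; i++)
        std::printf("atan2(%g, %g) = %g\n", y[i], x[i], z[i]);
    return 0;
}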
......@@ -33,6 +33,7 @@
#include "ngraph/op/argmin.hpp"
#include "ngraph/op/asin.hpp"
#include "ngraph/op/atan.hpp"
#include "ngraph/op/atan2.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "ngraph/op/batch_norm.hpp"
#include "ngraph/op/binary_convolution.hpp"
......@@ -979,6 +980,12 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
node = make_shared<op::Atan>(args[0]);
break;
}
case OP_TYPEID::Atan2:
{
node = make_shared<op::Atan2>(args[0], args[1], read_auto_broadcast(node_js, "autob"));
break;
}
case OP_TYPEID::AvgPool:
{
if (op_version == 0)
......@@ -2939,6 +2946,15 @@ json JSONSerializer::serialize_node(const Node& n)
}
case OP_TYPEID::Atan: { break;
}
case OP_TYPEID::Atan2:
{
auto tmp = dynamic_cast<const op::Atan2*>(&n);
if (tmp->get_autob().m_type != op::AutoBroadcastType::NONE)
{
node["autob"] = write_auto_broadcast(tmp->get_autob());
}
break;
}
case OP_TYPEID::AvgPool:
{
if (op_version == 0)
......
......@@ -266,6 +266,7 @@ set(MULTI_TEST_SRC
backend/arg_reduce.in.cpp
backend/asin.in.cpp
backend/atan.in.cpp
backend/atan2.in.cpp
backend/auto_broadcast.in.cpp
backend/autodiff.in.cpp
backend/batch_mat_mul.in.cpp
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif
#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
NGRAPH_TEST(${BACKEND_NAME}, atan2)
{
Shape shape{30};
auto X = make_shared<op::Parameter>(element::f32, shape);
auto Y = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::Atan2>(Y, X), ParameterVector{X, Y});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
shared_ptr<runtime::Tensor> x = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::Tensor> y = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::Tensor> result = backend->create_tensor(element::f32, shape);
std::vector<float> xref;
std::vector<float> yref;
std::vector<float> zref;
int halfelts = shape.at(0) / 2;
// 4 * atan(1) == pi, so theta steps through small angles on either side of zero.
float scale = 1.0 / (halfelts * 4.0 * std::atan(1.0));
for (int i = 0; i < halfelts; ++i)
{
float theta = i * scale;
zref.push_back(theta);
xref.push_back(static_cast<float>((i + 1) * std::cos(theta)));
yref.push_back(static_cast<float>((i + 1) * std::sin(theta)));
zref.push_back(-theta);
xref.push_back(static_cast<float>((i + 1) * std::cos(-theta)));
yref.push_back(static_cast<float>((i + 1) * std::sin(-theta)));
}
copy_data(x, xref);
copy_data(y, yref);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {x, y});
EXPECT_TRUE(test::all_close_f(read_vector<float>(result), zref));
}
......@@ -79,6 +79,11 @@ TEST(copy, atan)
ASSERT_TRUE(check_unary<op::Atan>());
}
TEST(copy, atan2)
{
ASSERT_TRUE(check_binary<op::Atan2>());
}
TEST(copy, broadcast)
{
Shape shape1{1};
......