Unverified commit eebf0b70 authored by Adam Procter, committed by GitHub

Implement Power op (#261)

parent 51dcfe07
......@@ -45,6 +45,7 @@ set (SRC
ops/negative.cpp
ops/op.cpp
ops/parameter.cpp
ops/power.cpp
ops/reduce.cpp
ops/reshape.cpp
ops/select.cpp
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#include "ngraph/ops/power.hpp"
#include "ngraph/ops/divide.hpp"
#include "ngraph/ops/log.hpp"
#include "ngraph/ops/multiply.hpp"

void ngraph::op::Power::generate_adjoints(autodiff::Adjoints& adjoints,
                                          const std::shared_ptr<Node>& delta)
{
    auto x = m_arguments[0];
    auto y = m_arguments[1];

    auto log_x = std::make_shared<op::Log>(x);

    adjoints.add_delta(x, delta * y * shared_from_this() / x);
    adjoints.add_delta(y, delta * shared_from_this() * log_x);
}
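
For reference, the two adjoint terms follow from the standard partial derivatives of \(z = x^y\):

\[
\frac{\partial z}{\partial x} = y\,x^{y-1} = \frac{y\,z}{x},
\qquad
\frac{\partial z}{\partial y} = x^y \ln x = z \ln x
\]

so each input's contribution is the incoming `delta` scaled by the corresponding factor; `shared_from_this()` reuses the already-computed \(z = x^y\) rather than rebuilding the power node.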
......@@ -37,9 +37,9 @@ namespace ngraph
///
/// ## Implementation Status
///
-    /// | Backend | Status           |
-    /// | ------- | ---------------- |
-    /// | NGVM    | Not implemented. |
+    /// | Backend | Status             |
+    /// | ------- | ------------------ |
+    /// | NGVM    | Fully implemented. |
class Power : public BinaryElementwiseArithmetic
{
public:
......@@ -59,6 +59,10 @@ namespace ngraph
            throw ngraph_error("Incorrect number of new arguments");
        return std::make_shared<Power>(new_args.at(0), new_args.at(1));
    }

protected:
    virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                   const std::shared_ptr<Node>& delta) override;
};
}
}
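
As a usage sketch (mirroring the test code later in this commit), a `Power` node is built like any other binary elementwise op; `shape` here stands for whatever tensor shape the two parameters share:

auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto X1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Z  = make_shared<op::Power>(X0, X1); // Z = X0 ^ X1, elementwise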
......@@ -1447,6 +1447,20 @@ void Emitter::EmitAtan(const ngraph::Node* n,
TU << "}\n";
}

void Emitter::EmitPower(const ngraph::Node* n,
                        const std::vector<TensorViewInfo>& inputs,
                        const std::vector<TensorViewInfo>& outputs)
{
    TU << "{ // " << n->get_name() << "\n";
    TU.indent++;
    TU << emit_array1d(outputs[0]) << " = \n";
    TU.indent++;
    TU << emit_array1d(inputs[0]) << ".pow(\n ";
    TU << emit_array1d(inputs[1]) << ");\n";
    TU.indent -= 2;
    TU << "}\n";
}
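
For a node hypothetically named `Power_3`, the strings above expand to generated C++ roughly like the following, with `out0`, `arg0`, and `arg1` standing in for the Eigen array expressions that `emit_array1d` actually produces:

{ // Power_3
    out0 =
        arg0.pow(
            arg1);
}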
//------------------------------------------------------------------------------------------------
// Utility methods
//------------------------------------------------------------------------------------------------
......
......@@ -93,6 +93,7 @@ namespace ngraph
void EMITTER_DECL(EmitAsin);
void EMITTER_DECL(EmitAcos);
void EMITTER_DECL(EmitAtan);
void EMITTER_DECL(EmitPower);
private:
void generate_call(const std::vector<TensorViewInfo>& inputs,
......
......@@ -56,6 +56,7 @@
#include "ngraph/ops/multiply.hpp"
#include "ngraph/ops/negative.hpp"
#include "ngraph/ops/not_equal.hpp"
#include "ngraph/ops/power.hpp"
#include "ngraph/ops/reduce.hpp"
#include "ngraph/ops/reshape.hpp"
#include "ngraph/ops/select.hpp"
......@@ -117,6 +118,7 @@ static const OpMap dispatcher{
{TI(ngraph::op::Minimum), &Emitter::EmitMinimum},
{TI(ngraph::op::Negative), &Emitter::EmitNegative},
{TI(ngraph::op::NotEqual), &Emitter::EmitNotEqual},
{TI(ngraph::op::Power), &Emitter::EmitPower},
{TI(ngraph::op::Select), &Emitter::EmitSelect},
{TI(ngraph::op::Subtract), &Emitter::EmitSubtract},
{TI(ngraph::op::ParameterizedConstant<ngraph::element::Bool>),
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"

namespace ngraph
{
    namespace runtime
    {
        namespace ngvm
        {
            namespace eigen
            {
                // Computes out[i] = arg0[i] ^ arg1[i] over the flattened tensors.
                template <typename ET>
                class PowerInstruction : public Instruction
                {
                public:
                    PowerInstruction(const TensorViewInfo& arg0,
                                     const TensorViewInfo& arg1,
                                     const TensorViewInfo& out)
                        : m_arg0(arg0)
                        , m_arg1(arg1)
                        , m_out(out)
                    {
                    }

                    virtual void execute(CallFrame& call_frame) const override
                    {
                        EigenArray1d<ET>(call_frame, m_out) =
                            EigenArray1d<ET>(call_frame, m_arg0)
                                .pow(EigenArray1d<ET>(call_frame, m_arg1));
                    }

                protected:
                    TensorViewInfo m_arg0;
                    TensorViewInfo m_arg1;
                    TensorViewInfo m_out;
                };
            }
        }
    }
}
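
As a standalone illustration (not part of this change) of the coefficient-wise Eigen `pow` semantics the instruction relies on, using the same values as the forward test below:

#include <Eigen/Dense>
#include <iostream>

int main()
{
    // Each output element is base[i] raised to exponent[i].
    Eigen::ArrayXf base(4), exponent(4);
    base << 1, 2, 3, 5;
    exponent << 2, 0, 6, 3;

    std::cout << base.pow(exponent).transpose() << std::endl; // 1 1 729 125
}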
......@@ -50,6 +50,7 @@
#include "ngraph/ops/multiply.hpp"
#include "ngraph/ops/negative.hpp"
#include "ngraph/ops/not_equal.hpp"
#include "ngraph/ops/power.hpp"
#include "ngraph/ops/reduce.hpp"
#include "ngraph/ops/reshape.hpp"
#include "ngraph/ops/select.hpp"
......@@ -98,6 +99,7 @@
#include "ngraph/runtime/ngvm/eigen/multiply.hpp"
#include "ngraph/runtime/ngvm/eigen/negate.hpp"
#include "ngraph/runtime/ngvm/eigen/not_equal.hpp"
#include "ngraph/runtime/ngvm/eigen/power.hpp"
#include "ngraph/runtime/ngvm/eigen/reduce_matrix_columns.hpp"
#include "ngraph/runtime/ngvm/eigen/reduce_matrix_rows.hpp"
#include "ngraph/runtime/ngvm/eigen/reduce_to_scalar.hpp"
......@@ -380,6 +382,7 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
REGISTER_NUMERIC_BINOP(op::Maximum, eigen::MaximumInstruction);
REGISTER_NUMERIC_BINOP(op::Minimum, eigen::MinimumInstruction);
REGISTER_NUMERIC_BINOP(op::Multiply, eigen::MultiplyInstruction);
REGISTER_NUMERIC_BINOP(op::Power, eigen::PowerInstruction);
REGISTER_NUMERIC_BINOP(op::Subtract, eigen::SubtractInstruction);
REGISTER_TO_OP_MAP(op::Constant)
......
......@@ -403,6 +403,48 @@ TEST(backwards, parameter)
EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x0}, .01f, .01f));
}

TEST(backwards, power)
{
    auto manager = runtime::Manager::get("NGVM");
    auto backend = manager->allocate_backend();

    test::Uniform<float> rng_neg(-5.0f, -0.5f);
    test::Uniform<float> rng_pos(0.5f, 5.0f);
    auto shape = Shape{2, 3};

    auto make_graph = [shape]() {
        auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
        auto X1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
        return make_shared<Function>(std::make_shared<op::Power>(X0, X1),
                                     nullptr,
                                     std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
    };

    // Compare the autodiff gradient against a numeric gradient for all four
    // sign combinations of base and exponent.
    auto x0 = rng_neg.initialize(backend->make_primary_tensor_view<float>(shape));
    auto x1 = rng_pos.initialize(backend->make_primary_tensor_view<float>(shape));

    EXPECT_TRUE(
        autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));

    x0 = rng_pos.initialize(backend->make_primary_tensor_view<float>(shape));
    x1 = rng_neg.initialize(backend->make_primary_tensor_view<float>(shape));

    EXPECT_TRUE(
        autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));

    x0 = rng_neg.initialize(backend->make_primary_tensor_view<float>(shape));
    x1 = rng_neg.initialize(backend->make_primary_tensor_view<float>(shape));

    EXPECT_TRUE(
        autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));

    x0 = rng_pos.initialize(backend->make_primary_tensor_view<float>(shape));
    x1 = rng_pos.initialize(backend->make_primary_tensor_view<float>(shape));

    EXPECT_TRUE(
        autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, reshape)
{
auto manager = runtime::Manager::get("NGVM");
......
......@@ -2709,6 +2709,30 @@ TEST(${BACKEND_NAME}, sign)
ASSERT_EQ((vector<float>{1, -1, 0, -1, 1, 0}), result->get_vector<float>());
}

TEST(${BACKEND_NAME}, power)
{
    auto shape = Shape{2, 2};
    auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
    auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
    auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
    auto f = make_shared<Function>(make_shared<op::Power>(A, B), rt, op::Parameters{A, B});

    auto manager = runtime::Manager::get("${BACKEND_NAME}");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);

    // Create some tensors for input/output
    auto a = backend->make_primary_tensor_view(element::Float32::element_type(), shape);
    copy_data(a, vector<float>{1, 2, 3, 5});
    auto b = backend->make_primary_tensor_view(element::Float32::element_type(), shape);
    copy_data(b, vector<float>{2, 0, 6, 3});
    auto result = backend->make_primary_tensor_view(element::Float32::element_type(), shape);

    (*cf)({a, b}, {result});
    // Expected: {1^2, 2^0, 3^6, 5^3} = {1, 1, 729, 125}.
    ASSERT_EQ((vector<float>{1, 1, 729, 125}), result->get_vector<float>());
}

TEST(${BACKEND_NAME}, constant_equality_bool)
{
auto shape = Shape{4};
......