Commit 0273b716 authored by Nishant Patel, committed by Scott Cyphers

[MLIR] Add unary op -- Negative (#3391)

* Add negative op

* Add test case

* Address feedback

* Merge master

* Consolidate to one routine for unary ops

* Change from Negative to Neg
parent f62f31e6
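
Editorial note: the lowering added in this commit computes negation element-wise as zero minus the input value (see lower_unary_elementwise in the diff below). As a rough orientation only, and not part of the commit, here is a minimal self-contained C++ sketch of that reference semantics; the helper reference_negative and the small main() driver are hypothetical names introduced for illustration:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical reference helper: each output element is (0 - input),
// mirroring the zero-minus-val pattern used by lower_unary_elementwise.
template <typename T>
std::vector<T> reference_negative(const std::vector<T>& input)
{
    std::vector<T> output(input.size());
    for (std::size_t i = 0; i < input.size(); ++i)
    {
        output[i] = static_cast<T>(0) - input[i];
    }
    return output;
}

int main()
{
    // Same data as the negative_i32 test added at the end of this commit.
    std::vector<int32_t> input{1, 8, -8, 17, -2, 1, 8, -8, 17, -1};
    std::vector<int32_t> expected{-1, -8, 8, -17, 2, -1, -8, 8, -17, 1};
    assert(reference_negative(input) == expected);
    return 0;
}
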
@@ -40,6 +40,7 @@
#include "ngraph/op/maximum.hpp"
#include "ngraph/op/minimum.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/negative.hpp"
#include "ngraph/op/relu.hpp"
#include "ngraph/op/subtract.hpp"
#include "ngraph/op/util/index_reduction.hpp"
@@ -572,6 +573,12 @@ namespace ngraph
{
return compiler.create_generic_op<mlir::NGReluOp>(ng_node);
}
template <>
mlir::Operation* MLIRCompiler::COMPILE_OP_DECL(ngraph::op::Negative)
{
return compiler.create_generic_op<mlir::NGNegOp>(ng_node);
}
}
}
}
......
@@ -94,6 +94,12 @@ namespace
PatternRewriter& rewriter,
DialectLoweringPass& pass);
template <typename OP>
void lower_unary_elementwise(Operation* op,
ArrayRef<Value*> operands,
PatternRewriter& rewriter,
DialectLoweringPass& pass);
/// Conversion from types in the nGraph dialect to the Standard dialect.
class NGraphTypeConverter : public TypeConverter
{
@@ -526,6 +532,13 @@ namespace
return matchSuccess();
}
// Negative
REWRITER(NGNegOp)
{
lower_unary_elementwise<mlir::NGNegOp>(op, operands, rewriter, pass);
return matchSuccess();
}
REWRITER(NGDotOp)
{
auto dot = cast<NGDotOp>(op);
@@ -793,6 +806,67 @@ namespace
#undef REWRITER
/// End of pattern matchers
template <typename OP>
void lower_unary_elementwise(Operation* op,
ArrayRef<Value*> operands,
PatternRewriter& rewriter,
DialectLoweringPass& pass)
{
auto loc = cast<OP>(op).getLoc();
auto result = pass.buildOutputDefs(op, rewriter)[0];
NGRAPH_CHECK(result->getType().isa<MemRefType>());
// Note that the builder's current function is still the original function;
// use getBlock to get the new block instead.
// Get the new (lowered) operand.
Value* lhs = operands[0];
ScopedContext scope(rewriter, loc);
// Views
MemRefView vRes(result), vLHS(lhs);
// Index Values
IndexedValue iRes(result), iLHS(lhs);
// Bounds Index Handles
auto lbs = vLHS.getLbs();
auto ubs = vLHS.getUbs();
// Loop induction vars
auto ivs = IndexHandle::makeIndexHandles(vLHS.rank());
auto pivs = IndexHandle::makeIndexHandlePointers(ivs);
// Steps
auto steps = vLHS.getSteps();
NGRAPH_CHECK(lhs->getType().isa<MemRefType>());
Type elemTy = lhs->getType().cast<MemRefType>().getElementType();
LoopNestBuilder(pivs, lbs, ubs, steps)([&] {
ValueHandle val = iLHS(ivs);
if (isa<NGNegOp>(op))
{
if (auto floatTy = elemTy.dyn_cast<FloatType>())
{
ValueHandle zero = intrinsics::constant_float(llvm::APFloat(0.0f), floatTy);
iRes(ivs) = zero - val;
}
else if (auto intTy = elemTy.dyn_cast<IntegerType>())
{
ValueHandle zero = intrinsics::constant_int(0, intTy.getWidth());
iRes(ivs) = zero - val;
}
else
{
NGRAPH_CHECK(false, "Unsupported type for Negative");
}
}
else
{
NGRAPH_CHECK(false, "Unsupported op");
}
});
rewriter.replaceOp(op, {result});
}
template <typename OP>
void lower_binary_elementwise(Operation* op,
ArrayRef<Value*> operands,
......
@@ -35,6 +35,7 @@ MLIR_OP(NGLessOp)
MLIR_OP(NGMulOp)
MLIR_OP(NGMaxOp)
MLIR_OP(NGMinOp)
MLIR_OP(NGNegOp)
MLIR_OP(NGReluOp)
MLIR_OP(NGSubOp)
MLIR_LAST_OP(NGReturnOp)
......
@@ -15,6 +15,7 @@ MLIR_OP(Less)
MLIR_OP(Maximum)
MLIR_OP(Minimum)
MLIR_OP(Multiply)
MLIR_OP(Negative)
MLIR_OP(Subtract)
MLIR_OP(Relu)
// Add new supported ops here
......
@@ -35,6 +35,7 @@
#include "ngraph/op/maximum.hpp"
#include "ngraph/op/minimum.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/negative.hpp"
#include "ngraph/op/relu.hpp"
#include "ngraph/op/subtract.hpp"
@@ -342,6 +343,12 @@ bool MLIRSubgraphExtractionPass::is_supported_mlir_op(std::shared_ptr<Node> node
return true;
}
}
if (TI(ngraph::op::Negative) == TI(*node))
{
return true;
}
return true;
}
......
@@ -63,3 +63,45 @@ NGRAPH_TEST(${BACKEND_NAME}, negative)
read_vector<float>(result),
MIN_FLOAT_TOLERANCE_BITS));
}
NGRAPH_TEST(${BACKEND_NAME}, negative_i32)
{
auto shape_a = Shape{2, 5};
auto A = make_shared<op::Parameter>(element::i32, shape_a);
auto neg = make_shared<op::Negative>(A);
auto shape_rt = Shape{2, 5};
auto f = make_shared<Function>(neg, ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
auto a = backend->create_tensor(element::i32, shape_a);
copy_data(a, vector<int32_t>{1, 8, -8, 17, -2, 1, 8, -8, 17, -1});
auto result = backend->create_tensor(element::i32, shape_rt);
vector<int32_t> expected{-1, -8, 8, -17, 2, -1, -8, 8, -17, 1};
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ(expected, read_vector<int32_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, negative_f32)
{
auto shape_a = Shape{2, 5};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
auto neg = make_shared<op::Negative>(A);
auto shape_rt = Shape{2, 5};
auto f = make_shared<Function>(neg, ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(
a, vector<float>{1.35f, 8.76f, -8.0f, 17.234f, -2.121f, 1.0f, 8.7f, -8.92f, 17.0f, -1.0f});
auto result = backend->create_tensor(element::f32, shape_rt);
vector<float> expected{
-1.35f, -8.76f, 8.0f, -17.234f, 2.121f, -1.0f, -8.7f, 8.92f, -17.0f, 1.0f};
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ(expected, read_vector<float>(result));
}