Commit 6bb90e3c authored by Nagy Mostafa, committed by nmostafa

[MLIR] Use .td and tablegen to declare ng dialect ops (#21)

* Initial .td file. CMake changes

* Move all ops to .td file.

* Added a few more opcodes to showcase. Fixed PR feedback

* Removed NG_ prefix from opcode records. Some fixes

* Added some doc

* Added back NG prefix

* Bug fix in MLIR gen
parent 9bb2fad3
......@@ -46,8 +46,9 @@ if (NINJA)
# point find_package to the pre-built libs
set(LLVM_DIR ${MLIR_LLVM_ROOT}/build/lib/cmake/llvm)
set(MLIR_INCLUDE_PATH ${MLIR_SOURCE_DIR}/include)
set(MLIR_INCLUDE_PATH ${MLIR_INCLUDE_PATH};${MLIR_BUILD_DIR}/projects/mlir/include)
set(MLIR_SRC_INCLUDE_PATH ${MLIR_SOURCE_DIR}/include)
set(MLIR_BIN_INCLUDE_PATH ${MLIR_BUILD_DIR}/projects/mlir/include)
set(MLIR_INCLUDE_PATHS ${MLIR_SRC_INCLUDE_PATH};${MLIR_BIN_INCLUDE_PATH})
else()
message(FATAL_ERROR "Cannot find ninja. Cannot build with NGRAPH_MLIR_ENABLE=ON")
endif()
......@@ -39,10 +39,10 @@ if (NGRAPH_MLIR_ENABLE)
target_include_directories(mlir_backend PRIVATE ${LLVM_INCLUDE_DIRS})
message(STATUS "MLIR Headers at : ${MLIR_INCLUDE_PATH}")
message(STATUS "MLIR Headers at : ${MLIR_INCLUDE_PATHS}")
message(STATUS "LLVM Headers at : ${MLIR_LLVM_INCLUDEPATH}")
target_include_directories(mlir_backend PRIVATE ${MLIR_INCLUDE_PATH})
target_include_directories(mlir_backend PRIVATE ${MLIR_INCLUDE_PATHS})
llvm_map_components_to_libnames(llvm_libs support core irreader)
......@@ -92,4 +92,25 @@ if (NGRAPH_MLIR_ENABLE)
# Link ngraph
target_link_libraries(mlir_backend PUBLIC ngraph)
# table-gen dialect ops
# include table-gen helpers
include(${LLVM_DIR}/TableGen.cmake)
function(ngraph_tablegen ofn)
tablegen(MLIR ${ARGV} "-I${MLIR_SRC_INCLUDE_PATH}" "-I${MLIR_BIN_INCLUDE_PATH}")
set(TABLEGEN_OUTPUT ${TABLEGEN_OUTPUT} ${CMAKE_CURRENT_BINARY_DIR}/${ofn} PARENT_SCOPE)
endfunction()
set(MLIR_TABLEGEN_EXE mlir-tblgen)
set(LLVM_TARGET_DEFINITIONS dialect/ops.td)
ngraph_tablegen(ops.h.inc -gen-op-decls)
ngraph_tablegen(ops.cpp.inc -gen-op-defs)
add_public_tablegen_target(ngraph_ops_gen)
add_dependencies(mlir_backend ngraph_ops_gen)
target_include_directories(mlir_backend PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
endif()
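The `ngraph_tablegen` calls above run `mlir-tblgen` over `dialect/ops.td` at build time, dropping `ops.h.inc` and `ops.cpp.inc` into `CMAKE_CURRENT_BINARY_DIR` (hence the extra include directory). The dialect sources then pull the generated code in behind TableGen's guard macro, as the ops.hpp/ops.cpp hunks below show:

```cpp
// How the generated files are consumed (mirrors the hunks below):
// in the ops header, declare the generated op classes...
#define GET_OP_CLASSES
#include "ops.h.inc"

// ...and in the ops .cpp, emit their method definitions (build(), verify(), ...).
#define GET_OP_CLASSES
#include "ops.cpp.inc"
```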
......@@ -14,19 +14,7 @@
// limitations under the License.
//*****************************************************************************
#include "compiler.hpp"
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/experimental/compiled_kernel.hpp"
#include "ngraph/runtime/cpu/op/matmul_bias.hpp"
#include "ngraph/type/element_type.hpp"
#include "dialect/dialect.hpp"
#include "dialect/ops.hpp"
#include "dialect/type.hpp"
#include "lowerer.hpp"
#include <llvm/ADT/STLExtras.h>
#include <llvm/IR/Module.h>
......@@ -44,6 +32,16 @@
#include <mlir/Target/LLVMIR.h>
#include <mlir/Transforms/DialectConversion.h>
#include <mlir/Transforms/Passes.h>
#include "dialect/dialect.hpp"
#include "dialect/type.hpp"
#include "lowerer.hpp"
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/experimental/compiled_kernel.hpp"
#include "ngraph/runtime/cpu/op/matmul_bias.hpp"
#include "ngraph/type/element_type.hpp"
using llvm::SmallVector;
using llvm::StringRef;
......@@ -246,7 +244,7 @@ namespace ngraph
template <>
mlir::Value* MLIRCompiler::COMPILE_OP_DECL(ngraph::op::Add)
{
return compiler.create_binary_op<NG_AddOp>(ng_node);
return compiler.create_binary_op<NGAddOp>(ng_node);
}
template <>
......@@ -257,7 +255,7 @@ namespace ngraph
NGRAPH_ASSERT(ng_node->get_arguments().size() == 2)
<< "Bias is not supported in MatmulBias operation";
return compiler.create_binary_op<NG_MatmulBiasOp>(ng_node);
return compiler.create_binary_op<NGMatMulBiasOp>(ng_node);
}
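Dispatch for additional nGraph ops follows the same template-specialization pattern. As a hypothetical example (not part of this commit), subtraction would forward to the TableGen'd `NGSubOp` declared in ops.td below:

```cpp
// Hypothetical sketch: wiring ngraph::op::Subtract to the generated NGSubOp,
// following the same pattern as the Add and MatmulBias specializations above.
template <>
mlir::Value* MLIRCompiler::COMPILE_OP_DECL(ngraph::op::Subtract)
{
    return compiler.create_binary_op<NGSubOp>(ng_node);
}
```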
const MLIRCompiler::MLIRCompOpMap MLIRCompiler::op_dispatcher{
......@@ -282,7 +280,7 @@ namespace ngraph
{
value_list.push_back(get_tensor_value(output->get_output_tensor_ptr().get()).m_value);
}
m_builder->create<NG_ReturnOp>(mlir::UnknownLoc::get(&m_context), value_list);
m_builder->create<NGReturnOp>(mlir::UnknownLoc::get(&m_context), value_list);
}
void MLIRCompiler::bind_arguments()
......
......@@ -29,10 +29,11 @@ NGDialect::NGDialect(mlir::MLIRContext* ctx)
addTypes<NGTensorType>();
addTypes<NGIntegerType>();
addTypes<NGBoolType>();
addOperations<NG_AddOp>();
addOperations<NG_MatmulBiasOp>();
addOperations<NG_ReturnOp>();
addOperations<NG_FakeInput>();
addOperations<NGAddOp>();
addOperations<NGMatMulBiasOp>();
addOperations<NGReturnOp>();
addOperations<NGFakeInputOp>();
}
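Note that each new op still has to be registered by hand in this constructor. TableGen also emits the full op list behind a `GET_OP_LIST` guard, so an alternative (a sketch of a standard MLIR idiom, not what this commit does) is to let the generated file drive registration:

```cpp
// Alternative sketch: registration driven by the generated op list, so a new
// .td record is picked up here automatically at the next build.
addOperations<
#define GET_OP_LIST
#include "ops.cpp.inc"
    >();
```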
void NGDialect::printType(mlir::Type type, raw_ostream& os) const
......
......@@ -33,133 +33,81 @@ namespace ngraph
{
namespace ngmlir
{
// TODO:
// - Move verifiers and other OP helpers (e.g. getSomeAttribute()) to separate files
//
// - Op helpers: Since it is not possible to add arbitrary code to Op classes (and it would
//   complicate the .td file), we will add helper classes with static methods for each Op that needs them
// Additional verification methods
// Tensor type checks are already verified by the caller of these methods
template <typename T>
static mlir::LogicalResult verifyBinOperands(T* op)
static mlir::LogicalResult verifyUnaryArithOp(T* op)
{
if (!op->getOperand(0)->getType().template isa<NGTensorType>())
{
std::string msg;
raw_string_ostream os(msg);
os << "expects a Tensor type for LHS, got " << op->getOperand(0)->getType();
return op->emitOpError(os.str());
}
if (!op->getOperand(1)->getType().template isa<NGTensorType>())
{
std::string msg;
raw_string_ostream os(msg);
os << "expects a Tensor type for RHS, got " << op->getOperand(0)->getType();
return op->emitOpError(os.str());
}
// TODO: Check matching element types
return mlir::success();
}
// Additional verification methods
// Tensor type checks are already verified by the caller of these methods
template <typename T>
static mlir::LogicalResult verifySingleOperand(T* op)
static mlir::LogicalResult verifyBinaryArithOp(T* op)
{
if (!op->getOperand()->getType().template isa<NGTensorType>())
{
std::string msg;
raw_string_ostream os(msg);
os << "expects a Tensor Type for its argument, got "
<< op->getOperand()->getType();
return op->emitOpError(os.str());
}
// TODO: Check matching element types
return mlir::success();
}
}
}
void runtime::ngmlir::NG_FakeInput::build(mlir::Builder* builder,
mlir::OperationState* state,
mlir::Type resultType)
{
state->types.push_back(std::move(resultType));
}
mlir::LogicalResult runtime::ngmlir::NG_FakeInput::verify()
{
// TODO: Verify returned tensor types must match function return type.
return mlir::success();
}
void runtime::ngmlir::NG_AddOp::build(mlir::Builder* builder,
mlir::OperationState* state,
mlir::Value* lhs,
mlir::Value* rhs)
{
state->types.push_back(lhs->getType());
state->operands.push_back(lhs);
state->operands.push_back(rhs);
}
template <typename T>
static mlir::LogicalResult verifyOp(T* op)
{
return op->emitOpError("Unsupported verifier for this operation");
}
mlir::LogicalResult runtime::ngmlir::NG_AddOp::verify()
{
// TODO: verify matching elt types
verifyBinOperands(this);
return mlir::success();
}
// Per op specializations
template <>
mlir::LogicalResult verifyOp<NGMatMulBiasOp>(NGMatMulBiasOp* op)
{
// Verify that we have 3 operands
if (op->getNumOperands() != 3)
{
std::stringstream ss;
ss << "Unexpected MatmulBiasOp with " << op->getNumOperands()
<< " operands. 3 operands expected";
return op->emitOpError(ss.str());
}
void runtime::ngmlir::NG_MatmulBiasOp::build(mlir::Builder* builder,
mlir::OperationState* state,
mlir::Value* lhs,
mlir::Value* rhs)
{
state->types.push_back(lhs->getType());
state->operands.push_back(lhs);
state->operands.push_back(rhs);
}
// Bias operand must be null for now (not implemented).
if (op->getOperand(2) != nullptr)
{
return op->emitOpError("Bias operand is not null in MatmulBiasOp");
}
mlir::LogicalResult runtime::ngmlir::NG_MatmulBiasOp::verify()
{
// Verify that we have 3 operands
if (getNumOperands() != 3)
{
std::stringstream ss;
ss << "Unexpected MatmulBiasOp with " << getNumOperands()
<< " operands. 3 operands expected";
return emitOpError(ss.str());
}
// Verify that operand types are supported.
auto op0_tensor_ty = op->getOperand(0)->getType().cast<NGTensorType>();
auto op1_tensor_ty = op->getOperand(1)->getType().cast<NGTensorType>();
// Bias operand must be null for now (not implemented).
if (getOperand(2) != nullptr)
{
return emitOpError("Bias operand is not null in MatmulBiasOp");
}
// Verify that operand shapes are supported.
if (op0_tensor_ty.getRank() == 2 && op1_tensor_ty.getRank() == 2)
{
return op->emitOpError(
"Unsupported number of dimensions. Only 2D tensors are supported in "
"MatmulBiasOp");
}
// Verify that operand types are supported.
auto op0_tensor_ty = getOperand(0)->getType().dyn_cast<NGTensorType>();
auto op1_tensor_ty = getOperand(1)->getType().dyn_cast<NGTensorType>();
if (!op0_tensor_ty || !op1_tensor_ty)
{
return emitOpError("Unsupported non-tensor type in MatmulBiasOp");
}
// TODO(dcab): Improve verification: matching types, proper shapes, etc.
// Verify that operand shapes are supported.
if (op0_tensor_ty.getRank() == 2 && op1_tensor_ty.getRank() == 2)
{
return emitOpError(
"Unsupported number of dimensions. Only 2D tensors are supported in MatmulBiasOp");
return mlir::success();
}
}
// TODO(dcab): Improve verification: matching types, proper shapes, etc.
return mlir::success();
}
void runtime::ngmlir::NG_ReturnOp::build(mlir::Builder* builder,
mlir::OperationState* state,
std::vector<mlir::Value*> value_list)
using namespace mlir;
namespace runtime
{
for (auto value : value_list)
namespace ngmlir
{
if (value)
state->operands.push_back(value);
#define GET_OP_CLASSES
#include "ops.cpp.inc"
}
}
mlir::LogicalResult runtime::ngmlir::NG_ReturnOp::verify()
{
// TODO: Verify returned tensor types must match function return type.
return mlir::success();
}
}
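The helper functions above never appear in a header: `mlir-tblgen` splices each record's `let verifier = [{ ... }]` block verbatim into the generated `verify()` method inside `ops.cpp.inc`, which is included in this same translation unit. Roughly (an illustrative sketch, not the literal generated code):

```cpp
// Illustrative sketch of ops.cpp.inc: one verify() per op, with the body
// pasted from the .td record's `verifier` code block.
mlir::LogicalResult NGAddOp::verify()
{
    return verifyBinaryArithOp(this); // from `let verifier = [{ ... }]`
}
```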
......@@ -28,104 +28,13 @@ namespace ngraph
{
namespace ngmlir
{
// Fake instructions
/// Fake Input
/// Used as fake definitions during dialect conversion.
/// Used when we cannot insert the real definition at once during lowering.
/// They are cleaned up after dialect lowering and replaced with the real definition.
class NG_FakeInput : public mlir::Op<NG_FakeInput,
mlir::OpTrait::NOperands<0>::Impl,
mlir::OpTrait::OneResult,
mlir::OpTrait::HasNoSideEffect>
{
public:
static llvm::StringRef getOperationName() { return "ng.fake.output"; }
mlir::LogicalResult verify();
static void
build(mlir::Builder* builder, mlir::OperationState* state, mlir::Type type);
/// Inherit constructor.
using Op::Op;
};
// Binary instructions
class NG_AddOp : public mlir::Op<NG_AddOp,
mlir::OpTrait::NOperands<2>::Impl,
mlir::OpTrait::OneResult,
mlir::OpTrait::HasNoSideEffect>
{
public:
static llvm::StringRef getOperationName() { return "ng.add"; }
/// custom verification
mlir::LogicalResult verify();
static void build(mlir::Builder* builder,
mlir::OperationState* state,
mlir::Value* lhs,
mlir::Value* rhs);
/// Convenience accessor for LHS of the expression.
mlir::Value* getLHS() { return getOperand(0); }
/// Convenience accessor for RHS of the expression.
mlir::Value* getRHS() { return getOperand(1); }
/// Inherit constructor.
using Op::Op;
};
// TODO(dcab): Doc
// TODO(dcab): Increase operands to 3 when supporting bias.
class NG_MatmulBiasOp : public mlir::Op<NG_MatmulBiasOp,
mlir::OpTrait::NOperands<2>::Impl,
mlir::OpTrait::OneResult,
mlir::OpTrait::HasNoSideEffect>
{
public:
static llvm::StringRef getOperationName() { return "ng.matmul.bias"; }
/// Custom verification.
mlir::LogicalResult verify();
static void build(mlir::Builder* builder,
mlir::OperationState* state,
mlir::Value* lhs,
mlir::Value* rhs);
/// Convenience accessor for LHS of the expression.
mlir::Value* getLHS() { return getOperand(0); }
/// Convenience accessor for RHS of the expression.
mlir::Value* getRHS() { return getOperand(1); }
/// Convenience accessor for bias operand.
mlir::Value* getBias() { return nullptr; } // TODO
/// Inherit constructor.
using Op::Op;
};
/// Return operations terminate blocks (and functions as well). They take a
/// single argument and the type must match the function return type.
class NG_ReturnOp : public mlir::Op<NG_ReturnOp,
mlir::OpTrait::VariadicOperands,
mlir::OpTrait::ZeroResult,
mlir::OpTrait::IsTerminator>
{
public:
static llvm::StringRef getOperationName() { return "ng.return"; }
/// Operations can add custom verification beyond the traits they define.
mlir::LogicalResult verify();
/// Interface to mlir::Builder::create<NG_ReturnOp>(...)
/// This method populates the `state` that MLIR uses to create operations.
/// The `ng.return` operation accepts an optional single array as an argument
/// and does not have any returned value.
static void build(mlir::Builder* builder,
mlir::OperationState* state,
std::vector<mlir::Value*> value_list);
/// Return true if there is a returned value.
bool hasOperand() { return 0 != getNumOperands(); }
/// Helper to return the optional operand. Caller must check if the operand
/// is present before calling this.
mlir::Value* getOperand() { return getOperation()->getOperand(0); }
mlir::Value* getOperand(unsigned i) { return getOperation()->getOperand(i); }
/// Inherit constructor.
using Op::Op;
};
// TODO: We shouldn't have this here, but we need to expose the MLIR types for the .inc file to use.
// We cannot forward-declare the MLIR types since they rely on the Ops we are defining (see Op<NGAddOp, ...>).
//
// Other ways to avoid namespace pollution?
using namespace mlir;
#define GET_OP_CLASSES
#include "ops.h.inc"
}
}
}
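The generated declarations spell MLIR types unqualified, which is what forces the `using namespace mlir` above. For `NGAddOp`, `ops.h.inc` declares roughly the same class that was hand-written before this commit (a sketch; the exact accessor names depend on the .td argument names):

```cpp
// Sketch of the ops.h.inc declaration for NGAddOp (illustrative). Note the
// unqualified Op/OpTrait/Value names, which require `using namespace mlir`.
class NGAddOp : public Op<NGAddOp,
                          OpTrait::NOperands<2>::Impl,
                          OpTrait::OneResult,
                          OpTrait::HasNoSideEffect>
{
public:
    using Op::Op;
    static StringRef getOperationName() { return "ng.add"; }
    Value* lhs() { return this->getOperation()->getOperand(0); } // from $lhs
    Value* rhs() { return this->getOperation()->getOperand(1); } // from $rhs
    LogicalResult verify();
};
```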
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
//
// This is the NGraph Dialect operation definition file.
//
//===----------------------------------------------------------------------===//
include "mlir/IR/OpBase.td"
// NGraph Dialect operations definitions
//
// This files declares NGraph operations that table-gen uses to create C++ code
// For more information about tablegen. See https://llvm.org/docs/TableGen/index.html
//
// The output files are ops.h.inc and ops.cpp.inc and are generated at build time
// The file declares base classes to ease opcode definitions and hoist common parts out.
// Each class fixes a set of attributes. For example:
// class NG_Unary_Arith_Op defines a base class for all unary arithmetic ops without side-effects
//
// An opcode is a record definition of the form
// def AbsOp : NG_Unary_Arith_Op<"abs">;
//
// Each def will corresponding to a C++ class
// NGraph Types
// This defines records equivalent to NGraph types. It doesn't generate code.
// This is used as a type in the DAG input/outputs.
// Constraints (CPred) are used to type-check args/results of that type during op verification
def NG_TensorType : Type<CPred<"{0}.isa<ngraph::runtime::ngmlir::NGTensorType>()">,
"NGraph Tensor Type">;
// NGraph operation base class.
// Prepends "ng." to operation name
class NG_Op<string mnemonic, list<OpTrait> traits = []> :
Op<!strconcat("ng.", mnemonic), traits> {}
// Operations producing a single result.
// The OneResult trait will be set based on the Results out dag.
class NG_OneResult_Op<string mnemonic, list<OpTrait> traits = []> :
NG_Op<mnemonic, traits>, Results<(outs NG_TensorType:$res)> {}
// Operations producing no results
class NG_ZeroResult_Op<string mnemonic, list<OpTrait> traits = []> :
NG_Op<mnemonic, traits>, Results<(outs)> {}
// Arithmetic unary operations
// Input and output have the same type
class NG_Unary_Arith_Op<string mnemonic, list<OpTrait> traits = []> :
NG_OneResult_Op<mnemonic, !listconcat([NoSideEffect, SameValueType], traits)>,
Arguments<(ins NG_TensorType:$arg)>
{
// TODO: Implement
let parser = [{ NGRAPH_FAIL() << "No parser support"; return false; }];
let verifier = [{ return verifyUnaryArithOp(this); }];
}
// Arithmetic binary operations
// Inputs and outputs have the same type
class NG_Binary_Arith_Op<string mnemonic, list<OpTrait> traits = []> :
NG_OneResult_Op<mnemonic, !listconcat([NoSideEffect, SameValueType], traits)>,
Arguments<(ins NG_TensorType:$lhs, NG_TensorType:$rhs)>
{
// TODO: Implement
let parser = [{ NGRAPH_FAIL() << "No parser support"; return false; }];
let verifier = [{ return verifyBinaryArithOp(this); }];
}
// Base class for terminator operations.
class NG_Terminator_Op<string mnemonic, list<OpTrait> traits = []> :
NG_Op<mnemonic, !listconcat(traits, [Terminator])>,
Arguments<(ins Variadic<NG_TensorType>:$args)>, Results<(outs)> {}
// Unary Operations
def NGAbsOp : NG_Unary_Arith_Op<"abs">;
def NGCeilOp : NG_Unary_Arith_Op<"ceil">;
def NGConvertOp : NG_Unary_Arith_Op<"conv">;
def NGExpOp : NG_Unary_Arith_Op<"exp">;
// Binary Operations
def NGAddOp : NG_Binary_Arith_Op<"add", [Commutative]>;
def NGAndOp : NG_Binary_Arith_Op<"and", [Commutative]>;
def NGSubOp : NG_Binary_Arith_Op<"sub">;
def NGDivOp : NG_Binary_Arith_Op<"div">;
// Comparison
def NGEqOp : NG_OneResult_Op<"equal", [NoSideEffect]>;
def NGNotEqOp : NG_OneResult_Op<"not.equal", [NoSideEffect]>;
// Matrix Multiply
def NGMatMulBiasOp : NG_Binary_Arith_Op<"matmul.bias">
{
let verifier = [{ return verifyOp(this); }];
}
// Terminator Ops
def NGReturnOp : NG_Terminator_Op<"return">;
// Fake ops
def NGFakeInputOp : NG_OneResult_Op<"fake.input", [NoSideEffect]>;
\ No newline at end of file
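With these base classes in place, adding another arithmetic op to the dialect is a one-line record; a hypothetical `def NGMulOp : NG_Binary_Arith_Op<"mul", [Commutative]>;` would yield a complete `NGMulOp` class, builder and verifier included, at the next build.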
......@@ -28,14 +28,13 @@
#include "mlir/Transforms/DialectConversion.h"
#include "ngraph/assertion.hpp"
using namespace ngraph::runtime::ngmlir;
// anonymous namespace
// no need to expose any of the following outside of this file
namespace
{
using namespace mlir;
using namespace mlir::edsc;
using namespace ngraph::runtime::ngmlir;
using namespace ngraph::runtime;
class DialectLoweringPass;
#include "op_lowerers.inc"
......@@ -56,9 +55,9 @@ namespace
// Initialize the list of converters.
llvm::DenseSet<DialectOpConversion*> initConverters(MLIRContext* context) override
{
return ConversionListBuilder<NG_AddOpConversion,
NG_MatmulBiasOpConversion,
NG_ReturnOpConversion>::build(&allocator, context, m_pass);
return ConversionListBuilder<NGAddOpConversion,
NGMatMulBiasOpConversion,
NGReturnOpConversion>::build(&allocator, context, m_pass);
}
private:
......@@ -70,7 +69,7 @@ namespace
class DialectLoweringPass : public ModulePass<DialectLoweringPass>
{
public:
DialectLoweringPass(MLIRCompiler& compiler)
DialectLoweringPass(ngmlir::MLIRCompiler& compiler)
: m_dialectLowerer(*this)
, m_compiler(compiler)
{
......@@ -96,7 +95,7 @@ namespace
std::map<Value*, unsigned> m_outputValueMap;
// list of results values to add to func signature
SmallVector<Value*, 4> m_loweredOutputValues;
MLIRCompiler& m_compiler;
ngmlir::MLIRCompiler& m_compiler;
};
void DialectLoweringPass::runOnModule()
......@@ -132,7 +131,7 @@ namespace
// we find the output values by looking at the returned values;
// any return should return all outputs of the subgraph
f->walk<NG_ReturnOp>([this, &outputCount](NG_ReturnOp ret) {
f->walk<ngmlir::NGReturnOp>([this, &outputCount](ngmlir::NGReturnOp ret) {
for (unsigned i = 0; i < ret.getNumOperands(); i++)
{
this->m_outputValueMap.insert(std::pair<Value*, unsigned>(ret.getOperand(i), i));
......@@ -152,8 +151,8 @@ namespace
// however, due to how the DialectConversion framework works, the new func is only
// materialized after conversion is done (rewriter->getFunction(), or even rewriter->getInsertionBlock()->getFunction(),
// will give you the original func). This makes it very convoluted to insert instructions at the entry block.
auto op = rewriter->create<NG_FakeInput>(rewriter->getUnknownLoc(),
IndexType::get(getModule().getContext()));
auto op = rewriter->create<ngmlir::NGFakeInputOp>(rewriter->getUnknownLoc(),
IndexType::get(getModule().getContext()));
// will be fixed later to read passed arg instead.
m_memMgrDefs.push_back(op.getResult());
return op.getResult();
......@@ -172,7 +171,7 @@ namespace
{
unsigned argId = (*it).second;
auto newResult = rewriter
.create<NG_FakeInput>(
.create<ngmlir::NGFakeInputOp>(
op->getLoc(),
m_dialectLowerer.convertType(
origResult->getType()) /* convert to lowered type */
......@@ -183,7 +182,7 @@ namespace
}
else
{
auto tensorType = origResult->getType().cast<NGTensorType>();
auto tensorType = origResult->getType().cast<ngmlir::NGTensorType>();
auto callBackFunc = getCallDecl("__mlir_allocate",
{rewriter.getIndexType(), rewriter.getIndexType()},
{tensorType.toMemref()},
......@@ -237,7 +236,8 @@ namespace
for (auto value : m_loweredOutputValues)
{
auto op = value->getDefiningOp();
NGRAPH_ASSERT(op->isa<NG_FakeInput>()) << "output value not defined by fake output?";
NGRAPH_ASSERT(op->isa<ngmlir::NGFakeInputOp>())
<< "output value not defined by fake output?";
value->replaceAllUsesWith(entryBlock->getArgument(oldFuncType.getNumInputs() + i));
op->erase();
i++;
......@@ -268,23 +268,23 @@ namespace
// NGDialect converters
Type DialectLowerer::convertType(Type t)
{
if (auto tensor = t.dyn_cast<NGTensorType>())
if (auto tensor = t.dyn_cast<ngmlir::NGTensorType>())
{
return tensor.toMemref();
}
// element type
if (auto type = t.dyn_cast<NGFloatType>())
if (auto type = t.dyn_cast<ngmlir::NGFloatType>())
{
// Float
// float types are already std type
return type;
}
if (auto type = t.dyn_cast<NGIntegerType>())
if (auto type = t.dyn_cast<ngmlir::NGIntegerType>())
{
// map it to std type
return type.toStdType();
}
if (auto type = t.dyn_cast<NGBoolType>())
if (auto type = t.dyn_cast<ngmlir::NGBoolType>())
{
return type.toStdType();
}
......@@ -293,11 +293,11 @@ namespace
}
// ADD
SmallVector<Value*, 4> NG_AddOpConversion::rewrite(Operation* op,
ArrayRef<Value*> operands,
FuncBuilder& rewriter) const
SmallVector<Value*, 4> NGAddOpConversion::rewrite(Operation* op,
ArrayRef<Value*> operands,
FuncBuilder& rewriter) const
{
auto add = op->cast<NG_AddOp>();
auto add = op->cast<ngmlir::NGAddOp>();
auto loc = add.getLoc();
Value *origResult, *newResult;
......@@ -330,15 +330,14 @@ namespace
return {result};
}
SmallVector<Value*, 4> NG_MatmulBiasOpConversion::rewrite(Operation* op,
ArrayRef<Value*> operands,
FuncBuilder& rewriter) const
SmallVector<Value*, 4> NGMatMulBiasOpConversion::rewrite(Operation* op,
ArrayRef<Value*> operands,
FuncBuilder& rewriter) const
{
auto matmul = op->cast<NG_MatmulBiasOp>();
auto matmul = op->cast<ngmlir::NGMatMulBiasOp>();
auto loc = matmul.getLoc();
NGRAPH_ASSERT(!matmul.getBias() && operands.size() == 2)
<< "Bias is not supported yet in MatmulBias operation";
NGRAPH_ASSERT(operands.size() == 2) << "Bias is not supported yet in MatmulBias operation";
// Retrieve/generate Values for operands and result.
ScopedContext scope(rewriter, loc);
......@@ -397,9 +396,9 @@ namespace
return {result};
}
SmallVector<Value*, 4> NG_ReturnOpConversion::rewrite(Operation* op,
ArrayRef<Value*> operands,
FuncBuilder& rewriter) const
SmallVector<Value*, 4> NGReturnOpConversion::rewrite(Operation* op,
ArrayRef<Value*> operands,
FuncBuilder& rewriter) const
{
rewriter.create<ReturnOp>(op->getLoc());
return {};
......
......@@ -29,8 +29,8 @@ public:\
DialectLoweringPass& m_pass;\
};
DECL_OP_CONV(NG_AddOp)
DECL_OP_CONV(NG_MatmulBiasOp)
DECL_OP_CONV(NG_ReturnOp)
DECL_OP_CONV(NGAddOp)
DECL_OP_CONV(NGMatMulBiasOp)
DECL_OP_CONV(NGReturnOp)
#undef DECL_OP_CONV
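The head of the `DECL_OP_CONV` macro is collapsed in this hunk; only the `m_pass` member and closing brace are visible above. Assuming it wraps MLIR's `DialectOpConversion` (the base class whose `rewrite()` the lowerer.cpp methods above define), each use expands to roughly:

```cpp
// Rough expansion of DECL_OP_CONV(NGAddOp); partly assumed, since the macro
// body is collapsed in this hunk. rewrite() is defined in lowerer.cpp.
class NGAddOpConversion : public mlir::DialectOpConversion
{
public:
    NGAddOpConversion(mlir::MLIRContext* context, DialectLoweringPass& pass)
        : mlir::DialectOpConversion(ngraph::runtime::ngmlir::NGAddOp::getOperationName(), 1, context)
        , m_pass(pass)
    {
    }
    llvm::SmallVector<mlir::Value*, 4> rewrite(mlir::Operation* op,
                                               llvm::ArrayRef<mlir::Value*> operands,
                                               mlir::FuncBuilder& rewriter) const override;
    DialectLoweringPass& m_pass;
};
```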
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
......
......@@ -220,10 +220,10 @@ if (NGRAPH_CPU_ENABLE)
DEFINITION MLIR_LLVM_INCLUDEPATH)
message(STATUS "Building CPU backend with MLIR")
message(STATUS "MLIR INCLUDE DIRS: ${MLIR_INCLUDE_PATH}")
message(STATUS "MLIR INCLUDE DIRS: ${MLIR_INCLUDE_PATHS}")
message(STATUS "LLVM INCLUDE DIRS: ${MLIR_LLVM_INCLUDEPATH}")
add_dependencies(cpu_backend mlir_backend)
target_include_directories(cpu_backend PUBLIC ${MLIR_INCLUDE_PATH} ${MLIR_LLVM_INCLUDEPATH})
target_include_directories(cpu_backend PUBLIC ${MLIR_INCLUDE_PATHS} ${MLIR_LLVM_INCLUDEPATH})
target_link_libraries(cpu_backend PUBLIC mlir_backend)
# TODO: Get rid of the compile time def, and move all MLIR code to separate src files
# and add them to cpu_backend here instead.
......
......@@ -44,7 +44,6 @@ namespace ngraph
namespace runtime
{
class Backend;
class Value;
}
std::string to_cplusplus_sourcecode_literal(bool val);
......