Commit e3c28fd2 authored by Nagy Mostafa, committed by nmostafa

[MLIR] Move MLIR code into its own namespace. (#15)

* Use NGRAPH export macros instead of CPU

* Move code to ngmlir namespace
parent b19fa875
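
The gist of the change, as a minimal illustrative sketch (not taken from the diff itself; the forward declarations and layout are assumptions for illustration only):

// Sketch of the two renames this commit applies:
// 1) MLIR support code moves from ngraph::runtime::cpu to ngraph::runtime::ngmlir.
// 2) Exported MLIR entry points use the core NGRAPH_API macro instead of the
//    CPU backend's CPU_BACKEND_API.
#include <cstddef>
#include "ngraph/ngraph_visibility.hpp" // provides NGRAPH_API

namespace ngraph
{
    namespace runtime
    {
        namespace ngmlir // previously: namespace cpu
        {
            class MLIRCompiler; // now spelled ngraph::runtime::ngmlir::MLIRCompiler
            class MLIRMemMgr;
        }
    }
}

// The JIT allocation callback keeps its name but switches export macros:
extern "C" NGRAPH_API void* __mlir_allocate(ngraph::runtime::ngmlir::MLIRMemMgr* mem_mgr,
                                            size_t size);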
@@ -47,7 +47,7 @@
 using llvm::SmallVector;
 using llvm::StringRef;
 using llvm::make_unique;
-using namespace ngraph::runtime::cpu;
+using namespace ngraph::runtime::ngmlir;
 #define COMPILE_OP_DECL(op_name) \
     create_op<op_name>(MLIRCompiler & compiler, const ngraph::Node* ng_node)
...
@@ -40,7 +40,7 @@ namespace ngraph
 {
     namespace runtime
     {
-        namespace cpu
+        namespace ngmlir
         {
             class MLIRCompiler
             {
...
@@ -19,11 +19,11 @@
 #include "type.hpp"
 namespace ngraph
 {
-    using namespace runtime::cpu;
+    using namespace runtime::ngmlir;
     /// Register a dialect and its types
     /// Usage:
-    /// mlir::registerDialect<ngraph::runtime::cpu::ngdialect::Dialect>();
+    /// mlir::registerDialect<ngraph::runtime::ngmlir::Dialect>();
     NGDialect::NGDialect(mlir::MLIRContext* ctx)
         : mlir::Dialect("ng", ctx)
     {
...
@@ -28,7 +28,7 @@ namespace ngraph
 {
     namespace runtime
     {
-        namespace cpu
+        namespace ngmlir
         {
             class NGDialect : public mlir::Dialect
             {
...
@@ -31,7 +31,7 @@ namespace ngraph
 {
     namespace runtime
     {
-        namespace cpu
+        namespace ngmlir
         {
             template <typename T>
             static mlir::LogicalResult verifyBinOperands(T* op)
@@ -69,20 +69,20 @@
         }
     }
-    void runtime::cpu::NG_FakeInput::build(mlir::Builder* builder,
+    void runtime::ngmlir::NG_FakeInput::build(mlir::Builder* builder,
         mlir::OperationState* state,
         mlir::Type resultType)
     {
         state->types.push_back(std::move(resultType));
     }
-    mlir::LogicalResult runtime::cpu::NG_FakeInput::verify()
+    mlir::LogicalResult runtime::ngmlir::NG_FakeInput::verify()
     {
         // TODO: Verify returned tensor types must match function return type.
         return mlir::success();
     }
-    void runtime::cpu::NG_AddOp::build(mlir::Builder* builder,
+    void runtime::ngmlir::NG_AddOp::build(mlir::Builder* builder,
         mlir::OperationState* state,
         mlir::Value* lhs,
         mlir::Value* rhs)
@@ -92,14 +92,14 @@
         state->operands.push_back(rhs);
     }
-    mlir::LogicalResult runtime::cpu::NG_AddOp::verify()
+    mlir::LogicalResult runtime::ngmlir::NG_AddOp::verify()
     {
         // TODO: verify matching elt types
         verifyBinOperands(this);
         return mlir::success();
     }
-    void runtime::cpu::NG_MatmulBiasOp::build(mlir::Builder* builder,
+    void runtime::ngmlir::NG_MatmulBiasOp::build(mlir::Builder* builder,
         mlir::OperationState* state,
         mlir::Value* lhs,
         mlir::Value* rhs)
@@ -109,7 +109,7 @@
         state->operands.push_back(rhs);
     }
-    mlir::LogicalResult runtime::cpu::NG_MatmulBiasOp::verify()
+    mlir::LogicalResult runtime::ngmlir::NG_MatmulBiasOp::verify()
     {
         // Verify that we have 3 operands
         if (getNumOperands() != 3)
@@ -146,7 +146,7 @@
         return mlir::success();
     }
-    void runtime::cpu::NG_ReturnOp::build(mlir::Builder* builder,
+    void runtime::ngmlir::NG_ReturnOp::build(mlir::Builder* builder,
         mlir::OperationState* state,
         std::vector<mlir::Value*> value_list)
     {
@@ -157,7 +157,7 @@
         }
     }
-    mlir::LogicalResult runtime::cpu::NG_ReturnOp::verify()
+    mlir::LogicalResult runtime::ngmlir::NG_ReturnOp::verify()
     {
         // TODO: Verify returned tensor types must match function return type.
         return mlir::success();
...
@@ -26,7 +26,7 @@ namespace ngraph
 {
     namespace runtime
     {
-        namespace cpu
+        namespace ngmlir
         {
             // Fake instructions
...
@@ -33,7 +33,7 @@ using llvm::Twine;
 namespace ngraph
 {
-    using namespace runtime::cpu;
+    using namespace runtime::ngmlir;
     /// Creates TensorType objects. They all point to the same storage if
     /// element type and shape are the same.
     NGTensorType NGTensorType::get(mlir::MLIRContext* context, EltType eltType, Shape shape)
...
@@ -27,7 +27,7 @@ namespace ngraph
 {
     namespace runtime
     {
-        namespace cpu
+        namespace ngmlir
        {
            using llvm::raw_ostream;
...
@@ -28,14 +28,14 @@
 #include "dialect/ops.hpp"
 #include "dialect/type.hpp"
-using namespace ngraph::runtime::cpu;
+using namespace ngraph::runtime::ngmlir;
 // anonymous namespace
 // no need to expose any of the following outside of this file
 namespace
 {
     using namespace mlir;
     using namespace mlir::edsc;
-    using namespace ngraph::runtime::cpu;
+    using namespace ngraph::runtime::ngmlir;
     class DialectLoweringPass;
 #include "op_lowerers.inc"
@@ -393,7 +393,7 @@ namespace ngraph
 {
     namespace runtime
     {
-        namespace cpu
+        namespace ngmlir
         {
             Pass* createDialectLoweringPass(MLIRCompiler* compiler)
             {
...
@@ -23,7 +23,7 @@ namespace ngraph
 {
     namespace runtime
     {
-        namespace cpu
+        namespace ngmlir
         {
             class MLIRCompiler;
...
@@ -14,15 +14,14 @@
 // limitations under the License.
 //*****************************************************************************
-#include "memory_manager.hpp"
-#include <llvm/ADT/STLExtras.h>
 #include <memory>
-#include "compiler.hpp"
-#include "ngraph/runtime/cpu/cpu_backend_visibility.h"
-using namespace ngraph::runtime::cpu;
+#include "memory_manager.hpp"
+#include "ngraph/ngraph_visibility.hpp"
+using namespace ngraph::runtime::ngmlir;
 /// Call back to allocate memory for temps from JIT'ed code
-extern "C" CPU_BACKEND_API void* __mlir_allocate(MLIRMemMgr* mem_mgr, size_t size)
+extern "C" NGRAPH_API void* __mlir_allocate(MLIRMemMgr* mem_mgr, size_t size)
 {
     return mem_mgr->allocate(size);
 }
...
@@ -22,7 +22,7 @@ namespace ngraph
 {
     namespace runtime
     {
-        namespace cpu
+        namespace ngmlir
         {
             /// Memory manager for temporaries in MLIR compiled sub-graph
             /// It handles call-backs from the code and returns pointer to allocated memory
...
@@ -22,7 +22,7 @@ class OP##Conversion : public mlir::DialectOpConversion \
 {\
 public:\
     explicit OP##Conversion(mlir::MLIRContext *context, DialectLoweringPass& pass)\
-        : mlir::DialectOpConversion(ngraph::runtime::cpu::OP::getOperationName(), 1, context),\
+        : mlir::DialectOpConversion(ngraph::runtime::ngmlir::OP::getOperationName(), 1, context),\
           m_pass(pass)\
     {} \
     SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands, FuncBuilder &rewriter) const override; \
...
@@ -512,6 +512,7 @@
 // Get rid of the #ifdefs by moving MLIR hooks to separate files in cpu backend
 // we can then instead compile them conditionally based on NGRAPH_MLIR_ENABLE cmake flag
 #ifdef NGRAPH_MLIR_ENABLE
+using namespace ngraph::runtime::ngmlir;
 using namespace ngraph::runtime::cpu;
 CPUKernelFunctor Builder::build_mlir_single_output_binary_op(const ngraph::Node* node,
...
@@ -1400,7 +1400,7 @@ void runtime::cpu::CPU_ExternalFunction::build(ngraph::pass::PassConfig& pass_co
     if (std::getenv("NGRAPH_MLIR") != nullptr)
     {
         // Initialize MLIR compiler
-        MLIRCompiler::init_mlir();
+        ngmlir::MLIRCompiler::init_mlir();
     }
 #endif
...
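
For context, a sketch of how a CPU-backend caller reaches the renamed namespace, assuming the same NGRAPH_MLIR_ENABLE build flag and NGRAPH_MLIR environment-variable gate shown in the hunks above; the wrapper function name and include path are illustrative, not from the diff:

#include <cstdlib>
#include "compiler.hpp" // assumed include path for ngmlir::MLIRCompiler

void maybe_init_mlir()
{
#ifdef NGRAPH_MLIR_ENABLE
    // MLIR support stays opt-in at runtime via the NGRAPH_MLIR environment variable.
    if (std::getenv("NGRAPH_MLIR") != nullptr)
    {
        // The compiler no longer lives in the CPU namespace, so qualify it explicitly.
        ngraph::runtime::ngmlir::MLIRCompiler::init_mlir();
    }
#endif
}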