Commit e3c28fd2 authored by Nagy Mostafa, committed by nmostafa

[MLIR] Move MLIR code into its own namespace. (#15)

* Use NGRAPH export macros instead of CPU

* Move code to ngmlir namespace
parent b19fa875
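The hunks below apply the same mechanical change across the MLIR sources: declarations move from the CPU backend namespace into a dedicated ngmlir namespace, and exported symbols switch from the CPU backend export macro to the generic NGRAPH one. As a minimal before/after sketch of the pattern (illustrative only, not code from this commit):

// Before (sketch): MLIR support classes lived in the CPU backend namespace,
// and exported free functions such as __mlir_allocate used CPU_BACKEND_API.
namespace ngraph { namespace runtime { namespace cpu {
    class MLIRCompiler;
    class MLIRMemMgr;
}}}

// After (sketch): the same classes sit in their own ngraph::runtime::ngmlir
// namespace, and exported symbols use NGRAPH_API instead.
namespace ngraph { namespace runtime { namespace ngmlir {
    class MLIRCompiler;
    class MLIRMemMgr;
}}}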
@@ -47,7 +47,7 @@
using llvm::SmallVector;
using llvm::StringRef;
using llvm::make_unique;
-using namespace ngraph::runtime::cpu;
+using namespace ngraph::runtime::ngmlir;
#define COMPILE_OP_DECL(op_name) \
create_op<op_name>(MLIRCompiler & compiler, const ngraph::Node* ng_node)
......
@@ -40,7 +40,7 @@ namespace ngraph
{
namespace runtime
{
-namespace cpu
+namespace ngmlir
{
class MLIRCompiler
{
......
@@ -19,11 +19,11 @@
#include "type.hpp"
namespace ngraph
{
-using namespace runtime::cpu;
+using namespace runtime::ngmlir;
/// Register a dialect and its types
/// Usage:
-/// mlir::registerDialect<ngraph::runtime::cpu::ngdialect::Dialect>();
+/// mlir::registerDialect<ngraph::runtime::ngmlir::Dialect>();
NGDialect::NGDialect(mlir::MLIRContext* ctx)
: mlir::Dialect("ng", ctx)
{
......
@@ -28,7 +28,7 @@ namespace ngraph
{
namespace runtime
{
-namespace cpu
+namespace ngmlir
{
class NGDialect : public mlir::Dialect
{
......
@@ -31,7 +31,7 @@ namespace ngraph
{
namespace runtime
{
-namespace cpu
+namespace ngmlir
{
template <typename T>
static mlir::LogicalResult verifyBinOperands(T* op)
@@ -69,20 +69,20 @@ namespace ngraph
}
}
-void runtime::cpu::NG_FakeInput::build(mlir::Builder* builder,
+void runtime::ngmlir::NG_FakeInput::build(mlir::Builder* builder,
mlir::OperationState* state,
mlir::Type resultType)
{
state->types.push_back(std::move(resultType));
}
-mlir::LogicalResult runtime::cpu::NG_FakeInput::verify()
+mlir::LogicalResult runtime::ngmlir::NG_FakeInput::verify()
{
// TODO: Verify returned tensor types must match function return type.
return mlir::success();
}
-void runtime::cpu::NG_AddOp::build(mlir::Builder* builder,
+void runtime::ngmlir::NG_AddOp::build(mlir::Builder* builder,
mlir::OperationState* state,
mlir::Value* lhs,
mlir::Value* rhs)
@@ -92,14 +92,14 @@ namespace ngraph
state->operands.push_back(rhs);
}
-mlir::LogicalResult runtime::cpu::NG_AddOp::verify()
+mlir::LogicalResult runtime::ngmlir::NG_AddOp::verify()
{
// TODO: verify matching elt types
verifyBinOperands(this);
return mlir::success();
}
-void runtime::cpu::NG_MatmulBiasOp::build(mlir::Builder* builder,
+void runtime::ngmlir::NG_MatmulBiasOp::build(mlir::Builder* builder,
mlir::OperationState* state,
mlir::Value* lhs,
mlir::Value* rhs)
@@ -109,7 +109,7 @@ namespace ngraph
state->operands.push_back(rhs);
}
-mlir::LogicalResult runtime::cpu::NG_MatmulBiasOp::verify()
+mlir::LogicalResult runtime::ngmlir::NG_MatmulBiasOp::verify()
{
// Verify that we have 3 operands
if (getNumOperands() != 3)
@@ -146,7 +146,7 @@ namespace ngraph
return mlir::success();
}
-void runtime::cpu::NG_ReturnOp::build(mlir::Builder* builder,
+void runtime::ngmlir::NG_ReturnOp::build(mlir::Builder* builder,
mlir::OperationState* state,
std::vector<mlir::Value*> value_list)
{
@@ -157,7 +157,7 @@ namespace ngraph
}
}
-mlir::LogicalResult runtime::cpu::NG_ReturnOp::verify()
+mlir::LogicalResult runtime::ngmlir::NG_ReturnOp::verify()
{
// TODO: Verify returned tensor types must match function return type.
return mlir::success();
......
@@ -26,7 +26,7 @@ namespace ngraph
{
namespace runtime
{
-namespace cpu
+namespace ngmlir
{
// Fake instructions
......
@@ -33,7 +33,7 @@ using llvm::Twine;
namespace ngraph
{
-using namespace runtime::cpu;
+using namespace runtime::ngmlir;
/// Creates TensorType objects. They all point to the same storage if
/// element type and shape are the same.
NGTensorType NGTensorType::get(mlir::MLIRContext* context, EltType eltType, Shape shape)
......
@@ -27,7 +27,7 @@ namespace ngraph
{
namespace runtime
{
-namespace cpu
+namespace ngmlir
{
using llvm::raw_ostream;
......
@@ -28,14 +28,14 @@
#include "dialect/ops.hpp"
#include "dialect/type.hpp"
-using namespace ngraph::runtime::cpu;
+using namespace ngraph::runtime::ngmlir;
// anonymous namespace
// no need to expose any of the following outside of this file
namespace
{
using namespace mlir;
using namespace mlir::edsc;
-using namespace ngraph::runtime::cpu;
+using namespace ngraph::runtime::ngmlir;
class DialectLoweringPass;
#include "op_lowerers.inc"
@@ -393,7 +393,7 @@ namespace ngraph
{
namespace runtime
{
-namespace cpu
+namespace ngmlir
{
Pass* createDialectLoweringPass(MLIRCompiler* compiler)
{
......
@@ -23,7 +23,7 @@ namespace ngraph
{
namespace runtime
{
-namespace cpu
+namespace ngmlir
{
class MLIRCompiler;
......
@@ -14,15 +14,14 @@
// limitations under the License.
//*****************************************************************************
#include "memory_manager.hpp"
#include <llvm/ADT/STLExtras.h>
#include <memory>
#include "compiler.hpp"
#include "ngraph/runtime/cpu/cpu_backend_visibility.h"
using namespace ngraph::runtime::cpu;
#include "memory_manager.hpp"
#include "ngraph/ngraph_visibility.hpp"
using namespace ngraph::runtime::ngmlir;
/// Call back to allocate memory for temps from JIT'ed code
extern "C" CPU_BACKEND_API void* __mlir_allocate(MLIRMemMgr* mem_mgr, size_t size)
extern "C" NGRAPH_API void* __mlir_allocate(MLIRMemMgr* mem_mgr, size_t size)
{
return mem_mgr->allocate(size);
}
......
@@ -22,7 +22,7 @@ namespace ngraph
{
namespace runtime
{
-namespace cpu
+namespace ngmlir
{
/// Memory manager for temporaries in MLIR compiled sub-graph
/// It handles call-backs from the code and returns pointer to allocated memory
......
@@ -22,7 +22,7 @@ class OP##Conversion : public mlir::DialectOpConversion \
{\
public:\
explicit OP##Conversion(mlir::MLIRContext *context, DialectLoweringPass& pass)\
-: mlir::DialectOpConversion(ngraph::runtime::cpu::OP::getOperationName(), 1, context),\
+: mlir::DialectOpConversion(ngraph::runtime::ngmlir::OP::getOperationName(), 1, context),\
m_pass(pass)\
{} \
SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands, FuncBuilder &rewriter) const override; \
......
@@ -512,6 +512,7 @@ namespace ngraph
// Get rid of the #ifdefs by moving MLIR hooks to separate files in cpu backend
// we can then instead compile them conditionally based on NGRAPH_MLIR_ENABLE cmake flag
#ifdef NGRAPH_MLIR_ENABLE
+using namespace ngraph::runtime::ngmlir;
using namespace ngraph::runtime::cpu;
CPUKernelFunctor Builder::build_mlir_single_output_binary_op(const ngraph::Node* node,
......
@@ -1400,7 +1400,7 @@ void runtime::cpu::CPU_ExternalFunction::build(ngraph::pass::PassConfig& pass_co
if (std::getenv("NGRAPH_MLIR") != nullptr)
{
// Initialize MLIR compiler
-MLIRCompiler::init_mlir();
+ngmlir::MLIRCompiler::init_mlir();
}
#endif
......