Commit 6fbed3b9 authored by Pruthvi's avatar Pruthvi Committed by Scott Cyphers

[MLIR] Graph pass to lower ngraph to ngraph dialect (#3835)

*  WIP graph pass to lower ngraph to ngraph dialect

* resolved compiler errors

* - refactor ngraph-dialect to graph pass

* - fix compilation issue
- unit test passes

*  - style fix

* Addressed PR comments

* - move NgDialectConversionPass to anonymous namespace

* - use getModule() to access module inside the graph pass
- address PR comments

*  - fix failing unit test case for negative padding
- make builder as an object isntead of pointer to an object

* Address PR Comments
parent c06c9405
......@@ -32,6 +32,8 @@ set(SRC
core/ngraph_dialect/ops.cpp
core/pass/mlir_subgraph_extraction.cpp
core/pass/mlir_subgraph_extraction.hpp
core/pass/ng_dialect_builder.cpp
core/pass/ng_dialect_builder.hpp
runtime/cpu/memory_manager.cpp
runtime/cpu/cpu_runtime.cpp
utils.cpp
......
This diff is collapsed.
......@@ -63,9 +63,6 @@ namespace ngraph
static void init();
public:
using TensorList = std::vector<descriptor::Tensor*>;
using TypeList = llvm::SmallVector<mlir::Type, 4>;
MLIRCompiler(const ngraph::op::CompiledKernel* compiled_kernel,
mlir::MLIRContext& context)
: m_compiledKernel(compiled_kernel)
......@@ -79,52 +76,12 @@ namespace ngraph
void compile();
mlir::OwningModuleRef& get_module() { return m_module; }
private:
struct TensorInfo
{
// MLIR values this tensor maps to.
mlir::Value* m_value;
};
private:
// Converts an nGraph sub-graph to MLIR nGraph dialect.
void buildNgDialectModule();
void buildNgDialect();
// Applies any nGraph dialect optimizations
void optimizeNgDialect() { /*TODO: Add Core NG dialect optimizations */}
mlir::Type getMlirType(const descriptor::Tensor* tensor);
mlir::Type getMlirType(const element::Type& type);
mlir::Type getMlirType(const ngraph::Node* node);
TensorInfo getTensorValue(descriptor::Tensor* tensor);
void updateTensorValue(descriptor::Tensor* tensor, mlir::Value* value);
template <typename Op>
static mlir::Operation* createOp(MLIRCompiler& compiler, const ngraph::Node* ngNode)
{
throw std::runtime_error("Unimplemented op '" + ngNode->description() +
"' in MLIR Compiler");
}
// Generic op lowerer to ng dialect.
// Simply maps ngraph tensors to values and generate an OP. No op-specific logic.
template <typename Op>
mlir::Operation* createGenericOp(const ngraph::Node* ngNode);
template <typename RedOp>
mlir::Operation* createIndexReduction(const ngraph::Node* ngNode);
void createReturn();
/// Converts nGraph shape-like types \p ng_shape to MLIR shape \p mlir_shape.
template <typename T>
void getMlirShape(T ngShape, llvm::SmallVectorImpl<int64_t>& mlirShape);
/// Converts an ngraph shape to an I64 array attribute
template <typename T>
mlir::ArrayAttr getShapeAsAttr(T ngShape);
private:
// Sub-graph to be compiled and executed with MLIR.
const ngraph::op::CompiledKernel* m_compiledKernel;
......@@ -132,20 +89,8 @@ namespace ngraph
// MLIR context that holds all the MLIR information related to the sub-graph
// compilation.
mlir::MLIRContext& m_context;
mlir::OwningModuleRef m_module;
std::unique_ptr<mlir::OpBuilder> m_builder;
using TensorToInfo = std::pair<descriptor::Tensor*, TensorInfo>;
using TensorToInfoMap = std::unordered_map<descriptor::Tensor*, TensorInfo>;
using MLIRCompOpFunction =
std::function<mlir::Operation*(MLIRCompiler& compiler, const ngraph::Node*)>;
using MLIRCompOpMap = std::unordered_map<std::type_index, MLIRCompOpFunction>;
// Maps tensor to the value it represents in the IR
// use for MLIR dialect gen
TensorToInfoMap m_tensorToValueMap;
static const MLIRCompOpMap opDispatcher;
// Global initialization for MLIR compiler
static bool initialized;
};
......
......@@ -277,14 +277,11 @@ mlir::LogicalResult verifyOp(NGConvolutionOp* op)
auto s = std::get<0>(attrs).cast<IntegerAttr>().getInt();
auto pb = std::get<1>(attrs).cast<IntegerAttr>().getInt();
auto pa = std::get<2>(attrs).cast<IntegerAttr>().getInt();
if (s <= 0)
{
return op->emitOpError("Window stride must be non-negative");
}
if (pb < 0 || pa < 0)
{
return op->emitOpError("Paddings must be positive");
}
stridesVal.push_back(s);
padBelowVal.push_back(pb);
padAboveVal.push_back(pa);
......
This diff is collapsed.
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
// NOTE: This file follows nGraph format style.
// Follows nGraph naming convention for public APIs only, else MLIR naming convention.
#pragma once
#include "contrib/mlir/core/compiler.hpp"
#include "contrib/mlir/runtime/cpu/memory_manager.hpp"
#include "ngraph/check.hpp"
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/node.hpp"
#include <mlir/ExecutionEngine/MemRefUtils.h>
#include <mlir/IR/Builders.h>
#include <mlir/IR/Module.h>
#include <mlir/IR/Types.h>
#include <mlir/Pass/Pass.h>
#include <typeindex>
#include <unordered_map>
#include <vector>
using namespace ngraph::runtime::ngmlir;
namespace ngraph
{
namespace pass
{
std::unique_ptr<mlir::Pass>
createNgDialectConversionPass(const ngraph::op::CompiledKernel* compiledKernel,
mlir::MLIRContext* context);
}
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment