Commit 81597f3a authored by Diego Caballero's avatar Diego Caballero Committed by Scott Cyphers

[MLIR] Bump MLIR repo to commit 26c683c, 07/29/2019. (#3310)

* [MLIR] Bump MLIR repo to commit 59167c2, 07/25/2019.

MLIR commit:
Author: River Riddle <riverriddle@google.com>
Date:   Wed Jul 24 16:41:11 2019 -0700

    NFC: Use ValueOfRange instead of T in Diagnostic::appendRange.

        For iterator_range, T is often the name of another iterator type
        and not the value of the range.

LLVM commit:
Author: Marshall Clow <mclow.lists@gmail.com>
Date:   Thu Jul 25 03:26:05 2019 +0000

    Implement change #4 of P1466: Change weekday to accept both 0 and 7
    as Sunday. Add accessors 'c_encoding' and 'iso_encoding' to provide
    different interpretations of the weekday. Remove 'operator unsigned'

* style

* Move MLIR/LLVM repos a bit more forward
parent 5ece6de2
......@@ -20,8 +20,8 @@ set(MLIR_LLVM_REPO_URL https://github.com/llvm/llvm-project.git)
set(MLIR_REPO_URL https://github.com/tensorflow/mlir.git)
# Change these commit IDs to move to latest stable versions
set(MLIR_LLVM_COMMIT_ID c0cad98)
set(MLIR_COMMIT_ID 82d5084)
set(MLIR_LLVM_COMMIT_ID a2a6f85)
set(MLIR_COMMIT_ID 26c683c)
set(MLIR_PROJECT_ROOT ${CMAKE_CURRENT_BINARY_DIR}/mlir_project)
set(MLIR_LLVM_ROOT ${MLIR_PROJECT_ROOT}/llvm-projects)
set(MLIR_SOURCE_DIR ${MLIR_LLVM_ROOT}/llvm/projects/mlir)
......
......@@ -50,6 +50,7 @@
#include <llvm/Support/MemoryBuffer.h>
#include <llvm/Support/SourceMgr.h>
#include <llvm/Support/TargetSelect.h>
#include <mlir/Conversion/ControlFlowToCFG/ConvertControlFlowToCFG.h>
#include <mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h>
#include <mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h>
#include <mlir/ExecutionEngine/ExecutionEngine.h>
......@@ -108,11 +109,16 @@ void MLIRCompiler::run(std::vector<void*>& external_tensors)
cleanup();
}
// Returns the index of the memory-manager pointer argument in the call
// interface; by convention it is appended as the final argument of `func`.
unsigned MLIRCompiler::get_mem_mgr_arg_id(mlir::FuncOp& func)
{
    const auto num_args = func.getNumArguments();
    return num_args - 1;
}
// Creates an MLIR module and function with nGraph dialect ops from the input CompiledKernel.
void MLIRCompiler::build_ng_dialect_module()
{
// initialize an empty module
m_module = make_unique<mlir::Module>(&m_context);
m_module = mlir::ModuleOp::create(mlir::UnknownLoc::get(&m_context));
TypeList args_type_list, result_type_list;
......@@ -133,15 +139,14 @@ void MLIRCompiler::build_ng_dialect_module()
}
auto func_type = mlir::FunctionType::get(args_type_list, result_type_list, &m_context);
auto function =
make_unique<mlir::Function>(mlir::UnknownLoc::get(&m_context), "main", func_type);
function->addEntryBlock();
auto function = mlir::FuncOp::create(mlir::UnknownLoc::get(&m_context), "main", func_type);
function.addEntryBlock();
// populate Tensor->Value maps
int i = 0;
for (auto input : kernel_inputs)
{
mlir::Value* arg = function->getArgument(i);
mlir::Value* arg = function.getArgument(i);
TensorInfo tensor_info{arg};
m_tensor_to_value_map.insert(
TensorToInfo(input->get_output_tensor_ptr().get(), tensor_info));
......@@ -149,9 +154,9 @@ void MLIRCompiler::build_ng_dialect_module()
}
// create builder
m_builder = llvm::make_unique<mlir::OpBuilder>(function->getBody());
m_builder = llvm::make_unique<mlir::OpBuilder>(function.getBody());
build_ng_dialect();
m_module->getFunctions().push_back(function.release());
m_module->push_back(function);
if (failed(m_module->verify()))
{
NGRAPH_CHECK(false, "Invalid module after lowering to NG dialect");
......@@ -260,19 +265,21 @@ void MLIRCompiler::lower_ng_dialect()
NGRAPH_CHECK(m_module, "MLIR module is not ready.");
// Lower Standard dialect to LLVM dialect.
// TODO: Do this via PassManager
mlir::LLVMTypeConverter llvm_converter(&m_context);
OwningRewritePatternList patterns;
mlir::populateLoopToStdConversionPatterns(patterns, &m_context);
mlir::populateStdToLLVMConversionPatterns(llvm_converter, patterns);
mlir::ConversionTarget target(m_context);
target.addLegalDialect<mlir::LLVM::LLVMDialect>();
auto result = applyConversionPatterns(*m_module, target, llvm_converter, std::move(patterns));
target.addLegalOp<mlir::ModuleOp, mlir::ModuleTerminatorOp>();
target.addDynamicallyLegalOp<mlir::FuncOp>(
[&](mlir::FuncOp op) { return llvm_converter.isSignatureLegal(op.getType()); });
auto result = applyFullConversion(*m_module, target, std::move(patterns), &llvm_converter);
NGRAPH_CHECK(succeeded(result), "Standard to LLVM dialect conversion failed");
dump_mlir_module("LLVM-IR Dialect Dump:");
// Lower to LLVM BC and optimize
// Initialize LLVM targets.
llvm::InitializeNativeTarget();
llvm::InitializeNativeTargetAsmPrinter();
......@@ -509,8 +516,8 @@ void MLIRCompiler::bind_arguments(std::vector<void*>& external_tensors)
{
NGRAPH_CHECK(m_module, "MLIR module is not ready.");
mlir::Function* func = m_module->getNamedFunction("main");
NGRAPH_CHECK(func && !func->getBlocks().empty(), "Function not found");
mlir::FuncOp func = m_module->lookupSymbol<mlir::FuncOp>("main");
NGRAPH_CHECK(func && !func.getBlocks().empty(), "Function not found");
// Set external arguments
NGRAPH_CHECK(m_compiled_kernel, "No compiled kernel set for compiler");
......
......@@ -77,10 +77,7 @@ namespace ngraph
/// Returns the memory manager used by this sub-graph compiler.
MLIRMemMgr& get_mem_mgr() { return m_mem_mgr; }
/// Returns memory manager pointer argument ID in call interface.
unsigned get_mem_mgr_arg_id(mlir::Function* func)
{
return func->getNumArguments() - 1;
}
unsigned get_mem_mgr_arg_id(mlir::FuncOp& func);
private:
struct TensorInfo
......@@ -147,7 +144,7 @@ namespace ngraph
// compilation.
mlir::MLIRContext m_context;
std::unique_ptr<mlir::Module> m_module;
mlir::OwningModuleRef m_module;
std::unique_ptr<mlir::OpBuilder> m_builder;
std::unique_ptr<mlir::ExecutionEngine> m_engine;
......
......@@ -74,7 +74,7 @@ namespace
\
PatternMatchResult matchAndRewrite(Operation* op, \
ArrayRef<Value*> operands, \
PatternRewriter& rewriter) const override; \
ConversionPatternRewriter& rewriter) const override; \
};
#include "op_lowerers.inc"
......@@ -117,14 +117,15 @@ namespace
SmallVector<Value*, 4> buildOutputDefs(Operation* op, PatternRewriter& rewriter);
Value* createTempTensor(Type type, PatternRewriter& rewriter);
mlir::Function* getCallDecl(StringRef name,
ArrayRef<Type> args,
ArrayRef<Type> output,
PatternRewriter& rewriter);
mlir::FuncOp getCallDecl(StringRef name,
ArrayRef<Type> args,
ArrayRef<Type> output,
PatternRewriter& rewriter);
/// Inserts dealloc Ops for each temporary allocated by AllocOp
void insertDeallocs(PatternRewriter& rewriter);
NGraphTypeConverter& getTypeConverter() { return typeConverter; }
private:
/// Collect a set of patterns to convert from the nGraph dialect to Affine dialect.
void populateNGraphToAffineConversionPatterns(OwningRewritePatternList& patterns);
......@@ -150,6 +151,9 @@ namespace
// Create type converter and initialize conversion patterns.
NGraphTypeConverter converter;
OwningRewritePatternList patterns;
// Add default FuncOp type conversion. It replaces the incoming FuncOp with a *new* one
// with the converted types.
mlir::populateFuncOpTypeConversionPattern(patterns, &getContext(), typeConverter);
populateNGraphToAffineConversionPatterns(patterns);
// Create target that defines legal ops for nGraph dialect to be lowered to.
......@@ -157,14 +161,18 @@ namespace
// TODO: Remove NGFakeInputOp. We need to set NGFakeInputOp as legal op because we generate
// it as part of the lowering to affine/standard.
target.addLegalDialect<AffineOpsDialect, StandardOpsDialect>();
target.addLegalOp<NGFakeInputOp>();
target.addLegalOp<ModuleOp, ModuleTerminatorOp, NGFakeInputOp>();
target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
// FuncOp is legal only if types have been converted to Std types.
return typeConverter.isSignatureLegal(op.getType());
});
// capture output values by looking for the Return and grabbing the values
// the order of the returned values matches the order of the lowered func signature for
// results. This is used to find the arg_id that a defined value maps to if it is an output
findOutputValues();
if (failed(applyConversionPatterns(getModule(), target, converter, std::move(patterns))))
if (failed(applyFullConversion(getModule(), target, std::move(patterns), &converter)))
{
emitError(mlir::UnknownLoc::get(&getContext()), "Error lowering nGraph dialect\n");
signalPassFailure();
......@@ -187,13 +195,13 @@ namespace
void DialectLoweringPass::findOutputValues()
{
// get original function
auto f = getModule().getNamedFunction("main");
auto f = getModule().lookupSymbol<mlir::FuncOp>("main");
SmallVector<Value*, 4> outputList;
unsigned outputCount = 0;
// we find out output values by looking at returned values
// any return should return all outputs of the subgraph
f->walk<NGReturnOp>([this, &outputCount](NGReturnOp ret) {
f.walk<NGReturnOp>([this, &outputCount](NGReturnOp ret) {
for (unsigned i = 0; i < ret.getNumOperands(); i++)
{
auto outputValue = ret.getOperand(i);
......@@ -280,9 +288,9 @@ namespace
void DialectLoweringPass::processFakeInstrs()
{
auto context = getModule().getContext();
auto f = getModule().getNamedFunction("main");
mlir::Block* entryBlock = &*(f->begin());
auto oldFuncType = f->getType();
auto f = getModule().lookupSymbol<mlir::FuncOp>("main");
mlir::Block* entryBlock = &*(f.begin());
auto oldFuncType = f.getType();
ArrayRef<mlir::Type> ipArgs = oldFuncType.getInputs();
ArrayRef<mlir::Type> opArgs = oldFuncType.getResults();
SmallVector<mlir::Type, 4> allArgs;
......@@ -304,7 +312,7 @@ namespace
entryBlock->addArgument(indexType);
// update type
auto newFuncType = mlir::FunctionType::get(allArgs, {}, context);
f->setType(newFuncType);
f.setType(newFuncType);
// RAUW fake outputs with result values
unsigned i = 0;
......@@ -327,13 +335,13 @@ namespace
/// by nGraph op semantics.
void DialectLoweringPass::insertNoAliasArgAttrs()
{
auto func = getModule().getNamedFunction("main");
auto func = getModule().lookupSymbol<mlir::FuncOp>("main");
unsigned int argIdx = 0;
for (auto* arg : func->getArguments())
for (auto* arg : func.getArguments())
{
if (arg->getType().isa<MemRefType>())
{
func->setArgAttr(argIdx, "llvm.noalias", BoolAttr::get(true, &getContext()));
func.setArgAttr(argIdx, "llvm.noalias", BoolAttr::get(true, &getContext()));
}
++argIdx;
......@@ -348,21 +356,19 @@ namespace
}
}
mlir::Function* DialectLoweringPass::getCallDecl(StringRef name,
ArrayRef<Type> args,
ArrayRef<Type> output,
PatternRewriter& rewriter)
mlir::FuncOp DialectLoweringPass::getCallDecl(StringRef name,
ArrayRef<Type> args,
ArrayRef<Type> output,
PatternRewriter& rewriter)
{
auto callBackFuncPtr = getModule().getNamedFunction(name);
if (callBackFuncPtr == nullptr)
auto callBackFunc = getModule().lookupSymbol<mlir::FuncOp>(name);
if (!callBackFunc)
{
auto callBackType = rewriter.getFunctionType(args, output);
auto callBackFunc =
llvm::make_unique<mlir::Function>(rewriter.getUnknownLoc(), name, callBackType);
callBackFuncPtr = callBackFunc.get();
getModule().getFunctions().push_back(callBackFunc.release());
auto callBackFunc = mlir::FuncOp::create(rewriter.getUnknownLoc(), name, callBackType);
getModule().push_back(callBackFunc);
}
return callBackFuncPtr;
return callBackFunc;
}
// NGDialect converters
......@@ -394,15 +400,15 @@ namespace
return mlir::IntegerType::get(1 /* width */, boolType.getContext());
}
NGRAPH_CHECK(false, "Unsupported type to lower");
// Do not assert/NGRAPH_CHECK here. Type conversion infra expects `convertType` to return
// the input type if the type is not supported.
return type;
}
#define REWRITER(OP) \
PatternMatchResult OP##Conversion::matchAndRewrite( \
Operation* op, ArrayRef<Value*> operands, PatternRewriter& rewriter) const
Operation* op, ArrayRef<Value*> operands, ConversionPatternRewriter& rewriter) const
// ADD
REWRITER(NGAddOp)
{
lower_binary_elementwise<mlir::NGAddOp>(op, operands, rewriter, pass);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment