Commit 81597f3a authored by Diego Caballero's avatar Diego Caballero Committed by Scott Cyphers

[MLIR] Bump MLIR repo to commit 26c683c, 07/29/2019. (#3310)

* [MLIR] Bump MLIR repo to commit 59167c2, 07/25/2019.

MLIR commit:
Author: River Riddle <riverriddle@google.com>
Date:   Wed Jul 24 16:41:11 2019 -0700

    NFC: Use ValueOfRange instead of T in Diagnostic::appendRange.

        For iterator_range, T is often the name of another iterator type
        and not the value of the range.

LLVM commit:
Author: Marshall Clow <mclow.lists@gmail.com>
Date:   Thu Jul 25 03:26:05 2019 +0000

    Implement change #4 of P1466: Change weekday to accept both 0 and 7
    as Sunday. Add accessors 'c_encoding' and 'iso_encoding' to provide
    different interpretations of the weekday. Remove 'operator unsigned'

* style

* Move MLIR/LLVM repos a bit more forward
parent 5ece6de2
...@@ -20,8 +20,8 @@ set(MLIR_LLVM_REPO_URL https://github.com/llvm/llvm-project.git) ...@@ -20,8 +20,8 @@ set(MLIR_LLVM_REPO_URL https://github.com/llvm/llvm-project.git)
set(MLIR_REPO_URL https://github.com/tensorflow/mlir.git) set(MLIR_REPO_URL https://github.com/tensorflow/mlir.git)
# Change these commit IDs to move to latest stable versions # Change these commit IDs to move to latest stable versions
set(MLIR_LLVM_COMMIT_ID c0cad98) set(MLIR_LLVM_COMMIT_ID a2a6f85)
set(MLIR_COMMIT_ID 82d5084) set(MLIR_COMMIT_ID 26c683c)
set(MLIR_PROJECT_ROOT ${CMAKE_CURRENT_BINARY_DIR}/mlir_project) set(MLIR_PROJECT_ROOT ${CMAKE_CURRENT_BINARY_DIR}/mlir_project)
set(MLIR_LLVM_ROOT ${MLIR_PROJECT_ROOT}/llvm-projects) set(MLIR_LLVM_ROOT ${MLIR_PROJECT_ROOT}/llvm-projects)
set(MLIR_SOURCE_DIR ${MLIR_LLVM_ROOT}/llvm/projects/mlir) set(MLIR_SOURCE_DIR ${MLIR_LLVM_ROOT}/llvm/projects/mlir)
......
...@@ -50,6 +50,7 @@ ...@@ -50,6 +50,7 @@
#include <llvm/Support/MemoryBuffer.h> #include <llvm/Support/MemoryBuffer.h>
#include <llvm/Support/SourceMgr.h> #include <llvm/Support/SourceMgr.h>
#include <llvm/Support/TargetSelect.h> #include <llvm/Support/TargetSelect.h>
#include <mlir/Conversion/ControlFlowToCFG/ConvertControlFlowToCFG.h>
#include <mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h> #include <mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h>
#include <mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h> #include <mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h>
#include <mlir/ExecutionEngine/ExecutionEngine.h> #include <mlir/ExecutionEngine/ExecutionEngine.h>
...@@ -108,11 +109,16 @@ void MLIRCompiler::run(std::vector<void*>& external_tensors) ...@@ -108,11 +109,16 @@ void MLIRCompiler::run(std::vector<void*>& external_tensors)
cleanup(); cleanup();
} }
// Returns the argument index of the memory-manager pointer in the call
// interface of `func`: it is passed as the last argument, so the index is
// the argument count minus one.
unsigned MLIRCompiler::get_mem_mgr_arg_id(mlir::FuncOp& func)
{
return func.getNumArguments() - 1;
}
// Creates an MLIR module and function with nGraph dialect ops from the input CompiledKernel. // Creates an MLIR module and function with nGraph dialect ops from the input CompiledKernel.
void MLIRCompiler::build_ng_dialect_module() void MLIRCompiler::build_ng_dialect_module()
{ {
// initialize an empty module // initialize an empty module
m_module = make_unique<mlir::Module>(&m_context); m_module = mlir::ModuleOp::create(mlir::UnknownLoc::get(&m_context));
TypeList args_type_list, result_type_list; TypeList args_type_list, result_type_list;
...@@ -133,15 +139,14 @@ void MLIRCompiler::build_ng_dialect_module() ...@@ -133,15 +139,14 @@ void MLIRCompiler::build_ng_dialect_module()
} }
auto func_type = mlir::FunctionType::get(args_type_list, result_type_list, &m_context); auto func_type = mlir::FunctionType::get(args_type_list, result_type_list, &m_context);
auto function = auto function = mlir::FuncOp::create(mlir::UnknownLoc::get(&m_context), "main", func_type);
make_unique<mlir::Function>(mlir::UnknownLoc::get(&m_context), "main", func_type); function.addEntryBlock();
function->addEntryBlock();
// populate Tensor->Value maps // populate Tensor->Value maps
int i = 0; int i = 0;
for (auto input : kernel_inputs) for (auto input : kernel_inputs)
{ {
mlir::Value* arg = function->getArgument(i); mlir::Value* arg = function.getArgument(i);
TensorInfo tensor_info{arg}; TensorInfo tensor_info{arg};
m_tensor_to_value_map.insert( m_tensor_to_value_map.insert(
TensorToInfo(input->get_output_tensor_ptr().get(), tensor_info)); TensorToInfo(input->get_output_tensor_ptr().get(), tensor_info));
...@@ -149,9 +154,9 @@ void MLIRCompiler::build_ng_dialect_module() ...@@ -149,9 +154,9 @@ void MLIRCompiler::build_ng_dialect_module()
} }
// create builder // create builder
m_builder = llvm::make_unique<mlir::OpBuilder>(function->getBody()); m_builder = llvm::make_unique<mlir::OpBuilder>(function.getBody());
build_ng_dialect(); build_ng_dialect();
m_module->getFunctions().push_back(function.release()); m_module->push_back(function);
if (failed(m_module->verify())) if (failed(m_module->verify()))
{ {
NGRAPH_CHECK(false, "Invalid module after lowering to NG dialect"); NGRAPH_CHECK(false, "Invalid module after lowering to NG dialect");
...@@ -260,19 +265,21 @@ void MLIRCompiler::lower_ng_dialect() ...@@ -260,19 +265,21 @@ void MLIRCompiler::lower_ng_dialect()
NGRAPH_CHECK(m_module, "MLIR module is not ready."); NGRAPH_CHECK(m_module, "MLIR module is not ready.");
// Lower Standard dialect to LLVM dialect. // Lower Standard dialect to LLVM dialect.
// TODO: Do this via PassManager
mlir::LLVMTypeConverter llvm_converter(&m_context); mlir::LLVMTypeConverter llvm_converter(&m_context);
OwningRewritePatternList patterns; OwningRewritePatternList patterns;
mlir::populateLoopToStdConversionPatterns(patterns, &m_context);
mlir::populateStdToLLVMConversionPatterns(llvm_converter, patterns); mlir::populateStdToLLVMConversionPatterns(llvm_converter, patterns);
mlir::ConversionTarget target(m_context); mlir::ConversionTarget target(m_context);
target.addLegalDialect<mlir::LLVM::LLVMDialect>(); target.addLegalDialect<mlir::LLVM::LLVMDialect>();
auto result = applyConversionPatterns(*m_module, target, llvm_converter, std::move(patterns)); target.addLegalOp<mlir::ModuleOp, mlir::ModuleTerminatorOp>();
target.addDynamicallyLegalOp<mlir::FuncOp>(
[&](mlir::FuncOp op) { return llvm_converter.isSignatureLegal(op.getType()); });
auto result = applyFullConversion(*m_module, target, std::move(patterns), &llvm_converter);
NGRAPH_CHECK(succeeded(result), "Standard to LLVM dialect conversion failed"); NGRAPH_CHECK(succeeded(result), "Standard to LLVM dialect conversion failed");
dump_mlir_module("LLVM-IR Dialect Dump:"); dump_mlir_module("LLVM-IR Dialect Dump:");
// Lower to LLVM BC and optimize
// Initialize LLVM targets. // Initialize LLVM targets.
llvm::InitializeNativeTarget(); llvm::InitializeNativeTarget();
llvm::InitializeNativeTargetAsmPrinter(); llvm::InitializeNativeTargetAsmPrinter();
...@@ -509,8 +516,8 @@ void MLIRCompiler::bind_arguments(std::vector<void*>& external_tensors) ...@@ -509,8 +516,8 @@ void MLIRCompiler::bind_arguments(std::vector<void*>& external_tensors)
{ {
NGRAPH_CHECK(m_module, "MLIR module is not ready."); NGRAPH_CHECK(m_module, "MLIR module is not ready.");
mlir::Function* func = m_module->getNamedFunction("main"); mlir::FuncOp func = m_module->lookupSymbol<mlir::FuncOp>("main");
NGRAPH_CHECK(func && !func->getBlocks().empty(), "Function not found"); NGRAPH_CHECK(func && !func.getBlocks().empty(), "Function not found");
// Set external arguments // Set external arguments
NGRAPH_CHECK(m_compiled_kernel, "No compiled kernel set for compiler"); NGRAPH_CHECK(m_compiled_kernel, "No compiled kernel set for compiler");
......
...@@ -77,10 +77,7 @@ namespace ngraph ...@@ -77,10 +77,7 @@ namespace ngraph
/// Returns the memory manager used by this sub-graph compiler. /// Returns the memory manager used by this sub-graph compiler.
MLIRMemMgr& get_mem_mgr() { return m_mem_mgr; } MLIRMemMgr& get_mem_mgr() { return m_mem_mgr; }
/// Returns memory manager pointer argument ID in call interface. /// Returns memory manager pointer argument ID in call interface.
unsigned get_mem_mgr_arg_id(mlir::Function* func) unsigned get_mem_mgr_arg_id(mlir::FuncOp& func);
{
return func->getNumArguments() - 1;
}
private: private:
struct TensorInfo struct TensorInfo
...@@ -147,7 +144,7 @@ namespace ngraph ...@@ -147,7 +144,7 @@ namespace ngraph
// compilation. // compilation.
mlir::MLIRContext m_context; mlir::MLIRContext m_context;
std::unique_ptr<mlir::Module> m_module; mlir::OwningModuleRef m_module;
std::unique_ptr<mlir::OpBuilder> m_builder; std::unique_ptr<mlir::OpBuilder> m_builder;
std::unique_ptr<mlir::ExecutionEngine> m_engine; std::unique_ptr<mlir::ExecutionEngine> m_engine;
......
...@@ -74,7 +74,7 @@ namespace ...@@ -74,7 +74,7 @@ namespace
\ \
PatternMatchResult matchAndRewrite(Operation* op, \ PatternMatchResult matchAndRewrite(Operation* op, \
ArrayRef<Value*> operands, \ ArrayRef<Value*> operands, \
PatternRewriter& rewriter) const override; \ ConversionPatternRewriter& rewriter) const override; \
}; };
#include "op_lowerers.inc" #include "op_lowerers.inc"
...@@ -117,14 +117,15 @@ namespace ...@@ -117,14 +117,15 @@ namespace
SmallVector<Value*, 4> buildOutputDefs(Operation* op, PatternRewriter& rewriter); SmallVector<Value*, 4> buildOutputDefs(Operation* op, PatternRewriter& rewriter);
Value* createTempTensor(Type type, PatternRewriter& rewriter); Value* createTempTensor(Type type, PatternRewriter& rewriter);
mlir::Function* getCallDecl(StringRef name, mlir::FuncOp getCallDecl(StringRef name,
ArrayRef<Type> args, ArrayRef<Type> args,
ArrayRef<Type> output, ArrayRef<Type> output,
PatternRewriter& rewriter); PatternRewriter& rewriter);
/// Inserts dealloc Ops for each temporary allocated by AllocOp /// Inserts dealloc Ops for each temporary allocated by AllocOp
void insertDeallocs(PatternRewriter& rewriter); void insertDeallocs(PatternRewriter& rewriter);
NGraphTypeConverter& getTypeConverter() { return typeConverter; }
private: private:
/// Collect a set of patterns to convert from the nGraph dialect to Affine dialect. /// Collect a set of patterns to convert from the nGraph dialect to Affine dialect.
void populateNGraphToAffineConversionPatterns(OwningRewritePatternList& patterns); void populateNGraphToAffineConversionPatterns(OwningRewritePatternList& patterns);
...@@ -150,6 +151,9 @@ namespace ...@@ -150,6 +151,9 @@ namespace
// Create type converter and initialize conversion patterns. // Create type converter and initialize conversion patterns.
NGraphTypeConverter converter; NGraphTypeConverter converter;
OwningRewritePatternList patterns; OwningRewritePatternList patterns;
// Add default FuncOp type conversion. It replaces the incoming FuncOp with a *new* one
// with the converted types.
mlir::populateFuncOpTypeConversionPattern(patterns, &getContext(), typeConverter);
populateNGraphToAffineConversionPatterns(patterns); populateNGraphToAffineConversionPatterns(patterns);
// Create target that defines legal ops for nGraph dialect to be lowered to. // Create target that defines legal ops for nGraph dialect to be lowered to.
...@@ -157,14 +161,18 @@ namespace ...@@ -157,14 +161,18 @@ namespace
// TODO: Remove NGFakeInputOp. We need to set NGFakeInputOp as legal op because we generate // TODO: Remove NGFakeInputOp. We need to set NGFakeInputOp as legal op because we generate
// it as part of the lowering to affine/standard. // it as part of the lowering to affine/standard.
target.addLegalDialect<AffineOpsDialect, StandardOpsDialect>(); target.addLegalDialect<AffineOpsDialect, StandardOpsDialect>();
target.addLegalOp<NGFakeInputOp>(); target.addLegalOp<ModuleOp, ModuleTerminatorOp, NGFakeInputOp>();
target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
// FuncOp is legal only if types have been converted to Std types.
return typeConverter.isSignatureLegal(op.getType());
});
// capture output values by looking for the Return and grabbing the values // capture output values by looking for the Return and grabbing the values
// the order of the returned values matches the order of the lowered func signature for // the order of the returned values matches the order of the lowered func signature for
// results. This is used to find the arg_id that a defined value maps to if it is an output // results. This is used to find the arg_id that a defined value maps to if it is an output
findOutputValues(); findOutputValues();
if (failed(applyConversionPatterns(getModule(), target, converter, std::move(patterns)))) if (failed(applyFullConversion(getModule(), target, std::move(patterns), &converter)))
{ {
emitError(mlir::UnknownLoc::get(&getContext()), "Error lowering nGraph dialect\n"); emitError(mlir::UnknownLoc::get(&getContext()), "Error lowering nGraph dialect\n");
signalPassFailure(); signalPassFailure();
...@@ -187,13 +195,13 @@ namespace ...@@ -187,13 +195,13 @@ namespace
void DialectLoweringPass::findOutputValues() void DialectLoweringPass::findOutputValues()
{ {
// get original function // get original function
auto f = getModule().getNamedFunction("main"); auto f = getModule().lookupSymbol<mlir::FuncOp>("main");
SmallVector<Value*, 4> outputList; SmallVector<Value*, 4> outputList;
unsigned outputCount = 0; unsigned outputCount = 0;
// we find out output values by looking at returned values // we find out output values by looking at returned values
// any return should return all outputs of the subgraph // any return should return all outputs of the subgraph
f->walk<NGReturnOp>([this, &outputCount](NGReturnOp ret) { f.walk<NGReturnOp>([this, &outputCount](NGReturnOp ret) {
for (unsigned i = 0; i < ret.getNumOperands(); i++) for (unsigned i = 0; i < ret.getNumOperands(); i++)
{ {
auto outputValue = ret.getOperand(i); auto outputValue = ret.getOperand(i);
...@@ -280,9 +288,9 @@ namespace ...@@ -280,9 +288,9 @@ namespace
void DialectLoweringPass::processFakeInstrs() void DialectLoweringPass::processFakeInstrs()
{ {
auto context = getModule().getContext(); auto context = getModule().getContext();
auto f = getModule().getNamedFunction("main"); auto f = getModule().lookupSymbol<mlir::FuncOp>("main");
mlir::Block* entryBlock = &*(f->begin()); mlir::Block* entryBlock = &*(f.begin());
auto oldFuncType = f->getType(); auto oldFuncType = f.getType();
ArrayRef<mlir::Type> ipArgs = oldFuncType.getInputs(); ArrayRef<mlir::Type> ipArgs = oldFuncType.getInputs();
ArrayRef<mlir::Type> opArgs = oldFuncType.getResults(); ArrayRef<mlir::Type> opArgs = oldFuncType.getResults();
SmallVector<mlir::Type, 4> allArgs; SmallVector<mlir::Type, 4> allArgs;
...@@ -304,7 +312,7 @@ namespace ...@@ -304,7 +312,7 @@ namespace
entryBlock->addArgument(indexType); entryBlock->addArgument(indexType);
// update type // update type
auto newFuncType = mlir::FunctionType::get(allArgs, {}, context); auto newFuncType = mlir::FunctionType::get(allArgs, {}, context);
f->setType(newFuncType); f.setType(newFuncType);
// RAUW fake outputs with result values // RAUW fake outputs with result values
unsigned i = 0; unsigned i = 0;
...@@ -327,13 +335,13 @@ namespace ...@@ -327,13 +335,13 @@ namespace
/// by nGraph op semantics. /// by nGraph op semantics.
void DialectLoweringPass::insertNoAliasArgAttrs() void DialectLoweringPass::insertNoAliasArgAttrs()
{ {
auto func = getModule().getNamedFunction("main"); auto func = getModule().lookupSymbol<mlir::FuncOp>("main");
unsigned int argIdx = 0; unsigned int argIdx = 0;
for (auto* arg : func->getArguments()) for (auto* arg : func.getArguments())
{ {
if (arg->getType().isa<MemRefType>()) if (arg->getType().isa<MemRefType>())
{ {
func->setArgAttr(argIdx, "llvm.noalias", BoolAttr::get(true, &getContext())); func.setArgAttr(argIdx, "llvm.noalias", BoolAttr::get(true, &getContext()));
} }
++argIdx; ++argIdx;
...@@ -348,21 +356,19 @@ namespace ...@@ -348,21 +356,19 @@ namespace
} }
} }
mlir::Function* DialectLoweringPass::getCallDecl(StringRef name, mlir::FuncOp DialectLoweringPass::getCallDecl(StringRef name,
ArrayRef<Type> args, ArrayRef<Type> args,
ArrayRef<Type> output, ArrayRef<Type> output,
PatternRewriter& rewriter) PatternRewriter& rewriter)
{ {
auto callBackFuncPtr = getModule().getNamedFunction(name); auto callBackFunc = getModule().lookupSymbol<mlir::FuncOp>(name);
if (callBackFuncPtr == nullptr) if (!callBackFunc)
{ {
auto callBackType = rewriter.getFunctionType(args, output); auto callBackType = rewriter.getFunctionType(args, output);
auto callBackFunc = auto callBackFunc = mlir::FuncOp::create(rewriter.getUnknownLoc(), name, callBackType);
llvm::make_unique<mlir::Function>(rewriter.getUnknownLoc(), name, callBackType); getModule().push_back(callBackFunc);
callBackFuncPtr = callBackFunc.get();
getModule().getFunctions().push_back(callBackFunc.release());
} }
return callBackFuncPtr; return callBackFunc;
} }
// NGDialect converters // NGDialect converters
...@@ -394,15 +400,15 @@ namespace ...@@ -394,15 +400,15 @@ namespace
return mlir::IntegerType::get(1 /* width */, boolType.getContext()); return mlir::IntegerType::get(1 /* width */, boolType.getContext());
} }
NGRAPH_CHECK(false, "Unsupported type to lower"); // Do not assert/NGRAPH_CHECK here. Type conversion infra expects `convertType` to return
// the input type if the type is not supported.
return type; return type;
} }
#define REWRITER(OP) \ #define REWRITER(OP) \
PatternMatchResult OP##Conversion::matchAndRewrite( \ PatternMatchResult OP##Conversion::matchAndRewrite( \
Operation* op, ArrayRef<Value*> operands, PatternRewriter& rewriter) const Operation* op, ArrayRef<Value*> operands, ConversionPatternRewriter& rewriter) const
// ADD
REWRITER(NGAddOp) REWRITER(NGAddOp)
{ {
lower_binary_elementwise<mlir::NGAddOp>(op, operands, rewriter, pass); lower_binary_elementwise<mlir::NGAddOp>(op, operands, rewriter, pass);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment