Commit e2673387 authored by Diego Caballero's avatar Diego Caballero Committed by Sang Ik Lee

[MLIR] Update MLIR/LLVM repos (#3950)

* [MLIR] Update MLIR/LLVM repos

* Move MLIR/LLVM repos forward

This includes fix to affine fusion algorithm.

* Fix issues after merge

* Fix lit test
parent 7f3d52a5
...@@ -20,8 +20,8 @@ set(MLIR_LLVM_REPO_URL https://github.com/llvm/llvm-project.git) ...@@ -20,8 +20,8 @@ set(MLIR_LLVM_REPO_URL https://github.com/llvm/llvm-project.git)
set(MLIR_REPO_URL https://github.com/tensorflow/mlir.git) set(MLIR_REPO_URL https://github.com/tensorflow/mlir.git)
# Change these commit IDs to move to latest stable versions # Change these commit IDs to move to latest stable versions
set(MLIR_LLVM_COMMIT_ID e0f1d9d8729) set(MLIR_LLVM_COMMIT_ID 372ad327)
set(MLIR_COMMIT_ID c61db4bb) set(MLIR_COMMIT_ID 721a07c)
# MLIR environment variables. Some of them are used by LIT tool. # MLIR environment variables. Some of them are used by LIT tool.
set(MLIR_PROJECT_ROOT ${CMAKE_CURRENT_BINARY_DIR}/mlir_project) set(MLIR_PROJECT_ROOT ${CMAKE_CURRENT_BINARY_DIR}/mlir_project)
......
...@@ -46,6 +46,7 @@ llvm_map_components_to_libnames(llvm_libs support core irreader) ...@@ -46,6 +46,7 @@ llvm_map_components_to_libnames(llvm_libs support core irreader)
# Link MLIR libs # Link MLIR libs
target_link_libraries( target_link_libraries(
mlir_backend PRIVATE mlir_backend PRIVATE
MLIRAffineToStandard
MLIRAnalysis MLIRAnalysis
MLIREDSC MLIREDSC
MLIRExecutionEngine MLIRExecutionEngine
......
...@@ -33,7 +33,7 @@ def NGSqueezeOp : ...@@ -33,7 +33,7 @@ def NGSqueezeOp :
}]; }];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }]; let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }]; // TODO: verifier
} }
// Unsqueeze Op // Unsqueeze Op
...@@ -47,7 +47,7 @@ def NGUnSqueezeOp : ...@@ -47,7 +47,7 @@ def NGUnSqueezeOp :
}]; }];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }]; let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }]; // TODO: verifier
} }
// Squared Difference Op // Squared Difference Op
...@@ -60,7 +60,7 @@ def NGSquaredDiffOp : ...@@ -60,7 +60,7 @@ def NGSquaredDiffOp :
}]; }];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }]; let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }]; // TODO: verifier
} }
// Split Op // Split Op
...@@ -74,7 +74,7 @@ def NGSplitOp : ...@@ -74,7 +74,7 @@ def NGSplitOp :
}]; }];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }]; let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }]; // TODO: verifier
let extraClassDeclaration = [{ let extraClassDeclaration = [{
void setAxis(const Attribute& attr) { this->setAttr("axis", attr); } void setAxis(const Attribute& attr) { this->setAttr("axis", attr); }
...@@ -102,7 +102,7 @@ def NGSpaceToDepthOp : ...@@ -102,7 +102,7 @@ def NGSpaceToDepthOp :
}]; }];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }]; let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }]; // TODO: verifier
let extraClassDeclaration = [{ let extraClassDeclaration = [{
void setBlockSize(const Attribute& attr) { this->setAttr("blockSize", attr); } void setBlockSize(const Attribute& attr) { this->setAttr("blockSize", attr); }
...@@ -127,7 +127,7 @@ def NGShuffleChannelsOp : ...@@ -127,7 +127,7 @@ def NGShuffleChannelsOp :
}]; }];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }]; let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }]; // TODO: verifier
let extraClassDeclaration = [{ let extraClassDeclaration = [{
void setAxis(const Attribute& axis) { this->setAttr("axis", axis); } void setAxis(const Attribute& axis) { this->setAttr("axis", axis); }
...@@ -146,7 +146,7 @@ def NGScaleShiftOp : ...@@ -146,7 +146,7 @@ def NGScaleShiftOp :
}]; }];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }]; let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }]; // TODO: verifier
} }
// RNN Cell Op // RNN Cell Op
...@@ -166,7 +166,7 @@ def NGRNNCellOp : ...@@ -166,7 +166,7 @@ def NGRNNCellOp :
}]; }];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }]; let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }]; // TODO: verifier
let builders = [ let builders = [
OpBuilder< OpBuilder<
...@@ -213,7 +213,7 @@ def NGPrelu : ...@@ -213,7 +213,7 @@ def NGPrelu :
}]; }];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }]; let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }]; // TODO: verifier
} }
// Normalize L2 Op // Normalize L2 Op
...@@ -233,7 +233,7 @@ def NGNormalizeL2Op : ...@@ -233,7 +233,7 @@ def NGNormalizeL2Op :
}]; }];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }]; let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }]; // TODO: verifier
let extraClassDeclaration = [{ let extraClassDeclaration = [{
void setEpsMode(const Attribute& epsMode) { this->setAttr("epsMOde", epsMode); } void setEpsMode(const Attribute& epsMode) { this->setAttr("epsMOde", epsMode); }
...@@ -358,9 +358,7 @@ def NGLSTMCellOp : ...@@ -358,9 +358,7 @@ def NGLSTMCellOp :
}]; }];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }]; let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ // TODO: verifier
return mlir::success(); /* TBD */
}];
let builders = [ let builders = [
OpBuilder< OpBuilder<
...@@ -445,9 +443,7 @@ def NGLSTMSequenceOp : ...@@ -445,9 +443,7 @@ def NGLSTMSequenceOp :
}]; }];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }]; let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ // TODO: verifier
return mlir::success(); /* TBD */
}];
let extraClassDeclaration = [{ let extraClassDeclaration = [{
void setHiddenSize (const Attribute& attr) { this->setAttr("hiddenSize", attr); } void setHiddenSize (const Attribute& attr) { this->setAttr("hiddenSize", attr); }
...@@ -499,7 +495,7 @@ def NGGRUCellOp : ...@@ -499,7 +495,7 @@ def NGGRUCellOp :
}]; }];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }]; let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }]; // TODO: verifier
let builders = [ let builders = [
OpBuilder< OpBuilder<
...@@ -716,8 +712,7 @@ def NGGroupConvOp : ...@@ -716,8 +712,7 @@ def NGGroupConvOp :
I64ArrayAttr:$padBelow, I64ArrayAttr:$padBelow,
I64ArrayAttr:$padAbove, I64ArrayAttr:$padAbove,
I64Attr:$groups, I64Attr:$groups,
DefaultValuedAttr<PadTypeEnumAttr, DefaultValuedAttr<PadTypeEnumAttr, "MLIRPadType::EXPLICIT">:$padType)>
"static_cast<int64_t>(MLIRPadType::EXPLICIT)">:$padType)>
{ {
let summary = "Group Convolution Op"; let summary = "Group Convolution Op";
let description = [{ let description = [{
...@@ -755,8 +750,7 @@ def NGGroupConvTransposeOp : ...@@ -755,8 +750,7 @@ def NGGroupConvTransposeOp :
I64ArrayAttr:$strides, I64ArrayAttr:$padBelow, I64ArrayAttr:$padAbove, I64ArrayAttr:$strides, I64ArrayAttr:$padBelow, I64ArrayAttr:$padAbove,
I64ArrayAttr:$outputPad, I64ArrayAttr:$outputPad,
DefaultValuedAttr<I64Attr, "1UL">:$groups, DefaultValuedAttr<I64Attr, "1UL">:$groups,
DefaultValuedAttr<PadTypeEnumAttr, DefaultValuedAttr<PadTypeEnumAttr, "MLIRPadType::EXPLICIT">:$padType,
"static_cast<int64_t>(MLIRPadType::EXPLICIT)">:$padType,
I64ArrayAttr:$outputShape)> I64ArrayAttr:$outputShape)>
{ {
let summary = "Group Transpose Convolution (Deconvolution)"; let summary = "Group Transpose Convolution (Deconvolution)";
...@@ -898,11 +892,10 @@ def NGEluOp : ...@@ -898,11 +892,10 @@ def NGEluOp :
// FakeQuant Op // FakeQuant Op
def NGFakeQuantOp : def NGFakeQuantOp :
NG_OneResult_Op<"fakeQuant", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>, NG_OneResult_Op<"fakeQuant", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$data, NG_TensorType:$inputLow, NG_TensorType:$inputHigh, Arguments<(ins NG_TensorType:$data, NG_TensorType:$inputLow, NG_TensorType:$inputHigh,
NG_TensorType:$outputLow, NG_TensorType:$outputHigh, NG_TensorType:$outputLow, NG_TensorType:$outputHigh,
I64Attr:$levels, I64Attr:$levels,
DefaultValuedAttr<AutoBroadcastEnumAttr, DefaultValuedAttr<AutoBroadcastEnumAttr, "MLIRAutoBroadcastMode::NONE">:$autoBroadcast)>
"static_cast<int64_t>(MLIRPadType::EXPLICIT)">:$autoBroadcast)>
{ {
let summary = "Op performing element-wise linear quantization."; let summary = "Op performing element-wise linear quantization.";
let description = [{ let description = [{
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <cstdarg> #include <cstdarg>
#include "mlir/IR/Builders.h" #include "mlir/IR/Builders.h"
#include "mlir/IR/OpDefinition.h" #include "mlir/IR/OpDefinition.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/OperationSupport.h" #include "mlir/IR/OperationSupport.h"
#include "mlir/IR/StandardTypes.h" #include "mlir/IR/StandardTypes.h"
#include "mlir/Support/STLExtras.h" #include "mlir/Support/STLExtras.h"
......
...@@ -21,8 +21,9 @@ ...@@ -21,8 +21,9 @@
// NOTE: This file follows nGraph format style and MLIR naming convention since it does // NOTE: This file follows nGraph format style and MLIR naming convention since it does
// not expose public API to the rest of nGraph codebase and heavily depends on MLIR API. // not expose public API to the rest of nGraph codebase and heavily depends on MLIR API.
include "mlir/IR/OpBase.td"
include "core/ngraph_dialect/ops_interfaces.td" include "core/ngraph_dialect/ops_interfaces.td"
include "mlir/IR/OpBase.td"
// nGraph Dialect operations definitions // nGraph Dialect operations definitions
// //
// This files declares nGraph operations that table-gen uses to create C++ code // This files declares nGraph operations that table-gen uses to create C++ code
......
...@@ -188,7 +188,7 @@ def NGAvgPoolOp : ...@@ -188,7 +188,7 @@ def NGAvgPoolOp :
I64ArrayAttr :$padBelow, I64ArrayAttr :$padBelow,
I64ArrayAttr :$padAbove, I64ArrayAttr :$padAbove,
DefaultValuedAttr<BoolAttr, "false">:$includePadding, DefaultValuedAttr<BoolAttr, "false">:$includePadding,
DefaultValuedAttr<PadTypeEnumAttr, "static_cast<int64_t>(MLIRPadType::EXPLICIT)"> :$padType, DefaultValuedAttr<PadTypeEnumAttr, "MLIRPadType::EXPLICIT"> :$padType,
DefaultValuedAttr<BoolAttr, "false"> :$ceilMode DefaultValuedAttr<BoolAttr, "false"> :$ceilMode
)> )>
{ {
...@@ -398,7 +398,7 @@ def NGMaxPoolOp : ...@@ -398,7 +398,7 @@ def NGMaxPoolOp :
I64ArrayAttr :$windowMovementStrides, I64ArrayAttr :$windowMovementStrides,
I64ArrayAttr :$padBelow, I64ArrayAttr :$padBelow,
I64ArrayAttr :$padAbove, I64ArrayAttr :$padAbove,
DefaultValuedAttr<PadTypeEnumAttr, "static_cast<int64_t>(MLIRPadType::EXPLICIT)"> :$padType, DefaultValuedAttr<PadTypeEnumAttr, "MLIRPadType::EXPLICIT"> :$padType,
DefaultValuedAttr<BoolAttr, "false"> :$ceilMode DefaultValuedAttr<BoolAttr, "false"> :$ceilMode
)> )>
{ {
...@@ -525,7 +525,7 @@ def NGPadOp : ...@@ -525,7 +525,7 @@ def NGPadOp :
NG_TensorType :$padValue, NG_TensorType :$padValue,
I64ArrayAttr :$padBelow, I64ArrayAttr :$padBelow,
I64ArrayAttr :$padAbove, I64ArrayAttr :$padAbove,
DefaultValuedAttr<PadModeEnumAttr, "static_cast<int64_t>(MLIRPadMode::CONSTANT)"> :$padMode)> DefaultValuedAttr<PadModeEnumAttr, "MLIRPadMode::CONSTANT"> :$padMode)>
{ {
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }]; let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return verifyOp(this); }]; let verifier = [{ return verifyOp(this); }];
...@@ -662,7 +662,7 @@ def NGTopKOp : ...@@ -662,7 +662,7 @@ def NGTopKOp :
I64Attr :$axis, I64Attr :$axis,
TypeAttr :$indexType, TypeAttr :$indexType,
DefaultValuedAttr<BoolAttr, "true"> :$computeMax, DefaultValuedAttr<BoolAttr, "true"> :$computeMax,
DefaultValuedAttr<SortTypeEnumAttr, "static_cast<int64_t>(MLIRSortType::VALUES)"> :$sortType)> DefaultValuedAttr<SortTypeEnumAttr, "MLIRSortType::VALUES"> :$sortType)>
{ {
let summary = "Softmax operation."; let summary = "Softmax operation.";
let description = [{ let description = [{
......
...@@ -103,7 +103,8 @@ void MLIRCPURuntime::bindArguments(std::vector<void*>& externalTensors) ...@@ -103,7 +103,8 @@ void MLIRCPURuntime::bindArguments(std::vector<void*>& externalTensors)
for (size_t i = 0, numArgs = m_invokeArgs.size(); i < numArgs; ++i) for (size_t i = 0, numArgs = m_invokeArgs.size(); i < numArgs; ++i)
{ {
auto* memRefArg = *(reinterpret_cast<StaticMemRef**>(m_invokeArgs[i])); auto* memRefArg = *(reinterpret_cast<StaticMemRef**>(m_invokeArgs[i]));
memRefArg->data = reinterpret_cast<float*>((*m_externalTensors)[i]); memRefArg->allocatedPtr = (*m_externalTensors)[i];
memRefArg->alignedPtr = (*m_externalTensors)[i];
} }
} }
...@@ -161,6 +162,7 @@ StaticMemRef* MLIRCPURuntime::allocateMemrefDescriptor() ...@@ -161,6 +162,7 @@ StaticMemRef* MLIRCPURuntime::allocateMemrefDescriptor()
// We should expand this with different types and dynamic MemRefs // We should expand this with different types and dynamic MemRefs
auto* descriptor = reinterpret_cast<StaticMemRef*>(malloc(sizeof(StaticMemRef))); auto* descriptor = reinterpret_cast<StaticMemRef*>(malloc(sizeof(StaticMemRef)));
NGRAPH_CHECK(descriptor != nullptr, "NULL MemRef descriptor"); NGRAPH_CHECK(descriptor != nullptr, "NULL MemRef descriptor");
descriptor->data = nullptr; descriptor->allocatedPtr = nullptr;
descriptor->alignedPtr = nullptr;
return descriptor; return descriptor;
} }
...@@ -35,7 +35,8 @@ namespace ngraph ...@@ -35,7 +35,8 @@ namespace ngraph
{ {
struct StaticMemRef struct StaticMemRef
{ {
void* data; void* allocatedPtr;
void* alignedPtr;
}; };
/// A CPU Runtime is an MLIR runtime that owns an MLIR context and a module /// A CPU Runtime is an MLIR runtime that owns an MLIR context and a module
/// The module should be in LLVM dialect and ready to be lowered via an MLIR /// The module should be in LLVM dialect and ready to be lowered via an MLIR
......
This diff is collapsed.
...@@ -115,7 +115,7 @@ TEST(MLIR, ops_attributes) ...@@ -115,7 +115,7 @@ TEST(MLIR, ops_attributes)
.getOperation(); .getOperation();
auto avgPool = cast<NGAvgPoolOp>(operation); auto avgPool = cast<NGAvgPoolOp>(operation);
auto padType = static_cast<MLIRPadType>(avgPool.padType().getSExtValue()); auto padType = avgPool.padType();
EXPECT_TRUE(padType == MLIRPadType::SAME_LOWER); EXPECT_TRUE(padType == MLIRPadType::SAME_LOWER);
operation = operation =
...@@ -131,7 +131,7 @@ TEST(MLIR, ops_attributes) ...@@ -131,7 +131,7 @@ TEST(MLIR, ops_attributes)
.getOperation(); .getOperation();
avgPool = cast<NGAvgPoolOp>(operation); avgPool = cast<NGAvgPoolOp>(operation);
padType = static_cast<MLIRPadType>(avgPool.padType().getSExtValue()); padType = avgPool.padType();
EXPECT_TRUE(padType == MLIRPadType::EXPLICIT); EXPECT_TRUE(padType == MLIRPadType::EXPLICIT);
auto ceilMode = avgPool.ceilMode(); auto ceilMode = avgPool.ceilMode();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment