Commit e2673387 authored by Diego Caballero, committed by Sang Ik Lee

[MLIR] Update MLIR/LLVM repos (#3950)

* [MLIR] Update MLIR/LLVM repos

* Move MLIR/LLVM repos forward

This includes a fix to the affine fusion algorithm.

* Fix issues after merge

* Fix lit test
parent 7f3d52a5
@@ -20,8 +20,8 @@ set(MLIR_LLVM_REPO_URL https://github.com/llvm/llvm-project.git)
 set(MLIR_REPO_URL https://github.com/tensorflow/mlir.git)
 # Change these commit IDs to move to latest stable versions
-set(MLIR_LLVM_COMMIT_ID e0f1d9d8729)
-set(MLIR_COMMIT_ID c61db4bb)
+set(MLIR_LLVM_COMMIT_ID 372ad327)
+set(MLIR_COMMIT_ID 721a07c)
 # MLIR environment variables. Some of them are used by LIT tool.
 set(MLIR_PROJECT_ROOT ${CMAKE_CURRENT_BINARY_DIR}/mlir_project)
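Note that the two pins move together: a given tensorflow/mlir commit only builds against a compatible llvm-project commit, so MLIR_COMMIT_ID and MLIR_LLVM_COMMIT_ID should be bumped as a pair.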
@@ -46,6 +46,7 @@ llvm_map_components_to_libnames(llvm_libs support core irreader)
 # Link MLIR libs
 target_link_libraries(
     mlir_backend PRIVATE
+    MLIRAffineToStandard
     MLIRAnalysis
     MLIREDSC
     MLIRExecutionEngine
@@ -33,7 +33,7 @@ def NGSqueezeOp :
 }];
 let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
-let verifier = [{ return mlir::success(); /* TBD */ }];
+// TODO: verifier
 }
 // Unsqueeze Op
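The stubbed verifiers (let verifier = [{ return mlir::success(); /* TBD */ }]) are dropped in favor of a "// TODO: verifier" comment here and in the hunks that follow, so these ops carry no verification until the TODOs are resolved. As a rough, hypothetical sketch (not part of this commit), a future verifier could follow the verifyOp(this) pattern that NGPadOp already uses further down:

```cpp
// Hypothetical sketch of a filled-in verifier, modeled on the existing
// `let verifier = [{ return verifyOp(this); }]` pattern in this dialect.
// The operand-count check is an assumed example, not the real contract.
template <typename T>
static mlir::LogicalResult verifyOp(T* op)
{
    // A real verifier would validate operand/result types and attributes.
    if (op->getOperation()->getNumOperands() != 2)
        return op->emitOpError("expects two operands: data and axes");
    return mlir::success();
}
```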
@@ -47,7 +47,7 @@ def NGUnSqueezeOp :
 }];
 let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
-let verifier = [{ return mlir::success(); /* TBD */ }];
+// TODO: verifier
 }
 // Squared Difference Op
@@ -60,7 +60,7 @@ def NGSquaredDiffOp :
 }];
 let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
-let verifier = [{ return mlir::success(); /* TBD */ }];
+// TODO: verifier
 }
 // Split Op
@@ -74,7 +74,7 @@ def NGSplitOp :
 }];
 let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
-let verifier = [{ return mlir::success(); /* TBD */ }];
+// TODO: verifier
 let extraClassDeclaration = [{
 void setAxis(const Attribute& attr) { this->setAttr("axis", attr); }
@@ -102,7 +102,7 @@ def NGSpaceToDepthOp :
 }];
 let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
-let verifier = [{ return mlir::success(); /* TBD */ }];
+// TODO: verifier
 let extraClassDeclaration = [{
 void setBlockSize(const Attribute& attr) { this->setAttr("blockSize", attr); }
@@ -127,7 +127,7 @@ def NGShuffleChannelsOp :
 }];
 let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
-let verifier = [{ return mlir::success(); /* TBD */ }];
+// TODO: verifier
 let extraClassDeclaration = [{
 void setAxis(const Attribute& axis) { this->setAttr("axis", axis); }
@@ -146,7 +146,7 @@ def NGScaleShiftOp :
 }];
 let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
-let verifier = [{ return mlir::success(); /* TBD */ }];
+// TODO: verifier
 }
 // RNN Cell Op
@@ -166,7 +166,7 @@ def NGRNNCellOp :
 }];
 let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
-let verifier = [{ return mlir::success(); /* TBD */ }];
+// TODO: verifier
 let builders = [
 OpBuilder<
@@ -213,7 +213,7 @@ def NGPrelu :
 }];
 let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
-let verifier = [{ return mlir::success(); /* TBD */ }];
+// TODO: verifier
 }
 // Normalize L2 Op
@@ -233,7 +233,7 @@ def NGNormalizeL2Op :
 }];
 let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
-let verifier = [{ return mlir::success(); /* TBD */ }];
+// TODO: verifier
 let extraClassDeclaration = [{
 void setEpsMode(const Attribute& epsMode) { this->setAttr("epsMOde", epsMode); }
@@ -358,9 +358,7 @@ def NGLSTMCellOp :
 }];
 let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
-let verifier = [{
-return mlir::success(); /* TBD */
-}];
+// TODO: verifier
 let builders = [
 OpBuilder<
@@ -445,9 +443,7 @@ def NGLSTMSequenceOp :
 }];
 let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
-let verifier = [{
-return mlir::success(); /* TBD */
-}];
+// TODO: verifier
 let extraClassDeclaration = [{
 void setHiddenSize (const Attribute& attr) { this->setAttr("hiddenSize", attr); }
@@ -499,7 +495,7 @@ def NGGRUCellOp :
 }];
 let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
-let verifier = [{ return mlir::success(); /* TBD */ }];
+// TODO: verifier
 let builders = [
 OpBuilder<
@@ -716,8 +712,7 @@ def NGGroupConvOp :
 I64ArrayAttr:$padBelow,
 I64ArrayAttr:$padAbove,
 I64Attr:$groups,
-DefaultValuedAttr<PadTypeEnumAttr,
-"static_cast<int64_t>(MLIRPadType::EXPLICIT)">:$padType)>
+DefaultValuedAttr<PadTypeEnumAttr, "MLIRPadType::EXPLICIT">:$padType)>
 {
 let summary = "Group Convolution Op";
 let description = [{
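This and the remaining DefaultValuedAttr hunks track an upstream ODS change: an enum attribute's default value is now written as the C++ enum constant itself instead of a static_cast<int64_t>(...), and the generated accessor hands back the typed enum (see the unit-test hunks at the bottom of this diff). Roughly, and only as an assumed sketch of the tblgen output for the pinned revision:

```cpp
// Assumed shape of the ODS-generated accessor after the update: the
// attribute is still stored as an integer, but the accessor converts
// it to the typed enum, so call sites no longer cast by hand.
MLIRPadType NGGroupConvOp::padType()
{
    auto attr = this->getAttrOfType<mlir::IntegerAttr>("padType");
    return static_cast<MLIRPadType>(attr.getInt());
}
```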
@@ -755,8 +750,7 @@ def NGGroupConvTransposeOp :
 I64ArrayAttr:$strides, I64ArrayAttr:$padBelow, I64ArrayAttr:$padAbove,
 I64ArrayAttr:$outputPad,
 DefaultValuedAttr<I64Attr, "1UL">:$groups,
-DefaultValuedAttr<PadTypeEnumAttr,
-"static_cast<int64_t>(MLIRPadType::EXPLICIT)">:$padType,
+DefaultValuedAttr<PadTypeEnumAttr, "MLIRPadType::EXPLICIT">:$padType,
 I64ArrayAttr:$outputShape)>
 {
 let summary = "Group Transpose Convolution (Deconvolution)";
@@ -898,11 +892,10 @@ def NGEluOp :
 // FakeQuant Op
 def NGFakeQuantOp :
 NG_OneResult_Op<"fakeQuant", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
-Arguments<(ins NG_TensorType:$data, NG_TensorType:$inputLow, NG_TensorType:$inputHigh,
-NG_TensorType:$outputLow, NG_TensorType:$outputHigh,
+Arguments<(ins NG_TensorType:$data, NG_TensorType:$inputLow, NG_TensorType:$inputHigh,
+NG_TensorType:$outputLow, NG_TensorType:$outputHigh,
 I64Attr:$levels,
-DefaultValuedAttr<AutoBroadcastEnumAttr,
-"static_cast<int64_t>(MLIRPadType::EXPLICIT)">:$autoBroadcast)>
+DefaultValuedAttr<AutoBroadcastEnumAttr, "MLIRAutoBroadcastMode::NONE">:$autoBroadcast)>
 {
 let summary = "Op performing element-wise linear quantization.";
 let description = [{
@@ -22,6 +22,7 @@
 #include <cstdarg>
 #include "mlir/IR/Builders.h"
 #include "mlir/IR/OpDefinition.h"
+#include "mlir/IR/OpImplementation.h"
 #include "mlir/IR/OperationSupport.h"
 #include "mlir/IR/StandardTypes.h"
 #include "mlir/Support/STLExtras.h"
@@ -21,8 +21,9 @@
 // NOTE: This file follows nGraph format style and MLIR naming convention since it does
 // not expose public API to the rest of nGraph codebase and heavily depends on MLIR API.
-include "mlir/IR/OpBase.td"
+include "core/ngraph_dialect/ops_interfaces.td"
+include "mlir/IR/OpBase.td"
 // nGraph Dialect operations definitions
 //
 // This files declares nGraph operations that table-gen uses to create C++ code
@@ -188,7 +188,7 @@ def NGAvgPoolOp :
 I64ArrayAttr :$padBelow,
 I64ArrayAttr :$padAbove,
 DefaultValuedAttr<BoolAttr, "false">:$includePadding,
-DefaultValuedAttr<PadTypeEnumAttr, "static_cast<int64_t>(MLIRPadType::EXPLICIT)"> :$padType,
+DefaultValuedAttr<PadTypeEnumAttr, "MLIRPadType::EXPLICIT"> :$padType,
 DefaultValuedAttr<BoolAttr, "false"> :$ceilMode
 )>
 {
@@ -398,7 +398,7 @@ def NGMaxPoolOp :
 I64ArrayAttr :$windowMovementStrides,
 I64ArrayAttr :$padBelow,
 I64ArrayAttr :$padAbove,
-DefaultValuedAttr<PadTypeEnumAttr, "static_cast<int64_t>(MLIRPadType::EXPLICIT)"> :$padType,
+DefaultValuedAttr<PadTypeEnumAttr, "MLIRPadType::EXPLICIT"> :$padType,
 DefaultValuedAttr<BoolAttr, "false"> :$ceilMode
 )>
 {
@@ -525,7 +525,7 @@ def NGPadOp :
 NG_TensorType :$padValue,
 I64ArrayAttr :$padBelow,
 I64ArrayAttr :$padAbove,
-DefaultValuedAttr<PadModeEnumAttr, "static_cast<int64_t>(MLIRPadMode::CONSTANT)"> :$padMode)>
+DefaultValuedAttr<PadModeEnumAttr, "MLIRPadMode::CONSTANT"> :$padMode)>
 {
 let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
 let verifier = [{ return verifyOp(this); }];
@@ -662,7 +662,7 @@ def NGTopKOp :
 I64Attr :$axis,
 TypeAttr :$indexType,
 DefaultValuedAttr<BoolAttr, "true"> :$computeMax,
-DefaultValuedAttr<SortTypeEnumAttr, "static_cast<int64_t>(MLIRSortType::VALUES)"> :$sortType)>
+DefaultValuedAttr<SortTypeEnumAttr, "MLIRSortType::VALUES"> :$sortType)>
 {
 let summary = "Softmax operation.";
 let description = [{
@@ -103,7 +103,8 @@ void MLIRCPURuntime::bindArguments(std::vector<void*>& externalTensors)
 for (size_t i = 0, numArgs = m_invokeArgs.size(); i < numArgs; ++i)
 {
 auto* memRefArg = *(reinterpret_cast<StaticMemRef**>(m_invokeArgs[i]));
-memRefArg->data = reinterpret_cast<float*>((*m_externalTensors)[i]);
+memRefArg->allocatedPtr = (*m_externalTensors)[i];
+memRefArg->alignedPtr = (*m_externalTensors)[i];
 }
 }
@@ -161,6 +162,7 @@ StaticMemRef* MLIRCPURuntime::allocateMemrefDescriptor()
 // We should expand this with different types and dynamic MemRefs
 auto* descriptor = reinterpret_cast<StaticMemRef*>(malloc(sizeof(StaticMemRef)));
 NGRAPH_CHECK(descriptor != nullptr, "NULL MemRef descriptor");
-descriptor->data = nullptr;
+descriptor->allocatedPtr = nullptr;
+descriptor->alignedPtr = nullptr;
 return descriptor;
 }
@@ -35,7 +35,8 @@ namespace ngraph
 {
 struct StaticMemRef
 {
-void* data;
+void* allocatedPtr;
+void* alignedPtr;
 };
 /// A CPU Runtime is an MLIR runtime that owns an MLIR context and a module
 /// The module should be in LLVM dialect and ready to be lowered via an MLIR
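The StaticMemRef change mirrors the updated MLIR memref descriptor convention, which separates the pointer returned by the allocator (allocatedPtr, the one to free) from the possibly adjusted pointer that generated code actually dereferences (alignedPtr). Because nGraph's external tensors are owned by the caller and used as-is, bindArguments() above fills both fields with the same address. A minimal sketch of the convention; bindExternalBuffer is a hypothetical helper, not part of this commit:

```cpp
// Descriptor layout after this commit, per the updated MLIR lowering
// convention for statically shaped memrefs (simplified to two pointers).
struct StaticMemRef
{
    void* allocatedPtr; // what the allocator returned; used when freeing
    void* alignedPtr;   // what generated code loads from / stores to
};

// Hypothetical helper: bind an externally owned, already-aligned buffer.
inline void bindExternalBuffer(StaticMemRef& desc, void* buffer)
{
    desc.allocatedPtr = buffer; // not owned by the runtime; never freed here
    desc.alignedPtr = buffer;   // assumed suitably aligned by the caller
}
```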
@@ -115,7 +115,7 @@ TEST(MLIR, ops_attributes)
 .getOperation();
 auto avgPool = cast<NGAvgPoolOp>(operation);
-auto padType = static_cast<MLIRPadType>(avgPool.padType().getSExtValue());
+auto padType = avgPool.padType();
 EXPECT_TRUE(padType == MLIRPadType::SAME_LOWER);
 operation =
@@ -131,7 +131,7 @@ TEST(MLIR, ops_attributes)
 .getOperation();
 avgPool = cast<NGAvgPoolOp>(operation);
-padType = static_cast<MLIRPadType>(avgPool.padType().getSExtValue());
+padType = avgPool.padType();
 EXPECT_TRUE(padType == MLIRPadType::EXPLICIT);
 auto ceilMode = avgPool.ceilMode();
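The assertions are unchanged in meaning: padType() now returns MLIRPadType directly, so the getSExtValue()/static_cast round-trip through the raw integer attribute is no longer needed.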