Commit a643213d authored by Diego Caballero, committed by Scott Cyphers

[MLIR] Update build system to use LLVM mono-repo for MLIR (#4188)

* [MLIR] Update build system to use LLVM mono-repo for MLIR

* [MLIR] LLVM mono-repo conflicts

* Disable lit tests

* Fix formatting

* Fix memopt tests

* PR fix

* Fix view test
Co-authored-by: Nagy Mostafa <nagy.mostafa@gmail.com>
parent 1e13ad94
@@ -17,11 +17,9 @@
include(ExternalProject)
set(MLIR_LLVM_REPO_URL https://github.com/llvm/llvm-project.git)
set(MLIR_REPO_URL https://github.com/tensorflow/mlir.git)
# Change these commit IDs to move to latest stable versions
set(MLIR_LLVM_COMMIT_ID c36773c7)
set(MLIR_COMMIT_ID 606e96a1)
set(MLIR_LLVM_COMMIT_ID d6295255)
# MLIR environment variables. Some of them are used by LIT tool.
@@ -32,9 +30,10 @@ else()
endif()
set(MLIR_LLVM_ROOT ${MLIR_PROJECT_ROOT}/llvm-projects)
set(MLIR_SOURCE_DIR ${MLIR_LLVM_ROOT}/llvm/projects/mlir)
set(MLIR_BUILD_DIR ${MLIR_LLVM_ROOT}/build)
set(MLIR_TOOLS_DIR ${MLIR_BUILD_DIR}/bin)
set(MLIR_LLVM_SOURCE_DIR ${MLIR_LLVM_ROOT}/llvm)
set(MLIR_SOURCE_DIR ${MLIR_LLVM_ROOT}/mlir)
set(MLIR_LLVM_BUILD_DIR ${MLIR_PROJECT_ROOT}/build)
set(MLIR_LLVM_TOOLS_DIR ${MLIR_LLVM_BUILD_DIR}/bin)
set(NGRAPH_LIT_TEST_SRC_DIR ${CMAKE_SOURCE_DIR}/test/mlir)
set(NGRAPH_LIT_TEST_BUILD_DIR ${CMAKE_CURRENT_BINARY_DIR}/test/mlir)
@@ -48,17 +47,13 @@ if (NOT NGRAPH_USE_PREBUILT_MLIR)
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${MLIR_PROJECT_ROOT}")
# clone and build llvm
# Clone and build llvm + mlir.
execute_process(COMMAND "${CMAKE_COMMAND}" --build . --target ext_mlir_llvm
WORKING_DIRECTORY "${MLIR_PROJECT_ROOT}")
# clone and build mlir
execute_process(COMMAND "${CMAKE_COMMAND}" --build . --target ext_mlir
WORKING_DIRECTORY "${MLIR_PROJECT_ROOT}")
endif()
# Enable modules for LLVM.
set(LLVM_DIR "${MLIR_BUILD_DIR}/lib/cmake/llvm"
set(LLVM_DIR "${MLIR_LLVM_BUILD_DIR}/lib/cmake/llvm"
CACHE PATH "Path to LLVM cmake modules")
list(APPEND CMAKE_MODULE_PATH "${LLVM_DIR}")
include(AddLLVM)
@@ -71,7 +66,7 @@ message(STATUS "Using modules in: ${LLVM_DIR}")
message(STATUS "LLVM RTTI is ${LLVM_ENABLE_RTTI}")
set(MLIR_SRC_INCLUDE_PATH ${MLIR_SOURCE_DIR}/include)
set(MLIR_BIN_INCLUDE_PATH ${MLIR_BUILD_DIR}/projects/mlir/include)
set(MLIR_BIN_INCLUDE_PATH ${MLIR_LLVM_BUILD_DIR}/tools/mlir/include)
set(MLIR_INCLUDE_PATHS ${MLIR_SRC_INCLUDE_PATH};${MLIR_BIN_INCLUDE_PATH})
set(MLIR_LLVM_INCLUDE_PATH ${LLVM_INCLUDE_DIRS})
......
@@ -20,22 +20,6 @@ include(ExternalProject)
project(mlir-fetch NONE)
ExternalProject_Add(
ext_mlir_llvm
PREFIX mlir_llvm
GIT_REPOSITORY @MLIR_LLVM_REPO_URL@
GIT_TAG @MLIR_LLVM_COMMIT_ID@
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
UPDATE_COMMAND ""
SOURCE_DIR @MLIR_LLVM_ROOT@
DOWNLOAD_NO_PROGRESS TRUE
EXCLUDE_FROM_ALL TRUE
)
set(MLIR_DEPENDS ext_mlir_llvm)
include(ProcessorCount)
ProcessorCount(N)
if(N EQUAL 0)
@@ -43,22 +27,18 @@ if(N EQUAL 0)
endif()
ExternalProject_Add(
ext_mlir
PREFIX mlir
DEPENDS ${MLIR_DEPENDS}
GIT_REPOSITORY @MLIR_REPO_URL@
GIT_TAG @MLIR_COMMIT_ID@
CONFIGURE_COMMAND ""
CMAKE_GENERATOR "@CMAKE_GENERATOR@"
ext_mlir_llvm
PREFIX mlir_llvm
GIT_REPOSITORY @MLIR_LLVM_REPO_URL@
GIT_TAG @MLIR_LLVM_COMMIT_ID@
CMAKE_GENERATOR @CMAKE_GENERATOR@
CMAKE_GENERATOR_PLATFORM @CMAKE_GENERATOR_PLATFORM@
CMAKE_GENERATOR_TOOLSET @CMAKE_GENERATOR_TOOLSET@
BUILD_COMMAND @CMAKE_COMMAND@ ../llvm -DLLVM_BUILD_EXAMPLES=ON -DLLVM_TARGETS_TO_BUILD=host -DLLVM_ENABLE_RTTI=ON -DCMAKE_BUILD_TYPE=@CMAKE_BUILD_TYPE@
COMMAND @CMAKE_COMMAND@ --build . --target check-mlir -- -j${N}
CONFIGURE_COMMAND @CMAKE_COMMAND@ @MLIR_LLVM_SOURCE_DIR@ -DLLVM_ENABLE_PROJECTS=mlir -DLLVM_BUILD_EXAMPLES=ON -DLLVM_TARGETS_TO_BUILD=host -DLLVM_ENABLE_RTTI=ON -DCMAKE_BUILD_TYPE=@CMAKE_BUILD_TYPE@
BUILD_COMMAND @CMAKE_COMMAND@ --build . --target check-mlir -- -j${N}
INSTALL_COMMAND ""
UPDATE_COMMAND ""
SOURCE_DIR @MLIR_SOURCE_DIR@
BINARY_DIR @MLIR_BUILD_DIR@
SOURCE_DIR @MLIR_LLVM_ROOT@
BINARY_DIR @MLIR_LLVM_BUILD_DIR@
STAMP_DIR "@MLIR_PROJECT_ROOT@/mlir/stamp"
DOWNLOAD_NO_PROGRESS TRUE
EXCLUDE_FROM_ALL TRUE
......
@@ -81,7 +81,6 @@ mlir::Type NGraphOpsDialect::parseEltType(mlir::DialectAsmParser& parser) const
{
// Process nGraph integer element types.
MLIRContext* context = getContext();
int width = 0;
bool isSigned = false;
llvm::SMLoc loc = parser.getCurrentLocation();
......
@@ -171,7 +171,7 @@ def NGRNNCellOp :
let builders = [
OpBuilder<
"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *X, Value* W, Value* R, Value* H_t, "
"Value X, Value W, Value R, Value H_t, "
"Attribute hiddenSize, ArrayAttr activations,"
"ArrayAttr activationAlpha, ArrayAttr activationBeta, Attribute clip", [{
tblgen_state.addOperands({X, W, R, H_t});
@@ -192,7 +192,7 @@ def NGRNNCellOp :
void setClip(const Attribute& attr) { this->setAttr("clip", attr); }
// get bias operand if present
Value* B()
Value B()
{
auto varArgs = optionalArgs();
return varArgs.begin() != varArgs.end() ? *varArgs.begin() : nullptr;
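Note: the `Value* -> Value` rewrites in these builder signatures and optional-operand getters track an upstream MLIR change that comes with the mono-repo bump: `mlir::Value` became a lightweight value-semantics handle around the underlying IR object, so it is passed and stored by value rather than by pointer. A minimal sketch of the new pattern, assuming a mono-repo-era MLIR (the function names here are illustrative, not nGraph's):

```cpp
#include "mlir/IR/OperationSupport.h" // mlir::OperationState, mlir::ValueRange
#include "mlir/IR/Value.h"            // mlir::Value (value-type handle)

// Operands are now appended by value; copying a Value is cheap.
void addCellOperands(mlir::OperationState& state, mlir::Value X, mlir::Value W)
{
    state.addOperands({X, W});
}

// A default-constructed Value is "null" and tests false, which is why the
// optional-operand getters above can still yield a null handle when the
// operand is absent.
mlir::Value firstOrNull(mlir::ValueRange args)
{
    return args.begin() != args.end() ? *args.begin() : mlir::Value();
}
```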
@@ -263,7 +263,7 @@ def NGMVN :
let builders = [
OpBuilder<
"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *data, ArrayAttr reductionAxes, Attribute normalizeVariance,"
"Value data, ArrayAttr reductionAxes, Attribute normalizeVariance,"
"Attribute eps", [{
tblgen_state.addOperands(data);
tblgen_state.addAttribute("reductionAxes", reductionAxes);
@@ -363,7 +363,7 @@ def NGLSTMCellOp :
let builders = [
OpBuilder<
"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *X, Value* W, Value* R, Value* H_t, Value* C_t,"
"Value X, Value W, Value R, Value H_t, Value C_t,"
"Attribute hiddenSize, ArrayAttr activations,"
"ArrayAttr activationAlpha, ArrayAttr activationBeta,"
"Attribute clip, Attribute inputForget", [{
@@ -379,7 +379,7 @@ def NGLSTMCellOp :
OpBuilder<
"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *X, Value* W, Value* R, Value* H_t, Value* C_t,"
"Value X, Value W, Value R, Value H_t, Value C_t,"
"Attribute hiddenSize",
[{
tblgen_state.addOperands({X, W, R, H_t, C_t});
@@ -390,13 +390,13 @@ def NGLSTMCellOp :
let extraClassDeclaration = [{
// get bias operand if present
Value* B()
Value B()
{
auto varArgs = optionalArgs();
return varArgs.begin() != varArgs.end() ? *varArgs.begin() : nullptr;
}
// get peephole weights operand if present
Value* P()
Value P()
{
auto varArgs = optionalArgs();
auto it = varArgs.begin();
@@ -452,7 +452,7 @@ def NGLSTMSequenceOp :
void setActivationsBeta (const ArrayAttr& attr) { this->setAttr("activatiBeta", attr); }
void setClip(const Attribute& attr) { this->setAttr("clip", attr); }
Value* P()
Value P()
{
auto varArgs = optionalArgs();
return varArgs.begin() != varArgs.end() ? *varArgs.begin() : nullptr;
@@ -500,7 +500,7 @@ def NGGRUCellOp :
let builders = [
OpBuilder<
"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *X, Value* W, Value* R, Value* H_t,"
"Value X, Value W, Value R, Value H_t,"
"Attribute hiddenSize, ArrayAttr activations,"
"ArrayAttr activationAlpha, ArrayAttr activationBeta,"
"Attribute clip, Attribute linearBeforeReset", [{
@@ -515,7 +515,7 @@ def NGGRUCellOp :
OpBuilder<
"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *X, Value* W, Value* R, Value* H_t,"
"Value X, Value W, Value R, Value H_t,"
"Attribute hiddenSize",
[{
tblgen_state.addOperands({X, W, R, H_t});
@@ -532,7 +532,7 @@ def NGGRUCellOp :
void setLinearBeforeReset(const Attribute& attr) { this->setAttr("linearBeforeReset", attr); }
// get Bias operand if present
Value* P()
Value P()
{
auto varArgs = optionalArgs();
return varArgs.begin() != varArgs.end() ? *varArgs.begin() : nullptr;
@@ -557,7 +557,7 @@ def NGLayerNormOp :
let builders = [
OpBuilder<
"Builder *builder, OperationState &tblgen_state, ArrayRef<Type> res,"
"Value *data, Attribute keepStats, Attribute beginNormAxis, Attribute epsilon", [{
"Value data, Attribute keepStats, Attribute beginNormAxis, Attribute epsilon", [{
tblgen_state.addOperands(data);
tblgen_state.addAttribute("keepStats", keepStats);
tblgen_state.addAttribute("beginNormAxis", beginNormAxis);
@@ -568,13 +568,13 @@ def NGLayerNormOp :
let extraClassDeclaration = [{
// get Scale operand if present
Value* Scale()
Value Scale()
{
auto varArgs = optionalArgs();
return varArgs.begin() != varArgs.end() ? *varArgs.begin() : nullptr;
}
// get Bias operand if present
Value* Bias()
Value Bias()
{
auto varArgs = optionalArgs();
auto it = varArgs.begin();
@@ -608,7 +608,7 @@ def NGLayerNormBackpropOp :
let builders = [
OpBuilder<
"Builder *builder, OperationState &tblgen_state, ArrayRef<Type> res,"
"Value *data, Value *delta, Value *mean, Value *variance,"
"Value data, Value delta, Value mean, Value variance,"
"Attribute beginNormAxis, Attribute epsilon", [{
tblgen_state.addOperands({data, delta, mean, variance});
tblgen_state.addAttribute("beginNormAxis", beginNormAxis);
@@ -618,7 +618,7 @@ def NGLayerNormBackpropOp :
OpBuilder<
"Builder *builder, OperationState &tblgen_state, ArrayRef<Type> res,"
"Value *data, Value *delta, Value *scale,"
"Value data, Value delta, Value scale,"
"Attribute beginNormAxis, Attribute epsilon", [{
tblgen_state.addOperands({data, delta, scale});
tblgen_state.addAttribute("beginNormAxis", beginNormAxis);
@@ -628,7 +628,7 @@ def NGLayerNormBackpropOp :
OpBuilder<
"Builder *builder, OperationState &tblgen_state, ArrayRef<Type> res,"
"Value *data, Value *delta,"
"Value data, Value delta,"
"Attribute beginNormAxis, Attribute epsilon", [{
tblgen_state.addOperands({data, delta});
tblgen_state.addAttribute("beginNormAxis", beginNormAxis);
@@ -639,13 +639,13 @@ def NGLayerNormBackpropOp :
let extraClassDeclaration = [{
// get Mean operand if present
Value* Mean()
Value Mean()
{
auto varArgs = optionalArgs();
return varArgs.begin() != varArgs.end() ? *varArgs.begin() : nullptr;
}
// get Variance operand if present
Value* Variance()
Value Variance()
{
auto varArgs = optionalArgs();
auto it = varArgs.begin();
@@ -722,8 +722,8 @@ def NGGroupConvOp :
let builders = [
// Builder without padType
OpBuilder<
"Builder *builder, OperationState &tblgen_state, Type res, Value *images,"
"Value *filters, ArrayAttr strides, ArrayAttr padBelow, ArrayAttr padAbove,"
"Builder *builder, OperationState &tblgen_state, Type res, Value images,"
"Value filters, ArrayAttr strides, ArrayAttr padBelow, ArrayAttr padAbove,"
"Attribute groups",
[{
tblgen_state.addOperands({images, filters});
@@ -772,18 +772,18 @@ def NGGroupConvTransposeOp :
let builders = [
OpBuilder<"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *images, Value *filters, Attribute groups", [{
"Value images, Value filters, Attribute groups", [{
tblgen_state.addOperands({images, filters});
tblgen_state.addAttribute("groups", groups);
tblgen_state.addTypes(res);
}]>,
OpBuilder<"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *images, Value *filters", [{
"Value images, Value filters", [{
tblgen_state.addOperands({images, filters});
tblgen_state.addTypes(res);
}]>,
OpBuilder<"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *images, Value *filters, ArrayAttr strides,"
"Value images, Value filters, ArrayAttr strides,"
"ArrayAttr outputPad, ArrayAttr outputShape,"
"Attribute groups", [{
tblgen_state.addOperands({images, filters});
@@ -793,7 +793,7 @@ def NGGroupConvTransposeOp :
tblgen_state.addAttribute("groups", groups);
}]>,
OpBuilder<"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *images, Value *filters,"
"Value images, Value filters,"
"ArrayAttr outputShape, Attribute groups", [{
tblgen_state.addOperands({images, filters});
tblgen_state.addAttribute("outputShape", outputShape);
@@ -951,7 +951,7 @@ def NGConvBiasOp :
let builders = [
OpBuilder<
"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *images, Value *filters, Value *bias, Attribute withRelu", [{
"Value images, Value filters, Value bias, Attribute withRelu", [{
tblgen_state.addOperands({images, filters, bias});
tblgen_state.addAttribute("withRelu", withRelu);
tblgen_state.addTypes(res);
@@ -959,7 +959,7 @@ def NGConvBiasOp :
OpBuilder<
"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *images, Value *filters, Value *bias", [{
"Value images, Value filters, Value bias", [{
tblgen_state.addOperands({images, filters, bias});
tblgen_state.addTypes(res);
}]>
......
@@ -78,7 +78,7 @@ class NG_Unary_Arith_Op<string mnemonic, list<OpTrait> traits = []> :
// TODO: Implement
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return verifyUnaryArithOp(this); }];
let verifier = [{ return verifyUnaryArithOp(*this); }];
}
// Base class for arithmetic binary operations without side effects.
@@ -98,7 +98,7 @@ class NG_Binary_Arith_Op<string mnemonic, list<OpTrait> traits = []> :
// TODO: Implement
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return verifyBinaryArithOp(this); }];
let verifier = [{ return verifyBinaryArithOp(*this); }];
}
// Base class for comparison operations with verifier.
@@ -109,7 +109,7 @@ class NG_Cmp_Op<string mnemonic, list<OpTrait> traits = []> :
// TODO: Implement
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return verifyCmpOp(this); }];
let verifier = [{ return verifyCmpOp(*this); }];
}
// Base class for ternary operations without side effects.
@@ -133,7 +133,7 @@ class NG_Axis_Reduction_Op<string mnemonic, list<OpTrait> traits = []> :
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
// TODO
let verifier = [{ return verifyAxisReductionOp(this); }];
let verifier = [{ return verifyAxisReductionOp(*this); }];
}
// Base class for terminator operations.
......
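The verifier tweaks just above (`verifyUnaryArithOp(this)` becoming `verifyUnaryArithOp(*this)`, and likewise for the binary, compare, and axis-reduction helpers) follow from the same migration: TableGen-generated op classes are value-typed handles too, so the shared verification helpers now take the op by value instead of through a pointer. A hedged sketch of such a helper's shape; the body is illustrative, not nGraph's actual check:

```cpp
#include "mlir/IR/OpDefinition.h"       // generated op classes / OpState
#include "mlir/Support/LogicalResult.h"

// T is a concrete TableGen-generated op class; inside the generated
// verify() hook, `*this` is the op handle that gets passed here by value.
template <typename T>
mlir::LogicalResult verifyUnaryArithOp(T op)
{
    if (op.getOperation()->getNumOperands() != 1)
        return op.emitOpError("expected a single operand");
    return mlir::success();
}
```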
@@ -68,7 +68,7 @@ namespace
struct TensorInfo
{
// MLIR values this tensor maps to.
mlir::Value* m_value;
mlir::Value m_value;
};
private:
@@ -84,7 +84,7 @@
mlir::Type getMlirType(const ngraph::Node* node);
TensorInfo getTensorValue(descriptor::Tensor* tensor);
void updateTensorValue(descriptor::Tensor* tensor, mlir::Value* value);
void updateTensorValue(descriptor::Tensor* tensor, mlir::Value value);
template <typename Op>
static mlir::Operation* createOp(NgDialectConversionPass& NgDialectObj,
@@ -176,7 +176,7 @@ void NgDialectConversionPass::runOnModule()
int i = 0;
for (auto input : kernelInputs)
{
mlir::Value* arg = function.getArgument(i);
auto arg = function.getArgument(i);
TensorInfo tensorInfo{arg};
m_tensorToValueMap.insert(TensorToInfo(input->get_output_tensor_ptr().get(), tensorInfo));
i++;
@@ -264,7 +264,7 @@ mlir::Type NgDialectConversionPass::getMlirType(const ngraph::Node* node)
return getMlirType(outTensor);
}
void NgDialectConversionPass::updateTensorValue(descriptor::Tensor* tensor, mlir::Value* value)
void NgDialectConversionPass::updateTensorValue(descriptor::Tensor* tensor, mlir::Value value)
{
NGRAPH_CHECK(m_tensorToValueMap.find(tensor) == m_tensorToValueMap.end(),
"tensor value already defined");
@@ -307,7 +307,7 @@ void NgDialectConversionPass::buildNgDialect(mlir::FuncOp function)
{
for (auto i = 0; i < op->getNumResults(); i++)
{
mlir::Value* result = op->getResult(i);
auto result = op->getResult(i);
if (result)
{
updateTensorValue(np->get_output_tensor_ptr(i).get(), result);
@@ -600,7 +600,6 @@ template <>
mlir::Operation* NgDialectConversionPass::COMPILE_OP_DECL(ngraph::op::Softmax)
{
mlir::Operation* op = NgDialectObj.createGenericOp<mlir::NGSoftMaxOp>(ngNode, 1);
auto softmaxNode = static_cast<const ngraph::op::Softmax*>(ngNode);
auto softmaxOp = llvm::cast<mlir::NGSoftMaxOp>(op);
auto originArg = NgDialectObj.getOriginArg(ngNode->input_value(1).get_node());
@@ -614,7 +613,7 @@ mlir::Operation* NgDialectConversionPass::COMPILE_OP_DECL(ngraph::op::Softmax)
template <typename Op>
mlir::Operation* NgDialectConversionPass::createGenericOp(const ngraph::Node* ngNode, int inNum)
{
std::vector<mlir::Value*> argValues;
std::vector<mlir::Value> argValues;
std::vector<mlir::Type> resTypes;
auto inputMap = m_compiledKernel->get_input_map();
std::shared_ptr<descriptor::Tensor> argTensor;
@@ -650,7 +649,7 @@ mlir::Operation* NgDialectConversionPass::createGenericOp(const ngraph::Node* ng
return (m_builder.create<Op,
ArrayRef<mlir::Type>,
ArrayRef<mlir::Value*>,
ArrayRef<mlir::Value>,
ArrayRef<mlir::NamedAttribute>>(
mlir::UnknownLoc::get(m_context), resTypes, argValues, {/* no attrs */}))
.getOperation();
@@ -663,7 +662,7 @@ const NgDialectConversionPass::MLIRCompOpMap NgDialectConversionPass::opDispatch
void NgDialectConversionPass::createReturn()
{
std::vector<mlir::Value*> valueList;
std::vector<mlir::Value> valueList;
for (auto output : m_compiledKernel->get_kernel_outputs())
{
valueList.push_back(getTensorValue(output->get_output_tensor_ptr().get()).m_value);
......
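The dialect-conversion pass changes are the mechanical fallout of the same API shift: containers hold `mlir::Value` directly, `getArgument`/`getResult` return value-typed handles (hence the switch to `auto`), and `OpBuilder::create` is instantiated over `ArrayRef<mlir::Value>`. A small sketch of the argument-collection pattern, assuming mono-repo-era headers:

```cpp
#include <vector>

#include "mlir/IR/Function.h" // mlir::FuncOp (header location as of this MLIR revision)
#include "mlir/IR/Value.h"

// FuncOp::getArgument now returns a BlockArgument, itself a value-typed
// handle that converts to mlir::Value, so no pointers are involved.
std::vector<mlir::Value> collectKernelArgs(mlir::FuncOp function)
{
    std::vector<mlir::Value> args;
    for (unsigned i = 0, e = function.getNumArguments(); i < e; ++i)
        args.push_back(function.getArgument(i));
    return args;
}
```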
@@ -140,7 +140,6 @@ void MLIRCPURuntime::execute()
void MLIRCPURuntime::cleanup()
{
// Free void double pointer arguments without freeing external tensor data.
int i = 0;
for (auto* arg : m_invokeArgs)
{
auto* memRefArg = *(reinterpret_cast<StaticMemRef**>(arg));
......
@@ -16,7 +16,7 @@
# Enable use of the lit tool that we build from MLIR repo.
set(LLVM_LIT ${LLVM_MAIN_SRC_DIR}/utils/lit/lit.py)
set(LLVM_DEFAULT_EXTERNAL_LIT ${MLIR_TOOLS_DIR}/llvm-lit)
set(LLVM_DEFAULT_EXTERNAL_LIT ${MLIR_LLVM_TOOLS_DIR}/llvm-lit)
configure_lit_site_cfg(
${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.py.in
......
@@ -150,7 +150,8 @@ func @simple_dot(%arg0: !ng.tensor<16x8xf32>, %arg1: !ng.tensor<8x32xf32>) -> !n
// -----
// std.view
// CHECK-DAG: #[[MAP0:[a-zA-Z0-9]+]] = (d0, d1) -> (d0 * 2 + d1)
// CHECK: #[[MAP0:[a-zA-Z0-9]+]] = affine_map<(d0, d1) -> (d0 * 2 + d1)>
// CHECK: %[[T1:[0-9]+]] = alloc() : memref<24xi8>
// CHECK-NEXT: %[[T2:[0-9]+]] = std.view %[[T1]][][] : memref<24xi8> to memref<3x2xf32, #[[MAP0]]>
// CHECK: affine.store %{{[0-9]+}}, %[[T2]][%{{.*}}, %{{.*}}] : memref<3x2xf32, #[[MAP0]]>
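The FileCheck updates in these tests are purely syntactic: the newer MLIR printer wraps affine map aliases in an explicit `affine_map<...>` marker, so `(d0, d1) -> (d0 * 2 + d1)` now prints as `affine_map<(d0, d1) -> (d0 * 2 + d1)>`. For reference, a sketch of constructing that map programmatically (API shape as assumed for this MLIR revision; `ctx` is a caller-supplied context):

```cpp
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"

// Builds (d0, d1) -> (d0 * 2 + d1), which the new syntax prints as
// affine_map<(d0, d1) -> (d0 * 2 + d1)>.
mlir::AffineMap makeViewMap(mlir::MLIRContext* ctx)
{
    mlir::AffineExpr d0 = mlir::getAffineDimExpr(0, ctx);
    mlir::AffineExpr d1 = mlir::getAffineDimExpr(1, ctx);
    return mlir::AffineMap::get(/*dimCount=*/2, /*symbolCount=*/0, {d0 * 2 + d1});
}
```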
@@ -198,12 +199,12 @@ func @convolution(%arg0: !ng.tensor<1x2x2x2xf32>, %arg1: !ng.tensor<2x2x1x1xf32>
// -----
//
// Group Convolution
// CHECK-DAG: #[[M0:.*]] = (d0) -> (d0 * 2)
// CHECK-DAG: #[[M1:.*]] = (d0) -> (d0 * 2 + 2)
// CHECK-DAG: #[[M2:.*]] = (d0) -> (d0)
// CHECK-DAG: #[[M3:.*]] = (d0) -> (d0 + 1)
// CHECK-DAG: #[[M8:.*]] = (d0, d1) -> (d0 + d1)
// CHECK-DAG: #[[M9:.*]] = (d0, d1) -> (d0 - d1 * 2)
// CHECK-DAG: #[[M0:.*]] = affine_map<(d0) -> (d0 * 2)>
// CHECK-DAG: #[[M1:.*]] = affine_map<(d0) -> (d0 * 2 + 2)>
// CHECK-DAG: #[[M2:.*]] = affine_map<(d0) -> (d0)>
// CHECK-DAG: #[[M3:.*]] = affine_map<(d0) -> (d0 + 1)>
// CHECK-DAG: #[[M8:.*]] = affine_map<(d0, d1) -> (d0 + d1)>
// CHECK-DAG: #[[M9:.*]] = affine_map<(d0, d1) -> (d0 - d1 * 2)>
// CHECK-LABEL: func @groupConv
//
// Outer groups loops
......
// RUN: ngraph-opt %s --split-input-file --ngraph-memory-opt --ngraph-memory-opt-concat --ngraph-memory-opt-eltwise -convert-ngraph-to-affine | FileCheck %s
// CHECK-DAG: #[[MAP0:[a-zA-Z0-9]+]] = (d0, d1) -> (d0 * 2 + d1)
// CHECK: #[[MAP0:[a-zA-Z0-9]+]] = affine_map<(d0, d1) -> (d0 * 2 + d1)>
// CHECK-LABEL: test0
// CHECK: %[[B:.*]] = alloc() : memref<16xi8>
// CHECK: std.view %[[B]][][] : memref<16xi8> to memref<2x2xf32, #[[MAP0]]>
@@ -17,8 +17,8 @@ func @test0(%arg0: !ng.tensor<2x2xf32>, %arg1: !ng.tensor<2x2xf32>) -> !ng.tenso
// -----
// CHECK-DAG: #[[MAP0:[a-zA-Z0-9]+]] = (d0, d1) -> (d0 * 2 + d1)
// CHECK-DAG: #[[MAP1:[a-zA-Z0-9]+]] = (d0, d1) -> (d0 * 2 + d1 + 4)
// CHECK-DAG: #[[MAP0:[a-zA-Z0-9]+]] = affine_map<(d0, d1) -> (d0 * 2 + d1)>
// CHECK-DAG: #[[MAP1:[a-zA-Z0-9]+]] = affine_map<(d0, d1) -> (d0 * 2 + d1 + 4)>
// CHECK-LABEL: test1
// CHECK: %[[B:.*]] = alloc() : memref<32xi8>
// CHECK: std.view %[[B]][][] : memref<32xi8> to memref<2x2xf32, #[[MAP0]]>
@@ -35,10 +35,10 @@ func @test1(%arg0: !ng.tensor<2x2xf32>, %arg1: !ng.tensor<2x2xf32>) -> !ng.tenso
// -----
// CHECK-DAG: #[[MAP0:[a-zA-Z0-9]+]] = (d0, d1, d2) -> (d0 * 4 + d1 * 2 + d2)
// CHECK-DAG: #[[MAP1:[a-zA-Z0-9]+]] = (d0, d1, d2) -> (d0 * 4 + d1 * 2 + d2 + 4)
// CHECK-DAG: #[[MAP2:[a-zA-Z0-9]+]] = (d0, d1, d2) -> (d0 * 8 + d1 * 2 + d2)
// CHECK-DAG: #[[MAP3:[a-zA-Z0-9]+]] = (d0, d1, d2) -> (d0 * 16 + d1 * 2 + d2)
// CHECK-DAG: #[[MAP0:[a-zA-Z0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 4 + d1 * 2 + d2)>
// CHECK-DAG: #[[MAP1:[a-zA-Z0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 4 + d1 * 2 + d2 + 4)>
// CHECK-DAG: #[[MAP2:[a-zA-Z0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 8 + d1 * 2 + d2)>
// CHECK-DAG: #[[MAP3:[a-zA-Z0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 16 + d1 * 2 + d2)>
// CHECK-LABEL: test2
// CHECK: %[[B1:.*]] = alloc() : memref<32xi8>
// CHECK: std.view %[[B1]][][] : memref<32xi8> to memref<1x2x2xf32, #[[MAP0]]>
@@ -66,13 +66,13 @@ func @test2(%arg0: !ng.tensor<1x2x2xf32>, %arg1: !ng.tensor<1x2x2xf32>) -> (!ng.
// -----
// CHECK-DAG: #[[MAP0:[a-zA-Z0-9]+]] = (d0, d1, d2) -> (d0 * 8 + d1 * 2 + d2)
// CHECK-DAG: #[[MAP8:[a-zA-Z0-9]+]] = (d0, d1, d2) -> (d0 * 8 + d1 * 2 + d2 + 8)
// CHECK-DAG: #[[MAP9:[a-zA-Z0-9]+]] = (d0, d1, d2) -> (d0 * 8 + d1 * 2 + d2 + 16)
// CHECK-DAG: #[[MAP10:[a-zA-Z0-9]+]] = (d0, d1, d2) -> (d0 * 8 + d1 * 2 + d2 + 24)
// CHECK-DAG: #[[MAP11:[a-zA-Z0-9]+]] = (d0, d1, d2) -> (d0 * 16 + d1 * 2 + d2)
// CHECK-DAG: #[[MAP12:[a-zA-Z0-9]+]] = (d0, d1, d2) -> (d0 * 16 + d1 * 2 + d2 + 16)
// CHECK-DAG: #[[MAP13:[a-zA-Z0-9]+]] = (d0, d1, d2) -> (d0 * 32 + d1 * 2 + d2)
// CHECK-DAG: #[[MAP0:[a-zA-Z0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 8 + d1 * 2 + d2)>
// CHECK-DAG: #[[MAP8:[a-zA-Z0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 8 + d1 * 2 + d2 + 8)>
// CHECK-DAG: #[[MAP9:[a-zA-Z0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 8 + d1 * 2 + d2 + 16)>
// CHECK-DAG: #[[MAP10:[a-zA-Z0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 8 + d1 * 2 + d2 + 24)>
// CHECK-DAG: #[[MAP11:[a-zA-Z0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 16 + d1 * 2 + d2)>
// CHECK-DAG: #[[MAP12:[a-zA-Z0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 16 + d1 * 2 + d2 + 16)>
// CHECK-DAG: #[[MAP13:[a-zA-Z0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 32 + d1 * 2 + d2)>
// CHECK-LABEL: test3
// CHECK: %[[B:.*]] = alloc() : memref<128xi8>
// CHECK: std.view %[[B]][][] : memref<128xi8> to memref<1x4x2xf32, #[[MAP0]]>
@@ -97,10 +97,10 @@ func @test3(%arg0: !ng.tensor<1x2x2xf32>, %arg1: !ng.tensor<1x2x2xf32>) -> !ng.t
// -----
//CHECK-DAG: #[[MAP4:[a-zA-Z0-9]+]] = (d0, d1, d2) -> (d0 * 4 + d1 * 2 + d2 + 4)
//CHECK-DAG: #[[MAP5:[a-zA-Z0-9]+]] = (d0, d1, d2) -> (d0 * 4 + d1 * 2 + d2)
//CHECK-DAG: #[[MAP6:[a-zA-Z0-9]+]] = (d0, d1, d2) -> (d0 * 4 + d1 * 2 + d2 + 8)
//CHECK-DAG: #[[MAP12:[a-zA-Z0-9]+]] = (d0, d1, d2) -> (d0 * 12 + d1 * 2 + d2)
//CHECK-DAG: #[[MAP4:[a-zA-Z0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 4 + d1 * 2 + d2 + 4)>
//CHECK-DAG: #[[MAP5:[a-zA-Z0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 4 + d1 * 2 + d2)>
//CHECK-DAG: #[[MAP6:[a-zA-Z0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 4 + d1 * 2 + d2 + 8)>
//CHECK-DAG: #[[MAP12:[a-zA-Z0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 12 + d1 * 2 + d2)>
// CHECK-LABEL: test4
//CHECK: %[[B1:.*]] = alloc() : memref<1x2x2xf32>
//CHECK: %[[B2:.*]] = alloc() : memref<48xi8>
......
@@ -17,9 +17,9 @@
import lit.llvm
config.llvm_tools_dir = "@MLIR_TOOLS_DIR@"
config.mlir_obj_root = "@MLIR_BUILD_DIR@"
config.mlir_tools_dir = "@MLIR_TOOLS_DIR@"
config.llvm_tools_dir = "@MLIR_LLVM_TOOLS_DIR@"
config.mlir_obj_root = "@MLIR_LLVM_BUILD_DIR@"
config.mlir_tools_dir = "@MLIR_LLVM_TOOLS_DIR@"
config.suffixes = ['.mlir']
config.ngraph_mlir_tools_dir = "@NGRAPH_BUILD_BIN@"
......
@@ -31,7 +31,6 @@ using namespace mlir;
OpBuilder createBuilder(MLIRContext* context)
{
auto module = ModuleOp::create(UnknownLoc::get(context));
auto funcType = FunctionType::get({}, {}, context);
auto function = FuncOp::create(UnknownLoc::get(context), "main", funcType);
function.addEntryBlock();
......