Commit 8e46ff86 authored by Nagy Mostafa's avatar Nagy Mostafa Committed by Sang Ik Lee

[MLIR] Enable lowering of GroupConv in MLIR CPU backend (#4102)

* WIP

* WIP

* Refactored existing convolution

* Add Channel and num of filters bounds parameters to helper

* Works on unit-tests. v1 op gets converted and breaks

* Fixed group conv with groups in filters shape. Tests pass

* style

* add LIT tests

* Switch outer loop to affine loop

* re-org code

* PR fixes

* Revert ops.td

* PR fixes
parent c2bdccfa
......@@ -36,6 +36,7 @@ MLIR_OP(NGDotOp , false )
MLIR_OP(NGGatherOp , false )
MLIR_OP(NGGemmOp , false )
MLIR_OP(NGGreaterOp , true )
MLIR_OP(NGGroupConvOp , false )
MLIR_OP(NGLessOp , true )
MLIR_OP(NGGreaterEqOp , true )
MLIR_OP(NGLessEqOp , true )
......
......@@ -740,6 +740,7 @@ def NGGroupConvOp :
void setPadAbove(const ArrayAttr& attr) { this->setAttr("padAbove", attr); }
void setPadBelow(const ArrayAttr& attr) { this->setAttr("padBelow", attr); }
void setPadType(const Attribute& attr) { this->setAttr("padType", attr); }
void setGroups(const Attribute& attr) { this->setAttr("groups", attr); }
}];
}
......
......@@ -15,6 +15,7 @@ MLIR_OP(Convolution)
MLIR_OP(Gather)
MLIR_OP(Gemm)
MLIR_OP(Greater)
MLIR_OP(GroupConvolution)
MLIR_OP(Less)
MLIR_OP(GreaterEq)
MLIR_OP(LessEq)
......
......@@ -112,7 +112,8 @@ namespace
/// Converts an ngraph shape to an I64 array attribute
template <typename T>
mlir::ArrayAttr getShapeAsAttr(T ngShape);
/// Returns the OpBuilder used by this pass to construct MLIR ops/attributes
/// (e.g. integer attributes for op parameters such as group counts).
mlir::OpBuilder& getBuilder() { return m_builder; }
/// Return the real input node corresponding to the fake node
ngraph::Node* getOriginArg(ngraph::Node* node) const;
......@@ -452,6 +453,27 @@ mlir::Operation* NgDialectConversionPass::COMPILE_OP_DECL(ngraph::op::Convolutio
attr = NgDialectObj.getShapeAsAttr(convNode->get_padding_above());
convOp.setPadAbove(attr);
return op;
}
template <>
mlir::Operation* NgDialectConversionPass::COMPILE_OP_DECL(ngraph::op::GroupConvolution)
{
    // Lower an ngraph GroupConvolution node to the NG-dialect group-conv op,
    // carrying over its strides, padding and group-count attributes.
    auto* groupConvNode = static_cast<const ngraph::op::GroupConvolution*>(ngNode);
    mlir::Operation* newOp = NgDialectObj.createGenericOp<mlir::NGGroupConvOp>(ngNode);
    auto dialectOp = llvm::cast<mlir::NGGroupConvOp>(newOp);

    dialectOp.setStrides(NgDialectObj.getShapeAsAttr(groupConvNode->get_window_movement_strides()));
    dialectOp.setPadBelow(NgDialectObj.getShapeAsAttr(groupConvNode->get_padding_below()));
    dialectOp.setPadAbove(NgDialectObj.getShapeAsAttr(groupConvNode->get_padding_above()));
    // Group count is a scalar, not a shape: encode it as an i64 integer attribute.
    dialectOp.setGroups(NgDialectObj.getBuilder().getI64IntegerAttr(groupConvNode->get_groups()));
    return newOp;
}
......@@ -589,7 +611,6 @@ mlir::Operation* NgDialectConversionPass::COMPILE_OP_DECL(ngraph::op::Softmax)
softmaxOp.setAxes(attr);
return op;
}
template <typename Op>
mlir::Operation* NgDialectConversionPass::createGenericOp(const ngraph::Node* ngNode, int inNum)
{
......
......@@ -150,7 +150,6 @@ func @simple_dot(%arg0: !ng.tensor<16x8xf32>, %arg1: !ng.tensor<8x32xf32>) -> !n
// -----
// std.view
// CHECK-DAG: #[[MAP0:[a-zA-Z0-9]+]] = (d0, d1) -> (d0 * 2 + d1)
// CHECK: %[[T1:[0-9]+]] = alloc() : memref<24xi8>
// CHECK-NEXT: %[[T2:[0-9]+]] = std.view %[[T1]][][] : memref<24xi8> to memref<3x2xf32, #[[MAP0]]>
......@@ -165,3 +164,82 @@ func @add(%arg0: !ng.tensor<3x2xf32>, %arg1: !ng.tensor<3x2xf32>) -> !ng.tensor<
%3 = "ng.add"(%2, %2) : (!ng.tensor<3x2xf32>, !ng.tensor<3x2xf32>) -> !ng.tensor<3x2xf32>
"ng.return"(%3) : (!ng.tensor<3x2xf32>) -> ()
}
// -----
// Convolution
// CHECK-LABEL: func @convolution
// Initialization loops
// CHECK: affine.for
// CHECK-NEXT: affine.for
// CHECK-NEXT: affine.for
// CHECK-NEXT: affine.for
// CHECK: affine.store
// Convolution loops
// CHECK: affine.for %[[a3:.*]] = 0 to 1
// CHECK: affine.for %[[a4:.*]] = 0 to 2
// CHECK: affine.for %[[a5:.*]] = 0 to 2
// CHECK: affine.for %[[a6:.*]] = 0 to 2
// CHECK: affine.for %[[a7:.*]] = 0 to 2
// CHECK: affine.for %[[a8:.*]] = 0 to 1
// CHECK: affine.for %[[a9:.*]] = 0 to 1
// CHECK: affine.load %{{.*}}[%[[a4]], %{{.*}}, %[[a8]], %[[a9]]] : memref<2x2x1x1xf32>
// CHECK: affine.load %{{.*}}[%[[a3]], %[[a5]], %{{.*}}, {{.*}}] : memref<1x2x2x2xf32>
// CHECK-NEXT: mulf
// CHECK-NEXT: affine.load %{{.*}}[%[[a3]], %[[a4]], %[[a6]], %[[a7]]] : memref<1x2x2x2xf32>
// CHECK-NEXT: %[[v4:.*]] = addf
// CHECK-NEXT: affine.store %[[v4]], %{{.*}}[%[[a3]], %[[a4]], %[[a6]], %[[a7]]] : memref<1x2x2x2xf32>
// 1x2x2x2 input, 2x2x1x1 filter, unit strides, no padding -> 1x2x2x2 output.
func @convolution(%arg0: !ng.tensor<1x2x2x2xf32>, %arg1: !ng.tensor<2x2x1x1xf32>) -> !ng.tensor<1x2x2x2xf32> {
%0 = "ng.convolution"(%arg0, %arg1) {padAbove = [0, 0], padBelow = [0, 0], strides = [1, 1]} : (!ng.tensor<1x2x2x2xf32>, !ng.tensor<2x2x1x1xf32>) -> !ng.tensor<1x2x2x2xf32>
"ng.return"(%0) : (!ng.tensor<1x2x2x2xf32>) -> ()
}
// -----
//
// Group Convolution
// CHECK-DAG: #[[M0:.*]] = (d0) -> (d0 * 2)
// CHECK-DAG: #[[M1:.*]] = (d0) -> (d0 * 2 + 2)
// CHECK-DAG: #[[M2:.*]] = (d0) -> (d0)
// CHECK-DAG: #[[M3:.*]] = (d0) -> (d0 + 1)
// CHECK-DAG: #[[M8:.*]] = (d0, d1) -> (d0 + d1)
// CHECK-DAG: #[[M9:.*]] = (d0, d1) -> (d0 - d1 * 2)
// CHECK-LABEL: func @groupConv
//
// Outer groups loops
// CHECK: affine.for %[[gid:.*]] = 0 to 2
// CHECK: %[[v0:.*]] = affine.apply #[[M0]](%[[gid]])
// CHECK: %[[v1:.*]] = affine.apply #[[M1]](%[[gid]])
// CHECK: %[[v2:.*]] = affine.apply #[[M2]](%[[gid]])
// CHECK: %[[v3:.*]] = affine.apply #[[M3]](%[[gid]])
//
// Initialization loops
// CHECK: affine.for
// CHECK-NEXT: affine.for
// CHECK-NEXT: affine.for
// CHECK-NEXT: affine.for
// CHECK: %[[cst:.*]] = constant 0
// CHECK: affine.store %[[cst]]
//
// Convolution loops
// CHECK: affine.for %[[a4:.*]] = 0 to 1
// CHECK: affine.for %[[a5:.*]] = #[[M2]](%[[v2]]) to #[[M2]](%[[v3]])
// CHECK: affine.for %[[a6:.*]] = #[[M2]](%[[v0]]) to #[[M2]](%[[v1]])
// CHECK: affine.for %[[a7:.*]] = 0 to 2
// CHECK: affine.for %[[a8:.*]] = 0 to 2
// CHECK: affine.for %[[a9:.*]] = 0 to 1
// CHECK: affine.for %[[a10:.*]] = 0 to 1
// CHECK: %[[v6:.*]] = affine.apply #[[M8]](%[[a7]], %[[a9]])
// CHECK: %[[v7:.*]] = affine.apply #[[M8]](%[[a8]], %[[a10]])
// CHECK: %[[v8:.*]] = affine.apply #[[M9]](%[[a6]], %[[a3]])
// CHECK: affine.load %{{.*}}[%[[a5]], %[[v8]], %[[a9]], %[[a10]]] : memref<2x2x1x1xf32>
// CHECK: affine.load %{{.*}}[%[[a4]], %[[a6]], %[[v6]], %[[v7]]] : memref<1x4x2x2xf32>
// CHECK-NEXT: mulf
// CHECK-NEXT: affine.load %{{.*}}[%[[a4]], %[[a5]], %[[a7]], %[[a8]]] : memref<1x2x2x2xf32>
// CHECK-NEXT: %[[v4:.*]] = addf
// CHECK-NEXT: affine.store %[[v4]], %{{.*}}[%[[a4]], %[[a5]], %[[a7]], %[[a8]]] : memref<1x2x2x2xf32>
// groups = 2: the 4 input channels are split into two groups of 2, each
// convolved with its own slice of the 2x2x1x1 filter -> 1x2x2x2 output.
func @groupConv(%arg0: !ng.tensor<1x4x2x2xf32>, %arg1: !ng.tensor<2x2x1x1xf32>) -> !ng.tensor<1x2x2x2xf32> {
%0 = "ng.groupConv"(%arg0, %arg1) {groups = 2 : i64, padAbove = [0, 0], padBelow = [0, 0], strides = [1, 1]} : (!ng.tensor<1x4x2x2xf32>, !ng.tensor<2x2x1x1xf32>) -> !ng.tensor<1x2x2x2xf32>
"ng.return"(%0) : (!ng.tensor<1x2x2x2xf32>) -> ()
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment