Commit e5436889 authored by Nagy Mostafa, committed by Scott Cyphers

[MLIR] Fused Ops dialect declaration (#3860)

* WIP

* WIP

* WIP

* All ops

* Fix layernorm backprop op name

* WIP: Adding tests

* WIP: Adding LIT parsing/printing tests

* WIP

* Added LSTM cells. Fixed some ops

* All builder tests

* PR fixes

* Fix spacing. Add missing setter to SpaceToDepth

* Update spaceToDepth lit test

* PR fixes

* Build fix

* Another fix

* Fixed optional args
parent 3ee833b7
@@ -21,9 +21,10 @@
#ifdef NG_FUSED_OPS
#else
#define NG_FUSED_OPS
// Squeeze Op
def NGSqueezeOp :
NG_OneResult_Op<"squeeze", [NoSideEffect, FusedOp]>,
NG_OneResult_Op<"squeeze", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$data, NG_TensorType:$axes)>
{
let summary = "Squeeze Op";
@@ -32,14 +33,988 @@ def NGSqueezeOp :
}];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }];
}
// Unsqueeze Op
def NGUnSqueezeOp :
NG_OneResult_Op<"unsqueeze", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$data, NG_TensorType:$axes)>
{
let summary = "Unsqueeze Op";
let description = [{
Unsqueeze Op
}];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }];
}
// Squared Difference Op
def NGSquaredDiffOp :
NG_Binary_Op<"sqrdDiff", [DeclareOpInterfaceMethods<FusedOp>]>
{
let summary = "Squared Difference Op";
let description = [{
Squared Difference Op
}];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }];
}
// Split Op
def NGSplitOp :
NG_Variadic_Result_Op<"split", [DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$data, I64Attr:$axis, I64ArrayAttr:$numSplits)>
{
let summary = "Split op";
let description = [{
Splits the input tensor into a list of smaller tensors ("pieces")
}];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }];
let extraClassDeclaration = [{
void setAxis(const Attribute& attr) { this->setAttr("axis", attr); }
void setNumSplits(const ArrayAttr& arrayAttr) { this->setAttr("numSplits", arrayAttr); }
}];
}
// SpaceToDepth Op
def NGSpaceToDepthOp :
NG_OneResult_Op<"spaceToDepth", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$data,
DefaultValuedAttr<I64Attr, "1">:$blockSize,
DepthSpaceModeEnumAttr:$mode)>
{
let summary = "Space to depth op";
let description = [{
SpaceToDepth permutes input tensor blocks of spatial data into depth dimension.
Values from the height and width dimensions are moved to the depth dimension.
Output node produces a tensor with shape:
[N, C * blocksize * blocksize, H / blocksize, W / blocksize]
data Node producing the input tensor
blockSize The size of the block of values to be moved
}];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }];
let extraClassDeclaration = [{
void setBlockSize(const Attribute& attr) { this->setAttr("blockSize", attr); }
void setMode(const Attribute& attr) { this->setAttr("mode", attr); }
}];
}
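As a sanity check on the shape rule above, a tiny stand-alone C++ sketch (not part of this patch; NCHW layout and even divisibility of H and W by the block size are assumed):

    #include <array>
    #include <cassert>
    #include <cstdint>

    // Output shape of SpaceToDepth for an NCHW input, per the rule
    // [N, C * bs * bs, H / bs, W / bs].
    std::array<int64_t, 4> spaceToDepthShape(const std::array<int64_t, 4>& in, int64_t bs)
    {
        assert(in[2] % bs == 0 && in[3] % bs == 0);
        return {in[0], in[1] * bs * bs, in[2] / bs, in[3] / bs};
    }

For example, spaceToDepthShape({1, 4, 16, 16}, 4) yields {1, 64, 4, 4}, matching the ng.spaceToDepth LIT test further down.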
// ShuffleChannels Op
def NGShuffleChannelsOp :
NG_OneResult_Op<"shuffleChannels", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$data, I64Attr:$axis, I64Attr:$groups)>
{
let summary = "Shuffle Channels op";
let description = [{
Constructs a ShuffleChannels node.
data Node producing the input tensor
axis channel dimension index in the data tensor. A negative value means
that the index should be calculated from the back of the input data
shape.
groups number of groups the channel dimension specified by axis should be
split into
}];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }];
let extraClassDeclaration = [{
void setAxis(const Attribute& axis) { this->setAttr("axis", axis); }
void setGroups(const Attribute& groups) { this->setAttr("groups", groups); }
}];
}
// ScaleShift Op
def NGScaleShiftOp :
NG_Ternary_Op<"scaleShift", [DeclareOpInterfaceMethods<FusedOp>]>
{
let summary = "scaleShift op";
let description = [{
Operator performing Scale Shift transformation.
Y = Scale * Data + Shift
}];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }];
}
// RNN Cell Op
def NGRNNCellOp :
NG_OneResult_Op<"rnnCell", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$X, NG_TensorType:$W, NG_TensorType:$R, NG_TensorType:$H_t,
Variadic<NG_TensorType>:$optionalArgs,
DefaultValuedAttr<StrArrayAttr, "{\"tanh\"}">:$activations,
DefaultValuedAttr<F32ArrayAttr, "{}">:$activationAlpha,
DefaultValuedAttr<F32ArrayAttr, "{}">:$activationBeta,
DefaultValuedAttr<F32Attr, "0.0">:$clip,
I64Attr:$hiddenSize)>
{
let summary = "RNN Cell";
let description = [{
RNN Cell
}];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }];
let builders = [
OpBuilder<
"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *X, Value* W, Value* R, Value* H_t, "
"Attribute hiddenSize, ArrayAttr activations,"
"ArrayAttr activationAlpha, ArrayAttr activationBeta, Attribute clip", [{
tblgen_state.addOperands({X, W, R, H_t});
tblgen_state.addAttribute("hiddenSize", hiddenSize);
tblgen_state.addAttribute("activations", activations);
tblgen_state.addAttribute("activationAlpha", activationAlpha);
tblgen_state.addAttribute("activationBeta", activationBeta);
tblgen_state.addAttribute("clip", clip);
tblgen_state.addTypes(res);
}]>
];
let extraClassDeclaration = [{
void setHiddenSize(const Attribute& attr) { this->setAttr("hiddenSize", attr); }
void setActivations(const ArrayAttr& attr) { this->setAttr("activations", attr); }
void setActivationAlpha(const ArrayAttr& attr) { this->setAttr("activationAlpha", attr); }
void setActivationBeta(const ArrayAttr& attr) { this->setAttr("activationBeta", attr); }
void setClip(const Attribute& attr) { this->setAttr("clip", attr); }
// get bias operand if present
Value* B()
{
auto varArgs = optionalArgs();
return varArgs.begin() != varArgs.end() ? *varArgs.begin() : nullptr;
}
}];
}
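The B() accessor above works because optional operands are appended, in order, to the variadic optionalArgs list; absent trailing operands simply shorten the list. A minimal stand-alone C++ illustration of that lookup (Value here is a placeholder, not mlir::Value):

    #include <cstddef>
    #include <vector>

    struct Value {}; // stand-in for the dialect's value type

    // Return the i-th optional operand, or nullptr when fewer than
    // i + 1 optional operands were supplied at build time.
    Value* getOptionalArg(const std::vector<Value*>& optionalArgs, std::size_t i)
    {
        return i < optionalArgs.size() ? optionalArgs[i] : nullptr;
    }

The iterator-based accessors on the ops below (P(), Bias(), Variance()) are the same idea specialized to index 1.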
// Prelu Op
def NGPrelu :
NG_OneResult_Op<"prelu", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$data, NG_TensorType:$slope)>
{
let summary = "Prelu op";
let description = [{
Parametrized Relu
x < 0 => f(x) = x * slope
x >= 0 => f(x) = x
}];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }];
}
// Normalize L2 Op
def NGNormalizeL2Op :
NG_OneResult_Op<"normalizeL2", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$data, NG_TensorType:$axis, F32Attr:$eps, EpsModeEnumAttr:$epsMode)>
{
let summary = "NormalizeL2 op";
let description = [{
Constructs a Normalize operation.
data - Node producing the input tensor
axes - Node indicating axes along which reduction is
calculated
eps - The epsilon added to L2 norm.
eps_mode - Specifies how eps is combined with L2 value calculated
before division
}];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }];
let extraClassDeclaration = [{
void setEpsMode(const Attribute& epsMode) { this->setAttr("epsMode", epsMode); }
void setEps(const Attribute& eps) { this->setAttr("eps", eps); }
}];
}
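The two eps modes combine eps with the computed L2 value roughly as sketched below (plain C++; the exact placement of eps is an assumption about the op's intent, not something this patch pins down):

    #include <algorithm>
    #include <cmath>

    enum class EpsMode { ADD, MAX };

    // Denominator used when normalizing: eps is either added under the
    // square root or acts as a lower bound on the sum of squares.
    float l2Denominator(float sumOfSquares, float eps, EpsMode mode)
    {
        return mode == EpsMode::ADD ? std::sqrt(sumOfSquares + eps)
                                    : std::sqrt(std::max(sumOfSquares, eps));
    }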
// MVN Op
def NGMVN :
NG_OneResult_Op<"mvn", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$data,
DefaultValuedAttr<BoolAttr, "true">: $acrossChannels,
DefaultValuedAttr<BoolAttr, "true">: $normalizeVariance,
DefaultValuedAttr<F32Attr, "1e-9"> : $eps)>
{
let summary = "MVN op";
let description = [{
data Input tensor with data
normalize_variance flag that denotes whether to perform variance
normalization.
across_channels flag that denotes if mean values are shared across channels.
eps the number to be added to the variance to avoid division by zero when
normalizing the value
reduction_axes a list of axes, along which to reduce.
}];
let builders = [
OpBuilder<
"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *data, ArrayAttr reductionAxes, Attribute normalizeVariance,"
"Attribute eps", [{
tblgen_state.addOperands(data);
tblgen_state.addAttribute("reductionAxes", reductionAxes);
tblgen_state.addAttribute("normalizeVariance", normalizeVariance);
tblgen_state.addAttribute("eps", eps);
tblgen_state.addTypes(res);
}]>
];
let extraClassDeclaration = [{
void setAcrossChannels(const Attribute& attr) { this->setAttr("acrossChannels", attr); }
void setNormalizeVariance(const Attribute& attr) { this->setAttr("normalizeVariance", attr); }
void setEps(const Attribute& attr) { this->setAttr("eps", attr); }
void setReductionAxes(const ArrayAttr& attr) { this->setAttr("reductionAxes", attr); }
}];
}
// MatMul Op
def NGMatMul :
NG_OneResult_Op<"matmul", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$A, NG_TensorType:$B,
DefaultValuedAttr<BoolAttr, "false">:$transposeA,
DefaultValuedAttr<BoolAttr, "false">:$transposeB)>
{
let summary = "MatMul op";
let description = [{
A Matrix A
B Matrix B
transpose_a If matrix A should be transposed.
transpose_b If matrix B should be transposed.
}];
let extraClassDeclaration = [{
void setTransposeA(const Attribute& attr) { this->setAttr("transposeA", attr); }
void setTransposeB(const Attribute& attr) { this->setAttr("transposeB", attr); }
}];
}
// LSTM Cell Op
//
def NGLSTMCellOp :
NG_OneResult_Op<"lstmCell", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$X, NG_TensorType:$W, NG_TensorType:$R,
NG_TensorType:$H_t, NG_TensorType:$C_t,
Variadic<NG_TensorType>:$optionalArgs,
I64Attr:$hiddenSize,
DefaultValuedAttr<LSTMWeightsFormatEnumAttr,
"static_cast<int64_t>(MLIRLSTMWeightsFormat::IFCO)">:$weightFormat,
DefaultValuedAttr<StrArrayAttr, "{\"sigmoid\",\"tanh\",\"tanh\"}">:$activations,
DefaultValuedAttr<F32ArrayAttr, "{}">:$activationAlpha,
DefaultValuedAttr<F32ArrayAttr, "{}">:$activationBeta,
DefaultValuedAttr<F32Attr, "0.0">:$clip,
DefaultValuedAttr<BoolAttr, "false">:$inputForget)>
{
let summary = "LSTM Cell";
let description = [{
LSTM Cell
X The input tensor with shape: [batch_size, input_size].
H_t The hidden state tensor at current time step with shape: [batch_size, hidden_size].
C_t The cell state tensor at current time step with shape: [batch_size, hidden_size].
W The weight tensor with shape: [4*hidden_size, input_size].
R The recurrence weight tensor with shape: [4*hidden_size, hidden_size].
B [Optional] The bias tensor for gates with shape: [4*hidden_size].
P [Optional] The weight tensor for peepholes with shape:
[3*hidden_size] - 3 because only the i, o, f gates have peepholes.
The order is: input, output, forget gates.
hiddenSize The number of hidden units for recurrent cell.
weightsFormat The order of gates in weights tensors.
The default format is IFCO since it is used by DNNL.
activations The vector of activation functions used inside
recurrent cell.
activationsAlpha The vector of alpha parameters for activation
functions in order respective to activation list.
activationsBeta The vector of beta parameters for activation
functions in order respective to activation list.
clip The value defining clipping range [-clip, clip] on
input of activation functions.
inputForget Controls coupling input and forget gates.
}];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{
return mlir::success(); /* TBD */
}];
let builders = [
OpBuilder<
"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *X, Value* W, Value* R, Value* H_t, Value* C_t,"
"Attribute hiddenSize, ArrayAttr activations,"
"ArrayAttr activationAlpha, ArrayAttr activationBeta,"
"Attribute clip, Attribute inputForget", [{
tblgen_state.addOperands({X, W, R, H_t, C_t});
tblgen_state.addAttribute("hiddenSize", hiddenSize);
tblgen_state.addAttribute("activations", activations);
tblgen_state.addAttribute("activationAlpha", activationAlpha);
tblgen_state.addAttribute("activationBeta", activationBeta);
tblgen_state.addAttribute("clip", clip);
tblgen_state.addAttribute("inputForget", inputForget);
tblgen_state.addTypes(res);
}]>,
OpBuilder<
"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *X, Value* W, Value* R, Value* H_t, Value* C_t,"
"Attribute hiddenSize",
[{
tblgen_state.addOperands({X, W, R, H_t, C_t});
tblgen_state.addAttribute("hiddenSize", hiddenSize);
tblgen_state.addTypes(res);
}]>
];
let extraClassDeclaration = [{
// get bias operand if present
Value* B()
{
auto varArgs = optionalArgs();
return varArgs.begin() != varArgs.end() ? *varArgs.begin() : nullptr;
}
// get peephole weights operand if present
Value* P()
{
auto varArgs = optionalArgs();
auto it = varArgs.begin();
if (it == varArgs.end())
return nullptr;
it = std::next(it);
return it != varArgs.end() ? *it : nullptr;
}
void setHiddenSize (const Attribute& attr) { this->setAttr("hiddenSize", attr); }
void setActivations (const ArrayAttr& attr) { this->setAttr("activations", attr); }
void setActivationsAlpha (const ArrayAttr& attr) { this->setAttr("activationAlpha", attr); }
void setActivationsBeta (const ArrayAttr& attr) { this->setAttr("activationBeta", attr); }
void setClip(const Attribute& attr) { this->setAttr("clip", attr); }
}];
}
// LSTM Sequence Op
def NGLSTMSequenceOp :
NG_OneResult_Op<"lstmSeq", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$X, NG_TensorType:$H_t, NG_TensorType:$C_t,
NG_TensorType:$S_l, NG_TensorType:$W, NG_TensorType:$R,
NG_TensorType:$B, Variadic<NG_TensorType>:$optionalArgs,
I64Attr:$hiddenSize,
LSTMSeqDirectionsEnumAttr:$direction,
DefaultValuedAttr<LSTMWeightsFormatEnumAttr,
"static_cast<int32_t>(MLIRLSTMWeightsFormat::IFCO)">:$weightFormat,
DefaultValuedAttr<StrArrayAttr, "{\"sigmoid\",\"tanh\",\"tanh\"}">:$activations,
DefaultValuedAttr<F32ArrayAttr, "{}">:$activationAlpha,
DefaultValuedAttr<F32ArrayAttr, "{}">:$activationBeta,
DefaultValuedAttr<F32Attr, "0.0">:$clip,
DefaultValuedAttr<BoolAttr, "false">:$inputForget)>
{
let summary = "LSTM Sequence";
let description = [{
Class for lstm sequence node.
It follows notation and equations defined as in ONNX standard:
https://github.com/onnx/onnx/blob/master/docs/Operators.md#LSTM
See: LSTMCell, RNNCell, GRUCell
}];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }];
let extraClassDeclaration = [{
void setHiddenSize (const Attribute& attr) { this->setAttr("hiddenSize", attr); }
void setActivations (const ArrayAttr& attr) { this->setAttr("activations", attr); }
void setActivationsAlpha (const ArrayAttr& attr) { this->setAttr("activationAlpha", attr); }
void setActivationsBeta (const ArrayAttr& attr) { this->setAttr("activationBeta", attr); }
void setClip(const Attribute& attr) { this->setAttr("clip", attr); }
Value* P()
{
auto varArgs = optionalArgs();
return varArgs.begin() != varArgs.end() ? *varArgs.begin() : nullptr;
}
}];
}
// GRU Cell Op
def NGGRUCellOp :
NG_OneResult_Op<"gruCell", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$X, NG_TensorType:$W, NG_TensorType:$R,
NG_TensorType:$H_t, Variadic<NG_TensorType>:$optionalArgs,
I64Attr:$hiddenSize,
DefaultValuedAttr<StrArrayAttr, "{\"sigmoid\",\"tanh\"}">:$activations,
DefaultValuedAttr<F32ArrayAttr, "{}">:$activationAlpha,
DefaultValuedAttr<F32ArrayAttr, "{}">:$activationBeta,
DefaultValuedAttr<F32Attr, "0.0">:$clip,
DefaultValuedAttr<BoolAttr, "false">:$linearBeforeReset)>
{
let summary = "This class represents only single *cell* and not whole GRU *layer*";
let description = [{
X The input tensor with shape: [batch_size, input_size].
W The weight tensor with shape:
[gates_count * hidden_size, input_size].
R The recurrence weight tensor with shape:
[gates_count * hidden_size, hidden_size].
H_t The hidden state tensor at current time step with
shape: [batch_size, hidden_size].
hidden_size The number of hidden units for recurrent cell.
B[Optional] The bias tensor for input gate with shape:
[2 * gates_count * hidden_size].
activations The vector of activation functions used inside
recurrent cell.
activation_alpha The vector of alpha parameters for activation
functions in order respective to activation list.
activation_beta The vector of beta parameters for activation functions
in order respective to activation list.
clip The value defining clipping range [-clip, clip] on
input of activation functions.
}];
let parser = [{ NGRAPH_CHECK(false, "No parser support"); return mlir::failure(); }];
let verifier = [{ return mlir::success(); /* TBD */ }];
let builders = [
OpBuilder<
"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *X, Value* W, Value* R, Value* H_t,"
"Attribute hiddenSize, ArrayAttr activations,"
"ArrayAttr activationAlpha, ArrayAttr activationBeta,"
"Attribute clip, Attribute linearBeforeReset", [{
tblgen_state.addOperands({X, W, R, H_t});
tblgen_state.addAttribute("hiddenSize", hiddenSize);
tblgen_state.addAttribute("activations", activations);
tblgen_state.addAttribute("activationAlpha", activationAlpha);
tblgen_state.addAttribute("activationBeta", activationBeta);
tblgen_state.addAttribute("linearBeforeReset", linearBeforeReset);
tblgen_state.addTypes(res);
}]>,
OpBuilder<
"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *X, Value* W, Value* R, Value* H_t,"
"Attribute hiddenSize",
[{
tblgen_state.addOperands({X, W, R, H_t});
tblgen_state.addAttribute("hiddenSize", hiddenSize);
tblgen_state.addTypes(res);
}]>
];
let extraClassDeclaration = [{
void setHiddenSize (const Attribute& attr) { this->setAttr("hiddenSize", attr); }
void setActivations (const ArrayAttr& attr) { this->setAttr("activation", attr); }
void setActivationsAlpha (const ArrayAttr& attr) { this->setAttr("activatiAlpha", attr); }
void setActivationsBeta (const ArrayAttr& attr) { this->setAttr("activatiBeta", attr); }
void setLinearBeforeReset(const Attribute& attr) { this->setAttr("linearBeforeReset", attr); }
// get Bias operand if present
Value* B()
{
auto varArgs = optionalArgs();
return varArgs.begin() != varArgs.end() ? *varArgs.begin() : nullptr;
}
}];
}
// LayerNorm Op
// Op produces 3 results when keepStats is true
// Op is defined with 3 results, 2 of which are invalid/dead if keepStats is false
def NGLayerNormOp :
NG_Op<"layernorm", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Results<(outs NG_TensorType:$res, NG_TensorType:$mean, NG_TensorType:$var)>,
Arguments<(ins NG_TensorType:$data, Variadic<NG_TensorType>:$optionalArgs,
DefaultValuedAttr<BoolAttr, "true">:$keepStats,
DefaultValuedAttr<I64Attr, "1"> :$beginNormAxis,
DefaultValuedAttr<F32Attr, "1e-5"> :$epsilon)>
{
let summary = "LayerNorm Op";
let description = "Constructs a LayerNorm operation.";
let builders = [
OpBuilder<
"Builder *builder, OperationState &tblgen_state, ArrayRef<Type> res,"
"Value *data, Attribute keepStats, Attribute beginNormAxis, Attribute epsilon", [{
tblgen_state.addOperands(data);
tblgen_state.addAttribute("keepStats", keepStats);
tblgen_state.addAttribute("beginNormAxis", beginNormAxis);
tblgen_state.addAttribute("epsilon", epsilon);
tblgen_state.addTypes(res);
}]>
];
let extraClassDeclaration = [{
// decompose() is declared via DeclareOpInterfaceMethods<FusedOp>.
// TODO: Call a templatized helper: decompose(this) to do the actual decomposition
// get Scale operand if present
Value* Scale()
{
auto varArgs = optionalArgs();
return varArgs.begin() != varArgs.end() ? *varArgs.begin() : nullptr;
}
// get Bias operand if present
Value* Bias()
{
auto varArgs = optionalArgs();
auto it = varArgs.begin();
if (it == varArgs.end())
return nullptr;
it = std::next(it);
return it != varArgs.end() ? *it : nullptr;
}
void setKeepStats(const Attribute& attr) { this->setAttr("keepStats", attr); }
void setBeginNormAxis(const Attribute& attr) { this->setAttr("beginNormAxis", attr);}
void setEpsilon(const Attribute& attr) { this->setAttr("epsilon", attr); }
}];
}
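For reference, the normalization this op represents can be sketched per normalized slice in plain C++ (an illustrative sketch only, not the dialect's lowering; scale/bias application and the mean/variance side outputs are omitted):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // y = (x - mean) / sqrt(var + epsilon) over one normalized slice,
    // i.e. the elements spanned by the axes from beginNormAxis onward.
    std::vector<float> layerNormSlice(const std::vector<float>& x, float epsilon)
    {
        float mean = 0.f, var = 0.f;
        for (float v : x) mean += v;
        mean /= x.size();
        for (float v : x) var += (v - mean) * (v - mean);
        var /= x.size();
        std::vector<float> y(x.size());
        for (std::size_t i = 0; i < x.size(); ++i)
            y[i] = (x[i] - mean) / std::sqrt(var + epsilon);
        return y;
    }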
// LayerNormBackprop Op
// Scale can be optional, in which case use a constant op of 0
def NGLayerNormBackpropOp :
NG_Op<"layernormBackprop", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Results<(outs NG_TensorType:$d_data, NG_TensorType:$d_scale, NG_TensorType:$d_bias)>,
Arguments<(ins NG_TensorType:$data, NG_TensorType:$delta, NG_TensorType:$scale,
Variadic<NG_TensorType>:$optionalArgs,
DefaultValuedAttr<I64Attr, "1">:$beginNormAxis,
DefaultValuedAttr<F32Attr, "1e-5">:$epsilon)>
{
let summary = "LayerNorm Op";
let description = "Constructs an LayerNorm operation.";
let builders = [
OpBuilder<
"Builder *builder, OperationState &tblgen_state, ArrayRef<Type> res,"
"Value *data, Value *delta, Value *mean, Value *variance,"
"Attribute beginNormAxis, Attribute epsilon", [{
tblgen_state.addOperands({data, delta, mean, variance});
tblgen_state.addAttribute("beginNormAxis", beginNormAxis);
tblgen_state.addAttribute("epsilon", epsilon);
tblgen_state.addTypes(res);
}]>,
OpBuilder<
"Builder *builder, OperationState &tblgen_state, ArrayRef<Type> res,"
"Value *data, Value *delta, Value *scale,"
"Attribute beginNormAxis, Attribute epsilon", [{
tblgen_state.addOperands({data, delta, scale});
tblgen_state.addAttribute("beginNormAxis", beginNormAxis);
tblgen_state.addAttribute("epsilon", epsilon);
tblgen_state.addTypes(res);
}]>,
OpBuilder<
"Builder *builder, OperationState &tblgen_state, ArrayRef<Type> res,"
"Value *data, Value *delta,"
"Attribute beginNormAxis, Attribute epsilon", [{
tblgen_state.addOperands({data, delta});
tblgen_state.addAttribute("beginNormAxis", beginNormAxis);
tblgen_state.addAttribute("epsilon", epsilon);
tblgen_state.addTypes(res);
}]>
];
let extraClassDeclaration = [{
// get Mean operand if present
Value* Mean()
{
auto varArgs = optionalArgs();
return varArgs.begin() != varArgs.end() ? *varArgs.begin() : nullptr;
}
// get Variance operand if present
Value* Variance()
{
auto varArgs = optionalArgs();
auto it = varArgs.begin();
if (it == varArgs.end())
return nullptr;
it = std::next(it);
return it != varArgs.end() ? *it : nullptr;
}
void setBeginNormAxis(const Attribute& attr) { this->setAttr("beginNormAxis", attr);}
void setEpsilon(const Attribute& attr) { this->setAttr("epsilon", attr); }
}];
}
// HardSigmoid Op
def NGHardSigmoid :
NG_OneResult_Op<"hardSigmoid", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$data,
F32Attr:$alpha, F32Attr:$beta)>
{
let summary = "Hard sigmoid op";
let description = [{
Parameterized, bounded sigmoid-like, piecewise linear
function. min(max(alpha*x + beta, 0), 1)
}];
let extraClassDeclaration = [{
void setAlpha(const Attribute& attr) { this->setAttr("alpha", attr); }
void setBeta(const Attribute& attr) { this->setAttr("beta", attr); }
}];
}
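A direct scalar transcription of that formula (plain C++, for illustration):

    #include <algorithm>

    // Hard sigmoid: min(max(alpha * x + beta, 0), 1).
    float hardSigmoid(float x, float alpha, float beta)
    {
        return std::min(std::max(alpha * x + beta, 0.0f), 1.0f);
    }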
// Gemm Op
def NGGemmOp :
NG_OneResult_Op<"gemm", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$A, NG_TensorType:$B, NG_TensorType:$C,
DefaultValuedAttr<F32Attr, "1.0">:$alpha,
DefaultValuedAttr<F32Attr, "1.0">:$beta,
DefaultValuedAttr<BoolAttr, "false">:$transA,
DefaultValuedAttr<BoolAttr, "false">:$transB)>
{
let summary = "Gemm Op";
let description = [{
A' = transpose(A) if transA else A
B' = transpose(B) if transB else B
Compute Y = alpha * A' * B' + beta * C
}];
let extraClassDeclaration = [{
void setAlpha(const Attribute& attr) { this->setAttr("alpha", attr);}
void setBeta(const Attribute& attr) { this->setAttr("beta", attr); }
void setTransA(const Attribute& attr) { this->setAttr("transA", attr); }
void setTransB(const Attribute& attr) { this->setAttr("transB", attr); }
}];
}
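A reference transcription of the formula for row-major matrices (plain C++, illustration only; m, n, k describe the shapes after the optional transposes, so A' is m x k, B' is k x n, and C and Y are m x n):

    #include <vector>

    void gemmRef(const std::vector<float>& A, const std::vector<float>& B,
                 const std::vector<float>& C, std::vector<float>& Y,
                 int m, int n, int k, float alpha, float beta,
                 bool transA, bool transB)
    {
        for (int i = 0; i < m; ++i)
            for (int j = 0; j < n; ++j)
            {
                float acc = 0.0f;
                for (int p = 0; p < k; ++p)
                {
                    float a = transA ? A[p * m + i] : A[i * k + p]; // A'(i, p)
                    float b = transB ? B[j * k + p] : B[p * n + j]; // B'(p, j)
                    acc += a * b;
                }
                Y[i * n + j] = alpha * acc + beta * C[i * n + j];
            }
    }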
// GroupConv Op
def NGGroupConvOp :
NG_OneResult_Op<"groupConv", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$images, NG_TensorType:$filters,
I64ArrayAttr:$strides,
I64ArrayAttr:$padBelow,
I64ArrayAttr:$padAbove,
I64Attr:$groups,
DefaultValuedAttr<PadTypeEnumAttr,
"static_cast<int64_t>(MLIRPadType::EXPLICIT)">:$padType)>
{
let summary = "Group Convolution Op";
let description = [{
Group Convolution
}];
let builders = [
// Builder without padType
OpBuilder<
"Builder *builder, OperationState &tblgen_state, Type res, Value *images,"
"Value *filters, ArrayAttr strides, ArrayAttr padBelow, ArrayAttr padAbove,"
"Attribute groups",
[{
tblgen_state.addOperands({images, filters});
tblgen_state.addAttribute("strides", strides);
tblgen_state.addAttribute("padBelow", padBelow);
tblgen_state.addAttribute("padAbove", padAbove);
tblgen_state.addAttribute("groups", groups);
tblgen_state.addTypes(res);
}]>
];
let extraClassDeclaration = [{
void setStrides(const ArrayAttr& attr) { this->setAttr("strides", attr); }
void setPadAbove(const ArrayAttr& attr) { this->setAttr("padAbove", attr); }
void setPadBelow(const ArrayAttr& attr) { this->setAttr("padBelow", attr); }
void setPadType(const Attribute& attr) { this->setAttr("padType", attr); }
}];
}
// GroupConvTranspose Op
def NGGroupConvTransposeOp :
NG_OneResult_Op<"groupConvTranspose", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$images, NG_TensorType:$filters,
I64ArrayAttr:$strides, I64ArrayAttr:$padBelow, I64ArrayAttr:$padAbove,
I64ArrayAttr:$outputPad,
DefaultValuedAttr<I64Attr, "1UL">:$groups,
DefaultValuedAttr<PadTypeEnumAttr,
"static_cast<int64_t>(MLIRPadType::EXPLICIT)">:$padType,
I64ArrayAttr:$outputShape)>
{
let summary = "Group Transpose Convolution (Deconvolution)";
let description = [{
images The node producing input images data.
filters The node producing filters data.
strides The strides along each feature axis.
padBelow The padding added at the beginning of each feature axis.
padAbove The padding added at the end of each feature axis.
outputPad The zero-padding (adjustment) added to one side of the
output.
groups The number of groups the input channels and output
channels are divided into.
padType The provided padding type.
outputShape The output shape. When provided, padding values are
automatically inferred.
}];
let builders = [
OpBuilder<"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *images, Value *filters, Attribute groups", [{
tblgen_state.addOperands({images, filters});
tblgen_state.addAttribute("groups", groups);
tblgen_state.addTypes(res);
}]>,
OpBuilder<"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *images, Value *filters", [{
tblgen_state.addOperands({images, filters});
tblgen_state.addTypes(res);
}]>,
OpBuilder<"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *images, Value *filters, ArrayAttr strides,"
"ArrayAttr outputPad, ArrayAttr outputShape,"
"Attribute groups", [{
tblgen_state.addOperands({images, filters});
tblgen_state.addAttribute("strides", strides);
tblgen_state.addAttribute("outputPad", outputPad);
tblgen_state.addAttribute("outputShape", outputShape);
tblgen_state.addAttribute("groups", groups);
}]>,
OpBuilder<"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *images, Value *filters,"
"ArrayAttr outputShape, Attribute groups", [{
tblgen_state.addOperands({images, filters});
tblgen_state.addAttribute("outputShape", outputShape);
tblgen_state.addAttribute("groups", groups);
}]>
];
let extraClassDeclaration = [{
void setStrides(const ArrayAttr& attr) { this->setAttr("strides", attr); }
void setPadAbove(const ArrayAttr& attr) { this->setAttr("padAbove", attr); }
void setPadBelow(const ArrayAttr& attr) { this->setAttr("padBelow", attr); }
void setPadType(const Attribute& attr) { this->setAttr("padType", attr); }
void setOutputPad(const ArrayAttr& attr) { this->setAttr("outputPad", attr);}
void setOutputShape(const ArrayAttr& attr){ this->setAttr("outputShape", attr);}
}];
}
// GRN Op
def NGGRNOp :
NG_OneResult_Op<"grn", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$data,
DefaultValuedAttr<F32Attr, "1.0">:$bias)>
{
let summary = "GRN Op";
let description = [{
Global Response Normalization with L2 norm (across channels only)
data - Node producing the input tensor
bias - The bias added to the variance.
}];
let extraClassDeclaration = [{
void setBias(const Attribute& attr) { this->setAttr("bias", attr); }
}];
}
// Clamp Op
def NGClampOp :
NG_OneResult_Op<"clamp", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$data, F64Attr:$min, F64Attr:$max)>
{
let summary = "Clamp Op";
let description = [{
Performs a clipping operation on all elements of the input node.
All input values outside the <min;max> range are set to 'min' or 'max',
depending on which side of the range they fall; values inside the
range remain unchanged.
}];
let extraClassDeclaration = [{
void setMin(const Attribute& attr) { this->setAttr("min", attr); }
void setMax(const Attribute& attr) { this->setAttr("max", attr); }
}];
}
// Gelu Op
def NGGeluOp :
NG_OneResult_Op<"gelu", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$data)>
{
let summary = "Gelu Op";
let description = [{
Gaussian Error Linear Unit
f(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
}];
}
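The formula transcribes directly to scalar C++ (std::erf is the standard error function):

    #include <cmath>

    // Gelu: f(x) = 0.5 * x * (1 + erf(x / sqrt(2))).
    float gelu(float x)
    {
        return 0.5f * x * (1.0f + std::erf(x / std::sqrt(2.0f)));
    }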
// GeluBackpropFactor Op
def NGGeluBackpropFactorOp :
NG_OneResult_Op<"geluBackpropFactor", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$data)>
{
let summary = "Gelu Backprop Op";
let description = [{
Backprop for Gelu(x) is GeluBackprop(x) * delta
}];
}
// Elu Op
def NGEluOp :
NG_OneResult_Op<"elu", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$data, F64Attr:$alpha)>
{
let summary = "Elu Op";
let description = [{
Exponential Linear Unit
x < 0 => f(x) = alpha * (exp(x) - 1.)
x >= 0 => f(x) = x
}];
let extraClassDeclaration = [{
void setAlpha(const Attribute& attr) { this->setAttr("alpha", attr); }
}];
}
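Scalar transcription of the definition above (std::expm1 computes exp(x) - 1 with good accuracy near zero):

    #include <cmath>

    // Elu: alpha * (exp(x) - 1) for x < 0, x otherwise.
    float elu(float x, float alpha)
    {
        return x < 0.0f ? alpha * std::expm1(x) : x;
    }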
// FakeQuant Op
def NGFakeQuantOp :
NG_OneResult_Op<"fakeQuant", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$data, NG_TensorType:$inputLow, NG_TensorType:$inputHigh,
NG_TensorType:$outputLow, NG_TensorType:$outputHigh,
I64Attr:$levels,
DefaultValuedAttr<AutoBroadcastEnumAttr,
"static_cast<int64_t>(MLIRPadType::EXPLICIT)">:$autoBroadcast)>
{
let summary = "Op performing element-wise linear quantization.";
let description = [{
Input floating point values are quantized into a discrete
set of floating point values.
The op performs the following operation:
round((data - input_low) / (input_high - input_low) * (levels-1)) /
(levels-1) * (output_high - output_low) + output_low
}];
let extraClassDeclaration = [{
void setLevels(const Attribute& attr) { this->setAttr("levels", attr); }
void setAutoBroadcast(const Attribute& attr) { this->setAttr("autoBroadcast", attr); }
}];
}
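A scalar transcription of the quantization formula above (plain C++, illustration only; clipping of inputs that fall outside [input_low, input_high] is not shown since the formula above does not include it):

    #include <cmath>
    #include <cstdint>

    // Snap x to one of `levels` evenly spaced points in [inLow, inHigh],
    // then rescale the result into [outLow, outHigh].
    float fakeQuant(float x, float inLow, float inHigh,
                    float outLow, float outHigh, int64_t levels)
    {
        float q = std::round((x - inLow) / (inHigh - inLow) * (levels - 1));
        return q / (levels - 1) * (outHigh - outLow) + outLow;
    }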
// DepthToSpace Op
def NGDepthToSpaceOp :
NG_OneResult_Op<"depthToSpace", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$data,
DefaultValuedAttr<I64Attr, "1">:$blockSize,
DepthSpaceModeEnumAttr:$mode)>
{
let summary = "DepthToSpace Op";
let description = [{
DepthToSpace permutes data from the depth dimension of the input blob into
spatial dimensions.
Values from the depth dimension (assuming NCHW layout) are moved in
spatial blocks to the height and width dimensions.
Output node produces a tensor with shape:
[N, C/(blocksize * blocksize), H * blocksize, W * blocksize]
}];
let extraClassDeclaration = [{
void setBlockSize(const Attribute& attr) { this->setAttr("blockSize", attr);}
void setMode(const Attribute& attr) { this->setAttr("mode", attr); }
}];
}
// ConvolutionBias Op
def NGConvBiasOp :
NG_OneResult_Op<"convBias", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$images, NG_TensorType:$filters, NG_TensorType:$bias,
I64ArrayAttr:$strides, I64ArrayAttr:$padBelow, I64ArrayAttr:$padAbove,
DefaultValuedAttr<BoolAttr, "false">:$withRelu)>
{
let summary = "Convolution Bias Op";
let description = "Convolution + bias forward prop for batched convolution operation.";
let builders = [
OpBuilder<
"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *images, Value *filters, Value *bias, Attribute withRelu", [{
tblgen_state.addOperands({images, filters, bias});
tblgen_state.addAttribute("withRelu", withRelu);
tblgen_state.addTypes(res);
}]>,
OpBuilder<
"Builder *builder, OperationState &tblgen_state, Type res,"
"Value *images, Value *filters, Value *bias", [{
tblgen_state.addOperands({images, filters, bias});
tblgen_state.addTypes(res);
}]>
];
let extraClassDeclaration = [{
void setStrides(const ArrayAttr& attr) { this->setAttr("strides", attr); }
void setPadAbove(const ArrayAttr& attr) { this->setAttr("padAbove", attr); }
void setPadBelow(const ArrayAttr& attr) { this->setAttr("padBelow", attr); }
void setWithRelu(const Attribute& attr) {this->setAttr("withRelu", attr); }
}];
}
// ConvBiasBackpropFiltersBias Op
def NGConvBiasBackpropFiltersBias :
NG_Op<"convBiasBackpropFiltersBias", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Results<(outs NG_TensorType:$filter, NG_TensorType:$bias)>,
Arguments<(ins NG_TensorType:$images, NG_TensorType:$outputDelta,
I64ArrayAttr:$filtersShape, I64ArrayAttr:$biasShape,
I64ArrayAttr:$strides, I64ArrayAttr:$padBelow, I64ArrayAttr:$padAbove)>
{
let extraClassDeclaration = [{
void setFiltersShape(const ArrayAttr& attr) { this->setAttr("filtersShape", attr); }
void setBiasShape(const ArrayAttr& attr) { this->setAttr("biasShape", attr); }
void setStrides(const ArrayAttr& attr) { this->setAttr("strides", attr); }
void setPadAbove(const ArrayAttr& attr) { this->setAttr("padAbove", attr); }
void setPadBelow(const ArrayAttr& attr) { this->setAttr("padBelow", attr); }
}];
}
// ConvBiasAdd Op
def NGConvBiasAddOp :
NG_OneResult_Op<"convBiasAdd", [NoSideEffect, DeclareOpInterfaceMethods<FusedOp>]>,
Arguments<(ins NG_TensorType:$images, NG_TensorType:$filters,
NG_TensorType:$bias, NG_TensorType:$sumInput,
I64ArrayAttr:$strides, I64ArrayAttr:$padBelow, I64ArrayAttr:$padAbove,
DefaultValuedAttr<BoolAttr, "false">:$withRelu)>
{
let summary = "Convolution Bias Add Op";
let description = "Convolution + bias + add forward prop for batched convolution operation.";
let extraClassDeclaration = [{
void setStrides(const ArrayAttr& attr) { this->setAttr("strides", attr); }
void setPadAbove(const ArrayAttr& attr) { this->setAttr("padAbove", attr); }
void setPadBelow(const ArrayAttr& attr) { this->setAttr("padBelow", attr); }
void setWithRelu(const Attribute& attr) {this->setAttr("withRelu", attr); }
}];
}
#endif //NG_FUSED_OPS
@@ -335,3 +335,100 @@ namespace mlir
#define GET_OP_CLASSES
#include "ops.cpp.inc"
}
// Fused Ops decompose
// Stubs for now
// TODO: Implement and move to another file
void mlir::NGSpaceToDepthOp::decompose()
{
}
void mlir::NGSplitOp::decompose()
{
}
void mlir::NGScaleShiftOp::decompose()
{
}
void mlir::NGUnSqueezeOp::decompose()
{
}
void mlir::NGSquaredDiffOp::decompose()
{
}
void mlir::NGSqueezeOp::decompose()
{
}
void mlir::NGShuffleChannelsOp::decompose()
{
}
void mlir::NGRNNCellOp::decompose()
{
}
void mlir::NGFakeQuantOp::decompose()
{
}
void mlir::NGMVN::decompose()
{
}
void mlir::NGHardSigmoid::decompose()
{
}
void mlir::NGGRNOp::decompose()
{
}
void mlir::NGNormalizeL2Op::decompose()
{
}
void mlir::NGConvBiasBackpropFiltersBias::decompose()
{
}
void mlir::NGPrelu::decompose()
{
}
void mlir::NGLayerNormBackpropOp::decompose()
{
}
void mlir::NGGemmOp::decompose()
{
}
void mlir::NGClampOp::decompose()
{
}
void mlir::NGGroupConvTransposeOp::decompose()
{
}
void mlir::NGConvBiasOp::decompose()
{
}
void mlir::NGConvBiasAddOp::decompose()
{
}
void mlir::NGGRUCellOp::decompose()
{
}
void mlir::NGGroupConvOp::decompose()
{
}
void mlir::NGGeluOp::decompose()
{
}
void mlir::NGGeluBackpropFactorOp::decompose()
{
}
void mlir::NGLSTMCellOp::decompose()
{
}
void mlir::NGLSTMSequenceOp::decompose()
{
}
void mlir::NGMatMul::decompose()
{
}
void mlir::NGLayerNormOp::decompose()
{
}
void mlir::NGDepthToSpaceOp::decompose()
{
}
void mlir::NGEluOp::decompose()
{
}
@@ -140,7 +140,9 @@ class NG_Terminator_Op<string mnemonic, list<OpTrait> traits = []> :
NG_Op<mnemonic, !listconcat(traits, [Terminator])>,
Arguments<(ins Variadic<NG_TensorType>:$args)>, Results<(outs)> {}
class NG_Variadic_Result_Op<string mnemonic, list<OpTrait> traits = []> :
NG_Op<mnemonic, !listconcat(traits, [])>,
Results<(outs Variadic<NG_TensorType>:$args)> {}
// Terminator Ops
def NGReturnOp : NG_Terminator_Op<"return">;
@@ -56,7 +56,8 @@ def PadModeEdge : I32EnumAttrCase<"EDGE", 1> ;
def PadModeReflect : I32EnumAttrCase<"REFLECT", 2> ;
def PadModeSymmetric: I32EnumAttrCase<"SYMMETRIC", 3> ;
def PadModeEnumAttr : I32EnumAttr<"MLIRPadMode", "Padding modes for pad operator",
def PadModeEnumAttr : I32EnumAttr<"MLIRPadMode",
"Padding modes for pad operator",
[PadModeConstant, PadModeEdge, PadModeReflect, PadModeSymmetric]>;
// Sort Types for TopK
@@ -67,4 +68,51 @@ def SortTypeValues : I32EnumAttrCase<"VALUES", 2>;
def SortTypeEnumAttr : I32EnumAttr<"MLIRSortType", "Sort types for topk operator",
[SortTypeNone, SortTypeIndices, SortTypeValues]>;
// Modes for normalizeL2
def EpsModeAdd : I32EnumAttrCase<"ADD", 0>;
def EpsModeMax : I32EnumAttrCase<"MAX", 1>;
def EpsModeEnumAttr : I32EnumAttr<"MLIREpsMode",
"Specifies how eps is combined with L2 value",
[EpsModeAdd, EpsModeMax]>;
def AutoBroadcastNone : I32EnumAttrCase<"NONE", 0>;
def AutoBroadcastExplicit : I32EnumAttrCase<"EXPLICIT", 1>;
def AutoBroadcastNumPy : I32EnumAttrCase<"NUMPY", 2>;
def AutoBroadcastPDPD : I32EnumAttrCase<"PDPD", 3>;
def AutoBroadcastEnumAttr : I32EnumAttr<"MLIRAutoBroadcastMode",
"Specifies auto-broadcast for an op",
[AutoBroadcastNone, AutoBroadcastExplicit,
AutoBroadcastNumPy, AutoBroadcastPDPD]>;
def DepthSpaceModeBlocks : I32EnumAttrCase<"BLOCKS_FIRST", 0>;
def DepthSpaceModeDepth : I32EnumAttrCase<"DEPTH_FIRST", 1>;
def DepthSpaceModeEnumAttr: I32EnumAttr<"MLIRDepthToSpaceMode",
"Specifies how the input depth dimension is split to block coordinates",
[DepthSpaceModeBlocks, DepthSpaceModeDepth]>;
def LSTMWeightsFormatFICO : I32EnumAttrCase<"FICO", 0>; // IE
def LSTMWeightsFormatICOF : I32EnumAttrCase<"ICOF", 1>; // PyTorch
def LSTMWeightsFormatIFCO : I32EnumAttrCase<"IFCO", 2>; // DNNL, TF, MxNet
def LSTMWeightsFormatIFOC : I32EnumAttrCase<"IFOC", 3>; // Caffe
def LSTMWeightsFormatIOFC : I32EnumAttrCase<"IOFC", 4>; // ONNX
def LSTMWeightsFormatEnumAttr: I32EnumAttr<"MLIRLSTMWeightsFormat",
"LSTM Cell Weights Format",
[LSTMWeightsFormatFICO, LSTMWeightsFormatICOF,
LSTMWeightsFormatIFCO, LSTMWeightsFormatIFOC,
LSTMWeightsFormatIOFC]>;
def LSTMSeqDirectionFWD : I32EnumAttrCase<"FORWARD", 0>;
def LSTMSeqDirectionRVS : I32EnumAttrCase<"REVERSE", 1>;
def LSTMSeqDirectionBID : I32EnumAttrCase<"BIDIRECTIONAL", 2>;
def LSTMSeqDirectionsEnumAttr: I32EnumAttr<"MLIRLSTMSeqDirection",
"LSTM Sequence Direction",
[LSTMSeqDirectionFWD, LSTMSeqDirectionRVS,
LSTMSeqDirectionBID]>;
#endif // NG_OP_ATTRIBUTES
@@ -500,7 +500,7 @@ def NGMaxPoolBackPropOp :
}
// OneHot
-def NGOneHOtOp :
+def NGOneHotOp :
NG_OneResult_Op<"oneHot", [NoSideEffect, OpVersion0]>,
Arguments<(ins NG_TensorType :$arg,
I64ArrayAttr :$shape,
@@ -552,7 +552,7 @@ def NGPadOp :
}
// ReplaceSlice
-def NGReplaceSlice :
+def NGReplaceSliceOp :
NG_OneResult_Op<"replaceSlice", [NoSideEffect, OpVersion0]>,
Arguments<(ins NG_TensorType:$arg0,
NG_TensorType :$arg1,
@@ -583,7 +583,7 @@ def NGReplaceSlice :
}
// slice
-def NGSlice :
+def NGSliceOp :
NG_OneResult_Op<"slice", [NoSideEffect, OpVersion0]>,
Arguments<(ins NG_TensorType:$arg,
I64ArrayAttr :$lowerBounds,
@@ -611,7 +611,7 @@ def NGSlice :
}
// reshape
-def NGReshape :
+def NGReshapeOp :
NG_OneResult_Op<"reshape", [NoSideEffect, OpVersion0]>,
Arguments<(ins NG_TensorType:$arg,
I64ArrayAttr :$axisOrder,
@@ -636,7 +636,7 @@ def NGReshape :
}
// softmax
-def NGSoftMax :
+def NGSoftMaxOp :
NG_OneResult_Op<"softmax", [NoSideEffect, OpVersion0]>,
Arguments<(ins NG_TensorType :$arg,
I64ArrayAttr :$axes)>
@@ -655,7 +655,7 @@ def NGSoftMax :
}
// topk
-def NGTopK :
+def NGTopKOp :
NG_OneResult_Op<"topk", [NoSideEffect, OpVersion0]>,
Arguments<(ins NG_TensorType :$arg,
NG_TensorType :$k,
// RUN: ngraph-opt %s | FileCheck %s
// Verify the printed output can be parsed.
// RUN: ngraph-opt %s | ngraph-opt | FileCheck %s
// These tests verify the parser, builder and printer of fused ops.
// CHECK-LABEL: func @squeeze
func @squeeze(%arg0: !ng.tensor<2x1x2x1xf32>, %arg1: !ng.tensor<2xi64>) -> !ng.tensor<2x2xf32> {
// CHECK: %{{[0-9]+}} = "ng.squeeze"(%{{.*}}, %{{.*}}) : (!ng.tensor<2x1x2x1xf32>, !ng.tensor<2xi64>) -> !ng.tensor<2x2xf32>
%0 = "ng.squeeze"(%arg0, %arg1) : (!ng.tensor<2x1x2x1xf32>, !ng.tensor<2xi64>) -> !ng.tensor<2x2xf32>
"ng.return"(%0) : (!ng.tensor<2x2xf32>) -> ()
}
// -----
// CHECK-LABEL: func @unsqueeze
func @unsqueeze(%arg0: !ng.tensor<2x2xf32>, %arg1: !ng.tensor<2xi64>) -> !ng.tensor<2x1x2x1xf32> {
// CHECK: %{{[0-9]+}} = "ng.unsqueeze"(%{{.*}}, %{{.*}}) : (!ng.tensor<2x2xf32>, !ng.tensor<2xi64>) -> !ng.tensor<2x1x2x1xf32>
%0 = "ng.unsqueeze"(%arg0, %arg1) : (!ng.tensor<2x2xf32>, !ng.tensor<2xi64>) -> !ng.tensor<2x1x2x1xf32>
"ng.return"(%0) : (!ng.tensor<2x1x2x1xf32>) -> ()
}
// -----
// CHECK-LABEL: func @sqrddiff
func @sqrddiff(%arg0: !ng.tensor<2x2xf32>, %arg1: !ng.tensor<2x2xf32>) -> !ng.tensor<2x2xf32> {
// CHECK: %{{[0-9]+}} = "ng.sqrdDiff"(%{{.*}}, %{{.*}}) : (!ng.tensor<2x2xf32>, !ng.tensor<2x2xf32>) -> !ng.tensor<2x2xf32>
%0 = "ng.sqrdDiff"(%arg0, %arg1) : (!ng.tensor<2x2xf32>, !ng.tensor<2x2xf32>) -> !ng.tensor<2x2xf32>
"ng.return"(%0) : (!ng.tensor<2x2xf32>) -> ()
}
// -----
// CHECK-LABEL: func @split
func @split(%arg0: !ng.tensor<2x2x16xf32>) -> !ng.tensor<2x2x4xf32> {
// CHECK: %{{[0-9]+}}:4 = "ng.split"(%{{.*}}) {axis = 2 : i64, numSplits = [4, 4, 4, 4]} : (!ng.tensor<2x2x16xf32>) -> (!ng.tensor<2x2x4xf32>, !ng.tensor<2x2x4xf32>, !ng.tensor<2x2x4xf32>, !ng.tensor<2x2x4xf32>)
%0:4= "ng.split"(%arg0) {axis = 2, numSplits = [4, 4, 4, 4]}
: (!ng.tensor<2x2x16xf32>) -> (!ng.tensor<2x2x4xf32>, !ng.tensor<2x2x4xf32>, !ng.tensor<2x2x4xf32>, !ng.tensor<2x2x4xf32>)
"ng.return"(%0#0) : (!ng.tensor<2x2x4xf32>) -> ()
}
// -----
// CHECK-LABEL: func @spaceToDepth
func @spaceToDepth(%arg0: !ng.tensor<1x4x16x16xf32>) -> !ng.tensor<1x64x4x4xf32> {
// CHECK: %{{[0-9]+}} = "ng.spaceToDepth"(%{{.*}}) {blockSize = 4 : i64, mode = 0 : i32} : (!ng.tensor<1x4x16x16xf32>) -> !ng.tensor<1x64x4x4xf32>
%0 = "ng.spaceToDepth"(%arg0) {blockSize = 4, mode = 0 : i32} : (!ng.tensor<1x4x16x16xf32>) -> (!ng.tensor<1x64x4x4xf32>)
"ng.return"(%0) : (!ng.tensor<1x64x4x4xf32>) -> ()
}
// -----
// CHECK-LABEL: func @shuffleChannels
func @shuffleChannels(%arg0: !ng.tensor<1x16x16x16xf32>) -> !ng.tensor<1x16x16x16xf32> {
// CHECK: %{{[0-9]+}} = "ng.shuffleChannels"(%{{.*}}) {axis = 1 : i64, groups = 4 : i64} : (!ng.tensor<1x16x16x16xf32>) -> !ng.tensor<1x16x16x16xf32>
%0 = "ng.shuffleChannels"(%arg0) {axis = 1 : i64, groups = 4 : i64} : (!ng.tensor<1x16x16x16xf32>) -> !ng.tensor<1x16x16x16xf32>
"ng.return"(%0) : (!ng.tensor<1x16x16x16xf32>) -> ()
}
// -----
// CHECK-LABEL: func @scaleShift
func @scaleShift(%arg0: !ng.tensor<2x2xf32>, %arg1: !ng.tensor<2x2xf32>, %arg2: !ng.tensor<2x2xf32>) -> !ng.tensor<2x2xf32> {
// CHECK: %{{[0-9]+}} = "ng.scaleShift"(%{{.*}}, %{{.*}}, %{{.*}}) : (!ng.tensor<2x2xf32>, !ng.tensor<2x2xf32>, !ng.tensor<2x2xf32>) -> !ng.tensor<2x2xf32>
%0 = "ng.scaleShift"(%arg0, %arg1, %arg2) : (!ng.tensor<2x2xf32>, !ng.tensor<2x2xf32>, !ng.tensor<2x2xf32>) -> !ng.tensor<2x2xf32>
"ng.return"(%0) : (!ng.tensor<2x2xf32>) -> ()
}
// -----
// CHECK-LABEL: func @prelu
func @prelu(%arg0: !ng.tensor<2x2xf32>, %arg1: !ng.tensor<2x2xf32>) -> !ng.tensor<2x2xf32> {
// CHECK: %{{[0-9]+}} = "ng.prelu"(%{{.*}}, %{{.*}}) : (!ng.tensor<2x2xf32>, !ng.tensor<2x2xf32>) -> !ng.tensor<2x2xf32>
%0 = "ng.prelu"(%arg0, %arg1) {} : (!ng.tensor<2x2xf32>, !ng.tensor<2x2xf32>) -> (!ng.tensor<2x2xf32>)
"ng.return"(%0) : (!ng.tensor<2x2xf32>) -> ()
}
// -----
// CHECK-LABEL: func @normalizeL2
func @normalizeL2(%arg0: !ng.tensor<1x2x3x4xf32>, %arg1: !ng.tensor<3x!ng.i64>) -> !ng.tensor<1x2x3x4xf32> {
// CHECK: %{{[0-9]+}} = "ng.normalizeL2"(%{{.*}}, %{{.*}}) {eps = {{0.[0-9]+}} : f32, epsMode = 0 : i32} : (!ng.tensor<1x2x3x4xf32>, !ng.tensor<3x!ng.i64>) -> !ng.tensor<1x2x3x4xf32>
%0 = "ng.normalizeL2"(%arg0, %arg1) {eps = 0.01 : f32, epsMode = 0 : i32} : (!ng.tensor<1x2x3x4xf32> , !ng.tensor<3x!ng.i64>) -> !ng.tensor<1x2x3x4xf32>
"ng.return"(%0) : (!ng.tensor<1x2x3x4xf32>) -> ()
}
// -----
// CHECK-LABEL: func @mvn
func @mvn(%arg0: !ng.tensor<1x2x5xf32>) -> !ng.tensor<1x2x5xf32> {
// CHECK: %{{[0-9]+}} = "ng.mvn"(%{{.*}}) {normalizeVariance = false} : (!ng.tensor<1x2x5xf32>) -> !ng.tensor<1x2x5xf32>
%0 = "ng.mvn"(%arg0) {normalizeVariance = false} : (!ng.tensor<1x2x5xf32>) -> !ng.tensor<1x2x5xf32>
"ng.return"(%0) : (!ng.tensor<1x2x5xf32>) -> ()
}
// -----
// CHECK-LABEL: func @matmul
func @matmul(%arg0: !ng.tensor<2x5xf32>, %arg1: !ng.tensor<2x5xf32>) -> !ng.tensor<2x5xf32> {
// CHECK: %{{[0-9]+}} = "ng.matmul"(%{{.*}}, %{{.*}}) : (!ng.tensor<2x5xf32>, !ng.tensor<2x5xf32>) -> !ng.tensor<2x5xf32>
%0 = "ng.matmul"(%arg0, %arg1) : (!ng.tensor<2x5xf32>, !ng.tensor<2x5xf32>) -> !ng.tensor<2x5xf32>
"ng.return"(%0) : (!ng.tensor<2x5xf32>) -> ()
}
// -----
// CHECK-LABEL: func @layernorm
func @layernorm(%arg0: !ng.tensor<2x4xf32>, %arg1: !ng.tensor<4xf32>, %arg2: !ng.tensor<4xf32>) -> !ng.tensor<2x4xf32> {
// CHECK: %{{[0-9]+}}:3 = "ng.layernorm"(%{{.*}}, %{{.*}}, %{{.*}}) : (!ng.tensor<2x4xf32>, !ng.tensor<4xf32>, !ng.tensor<4xf32>) -> (!ng.tensor<2x4xf32>, !ng.tensor<2xf32>, !ng.tensor<2xf32>)
%0:3 = "ng.layernorm"(%arg0, %arg1, %arg2)
: (!ng.tensor<2x4xf32>, !ng.tensor<4xf32>, !ng.tensor<4xf32>) -> (!ng.tensor<2x4xf32>, !ng.tensor<2xf32>, !ng.tensor<2xf32>)
// CHECK: %{{[0-9]+}}:3 = "ng.layernorm"(%{{.*}}) : (!ng.tensor<2x4xf32>) -> (!ng.tensor<2x4xf32>, !ng.tensor<2xf32>, !ng.tensor<2xf32>)
%1:3 = "ng.layernorm"(%arg0)
: (!ng.tensor<2x4xf32>) -> (!ng.tensor<2x4xf32>, !ng.tensor<2xf32>, !ng.tensor<2xf32>)
"ng.return"(%0#0) : (!ng.tensor<2x4xf32>) -> ()
}
// -----
// CHECK-LABEL: func @layernormBackprop
func @layernormBackprop(%arg0: !ng.tensor<2x4xf32>, %arg1: !ng.tensor<2x4xf32>, %arg2: !ng.tensor<2xf32>, %arg3: !ng.tensor<2xf32>, %arg4: !ng.tensor<4xf32>) -> !ng.tensor<2x4xf32> {
// CHECK: %{{[0-9]+}}:3 = "ng.layernormBackprop"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!ng.tensor<2x4xf32>, !ng.tensor<2x4xf32>, !ng.tensor<4xf32>, !ng.tensor<2xf32>, !ng.tensor<2xf32>) -> (!ng.tensor<2x4xf32>, !ng.tensor<2xf32>, !ng.tensor<2xf32>)
%0:3 = "ng.layernormBackprop"(%arg0, %arg1, %arg4, %arg2, %arg3)
: (!ng.tensor<2x4xf32>, !ng.tensor<2x4xf32>, !ng.tensor<4xf32>, !ng.tensor<2xf32>, !ng.tensor<2xf32>) -> (!ng.tensor<2x4xf32>, !ng.tensor<2xf32>, !ng.tensor<2xf32>)
// CHECK: %{{[0-9]+}}:3 = "ng.layernormBackprop"(%{{.*}}, %{{.*}}, %{{.*}}) : (!ng.tensor<2x4xf32>, !ng.tensor<2x4xf32>, !ng.tensor<4xf32>) -> (!ng.tensor<2x4xf32>, !ng.tensor<2xf32>, !ng.tensor<2xf32>)
%1:3 = "ng.layernormBackprop"(%arg0, %arg1, %arg4)
: (!ng.tensor<2x4xf32>, !ng.tensor<2x4xf32>, !ng.tensor<4xf32>) -> (!ng.tensor<2x4xf32>, !ng.tensor<2xf32>, !ng.tensor<2xf32>)
"ng.return"(%0#0) : (!ng.tensor<2x4xf32>) -> ()
}
// -----
// CHECK-LABEL: func @hardSigmoid
func @hardSigmoid(%arg0: !ng.tensor<2x7xf32>) -> !ng.tensor<2x7xf32>
{
%0 = "ng.hardSigmoid"(%arg0) {alpha = 0.125 : f32, beta = 0.642 : f32} : (!ng.tensor<2x7xf32>) -> !ng.tensor<2x7xf32>
"ng.return"(%0) : (!ng.tensor<2x7xf32>) -> ()
}
// -----
// CHECK-LABEL: func @gemm
func @gemm(%arg0: !ng.tensor<3x6xf32>, %arg1: !ng.tensor<6x4xf32>, %arg2: !ng.tensor<3x4xf32>) -> !ng.tensor<3x4xf32> {
// CHECK: %{{[0-9]+}} = "ng.gemm"(%{{.*}}, %{{.*}}, %{{.*}}) : (!ng.tensor<3x6xf32>, !ng.tensor<6x4xf32>, !ng.tensor<3x4xf32>) -> !ng.tensor<3x4xf32>
%0 = "ng.gemm"(%arg0, %arg1, %arg2) : (!ng.tensor<3x6xf32>, !ng.tensor<6x4xf32>, !ng.tensor<3x4xf32>) -> !ng.tensor<3x4xf32>
"ng.return"(%0) : (!ng.tensor<3x4xf32>) -> ()
}
// -----
// CHECK-LABEL: func @groupConv
func @groupConv(%arg0: !ng.tensor<1x4x2x2xf32>, %arg1: !ng.tensor<2x2x1x1xf32>) -> !ng.tensor<1x2x2x2xf32>
{
// CHECK: %{{[0-9]+}} = "ng.groupConv"(%{{.*}}, %{{.*}}) {groups = 2 : i64, padAbove = [0, 0], padBelow = [0, 0], strides = [1, 1]} : (!ng.tensor<1x4x2x2xf32>, !ng.tensor<2x2x1x1xf32>) -> !ng.tensor<1x2x2x2xf32>
%0 = "ng.groupConv"(%arg0, %arg1) {groups=2 : i64, padAbove=[0,0], padBelow=[0,0], strides=[1, 1]}
: (!ng.tensor<1x4x2x2xf32>, !ng.tensor<2x2x1x1xf32>) -> !ng.tensor<1x2x2x2xf32>
"ng.return"(%0) : (!ng.tensor<1x2x2x2xf32>) -> ()
}
// -----
// CHECK-LABEL: func @groupConvTranspose
func @groupConvTranspose(%arg0: !ng.tensor<1x4x2x2xf32>, %arg1: !ng.tensor<2x2x1x1xf32>) -> !ng.tensor<1x2x2x2xf32>
{
// CHECK: %{{[0-9]+}} = "ng.groupConvTranspose"(%{{.*}}, %{{.*}}) {groups = 2 : i64, outputPad = [1, 1], outputShape = [], padAbove = [0, 0], padBelow = [0, 0], strides = [1, 1]} : (!ng.tensor<1x4x2x2xf32>, !ng.tensor<2x2x1x1xf32>) -> !ng.tensor<1x2x2x2xf32>
%0 = "ng.groupConvTranspose"(%arg0, %arg1) {groups=2 : i64, padAbove=[0,0], padBelow=[0,0], outputPad=[1,1], outputShape=[], strides=[1, 1]}
: (!ng.tensor<1x4x2x2xf32>, !ng.tensor<2x2x1x1xf32>) -> !ng.tensor<1x2x2x2xf32>
"ng.return"(%0) : (!ng.tensor<1x2x2x2xf32>) -> ()
}
// -----
// CHECK-LABEL: func @grn
func @grn(%arg0: !ng.tensor<1x2x3x4xf32>) -> !ng.tensor<1x2x3x4xf32>
{
//CHECK: %{{[0-9]+}} = "ng.grn"(%{{.*}}) {bias = {{.*}} : f32} : (!ng.tensor<1x2x3x4xf32>) -> !ng.tensor<1x2x3x4xf32>
%0 = "ng.grn"(%arg0) {bias = 0.1 : f32 } : (!ng.tensor<1x2x3x4xf32>) -> !ng.tensor<1x2x3x4xf32>
"ng.return"(%0) : (!ng.tensor<1x2x3x4xf32>) -> ()
}
// -----
//CHECK-LABEL: func @clamp
func @clamp(%arg0: !ng.tensor<4x4xf32>) -> !ng.tensor<4x4xf32>
{
//CHECK: %{{[0-9]+}} = "ng.clamp"(%{{.*}}) {max = {{.*}} : f64, min = {{.*}} : f64} : (!ng.tensor<4x4xf32>) -> !ng.tensor<4x4xf32>
%0 = "ng.clamp"(%arg0) {max = 20.0 : f64, min = 10.0 : f64} : (!ng.tensor<4x4xf32>) -> !ng.tensor<4x4xf32>
"ng.return"(%0) : (!ng.tensor<4x4xf32>) -> ()
}
// -----
//CHECK-LABEL: func @gelu
func @gelu(%arg0: !ng.tensor<4x4xf32>) -> !ng.tensor<4x4xf32>
{
//CHECK: %{{[0-9]+}} = "ng.gelu"({{.*}}) : (!ng.tensor<4x4xf32>) -> !ng.tensor<4x4xf32>
%0 = "ng.gelu"(%arg0) : (!ng.tensor<4x4xf32>) -> !ng.tensor<4x4xf32>
"ng.return"(%0) : (!ng.tensor<4x4xf32>) -> ()
}
// -----
//CHECK-LABEL: func @geluBackpropFactor
func @geluBackpropFactor(%arg0: !ng.tensor<4x4xf32>) -> !ng.tensor<4x4xf32>
{
//CHECK: %{{[0-9]+}} = "ng.geluBackpropFactor"({{.*}}) : (!ng.tensor<4x4xf32>) -> !ng.tensor<4x4xf32>
%0 = "ng.geluBackpropFactor"(%arg0) : (!ng.tensor<4x4xf32>) -> !ng.tensor<4x4xf32>
"ng.return"(%0) : (!ng.tensor<4x4xf32>) -> ()
}
// -----
//CHECK-LABEL: func @elu
func @elu(%arg0: !ng.tensor<4x4xf32>) -> !ng.tensor<4x4xf32>
{
//CHECK: %{{[0-9]+}} = "ng.elu"({{.*}}) {alpha = {{.*}} : f64} : (!ng.tensor<4x4xf32>) -> !ng.tensor<4x4xf32>
%0 = "ng.elu"(%arg0) {alpha = 0.001 : f64}: (!ng.tensor<4x4xf32>) -> !ng.tensor<4x4xf32>
"ng.return"(%0) : (!ng.tensor<4x4xf32>) -> ()
}
// -----
//CHECK-LABEL: func @fakeQuant
func @fakeQuant(%arg0: !ng.tensor<1x2x3x4xf32>, %arg1: !ng.tensor<1xf32>,
%arg2: !ng.tensor<1xf32>, %arg3: !ng.tensor<1xf32>, %arg4: !ng.tensor<1xf32>) -> !ng.tensor<1x2x3x4xf32>
{
//CHECK: %{{[0-9]+}} = "ng.fakeQuant"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {levels = 4 : i64} : (!ng.tensor<1x2x3x4xf32>, !ng.tensor<1xf32>, !ng.tensor<1xf32>, !ng.tensor<1xf32>, !ng.tensor<1xf32>) -> !ng.tensor<1x2x3x4xf32>
%0 = "ng.fakeQuant"(%arg0, %arg1, %arg2, %arg3, %arg4) {levels = 4 : i64}
: (!ng.tensor<1x2x3x4xf32>, !ng.tensor<1xf32>, !ng.tensor<1xf32>, !ng.tensor<1xf32>, !ng.tensor<1xf32>) -> !ng.tensor<1x2x3x4xf32>
"ng.return"(%0) : (!ng.tensor<1x2x3x4xf32>) -> ()
}
// -----
//CHECK-LABEL: func @depthToSpace
func @depthToSpace(%arg0: !ng.tensor<1x8x2x2xf32>) -> !ng.tensor<1x2x4x4xf32>
{
//CHECK: %{{[0-9]+}} = "ng.depthToSpace"(%{{.*}}) {blockSize = 2 : i64, mode = 0 : i32} : (!ng.tensor<1x8x2x2xf32>) -> !ng.tensor<1x2x4x4xf32>
%0 = "ng.depthToSpace"(%arg0) {blockSize = 2 : i64, mode = 0 : i32} : (!ng.tensor<1x8x2x2xf32>) -> !ng.tensor<1x2x4x4xf32>
"ng.return"(%0) : (!ng.tensor<1x2x4x4xf32>) -> ()
}
// -----
//CHECK-LABEL: func @convBias
func @convBias(%arg0: !ng.tensor<1x3x2xf32>, %arg1: !ng.tensor<2x3x1xf32>, %arg2: !ng.tensor<2xf32>) -> (!ng.tensor<1x2x2xf32>)
{
//CHECK: %{{[0-9]+}} = "ng.convBias"(%{{.*}}, %{{.*}}, %{{.*}}) {padAbove = [0], padBelow = [0], strides = [1]} : (!ng.tensor<1x3x2xf32>, !ng.tensor<2x3x1xf32>, !ng.tensor<2xf32>) -> !ng.tensor<1x2x2xf32>
%0 = "ng.convBias"(%arg0, %arg1, %arg2) {padAbove=[0], padBelow=[0], strides=[1]}
: (!ng.tensor<1x3x2xf32>, !ng.tensor<2x3x1xf32>, !ng.tensor<2xf32>) -> !ng.tensor<1x2x2xf32>
"ng.return"(%0) : (!ng.tensor<1x2x2xf32>) -> ()
}
// -----
//CHECK-LABEL: func @convBiasBackprop
func @convBiasBackprop(%arg0: !ng.tensor<1x3x2x2xf32>, %arg1: !ng.tensor<1x2x2x2xf32>) -> (!ng.tensor<12x3x1x1xf32>)
{
//CHECK: %{{[0-9]+}}:2 = "ng.convBiasBackpropFiltersBias"(%{{.*}}, %{{.*}}) {biasShape = [2], filtersShape = [2, 3, 1, 1], padAbove = [0], padBelow = [0], strides = [1]} : (!ng.tensor<1x3x2x2xf32>, !ng.tensor<1x2x2x2xf32>) -> (!ng.tensor<2x3x1x1xf32>, !ng.tensor<2xf32>)
%0:2 = "ng.convBiasBackpropFiltersBias"(%arg0, %arg1) {biasShape=[2], filtersShape=[2, 3, 1, 1], padAbove=[0], padBelow=[0], strides=[1]}
: (!ng.tensor<1x3x2x2xf32>, !ng.tensor<1x2x2x2xf32>) -> (!ng.tensor<2x3x1x1xf32>, !ng.tensor<2xf32>)
"ng.return"(%0#0) : (!ng.tensor<2x3x1x1xf32>) -> ()
}
// -----
//CHECK-LABEL: func @convBiasAdd
func @convBiasAdd(%arg0: !ng.tensor<1x3x2x2xf32>, %arg1: !ng.tensor<2x3x1x1xf32>, %arg2: !ng.tensor<2xf32>, %arg3: !ng.tensor<1x2x2x2xf32>) -> !ng.tensor<1x2x2x2xf32>
{
// CHECK: %{{[0-9]+}} = "ng.convBiasAdd"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {padAbove = [0, 0], padBelow = [0, 0], strides = [1, 1]} : (!ng.tensor<1x3x2x2xf32>, !ng.tensor<2x3x1x1xf32>, !ng.tensor<2xf32>, !ng.tensor<1x2x2x2xf32>) -> !ng.tensor<1x2x2x2xf32>
%0 = "ng.convBiasAdd" (%arg0, %arg1, %arg2, %arg3) {padAbove=[0, 0], padBelow=[0, 0], strides=[1, 1]}
: (!ng.tensor<1x3x2x2xf32>, !ng.tensor<2x3x1x1xf32>, !ng.tensor<2xf32>, !ng.tensor<1x2x2x2xf32>) -> !ng.tensor<1x2x2x2xf32>
"ng.return"(%0) : (!ng.tensor<1x2x2x2xf32>) -> ()
}
// -----
//CHECK-LABEL: func @rnnCell
func @rnnCell(%arg0: !ng.tensor<2x3xf32>, %arg1: !ng.tensor<2x3xf32>, %arg2: !ng.tensor<2x3xf32>, %arg3: !ng.tensor<3x3xf32>, %arg4: !ng.tensor<3xf32>) -> !ng.tensor<2x3xf32>
{
// CHECK: %{{[0-9]+}} = "ng.rnnCell"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {hiddenSize = 3 : i64} : (!ng.tensor<2x3xf32>, !ng.tensor<2x3xf32>, !ng.tensor<2x3xf32>, !ng.tensor<3x3xf32>) -> !ng.tensor<2x3xf32>
%0 = "ng.rnnCell" (%arg0, %arg1, %arg2, %arg3) {hiddenSize = 3 : i64}
: (!ng.tensor<2x3xf32>, !ng.tensor<2x3xf32>, !ng.tensor<2x3xf32>, !ng.tensor<3x3xf32>) -> !ng.tensor<2x3xf32>
// CHECK: %{{[0-9]+}} = "ng.rnnCell"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {hiddenSize = 3 : i64} : (!ng.tensor<2x3xf32>, !ng.tensor<2x3xf32>, !ng.tensor<2x3xf32>, !ng.tensor<3x3xf32>, !ng.tensor<3xf32>) -> !ng.tensor<2x3xf32>
%1 = "ng.rnnCell" (%arg0, %arg1, %arg2, %arg3, %arg4) {hiddenSize = 3 : i64}
: (!ng.tensor<2x3xf32>, !ng.tensor<2x3xf32>, !ng.tensor<2x3xf32>, !ng.tensor<3x3xf32>, !ng.tensor<3xf32>) -> !ng.tensor<2x3xf32>
"ng.return" (%0) : (!ng.tensor<2x3xf32>)->()
}
// -----
//CHECK-LABEL: func @lstmCell
func @lstmCell(%arg0: !ng.tensor<2x3xf32>, %arg1: !ng.tensor<2x3xf32>, %arg2: !ng.tensor<2x3xf32>, %arg3: !ng.tensor<12x3xf32>, %arg4: !ng.tensor<12x3xf32>) -> !ng.tensor<2x3xf32>
{
// CHECK: %{{[0-9]+}} = "ng.lstmCell"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {hiddenSize = 3 : i64} : (!ng.tensor<2x3xf32>, !ng.tensor<2x3xf32>, !ng.tensor<2x3xf32>, !ng.tensor<12x3xf32>, !ng.tensor<12x3xf32>) -> !ng.tensor<2x3xf32>
%0 = "ng.lstmCell" (%arg0, %arg1, %arg2, %arg3, %arg4) {hiddenSize = 3 : i64}
: (!ng.tensor<2x3xf32>, !ng.tensor<2x3xf32>, !ng.tensor<2x3xf32>, !ng.tensor<12x3xf32>, !ng.tensor<12x3xf32>) -> !ng.tensor<2x3xf32>
"ng.return" (%0) : (!ng.tensor<2x3xf32>)->()
}
// -----
//CHECK-LABEL: func @gruCell
func @gruCell(%arg0: !ng.tensor<2x3xf32>, %arg1: !ng.tensor<9x3xf32>, %arg2: !ng.tensor<9x3xf32>, %arg3: !ng.tensor<2x3xf32>, %arg4: !ng.tensor<18xf32>) -> !ng.tensor<2x3xf32>
{
// CHECK: %{{[0-9]+}} = "ng.gruCell"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {activations = ["sigmoid", "tanh"], activationsAlpha = [], clip = {{.*}} : f32, hiddenSize = 3 : i64} : (!ng.tensor<2x3xf32>, !ng.tensor<9x3xf32>, !ng.tensor<9x3xf32>, !ng.tensor<2x3xf32>, !ng.tensor<18xf32>) -> !ng.tensor<2x3xf32>
%0 = "ng.gruCell" (%arg0, %arg1, %arg2, %arg3, %arg4) {activations=["sigmoid", "tanh"], activationsAlpha = [], clip = 2.88 : f32, hiddenSize = 3 : i64}
: (!ng.tensor<2x3xf32>, !ng.tensor<9x3xf32>, !ng.tensor<9x3xf32>, !ng.tensor<2x3xf32>, !ng.tensor<18xf32>) -> !ng.tensor<2x3xf32>
// CHECK: %{{[0-9]+}} = "ng.gruCell"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {activations = ["sigmoid", "tanh"], activationsAlpha = [], clip = {{.*}} : f32, hiddenSize = 3 : i64} : (!ng.tensor<2x3xf32>, !ng.tensor<9x3xf32>, !ng.tensor<9x3xf32>, !ng.tensor<2x3xf32>) -> !ng.tensor<2x3xf32>
%1 = "ng.gruCell" (%arg0, %arg1, %arg2, %arg3) {activations=["sigmoid", "tanh"], activationsAlpha = [], clip = 2.88 : f32, hiddenSize = 3 : i64}
: (!ng.tensor<2x3xf32>, !ng.tensor<9x3xf32>, !ng.tensor<9x3xf32>, !ng.tensor<2x3xf32>) -> !ng.tensor<2x3xf32>
"ng.return" (%0) : (!ng.tensor<2x3xf32>) -> ()
}