ngraph / Commits / 4ef010fc

Commit 4ef010fc, authored Jun 13, 2019 by nmostafa
Parent: 6e672209

Add support for ArgMax.

Showing 9 changed files with 351 additions and 147 deletions:
  src/contrib/mlir/compiler.cpp                        +32   -14
  src/contrib/mlir/compiler.hpp                        +3    -0
  src/contrib/mlir/dialect/type.hpp                    +0    -3
  src/contrib/mlir/helpers.cpp                         +1    -1
  src/contrib/mlir/lowerer.cpp                         +114  -128
  src/contrib/mlir/op_lowerers.inc                     +1    -0
  src/contrib/mlir/ops_supported.inc                   +1    -0
  src/contrib/mlir/pass/mlir_subgraph_extraction.cpp   +8    -0
  test/backend_arg_reduce.in.cpp                       +191  -1
src/contrib/mlir/compiler.cpp

@@ -25,6 +25,7 @@
 #include "ngraph/node.hpp"
 #include "ngraph/op/add.hpp"
 #include "ngraph/op/argmin.hpp"
+#include "ngraph/op/argmax.hpp"
 #include "ngraph/op/dot.hpp"
 #include "ngraph/op/experimental/compiled_kernel.hpp"
 #include "ngraph/op/util/index_reduction.hpp"
 ...

@@ -289,20 +290,13 @@ mlir::Value* MLIRCompiler::COMPILE_OP_DECL(ngraph::op::Add)
 template <>
 mlir::Value* MLIRCompiler::COMPILE_OP_DECL(ngraph::op::ArgMin)
 {
-    auto* idx_red = static_cast<const ngraph::op::util::IndexReduction*>(ng_node);
-    auto arg = idx_red->get_argument(0);
-    size_t red_axis = idx_red->get_reduction_axis();
-
-    mlir::Value* arg_val = compiler.get_tensor_value(arg->get_output_tensor_ptr().get()).m_value;
-    mlir::ArrayAttr red_axes_attr = compiler.m_builder->getI64ArrayAttr({(int64_t)red_axis});
-
-    return compiler.m_builder
-        ->create<mlir::NGArgMinRedOp>(mlir::UnknownLoc::get(&compiler.m_context),
-                                      compiler.get_mlir_type(ng_node),
-                                      arg_val,
-                                      red_axes_attr)
-        .getResult();
+    return compiler.create_index_reduction<mlir::NGArgMinRedOp>(ng_node);
+}
+
+template <>
+mlir::Value* MLIRCompiler::COMPILE_OP_DECL(ngraph::op::ArgMax)
+{
+    return compiler.create_index_reduction<mlir::NGArgMaxRedOp>(ng_node);
 }
 
 template <>
 ...

@@ -338,6 +332,24 @@ void MLIRCompiler::create_return()
     m_builder->create<mlir::NGReturnOp>(mlir::UnknownLoc::get(&m_context), value_list);
 }
 
+template <typename RedOp>
+mlir::Value* MLIRCompiler::create_index_reduction(const ngraph::Node* ng_node)
+{
+    auto* idx_red = static_cast<const ngraph::op::util::IndexReduction*>(ng_node);
+    auto arg = idx_red->get_argument(0);
+    size_t red_axis = idx_red->get_reduction_axis();
+
+    mlir::Value* arg_val = get_tensor_value(arg->get_output_tensor_ptr().get()).m_value;
+    mlir::ArrayAttr red_axes_attr = m_builder->getI64ArrayAttr({(int64_t)red_axis});
+
+    return m_builder
+        ->create<RedOp>(
+            mlir::UnknownLoc::get(&m_context), get_mlir_type(ng_node), arg_val, red_axes_attr)
+        .getResult();
+}
+
 // Binds MLIR function arguments to the proper values. This includes externally allocated tensors
 // helpers to be used inside the function.
 void MLIRCompiler::bind_arguments()
 ...

@@ -393,6 +405,12 @@ void MLIRCompiler::execute()
     llvm::InitializeNativeTarget();
     llvm::InitializeNativeTargetAsmPrinter();
 
+    unsigned opt_level = 3;
+    if (char* opt_level_str = std::getenv("NGRAPH_MLIR_OPT_LEVEL"))
+    {
+        opt_level = std::stoi(opt_level_str);
+        NGRAPH_CHECK(opt_level >= 0 && opt_level <= 3, "Invalid optimization level");
+    }
+
     // Create an MLIR execution engine. We use a null MLIR pass manager for now to make sure we
     // don't run MLIR passes that were already run. We also pass a default transformer to run
     // LLVM optimizations at level 3.
 ...
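The execute() hunk also makes the LLVM optimization level overridable at run time through the NGRAPH_MLIR_OPT_LEVEL environment variable (valid values 0-3, default 3). A minimal standalone sketch of the same override pattern, assuming only the standard library (read_opt_level is an illustrative name, not code from this commit; the real code validates with NGRAPH_CHECK):

#include <cstdlib>
#include <stdexcept>
#include <string>

// Default to -O3; allow NGRAPH_MLIR_OPT_LEVEL=0..3 to override, reject anything else.
unsigned read_opt_level()
{
    unsigned opt_level = 3;
    if (const char* s = std::getenv("NGRAPH_MLIR_OPT_LEVEL"))
    {
        int v = std::stoi(s);
        if (v < 0 || v > 3)
        {
            throw std::invalid_argument("Invalid optimization level");
        }
        opt_level = static_cast<unsigned>(v);
    }
    return opt_level;
}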
src/contrib/mlir/compiler.hpp

@@ -108,6 +108,9 @@ namespace ngraph
             template <typename BinOp>
             mlir::Value* create_binary_op(const ngraph::Node* ng_node);
 
+            template <typename RedOp>
+            mlir::Value* create_index_reduction(const ngraph::Node* ng_node);
+
             void create_return();
 
             /// Helper to create memref arguments for MLIR function signature
 ...
src/contrib/mlir/dialect/type.hpp

@@ -51,7 +51,6 @@ namespace mlir
     // reuse std float types as-is
     using NGFloatType = mlir::FloatType;
-    using NGIndexType = mlir::IndexType;
 
     /// Integer type. It represents an integer of width 8,16,32,64. Signed or not.
     class NGIntegerType : public mlir::Type::TypeBase<NGIntegerType, mlir::Type>
 ...

@@ -234,8 +233,6 @@ namespace mlir
             return intType.getWidth();
         if (NGFloatType floatType = type.dyn_cast<NGFloatType>())
             return floatType.getIntOrFloatBitWidth();
-        if (NGIndexType indexType = type.dyn_cast<NGIndexType>())
-            return sizeof(intptr_t);
         if (NGBoolType boolType = type.dyn_cast<NGBoolType>())
             return boolType.getWidth();
         NGRAPH_FAIL() << "Unknown type";
 ...
src/contrib/mlir/helpers.cpp

@@ -21,7 +21,7 @@
 /// Call back to copy Index tensor to Int tensor
 /// Can handle int tensors of bitwidth 8, 16, 32 and 64
 /// Index width is always intptr_t
-extern "C" NGRAPH_API void* __mlir_convert_index_to_int(
+extern "C" NGRAPH_API void __mlir_convert_index_to_int(
     mlir::StaticFloatMemRef dst, mlir::StaticFloatMemRef src, size_t numElements, size_t intWidth)
 {
     size_t indexSize = sizeof(intptr_t);
     auto pSrc = reinterpret_cast<intptr_t*>(src.data);
 ...
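The diff only touches the signature of __mlir_convert_index_to_int (its return type changes from void* to void); per the doxygen comment, the callback copies an Index tensor whose elements are intptr_t wide into an integer tensor of bitwidth 8, 16, 32 or 64. A rough illustration of that copy against plain pointers rather than mlir::StaticFloatMemRef descriptors (convert_index_to_int is a hypothetical stand-in name, not the repository's body):

#include <cstddef>
#include <cstdint>

// Copy numElements indices stored as intptr_t into a destination buffer whose
// elements are intWidth bits wide (8/16/32/64), narrowing each value.
static void convert_index_to_int(void* dst, const void* src, size_t numElements, size_t intWidth)
{
    auto pSrc = reinterpret_cast<const intptr_t*>(src);
    for (size_t i = 0; i < numElements; i++)
    {
        switch (intWidth)
        {
        case 8: static_cast<int8_t*>(dst)[i] = static_cast<int8_t>(pSrc[i]); break;
        case 16: static_cast<int16_t*>(dst)[i] = static_cast<int16_t>(pSrc[i]); break;
        case 32: static_cast<int32_t*>(dst)[i] = static_cast<int32_t>(pSrc[i]); break;
        case 64: static_cast<int64_t*>(dst)[i] = static_cast<int64_t>(pSrc[i]); break;
        }
    }
}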
src/contrib/mlir/lowerer.cpp

@@ -45,6 +45,10 @@ namespace
 #include "op_lowerers.inc"
 
+    // Helpers
+    template <typename RedOp>
+    void lowerIndexReduction(Operation* op, ArrayRef<Value*> operands, PatternRewriter& rewriter,
+                             DialectLoweringPass& m_pass, bool isMin);
+
     /// Use Dialect Converson Framework
     class DialectLowerer : public DialectConversion
     {
 ...

@@ -63,6 +67,7 @@ namespace
         {
             RewriteListBuilder<NGAddOpConversion,
                                NGArgMinRedOpConversion,
+                               NGArgMaxRedOpConversion,
                                NGDotOpConversion,
                                NGReturnOpConversion>::build(patterns, mlirContext, m_pass);
         }
 ...

@@ -308,7 +313,6 @@ namespace
     // ADD
     REWRITER(NGAddOp)
     {
         auto add = cast<NGAddOp>(op);
         auto loc = add.getLoc();
 ...

In the large hunk below, the previous inline NGArgMinRedOp rewriter (min-only, asserting with NGRAPH_ASSERT and still carrying a disabled "#if 0" two-dimensional argmin implementation) is removed; its body is generalized into the shared lowerIndexReduction helper defined after the REWRITER macro is undefined.

@@ -409,142 +413,124 @@ namespace
     REWRITER(NGArgMinRedOp)
     {
-        auto argmin = cast<NGArgMinRedOp>(op);
-        auto loc = argmin.getLoc();
-        auto axesAttr = argmin.axes();
-        NGRAPH_ASSERT(axesAttr.size() == 1) << "ArgMin should have one reduction axis";
-        unsigned axis = axesAttr.begin()->dyn_cast<IntegerAttr>().getInt();
-        NGRAPH_ASSERT(operands.size() == 1 && operands[0] != nullptr)
-            << "Expected one non-null operand in ArgMin op";
-        // Retrieve/generate Values for operands and result.
-        ScopedContext scope(rewriter, loc);
-        Value* arg = operands[0];
-        auto arg_type = arg->getType().cast<MemRefType>();
-        Value* finalResult = m_pass.buildOutputDefs(op, rewriter)[0];
-        auto resultTy = argmin.getResult()->getType().cast<NGTensorType>();
-        ... (remainder of the old inline ArgMin lowering, including the initialization and
-             reduction loop nests, the "__mlir_convert_index_to_int" call-back using
-             resultTy.getSizeInBytes(), and the disabled "#if 0" 2D implementation)
+        lowerIndexReduction<mlir::NGArgMinRedOp>(op, operands, rewriter, m_pass, true);
+    }
+
+    REWRITER(NGArgMaxRedOp)
+    {
+        lowerIndexReduction<mlir::NGArgMaxRedOp>(op, operands, rewriter, m_pass, false);
     }
 
     REWRITER(NGReturnOp) { rewriter.replaceOpWithNewOp<ReturnOp>(op); }
 
 #undef REWRITER
+
+    template <typename T>
+    void lowerIndexReduction(Operation* op,
+                             ArrayRef<Value*> operands,
+                             PatternRewriter& rewriter,
+                             DialectLoweringPass& m_pass,
+                             bool isMin)
+    {
+        T argmin = cast<T>(op);
+        auto loc = argmin.getLoc();
+        auto axesAttr = argmin.axes();
+        NGRAPH_CHECK(axesAttr.size() == 1, "Index Reduction op should have one reduction axis");
+        Attribute axisAttr = *axesAttr.begin();
+        unsigned axis = axisAttr.dyn_cast<IntegerAttr>().getInt();
+
+        NGRAPH_CHECK(operands.size() == 1 && operands[0] != nullptr,
+                     "Expected one non-null operand in Index Reduction op");
+
+        // Retrieve/generate Values for operands and result.
+        ScopedContext scope(rewriter, loc);
+        Value* arg = operands[0];
+        auto arg_type = arg->getType().cast<MemRefType>();
+
+        Value* finalResult = m_pass.buildOutputDefs(op, rewriter)[0];
+        Type type = argmin.getResult()->getType();
+        NGTensorType resultTy = type.cast<NGTensorType>();
+
+        // MLIR doesn't support Index to/from Integer type-conversion
+        // We have to store our result in an IndexType tensor and call-back to a
+        // type-conversion routine in nGraph
+        // TODO: Fix this once MLIR provides explicit cast operations.
+        Value* result = m_pass.createTempTensor(
+            rewriter.getMemRefType(resultTy.getShape(), rewriter.getIndexType()),
+            resultTy.getNumElements() *
+                sizeof(intptr_t), /* hacky way to get target-dependent size of IndexType */
+            rewriter);
+
+        // Views
+        MemRefView vRes(result), vArg(arg);
+        // Index Values
+        IndexedValue iRes(result), iArg(arg);
+        // Bounds Index Handles
+        auto resLbs = vRes.getLbs();
+        auto resUbs = vRes.getUbs();
+        auto argLbs = vArg.getLbs();
+        auto argUbs = vArg.getUbs();
+        {
+            // Loop induction vars
+            auto ivs = IndexHandle::makeIndexHandles(vRes.rank());
+            auto pivs = IndexHandle::makeIndexHandlePointers(ivs);
+            // Steps
+            auto steps = vRes.getSteps();
+            auto initVal = vArg.lb(axis);
+            // clang-format off
+            LoopNestBuilder(pivs, resLbs, resUbs, steps)(
+                // single stmt body
+                [&] {
+                    iRes(ivs) = initVal;
+                });
+        }
+
+        // reduction loops
+        {
+            auto allIVs = IndexHandle::makeIndexHandles(vArg.rank());
+            auto pAllIVs = IndexHandle::makeIndexHandlePointers(allIVs);
+            SmallVector<IndexHandle, 8> nonRedIVs;
+            auto steps = vArg.getSteps();
+
+            // iterate over all argument dimensions
+            LoopNestBuilder(pAllIVs, argLbs, argUbs, steps)(
+                [&] {
+                    // build a list of non-reduction IVs
+                    for (auto i = 0; i < vArg.rank(); i++)
+                    {
+                        if (i != axis)
+                            nonRedIVs.push_back(allIVs[i]);
+                    }
+                    // load current min index
+                    ValueHandle currMinIndx = iRes(nonRedIVs);
+                    auto tempIVs = allIVs;
+                    // build list of IVs including current min index
+                    tempIVs[axis] = currMinIndx;
+
+                    iRes(nonRedIVs) =
+                        isMin ? edsc::intrinsics::select(
+                                    iArg(allIVs) < iArg(tempIVs), allIVs[axis], currMinIndx)
+                              : edsc::intrinsics::select(
+                                    iArg(tempIVs) < iArg(allIVs), allIVs[axis], currMinIndx);
+                });
+        }
+
+        // Call-back to convert Index tensor to Integer tensor
+        auto callBackFunc = m_pass.getCallDecl("__mlir_convert_index_to_int",
+                                               {finalResult->getType(),
+                                                result->getType(),
+                                                rewriter.getIndexType(),
+                                                rewriter.getIndexType()},
+                                               {},
+                                               rewriter);
+
+        SmallVector<mlir::Value*, 4> args = {
+            finalResult, /* dst tensor */
+            result,      /* src tensor */
+            /* Num of Elements */
+            rewriter.create<mlir::ConstantIndexOp>(rewriter.getUnknownLoc(),
+                                                   resultTy.getNumElements()),
+            /* Integer size used in final result */
+            rewriter.create<mlir::ConstantIndexOp>(
+                rewriter.getUnknownLoc(),
+                resultTy.getElementType().cast<NGIntegerType>().getWidth())};
+        rewriter.create<mlir::CallOp>(rewriter.getUnknownLoc(), callBackFunc, args);
+
+        rewriter.replaceOp(op, {finalResult});
+    }
 }
 
 namespace mlir
 ...
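For readers unfamiliar with the EDSC builders, the helper above emits two loop nests: the first fills the index result with the reduction axis's lower bound, and the second walks every element of the argument, reloads the current best index from the result, and replaces it via select. The same control flow in ordinary C++, for an assumed row-major rows x cols input reduced along axis 1 (index_reduction_axis1 is an illustrative name, not code from this commit):

#include <cstddef>
#include <vector>

// isMin selects ArgMin vs ArgMax behaviour, mirroring lowerIndexReduction.
std::vector<size_t> index_reduction_axis1(const std::vector<float>& arg,
                                          size_t rows,
                                          size_t cols,
                                          bool isMin)
{
    // 1) Initialization loop nest: result starts at the axis lower bound (index 0).
    std::vector<size_t> res(rows, 0);

    // 2) Reduction loop nest over every element of the argument.
    for (size_t i = 0; i < rows; ++i)
    {
        for (size_t j = 0; j < cols; ++j)
        {
            size_t curr = res[i]; // current best index along the reduced axis
            // select(arg[allIVs] < arg[tempIVs], axis IV, current) for ArgMin,
            // with the comparison operands swapped for ArgMax.
            bool take = isMin ? (arg[i * cols + j] < arg[i * cols + curr])
                              : (arg[i * cols + curr] < arg[i * cols + j]);
            res[i] = take ? j : curr;
        }
    }
    return res;
}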
src/contrib/mlir/op_lowerers.inc

@@ -31,6 +31,7 @@ public:\
 DECL_OP_CONV(NGAddOp)
 DECL_OP_CONV(NGArgMinRedOp)
+DECL_OP_CONV(NGArgMaxRedOp)
 DECL_OP_CONV(NGDotOp)
 DECL_OP_CONV(NGReturnOp)
 ...
src/contrib/mlir/ops_supported.inc

@@ -5,6 +5,7 @@
 MLIR_OP(Add)
 MLIR_OP(ArgMin)
+MLIR_OP(ArgMax)
 MLIR_OP(Dot)
 // Add new supported ops here
 ...
src/contrib/mlir/pass/mlir_subgraph_extraction.cpp

@@ -20,6 +20,7 @@
 #include "ngraph/graph_util.hpp"
 #include "ngraph/op/add.hpp"
 #include "ngraph/op/argmin.hpp"
+#include "ngraph/op/argmax.hpp"
 #include "ngraph/op/dot.hpp"
 #include "ngraph/op/experimental/compiled_kernel.hpp"
 #include "ngraph/op/get_output_element.hpp"
 ...

@@ -107,6 +108,13 @@ bool MLIRSubgraphExtractionPass::is_supported_mlir_op(std::shared_ptr<Node> node)
             return false;
         }
     }
 
+    if (TI(ngraph::op::ArgMin) == TI(*node) || TI(ngraph::op::ArgMax) == TI(*node))
+    {
+        // TODO: Remove this when MLIR has float point cmp support
+        if (!node->input(0).get_element_type().is_integral())
+            return false;
+    }
+
     return true;
 }
 ...
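The new check keeps ArgMin/ArgMax nodes out of MLIR subgraphs when their input is a floating-point tensor, since MLIR-side floating-point comparison was not yet available. A minimal sketch of that gate as a free-standing predicate (index_reduction_supported is a hypothetical name; node->input(0).get_element_type().is_integral() is the same call the pass uses):

#include <memory>
#include "ngraph/node.hpp"

// Only integer-typed inputs are eligible for MLIR offload of index reductions
// until MLIR gains floating-point comparison support.
static bool index_reduction_supported(const std::shared_ptr<ngraph::Node>& node)
{
    return node->input(0).get_element_type().is_integral();
}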
test/backend_arg_reduce.in.cpp

@@ -55,7 +55,7 @@ NGRAPH_TEST(${BACKEND_NAME}, argmin_trivial)
     EXPECT_EQ((vector<int>{3, 2, 1}), read_vector<int>(result));
 }
 
-NGRAPH_TEST(${BACKEND_NAME}, argmin_trivial_i32)
+NGRAPH_TEST(${BACKEND_NAME}, argmin_2D_i32)
 {
     Shape shape{4, 3};
     Shape rshape{3};
 ...

@@ -74,6 +74,91 @@ NGRAPH_TEST(${BACKEND_NAME}, argmin_trivial_i32)
     EXPECT_EQ((vector<int>{3, 2, 1}), read_vector<int>(result));
 }
 
+NGRAPH_TEST(${BACKEND_NAME}, argmin_3D_i32)
+{
+    Shape shape{3, 3, 4};
+    Shape rshape{3, 4};
+    auto A = make_shared<op::Parameter>(element::i32, shape);
+    auto f =
+        make_shared<Function>(make_shared<op::ArgMin>(A, 1, element::i32), ParameterVector{A});
+
+    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+
+    // Create some tensors for input/output
+    auto a = backend->create_tensor(element::i32, shape);
+    copy_data(a,
+              test::NDArray<int, 3>({{{12, 2, 10, 9}, {3, 5, 0, 8}, {7, 9, 1, 5}},
+                                     {{7, 2, 4, 10}, {6, 10, 2, 2}, {12, 1, 1, 1}},
+                                     {{10, 2, 2, 4}, {1, 5, 5, 1}, {7, 12, 2, 2}}})
+                  .get_vector());
+    auto result = backend->create_tensor(element::i32, rshape);
+
+    auto handle = backend->compile(f);
+    handle->call_with_validate({result}, {a});
+    EXPECT_EQ((vector<int>{1, 0, 1, 2, 1, 2, 2, 2, 1, 0, 0, 1}), read_vector<int>(result));
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, argmin_3D_i64)
+{
+    Shape shape{3, 3, 4};
+    Shape rshape{3, 4};
+    auto A = make_shared<op::Parameter>(element::i32, shape);
+    auto f =
+        make_shared<Function>(make_shared<op::ArgMin>(A, 1, element::i64), ParameterVector{A});
+
+    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+
+    // Create some tensors for input/output
+    auto a = backend->create_tensor(element::i32, shape);
+    copy_data(a,
+              test::NDArray<int, 3>({{{12, 2, 10, 9}, {3, 5, 0, 8}, {7, 9, 1, 5}},
+                                     {{7, 2, 4, 10}, {6, 10, 2, 2}, {12, 1, 1, 1}},
+                                     {{10, 2, 2, 4}, {1, 5, 5, 1}, {7, 12, 2, 2}}})
+                  .get_vector());
+    auto result = backend->create_tensor(element::i64, rshape);
+
+    auto handle = backend->compile(f);
+    handle->call_with_validate({result}, {a});
+    EXPECT_EQ((vector<int64_t>{1, 0, 1, 2, 1, 2, 2, 2, 1, 0, 0, 1}),
+              read_vector<int64_t>(result));
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, argmin_4D_i64)
+{
+    Shape shape{2, 2, 5, 5}; // NCHW ->(0,1,2,3)
+    Shape rshape{2, 2, 5};
+    auto A = make_shared<op::Parameter>(element::f32, shape);
+    auto f =
+        make_shared<Function>(make_shared<op::ArgMin>(A, 3, element::i64), ParameterVector{A});
+
+    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+
+    // Create some tensors for input/output
+    auto a = backend->create_tensor(element::f32, shape);
+    copy_data(
+        a,
+        test::NDArray<int, 4>(
+            {{{{3, 1, 1, 2, 105}, {0, 3, 2, 1, 2}, {2, 4, 2, 0, 1}, {2, 5, 1, 1, 22}, {5, 2, 1, 7, 5}},
+              {{3, 1, 2, 2, 1}, {1, 7, 3, 8, 1}, {2, 10, 1, 3, 2}, {3, 1, 0, 0, 6}, {2, 0, 0, 0, 0}}},
+             {{{0, 2, 1, 1, 0}, {0, 0, 0, 0, 1}, {0, 0, 1, 0, 3}, {2, 0, 0, 3, 0}, {0, 0, 0, 0, 1}},
+              {{2, 1, 0, 0, 1}, {0, 2, 0, 0, 0}, {1, 1, 2, 0, 2}, {1, 1, 1, 0, 1}, {1, 0, 0, 0, 2}}}})
+            .get_vector());
+    auto result = backend->create_tensor(element::i64, rshape);
+
+    auto handle = backend->compile(f);
+    handle->call_with_validate({result}, {a});
+    EXPECT_EQ(
+        (vector<int64_t>{1, 0, 3, 2, 2, 1, 0, 2, 2, 1, 0, 0, 0, 1, 0, 2, 0, 3, 3, 1}),
+        read_vector<int64_t>(result));
+}
+
 NGRAPH_TEST(${BACKEND_NAME}, argmin_4D_axis_3_i64)
 {
     Shape shape{2, 2, 5, 5}; // NCHW ->(0,1,2,3)
 ...

@@ -177,6 +262,111 @@ NGRAPH_TEST(${BACKEND_NAME}, argmax_trivial)
     EXPECT_EQ((vector<int>{1, 3, 0}), read_vector<int>(result));
 }
 
+NGRAPH_TEST(${BACKEND_NAME}, argmax_2D_i32)
+{
+    Shape shape{4, 3};
+    Shape rshape{3};
+    auto A = make_shared<op::Parameter>(element::i32, shape);
+    auto f =
+        make_shared<Function>(make_shared<op::ArgMax>(A, 0, element::i32), ParameterVector{A});
+
+    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+
+    // Create some tensors for input/output
+    auto a = backend->create_tensor(element::i32, shape);
+    copy_data(a, vector<int>{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7});
+    auto result = backend->create_tensor(element::i32, rshape);
+
+    auto handle = backend->compile(f);
+    handle->call_with_validate({result}, {a});
+    EXPECT_EQ((vector<int>{0, 3, 0}), read_vector<int>(result));
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, argmax_3D_i32)
+{
+    Shape shape{3, 3, 4};
+    Shape rshape{3, 4};
+    auto A = make_shared<op::Parameter>(element::i32, shape);
+    auto f =
+        make_shared<Function>(make_shared<op::ArgMax>(A, 1, element::i32), ParameterVector{A});
+
+    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+
+    // Create some tensors for input/output
+    auto a = backend->create_tensor(element::i32, shape);
+    copy_data(a,
+              test::NDArray<int, 3>({{{12, 2, 10, 9}, {3, 5, 0, 8}, {7, 9, 1, 5}},
+                                     {{7, 2, 4, 10}, {6, 10, 2, 2}, {12, 1, 1, 1}},
+                                     {{10, 2, 2, 4}, {1, 5, 5, 1}, {7, 12, 2, 2}}})
+                  .get_vector());
+    auto result = backend->create_tensor(element::i32, rshape);
+
+    auto handle = backend->compile(f);
+    handle->call_with_validate({result}, {a});
+    EXPECT_EQ((vector<int>{0, 2, 0, 0, 2, 1, 0, 0, 0, 2, 1, 0}), read_vector<int>(result));
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, argmax_3D_i64)
+{
+    Shape shape{3, 3, 4};
+    Shape rshape{3, 4};
+    auto A = make_shared<op::Parameter>(element::i32, shape);
+    auto f =
+        make_shared<Function>(make_shared<op::ArgMax>(A, 1, element::i64), ParameterVector{A});
+
+    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+
+    // Create some tensors for input/output
+    auto a = backend->create_tensor(element::i32, shape);
+    copy_data(a,
+              test::NDArray<int, 3>({{{12, 2, 10, 9}, {3, 5, 0, 8}, {7, 9, 1, 5}},
+                                     {{7, 2, 4, 10}, {6, 10, 2, 2}, {12, 1, 1, 1}},
+                                     {{10, 2, 2, 4}, {1, 5, 5, 1}, {7, 12, 2, 2}}})
+                  .get_vector());
+    auto result = backend->create_tensor(element::i64, rshape);
+
+    auto handle = backend->compile(f);
+    handle->call_with_validate({result}, {a});
+    EXPECT_EQ((vector<int64_t>{0, 2, 0, 0, 2, 1, 0, 0, 0, 2, 1, 0}),
+              read_vector<int64_t>(result));
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, argmax_4D_i64)
+{
+    Shape shape{2, 2, 5, 5}; // NCHW ->(0,1,2,3)
+    Shape rshape{2, 2, 5};
+    auto A = make_shared<op::Parameter>(element::f32, shape);
+    auto f =
+        make_shared<Function>(make_shared<op::ArgMax>(A, 3, element::i64), ParameterVector{A});
+
+    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+
+    // Create some tensors for input/output
+    auto a = backend->create_tensor(element::f32, shape);
+    copy_data(
+        a,
+        test::NDArray<int, 4>(
+            {{{{3, 1, 1, 2, 105}, {0, 3, 2, 1, 2}, {2, 4, 2, 0, 1}, {2, 5, 1, 1, 22}, {5, 2, 1, 7, 5}},
+              {{3, 1, 2, 2, 1}, {1, 7, 3, 8, 1}, {2, 10, 1, 3, 2}, {3, 1, 0, 0, 6}, {2, 0, 0, 0, 0}}},
+             {{{0, 2, 1, 1, 0}, {0, 0, 0, 0, 1}, {0, 0, 1, 0, 3}, {2, 0, 0, 3, 0}, {0, 0, 0, 0, 1}},
+              {{2, 1, 0, 0, 1}, {0, 2, 0, 0, 0}, {1, 1, 2, 0, 2}, {1, 1, 1, 0, 1}, {1, 0, 0, 0, 2}}}})
+            .get_vector());
+    auto result = backend->create_tensor(element::i64, rshape);
+
+    auto handle = backend->compile(f);
+    handle->call_with_validate({result}, {a});
+    EXPECT_EQ(
+        (vector<int64_t>{4, 1, 1, 4, 3, 0, 3, 1, 4, 0, 1, 4, 4, 3, 4, 0, 1, 2, 0, 4}),
+        read_vector<int64_t>(result));
+}
+
 NGRAPH_TEST(${BACKEND_NAME}, argmax_3D_axis_0) // Along Channels
 {
     Shape shape{3, 4, 2}; // CHW ->(0,1,2)
 ...