ngraph · Commit 955361bb

Authored Jun 13, 2019 by nmostafa

    Fix rebase issues. Style-apply

parent 4ef010fc

Showing 7 changed files with 73 additions and 68 deletions
src/contrib/mlir/compiler.cpp                       +7   -9
src/contrib/mlir/compiler.hpp                       +1   -1
src/contrib/mlir/dialect/type.hpp                   +1   -1
src/contrib/mlir/helpers.cpp                        +6   -3
src/contrib/mlir/lowerer.cpp                        +25  -14
src/contrib/mlir/pass/mlir_subgraph_extraction.cpp  +1   -1
test/backend_arg_reduce.in.cpp                      +32  -39
src/contrib/mlir/compiler.cpp
@@ -24,8 +24,8 @@
 #include "ngraph/graph_util.hpp"
 #include "ngraph/node.hpp"
 #include "ngraph/op/add.hpp"
-#include "ngraph/op/argmin.hpp"
 #include "ngraph/op/argmax.hpp"
+#include "ngraph/op/argmin.hpp"
 #include "ngraph/op/dot.hpp"
 #include "ngraph/op/experimental/compiled_kernel.hpp"
 #include "ngraph/op/util/index_reduction.hpp"
@@ -287,13 +287,13 @@ mlir::Value* MLIRCompiler::COMPILE_OP_DECL(ngraph::op::Add)
    return compiler.create_binary_op<mlir::NGAddOp>(ng_node);
}

template <>
mlir::Value* MLIRCompiler::COMPILE_OP_DECL(ngraph::op::ArgMin)
{
    return compiler.create_index_reduction<mlir::NGArgMinRedOp>(ng_node);
}

template <>
mlir::Value* MLIRCompiler::COMPILE_OP_DECL(ngraph::op::ArgMax)
{
    return compiler.create_index_reduction<mlir::NGArgMaxRedOp>(ng_node);
}
@@ -332,7 +332,7 @@ void MLIRCompiler::create_return()
    m_builder->create<mlir::NGReturnOp>(mlir::UnknownLoc::get(&m_context), value_list);
}

template <typename RedOp>
mlir::Value* MLIRCompiler::create_index_reduction(const ngraph::Node* ng_node)
{
    auto* idx_red = static_cast<const ngraph::op::util::IndexReduction*>(ng_node);
@@ -344,10 +344,8 @@ mlir::Value* MLIRCompiler::create_index_reduction(const ngraph::Node* ng_node)
    mlir::ArrayAttr red_axes_attr = m_builder->getI64ArrayAttr({(int64_t)red_axis});

    return m_builder
        ->create<RedOp>(
            mlir::UnknownLoc::get(&m_context), get_mlir_type(ng_node), arg_val, red_axes_attr)
        .getResult();
}

// Binds MLIR function arguments to the proper values. This includes externally allocated tensors
@@ -409,7 +407,7 @@ void MLIRCompiler::execute()
    if (char* opt_level_str = std::getenv("NGRAPH_MLIR_OPT_LEVEL"))
    {
        opt_level = std::stoi(opt_level_str);
        NGRAPH_CHECK(opt_level >= 0 && opt_level <= 3, "Invalid optimization level");
    }

    // Create an MLIR execution engine. We use a null MLIR pass manager for now to make sure we
    // don't run MLIR passes that were already run. We also pass a default transformer to run
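The hunk above parses an environment variable and bounds-checks it before handing it to the execution engine. A standalone sketch of the same pattern, using only the C++ standard library (the function name and default value are hypothetical, not part of the commit):

```cpp
#include <cstdlib>
#include <stdexcept>

// Read an optimization level from the environment, defaulting to 2
// and rejecting anything outside [0, 3] -- mirroring the check above.
static unsigned read_opt_level()
{
    unsigned opt_level = 2; // hypothetical default
    if (const char* opt_level_str = std::getenv("NGRAPH_MLIR_OPT_LEVEL"))
    {
        int parsed = std::stoi(opt_level_str); // throws std::invalid_argument on junk
        if (parsed < 0 || parsed > 3)
        {
            throw std::out_of_range("Invalid optimization level");
        }
        opt_level = static_cast<unsigned>(parsed);
    }
    return opt_level;
}
```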
src/contrib/mlir/compiler.hpp
@@ -108,7 +108,7 @@ namespace ngraph
            template <typename BinOp>
            mlir::Value* create_binary_op(const ngraph::Node* ng_node);

            template <typename RedOp>
            mlir::Value* create_index_reduction(const ngraph::Node* ng_node);

            void create_return();
src/contrib/mlir/dialect/type.hpp
@@ -235,7 +235,7 @@ namespace mlir
             return floatType.getIntOrFloatBitWidth();
         if (NGBoolType boolType = type.dyn_cast<NGBoolType>())
             return boolType.getWidth();
-        NGRAPH_FAIL() << "Unknown type";
+        NGRAPH_CHECK(false, "Unknown type");
         return -1;
     }
     /// Get number of elements
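The substantive change here swaps the stream-style `NGRAPH_FAIL() << ...` for `NGRAPH_CHECK(false, ...)` as the unreachable-path guard. A minimal sketch of that check-macro pattern, assuming a throwing check similar in spirit to nGraph's (macro and function names here are hypothetical):

```cpp
#include <sstream>
#include <stdexcept>

// Hypothetical stand-in for an NGRAPH_CHECK-style macro: evaluates the
// condition and throws with the message when it fails.
#define SKETCH_CHECK(cond, msg)                                              \
    do                                                                       \
    {                                                                        \
        if (!(cond))                                                         \
        {                                                                    \
            std::ostringstream oss;                                          \
            oss << "Check '" #cond "' failed: " << (msg);                    \
            throw std::runtime_error(oss.str());                             \
        }                                                                    \
    } while (0)

static int width_or_throw(bool known, int width)
{
    if (known)
        return width;
    SKETCH_CHECK(false, "Unknown type"); // unreachable guard, as in the diff
    return -1;                           // satisfies the compiler's return-path analysis
}
```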
src/contrib/mlir/helpers.cpp
@@ -14,21 +14,24 @@
 // limitations under the License.
 //*****************************************************************************
-#include <mlir/ExecutionEngine/MemRefUtils.h>
 #include <stdint.h>
 #include "ngraph/ngraph_visibility.hpp"
+#include <mlir/ExecutionEngine/MemRefUtils.h>

/// Call back to copy Index tensor to Int tensor
/// Can handle int tensors of bitwidth 8, 16, 32 and 64
/// Index width is always intptr_t
extern "C" NGRAPH_API void __mlir_convert_index_to_int(
    mlir::StaticFloatMemRef dst, mlir::StaticFloatMemRef src, size_t numElements, size_t intWidth)
{
    size_t indexSize = sizeof(intptr_t);
    auto pSrc = reinterpret_cast<intptr_t*>(src.data);
    auto pDst = reinterpret_cast<char*>(dst.data);
    for (auto i = 0; i < numElements; i++)
    {
        switch (intWidth)
        {
        case 8:
            *pDst = static_cast<char>(pSrc[i]);
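The diff view cuts off inside the `switch`; per the doc-comment, the callback handles destination widths of 8, 16, 32 and 64 bits. A self-contained sketch of that width-dispatching copy loop, assuming plain buffers instead of MLIR memref descriptors (the cases beyond 8 are an assumption based on the comment, not the file's actual contents):

```cpp
#include <cstddef>
#include <cstdint>

// Copy numElements pointer-width index values into an integer buffer
// whose element bit width is intWidth (8, 16, 32 or 64), narrowing each.
static void convert_index_to_int(void* dst, const intptr_t* src, size_t numElements, size_t intWidth)
{
    char* pDst = static_cast<char*>(dst);
    for (size_t i = 0; i < numElements; i++)
    {
        switch (intWidth)
        {
        case 8: *reinterpret_cast<int8_t*>(pDst) = static_cast<int8_t>(src[i]); break;
        case 16: *reinterpret_cast<int16_t*>(pDst) = static_cast<int16_t>(src[i]); break;
        case 32: *reinterpret_cast<int32_t*>(pDst) = static_cast<int32_t>(src[i]); break;
        case 64: *reinterpret_cast<int64_t*>(pDst) = static_cast<int64_t>(src[i]); break;
        }
        pDst += intWidth / 8; // advance by the destination element size in bytes
    }
}
```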
src/contrib/mlir/lowerer.cpp
@@ -46,8 +46,12 @@ namespace
#include "op_lowerers.inc"

    // Helpers
    template <typename RedOp>
    void lowerIndexReduction(Operation* op,
                             ArrayRef<Value*> operands,
                             PatternRewriter& rewriter,
                             DialectLoweringPass& m_pass,
                             bool isMin);

    /// Use Dialect Converson Framework
    class DialectLowerer : public DialectConversion
@@ -94,6 +98,7 @@ namespace
                          ArrayRef<Type> args,
                          ArrayRef<Type> output,
                          PatternRewriter& rewriter);

    private:
        void findOutputValues();
        void processFakeInstrs();
@@ -189,14 +194,17 @@ namespace
            else
            {
                auto tensorType = origResult->getType().cast<NGTensorType>();
                auto newResult = createTempTensor(
                    m_dialectLowerer.convertType(tensorType), tensorType.getSizeInBytes(), rewriter);
                newResults.push_back(newResult);
            }
        }
        return newResults;
    }

    Value* DialectLoweringPass::createTempTensor(Type type, unsigned size, PatternRewriter& rewriter)
    {
        auto callBackFunc = getCallDecl("__mlir_allocate",
                                        {rewriter.getIndexType(), rewriter.getIndexType()},
@@ -206,8 +214,7 @@ namespace
            insertMemMgrDef(&rewriter), /* pointer to mem manager */
            rewriter.create<mlir::ConstantIndexOp>(rewriter.getUnknownLoc(), size)}; /* size to allocate */

        auto newTemp =
            rewriter.create<mlir::CallOp>(rewriter.getUnknownLoc(), callBackFunc, args).getResult(0);
        return newTemp;
    }
@@ -424,14 +431,18 @@ namespace
    REWRITER(NGReturnOp) { rewriter.replaceOpWithNewOp<ReturnOp>(op); }

#undef REWRITER

template <typename T>
void lowerIndexReduction(Operation* op,
                         ArrayRef<Value*> operands,
                         PatternRewriter& rewriter,
                         DialectLoweringPass& m_pass,
                         bool isMin)
{
    T argmin = cast<T>(op);
    auto loc = argmin.getLoc();
    auto axesAttr = argmin.axes();
    NGRAPH_CHECK(axesAttr.size() == 1, "Index Reduction op should have one reduction axis");
    Attribute axisAttr = *axesAttr.begin();
    unsigned axis = axisAttr.dyn_cast<IntegerAttr>().getInt();
@@ -450,10 +461,10 @@ void lowerIndexReduction(Operation* op, ArrayRef<Value*> operands, PatternRewriter& rewriter, DialectLoweringPass& m_pass, bool isMin)
    // We have to store our result in an IndexType tensor and call-back to a type-conversion routine in nGraph
    // TODO: Fix this once MLIR provides explicit cast operations.
    Value* result = m_pass.createTempTensor(
        rewriter.getMemRefType(resultTy.getShape(), rewriter.getIndexType()),
        resultTy.getNumElements() * sizeof(intptr_t), /* hacky way to get target-dependent size of IndexType */
        rewriter);

    // Views
    MemRefView vRes(result), vArg(arg);
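The `sizeof(intptr_t)` factor above makes the temporary-buffer size target-dependent, since MLIR's IndexType is pointer-width. A trivial worked example of the computation, assuming a 64-bit target and the 3x4 result shape the tests below use:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main()
{
    // IndexType is pointer-width, so on a 64-bit target each element
    // occupies sizeof(intptr_t) == 8 bytes: 3 * 4 * 8 = 96 bytes total.
    size_t numElements = 3 * 4;
    size_t bytes = numElements * sizeof(intptr_t);
    std::printf("temp tensor needs %zu bytes\n", bytes);
    return 0;
}
```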
src/contrib/mlir/pass/mlir_subgraph_extraction.cpp
@@ -19,8 +19,8 @@
 #include "ngraph/assertion.hpp"
 #include "ngraph/graph_util.hpp"
 #include "ngraph/op/add.hpp"
-#include "ngraph/op/argmin.hpp"
 #include "ngraph/op/argmax.hpp"
+#include "ngraph/op/argmin.hpp"
 #include "ngraph/op/dot.hpp"
 #include "ngraph/op/experimental/compiled_kernel.hpp"
 #include "ngraph/op/get_output_element.hpp"
test/backend_arg_reduce.in.cpp
@@ -85,16 +85,16 @@ NGRAPH_TEST(${BACKEND_NAME}, argmin_3D_i32)
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::i32, shape);
    copy_data(a,
              test::NDArray<int, 3>({{{12, 2, 10, 9}, {3, 5, 0, 8}, {7, 9, 1, 5}},
                                     {{7, 2, 4, 10}, {6, 10, 2, 2}, {12, 1, 1, 1}},
                                     {{10, 2, 2, 4}, {1, 5, 5, 1}, {7, 12, 2, 2}}})
                  .get_vector());
    auto result = backend->create_tensor(element::i32, rshape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_EQ((vector<int>{1, 0, 1, 2, 1, 2, 2, 2, 1, 0, 0, 1}), read_vector<int>(result));
}
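For reference, the expected vector is consistent with an argmin along axis 1 of the 3x3x4 input: for each (slice, column) pair, the test records the row index of the smallest element, with ties going to the first occurrence. A standalone sketch that reproduces the first slice's answer, {1, 0, 1, 2} (the axis choice and loop bounds are inferred from the data, since the test's setup code is elided above):

```cpp
#include <cstdio>

int main()
{
    // First 3x4 slice of the test input; argmin along the row axis.
    int slice[3][4] = {{12, 2, 10, 9}, {3, 5, 0, 8}, {7, 9, 1, 5}};
    for (int k = 0; k < 4; k++)
    {
        int best = 0;
        for (int j = 1; j < 3; j++)
        {
            if (slice[j][k] < slice[best][k])
            {
                best = j; // strict < keeps the first occurrence on ties
            }
        }
        std::printf("%d ", best); // prints: 1 0 1 2
    }
    std::printf("\n");
    return 0;
}
```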

NGRAPH_TEST(${BACKEND_NAME}, argmin_3D_i64)
@@ -108,19 +108,18 @@ NGRAPH_TEST(${BACKEND_NAME}, argmin_3D_i64)
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::i32, shape);
    copy_data(a,
              test::NDArray<int, 3>({{{12, 2, 10, 9}, {3, 5, 0, 8}, {7, 9, 1, 5}},
                                     {{7, 2, 4, 10}, {6, 10, 2, 2}, {12, 1, 1, 1}},
                                     {{10, 2, 2, 4}, {1, 5, 5, 1}, {7, 12, 2, 2}}})
                  .get_vector());
    auto result = backend->create_tensor(element::i64, rshape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_EQ((vector<int64_t>{1, 0, 1, 2, 1, 2, 2, 2, 1, 0, 0, 1}),
              read_vector<int64_t>(result));
}

NGRAPH_TEST(${BACKEND_NAME}, argmin_4D_i64)
{
    Shape shape{2, 2, 5, 5}; // NCHW ->(0,1,2,3)
@@ -130,8 +129,10 @@ NGRAPH_TEST(${BACKEND_NAME}, argmin_4D_i64)
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a,
              test::NDArray<int, 4>(
                  {{{{3, 1, 1, 2, 105},
                     {0, 3, 2, 1, 2},
                     {2, 4, 2, 0, 1},
                     {2, 5, 1, 1, 22},
@@ -141,11 +142,7 @@ NGRAPH_TEST(${BACKEND_NAME}, argmin_4D_i64)
                     {2, 10, 1, 3, 2},
                     {3, 1, 0, 0, 6},
                     {2, 0, 0, 0, 0}}},
                   {{{0, 2, 1, 1, 0},
                     {0, 0, 0, 0, 1},
                     {0, 0, 1, 0, 3},
                     {2, 0, 0, 3, 0},
                     {0, 0, 0, 0, 1}},
                    {{2, 1, 0, 0, 1},
                     {0, 2, 0, 0, 0},
                     {1, 1, 2, 0, 2},
@@ -292,11 +289,11 @@ NGRAPH_TEST(${BACKEND_NAME}, argmax_3D_i32)
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::i32, shape);
    copy_data(a,
              test::NDArray<int, 3>({{{12, 2, 10, 9}, {3, 5, 0, 8}, {7, 9, 1, 5}},
                                     {{7, 2, 4, 10}, {6, 10, 2, 2}, {12, 1, 1, 1}},
                                     {{10, 2, 2, 4}, {1, 5, 5, 1}, {7, 12, 2, 2}}})
                  .get_vector());
    auto result = backend->create_tensor(element::i32, rshape);

    auto handle = backend->compile(f);
@@ -315,11 +312,11 @@ NGRAPH_TEST(${BACKEND_NAME}, argmax_3D_i64)
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::i32, shape);
    copy_data(a,
              test::NDArray<int, 3>({{{12, 2, 10, 9}, {3, 5, 0, 8}, {7, 9, 1, 5}},
                                     {{7, 2, 4, 10}, {6, 10, 2, 2}, {12, 1, 1, 1}},
                                     {{10, 2, 2, 4}, {1, 5, 5, 1}, {7, 12, 2, 2}}})
                  .get_vector());
    auto result = backend->create_tensor(element::i64, rshape);

    auto handle = backend->compile(f);
@@ -327,7 +324,6 @@ NGRAPH_TEST(${BACKEND_NAME}, argmax_3D_i64)
    EXPECT_EQ((vector<int64_t>{0, 2, 0, 0, 2, 1, 0, 0, 0, 2, 1, 0}),
              read_vector<int64_t>(result));
}

NGRAPH_TEST(${BACKEND_NAME}, argmax_4D_i64)
{
    Shape shape{2, 2, 5, 5}; // NCHW ->(0,1,2,3)
@@ -337,8 +333,10 @@ NGRAPH_TEST(${BACKEND_NAME}, argmax_4D_i64)
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a,
              test::NDArray<int, 4>(
                  {{{{3, 1, 1, 2, 105},
                     {0, 3, 2, 1, 2},
                     {2, 4, 2, 0, 1},
                     {2, 5, 1, 1, 22},
@@ -348,11 +346,7 @@ NGRAPH_TEST(${BACKEND_NAME}, argmax_4D_i64)
                     {2, 10, 1, 3, 2},
                     {3, 1, 0, 0, 6},
                     {2, 0, 0, 0, 0}}},
                   {{{0, 2, 1, 1, 0},
                     {0, 0, 0, 0, 1},
                     {0, 0, 1, 0, 3},
                     {2, 0, 0, 3, 0},
                     {0, 0, 0, 0, 1}},
                    {{2, 1, 0, 0, 1},
                     {0, 2, 0, 0, 0},
                     {1, 1, 2, 0, 2},
@@ -366,7 +360,6 @@ NGRAPH_TEST(${BACKEND_NAME}, argmax_4D_i64)
              read_vector<int64_t>(result));
}

NGRAPH_TEST(${BACKEND_NAME}, argmax_3D_axis_0) // Along Channels
{
    Shape shape{3, 4, 2}; // CHW ->(0,1,2)