submodule / ngraph / Commits / 04ecfeb0

Commit 04ecfeb0 authored Jul 24, 2019 by nmostafa
Re-use LLVM module
parent 5f914429
Showing 3 changed files with 83 additions and 62 deletions

    src/contrib/mlir/compiler.cpp    +47  -59
    src/contrib/mlir/compiler.hpp     +2   -3
    test/backend/mlir.in.cpp         +34   -0
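The net effect of the change: lowering to the LLVM dialect and construction of the MLIR execution engine move out of execute() and into lower_ng_dialect(), so the JIT engine is built once at compile time and re-used on every later call. Below is a minimal, self-contained sketch of that compile-once/run-many pattern; the names JitEngine and CompilerSketch are hypothetical stand-ins, not ngraph's real classes.

// Sketch only: lowering and engine construction happen once in compile(),
// and execute() merely re-uses the cached engine, mirroring this commit.
#include <iostream>
#include <memory>

struct JitEngine
{
    void invoke() { std::cout << "running JIT-compiled code\n"; }
};

struct CompilerSketch
{
    void compile()
    {
        // nGraph dialect -> Affine -> Std -> LLVM dialect, then build the
        // execution engine exactly once and keep it around.
        m_engine = std::make_unique<JitEngine>();
    }

    void execute() { m_engine->invoke(); } // no per-call lowering

    std::unique_ptr<JitEngine> m_engine;
};

int main()
{
    CompilerSketch c;
    c.compile();
    c.execute(); // repeated calls re-use the same engine
    c.execute();
}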
src/contrib/mlir/compiler.cpp
@@ -99,7 +99,6 @@ void MLIRCompiler::compile()
 {
     build_ng_dialect_module();
     lower_ng_dialect();
-    optimize();
 }

 void MLIRCompiler::run()
@@ -239,9 +238,11 @@ MLIRCompiler::TensorInfo MLIRCompiler::get_tensor_value(descriptor::Tensor* tens
     return it->second;
 }

-// Lowers nGraph dialect to affine dialect.
+// Lowers nGraph dialect all the way to LLVM module.
 void MLIRCompiler::lower_ng_dialect()
 {
+    // Lower NG dialect to Affine
+    {
     mlir::PassManager pm;
     pm.addPass(mlir::createDialectLoweringPass(this));
     pm.addPass(mlir::createCanonicalizerPass());
@@ -254,13 +255,10 @@ void MLIRCompiler::lower_ng_dialect()
     }
     dump_mlir_module("Affine Dialect Dump:");
 }

-// Receives affine dialect as input and applies affine and standard dialect based optimizations.
-// Lowering from affine dialect to standard dialect happens along the way. Output consists of
-// standard dialect only ops.
-void MLIRCompiler::optimize()
-{
+    // Lower Affine to Std Dialect
+    {
     mlir::PassManager pm;
     // Lower affine ops
     pm.addPass(mlir::createLowerAffinePass());
@@ -268,6 +266,42 @@ void MLIRCompiler::optimize()
     NGRAPH_CHECK(succeeded(rr), "Affine loop lowering failed");

     dump_mlir_module("Standard Dialect Dump:");
+    }
+
+    NGRAPH_CHECK(m_module, "MLIR module is not ready.");
+
+    // Lower Standard dialect to LLVM dialect.
+    // TODO: Do this via PassManager
+    mlir::LLVMTypeConverter llvm_converter(&m_context);
+    OwningRewritePatternList patterns;
+    mlir::populateStdToLLVMConversionPatterns(llvm_converter, patterns);
+
+    mlir::ConversionTarget target(m_context);
+    target.addLegalDialect<mlir::LLVM::LLVMDialect>();
+    auto result = applyConversionPatterns(*m_module, target, llvm_converter, std::move(patterns));
+    NGRAPH_CHECK(succeeded(result), "Standard to LLVM dialect conversion failed");
+
+    dump_mlir_module("LLVM-IR Dialect Dump:");
+
+    // Lower to LLVM BC and optimize
+    // Initialize LLVM targets.
+    llvm::InitializeNativeTarget();
+    llvm::InitializeNativeTargetAsmPrinter();
+
+    unsigned opt_level = 3;
+    if (char* opt_level_str = std::getenv("NGRAPH_MLIR_OPT_LEVEL"))
+    {
+        opt_level = std::stoi(opt_level_str);
+        NGRAPH_CHECK(opt_level >= 0 && opt_level <= 3, "Invalid optimization level");
+    }
+
+    // Create an MLIR execution engine. We use a null MLIR pass manager for now to make sure we
+    // don't run MLIR passes that were already run. We also pass a default transformer to run
+    // LLVM optimizations at level 3.
+    auto llvm_transformer = mlir::makeOptimizingTransformer(opt_level /*optLevel*/, 0 /*sizeLevel*/);
+    auto maybeEngine = mlir::ExecutionEngine::create(m_module.get(), llvm_transformer);
+    NGRAPH_CHECK(maybeEngine, "failed to construct an execution engine");
+    m_engine = std::move(maybeEngine.get());
 }

 // MLIR builders
@@ -470,7 +504,7 @@ void MLIRCompiler::bind_arguments()
     // actual pointer to the data.

     // create MemRef args
-    auto expected_arguments = allocate_memref_args(func);
+    auto expected_arguments = allocate_memref_args();
     NGRAPH_CHECK(expected_arguments.size(), "Arguments can't be created");
     m_invoke_args = std::move(expected_arguments);
@@ -497,39 +531,6 @@ void MLIRCompiler::bind_arguments()
 // Lowers standard dialect to LLVM dialect and uses the MLIR execution engine to execute the code.
 void MLIRCompiler::execute()
 {
-    NGRAPH_CHECK(m_module, "MLIR module is not ready.");
-
-    // Lower Standard dialect to LLVM dialect.
-    mlir::LLVMTypeConverter llvm_converter(&m_context);
-    OwningRewritePatternList patterns;
-    mlir::populateStdToLLVMConversionPatterns(llvm_converter, patterns);
-
-    mlir::ConversionTarget target(m_context);
-    target.addLegalDialect<mlir::LLVM::LLVMDialect>();
-    auto result = applyConversionPatterns(*m_module, target, llvm_converter, std::move(patterns));
-    NGRAPH_CHECK(succeeded(result), "Standard to LLVM dialect conversion failed");
-
-    dump_mlir_module("LLVM-IR Dialect Dump:");
-
-    // Initialize LLVM targets.
-    llvm::InitializeNativeTarget();
-    llvm::InitializeNativeTargetAsmPrinter();
-
-    unsigned opt_level = 3;
-    if (char* opt_level_str = std::getenv("NGRAPH_MLIR_OPT_LEVEL"))
-    {
-        opt_level = std::stoi(opt_level_str);
-        NGRAPH_CHECK(opt_level >= 0 && opt_level <= 3, "Invalid optimization level");
-    }
-
-    // Create an MLIR execution engine. We use a null MLIR pass manager for now to make sure we
-    // don't run MLIR passes that were already run. We also pass a default transformer to run
-    // LLVM optimizations at level 3.
-    auto llvm_transformer = mlir::makeOptimizingTransformer(opt_level /*optLevel*/, 0 /*sizeLevel*/);
-    auto maybeEngine = mlir::ExecutionEngine::create(m_module.get(), llvm_transformer);
-    NGRAPH_CHECK(maybeEngine, "failed to construct an execution engine");
-    m_engine = std::move(maybeEngine.get());
-
     // Invoke the JIT-compiled function with the arguments. Note that, for API
     // uniformity reasons, it takes a list of type-erased pointers to arguments.
     // Please, note that 'invoke' method is overloaded with a parameter pack version.
@@ -556,32 +557,19 @@ void MLIRCompiler::cleanup()
     m_mem_mgr.freeAll();
 }

-SmallVector<void*, 8> MLIRCompiler::allocate_memref_args(mlir::Function* func)
+SmallVector<void*, 8> MLIRCompiler::allocate_memref_args()
 {
     SmallVector<void*, 8> args;
-    args.reserve(func->getNumArguments());
-    for (const auto& arg : func->getArguments())
+    for (auto i = 0; i < m_external_tensors->size(); i++)
     {
-        auto descriptor = allocate_memref_descriptor(arg->getType());
-        if (!descriptor)
-        {
-            continue;
-        }
+        auto descriptor = allocate_memref_descriptor();
         args.push_back(descriptor);
     }
     return args;
 }

-mlir::StaticFloatMemRef* MLIRCompiler::allocate_memref_descriptor(mlir::Type type)
+mlir::StaticFloatMemRef* MLIRCompiler::allocate_memref_descriptor()
 {
-    auto memRefType = type.dyn_cast<mlir::MemRefType>();
-    if (!memRefType)
-    {
-        return nullptr;
-    }
-    NGRAPH_CHECK(memRefType.getNumDynamicDims() == 0, "No support for dynamic shapes");
-
     // We only use StaticFloatMemRef because that's what MLIR currently offers.
     // We should expand this with different types and dynamic MemRefs
     auto* descriptor =
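The NGRAPH_MLIR_OPT_LEVEL handling added above can be read in isolation: the JIT defaults to optimization level 3 and lets the environment variable override it, rejecting anything outside 0..3. Here is a standalone sketch of that logic, with NGRAPH_CHECK replaced by a plain exception so the snippet compiles on its own; it is an illustration of the pattern in the diff, not a drop-in piece of the compiler.

// Standalone sketch of the opt-level selection used when building the engine.
#include <cstdlib>
#include <iostream>
#include <stdexcept>
#include <string>

unsigned read_mlir_opt_level()
{
    unsigned opt_level = 3; // default used by the commit
    if (const char* opt_level_str = std::getenv("NGRAPH_MLIR_OPT_LEVEL"))
    {
        int parsed = std::stoi(opt_level_str);
        if (parsed < 0 || parsed > 3)
        {
            throw std::runtime_error("Invalid optimization level"); // NGRAPH_CHECK in the real code
        }
        opt_level = static_cast<unsigned>(parsed);
    }
    return opt_level;
}

int main()
{
    std::cout << "LLVM opt level: " << read_mlir_opt_level() << "\n";
}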
src/contrib/mlir/compiler.hpp
@@ -88,7 +88,6 @@ namespace ngraph
            private:
                void build_ng_dialect_module();
                void lower_ng_dialect();
-               void optimize();
                void bind_arguments();
                void execute();
                void cleanup();
@@ -125,10 +124,10 @@ namespace ngraph
                void create_return();

                /// Helper to create memref arguments for MLIR function signature
-               llvm::SmallVector<void*, 8> allocate_memref_args(mlir::Function* func);
+               llvm::SmallVector<void*, 8> allocate_memref_args();

                /// Helper to allocate a mem ref object. Handles static shapes only for now.
-               mlir::StaticFloatMemRef* allocate_memref_descriptor(mlir::Type type);
+               mlir::StaticFloatMemRef* allocate_memref_descriptor();

                /// Helper to dump MLIR module into llvm::dbgs prepended by the message \p msg.
                void dump_mlir_module(const std::string msg);
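After this change, allocate_memref_args() no longer needs the mlir::Function: it simply allocates one static float MemRef descriptor per externally bound tensor. The following rough, standalone analogue uses stub types (StaticFloatMemRefStub is an assumption standing in for mlir::StaticFloatMemRef, and the memory-manager ownership of the real code is reduced to explicit delete) to show the shape of the reworked helpers.

// Standalone analogue of the parameterless helpers declared above.
#include <cstddef>
#include <vector>

struct StaticFloatMemRefStub // stand-in for mlir::StaticFloatMemRef
{
    float* data = nullptr;
};

StaticFloatMemRefStub* allocate_memref_descriptor_stub()
{
    return new StaticFloatMemRefStub(); // static shapes only, as in the diff
}

std::vector<void*> allocate_memref_args_stub(std::size_t num_external_tensors)
{
    std::vector<void*> args;
    args.reserve(num_external_tensors);
    for (std::size_t i = 0; i < num_external_tensors; i++)
    {
        args.push_back(allocate_memref_descriptor_stub()); // one descriptor per external tensor
    }
    return args;
}

int main()
{
    auto args = allocate_memref_args_stub(3); // e.g. three externally bound tensors
    for (void* p : args)
        delete static_cast<StaticFloatMemRefStub*>(p);
}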
test/backend/mlir.in.cpp
@@ -248,3 +248,36 @@ NGRAPH_TEST(${BACKEND_NAME}, mlir_subgraphs_cycle)
     EXPECT_TRUE(test::all_close_f(read_vector<float>(result),
                                   vector<float>{70, 80, 90, 136, 164, 192}));
 }
+
+NGRAPH_TEST(${BACKEND_NAME}, mlir_multi_call)
+{
+    Shape shape_in1{2, 3};
+    Shape shape_in2{3, 3};
+    Shape shape_out{2, 3};
+    auto A = make_shared<op::Parameter>(element::f32, shape_in1);
+    auto B = make_shared<op::Parameter>(element::f32, shape_in2);
+    auto dot = make_shared<op::Dot>(A, B);
+    auto C = make_shared<op::Parameter>(element::f32, shape_in1);
+    auto add = make_shared<op::Add>(dot, C);
+    auto f = make_shared<Function>(add, ParameterVector{A, B, C});
+
+    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+
+    // Create some tensors for input/output
+    shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shape_in1);
+    shared_ptr<runtime::Tensor> b = backend->create_tensor(element::f32, shape_in2);
+    shared_ptr<runtime::Tensor> c = backend->create_tensor(element::f32, shape_in1);
+    shared_ptr<runtime::Tensor> result = backend->create_tensor(element::f32, shape_out);
+
+    copy_data(a, vector<float>{1.f, 2.f, 3.f, 4.f, 5.f, 6.f});
+    copy_data(b, vector<float>{1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f});
+    copy_data(c, vector<float>{5.f, 4.f, 3.f, 2.f, 1.f, 0.f});
+
+    auto handle = backend->compile(f);
+    handle->call_with_validate({result}, {a, b, c});
+    handle->call_with_validate({result}, {a, b, c});
+    handle->call_with_validate({result}, {a, b, c});
+    handle->call_with_validate({result}, {a, b, c});
+
+    EXPECT_TRUE(test::all_close_f(read_vector<float>(result),
+                                  vector<float>{35.f, 40.f, 45.f, 68.f, 82.f, 96.f}));
+}
\ No newline at end of file