Commit bbf66498
authored Oct 26, 2018 by shssf
committed by Scott Cyphers, Oct 26, 2018
IntelGPU backend: Add graph dump ability (#1925)
parent 4e08d9aa
Showing 5 changed files with 404 additions and 0 deletions.

src/ngraph/runtime/intelgpu/CMakeLists.txt          +1   -0
src/ngraph/runtime/intelgpu/intelgpu_backend.cpp    +17  -0
src/ngraph/runtime/intelgpu/intelgpu_backend.hpp    +1   -0
src/ngraph/runtime/intelgpu/visualize_tree.cpp      +350 -0
src/ngraph/runtime/intelgpu/visualize_tree.hpp      +35  -0
src/ngraph/runtime/intelgpu/CMakeLists.txt

@@ -25,6 +25,7 @@ set(SRC
     intelgpu_op_softmax.cpp
     intelgpu_op_custom_func_call.cpp
     code_writer.cpp
+    visualize_tree.cpp
     )
 if (NGRAPH_INTELGPU_ENABLE)
src/ngraph/runtime/intelgpu/intelgpu_backend.cpp

@@ -54,6 +54,7 @@
 #include "ngraph/runtime/intelgpu/intelgpu_op_custom_kernels.hpp"
 #include "ngraph/runtime/intelgpu/intelgpu_op_softmax.hpp"
 #include "ngraph/runtime/intelgpu/intelgpu_tensor_view.hpp"
+#include "ngraph/runtime/intelgpu/visualize_tree.hpp"
 #include "ngraph/function.hpp"
 #include "ngraph/node.hpp"
@@ -328,6 +329,12 @@ runtime::intelgpu::IntelGPUBackend::IntelGPUBackend()
         m_disable_backend_optimizations = true;
     }
 
+    // Dumps the input Function into Graphviz format
+    if (getenv("NGRAPH_INTELGPU_DUMP_FUNCTION") != nullptr)
+    {
+        m_dump_graph_enable = true;
+    }
+
     cldnn::engine_configuration cldnn_configuration(profiling);
     ocl_engine = make_shared<cldnn::engine>(cldnn_configuration);
 }
@@ -356,6 +363,11 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
     cldnn::topology topology;
 
+    if (m_dump_graph_enable)
+    {
+        visualize_tree(func, "intelgpu_", "_orig");
+    }
+
     if (!m_disable_backend_optimizations)
     {
         ngraph::pass::Manager pass_manager;
@@ -369,6 +381,11 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
         pass_manager.register_pass<ngraph::pass::GetOutputElementElimination>();
         pass_manager.run_passes(func);
+
+        if (m_dump_graph_enable)
+        {
+            visualize_tree(func, "intelgpu_", "_opt");
+        }
     }
 
     for (shared_ptr<Node> op : func->get_ops())
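A minimal usage sketch (not part of this commit): the hunks above make the dump depend only on the presence of the NGRAPH_INTELGPU_DUMP_FUNCTION environment variable, with visualize_tree invoked from compile() before and after the optimization passes. The Backend::create call and header path below follow the general nGraph API of this period and are assumptions, not taken from this diff.

// Sketch: enable the IntelGPU graph dump from a host program.
#include <cstdlib>
#include "ngraph/runtime/backend.hpp" // assumed umbrella header for Backend::create

int main()
{
    // Only the presence of the variable is checked (getenv(...) != nullptr),
    // so any value enables the dump.
    setenv("NGRAPH_INTELGPU_DUMP_FUNCTION", "1", 1);

    auto backend = ngraph::runtime::Backend::create("INTELGPU");
    // Compiling a Function on this backend now also writes
    // intelgpu_<function name>_orig.dot and, when backend optimizations run,
    // intelgpu_<function name>_opt.dot into the working directory.
    return 0;
}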
src/ngraph/runtime/intelgpu/intelgpu_backend.hpp

@@ -82,5 +82,6 @@ private:
     bool m_profile_enable = false;
     long m_profile_lines_limit_count = 10;
+    bool m_dump_graph_enable = false;
     std::string delim = std::string(":");
 };
src/ngraph/runtime/intelgpu/visualize_tree.cpp (new file, mode 100644)
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <fstream>
#include <map>
#include <memory>
#include "ngraph/runtime/intelgpu/visualize_tree.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "ngraph/op/batch_norm.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/concat.hpp"
#include "ngraph/op/convolution.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/op/max_pool.hpp"
#include "ngraph/op/product.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/util.hpp"
using namespace ngraph;
using namespace std;
#define NGRAPH_OP(a, b) a,
enum class OP_TYPEID
{
#include "ngraph/op/op_tbl.hpp"
};
#undef NGRAPH_OP
static OP_TYPEID get_typeid(const string& s)
{
    // This expands the op list in op_tbl.hpp into a list of enumerations that look like this:
    // {"Abs", OP_TYPEID::Abs},
    // {"Acos", OP_TYPEID::Acos},
    // ...
#define NGRAPH_OP(a, b) {#a, OP_TYPEID::a},
    static const unordered_map<string, OP_TYPEID> typeid_map{
#include "ngraph/op/op_tbl.hpp"
    };
#undef NGRAPH_OP

    auto it = typeid_map.find(s);
    if (it == typeid_map.end())
    {
        throw unsupported_op("Unsupported op '" + s + "'");
    }

    return it->second;
}
static const string table_begin = "<table border=\"0\">";
static const string table_end = "</table>";
static const string cell_end = "</td>";
static const string table_row_end = cell_end + "</tr>";
static const string font_small_begin = "<font point-size=\"7\">";
static const string font_end = "</font>";
static string cell_begin(const string& align = string("left"))
{
    return string("<td align=\"") + align + "\">";
}

static string table_row_begin(const string& align = string("left"))
{
    return string("<tr>") + cell_begin(align);
}
void print_node_parameters(ostringstream& writer, const shared_ptr<Node>& node)
{
    switch (get_typeid(node->description()))
    {
    case OP_TYPEID::BatchNormTrainingBackprop:
    {
        const shared_ptr<op::BatchNormTrainingBackprop> batch_norm =
            static_pointer_cast<op::BatchNormTrainingBackprop>(node);
        const double eps = batch_norm->get_eps_value();

        writer << table_row_begin() << font_small_begin << "EPS:" << eps << font_end
               << table_row_end;
        break;
    }
    case OP_TYPEID::BatchNormInference:
    case OP_TYPEID::BatchNormTraining:
    {
        const shared_ptr<op::BatchNormBase> batch_norm =
            static_pointer_cast<op::BatchNormBase>(node);
        const double eps = batch_norm->get_eps_value();

        writer << table_row_begin() << font_small_begin << "EPS:" << eps << font_end
               << table_row_end;
        break;
    }
    case OP_TYPEID::GetOutputElement:
    {
        const shared_ptr<op::GetOutputElement> elem =
            static_pointer_cast<op::GetOutputElement>(node);

        writer << table_row_begin() << font_small_begin << "element:" << elem->get_n()
               << font_end << table_row_end;
        break;
    }
    case OP_TYPEID::MaxPool:
    {
        const shared_ptr<op::MaxPool> max_pool = static_pointer_cast<op::MaxPool>(node);

        writer << table_row_begin() << font_small_begin << "win_shape"
               << vector_to_string(max_pool->get_window_shape()) << font_end << table_row_end
               << table_row_begin() << font_small_begin << "win_strides"
               << vector_to_string(max_pool->get_window_movement_strides()) << font_end
               << table_row_end
               << table_row_begin() << font_small_begin << "pad_above"
               << vector_to_string(max_pool->get_padding_above()) << font_end << table_row_end
               << table_row_begin() << font_small_begin << "pad_below"
               << vector_to_string(max_pool->get_padding_below()) << font_end << table_row_end;
        break;
    }
    case OP_TYPEID::MaxPoolBackprop:
    {
        const shared_ptr<op::MaxPoolBackprop> max_pool_b =
            static_pointer_cast<op::MaxPoolBackprop>(node);

        writer << table_row_begin() << font_small_begin << "win_shape"
               << vector_to_string(max_pool_b->get_window_shape()) << font_end << table_row_end
               << table_row_begin() << font_small_begin << "win_strides"
               << vector_to_string(max_pool_b->get_window_movement_strides()) << font_end
               << table_row_end
               << table_row_begin() << font_small_begin << "pad_above"
               << vector_to_string(max_pool_b->get_padding_above()) << font_end << table_row_end
               << table_row_begin() << font_small_begin << "pad_below"
               << vector_to_string(max_pool_b->get_padding_below()) << font_end << table_row_end;
        break;
    }
    case OP_TYPEID::AvgPool:
    {
        const shared_ptr<op::AvgPool> avg_pool = static_pointer_cast<op::AvgPool>(node);

        writer << table_row_begin() << font_small_begin << "win_shape"
               << vector_to_string(avg_pool->get_window_shape()) << font_end << table_row_end
               << table_row_begin() << font_small_begin << "win_strides"
               << vector_to_string(avg_pool->get_window_movement_strides()) << font_end
               << table_row_end
               << table_row_begin() << font_small_begin << "pad_above"
               << vector_to_string(avg_pool->get_padding_above()) << font_end << table_row_end
               << table_row_begin() << font_small_begin << "pad_below"
               << vector_to_string(avg_pool->get_padding_below()) << font_end << table_row_end
               << table_row_begin() << font_small_begin << "pad_included:"
               << avg_pool->get_include_padding_in_avg_computation() << font_end
               << table_row_end;
        break;
    }
    case OP_TYPEID::AvgPoolBackprop:
    {
        const shared_ptr<op::AvgPoolBackprop> avg_pool_b =
            static_pointer_cast<op::AvgPoolBackprop>(node);

        writer << table_row_begin() << font_small_begin << "win_shape"
               << vector_to_string(avg_pool_b->get_window_shape()) << font_end << table_row_end
               << table_row_begin() << font_small_begin << "win_strides"
               << vector_to_string(avg_pool_b->get_window_movement_strides()) << font_end
               << table_row_end
               << table_row_begin() << font_small_begin << "pad_above"
               << vector_to_string(avg_pool_b->get_padding_above()) << font_end << table_row_end
               << table_row_begin() << font_small_begin << "pad_below"
               << vector_to_string(avg_pool_b->get_padding_below()) << font_end << table_row_end
               << table_row_begin() << font_small_begin << "pad_included:"
               << avg_pool_b->get_include_padding_in_avg_computation() << font_end
               << table_row_end;
        break;
    }
    case OP_TYPEID::Broadcast:
    {
        const shared_ptr<op::Broadcast> broadcast = static_pointer_cast<op::Broadcast>(node);

        writer << table_row_begin() << font_small_begin << "broadcast_axis"
               << vector_to_string(broadcast->get_broadcast_axes()) << font_end
               << table_row_end;
        break;
    }
    case OP_TYPEID::Sum:
    {
        const shared_ptr<op::Sum> sum = static_pointer_cast<op::Sum>(node);

        writer << table_row_begin() << font_small_begin << "reduction_axis"
               << vector_to_string(sum->get_reduction_axes()) << font_end << table_row_end;
        break;
    }
    case OP_TYPEID::Product:
    {
        const shared_ptr<op::Product> prod = static_pointer_cast<op::Product>(node);

        writer << table_row_begin() << font_small_begin << "reduction_axis"
               << vector_to_string(prod->get_reduction_axes()) << font_end << table_row_end;
        break;
    }
    case OP_TYPEID::Reshape:
    {
        const shared_ptr<op::Reshape> op_reshape = static_pointer_cast<op::Reshape>(node);

        writer << table_row_begin() << font_small_begin << "broadcast_axes:"
               << vector_to_string(op_reshape->get_input_order()) << font_end << table_row_end
               << table_row_begin() << font_small_begin << "transpose:"
               << op_reshape->get_is_transpose() << font_end << table_row_end;
        break;
    }
    case OP_TYPEID::Concat:
    {
        const shared_ptr<op::Concat> concat_op = static_pointer_cast<op::Concat>(node);

        writer << table_row_begin() << font_small_begin << "concat_axis:"
               << concat_op->get_concatenation_axis() << font_end << table_row_end;
        break;
    }
    case OP_TYPEID::Convolution:
    {
        const shared_ptr<op::Convolution> conv_op = static_pointer_cast<op::Convolution>(node);

        writer << table_row_begin() << font_small_begin << "win_stride"
               << vector_to_string(conv_op->get_window_movement_strides()) << font_end
               << table_row_end
               << table_row_begin() << font_small_begin << "win_dilation"
               << vector_to_string(conv_op->get_window_dilation_strides()) << font_end
               << table_row_end
               << table_row_begin() << font_small_begin << "data_dilation"
               << vector_to_string(conv_op->get_data_dilation_strides()) << font_end
               << table_row_end
               << table_row_begin() << font_small_begin << "pad_below"
               << vector_to_string(conv_op->get_padding_below()) << font_end << table_row_end
               << table_row_begin() << font_small_begin << "pad_above"
               << vector_to_string(conv_op->get_padding_above()) << font_end << table_row_end
               << table_row_begin() << font_small_begin << "def_val"
               << vector_to_string(conv_op->get_default_value()->get_shape()) << font_end
               << table_row_end;
    }
    }
}
void print_node(ostringstream& writer, const shared_ptr<Node>& node)
{
    writer << node->get_name() << " [";

    if (node->is_parameter())
    {
        writer << "shape=box color=blue ";
    }
    else if (node->is_output())
    {
        writer << "shape=box style=filled fillcolor=pink ";
    }
    else
    {
        writer << "shape=ellipse color=black";
    }

    // Print text inside figure using HTML layout
    writer << " label=<" << table_begin;

    if (!node->get_inputs().empty())
    {
        size_t arg_idx = 0;
        for (const descriptor::Input& op_input : node->get_inputs())
        {
            writer << table_row_begin() << font_small_begin
                   << op_input.get_element_type().c_type_string() << " input" << arg_idx
                   << vector_to_string(op_input.get_shape()) << font_end << table_row_end;
            ++arg_idx;
        }
    }

    if (!node->get_outputs().empty())
    {
        size_t arg_idx = 0;
        for (const descriptor::Output& op_output : node->get_outputs())
        {
            writer << table_row_begin() << font_small_begin
                   << op_output.get_element_type().c_type_string() << " output" << arg_idx
                   << vector_to_string(op_output.get_shape()) << font_end << table_row_end;
            ++arg_idx;
        }
    }

    writer << table_row_begin("center") << node->get_name() << table_row_end;

    print_node_parameters(writer, node);

    writer << table_end;
    writer << " >]\n";
}
void runtime::intelgpu::visualize_tree(const shared_ptr<Function>& func,
                                       const string& file_prefix,
                                       const string& file_suffix)
{
    map<string, size_t> operations;
    ostringstream writer;

    // Begin of the main graph
    writer << "digraph ngraph\n{\n";

    for (const shared_ptr<Node> op : func->get_ordered_ops())
    {
        print_node(writer, op);

        for (const descriptor::Input& op_input : op->get_inputs())
        {
            writer << op_input.get_output().get_node()->get_name() << " -> " << op->get_name()
                   << ";\n";
        }

        // collect summary statistic for operations used in the graph
        const string op_name = op->get_name().substr(0, op->get_name().find_first_of("_"));
        auto it = operations.find(op_name);
        if (it == operations.end())
        {
            it = operations.emplace(op_name, 0).first;
        }
        ++(it->second);
    }

    // print summary with operations used
    writer << "subgraph clusterFooter\n{\nmargin=0\nstyle=\"invis\"\nLEGEND ["
           << "shape=box style=filled fillcolor=gray margin=0 label=<" << table_begin
           << table_row_begin("center") << "Operations summary" << table_row_end;

    size_t total_op_count = 0;
    for (const auto& it : operations)
    {
        writer << table_row_begin() << font_small_begin << it.first << ":" << font_end
               << cell_end << cell_begin() << font_small_begin << it.second << font_end
               << table_row_end;
        total_op_count += it.second;
    }

    writer << table_row_begin() << "Total:" << cell_end << cell_begin() << total_op_count
           << table_row_end << table_end << " >]\n}\n";

    // End of the main graph
    writer << "}\n";

    ofstream out_file(file_prefix + func->get_name() + file_suffix + ".dot");
    if (out_file)
    {
        out_file << writer.str();
        out_file.close();
    }
}
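For reference, a small standalone sketch of the X-macro pattern that get_typeid() above relies on. Here op_tbl.hpp is simulated by a local OP_LIST macro with three illustrative entries; the real header supplies one NGRAPH_OP(name, namespace) line per op, and the real code throws unsupported_op rather than std::out_of_range.

#include <iostream>
#include <string>
#include <unordered_map>

// Stand-in for ngraph/op/op_tbl.hpp: each entry is one NGRAPH_OP(name, namespace) call.
#define OP_LIST(X) X(Abs, ngraph::op) X(Acos, ngraph::op) X(Add, ngraph::op)

// First expansion: NGRAPH_OP(a, b) -> "a," builds the enumerator list.
#define NGRAPH_OP(a, b) a,
enum class OP_TYPEID
{
    OP_LIST(NGRAPH_OP)
};
#undef NGRAPH_OP

// Second expansion: NGRAPH_OP(a, b) -> {"a", OP_TYPEID::a}, builds the lookup table.
static OP_TYPEID get_typeid(const std::string& s)
{
#define NGRAPH_OP(a, b) {#a, OP_TYPEID::a},
    static const std::unordered_map<std::string, OP_TYPEID> typeid_map{OP_LIST(NGRAPH_OP)};
#undef NGRAPH_OP
    return typeid_map.at(s); // throws std::out_of_range for unknown ops in this sketch
}

int main()
{
    std::cout << (get_typeid("Add") == OP_TYPEID::Add) << "\n"; // prints 1
}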
src/ngraph/runtime/intelgpu/visualize_tree.hpp (new file, mode 100644)
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/function.hpp"
namespace ngraph
{
    namespace runtime
    {
        namespace intelgpu
        {
            // This function writes the input func into file in Graphviz format.
            // On large graphs, the "dot" utility requires lot of time to visualize the input.
            // Example: dot -Tpdf intelgpu_Function_0_orig.dot -o intelgpu_Function_0_orig.pdf
            void visualize_tree(const std::shared_ptr<Function>& func,
                                const std::string& file_prefix,
                                const std::string& file_suffix);
        }
    }
}
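A hedged sketch of calling the new helper directly, based only on the declaration above; the Parameter/Add/Function construction and the umbrella header follow the general nGraph API of this era and are illustrative rather than taken from the commit.

#include <memory>
#include "ngraph/ngraph.hpp" // assumed umbrella header
#include "ngraph/runtime/intelgpu/visualize_tree.hpp"

int main()
{
    using namespace ngraph;

    // A tiny two-input graph: f(a, b) = a + b.
    Shape shape{2, 2};
    auto a = std::make_shared<op::Parameter>(element::f32, shape);
    auto b = std::make_shared<op::Parameter>(element::f32, shape);
    auto add = std::make_shared<op::Add>(a, b);
    auto f = std::make_shared<Function>(add, ParameterVector{a, b});

    // Writes <file_prefix><function name><file_suffix>.dot,
    // e.g. intelgpu_Function_0_orig.dot, which can then be rendered with:
    //   dot -Tpdf intelgpu_Function_0_orig.dot -o intelgpu_Function_0_orig.pdf
    runtime::intelgpu::visualize_tree(f, "intelgpu_", "_orig");
    return 0;
}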