submodule / ngraph · Commits

Commit 46e0dea7 authored Feb 23, 2018 by Jayaram Bobba
Enable optimal layouts on MKLDNN convolution backprop ops
parent d0f8dff2
Showing 7 changed files with 173 additions and 53 deletions.
src/ngraph/runtime/cpu/cpu_emitter.cpp          +55 -48
src/ngraph/runtime/cpu/cpu_tensor_view.cpp      +3 -2
src/ngraph/runtime/cpu/mkldnn_utils.cpp         +44 -3
src/ngraph/runtime/cpu/mkldnn_utils.hpp         +6 -0
src/ngraph/runtime/cpu/pass/cpu_assignment.cpp  +58 -0
src/ngraph/runtime/cpu/pass/cpu_layout.cpp      +0 -0
src/ngraph/runtime/cpu/pass/cpu_layout.hpp      +7 -0
src/ngraph/runtime/cpu/cpu_emitter.cpp
@@ -2001,11 +2001,7 @@ namespace ngraph
     auto arg1_shape = args[1].get_shape();
     auto result_shape = out[0].get_shape();

-    auto op_annotations =
-        static_cast<const ngraph::op::Op*>(node)->get_op_annotations();
-    if (op_annotations &&
-        static_pointer_cast<ngraph::runtime::cpu::CPUOpAnnotations>(op_annotations)
-            ->is_mkldnn_op())
+    if (runtime::cpu::mkldnn_utils::use_mkldnn_kernel(node))
     {
         // For dilation, MKLDNN wants to know how many elements to insert between, not how far
         // apart to space the elements like nGraph. So we have to subtract 1 from each pos.
@@ -2014,22 +2010,13 @@ namespace ngraph
     {
         window_dilation_strides_adjusted.push_back(s - 1);
     }

-    auto input_tvl = node->get_inputs()[0]
-                         .get_output()
-                         .get_tensor_view()
-                         ->get_tensor_view_layout();
-    auto weights_tvl = node->get_inputs()[1]
-                           .get_output()
-                           .get_tensor_view()
-                           ->get_tensor_view_layout();
-    auto output_tvl = node->get_output_tensor_view(0)->get_tensor_view_layout();
-    auto input_format =
-        dynamic_cast<runtime::cpu::LayoutDescriptor&>(*input_tvl).get_mkldnn_format();
-    auto weights_format =
-        dynamic_cast<runtime::cpu::LayoutDescriptor&>(*weights_tvl).get_mkldnn_format();
-    auto output_format =
-        dynamic_cast<runtime::cpu::LayoutDescriptor&>(*output_tvl).get_mkldnn_format();
+    auto input_format = runtime::cpu::mkldnn_utils::get_input_mkldnn_format(node, 0);
+    auto weights_format = runtime::cpu::mkldnn_utils::get_input_mkldnn_format(node, 1);
+    auto output_format = runtime::cpu::mkldnn_utils::get_output_mkldnn_format(node, 0);

     auto& mkldnn_emitter = external_function->get_mkldnn_emitter();
     auto input_data_desc = ...
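An aside on the dilation adjustment both backprop emitters perform: nGraph's window_dilation_strides records how far apart sampled filter elements are, while MKLDNN expects how many elements are inserted between them, hence the s - 1 in the loop above. A minimal standalone sketch with hypothetical stride values:

    #include <cstddef>
    #include <vector>

    int main()
    {
        // nGraph convention: filter elements sampled 2 apart on each spatial axis.
        std::vector<std::size_t> window_dilation_strides{2, 2};

        // MKLDNN convention: number of elements inserted between neighbors.
        std::vector<std::size_t> window_dilation_strides_adjusted;
        for (std::size_t s : window_dilation_strides)
        {
            window_dilation_strides_adjusted.push_back(s - 1); // becomes {1, 1}
        }
        return 0;
    }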
@@ -2091,17 +2078,8 @@ namespace ngraph
     auto arg0_shape = args[0].get_shape();
     auto arg1_shape = args[1].get_shape();
     auto result_shape = out[0].get_shape();

-    auto arg0_rank = arg0_shape.size();
-    auto arg1_rank = arg1_shape.size();
-
-    bool data_dilated = false;
-    for (size_t s : convolution->get_data_dilation_strides_forward())
-    {
-        data_dilated = data_dilated || (s != 1);
-    }
-
-    if (!data_dilated && arg0_rank == 4 && arg1_rank == 4 &&
-        args[0].get_element_type() == element::f32)
+    if (runtime::cpu::mkldnn_utils::use_mkldnn_kernel(node))
     {
         const string& elem_type = runtime::cpu::mkldnn_utils::get_mkldnn_data_type_string(...
@@ -2112,12 +2090,19 @@ namespace ngraph
     {
         window_dilation_strides_adjusted.push_back(s - 1);
     }

+    auto data_format = runtime::cpu::mkldnn_utils::get_input_mkldnn_format(node, 0);
+    auto delta_format = runtime::cpu::mkldnn_utils::get_input_mkldnn_format(node, 1);
+    auto result_format = runtime::cpu::mkldnn_utils::get_output_mkldnn_format(node, 0);
+
     auto emit_memory_desc = [&writer](const std::string& var,
                                       const std::string& shape,
                                       const std::string& type,
                                       const std::string& layout)
     {
-        writer << "memory::desc " << var << " = memory::desc({" << shape << "}, "
-               << type << ", memory::format::" << layout << ");\n";
+        writer << "memory::desc " << var << " = memory::desc({" << shape << "}, "
+               << type << ", " << layout << ");\n";
     };

     auto emit_memory = [&writer](...
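For reference, the string-emission change above means the layout argument now carries a complete format expression rather than a name spliced after a hard-coded "memory::format::" prefix. Assuming get_mkldnn_format_string returns a fully qualified enumerator string such as "memory::format::nchw" (an inference from this change; the lookup table lives in mkldnn_utils.cpp), a call like

    emit_memory_desc("data_desc", join(arg0_shape), elem_type,
                     runtime::cpu::mkldnn_utils::get_mkldnn_format_string(data_format));

would generate a line of the form

    memory::desc data_desc = memory::desc({2, 64, 28, 28}, f32, memory::format::nchw);

with the shape values purely illustrative.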
@@ -2135,9 +2120,21 @@ namespace ngraph
     writer << "try\n";
     writer.block_begin();
     writer << "engine cpu_engine = engine(engine::cpu, 0);\n";
-    emit_memory_desc("data_desc", join(arg0_shape), elem_type, "nchw");
-    emit_memory_desc("delta_desc", join(arg1_shape), elem_type, "nchw");
-    emit_memory_desc("result_desc", join(result_shape), elem_type, "oihw");
+    emit_memory_desc("data_desc",
+                     join(arg0_shape),
+                     elem_type,
+                     runtime::cpu::mkldnn_utils::get_mkldnn_format_string(data_format));
+    emit_memory_desc("delta_desc",
+                     join(arg1_shape),
+                     elem_type,
+                     runtime::cpu::mkldnn_utils::get_mkldnn_format_string(delta_format));
+    emit_memory_desc("result_desc",
+                     join(result_shape),
+                     elem_type,
+                     runtime::cpu::mkldnn_utils::get_mkldnn_format_string(result_format));
     emit_memory("data", "data_desc", args[0].get_name());
     emit_memory("delta", "delta_desc", args[1].get_name());
     emit_memory("result", "result_desc", out[0].get_name());
@@ -2202,17 +2199,8 @@ namespace ngraph
     auto arg0_shape = args[0].get_shape();
     auto arg1_shape = args[1].get_shape();
     auto result_shape = out[0].get_shape();

-    auto arg0_rank = arg0_shape.size();
-    auto arg1_rank = arg1_shape.size();
-
-    bool data_dilated = false;
-    for (size_t s : convolution->get_data_dilation_strides_forward())
-    {
-        data_dilated = data_dilated || (s != 1);
-    }
-
-    if (!data_dilated && arg0_rank == 4 && arg1_rank == 4 &&
-        args[0].get_element_type() == element::f32)
+    if (runtime::cpu::mkldnn_utils::use_mkldnn_kernel(node))
     {
         const string& elem_type = runtime::cpu::mkldnn_utils::get_mkldnn_data_type_string(...
@@ -2224,12 +2212,19 @@ namespace ngraph
     {
         window_dilation_strides_adjusted.push_back(s - 1);
     }

+    auto weight_format = runtime::cpu::mkldnn_utils::get_input_mkldnn_format(node, 0);
+    auto delta_format = runtime::cpu::mkldnn_utils::get_input_mkldnn_format(node, 1);
+    auto result_format = runtime::cpu::mkldnn_utils::get_output_mkldnn_format(node, 0);
+
     auto emit_memory_desc = [&writer](const std::string& var,
                                       const std::string& shape,
                                       const std::string& type,
                                       const std::string& layout)
     {
-        writer << "memory::desc " << var << " = memory::desc({" << shape << "}, "
-               << type << ", memory::format::" << layout << ");\n";
+        writer << "memory::desc " << var << " = memory::desc({" << shape << "}, "
+               << type << ", " << layout << ");\n";
     };

     auto emit_memory = [&writer](...
@@ -2247,9 +2242,21 @@ namespace ngraph
     writer << "try\n";
     writer.block_begin();
     writer << "engine cpu_engine = engine(engine::cpu, 0);\n";
-    emit_memory_desc("weight_desc", join(arg0_shape), elem_type, "oihw");
-    emit_memory_desc("delta_desc", join(arg1_shape), elem_type, "nchw");
-    emit_memory_desc("result_desc", join(result_shape), elem_type, "nchw");
+    emit_memory_desc("weight_desc",
+                     join(arg0_shape),
+                     elem_type,
+                     runtime::cpu::mkldnn_utils::get_mkldnn_format_string(weight_format));
+    emit_memory_desc("delta_desc",
+                     join(arg1_shape),
+                     elem_type,
+                     runtime::cpu::mkldnn_utils::get_mkldnn_format_string(delta_format));
+    emit_memory_desc("result_desc",
+                     join(result_shape),
+                     elem_type,
+                     runtime::cpu::mkldnn_utils::get_mkldnn_format_string(result_format));
     emit_memory("weight", "weight_desc", args[0].get_name());
     emit_memory("delta", "delta_desc", args[1].get_name());
     emit_memory("result", "result_desc", out[0].get_name());
src/ngraph/runtime/cpu/cpu_tensor_view.cpp
@@ -107,8 +107,9 @@ void runtime::cpu::CPUTensorView::read(void* target, size_t tensor_offset, size_
     auto tvl = this->get_tensor_view_layout();
     auto cpu_tvl = dynamic_cast<runtime::cpu::LayoutDescriptor*>(tvl.get());
     if (cpu_tvl && cpu_tvl->get_mkldnn_format() != memory::format::format_undef &&
-        cpu_tvl->get_mkldnn_format() !=
-            runtime::cpu::mkldnn_utils::CreateNativeDataFormat(*cpu_tvl))
+        !runtime::cpu::mkldnn_utils::compare_mkldnn_formats(
+            cpu_tvl->get_mkldnn_format(),
+            runtime::cpu::mkldnn_utils::CreateNativeDataFormat(*cpu_tvl)))
     {
         auto tensor_shape = this->get_shape();
         auto input_format = cpu_tvl->get_mkldnn_format();
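The motivation for swapping the raw != for compare_mkldnn_formats (added to mkldnn_utils.cpp later in this commit): MKLDNN's nchw and oihw enumerators name the same dense 4-d layout, one for data and one for weights, so a tensor tagged oihw should not trigger a reorder just because the native format is nchw. Illustrative calls, with results following from the similar_4d_formats set in the new function:

    // Same physical layout, different enumerator: treated as equal, no reorder.
    runtime::cpu::mkldnn_utils::compare_mkldnn_formats(
        mkldnn::memory::format::oihw, mkldnn::memory::format::nchw);   // true

    // A blocked layout genuinely differs from plain nchw: reorder required.
    runtime::cpu::mkldnn_utils::compare_mkldnn_formats(
        mkldnn::memory::format::nChw8c, mkldnn::memory::format::nchw); // false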
src/ngraph/runtime/cpu/mkldnn_utils.cpp
@@ -19,18 +19,21 @@
 #include <typeinfo>
 #include <unordered_set>

 #include "ngraph/node.hpp"
 #include "ngraph/ops/avg_pool.hpp"
 #include "ngraph/ops/batch_norm.hpp"
 #include "ngraph/ops/convolution.hpp"
 #include "ngraph/ops/max_pool.hpp"
 #include "ngraph/ops/relu.hpp"
 #include "ngraph/runtime/cpu/cpu_layout_descriptor.hpp"
 #include "ngraph/runtime/cpu/cpu_op_annotations.hpp"
 #include "ngraph/types/element_type.hpp"

 #include "mkldnn_utils.hpp"

 using namespace mkldnn;
 using namespace ngraph;
 using namespace std;

 #define TI(x) std::type_index(typeid(x))
@@ -120,7 +123,8 @@ mkldnn::memory::format runtime::cpu::mkldnn_utils::CreateNativeDataFormat(
     }
 }

-const std::string& runtime::cpu::mkldnn_utils::get_mkldnn_data_type_string(const ngraph::element::Type& type)
+const std::string&
+    runtime::cpu::mkldnn_utils::get_mkldnn_data_type_string(const ngraph::element::Type& type)
 {
     auto it = s_mkldnn_data_type_string_map.find(type);
     if (it == s_mkldnn_data_type_string_map.end() || it->second.empty())
@@ -128,7 +132,8 @@ const std::string& runtime::cpu::mkldnn_utils::get_mkldnn_data_type_string(const
     return it->second;
 }

-mkldnn::memory::data_type runtime::cpu::mkldnn_utils::get_mkldnn_data_type(const ngraph::element::Type& type)
+mkldnn::memory::data_type
+    runtime::cpu::mkldnn_utils::get_mkldnn_data_type(const ngraph::element::Type& type)
 {
     auto it = s_mkldnn_data_type_map.find(type);
     if (it == s_mkldnn_data_type_map.end() || it->second == memory::data_type::data_undef)
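A usage note for the two lookups above; the concrete tables (s_mkldnn_data_type_string_map, s_mkldnn_data_type_map) sit outside this hunk, so the commented results are assumptions for the common f32 case:

    auto dt = runtime::cpu::mkldnn_utils::get_mkldnn_data_type(ngraph::element::f32);
    // dt == mkldnn::memory::data_type::f32 (assumed map entry)

    const std::string& dts =
        runtime::cpu::mkldnn_utils::get_mkldnn_data_type_string(ngraph::element::f32);
    // dts == "f32" (assumed map entry)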
@@ -146,3 +151,38 @@ const std::string& runtime::cpu::mkldnn_utils::get_mkldnn_format_string(memory::
                                  std::to_string(fmt));
     return it->second;
 }
+
+mkldnn::memory::format runtime::cpu::mkldnn_utils::get_input_mkldnn_format(const Node* node,
+                                                                           int index)
+{
+    auto tvl =
+        node->get_inputs()[index].get_output().get_tensor_view()->get_tensor_view_layout();
+    return dynamic_cast<runtime::cpu::LayoutDescriptor&>(*tvl).get_mkldnn_format();
+}
+
+mkldnn::memory::format runtime::cpu::mkldnn_utils::get_output_mkldnn_format(const Node* node,
+                                                                            int index)
+{
+    auto tvl = node->get_output_tensor_view(0)->get_tensor_view_layout();
+    return dynamic_cast<runtime::cpu::LayoutDescriptor&>(*tvl).get_mkldnn_format();
+}
+
+bool runtime::cpu::mkldnn_utils::use_mkldnn_kernel(const ngraph::Node* node)
+{
+    auto op_annotations = static_cast<const ngraph::op::Op*>(node)->get_op_annotations();
+    return (op_annotations &&
+            static_pointer_cast<ngraph::runtime::cpu::CPUOpAnnotations>(op_annotations)
+                ->is_mkldnn_op());
+}
+
+bool runtime::cpu::mkldnn_utils::compare_mkldnn_formats(mkldnn::memory::format fmt1,
+                                                        mkldnn::memory::format fmt2)
+{
+    set<mkldnn::memory::format> similar_4d_formats{mkldnn::memory::format::nchw,
+                                                   mkldnn::memory::format::oihw};
+    if ((fmt1 == fmt2) || (similar_4d_formats.find(fmt1) != similar_4d_formats.end() &&
+                           similar_4d_formats.find(fmt2) != similar_4d_formats.end()))
+    {
+        return true;
+    }
+    return false;
+}
\ No newline at end of file
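Taken together, the new helpers close a loop the emitter previously open-coded at each call site: an assignment pass tags an op with CPUOpAnnotations, and the emitter checks the tag and reads back the layout-assigned formats. A sketch of the round trip using only calls introduced or used in this commit:

    // 1. Assignment pass (cpu_assignment.cpp below) marks the op for MKLDNN.
    auto op_annotations = std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
    op_annotations->set_mkldnn_op(true);
    convolution->set_op_annotations(op_annotations);

    // 2. Emitter (cpu_emitter.cpp above) queries the tag and the chosen layouts.
    if (runtime::cpu::mkldnn_utils::use_mkldnn_kernel(node))
    {
        auto input_format = runtime::cpu::mkldnn_utils::get_input_mkldnn_format(node, 0);
        auto output_format = runtime::cpu::mkldnn_utils::get_output_mkldnn_format(node, 0);
        // ... build MKLDNN memory descriptors from these formats ...
    }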
src/ngraph/runtime/cpu/mkldnn_utils.hpp
@@ -38,6 +38,12 @@ namespace ngraph
             const std::string& get_mkldnn_data_type_string(const ngraph::element::Type& type);
             mkldnn::memory::data_type get_mkldnn_data_type(const ngraph::element::Type& type);
             const std::string& get_mkldnn_format_string(mkldnn::memory::format fmt);
+
+            mkldnn::memory::format get_input_mkldnn_format(const Node* node, int index);
+            mkldnn::memory::format get_output_mkldnn_format(const Node* node, int index);
+            bool use_mkldnn_kernel(const ngraph::Node* node);
+            bool compare_mkldnn_formats(mkldnn::memory::format fmt1,
+                                        mkldnn::memory::format fmt2);
         }
     }
 }
src/ngraph/runtime/cpu/pass/cpu_assignment.cpp
@@ -66,6 +66,60 @@ namespace ngraph
                     convolution->set_op_annotations(op_annotations);
                 }
             }

+            template <>
+            void CPUAssignment::ASSIGN_DECL(ngraph::op::ConvolutionBackpropData)
+            {
+                auto convolution = static_cast<op::ConvolutionBackpropData*>(node);
+
+                auto arg0_shape = node->get_input_shape(0);
+                auto arg1_shape = node->get_input_shape(1);
+                auto result_shape = node->get_output_shape(0);
+                auto arg0_rank = arg0_shape.size();
+                auto arg1_rank = arg1_shape.size();
+
+                bool data_dilated = false;
+                for (size_t s : convolution->get_data_dilation_strides_forward())
+                {
+                    data_dilated = data_dilated || (s != 1);
+                }
+
+                if (!data_dilated && arg0_rank == 4 && arg1_rank == 4 &&
+                    node->get_input_element_type(0) == element::f32)
+                {
+                    auto op_annotations =
+                        std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
+                    op_annotations->set_mkldnn_op(true);
+                    convolution->set_op_annotations(op_annotations);
+                }
+            }
+
+            template <>
+            void CPUAssignment::ASSIGN_DECL(ngraph::op::ConvolutionBackpropFilters)
+            {
+                auto convolution = static_cast<op::ConvolutionBackpropFilters*>(node);
+
+                auto arg0_shape = node->get_input_shape(0);
+                auto arg1_shape = node->get_input_shape(1);
+                auto result_shape = node->get_output_shape(0);
+                auto arg0_rank = arg0_shape.size();
+                auto arg1_rank = arg1_shape.size();
+
+                bool data_dilated = false;
+                for (size_t s : convolution->get_data_dilation_strides_forward())
+                {
+                    data_dilated = data_dilated || (s != 1);
+                }
+
+                if (!data_dilated && arg0_rank == 4 && arg1_rank == 4 &&
+                    node->get_input_element_type(0) == element::f32)
+                {
+                    auto op_annotations =
+                        std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
+                    op_annotations->set_mkldnn_op(true);
+                    convolution->set_op_annotations(op_annotations);
+                }
+            }
         }
     }
 }
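Both specializations gate MKLDNN on the same test the forward convolution uses: rank-4 shapes, f32 elements, and unit data dilation. Restated as a standalone predicate (a hypothetical helper for illustration, not code from this commit):

    #include <cstddef>
    #include <vector>

    // Hypothetical restatement of the eligibility check above, with plain
    // vectors standing in for ngraph::Shape and ngraph::Strides.
    bool mkldnn_eligible(const std::vector<std::size_t>& arg0_shape,
                         const std::vector<std::size_t>& arg1_shape,
                         bool is_f32,
                         const std::vector<std::size_t>& data_dilation)
    {
        bool data_dilated = false;
        for (std::size_t s : data_dilation)
        {
            data_dilated = data_dilated || (s != 1); // any non-unit dilation disqualifies
        }
        return !data_dilated && arg0_shape.size() == 4 && arg1_shape.size() == 4 && is_f32;
    }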
@@ -76,6 +130,10 @@ namespace ngraph
 static const runtime::cpu::pass::AssignOpMap s_dispatcher{
     {TI(ngraph::op::Convolution),
      &runtime::cpu::pass::CPUAssignment::assign<ngraph::op::Convolution>},
+    {TI(ngraph::op::ConvolutionBackpropData),
+     &runtime::cpu::pass::CPUAssignment::assign<ngraph::op::ConvolutionBackpropData>},
+    {TI(ngraph::op::ConvolutionBackpropFilters),
+     &runtime::cpu::pass::CPUAssignment::assign<ngraph::op::ConvolutionBackpropFilters>},
 };

 bool runtime::cpu::pass::CPUAssignment::run_on_call_graph(...
src/ngraph/runtime/cpu/pass/cpu_layout.cpp

(This diff is collapsed in the original view.)
src/ngraph/runtime/cpu/pass/cpu_layout.hpp
@@ -53,6 +53,13 @@ namespace ngraph
             private:
                 std::shared_ptr<CPU_ExternalFunction> m_external_function;
+
+                static std::shared_ptr<Node> insert_input_conversions(
+                    CPU_ExternalFunction* external_function,
+                    std::shared_ptr<Node>& node,
+                    const std::vector<mkldnn::memory::format>& required_formats);
+                static void set_output_layouts(
+                    std::shared_ptr<Node>& node,
+                    const std::vector<mkldnn::memory::format>& output_formats);
+                static void set_default_layouts(CPU_ExternalFunction* external_function,
+                                                std::shared_ptr<Node> node);
             };