ngraph / Commits / 5b9000ac

Commit 5b9000ac authored May 31, 2019 by nmostafa
Parent: 8cb95f71

[MLIR] Fix issues after rebase on ngraph/master

Showing 12 changed files with 690 additions and 103 deletions (+690 -103)
src/contrib/mlir/compiler.cpp                                +1    -1
src/contrib/mlir/memory_manager.cpp                          +0    -3
src/ngraph/op/tan.cpp                                        +0    -1
src/ngraph/pass/mlir_subgraph_extraction.cpp                 +3    -2
src/ngraph/runtime/cpu/builder/matmul_bias.cpp               +0    -8
src/ngraph/runtime/cpu/builder/mlir_cpu_compiled_kernel.cpp  +9    -7
src/ngraph/runtime/cpu/cpu_builder.cpp                       +0    -1
src/ngraph/runtime/cpu/cpu_emitter.cpp                       +0    -1
src/ngraph/runtime/cpu/cpu_external_function.cpp             +1    -3
src/ngraph/serializer.cpp                                    +675  -75
src/ngraph/util.hpp                                          +1    -0
test/cpu_fusion.cpp                                          +0    -1
src/contrib/mlir/compiler.cpp
@@ -22,7 +22,7 @@
 #include "lowerer.hpp"
 #include "ngraph/descriptor/tensor.hpp"
 #include "ngraph/graph_util.hpp"
-#include "ngraph/node_vector.hpp"
+#include "ngraph/node.hpp"
 #include "ngraph/op/add.hpp"
 #include "ngraph/op/dot.hpp"
 #include "ngraph/op/experimental/compiled_kernel.hpp"
src/contrib/mlir/memory_manager.cpp
@@ -15,12 +15,9 @@
 //*****************************************************************************
 #include "memory_manager.hpp"
 #include "ngraph/ngraph_visibility.hpp"
 #include <memory>

 using namespace ngraph::runtime::ngmlir;

 /// Call back to allocate memory for temps from JIT'ed code
src/ngraph/op/tan.cpp
 //*****************************************************************************
 // Copyright 2017-2019 Intel Corporation
 //
src/ngraph/pass/mlir_subgraph_extraction.cpp
@@ -20,6 +20,7 @@
 #include "ngraph/op/dot.hpp"
 #include "ngraph/op/experimental/compiled_kernel.hpp"
 #include "ngraph/op/get_output_element.hpp"
+#include "ngraph/assertion.hpp"

 using namespace ngraph::descriptor;
 using namespace ngraph::op;
@@ -64,9 +65,9 @@ bool MLIRSubgraphExtractionPass::run_on_function(std::shared_ptr<Function> func)
     auto& out_desc = output_descs[0];
     // 'replace_output' invalidates iterator of the original container. Use a copy instead.
-    std::set<Input*> input_descs{out_desc.get_inputs()};
+    const std::set<descriptor::Input*> input_descs = out_desc.get_inputs();
-    for (Input* in_desc : input_descs)
+    for (descriptor::Input* in_desc : input_descs)
     {
         in_desc->replace_output(ck, i);
     }
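The comment in the hunk above carries the reasoning: replace_output detaches an input from its current output, mutating the very container being iterated, so the pass iterates over a copy. A minimal self-contained sketch of the same copy-before-mutate pattern, using a plain std::set rather than the real ngraph descriptor classes (hypothetical example, not the actual API):

    #include <iostream>
    #include <set>

    int main()
    {
        std::set<int> live{1, 2, 3, 4};

        // Erasing from 'live' inside a range-for over 'live' would invalidate
        // the loop's iterators. Safe pattern, as in the pass above: iterate a
        // snapshot copy, mutate the original.
        const std::set<int> snapshot = live;
        for (int v : snapshot)
        {
            if (v % 2 == 0)
            {
                live.erase(v); // fine: 'snapshot' is untouched
            }
        }

        for (int v : live)
        {
            std::cout << v << '\n'; // prints 1 and 3
        }
    }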
src/ngraph/runtime/cpu/builder/matmul_bias.cpp
@@ -38,14 +38,6 @@ namespace ngraph
     auto arg1_buffer_index = external_function->get_buffer_index(args[1].get_name());
     auto out0_buffer_index = external_function->get_buffer_index(out[0].get_name());
-    // TODO: Quick hook for MLIR.
-    if (std::getenv("NGRAPH_MLIR") != nullptr)
-    {
-        functors.emplace_back(
-            build_mlir_single_output_binary_op(node, arg0_tensor, arg1_tensor, out0_tensor));
-        return;
-    }
     const ngraph::op::MatmulBias* mm = static_cast<const ngraph::op::MatmulBias*>(node);
     const auto& arg0_shape = mm->get_a_shape();
src/ngraph/runtime/cpu/builder/mlir_cpu_compiled_kernel.cpp
@@ -40,29 +40,31 @@ namespace ngraph
     // Tensors haven't been allocated yet so we have to keep a pointer to the pointer
     // that will hold the future memory address.
-    std::vector<void**> double_ptr_args;
+    std::vector<size_t> buffer_indices;
     for (const TensorViewWrapper& arg : args)
     {
-        double_ptr_args.push_back(&external_function->get_tensor_data(arg.get_name()));
+        auto buffer_index = external_function->get_buffer_index(arg.get_name());
+        buffer_indices.push_back(buffer_index);
     }
     for (const TensorViewWrapper& result : out)
     {
-        double_ptr_args.push_back(&external_function->get_tensor_data(result.get_name()));
+        auto buffer_index = external_function->get_buffer_index(result.get_name());
+        buffer_indices.push_back(buffer_index);
     }

     // Create functor that will be executed to compile and run this CompiledKernel.
     // Note that 'double_ptr_args' must be captured by value since it's a local var.
-    auto functor = [node, double_ptr_args](CPURuntimeContext* ctx,
+    auto functor = [node, buffer_indices](CPURuntimeContext* ctx,
                                            CPUExecutionContext* ectx) {
         // MLIR requires a list of type-erased pointer to arguments. Tensors must have
         // been allocated at this point so we can get rid of the extra reference.
         std::vector<void*> ptr_args;
-        for (auto& double_ptr : double_ptr_args)
+        for (auto& buffer_index : buffer_indices)
         {
-            ptr_args.push_back(*double_ptr);
+            ptr_args.push_back(ctx->buffer_data[buffer_index]);
         }
         // Compile nodes within the CompiledKernel op.
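The hunk above replaces captured void** "double pointers" with buffer indices that are resolved against ctx->buffer_data only when the functor runs. A standalone sketch of why deferring resolution through an index is the more robust choice when buffers do not exist yet at functor-construction time (hypothetical RuntimeCtx type, not the real CPURuntimeContext):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Stand-in for CPURuntimeContext: buffers are (re)allocated after the
    // functor is built, so the functor must not bake in raw addresses.
    struct RuntimeCtx
    {
        std::vector<void*> buffer_data;
    };

    int main()
    {
        RuntimeCtx ctx;
        std::vector<size_t> buffer_indices{0, 1};

        // Capture indices by value; resolve pointers only at execution time.
        auto functor = [buffer_indices](RuntimeCtx* c) {
            std::vector<void*> ptr_args;
            for (size_t idx : buffer_indices)
            {
                ptr_args.push_back(c->buffer_data[idx]);
            }
            std::cout << "resolved " << ptr_args.size() << " args\n";
        };

        // Buffers materialize (or move) after functor construction...
        int a = 1, b = 2;
        ctx.buffer_data = {&a, &b};
        functor(&ctx); // ...and are still found via their indices.
    }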
src/ngraph/runtime/cpu/cpu_builder.cpp
@@ -103,7 +103,6 @@
 #include "ngraph/runtime/cpu/kernel/subtract.hpp"
 #include "ngraph/runtime/cpu/kernel/tan.hpp"
 #include "ngraph/runtime/cpu/kernel/tanh.hpp"
-#include "ngraph/runtime/cpu/mlir/compiler.hpp"
 #include "ngraph/runtime/cpu/op/convert_layout.hpp"
 #include "ngraph/runtime/cpu/op/halide_op.hpp"
 #include "ngraph/type/element_type.hpp"
src/ngraph/runtime/cpu/cpu_emitter.cpp
@@ -118,7 +118,6 @@
 #include "ngraph/runtime/cpu/op/batch_mat_mul_transpose.hpp"
 #include "ngraph/runtime/cpu/op/batch_norm_relu.hpp"
 #include "ngraph/runtime/cpu/op/bounded_relu.hpp"
-#include "ngraph/op/experimental/compiled_kernel.hpp"
 #include "ngraph/runtime/cpu/op/conv_add.hpp"
 #include "ngraph/runtime/cpu/op/conv_relu.hpp"
 #include "ngraph/runtime/cpu/op/convert_layout.hpp"
src/ngraph/runtime/cpu/cpu_external_function.cpp
@@ -170,7 +170,6 @@
 #include "ngraph/runtime/cpu/op/deconv.hpp"
 #include "ngraph/runtime/cpu/op/group_conv_bias.hpp"
 #include "ngraph/runtime/cpu/op/leaky_relu.hpp"
-#include "ngraph/runtime/cpu/op/loop_kernel.hpp"
 #include "ngraph/runtime/cpu/op/lstm.hpp"
 #include "ngraph/runtime/cpu/op/matmul_bias.hpp"
 #include "ngraph/runtime/cpu/op/max_pool_with_indices.hpp"
@@ -427,8 +426,7 @@ static const runtime::cpu::OpMap dispatcher{
     {TI(ngraph::op::And), &runtime::cpu::CPU_Emitter::emit<op::And>},
     {TI(ngraph::op::Or), &runtime::cpu::CPU_Emitter::emit<op::Or>},
     {TI(ngraph::op::CPULeakyRelu), &runtime::cpu::CPU_Emitter::emit<op::CPULeakyRelu>},
-    {TI(ngraph::runtime::cpu::op::LoopKernel),
-     &runtime::cpu::CPU_Emitter::emit<runtime::cpu::op::LoopKernel>},
     {TI(ngraph::op::CompiledKernel), &runtime::cpu::CPU_Emitter::emit<op::CompiledKernel>},
     {TI(ngraph::op::LRN), &runtime::cpu::CPU_Emitter::emit<ngraph::op::LRN>},
     {TI(ngraph::op::GenerateMask), &runtime::cpu::CPU_Emitter::emit<ngraph::op::GenerateMask>},
     {TI(ngraph::op::ConvolutionAdd), &runtime::cpu::CPU_Emitter::emit<op::ConvolutionAdd>},
src/ngraph/serializer.cpp
@@ -64,9 +64,31 @@
 #include "ngraph/op/experimental/quantized_dot_bias.hpp"
 #include "ngraph/op/experimental/quantized_max_pool.hpp"
 #include "ngraph/op/experimental/shape_of.hpp"
+#include "ngraph/op/experimental/tile.hpp"
 #include "ngraph/op/experimental/transpose.hpp"
 #include "ngraph/op/floor.hpp"
+#include "ngraph/op/fused/clamp.hpp"
+#include "ngraph/op/fused/conv_fused.hpp"
+#include "ngraph/op/fused/depth_to_space.hpp"
+#include "ngraph/op/fused/elu.hpp"
+#include "ngraph/op/fused/fake_quantize.hpp"
+#include "ngraph/op/fused/gemm.hpp"
+#include "ngraph/op/fused/grn.hpp"
+#include "ngraph/op/fused/group_conv.hpp"
+#include "ngraph/op/fused/hard_sigmoid.hpp"
+#include "ngraph/op/fused/leaky_relu.hpp"
+#include "ngraph/op/fused/mvn.hpp"
+#include "ngraph/op/fused/normalize.hpp"
+#include "ngraph/op/fused/prelu.hpp"
+#include "ngraph/op/fused/scale_shift.hpp"
+#include "ngraph/op/fused/shuffle_channels.hpp"
+#include "ngraph/op/fused/space_to_depth.hpp"
+#include "ngraph/op/fused/split.hpp"
+#include "ngraph/op/fused/squared_difference.hpp"
+#include "ngraph/op/fused/squeeze.hpp"
+#include "ngraph/op/fused/unsqueeze.hpp"
+#include "ngraph/op/gather.hpp"
+#include "ngraph/op/gather_nd.hpp"
 #include "ngraph/op/get_output_element.hpp"
 #include "ngraph/op/greater.hpp"
 #include "ngraph/op/greater_eq.hpp"
@@ -97,6 +119,8 @@
 #include "ngraph/op/result.hpp"
 #include "ngraph/op/reverse.hpp"
 #include "ngraph/op/reverse_sequence.hpp"
+#include "ngraph/op/scatter_add.hpp"
+#include "ngraph/op/scatter_nd_add.hpp"
 #include "ngraph/op/select.hpp"
 #include "ngraph/op/sigmoid.hpp"
 #include "ngraph/op/sign.hpp"
@@ -120,6 +144,14 @@ using namespace std;
 using json = nlohmann::json;
 using const_data_callback_t = shared_ptr<Node>(const string&, const element::Type&, const Shape&);

+static bool s_serialize_output_shapes_enabled =
+    (std::getenv("NGRAPH_SERIALIZER_OUTPUT_SHAPES") != nullptr);
+
+void ngraph::set_serialize_output_shapes(bool enable)
+{
+    s_serialize_output_shapes_enabled = enable;
+}
+
 // This expands the op list in op_tbl.hpp into a list of enumerations that look like this:
 // Abs,
 // Acos,
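The new s_serialize_output_shapes_enabled flag keeps the NGRAPH_SERIALIZER_OUTPUT_SHAPES environment variable as the default but lets callers flip the behavior programmatically via set_serialize_output_shapes. A minimal sketch of the same "env default, runtime override" pattern in isolation (hypothetical flag and function names):

    #include <cstdlib>
    #include <iostream>

    // Module-local flag: initialized once from the environment,
    // overridable at runtime, mirroring the serializer's flag above.
    static bool s_flag_enabled = (std::getenv("MYLIB_FLAG") != nullptr);

    void set_flag(bool enable)
    {
        s_flag_enabled = enable;
    }

    int main()
    {
        std::cout << "default (from env): " << s_flag_enabled << '\n';
        set_flag(true); // explicit override wins over the environment default
        std::cout << "after override: " << s_flag_enabled << '\n';
    }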
@@ -207,7 +239,7 @@ static json write_partial_shape(const PartialShape& s)
     {
         vals[i] = write_dimension(s[i]);
     }
-    return vals;
+    return move(vals);
 }
@@ -228,6 +260,27 @@ static PartialShape read_partial_shape(const json& j)
     }
 }

+static json write_auto_broadcast(const op::AutoBroadcastSpec& autob)
+{
+    json j;
+    j["type"] = autob.m_type;
+    j["axis"] = autob.m_axis;
+    return j;
+}
+
+static op::AutoBroadcastSpec read_auto_broadcast(const json& j)
+{
+    if (!j.is_object())
+    {
+        return op::AutoBroadcastSpec();
+    }
+    else
+    {
+        return op::AutoBroadcastSpec(static_cast<op::AutoBroadcastType>(j.at("type")),
+                                     j.at("axis").get<size_t>());
+    }
+}
+
 static json write_element_type(const ngraph::element::Type& n)
 {
     json j;
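write_auto_broadcast and read_auto_broadcast round-trip the broadcast spec as a small JSON object, and the reader tolerates a missing field by returning a default spec whenever the value is not an object. A self-contained sketch of the same round-trip with placeholder types (the enum values here are illustrative, not ngraph's actual AutoBroadcastType encoding):

    #include <cstddef>
    #include <iostream>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    // Placeholder mirror of op::AutoBroadcastSpec.
    enum class BroadcastType { NONE = 0, NUMPY = 1 };
    struct BroadcastSpec
    {
        BroadcastType m_type = BroadcastType::NONE;
        std::size_t m_axis = 0;
    };

    static json write_spec(const BroadcastSpec& s)
    {
        json j;
        j["type"] = static_cast<int>(s.m_type);
        j["axis"] = s.m_axis;
        return j;
    }

    static BroadcastSpec read_spec(const json& j)
    {
        if (!j.is_object())
        {
            return BroadcastSpec(); // absent/defaulted field -> NONE
        }
        return BroadcastSpec{static_cast<BroadcastType>(j.at("type").get<int>()),
                             j.at("axis").get<std::size_t>()};
    }

    int main()
    {
        json node;
        node["autob"] = write_spec(BroadcastSpec{BroadcastType::NUMPY, 1});
        BroadcastSpec s = read_spec(node["autob"]);
        std::cout << static_cast<int>(s.m_type) << " " << s.m_axis << '\n'; // 1 1
        std::cout << read_spec(node["missing"]).m_axis << '\n';             // 0
    }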
@@ -276,6 +329,12 @@ void ngraph::serialize(const string& path, shared_ptr<ngraph::Function> func, si
 }

 void ngraph::serialize(ostream& out, shared_ptr<ngraph::Function> func, size_t indent)
 {
     out << ::serialize(func, indent, false);
 }

+#if defined ENABLE_CPIO_FILE
 static void serialize_to_cpio(ostream& out, shared_ptr<ngraph::Function> func, size_t indent)
 {
     string j = ::serialize(func, indent, true);
     cpio::Writer writer(out);
@@ -295,6 +354,7 @@ void ngraph::serialize(ostream& out, shared_ptr<ngraph::Function> func, size_t i
             true);
     });
 }
+#endif

 static string serialize(shared_ptr<ngraph::Function> func, size_t indent, bool binary_constant_data)
 {
@@ -428,6 +488,18 @@ static json write(const Function& f, bool binary_constant_data)
     return function;
 }

+template <typename T>
+T get_value(nlohmann::json js, const string& key)
+{
+    T rc;
+    auto it = js.find(key);
+    if (it != js.end())
+    {
+        rc = it->get<T>();
+    }
+    return rc;
+}
+
 static shared_ptr<ngraph::Function>
     read_function(const json& func_js,
                   unordered_map<string, shared_ptr<Function>>& function_map,
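get_value centralizes the "optional field" lookup that previously required a find/end check per field: an absent key simply yields a default T. A hedged usage sketch (note one deliberate deviation: rc is value-initialized here so scalar Ts are safe, whereas the commit's template leaves rc default-initialized):

    #include <iostream>
    #include <nlohmann/json.hpp>
    #include <string>
    #include <vector>

    using json = nlohmann::json;
    using std::string;
    using std::vector;

    // Same shape as the serializer's helper above.
    template <typename T>
    T get_value(const json& js, const string& key)
    {
        T rc{};
        auto it = js.find(key);
        if (it != js.end())
        {
            rc = it->get<T>();
        }
        return rc;
    }

    int main()
    {
        json node_js = {{"name", "n0"}, {"inputs", {"a", "b"}}};

        auto inputs = get_value<vector<string>>(node_js, "inputs");
        auto control_deps = get_value<vector<string>>(node_js, "control_deps");

        std::cout << inputs.size() << '\n';       // 2
        std::cout << control_deps.size() << '\n'; // 0: key absent, empty default
    }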
@@ -444,28 +516,23 @@ static shared_ptr<ngraph::Function>
         try
         {
             string node_name = node_js.at("name").get<string>();
-            string friendly_name;
-            auto it = node_js.find("friendly_name");
-            if (it != node_js.end())
-            {
-                friendly_name = it->get<string>();
-            }
             string node_op = node_js.at("op").get<string>();
-            vector<string> node_inputs = node_js.at("inputs").get<vector<string>>();
-            vector<string> control_deps_inputs =
-                get_or_default<vector<string>>(node_js, "control_deps", vector<string>{});
-            vector<string> node_outputs = node_js.at("outputs").get<vector<string>>();
+            string friendly_name = get_value<string>(node_js, "friendly_name");
+            vector<string> node_inputs = get_value<vector<string>>(node_js, "inputs");
+            vector<string> control_deps_inputs = get_value<vector<string>>(node_js, "control_deps");
+            vector<string> node_outputs = get_value<vector<string>>(node_js, "outputs");
             shared_ptr<Node> node;
             vector<shared_ptr<Node>> args;
             vector<shared_ptr<Node>> control_deps;
             for (const string& name : node_inputs)
             {
                 args.push_back(node_map.at(name));
             }
+#if !(defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 8)
 #pragma GCC diagnostic push
 #pragma GCC diagnostic error "-Wswitch"
 #pragma GCC diagnostic error "-Wswitch-enum"
 // #pragma GCC diagnostic error "-Wimplicit-fallthrough"
+#endif
             switch (get_typeid(node_op))
             {
             case OP_TYPEID::Abs:
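The pragma block above is what makes the giant op-dispatch switch safe to extend: with -Wswitch-enum promoted to an error, adding an OP_TYPEID without a matching case fails at compile time, and the commit guards the pragmas away on GCC 4.8, which handles them poorly. A minimal sketch of the technique with a toy enum (hypothetical names):

    // With the pragma active, forgetting a case below becomes a
    // compile-time error instead of a silent runtime gap.
    enum class OpKind { Abs, Acos, Add };

    #if !(defined(__GNUC__) && (__GNUC__ == 4 && __GNUC_MINOR__ == 8))
    #pragma GCC diagnostic push
    #pragma GCC diagnostic error "-Wswitch-enum"
    #endif
    const char* name(OpKind k)
    {
        switch (k) // error here if any OpKind enumerator lacks a case
        {
        case OpKind::Abs: return "Abs";
        case OpKind::Acos: return "Acos";
        case OpKind::Add: return "Add";
        }
        return "Unknown";
    }
    #if !(defined(__GNUC__) && (__GNUC__ == 4 && __GNUC_MINOR__ == 8))
    #pragma GCC diagnostic pop
    #endif

    int main()
    {
        return name(OpKind::Add) != nullptr ? 0 : 1;
    }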
@@ -480,7 +547,8 @@ static shared_ptr<ngraph::Function>
             }
             case OP_TYPEID::Add:
             {
-                node = make_shared<op::Add>(args[0], args[1]);
+                node = make_shared<op::Add>(
+                    args[0], args[1], read_auto_broadcast(node_js["autob"]));
                 break;
             }
             case OP_TYPEID::All:
@@ -496,7 +564,8 @@ static shared_ptr<ngraph::Function>
             }
             case OP_TYPEID::And:
             {
-                node = make_shared<op::And>(args[0], args[1]);
+                node = make_shared<op::And>(
+                    args[0], args[1], read_auto_broadcast(node_js["autob"]));
                 break;
             }
             case OP_TYPEID::Any:
@@ -538,12 +607,16 @@ static shared_ptr<ngraph::Function>
                 auto padding_above = node_js.at("padding_above").get<vector<size_t>>();
                 auto include_padding_in_avg_computation =
                     node_js.at("include_padding_in_avg_computation").get<bool>();
+                op::PadType pad_type = node_js["pad_type"].empty()
+                                           ? op::PadType::EXPLICIT
+                                           : static_cast<op::PadType>(node_js.at("pad_type"));
                 node = make_shared<op::AvgPool>(args[0],
                                                 window_shape,
                                                 window_movement_strides,
                                                 padding_below,
                                                 padding_above,
-                                                include_padding_in_avg_computation);
+                                                include_padding_in_avg_computation,
+                                                pad_type);
                 break;
             }
             case OP_TYPEID::AvgPoolBackprop:
@@ -565,6 +638,12 @@ static shared_ptr<ngraph::Function>
                     include_padding_in_avg_computation);
                 break;
             }
+            case OP_TYPEID::BatchMatMul:
+            {
+                node = make_shared<op::BatchMatMul>(args[0], args[1]);
+                break;
+            }
             case OP_TYPEID::BatchNormTraining:
             {
                 auto epsilon = node_js.at("eps").get<double>();
@@ -611,6 +690,13 @@ static shared_ptr<ngraph::Function>
                 node = make_shared<op::Ceiling>(args[0]);
                 break;
             }
+            case OP_TYPEID::Clamp:
+            {
+                const auto clamp_min = node_js.at("min").get<float>();
+                const auto clamp_max = node_js.at("max").get<float>();
+                node = make_shared<op::Clamp>(args[0], clamp_min, clamp_max);
+                break;
+            }
             case OP_TYPEID::Concat:
             {
                 auto axis = node_js.at("axis").get<size_t>();
@@ -623,16 +709,8 @@ static shared_ptr<ngraph::Function>
                     node_js.count("element_type") == 0 ? node_js.at("value_type") : node_js;
                 auto element_type = read_element_type(type_node_js.at("element_type"));
                 auto shape = type_node_js.at("shape");
-                auto value_it = node_js.find("value");
-                if (value_it != node_js.end())
-                {
-                    auto value = value_it->get<vector<string>>();
-                    node = make_shared<op::Constant>(element_type, shape, value);
-                }
-                else
-                {
-                    node = const_data_callback(node_name, element_type, shape);
-                }
+                auto value = node_js.at("value").get<vector<string>>();
+                node = make_shared<op::Constant>(element_type, shape, value);
                 break;
             }
             case OP_TYPEID::Convert:
@@ -658,6 +736,10 @@ static shared_ptr<ngraph::Function>
                     data_dilation_strides_maybe = node_js["image_dilation_strides"];
                 }
+                op::PadType pad_type = node_js["pad_type"].empty()
+                                           ? op::PadType::EXPLICIT
+                                           : static_cast<op::PadType>(node_js.at("pad_type"));
                 if (data_dilation_strides_maybe.empty())
                 {
                     node = make_shared<op::Convolution>(args[0],
@@ -676,7 +758,8 @@ static shared_ptr<ngraph::Function>
                         window_dilation_strides,
                         padding_below,
                         padding_above,
-                        data_dilation_strides_maybe.get<std::vector<size_t>>());
+                        data_dilation_strides_maybe.get<std::vector<size_t>>(),
+                        pad_type);
                 }
                 break;
             }
@@ -726,6 +809,75 @@ static shared_ptr<ngraph::Function>
                     data_dilation_strides_forward);
                 break;
             }
+            case OP_TYPEID::ConvolutionBias:
+            {
+                auto window_movement_strides =
+                    node_js.at("window_movement_strides").get<vector<size_t>>();
+                auto window_dilation_strides =
+                    node_js.at("window_dilation_strides").get<vector<size_t>>();
+                auto padding_below = node_js.at("padding_below").get<vector<std::ptrdiff_t>>();
+                auto padding_above = node_js.at("padding_above").get<vector<std::ptrdiff_t>>();
+                auto data_dilation_strides =
+                    node_js.at("data_dilation_strides").get<vector<size_t>>();
+                node = make_shared<op::ConvolutionBias>(args[0],
+                                                        args[1],
+                                                        args[2],
+                                                        window_movement_strides,
+                                                        window_dilation_strides,
+                                                        padding_below,
+                                                        padding_above,
+                                                        data_dilation_strides);
+                break;
+            }
+            case OP_TYPEID::ConvolutionBiasAdd:
+            {
+                auto window_movement_strides =
+                    node_js.at("window_movement_strides").get<vector<size_t>>();
+                auto window_dilation_strides =
+                    node_js.at("window_dilation_strides").get<vector<size_t>>();
+                auto padding_below = node_js.at("padding_below").get<vector<std::ptrdiff_t>>();
+                auto padding_above = node_js.at("padding_above").get<vector<std::ptrdiff_t>>();
+                auto data_dilation_strides =
+                    node_js.at("data_dilation_strides").get<vector<size_t>>();
+                node = make_shared<op::ConvolutionBiasAdd>(args[0],
+                                                           args[1],
+                                                           args[2],
+                                                           args[3],
+                                                           window_movement_strides,
+                                                           window_dilation_strides,
+                                                           padding_below,
+                                                           padding_above,
+                                                           data_dilation_strides);
+                break;
+            }
+            case OP_TYPEID::ConvolutionBiasBackpropFiltersBias:
+            {
+                auto filters_shape = node_js.at("filters_shape").get<vector<size_t>>();
+                auto bias_shape = node_js.at("bias_shape").get<vector<size_t>>();
+                auto window_movement_strides_forward =
+                    node_js.at("window_movement_strides_forward").get<vector<size_t>>();
+                auto window_dilation_strides_forward =
+                    node_js.at("window_dilation_strides_forward").get<vector<size_t>>();
+                auto padding_below_forward =
+                    node_js.at("padding_below_forward").get<vector<std::ptrdiff_t>>();
+                auto padding_above_forward =
+                    node_js.at("padding_above_forward").get<vector<std::ptrdiff_t>>();
+                auto data_dilation_strides_forward =
+                    node_js.at("data_dilation_strides_forward").get<vector<size_t>>();
+                node = make_shared<op::ConvolutionBiasBackpropFiltersBias>(
+                    args[0],
+                    filters_shape,
+                    bias_shape,
+                    args[1],
+                    window_movement_strides_forward,
+                    window_dilation_strides_forward,
+                    padding_below_forward,
+                    padding_above_forward,
+                    data_dilation_strides_forward);
+                break;
+            }
             case OP_TYPEID::Cos:
             {
                 node = make_shared<op::Cos>(args[0]);
@@ -736,6 +888,12 @@ static shared_ptr<ngraph::Function>
                 node = make_shared<op::Cosh>(args[0]);
                 break;
             }
+            case OP_TYPEID::DepthToSpace:
+            {
+                auto block_size = node_js.at("block_size").get<size_t>();
+                node = make_shared<op::DepthToSpace>(args[0], block_size);
+                break;
+            }
             case OP_TYPEID::Dequantize:
             {
                 auto type = read_element_type(node_js.at("type"));
@@ -745,7 +903,8 @@ static shared_ptr<ngraph::Function>
             }
             case OP_TYPEID::Divide:
             {
-                node = make_shared<op::Divide>(args[0], args[1]);
+                node = make_shared<op::Divide>(
+                    args[0], args[1], read_auto_broadcast(node_js["autob"]));
                 break;
             }
             case OP_TYPEID::Dot:
@@ -783,6 +942,11 @@ static shared_ptr<ngraph::Function>
                 node = make_shared<op::DynSlice>(args[0], args[1], args[2], args[3]);
                 break;
             }
+            case OP_TYPEID::Elu:
+            {
+                node = make_shared<op::Elu>(args[0], args[1]);
+                break;
+            }
             case OP_TYPEID::EmbeddingLookup:
             {
                 node = make_shared<op::EmbeddingLookup>(args[0], args[1]);
@@ -790,7 +954,8 @@ static shared_ptr<ngraph::Function>
             }
             case OP_TYPEID::Equal:
             {
-                node = make_shared<op::Equal>(args[0], args[1]);
+                node = make_shared<op::Equal>(
+                    args[0], args[1], read_auto_broadcast(node_js["autob"]));
                 break;
             }
             case OP_TYPEID::Erf:
@@ -803,11 +968,39 @@ static shared_ptr<ngraph::Function>
                 node = make_shared<op::Exp>(args[0]);
                 break;
             }
+            case OP_TYPEID::FakeQuantize:
+            {
+                size_t levels = node_js.at("levels").get<size_t>();
+                node = make_shared<op::FakeQuantize>(
+                    args[0], args[1], args[2], args[3], args[4], levels);
+                break;
+            }
             case OP_TYPEID::Floor:
             {
                 node = make_shared<op::Floor>(args[0]);
                 break;
             }
+            case OP_TYPEID::Gather:
+            {
+                auto axis = node_js.at("axis").get<size_t>();
+                node = make_shared<op::Gather>(args[0], args[1], axis);
+                break;
+            }
+            case OP_TYPEID::GatherND:
+            {
+                node = make_shared<op::GatherND>(args[0], args[1]);
+                break;
+            }
+            case OP_TYPEID::Gemm:
+            {
+                auto alpha = node_js.at("alpha").get<double>();
+                auto beta = node_js.at("beta").get<double>();
+                auto transA = node_js.at("transA").get<bool>();
+                auto transB = node_js.at("transB").get<bool>();
+                node = make_shared<op::Gemm>(
+                    args[0], args[1], args[2], alpha, beta, transA, transB);
+                break;
+            }
             case OP_TYPEID::GenerateMask:
             {
                 auto output_shape = node_js.at("output_shape").get<vector<size_t>>();
@@ -826,22 +1019,71 @@ static shared_ptr<ngraph::Function>
             }
             case OP_TYPEID::Greater:
             {
-                node = make_shared<op::Greater>(args[0], args[1]);
+                node = make_shared<op::Greater>(
+                    args[0], args[1], read_auto_broadcast(node_js["autob"]));
                 break;
             }
             case OP_TYPEID::GreaterEq:
             {
-                node = make_shared<op::GreaterEq>(args[0], args[1]);
+                node = make_shared<op::GreaterEq>(
+                    args[0], args[1], read_auto_broadcast(node_js["autob"]));
                 break;
             }
+            case OP_TYPEID::GRN:
+            {
+                auto bias = node_js.at("bias").get<float>();
+                node = make_shared<op::GRN>(args[0], bias);
+                break;
+            }
+            case OP_TYPEID::HardSigmoid:
+            {
+                auto alpha = node_js.at("alpha").get<float>();
+                auto beta = node_js.at("beta").get<float>();
+                node = make_shared<op::HardSigmoid>(args[0], alpha, beta);
+                break;
+            }
+            case OP_TYPEID::GroupConvolution:
+            {
+                auto window_movement_strides =
+                    node_js.at("window_movement_strides").get<vector<size_t>>();
+                auto window_dilation_strides =
+                    node_js.at("window_dilation_strides").get<vector<size_t>>();
+                auto padding_below = node_js.at("padding_below").get<vector<std::ptrdiff_t>>();
+                auto padding_above = node_js.at("padding_above").get<vector<std::ptrdiff_t>>();
+                auto data_dilation_strides =
+                    node_js.at("data_dilation_strides").get<vector<size_t>>();
+                auto groups = node_js.at("groups").get<size_t>();
+                op::PadType pad_type = node_js["pad_type"].empty()
+                                           ? op::PadType::EXPLICIT
+                                           : static_cast<op::PadType>(node_js.at("pad_type"));
+                node = make_shared<op::GroupConvolution>(args[0],
+                                                         args[1],
+                                                         window_movement_strides,
+                                                         window_dilation_strides,
+                                                         padding_below,
+                                                         padding_above,
+                                                         data_dilation_strides,
+                                                         groups,
+                                                         pad_type);
+                break;
+            }
+            case OP_TYPEID::LeakyRelu:
+            {
+                node = make_shared<op::LeakyRelu>(args[0], args[1]);
+                break;
+            }
             case OP_TYPEID::Less:
             {
-                node = make_shared<op::Less>(args[0], args[1]);
+                node = make_shared<op::Less>(
+                    args[0], args[1], read_auto_broadcast(node_js["autob"]));
                 break;
             }
             case OP_TYPEID::LessEq:
             {
-                node = make_shared<op::LessEq>(args[0], args[1]);
+                node = make_shared<op::LessEq>(
+                    args[0], args[1], read_auto_broadcast(node_js["autob"]));
                 break;
             }
             case OP_TYPEID::Log:
@@ -873,6 +1115,9 @@ static shared_ptr<ngraph::Function>
                 // omitted.
                 auto padding_below_maybe = node_js["padding_below"];
                 auto padding_above_maybe = node_js["padding_above"];
+                op::PadType pad_type = node_js["pad_type"].empty()
+                                           ? op::PadType::EXPLICIT
+                                           : static_cast<op::PadType>(node_js.at("pad_type"));
                 if (padding_below_maybe.empty() && !padding_above_maybe.empty())
                 {
                     throw runtime_error(
@@ -891,7 +1136,8 @@ static shared_ptr<ngraph::Function>
                         window_shape,
                         window_movement_strides,
                         padding_below,
-                        padding_above);
+                        padding_above,
+                        pad_type);
                 }
                 else
                 {
@@ -929,7 +1175,8 @@ static shared_ptr<ngraph::Function>
             }
             case OP_TYPEID::Maximum:
             {
-                node = make_shared<op::Maximum>(args[0], args[1]);
+                node = make_shared<op::Maximum>(
+                    args[0], args[1], read_auto_broadcast(node_js["autob"]));
                 break;
             }
             case OP_TYPEID::Min:
@@ -940,12 +1187,22 @@ static shared_ptr<ngraph::Function>
             }
             case OP_TYPEID::Minimum:
             {
-                node = make_shared<op::Minimum>(args[0], args[1]);
+                node = make_shared<op::Minimum>(
+                    args[0], args[1], read_auto_broadcast(node_js["autob"]));
                 break;
             }
             case OP_TYPEID::Multiply:
             {
-                node = make_shared<op::Multiply>(args[0], args[1]);
+                node = make_shared<op::Multiply>(
+                    args[0], args[1], read_auto_broadcast(node_js["autob"]));
                 break;
             }
+            case OP_TYPEID::MVN:
+            {
+                auto normalize_variance = node_js.at("normalize_variance").get<bool>();
+                auto across_channels = node_js.at("across_channels").get<bool>();
+                auto eps = node_js.at("eps").get<double>();
+                node = make_shared<op::MVN>(args[0], normalize_variance, across_channels, eps);
+                break;
+            }
             case OP_TYPEID::Negative:
@@ -953,9 +1210,19 @@ static shared_ptr<ngraph::Function>
                 node = make_shared<op::Negative>(args[0]);
                 break;
             }
+            case OP_TYPEID::Normalize:
+            {
+                bool across_spatial = node_js.at("across_spatial").get<bool>();
+                bool channel_shared = node_js.at("channel_shared").get<bool>();
+                float eps = node_js.at("eps").get<float>();
+                node = make_shared<op::Normalize>(
+                    args[0], args[1], across_spatial, channel_shared, eps);
+                break;
+            }
             case OP_TYPEID::NotEqual:
             {
-                node = make_shared<op::NotEqual>(args[0], args[1]);
+                node = make_shared<op::NotEqual>(
+                    args[0], args[1], read_auto_broadcast(node_js["autob"]));
                 break;
             }
             case OP_TYPEID::Not:
@@ -972,7 +1239,7 @@ static shared_ptr<ngraph::Function>
             }
             case OP_TYPEID::Or:
             {
-                node = make_shared<op::Or>(args[0], args[1]);
+                node = make_shared<op::Or>(args[0], args[1], read_auto_broadcast(node_js["autob"]));
                 break;
             }
             case OP_TYPEID::Pad:
@@ -983,15 +1250,11 @@ static shared_ptr<ngraph::Function>
                 // This is a legacy field whose functionality is no longer supported. The new
                 // behavior is equivalent to interior padding of 0, so we will accept it under
                 // those conditions.
-                auto padding_interior_maybe = node_js.find("padding_interior");
-                if (padding_interior_maybe != node_js.end())
-                {
-                    auto padding_interior = padding_interior_maybe->get<vector<size_t>>();
-                    NGRAPH_ASSERT(std::all_of(padding_interior.begin(),
-                                              padding_interior.end(),
-                                              [](size_t s) { return s == 0; }))
-                        << "Legacy padding_interior field must be zero everywhere.";
-                }
+                auto padding_interior = get_value<vector<size_t>>(node_js, "padding_interior");
+                NGRAPH_CHECK(std::all_of(padding_interior.begin(),
+                                         padding_interior.end(),
+                                         [](size_t s) { return s == 0; }),
+                             "Legacy padding_interior field must be zero everywhere.");
                 auto pad_mode = node_js.count("pad_mode") == 0
                                     ? op::PadMode::CONSTANT
@@ -1030,7 +1293,8 @@ static shared_ptr<ngraph::Function>
             }
             case OP_TYPEID::Power:
             {
-                node = make_shared<op::Power>(args[0], args[1]);
+                node = make_shared<op::Power>(
+                    args[0], args[1], read_auto_broadcast(node_js["autob"]));
                 break;
             }
             case OP_TYPEID::PRelu:
@@ -1167,6 +1431,21 @@ static shared_ptr<ngraph::Function>
                 node = make_shared<op::ScalarConstantLike>(args[0], value);
                 break;
             }
+            case OP_TYPEID::ScaleShift:
+            {
+                node = make_shared<op::ScaleShift>(args[0], args[1], args[2]);
+                break;
+            }
+            case OP_TYPEID::ScatterAdd:
+            {
+                node = make_shared<op::ScatterAdd>(args[0], args[1], args[2]);
+                break;
+            }
+            case OP_TYPEID::ScatterNDAdd:
+            {
+                node = make_shared<op::ScatterNDAdd>(args[0], args[1], args[2]);
+                break;
+            }
             case OP_TYPEID::Select:
             {
                 node = make_shared<op::Select>(args[0], args[1], args[2]);
@@ -1177,6 +1456,13 @@ static shared_ptr<ngraph::Function>
                 node = make_shared<op::ShapeOf>(args[0]);
                 break;
             }
+            case OP_TYPEID::ShuffleChannels:
+            {
+                const auto axis = node_js.at("axis").get<size_t>();
+                const auto groups = node_js.at("groups").get<size_t>();
+                node = make_shared<op::ShuffleChannels>(args[0], axis, groups);
+                break;
+            }
             case OP_TYPEID::Sigmoid:
             {
                 node = make_shared<op::Sigmoid>(args[0]);
@@ -1216,14 +1502,38 @@ static shared_ptr<ngraph::Function>
                 node = make_shared<op::Softmax>(args[0], softmax_axes);
                 break;
             }
+            case OP_TYPEID::SpaceToDepth:
+            {
+                auto block_size = node_js.at("block_size").get<size_t>();
+                node = make_shared<op::SpaceToDepth>(args[0], block_size);
+                break;
+            }
+            case OP_TYPEID::Split:
+            {
+                const auto axis = node_js.at("axis").get<size_t>();
+                const auto splits = node_js.at("splits").get<vector<size_t>>();
+                node = make_shared<op::Split>(args[0], axis, splits);
+                break;
+            }
             case OP_TYPEID::Sqrt:
             {
                 node = make_shared<op::Sqrt>(args[0]);
                 break;
             }
+            case OP_TYPEID::SquaredDifference:
+            {
+                node = make_shared<op::SquaredDifference>(args[0], args[1]);
+                break;
+            }
+            case OP_TYPEID::Squeeze:
+            {
+                node = make_shared<op::Squeeze>(args[0], args[1]);
+                break;
+            }
             case OP_TYPEID::Subtract:
             {
-                node = make_shared<op::Subtract>(args[0], args[1]);
+                node = make_shared<op::Subtract>(
+                    args[0], args[1], read_auto_broadcast(node_js["autob"]));
                 break;
             }
             case OP_TYPEID::Sum:
@@ -1242,6 +1552,11 @@ static shared_ptr<ngraph::Function>
                 node = make_shared<op::Tanh>(args[0]);
                 break;
             }
+            case OP_TYPEID::Tile:
+            {
+                node = make_shared<op::Tile>(args[0], args[1]);
+                break;
+            }
             case OP_TYPEID::TopK:
             {
                 auto top_k_axis = node_js.at("top_k_axis").get<size_t>();
@@ -1261,6 +1576,11 @@ static shared_ptr<ngraph::Function>
                 node = make_shared<op::StopGradient>(args[0]);
                 break;
             }
+            case OP_TYPEID::Unsqueeze:
+            {
+                node = make_shared<op::Unsqueeze>(args[0], args[1]);
+                break;
+            }
             case OP_TYPEID::UnknownOp:
             {
                 stringstream ss;
@@ -1268,7 +1588,9 @@ static shared_ptr<ngraph::Function>
                 throw runtime_error(ss.str());
             }
             }
+#if !(defined(__GNUC__) && (__GNUC__ == 4 && __GNUC_MINOR__ == 8))
 #pragma GCC diagnostic pop
+#endif
             for (const string& name : control_deps_inputs)
             {
@@ -1348,24 +1670,33 @@ static json write(const Node& n, bool binary_constant_data)
     json control_deps = json::array();
     json outputs = json::array();

-    for (const descriptor::Input& input : n.get_inputs())
+    for (auto& input : n.inputs())
     {
-        inputs.push_back(input.get_output().get_node()->get_name());
+        inputs.push_back(input.get_source_output().get_node()->get_name());
     }
     for (auto cdep : n.get_control_dependencies())
     {
         control_deps.push_back(cdep->get_name());
     }
-    for (size_t i = 0; i < n.get_output_size(); ++i)
+    for (auto& output : n.outputs())
     {
-        outputs.push_back(n.get_output_tensor(i).get_name());
+        outputs.push_back(output.get_tensor().get_name());
     }

-    node["inputs"] = inputs;
-    node["control_deps"] = control_deps;
-    node["outputs"] = outputs;
+    if (!inputs.empty())
+    {
+        node["inputs"] = inputs;
+    }
+    if (!control_deps.empty())
+    {
+        node["control_deps"] = control_deps;
+    }
+    if (!outputs.empty())
+    {
+        node["outputs"] = outputs;
+    }

-    if (std::getenv("NGRAPH_SERIALIZER_OUTPUT_SHAPES") != nullptr)
+    if (s_serialize_output_shapes_enabled)
     {
         json output_shapes = json::array();
         for (size_t i = 0; i < n.get_output_size(); ++i)
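Besides moving to the newer Node iteration API (inputs()/outputs() with get_source_output()/get_tensor()), the hunk above makes the writer omit empty "inputs", "control_deps", and "outputs" arrays rather than always emitting them, which trims the serialized JSON. A minimal sketch of the omit-if-empty pattern on its own:

    #include <iostream>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        json node;
        json inputs = json::array();  // e.g. a Parameter node has no inputs
        json outputs = {"n0_out"};

        // Emit a key only when its array is non-empty, as write(Node&) now does.
        if (!inputs.empty())
        {
            node["inputs"] = inputs;
        }
        if (!outputs.empty())
        {
            node["outputs"] = outputs;
        }

        std::cout << node.dump() << '\n'; // {"outputs":["n0_out"]}
    }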
@@ -1376,17 +1707,26 @@ static json write(const Node& n, bool binary_constant_data)
     }

     string node_op = n.description();
+#if !(defined(__GNUC__) && (__GNUC__ == 4 && __GNUC_MINOR__ == 8))
 #pragma GCC diagnostic push
 #pragma GCC diagnostic error "-Wswitch"
 #pragma GCC diagnostic error "-Wswitch-enum"
 // #pragma GCC diagnostic error "-Wimplicit-fallthrough"
+#endif
     switch (get_typeid(node_op))
     {
     case OP_TYPEID::Abs: { break;
     }
     case OP_TYPEID::Acos: { break;
     }
-    case OP_TYPEID::Add: { break;
+    case OP_TYPEID::Add:
+    {
+        auto tmp = dynamic_cast<const op::Add*>(&n);
+        if (tmp->get_autob().m_type != op::AutoBroadcastType::NONE)
+        {
+            node["autob"] = write_auto_broadcast(tmp->get_autob());
+        }
+        break;
     }
     case OP_TYPEID::ArgMin:
     {
@@ -1410,7 +1750,14 @@ static json write(const Node& n, bool binary_constant_data)
     }
     case OP_TYPEID::AllReduce: { break;
     }
-    case OP_TYPEID::And: { break;
+    case OP_TYPEID::And:
+    {
+        auto tmp = dynamic_cast<const op::And*>(&n);
+        if (tmp->get_autob().m_type != op::AutoBroadcastType::NONE)
+        {
+            node["autob"] = write_auto_broadcast(tmp->get_autob());
+        }
+        break;
     }
     case OP_TYPEID::Any:
     {
@@ -1430,6 +1777,7 @@ static json write(const Node& n, bool binary_constant_data)
         node["padding_below"] = tmp->get_padding_below();
         node["padding_above"] = tmp->get_padding_above();
         node["include_padding_in_avg_computation"] = tmp->get_include_padding_in_avg_computation();
+        node["pad_type"] = tmp->get_pad_type();
         break;
     }
     case OP_TYPEID::AvgPoolBackprop:
@@ -1443,6 +1791,8 @@ static json write(const Node& n, bool binary_constant_data)
         node["include_padding_in_avg_computation"] = tmp->get_include_padding_in_avg_computation();
         break;
     }
+    case OP_TYPEID::BatchMatMul: { break;
+    }
     case OP_TYPEID::BatchNormTraining:
     {
         auto tmp = dynamic_cast<const op::BatchNormTraining*>(&n);
@@ -1478,6 +1828,13 @@ static json write(const Node& n, bool binary_constant_data)
     }
     case OP_TYPEID::Ceiling: { break;
     }
+    case OP_TYPEID::Clamp:
+    {
+        auto tmp = dynamic_cast<const op::Clamp*>(&n);
+        node["min"] = tmp->get_min();
+        node["max"] = tmp->get_max();
+        break;
+    }
     case OP_TYPEID::Concat:
     {
         auto tmp = dynamic_cast<const op::Concat*>(&n);
@@ -1487,7 +1844,13 @@ static json write(const Node& n, bool binary_constant_data)
     case OP_TYPEID::Constant:
    {
         auto tmp = dynamic_cast<const op::Constant*>(&n);
-        if (!binary_constant_data)
+        if (tmp->are_all_data_elements_bitwise_identical())
+        {
+            vector<string> vs;
+            vs.push_back(tmp->convert_value_to_string(0));
+            node["value"] = vs;
+        }
+        else
         {
             node["value"] = tmp->get_value_strings();
         }
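The Constant writer above now checks are_all_data_elements_bitwise_identical() and, when a tensor is uniform, stores a single representative value string instead of one string per element, which is where much of serializer.cpp's size saving on large constants comes from. A standalone sketch of the idea with plain vectors (hypothetical helper name, not the real op::Constant API):

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    // Stand-in for are_all_data_elements_bitwise_identical().
    static bool all_elements_identical(const std::vector<int>& data)
    {
        return !data.empty() && std::all_of(data.begin(), data.end(),
                                            [&](int v) { return v == data.front(); });
    }

    int main()
    {
        std::vector<int> data(1024, 7); // a large, uniform constant

        std::vector<std::string> serialized;
        if (all_elements_identical(data))
        {
            // Compact form: one representative value for the whole tensor.
            serialized.push_back(std::to_string(data.front()));
        }
        else
        {
            for (int v : data)
            {
                serialized.push_back(std::to_string(v));
            }
        }
        std::cout << serialized.size() << " value string(s)\n"; // 1
    }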
@@ -1509,6 +1872,7 @@ static json write(const Node& n, bool binary_constant_data)
         node["padding_below"] = tmp->get_padding_below();
         node["padding_above"] = tmp->get_padding_above();
         node["data_dilation_strides"] = tmp->get_data_dilation_strides();
+        node["pad_type"] = tmp->get_pad_type();
         break;
     }
     case OP_TYPEID::ConvolutionBackpropData:
@@ -1533,6 +1897,38 @@ static json write(const Node& n, bool binary_constant_data)
         node["data_dilation_strides_forward"] = tmp->get_data_dilation_strides_forward();
         break;
     }
+    case OP_TYPEID::ConvolutionBias:
+    {
+        auto tmp = dynamic_cast<const op::ConvolutionBias*>(&n);
+        node["window_movement_strides"] = tmp->get_window_movement_strides();
+        node["window_dilation_strides"] = tmp->get_window_dilation_strides();
+        node["padding_below"] = tmp->get_padding_below();
+        node["padding_above"] = tmp->get_padding_above();
+        node["data_dilation_strides"] = tmp->get_data_dilation_strides();
+        break;
+    }
+    case OP_TYPEID::ConvolutionBiasAdd:
+    {
+        auto tmp = dynamic_cast<const op::ConvolutionBiasAdd*>(&n);
+        node["window_movement_strides"] = tmp->get_window_movement_strides();
+        node["window_dilation_strides"] = tmp->get_window_dilation_strides();
+        node["padding_below"] = tmp->get_padding_below();
+        node["padding_above"] = tmp->get_padding_above();
+        node["data_dilation_strides"] = tmp->get_data_dilation_strides();
+        break;
+    }
+    case OP_TYPEID::ConvolutionBiasBackpropFiltersBias:
+    {
+        auto tmp = dynamic_cast<const op::ConvolutionBiasBackpropFiltersBias*>(&n);
+        node["filters_shape"] = tmp->get_filters_shape();
+        node["bias_shape"] = tmp->get_bias_shape();
+        node["window_movement_strides_forward"] = tmp->get_window_movement_strides_forward();
+        node["window_dilation_strides_forward"] = tmp->get_window_dilation_strides_forward();
+        node["padding_below_forward"] = tmp->get_padding_below_forward();
+        node["padding_above_forward"] = tmp->get_padding_above_forward();
+        node["data_dilation_strides_forward"] = tmp->get_data_dilation_strides_forward();
+        break;
+    }
     case OP_TYPEID::Cos: { break;
     }
     case OP_TYPEID::Cosh: { break;
@@ -1544,7 +1940,21 @@ static json write(const Node& n, bool binary_constant_data)
         node["axes"] = tmp->get_axes();
         break;
     }
-    case OP_TYPEID::Divide: { break;
+    case OP_TYPEID::DepthToSpace:
+    {
+        auto tmp = dynamic_cast<const op::DepthToSpace*>(&n);
+        node["type"] = write_element_type(tmp->get_element_type());
+        node["block_size"] = tmp->get_block_size();
+        break;
+    }
+    case OP_TYPEID::Divide:
+    {
+        auto tmp = dynamic_cast<const op::Divide*>(&n);
+        if (tmp->get_autob().m_type != op::AutoBroadcastType::NONE)
+        {
+            node["autob"] = write_auto_broadcast(tmp->get_autob());
+        }
+        break;
     }
     case OP_TYPEID::Dot:
     {
@@ -1560,22 +1970,54 @@ static json write(const Node& n, bool binary_constant_data)
     }
     case OP_TYPEID::DynSlice: { break;
     }
     case OP_TYPEID::Elu: { break;
     }
     case OP_TYPEID::EmbeddingLookup: { break;
     }
-    case OP_TYPEID::Equal: { break;
+    case OP_TYPEID::Equal:
+    {
+        auto tmp = dynamic_cast<const op::Equal*>(&n);
+        if (tmp->get_autob().m_type != op::AutoBroadcastType::NONE)
+        {
+            node["autob"] = write_auto_broadcast(tmp->get_autob());
+        }
+        break;
     }
     case OP_TYPEID::Erf: { break;
     }
     case OP_TYPEID::Exp: { break;
     }
+    case OP_TYPEID::FakeQuantize:
+    {
+        auto tmp = dynamic_cast<const op::FakeQuantize*>(&n);
+        node["levels"] = tmp->get_levels();
+        break;
+    }
     case OP_TYPEID::Floor: { break;
     }
+    case OP_TYPEID::Gather:
+    {
+        auto tmp = dynamic_cast<const op::Gather*>(&n);
+        node["axis"] = tmp->get_axis();
+        break;
+    }
+    case OP_TYPEID::GatherND: { break;
+    }
     case OP_TYPEID::GetOutputElement:
     {
         auto tmp = dynamic_cast<const op::GetOutputElement*>(&n);
         node["n"] = tmp->get_n();
         break;
     }
+    case OP_TYPEID::Gemm:
+    {
+        auto tmp = dynamic_cast<const op::Gemm*>(&n);
+        node["alpha"] = tmp->get_alpha();
+        node["beta"] = tmp->get_beta();
+        node["transA"] = tmp->get_transA();
+        node["transB"] = tmp->get_transB();
+        break;
+    }
     case OP_TYPEID::GenerateMask:
     {
         auto tmp = dynamic_cast<const op::GenerateMask*>(&n);
@@ -1585,13 +2027,68 @@ static json write(const Node& n, bool binary_constant_data)
         node["probability"] = tmp->get_probability();
         break;
     }
-    case OP_TYPEID::Greater: { break;
+    case OP_TYPEID::Greater:
+    {
+        auto tmp = dynamic_cast<const op::Greater*>(&n);
+        if (tmp->get_autob().m_type != op::AutoBroadcastType::NONE)
+        {
+            node["autob"] = write_auto_broadcast(tmp->get_autob());
+        }
+        break;
     }
-    case OP_TYPEID::GreaterEq: { break;
+    case OP_TYPEID::GreaterEq:
+    {
+        auto tmp = dynamic_cast<const op::GreaterEq*>(&n);
+        if (tmp->get_autob().m_type != op::AutoBroadcastType::NONE)
+        {
+            node["autob"] = write_auto_broadcast(tmp->get_autob());
+        }
+        break;
     }
-    case OP_TYPEID::Less: { break;
+    case OP_TYPEID::GRN:
+    {
+        auto tmp = dynamic_cast<const op::GRN*>(&n);
+        node["bias"] = tmp->get_bias();
+        break;
     }
-    case OP_TYPEID::LessEq: { break;
+    case OP_TYPEID::HardSigmoid:
+    {
+        auto tmp = dynamic_cast<const op::HardSigmoid*>(&n);
+        node["alpha"] = tmp->get_alpha();
+        node["beta"] = tmp->get_beta();
+        break;
+    }
+    case OP_TYPEID::GroupConvolution:
+    {
+        auto tmp = dynamic_cast<const op::GroupConvolution*>(&n);
+        node["window_movement_strides"] = tmp->get_window_movement_strides();
+        node["window_dilation_strides"] = tmp->get_window_dilation_strides();
+        node["padding_below"] = tmp->get_padding_below();
+        node["padding_above"] = tmp->get_padding_above();
+        node["data_dilation_strides"] = tmp->get_data_dilation_strides();
+        node["groups"] = tmp->get_groups();
+        node["pad_type"] = tmp->get_pad_type();
+        break;
+    }
+    case OP_TYPEID::LeakyRelu: { break;
+    }
+    case OP_TYPEID::Less:
+    {
+        auto tmp = dynamic_cast<const op::Less*>(&n);
+        if (tmp->get_autob().m_type != op::AutoBroadcastType::NONE)
+        {
+            node["autob"] = write_auto_broadcast(tmp->get_autob());
+        }
+        break;
+    }
+    case OP_TYPEID::LessEq:
+    {
+        auto tmp = dynamic_cast<const op::LessEq*>(&n);
+        if (tmp->get_autob().m_type != op::AutoBroadcastType::NONE)
+        {
+            node["autob"] = write_auto_broadcast(tmp->get_autob());
+        }
+        break;
+    }
     case OP_TYPEID::Log: { break;
     }
@@ -1617,6 +2114,7 @@ static json write(const Node& n, bool binary_constant_data)
         node["window_movement_strides"] = tmp->get_window_movement_strides();
         node["padding_below"] = tmp->get_padding_below();
         node["padding_above"] = tmp->get_padding_above();
+        node["pad_type"] = tmp->get_pad_type();
         break;
     }
     case OP_TYPEID::MaxPoolBackprop:
@@ -1628,7 +2126,14 @@ static json write(const Node& n, bool binary_constant_data)
         node["padding_above"] = tmp->get_padding_above();
         break;
     }
-    case OP_TYPEID::Maximum: { break;
+    case OP_TYPEID::Maximum:
+    {
+        auto tmp = dynamic_cast<const op::Maximum*>(&n);
+        if (tmp->get_autob().m_type != op::AutoBroadcastType::NONE)
+        {
+            node["autob"] = write_auto_broadcast(tmp->get_autob());
+        }
+        break;
     }
     case OP_TYPEID::Min:
     {
@@ -1636,13 +2141,50 @@ static json write(const Node& n, bool binary_constant_data)
         node["reduction_axes"] = tmp->get_reduction_axes();
         break;
     }
-    case OP_TYPEID::Minimum: { break;
+    case OP_TYPEID::Minimum:
+    {
+        auto tmp = dynamic_cast<const op::Minimum*>(&n);
+        if (tmp->get_autob().m_type != op::AutoBroadcastType::NONE)
+        {
+            node["autob"] = write_auto_broadcast(tmp->get_autob());
+        }
+        break;
     }
-    case OP_TYPEID::Multiply: { break;
+    case OP_TYPEID::Multiply:
+    {
+        auto tmp = dynamic_cast<const op::Multiply*>(&n);
+        if (tmp->get_autob().m_type != op::AutoBroadcastType::NONE)
+        {
+            node["autob"] = write_auto_broadcast(tmp->get_autob());
+        }
+        break;
+    }
+    case OP_TYPEID::MVN:
+    {
+        auto tmp = dynamic_cast<const op::MVN*>(&n);
+        node["normalize_variance"] = tmp->get_normalize_variance();
+        node["across_channels"] = tmp->get_across_channels();
+        node["eps"] = tmp->get_eps();
+        break;
+    }
     case OP_TYPEID::Negative: { break;
     }
-    case OP_TYPEID::NotEqual: { break;
+    case OP_TYPEID::Normalize:
+    {
+        auto tmp = dynamic_cast<const op::Normalize*>(&n);
+        node["across_spatial"] = tmp->get_across_spatial();
+        node["channel_shared"] = tmp->get_channel_shared();
+        node["eps"] = tmp->get_eps();
+        break;
+    }
+    case OP_TYPEID::NotEqual:
+    {
+        auto tmp = dynamic_cast<const op::NotEqual*>(&n);
+        if (tmp->get_autob().m_type != op::AutoBroadcastType::NONE)
+        {
+            node["autob"] = write_auto_broadcast(tmp->get_autob());
+        }
+        break;
+    }
     case OP_TYPEID::Not: { break;
     }
@@ -1653,7 +2195,14 @@ static json write(const Node& n, bool binary_constant_data)
         node["one_hot_axis"] = tmp->get_one_hot_axis();
         break;
     }
-    case OP_TYPEID::Or: { break;
+    case OP_TYPEID::Or:
+    {
+        auto tmp = dynamic_cast<const op::Or*>(&n);
+        if (tmp->get_autob().m_type != op::AutoBroadcastType::NONE)
+        {
+            node["autob"] = write_auto_broadcast(tmp->get_autob());
+        }
+        break;
     }
     case OP_TYPEID::Pad:
     {
@@ -1696,7 +2245,14 @@ static json write(const Node& n, bool binary_constant_data)
         node["reduction_axes"] = tmp->get_reduction_axes();
         break;
     }
-    case OP_TYPEID::Power: { break;
+    case OP_TYPEID::Power:
+    {
+        auto tmp = dynamic_cast<const op::Power*>(&n);
+        if (tmp->get_autob().m_type != op::AutoBroadcastType::NONE)
+        {
+            node["autob"] = write_auto_broadcast(tmp->get_autob());
+        }
+        break;
     }
     case OP_TYPEID::Quantize:
     {
@@ -1789,10 +2345,23 @@ static json write(const Node& n, bool binary_constant_data)
         node["element_type"] = write_element_type(constant->get_element_type());
         break;
     }
+    case OP_TYPEID::ScaleShift: { break;
+    }
+    case OP_TYPEID::ScatterAdd: { break;
+    }
+    case OP_TYPEID::ScatterNDAdd: { break;
+    }
     case OP_TYPEID::Select: { break;
     }
     case OP_TYPEID::ShapeOf: { break;
     }
+    case OP_TYPEID::ShuffleChannels:
+    {
+        const auto tmp = dynamic_cast<const op::ShuffleChannels*>(&n);
+        node["axis"] = tmp->get_axis();
+        node["groups"] = tmp->get_groups();
+        break;
+    }
     case OP_TYPEID::Sigmoid: { break;
     }
     case OP_TYPEID::SigmoidBackprop: { break;
@@ -1811,11 +2380,36 @@ static json write(const Node& n, bool binary_constant_data)
         node["strides"] = tmp->get_strides();
         break;
     }
+    case OP_TYPEID::SpaceToDepth:
+    {
+        auto tmp = dynamic_cast<const op::SpaceToDepth*>(&n);
+        node["type"] = write_element_type(tmp->get_element_type());
+        node["block_size"] = tmp->get_block_size();
+        break;
+    }
+    case OP_TYPEID::Split:
+    {
+        auto tmp = dynamic_cast<const op::Split*>(&n);
+        node["axis"] = tmp->get_axis();
+        node["splits"] = tmp->get_splits();
+        break;
+    }
     case OP_TYPEID::Sqrt: { break;
     }
+    case OP_TYPEID::SquaredDifference: { break;
+    }
+    case OP_TYPEID::Squeeze: { break;
+    }
     case OP_TYPEID::StopGradient: { break;
     }
-    case OP_TYPEID::Subtract: { break;
+    case OP_TYPEID::Subtract:
+    {
+        auto tmp = dynamic_cast<const op::Subtract*>(&n);
+        if (tmp->get_autob().m_type != op::AutoBroadcastType::NONE)
+        {
+            node["autob"] = write_auto_broadcast(tmp->get_autob());
+        }
+        break;
+    }
     case OP_TYPEID::Sum:
     {
@@ -1833,6 +2427,8 @@ static json write(const Node& n, bool binary_constant_data)
     }
     case OP_TYPEID::Tanh: { break;
     }
+    case OP_TYPEID::Tile: { break;
+    }
     case OP_TYPEID::TopK:
     {
         auto tmp = dynamic_cast<const op::TopK*>(&n);
@@ -1844,10 +2440,14 @@ static json write(const Node& n, bool binary_constant_data)
     }
     case OP_TYPEID::Transpose: { break;
     }
+    case OP_TYPEID::Unsqueeze: { break;
+    }
     case OP_TYPEID::UnknownOp: { break;
     }
     }
+#if !(defined(__GNUC__) && (__GNUC__ == 4 && __GNUC_MINOR__ == 8))
 #pragma GCC diagnostic pop
+#endif
     return node;
 }
src/ngraph/util.hpp
@@ -44,6 +44,7 @@ namespace ngraph
     namespace runtime
     {
         class Backend;
+        class Value;
     }

     std::string to_cplusplus_sourcecode_literal(bool val);
test/cpu_fusion.cpp
@@ -74,7 +74,6 @@
 #include "ngraph/runtime/cpu/op/sigmoid_mul.hpp"
 #include "ngraph/runtime/cpu/op/update_slice.hpp"
 #include "ngraph/runtime/cpu/pass/cpu_fusion.hpp"
-#include "ngraph/runtime/cpu/pass/cpu_loop_kernel_fusion.hpp"
 #include "ngraph/runtime/cpu/pass/cpu_mat_fusion.hpp"
 #include "ngraph/runtime/cpu/pass/cpu_post_layout_optimizations.hpp"
 #include "ngraph/runtime/cpu/pass/cpu_rnn_fusion.hpp"