ngraph — Commit 1cfa3d66 (Unverified)

Authored Nov 16, 2019 by Scott Cyphers; committed by GitHub on Nov 16, 2019

Merge branch 'master' into gwenger/deprecate_copy_from

Parents: 1f39edbe, f6a404eb

Showing 32 changed files with 456 additions and 79 deletions (+456 −79)
Changed files:

cmake/external_gtest.cmake                               +2   -0
python/ngraph/ops.py                                     +7   -2
python/pyngraph/ops/fused/space_to_depth.cpp             +1   -1
python/test/ngraph/test_ops_fused.py                     +2   -1
src/ngraph/frontend/onnx_import/op/space_to_depth.cpp    +3   -1
src/ngraph/op/add.hpp                                    +8   -2
src/ngraph/op/atan2.hpp                                  +4   -1
src/ngraph/op/divide.hpp                                 +9   -2
src/ngraph/op/fused/gelu.cpp                             +12  -0
src/ngraph/op/fused/group_conv.cpp                       +4   -0
src/ngraph/op/fused/layer_norm.cpp                       +2   -2
src/ngraph/op/fused/layer_norm.hpp                       +2   -2
src/ngraph/op/fused/space_to_depth.cpp                   +33  -3
src/ngraph/op/fused/space_to_depth.hpp                   +20  -1
src/ngraph/op/maximum.hpp                                +9   -2
src/ngraph/op/minimum.hpp                                +9   -2
src/ngraph/op/multiply.hpp                               +9   -2
src/ngraph/op/power.hpp                                  +9   -2
src/ngraph/op/relu.cpp                                   +1   -1
src/ngraph/op/sigmoid.cpp                                +1   -1
src/ngraph/op/sigmoid.hpp                                +5   -1
src/ngraph/op/subtract.hpp                               +5   -1
src/ngraph/op/util/attr_types.cpp                        +3   -0
src/ngraph/op/util/attr_types.hpp                        +6   -0
src/ngraph/op/util/binary_elementwise_arithmetic.cpp     +2   -1
src/ngraph/op/util/binary_elementwise_arithmetic.hpp     +5   -5
src/ngraph/pass/constant_folding_binary.cpp              +215 -38
src/ngraph/runtime/cpu/op/gelu_backprop.cpp              +1   -1
src/ngraph/serializer.cpp                                +3   -1
test/backend/fused_op.in.cpp                             +21  -2
test/serialize.cpp                                       +22  -0
test/type_prop/space_to_depth.cpp                        +21  -1
cmake/external_gtest.cmake

@@ -60,6 +60,8 @@ ExternalProject_Add(
     ${GTEST_CMAKE_ARGS}
     BINARY_DIR "${EXTERNAL_PROJECTS_ROOT}/gtest/build"
     EXCLUDE_FROM_ALL TRUE
+    BUILD_BYPRODUCTS ${CMAKE_BINARY_DIR}/ngraph/gtest/build/googlemock/gtest/libgtest.a
+    BUILD_BYPRODUCTS ${CMAKE_BINARY_DIR}/ngraph/gtest/build/googlemock/libgmock.a
     )

 #------------------------------------------------------------------------------
python/ngraph/ops.py

@@ -310,7 +310,7 @@ def scale_shift(data, scale, shift, name=None):  # type: (Node, Node, Node, str)
 @nameable_op
-def space_to_depth(data, block_size, name=None):  # type: (Node, int, str) -> Node
+def space_to_depth(data, mode, block_size, name=None):  # type: (Node, str, int, str) -> Node
     """Perform SpaceToDepth operation on the input tensor.

     SpaceToDepth rearranges blocks of spatial data into depth.
@@ -318,11 +318,16 @@ def space_to_depth(data, block_size, name=None):  # type: (Node, int, str) -> No
     and width dimensions are moved to the depth dimension.

     :param data: The node with data tensor.
+    :param mode: Specifies how the output depth dimension is gathered from block coordinates.
+
+                 blocks_first: The output depth is gathered from [block_size, ..., block_size, C]
+                 depth_first: The output depth is gathered from [C, block_size, ..., block_size]
+
     :param block_size: The size of the block of values to be moved. Scalar value.
     :param name: Optional output node name.
     :return: The new node performing a SpaceToDepth operation on input tensor.
     """
-    return SpaceToDepth(data, block_size)
+    return SpaceToDepth(data, mode, block_size)


 @nameable_op
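A minimal usage sketch of the updated helper. The call pattern follows the test change later in this diff; the ng.runtime(...) backend bootstrap is an assumption about the surrounding ngraph-python setup, not something shown in this hunk:

    import numpy as np
    import ngraph as ng

    runtime = ng.runtime(backend_name='CPU')  # assumed backend setup; not part of this diff

    data_shape = [1, 2, 4, 4]
    parameter_data = ng.parameter(data_shape, name='Data', dtype=np.float32)

    # The mode string now precedes block_size in the signature.
    model = ng.space_to_depth(parameter_data, 'blocks_first', 2)
    computation = runtime.computation(model, parameter_data)

    result = computation(np.arange(32, dtype=np.float32).reshape(data_shape))
    # result has shape (1, 8, 2, 2): each 2x2 spatial block moved into depth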
python/pyngraph/ops/fused/space_to_depth.cpp

@@ -27,5 +27,5 @@ void regclass_pyngraph_op_SpaceToDepth(py::module m)
     py::class_<ngraph::op::SpaceToDepth, std::shared_ptr<ngraph::op::SpaceToDepth>, ngraph::op::Op>
         spacetodepth(m, "SpaceToDepth");
     spacetodepth.doc() = "ngraph.impl.op.SpaceToDepth wraps ngraph::op::SpaceToDepth";
-    spacetodepth.def(py::init<const std::shared_ptr<ngraph::Node>&, int&>());
+    spacetodepth.def(
+        py::init<const std::shared_ptr<ngraph::Node>&, const std::string&, int&>());
 }
python/test/ngraph/test_ops_fused.py

@@ -429,11 +429,12 @@ def test_space_to_depth_operator():
     data_shape = [1, 2, 4, 4]
     data_value = np.arange(start=0, stop=32, step=1.0, dtype=np.float32).reshape(data_shape)
+    mode = 'blocks_first'
     block_size = 2

     parameter_data = ng.parameter(data_shape, name='Data', dtype=np.float32)

-    model = ng.space_to_depth(parameter_data, block_size)
+    model = ng.space_to_depth(parameter_data, mode, block_size)
     computation = runtime.computation(model, parameter_data)

     result = computation(data_value)
src/ngraph/frontend/onnx_import/op/space_to_depth.cpp

@@ -29,7 +29,9 @@ namespace ngraph
                 {
                     auto data = node.get_ng_inputs().at(0);
                     std::size_t block_size = node.get_attribute_value<std::int64_t>("blocksize");
-                    return NodeVector{std::make_shared<ngraph::op::SpaceToDepth>(data, block_size)};
+                    const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST;
+                    return NodeVector{
+                        std::make_shared<ngraph::op::SpaceToDepth>(data, mode, block_size)};
                 }
             } // namespace set_1
src/ngraph/op/add.hpp

@@ -35,7 +35,10 @@ namespace ngraph
                 static constexpr NodeTypeInfo type_info{"Add", 0};
                 const NodeTypeInfo& get_type_info() const override { return type_info; }
                 /// \brief Constructs an uninitialized addition operation
-                Add() = default;
+                Add()
+                    : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
+                {
+                }

                 /// \brief Constructs an addition operation.
                 ///
@@ -71,7 +74,10 @@ namespace ngraph
                 static constexpr NodeTypeInfo type_info{"Add", 1};
                 const NodeTypeInfo& get_type_info() const override { return type_info; }
                 /// \brief Constructs an uninitialized addition operation
-                Add() = default;
+                Add()
+                    : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY)
+                {
+                }

                 /// \brief Constructs an addition operation.
                 ///
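The two defaults above encode different broadcasting rules: the v0::Add default constructor now pins AutoBroadcastSpec::NONE (operand shapes must match exactly), while v1::Add pins AutoBroadcastSpec::NUMPY (implicit NumPy-style broadcasting). The same v0/NONE vs. v1/NUMPY pattern repeats in the Atan2, Divide, Maximum, Minimum, Multiply, and Power headers below. As a NumPy analogy only (not nGraph API):

    import numpy as np

    a = np.ones((2, 3), dtype=np.float32)
    b = np.ones((3,), dtype=np.float32)

    # AutoBroadcastSpec::NUMPY (v1 default): trailing axes align, so b is
    # implicitly broadcast along the leading axis.
    print((a + b).shape)  # (2, 3)

    # AutoBroadcastSpec::NONE (v0 default): no implicit broadcast; a v0
    # elementwise op would require both inputs to already have shape (2, 3).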
src/ngraph/op/atan2.hpp

@@ -31,7 +31,10 @@ namespace ngraph
             NGRAPH_API
             static constexpr NodeTypeInfo type_info{"Atan2", 0};
             const NodeTypeInfo& get_type_info() const override { return type_info; }
-            Atan2() = default;
+            Atan2()
+                : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
+            {
+            }

             /// \brief atan2(y,x) is the angle from the origin to the point (x,y) (note reversed
             /// order).
src/ngraph/op/divide.hpp

@@ -32,7 +32,10 @@ namespace ngraph
             static constexpr NodeTypeInfo type_info{"Divide", 0};
             const NodeTypeInfo& get_type_info() const override { return type_info; }
             /// \brief Constructs a division operation.
-            Divide() = default;
+            Divide()
+                : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
+            {
+            }

             /// \brief Constructs a division operation.
             ///
             /// \param arg0 Node that produces the first input tensor.
@@ -76,7 +79,11 @@ namespace ngraph
             static constexpr NodeTypeInfo type_info{"Divide", 1};
             const NodeTypeInfo& get_type_info() const override { return type_info; }
             /// \brief Constructs a division operation.
-            Divide() = default;
+            Divide()
+                : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY)
+            {
+            }

             /// \brief Constructs a division operation.
             ///
             /// \param arg0 Node that produces the first input tensor.
src/ngraph/op/fused/gelu.cpp

@@ -66,12 +66,18 @@ shared_ptr<Node> op::Gelu::copy_with_new_args(const NodeVector& new_args) const
 void op::Gelu::pre_validate_and_infer_types()
 {
     element::Type input_element_type = get_input_element_type(0);
+    PartialShape input_pshape = get_input_partial_shape(0);

     NODE_VALIDATION_CHECK(this,
                           input_element_type.is_dynamic() || input_element_type.is_real(),
                           "Argument element type must be f16, bf16, f32, f64 or dynamic (got ",
                           input_element_type,
                           ").");
+
+    if (input_pshape.is_dynamic())
+    {
+        set_output_type(0, input_element_type, input_pshape);
+    }
 }

 void op::Gelu::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
@@ -94,12 +100,18 @@ op::GeluBackpropFactor::GeluBackpropFactor(const Output<Node>& x)
 void op::GeluBackpropFactor::pre_validate_and_infer_types()
 {
     element::Type input_element_type = get_input_element_type(0);
+    PartialShape input_pshape = get_input_partial_shape(0);

     NODE_VALIDATION_CHECK(this,
                           input_element_type.is_dynamic() || input_element_type.is_real(),
                           "Argument element type must be f16, bf16, f32, f64 or dynamic (got ",
                           input_element_type,
                           ").");
+
+    if (input_pshape.is_dynamic())
+    {
+        set_output_type(0, input_element_type, input_pshape);
+    }
 }

 shared_ptr<Node> op::GeluBackpropFactor::copy_with_new_args(const NodeVector& new_args) const
src/ngraph/op/fused/group_conv.cpp

@@ -100,6 +100,10 @@ void op::GroupConvolution::pre_validate_and_infer_types()
                               get_groups()) == data_shape.to_shape()[1],
                           "Incorrect number of channels per filter");
     }
+    else
+    {
+        set_output_type(0, get_input_element_type(0), PartialShape::dynamic());
+    }
 }

 void op::GroupConvolution::post_validate_and_infer_types()
src/ngraph/op/fused/layer_norm.cpp

@@ -170,7 +170,7 @@ shared_ptr<Node> op::LayerNorm::copy_with_new_args(const NodeVector& new_args) c
     }
 }

-void op::LayerNorm::pre_validate_and_infer_types()
+void op::LayerNorm::validate_and_infer_types()
 {
     element::Type input_element_type = get_input_element_type(0);
@@ -509,7 +509,7 @@ shared_ptr<Node> op::LayerNormBackprop::copy_with_new_args(const NodeVector& new
     }
 }

-void op::LayerNormBackprop::pre_validate_and_infer_types()
+void op::LayerNormBackprop::validate_and_infer_types()
 {
     element::Type input_element_type = get_input_element_type(0);
src/ngraph/op/fused/layer_norm.hpp

@@ -55,7 +55,7 @@ namespace ngraph
             virtual NodeVector decompose_op() const override;

-            void pre_validate_and_infer_types() override;
+            void validate_and_infer_types() override;

             virtual std::shared_ptr<Node>
                 copy_with_new_args(const NodeVector& new_args) const override;
@@ -121,7 +121,7 @@ namespace ngraph
             virtual NodeVector decompose_op() const override;

-            void pre_validate_and_infer_types() override;
+            void validate_and_infer_types() override;

             virtual std::shared_ptr<Node>
                 copy_with_new_args(const NodeVector& new_args) const override;
src/ngraph/op/fused/space_to_depth.cpp

@@ -25,13 +25,21 @@ using namespace ngraph;
 constexpr NodeTypeInfo op::SpaceToDepth::type_info;

-op::SpaceToDepth::SpaceToDepth(const Output<Node>& data, const size_t block_size)
+op::SpaceToDepth::SpaceToDepth(const Output<Node>& data,
+                               const SpaceToDepthMode& mode,
+                               size_t block_size)
     : FusedOp({data})
     , m_blocksize(block_size)
+    , m_mode(mode)
 {
     constructor_validate_and_infer_types();
 }

+op::SpaceToDepth::SpaceToDepth(const Output<Node>& data, const std::string& mode, size_t block_size)
+    : SpaceToDepth(data, mode_from_string(mode), block_size)
+{
+}
+
 NodeVector op::SpaceToDepth::decompose_op() const
 {
     auto data = input_value(0);
@@ -74,7 +82,17 @@ NodeVector op::SpaceToDepth::decompose_op() const
     // rearrange them so as appropriate chunks of data where close to their
     // destination place. Finally squeeze data from respective dimensions.
     Output<Node> flat_node = builder::reshape(data, Shape{n, c, h_flat, bs, w_flat, bs});
-    flat_node = builder::reorder_axes(flat_node, {0, 3, 5, 1, 2, 4});
+    switch (m_mode)
+    {
+    case SpaceToDepthMode::DEPTH_FIRST:
+    {
+        flat_node = builder::reorder_axes(flat_node, {0, 1, 3, 5, 2, 4});
+        break;
+    }
+    case SpaceToDepthMode::BLOCKS_FIRST:
+    default:
+    {
+        flat_node = builder::reorder_axes(flat_node, {0, 3, 5, 1, 2, 4});
+    }
+    }
     return NodeVector{builder::reshape(flat_node, Shape{n, c_high, h_flat, w_flat})};
 }
@@ -84,5 +102,17 @@ shared_ptr<Node> op::SpaceToDepth::copy_with_new_args(const NodeVector& new_args
     {
         throw ngraph_error("Incorrect number of new arguments");
     }
-    return make_shared<SpaceToDepth>(new_args.at(0), m_blocksize);
+    return make_shared<SpaceToDepth>(new_args.at(0), m_mode, m_blocksize);
 }
+
+op::SpaceToDepth::SpaceToDepthMode op::SpaceToDepth::mode_from_string(const std::string& mode) const
+{
+    static const std::map<std::string, SpaceToDepthMode> allowed_values = {
+        {"blocks_first", SpaceToDepthMode::BLOCKS_FIRST},
+        {"depth_first", SpaceToDepthMode::DEPTH_FIRST}};
+
+    NODE_VALIDATION_CHECK(
+        this, allowed_values.count(mode) > 0, "Invalid 'depth_to_space_mode' value passed in.");
+
+    return allowed_values.at(mode);
+}
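The two reorder_axes permutations in decompose_op() have a direct NumPy analogue. The following sketch (a hypothetical reference helper, not part of this commit) mirrors the reshape/reorder/reshape pipeline for a 4-D NCHW input:

    import numpy as np

    def space_to_depth_ref(data, mode, block_size):
        # Mirrors op::SpaceToDepth::decompose_op(): expand the spatial dims,
        # reorder axes per mode, then squeeze the block dims into depth.
        n, c, h, w = data.shape
        bs = block_size
        flat = data.reshape(n, c, h // bs, bs, w // bs, bs)
        if mode == 'depth_first':
            flat = flat.transpose(0, 1, 3, 5, 2, 4)  # reorder_axes {0, 1, 3, 5, 2, 4}
        else:  # 'blocks_first' is also the default branch of the switch
            flat = flat.transpose(0, 3, 5, 1, 2, 4)  # reorder_axes {0, 3, 5, 1, 2, 4}
        return flat.reshape(n, c * bs * bs, h // bs, w // bs)

    x = np.arange(32, dtype=np.float32).reshape(1, 2, 4, 4)
    print(space_to_depth_ref(x, 'blocks_first', 2)[0, 0])  # [[ 0.  2.] [ 8. 10.]]

The only difference between the modes is whether the block coordinates land before the old channel axis (blocks_first) or after it (depth_first) in the output depth ordering.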
src/ngraph/op/fused/space_to_depth.hpp

@@ -32,6 +32,14 @@ namespace ngraph
             class SpaceToDepth : public ngraph::op::util::FusedOp
             {
             public:
+                enum class SpaceToDepthMode
+                {
+                    // The output depth is gathered from [block_size, ..., block_size, C]
+                    BLOCKS_FIRST,
+                    // The output depth is gathered from [C, block_size, ..., block_size]
+                    DEPTH_FIRST
+                };
+
                 NGRAPH_API
                 static constexpr NodeTypeInfo type_info{"SpaceToDepth", 0};
                 const NodeTypeInfo& get_type_info() const override { return type_info; }
@@ -39,10 +47,19 @@ namespace ngraph
                 /// \brief Constructs a SpaceToDepth operation.
                 ///
                 /// \param data - Node producing the input tensor
+                /// \param mode Specifies how the output depth dimension is gathered
+                ///             from block coordinates and the old depth dimension.
                 /// \param block_size - the size of the block of values to be moved
-                SpaceToDepth(const Output<Node>& data, std::size_t block_size);
+                SpaceToDepth(const Output<Node>& data,
+                             const SpaceToDepthMode& mode,
+                             std::size_t block_size = 1);
+
+                SpaceToDepth(const Output<Node>& data,
+                             const std::string& mode,
+                             std::size_t block_size = 1);

                 std::size_t get_block_size() const { return m_blocksize; }
+                SpaceToDepthMode get_mode() const { return m_mode; }
                 virtual NodeVector decompose_op() const override;

                 virtual std::shared_ptr<Node>
@@ -50,6 +67,8 @@ namespace ngraph
             protected:
                 std::size_t m_blocksize;
+                SpaceToDepthMode m_mode;
+                SpaceToDepthMode mode_from_string(const std::string& mode) const;
             };
         }
     }
src/ngraph/op/maximum.hpp

@@ -32,7 +32,10 @@ namespace ngraph
             static constexpr NodeTypeInfo type_info{"Maximum", 0};
             const NodeTypeInfo& get_type_info() const override { return type_info; }
             /// \brief Constructs a maximum operation.
-            Maximum() = default;
+            Maximum()
+                : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
+            {
+            }

             /// \brief Constructs a maximum operation.
             ///
             /// \param arg0 Node that produces the first input tensor.
@@ -62,7 +65,11 @@ namespace ngraph
             static constexpr NodeTypeInfo type_info{"Maximum", 1};
             const NodeTypeInfo& get_type_info() const override { return type_info; }
             /// \brief Constructs a maximum operation.
-            Maximum() = default;
+            Maximum()
+                : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY)
+            {
+            }

             /// \brief Constructs a maximum operation.
             ///
             /// \param arg0 Node that produces the first input tensor.
src/ngraph/op/minimum.hpp

@@ -32,7 +32,10 @@ namespace ngraph
             static constexpr NodeTypeInfo type_info{"Minimum", 0};
             const NodeTypeInfo& get_type_info() const override { return type_info; }
             /// \brief Constructs a minimum operation.
-            Minimum() = default;
+            Minimum()
+                : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
+            {
+            }

             /// \brief Constructs a minimum operation.
             ///
             /// \param arg0 Node that produces the first input tensor.
@@ -62,7 +65,11 @@ namespace ngraph
             static constexpr NodeTypeInfo type_info{"Minimum", 1};
             const NodeTypeInfo& get_type_info() const override { return type_info; }
             /// \brief Constructs a minimum operation.
-            Minimum() = default;
+            Minimum()
+                : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY)
+            {
+            }

             /// \brief Constructs a minimum operation.
             ///
             /// \param arg0 Node that produces the first input tensor.
src/ngraph/op/multiply.hpp

@@ -32,7 +32,10 @@ namespace ngraph
             static constexpr NodeTypeInfo type_info{"Multiply", 0};
             const NodeTypeInfo& get_type_info() const override { return type_info; }
             /// \brief Constructs a multiplication operation.
-            Multiply() = default;
+            Multiply()
+                : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
+            {
+            }

             /// \brief Constructs a multiplication operation.
             ///
             /// \param arg0 Node that produces the first input tensor.
@@ -62,7 +65,11 @@ namespace ngraph
             static constexpr NodeTypeInfo type_info{"Multiply", 1};
             const NodeTypeInfo& get_type_info() const override { return type_info; }
             /// \brief Constructs a multiplication operation.
-            Multiply() = default;
+            Multiply()
+                : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY)
+            {
+            }

             /// \brief Constructs a multiplication operation.
             ///
             /// \param arg0 Node that produces the first input tensor.
src/ngraph/op/power.hpp

@@ -46,7 +46,10 @@ namespace ngraph
             NGRAPH_API
             static constexpr NodeTypeInfo type_info{"Power", 0};
             const NodeTypeInfo& get_type_info() const override { return type_info; }
-            Power() = default;
+            Power()
+                : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
+            {
+            }

             /// \brief Constructs an exponentiation operation.
             ///
             /// \param arg0 Node that produces the first input tensor.
@@ -89,7 +92,11 @@ namespace ngraph
             NGRAPH_API
             static constexpr NodeTypeInfo type_info{"Power", 1};
             const NodeTypeInfo& get_type_info() const override { return type_info; }
-            Power() = default;
+            Power()
+                : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY)
+            {
+            }

             /// \brief Constructs an exponentiation operation.
             ///
             /// \param arg0 Node that produces the first input tensor.
src/ngraph/op/relu.cpp

@@ -36,7 +36,7 @@ shared_ptr<Node> op::Relu::copy_with_new_args(const NodeVector& new_args) const
 }

 op::ReluBackprop::ReluBackprop(shared_ptr<Node> arg, shared_ptr<Node> delta)
-    : BinaryElementwiseArithmetic(arg, delta)
+    : BinaryElementwiseArithmetic(arg, delta, AutoBroadcastSpec::NONE)
 {
     constructor_validate_and_infer_types();
 }
src/ngraph/op/sigmoid.cpp

@@ -37,7 +37,7 @@ op::Sigmoid::Sigmoid(const Output<Node>& arg)
 }

 op::SigmoidBackprop::SigmoidBackprop(const Output<Node>& arg, const Output<Node>& delta)
-    : BinaryElementwiseArithmetic(arg, delta)
+    : BinaryElementwiseArithmetic(arg, delta, AutoBroadcastSpec::NONE)
 {
     constructor_validate_and_infer_types();
 }
src/ngraph/op/sigmoid.hpp

@@ -47,7 +47,11 @@ namespace ngraph
             NGRAPH_API
             static constexpr NodeTypeInfo type_info{"SigmoidBackprop", 0};
             const NodeTypeInfo& get_type_info() const override { return type_info; }
-            SigmoidBackprop() = default;
+            SigmoidBackprop()
+                : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
+            {
+            }

             /// \brief Constructs a SigmoidBackprop operation.
             ///
             /// \param arg Node that produces the Sigmoid forward input tensor.
src/ngraph/op/subtract.hpp

@@ -29,7 +29,11 @@ namespace ngraph
             NGRAPH_API
             static constexpr NodeTypeInfo type_info{"Subtract", 0};
             const NodeTypeInfo& get_type_info() const override { return type_info; }
-            Subtract() = default;
+            Subtract()
+                : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
+            {
+            }

             /// \brief Constructs a subtraction operation.
             ///
             /// \param arg0 Node that produces the first input tensor.
src/ngraph/op/util/attr_types.cpp

@@ -19,6 +19,9 @@
 using namespace ngraph;

+const op::AutoBroadcastSpec op::AutoBroadcastSpec::NUMPY(AutoBroadcastType::NUMPY, 0);
+const op::AutoBroadcastSpec op::AutoBroadcastSpec::NONE{AutoBroadcastType::NONE, 0};
+
 namespace ngraph
 {
     template <>
src/ngraph/op/util/attr_types.hpp

@@ -20,6 +20,7 @@
 #include <ostream>

 #include "ngraph/attribute_adapter.hpp"
+#include "ngraph/ngraph_visibility.hpp"
 #include "ngraph/type.hpp"

 namespace ngraph
@@ -269,6 +270,11 @@ namespace ngraph
             {
                 return a.m_type == m_type && a.m_axis == m_axis;
             }
+
+            NGRAPH_API
+            static const AutoBroadcastSpec NUMPY;
+            NGRAPH_API
+            static const AutoBroadcastSpec NONE;
         };
     }
 }
src/ngraph/op/util/binary_elementwise_arithmetic.cpp

@@ -19,7 +19,8 @@
 using namespace std;
 using namespace ngraph;

-op::util::BinaryElementwiseArithmetic::BinaryElementwiseArithmetic()
+op::util::BinaryElementwiseArithmetic::BinaryElementwiseArithmetic(const AutoBroadcastSpec& autob)
+    : m_autob(autob)
 {
 }
src/ngraph/op/util/binary_elementwise_arithmetic.hpp

@@ -54,12 +54,12 @@ namespace ngraph
             class BinaryElementwiseArithmetic : public Op
             {
             protected:
                 /// \brief Constructs a binary elementwise arithmetic operation.
-                BinaryElementwiseArithmetic();
+                BinaryElementwiseArithmetic(const AutoBroadcastSpec& autob);

                 /// \brief Constructs a binary elementwise arithmetic operation.
                 BinaryElementwiseArithmetic(const std::shared_ptr<Node>& arg0,
                                             const std::shared_ptr<Node>& arg1,
-                                            const AutoBroadcastSpec& autob = AutoBroadcastSpec());
+                                            const AutoBroadcastSpec& autob);

                 /// \brief Constructs a binary elementwise arithmetic operation.
                 ///
@@ -67,7 +67,7 @@ namespace ngraph
                 /// \param arg1 Output that produces the second input tensor.
                 BinaryElementwiseArithmetic(const Output<Node>& arg0,
                                             const Output<Node>& arg1,
-                                            const AutoBroadcastSpec& autob = AutoBroadcastSpec());
+                                            const AutoBroadcastSpec& autob);

                 /// \brief Constructs a binary elementwise arithmetic operation.
                 ///
@@ -77,7 +77,7 @@ namespace ngraph
                 BinaryElementwiseArithmetic(const std::string& node_type,
                                             const std::shared_ptr<Node>& arg0,
                                             const std::shared_ptr<Node>& arg1,
-                                            const AutoBroadcastSpec& autob = AutoBroadcastSpec());
+                                            const AutoBroadcastSpec& autob);

             public:
                 void validate_and_infer_types() override;
src/ngraph/pass/constant_folding_binary.cpp

@@ -74,7 +74,7 @@ static shared_ptr<op::Constant> fold_constant_binary_logical(shared_ptr<op::Cons
     }
     else
     {
-        if (auto and_node = as_type_ptr<op::And>(binary))
+        if (auto and_v0_node = as_type_ptr<op::v0::And>(binary))
         {
             vector<char> out_vec(shape_size(out_shape));
             runtime::reference::logical_and<char>(a->get_data_ptr<char>(),
@@ -82,21 +82,21 @@ static shared_ptr<op::Constant> fold_constant_binary_logical(shared_ptr<op::Cons
                                                   out_vec.data(),
                                                   a->get_shape(),
                                                   b->get_shape(),
-                                                  and_node->get_autob());
+                                                  and_v0_node->get_autob());
             return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
         }
-        else if (auto logical_xor_node = as_type_ptr<op::v1::LogicalXor>(binary))
+        else if (auto logical_and_node = as_type_ptr<op::v1::LogicalAnd>(binary))
         {
             vector<char> out_vec(shape_size(out_shape));
-            runtime::reference::logical_xor<char>(a->get_data_ptr<char>(),
+            runtime::reference::logical_and<char>(a->get_data_ptr<char>(),
                                                   b->get_data_ptr<char>(),
                                                   out_vec.data(),
                                                   a->get_shape(),
                                                   b->get_shape(),
-                                                  logical_xor_node->get_autob());
+                                                  logical_and_node->get_autob());
             return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
         }
-        else if (auto or_node = as_type_ptr<op::Or>(binary))
+        else if (auto or_node = as_type_ptr<op::v0::Or>(binary))
        {
             vector<char> out_vec(shape_size(out_shape));
             runtime::reference::logical_or<char>(a->get_data_ptr<char>(),
@@ -107,6 +107,17 @@ static shared_ptr<op::Constant> fold_constant_binary_logical(shared_ptr<op::Cons
                                                  or_node->get_autob());
             return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
         }
+        else if (auto logical_or_node = as_type_ptr<op::v1::LogicalOr>(binary))
+        {
+            vector<char> out_vec(shape_size(out_shape));
+            runtime::reference::logical_or<char>(a->get_data_ptr<char>(),
+                                                 b->get_data_ptr<char>(),
+                                                 out_vec.data(),
+                                                 a->get_shape(),
+                                                 b->get_shape(),
+                                                 logical_or_node->get_autob());
+            return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
+        }
         else if (auto xor_node = as_type_ptr<op::v0::Xor>(binary))
         {
             vector<char> out_vec(shape_size(out_shape));
@@ -118,6 +129,17 @@ static shared_ptr<op::Constant> fold_constant_binary_logical(shared_ptr<op::Cons
                                                  xor_node->get_autob());
             return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
         }
+        else if (auto logical_xor_node = as_type_ptr<op::v1::LogicalXor>(binary))
+        {
+            vector<char> out_vec(shape_size(out_shape));
+            runtime::reference::logical_xor<char>(a->get_data_ptr<char>(),
+                                                  b->get_data_ptr<char>(),
+                                                  out_vec.data(),
+                                                  a->get_shape(),
+                                                  b->get_shape(),
+                                                  logical_xor_node->get_autob());
+            return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
+        }
         else
         {
             NGRAPH_CHECK(
@@ -151,7 +173,18 @@ shared_ptr<op::Constant> fold_constant_binary_comparison(shared_ptr<op::Constant
     }
     else
     {
-        if (auto equal_node = as_type_ptr<op::Equal>(binary))
+        if (auto equal_v0_node = as_type_ptr<op::v0::Equal>(binary))
+        {
+            vector<char> out_vec(shape_size(out_shape));
+            runtime::reference::equal<Tin>(a->get_data_ptr<Tin>(),
+                                           b->get_data_ptr<Tin>(),
+                                           out_vec.data(),
+                                           a->get_shape(),
+                                           b->get_shape(),
+                                           equal_v0_node->get_autob());
+            return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
+        }
+        else if (auto equal_v1_node = as_type_ptr<op::v1::Equal>(binary))
         {
             vector<char> out_vec(shape_size(out_shape));
             runtime::reference::equal<Tin>(a->get_data_ptr<Tin>(),
@@ -159,10 +192,10 @@ shared_ptr<op::Constant> fold_constant_binary_comparison(shared_ptr<op::Constant
                                            out_vec.data(),
                                            a->get_shape(),
                                            b->get_shape(),
-                                           equal_node->get_autob());
+                                           equal_v1_node->get_autob());
             return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
         }
-        else if (auto greater_node = as_type_ptr<op::Greater>(binary))
+        else if (auto greater_v0_node = as_type_ptr<op::v0::Greater>(binary))
         {
             vector<char> out_vec(shape_size(out_shape));
             runtime::reference::greater<Tin>(a->get_data_ptr<Tin>(),
@@ -170,10 +203,32 @@ shared_ptr<op::Constant> fold_constant_binary_comparison(shared_ptr<op::Constant
                                              out_vec.data(),
                                              a->get_shape(),
                                              b->get_shape(),
-                                             greater_node->get_autob());
+                                             greater_v0_node->get_autob());
             return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
         }
-        else if (auto greater_eq_node = as_type_ptr<op::GreaterEq>(binary))
+        else if (auto greater_v1_node = as_type_ptr<op::v1::Greater>(binary))
+        {
+            vector<char> out_vec(shape_size(out_shape));
+            runtime::reference::greater<Tin>(a->get_data_ptr<Tin>(),
+                                             b->get_data_ptr<Tin>(),
+                                             out_vec.data(),
+                                             a->get_shape(),
+                                             b->get_shape(),
+                                             greater_v1_node->get_autob());
+            return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
+        }
+        else if (auto greater_eq_v0_node = as_type_ptr<op::v0::GreaterEq>(binary))
+        {
+            vector<char> out_vec(shape_size(out_shape));
+            runtime::reference::greater_eq<Tin>(a->get_data_ptr<Tin>(),
+                                                b->get_data_ptr<Tin>(),
+                                                out_vec.data(),
+                                                a->get_shape(),
+                                                b->get_shape(),
+                                                greater_eq_v0_node->get_autob());
+            return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
+        }
+        else if (auto greater_eq_v1_node = as_type_ptr<op::v1::GreaterEq>(binary))
         {
             vector<char> out_vec(shape_size(out_shape));
             runtime::reference::greater_eq<Tin>(a->get_data_ptr<Tin>(),
@@ -181,10 +236,21 @@ shared_ptr<op::Constant> fold_constant_binary_comparison(shared_ptr<op::Constant
                                                 out_vec.data(),
                                                 a->get_shape(),
                                                 b->get_shape(),
-                                                greater_eq_node->get_autob());
+                                                greater_eq_v1_node->get_autob());
             return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
         }
-        else if (auto less_node = as_type_ptr<op::Less>(binary))
+        else if (auto less_v0_node = as_type_ptr<op::v0::Less>(binary))
+        {
+            vector<char> out_vec(shape_size(out_shape));
+            runtime::reference::less<Tin>(a->get_data_ptr<Tin>(),
+                                          b->get_data_ptr<Tin>(),
+                                          out_vec.data(),
+                                          a->get_shape(),
+                                          b->get_shape(),
+                                          less_v0_node->get_autob());
+            return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
+        }
+        else if (auto less_v1_node = as_type_ptr<op::v1::Less>(binary))
         {
             vector<char> out_vec(shape_size(out_shape));
             runtime::reference::less<Tin>(a->get_data_ptr<Tin>(),
@@ -192,10 +258,21 @@ shared_ptr<op::Constant> fold_constant_binary_comparison(shared_ptr<op::Constant
                                           out_vec.data(),
                                           a->get_shape(),
                                           b->get_shape(),
-                                          less_node->get_autob());
+                                          less_v1_node->get_autob());
             return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
         }
-        else if (auto less_eq_node = as_type_ptr<op::LessEq>(binary))
+        else if (auto less_eq_v0_node = as_type_ptr<op::v0::LessEq>(binary))
+        {
+            vector<char> out_vec(shape_size(out_shape));
+            runtime::reference::less_eq<Tin>(a->get_data_ptr<Tin>(),
+                                             b->get_data_ptr<Tin>(),
+                                             out_vec.data(),
+                                             a->get_shape(),
+                                             b->get_shape(),
+                                             less_eq_v0_node->get_autob());
+            return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
+        }
+        else if (auto less_eq_v1_node = as_type_ptr<op::v1::LessEqual>(binary))
         {
             vector<char> out_vec(shape_size(out_shape));
             runtime::reference::less_eq<Tin>(a->get_data_ptr<Tin>(),
@@ -203,10 +280,21 @@ shared_ptr<op::Constant> fold_constant_binary_comparison(shared_ptr<op::Constant
                                              out_vec.data(),
                                              a->get_shape(),
                                              b->get_shape(),
-                                             less_eq_node->get_autob());
+                                             less_eq_v1_node->get_autob());
             return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
         }
-        else if (auto not_equal_node = as_type_ptr<op::NotEqual>(binary))
+        else if (auto not_equal_v0_node = as_type_ptr<op::v0::NotEqual>(binary))
+        {
+            vector<char> out_vec(shape_size(out_shape));
+            runtime::reference::not_equal<Tin>(a->get_data_ptr<Tin>(),
+                                               b->get_data_ptr<Tin>(),
+                                               out_vec.data(),
+                                               a->get_shape(),
+                                               b->get_shape(),
+                                               not_equal_v0_node->get_autob());
+            return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
+        }
+        else if (auto not_equal_v1_node = as_type_ptr<op::v1::NotEqual>(binary))
         {
             vector<char> out_vec(shape_size(out_shape));
             runtime::reference::not_equal<Tin>(a->get_data_ptr<Tin>(),
@@ -214,7 +302,7 @@ shared_ptr<op::Constant> fold_constant_binary_comparison(shared_ptr<op::Constant
                                                out_vec.data(),
                                                a->get_shape(),
                                                b->get_shape(),
-                                               not_equal_node->get_autob());
+                                               not_equal_v1_node->get_autob());
             return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
         }
         else
@@ -249,7 +337,7 @@ shared_ptr<op::Constant> fold_constant_binary_arithmetic(shared_ptr<op::Constant
     }
     else
     {
-        if (auto add_node = as_type_ptr<op::Add>(binary))
+        if (auto add_v0_node = as_type_ptr<op::v0::Add>(binary))
         {
             NGRAPH_CHECK(element::from<Tin>() == element::from<Tout>(),
                          "Input/output types do not match");
@@ -259,26 +347,55 @@ shared_ptr<op::Constant> fold_constant_binary_arithmetic(shared_ptr<op::Constant
                                          out_vec.data(),
                                          a->get_shape(),
                                          b->get_shape(),
-                                         add_node->get_autob());
+                                         add_v0_node->get_autob());
             return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
         }
-        else if (auto divide_node = as_type_ptr<op::Divide>(binary))
+        else if (auto add_v1_node = as_type_ptr<op::v1::Add>(binary))
         {
             NGRAPH_CHECK(element::from<Tin>() == element::from<Tout>(),
                          "Input/output types do not match");

             vector<Tout> out_vec(shape_size(out_shape));
-            shared_ptr<op::Divide> divop = as_type_ptr<op::Divide>(binary);
+            runtime::reference::add<Tin>(a->get_data_ptr<Tin>(),
+                                         b->get_data_ptr<Tin>(),
+                                         out_vec.data(),
+                                         a->get_shape(),
+                                         b->get_shape(),
+                                         add_v1_node->get_autob());
+            return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
+        }
+        else if (auto divide_v0_node = as_type_ptr<op::v0::Divide>(binary))
+        {
+            NGRAPH_CHECK(element::from<Tin>() == element::from<Tout>(),
+                         "Input/output types do not match");
+
+            vector<Tout> out_vec(shape_size(out_shape));
+            shared_ptr<op::v0::Divide> divop = as_type_ptr<op::v0::Divide>(binary);
             bool pythondiv = divop->is_pythondiv();
             runtime::reference::divide<Tin>(a->get_data_ptr<Tin>(),
                                             b->get_data_ptr<Tin>(),
                                             out_vec.data(),
                                             a->get_shape(),
                                             b->get_shape(),
-                                            divide_node->get_autob(),
+                                            divide_v0_node->get_autob(),
                                             pythondiv);
             return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
         }
-        else if (auto maximum_node = as_type_ptr<op::Maximum>(binary))
+        else if (auto divide_v1_node = as_type_ptr<op::v1::Divide>(binary))
+        {
+            NGRAPH_CHECK(element::from<Tin>() == element::from<Tout>(),
+                         "Input/output types do not match");
+
+            vector<Tout> out_vec(shape_size(out_shape));
+            shared_ptr<op::v1::Divide> divop = as_type_ptr<op::v1::Divide>(binary);
+            bool pythondiv = divop->is_pythondiv();
+            runtime::reference::divide<Tin>(a->get_data_ptr<Tin>(),
+                                            b->get_data_ptr<Tin>(),
+                                            out_vec.data(),
+                                            a->get_shape(),
+                                            b->get_shape(),
+                                            divide_v1_node->get_autob(),
+                                            pythondiv);
+            return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
+        }
+        else if (auto maximum_v0_node = as_type_ptr<op::v0::Maximum>(binary))
         {
             NGRAPH_CHECK(element::from<Tin>() == element::from<Tout>(),
                          "Input/output types do not match");
@@ -288,10 +405,36 @@ shared_ptr<op::Constant> fold_constant_binary_arithmetic(shared_ptr<op::Constant
                                              out_vec.data(),
                                              a->get_shape(),
                                              b->get_shape(),
-                                             maximum_node->get_autob());
+                                             maximum_v0_node->get_autob());
             return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
         }
-        else if (auto minimum_node = as_type_ptr<op::Minimum>(binary))
+        else if (auto maximum_v1_node = as_type_ptr<op::v1::Maximum>(binary))
+        {
+            NGRAPH_CHECK(element::from<Tin>() == element::from<Tout>(),
+                         "Input/output types do not match");
+
+            vector<Tout> out_vec(shape_size(out_shape));
+            runtime::reference::maximum<Tin>(a->get_data_ptr<Tin>(),
+                                             b->get_data_ptr<Tin>(),
+                                             out_vec.data(),
+                                             a->get_shape(),
+                                             b->get_shape(),
+                                             maximum_v1_node->get_autob());
+            return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
+        }
+        else if (auto minimum_v0_node = as_type_ptr<op::v0::Minimum>(binary))
+        {
+            NGRAPH_CHECK(element::from<Tin>() == element::from<Tout>(),
+                         "Input/output types do not match");
+
+            vector<Tout> out_vec(shape_size(out_shape));
+            runtime::reference::minimum<Tin>(a->get_data_ptr<Tin>(),
+                                             b->get_data_ptr<Tin>(),
+                                             out_vec.data(),
+                                             a->get_shape(),
+                                             b->get_shape(),
+                                             minimum_v0_node->get_autob());
+            return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
+        }
+        else if (auto minimum_v1_node = as_type_ptr<op::v1::Minimum>(binary))
         {
             NGRAPH_CHECK(element::from<Tin>() == element::from<Tout>(),
                          "Input/output types do not match");
@@ -301,10 +444,23 @@ shared_ptr<op::Constant> fold_constant_binary_arithmetic(shared_ptr<op::Constant
                                              out_vec.data(),
                                              a->get_shape(),
                                              b->get_shape(),
-                                             minimum_node->get_autob());
+                                             minimum_v1_node->get_autob());
             return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
         }
-        else if (auto multiply_node = as_type_ptr<op::Multiply>(binary))
+        else if (auto multiply_v0_node = as_type_ptr<op::v0::Multiply>(binary))
+        {
+            NGRAPH_CHECK(element::from<Tin>() == element::from<Tout>(),
+                         "Input/output types do not match");
+
+            vector<Tout> out_vec(shape_size(out_shape));
+            runtime::reference::multiply<Tin>(a->get_data_ptr<Tin>(),
+                                              b->get_data_ptr<Tin>(),
+                                              out_vec.data(),
+                                              a->get_shape(),
+                                              b->get_shape(),
+                                              multiply_v0_node->get_autob());
+            return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
+        }
+        else if (auto multiply_v1_node = as_type_ptr<op::v1::Multiply>(binary))
         {
             NGRAPH_CHECK(element::from<Tin>() == element::from<Tout>(),
                          "Input/output types do not match");
@@ -314,21 +470,35 @@ shared_ptr<op::Constant> fold_constant_binary_arithmetic(shared_ptr<op::Constant
                                               out_vec.data(),
                                               a->get_shape(),
                                               b->get_shape(),
-                                              multiply_node->get_autob());
+                                              multiply_v1_node->get_autob());
             return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
         }
-        else if (auto power_node = as_type_ptr<op::Power>(binary))
+        else if (auto power_v0_node = as_type_ptr<op::v0::Power>(binary))
+        {
+            NGRAPH_CHECK(element::from<Tin>() == element::from<Tout>(),
+                         "Input/output types do not match");
+
+            vector<Tout> out_vec(shape_size(out_shape));
+            shared_ptr<op::v0::Power> powop = as_type_ptr<op::v0::Power>(binary);
+            runtime::reference::power<Tin>(a->get_data_ptr<Tin>(),
+                                           b->get_data_ptr<Tin>(),
+                                           out_vec.data(),
+                                           a->get_shape(),
+                                           b->get_shape(),
+                                           power_v0_node->get_autob());
+            return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
+        }
+        else if (auto power_v1_node = as_type_ptr<op::v1::Power>(binary))
         {
             NGRAPH_CHECK(element::from<Tin>() == element::from<Tout>(),
                          "Input/output types do not match");

             vector<Tout> out_vec(shape_size(out_shape));
-            shared_ptr<op::Power> powop = as_type_ptr<op::Power>(binary);
+            shared_ptr<op::v1::Power> powop = as_type_ptr<op::v1::Power>(binary);
             runtime::reference::power<Tin>(a->get_data_ptr<Tin>(),
                                            b->get_data_ptr<Tin>(),
                                            out_vec.data(),
                                            a->get_shape(),
                                            b->get_shape(),
-                                           power_node->get_autob());
+                                           power_v1_node->get_autob());
             return make_shared<op::Constant>(binary->get_element_type(), out_shape, out_vec);
         }
         else if (auto subtract_node = as_type_ptr<op::Subtract>(binary))
@@ -375,12 +545,19 @@ shared_ptr<op::Constant> fold_constant_binary_helper(shared_ptr<op::Constant> a,
 bool is_supported_binary_op(std::shared_ptr<Node> n)
 {
-    return (is_type<op::Add>(n) || is_type<op::And>(n) || is_type<op::Divide>(n) ||
-            is_type<op::Equal>(n) || is_type<op::Greater>(n) || is_type<op::GreaterEq>(n) ||
-            is_type<op::Less>(n) || is_type<op::LessEq>(n) || is_type<op::Maximum>(n) ||
-            is_type<op::Minimum>(n) || is_type<op::Multiply>(n) || is_type<op::NotEqual>(n) ||
-            is_type<op::Or>(n) || is_type<op::Power>(n) || is_type<op::Subtract>(n) ||
-            is_type<op::Xor>(n));
+    return (
+        is_type<op::v0::Add>(n) || is_type<op::v1::Add>(n) || is_type<op::v0::Multiply>(n) ||
+        is_type<op::v1::Multiply>(n) || is_type<op::v0::Divide>(n) || is_type<op::v1::Divide>(n) ||
+        is_type<op::v0::Power>(n) || is_type<op::v1::Power>(n) || is_type<op::v0::Equal>(n) ||
+        is_type<op::v1::Equal>(n) || is_type<op::v0::NotEqual>(n) || is_type<op::v1::NotEqual>(n) ||
+        is_type<op::v0::Greater>(n) || is_type<op::v1::Greater>(n) ||
+        is_type<op::v0::GreaterEq>(n) || is_type<op::v1::GreaterEq>(n) ||
+        is_type<op::v0::Less>(n) || is_type<op::v1::Less>(n) || is_type<op::v0::LessEq>(n) ||
+        is_type<op::v1::LessEqual>(n) || is_type<op::v0::Maximum>(n) ||
+        is_type<op::v1::Maximum>(n) || is_type<op::v0::Minimum>(n) ||
+        is_type<op::v1::Minimum>(n) || is_type<op::v0::And>(n) ||
+        is_type<op::v1::LogicalAnd>(n) || is_type<op::v0::Or>(n) ||
+        is_type<op::v1::LogicalOr>(n) || is_type<op::v0::Xor>(n) ||
+        is_type<op::v1::LogicalXor>(n) || is_type<op::Subtract>(n));
 }

 void pass::ConstantFolding::construct_constant_binary()
src/ngraph/runtime/cpu/op/gelu_backprop.cpp

@@ -22,7 +22,7 @@ using namespace ngraph;
 constexpr NodeTypeInfo op::GeluBackprop::type_info;

 op::GeluBackprop::GeluBackprop(const Output<ngraph::Node>& arg, const Output<ngraph::Node>& delta)
-    : BinaryElementwiseArithmetic(arg, delta)
+    : BinaryElementwiseArithmetic(arg, delta, AutoBroadcastSpec::NONE)
 {
     constructor_validate_and_infer_types();
     set_output_size(1);
src/ngraph/serializer.cpp

@@ -2595,7 +2595,8 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
         case OP_TYPEID::SpaceToDepth:
         {
             auto block_size = node_js.at("block_size").get<size_t>();
-            node = make_shared<op::SpaceToDepth>(args[0], block_size);
+            auto mode = node_js.at("mode").get<op::SpaceToDepth::SpaceToDepthMode>();
+            node = make_shared<op::SpaceToDepth>(args[0], mode, block_size);
             break;
         }
         case OP_TYPEID::Split:
@@ -4069,6 +4070,7 @@ json JSONSerializer::serialize_node(const Node& n)
     {
         auto tmp = static_cast<const op::SpaceToDepth*>(&n);
         node["type"] = write_element_type(tmp->get_element_type());
+        node["mode"] = tmp->get_mode();
         node["block_size"] = tmp->get_block_size();
         break;
     }
test/backend/fused_op.in.cpp

@@ -573,10 +573,11 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_groups_included_in_shape)
     EXPECT_EQ(expected, read_vector<float>(result0));
 }

-NGRAPH_TEST(${BACKEND_NAME}, space_to_depth)
+NGRAPH_TEST(${BACKEND_NAME}, space_to_depth_block_first)
 {
     auto A = make_shared<op::Parameter>(element::f32, Shape{1, 2, 4, 4});
-    auto space_to_depth = make_shared<op::SpaceToDepth>(A, 2);
+    const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST;
+    auto space_to_depth = make_shared<op::SpaceToDepth>(A, mode, 2);
     auto function = make_shared<Function>(NodeVector{space_to_depth}, ParameterVector{A});

     auto test_case = test::NgraphTestCase(function, "${BACKEND_NAME}");
@@ -593,6 +594,24 @@ NGRAPH_TEST(${BACKEND_NAME}, space_to_depth_block_first)
     test_case.run();
 }

+NGRAPH_TEST(${BACKEND_NAME}, space_to_depth_depth_first)
+{
+    auto A = make_shared<op::Parameter>(element::f32, Shape{1, 2, 4, 4});
+    const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST;
+    auto space_to_depth = make_shared<op::SpaceToDepth>(A, mode, 2);
+    auto function = make_shared<Function>(NodeVector{space_to_depth}, ParameterVector{A});
+
+    auto test_case = test::NgraphTestCase(function, "${BACKEND_NAME}");
+    test_case.add_input<float>({0.f,  16.f, 2.f,  18.f, 1.f,  17.f, 3.f,  19.f, 8.f,  24.f, 10.f,
+                                26.f, 9.f,  25.f, 11.f, 27.f, 4.f,  20.f, 6.f,  22.f, 5.f,  21.f,
+                                7.f,  23.f, 12.f, 28.f, 14.f, 30.f, 13.f, 29.f, 15.f, 31.f});
+    test_case.add_expected_output<float>(
+        Shape{1, 8, 2, 2}, {0.f,  2.f,  8.f,  10.f, 16.f, 18.f, 24.f, 26.f, 1.f,  3.f,  9.f,
+                            11.f, 17.f, 19.f, 25.f, 27.f, 4.f,  6.f,  12.f, 14.f, 20.f, 22.f,
+                            28.f, 30.f, 5.f,  7.f,  13.f, 15.f, 21.f, 23.f, 29.f, 31.f});
+    test_case.run();
+}
+
 NGRAPH_TEST(${BACKEND_NAME}, depth_to_space_block_first)
 {
     auto A = make_shared<op::Parameter>(element::f32, Shape{1, 8, 2, 2});
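As a sanity check, the depth_first test vector above is reproducible with the space_to_depth_ref NumPy sketch given earlier for decompose_op() (a hypothetical helper, not part of this commit):

    import numpy as np

    # Input values exactly as passed to add_input<float>(...) above.
    inp = np.array([0, 16, 2, 18, 1, 17, 3, 19, 8, 24, 10, 26, 9, 25, 11, 27,
                    4, 20, 6, 22, 5, 21, 7, 23, 12, 28, 14, 30, 13, 29, 15, 31],
                   dtype=np.float32).reshape(1, 2, 4, 4)
    out = space_to_depth_ref(inp, 'depth_first', 2)
    # out.ravel() matches the add_expected_output values:
    # [0, 2, 8, 10, 16, 18, 24, 26, 1, 3, 9, 11, 17, 19, 25, 27,
    #  4, 6, 12, 14, 20, 22, 28, 30, 5, 7, 13, 15, 21, 23, 29, 31]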
test/serialize.cpp

@@ -760,3 +760,25 @@ TEST(serialize, depth_to_space)
     EXPECT_EQ(depth_to_space_out->get_block_size(), block_size);
     EXPECT_EQ(depth_to_space_out->get_mode(), mode);
 }
+
+TEST(serialize, space_to_depth)
+{
+    auto arg = make_shared<op::Parameter>(element::f32, Shape{4, 6, 8});
+    auto mode = op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST;
+    size_t block_size = 2;
+    auto space_to_depth_in = make_shared<op::SpaceToDepth>(arg, mode, block_size);
+
+    auto result = make_shared<op::Result>(space_to_depth_in);
+    auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
+    string s = serialize(f);
+
+    shared_ptr<Function> g = deserialize(s);
+    auto g_result = g->get_results().at(0);
+    auto g_space_to_depth = g_result->input(0).get_source_output().get_node_shared_ptr();
+    auto depth_to_space_out = as_type_ptr<op::SpaceToDepth>(g_space_to_depth);
+
+    EXPECT_EQ(depth_to_space_out->description(), "SpaceToDepth");
+    EXPECT_EQ(depth_to_space_out->get_version(), 0);
+    EXPECT_EQ(depth_to_space_out->get_block_size(), block_size);
+    EXPECT_EQ(depth_to_space_out->get_mode(), mode);
+}
test/type_prop/space_to_depth.cpp

@@ -24,8 +24,28 @@ using namespace ngraph;
 TEST(type_prop, space_to_depth)
 {
     auto A = make_shared<op::Parameter>(element::f32, Shape{1, 2, 64, 64});
-    auto space_to_depth = make_shared<op::SpaceToDepth>(A, 8);
+    const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST;
+    auto space_to_depth = make_shared<op::SpaceToDepth>(A, mode, 8);

     ASSERT_EQ(space_to_depth->get_element_type(), element::f32);
     ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 128, 8, 8}));
 }
+
+TEST(type_prop, space_to_depth_input_rank_not_supported)
+{
+    auto A = make_shared<op::Parameter>(element::f32, Shape{1, 8, 8, 8, 4});
+    try
+    {
+        auto space_to_depth =
+            make_shared<op::DepthToSpace>(A, op::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, 2);
+        FAIL() << "Not supported input shape for SpaceToDepth exception not thrown";
+    }
+    catch (const ngraph_error& error)
+    {
+        EXPECT_HAS_SUBSTRING(error.what(), "The provided tensor shape: ");
+    }
+    catch (...)
+    {
+        FAIL() << "SpaceToDepth decomposition failed for unexpected reason";
+    }
+}