ngraph / Commits / 82622ac8

Commit 82622ac8, authored Sep 20, 2019 by Mateusz Bencer, committed by Michał Karzyński on Sep 20, 2019
[SPEC] Implement ReduceSum:v1 and ReduceProduct:v1 (#3556)
Parent: dfb1476a
Showing 19 changed files with 732 additions and 21 deletions (+732 / -21).
Changed files:

  src/ngraph/CMakeLists.txt                 +4    -0
  src/ngraph/ngraph.hpp                     +2    -0
  src/ngraph/op/product.cpp                 +6    -6
  src/ngraph/op/product.hpp                 +5    -0
  src/ngraph/op/reduce_prod.cpp             +90   -0
  src/ngraph/op/reduce_prod.hpp             +64   -0
  src/ngraph/op/reduce_sum.cpp              +101  -0
  src/ngraph/op/reduce_sum.hpp              +115  -0
  src/ngraph/op/sum.cpp                     +6    -6
  src/ngraph/op/sum.hpp                     +6    -1
  src/ngraph/pass/opset1_upgrade.cpp        +22   -0
  src/ngraph/runtime/cpu/cpu_emitter.hpp    +2    -2
  src/ngraph/serializer.cpp                 +44   -6
  test/CMakeLists.txt                       +5    -0
  test/opset_pass/product_opset_pass.cpp    +48   -0
  test/opset_pass/sum_opset_pass.cpp        +48   -0
  test/serialize.cpp                        +42   -0
  test/type_prop/reduce_prod.cpp            +61   -0
  test/type_prop/reduce_sum.cpp             +61   -0
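Before the per-file diffs, a minimal usage sketch (not part of the commit) of the two v1 ops it introduces; it follows the constructors and type_prop tests shown below, assumes `using namespace std;` and `using namespace ngraph;`, and uses `data` and `axes` as placeholder names:

    // Sketch only: constructing the new v1 reduction ops added by this commit.
    auto data = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
    auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});

    // keep_dims = true keeps the reduced axes as size-1 dimensions.
    auto sum  = make_shared<op::v1::ReduceSum>(data, axes, true);   // output shape {3, 1, 1}
    auto prod = make_shared<op::v1::ReduceProd>(data, axes, false); // output shape {3}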
src/ngraph/CMakeLists.txt

@@ -252,6 +252,10 @@ set (SRC
     op/power.hpp
     op/product.cpp
     op/product.hpp
+    op/reduce_prod.cpp
+    op/reduce_prod.hpp
+    op/reduce_sum.cpp
+    op/reduce_sum.hpp
     op/quantize.cpp
     op/quantize.hpp
     op/quantized_convolution.cpp
src/ngraph/ngraph.hpp

@@ -178,6 +178,8 @@ namespace ngraph
 #include "ngraph/op/quantized_convolution.hpp"
 #include "ngraph/op/quantized_dot.hpp"
 #include "ngraph/op/recv.hpp"
+#include "ngraph/op/reduce_prod.hpp"
+#include "ngraph/op/reduce_sum.hpp"
 #include "ngraph/op/relu.hpp"
 #include "ngraph/op/replace_slice.hpp"
 #include "ngraph/op/reshape.hpp"
src/ngraph/op/product.cpp

@@ -20,27 +20,27 @@
 using namespace std;
 using namespace ngraph;

-constexpr NodeTypeInfo op::Product::type_info;
+constexpr NodeTypeInfo op::v0::Product::type_info;

-op::Product::Product(const Output<Node>& arg, const AxisSet& reduction_axes)
+op::v0::Product::Product(const Output<Node>& arg, const AxisSet& reduction_axes)
     : ArithmeticReduction(arg, reduction_axes)
 {
     constructor_validate_and_infer_types();
 }

-op::Product::Product(const Output<Node>& arg, const Output<Node>& reduction_axes)
+op::v0::Product::Product(const Output<Node>& arg, const Output<Node>& reduction_axes)
     : ArithmeticReduction(arg, reduction_axes)
 {
     constructor_validate_and_infer_types();
 }

-shared_ptr<Node> op::Product::copy_with_new_args(const NodeVector& new_args) const
+shared_ptr<Node> op::v0::Product::copy_with_new_args(const NodeVector& new_args) const
 {
     check_new_args_count(this, new_args);
-    return make_shared<Product>(new_args.at(0), get_reduction_axes());
+    return make_shared<op::v0::Product>(new_args.at(0), get_reduction_axes());
 }

-shared_ptr<Node> op::Product::get_default_value() const
+shared_ptr<Node> op::v0::Product::get_default_value() const
 {
     return ngraph::make_constant_from_string("1", get_element_type(), get_shape());
 }
src/ngraph/op/product.hpp

@@ -21,6 +21,8 @@
 namespace ngraph
 {
     namespace op
     {
+        namespace v0
+        {
         /// \brief Product reduction operation.
         ///

@@ -51,4 +53,7 @@ namespace ngraph
                 copy_with_new_args(const NodeVector& new_args) const override;
             };
+        }
+        // default opset version
+        using v0::Product;
     }
 }
src/ngraph/op/reduce_prod.cpp (new file, mode 100644)

//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include "ngraph/op/reduce_prod.hpp"
#include "ngraph/graph_util.hpp"

using namespace std;
using namespace ngraph;

constexpr NodeTypeInfo op::v1::ReduceProd::type_info;

op::v1::ReduceProd::ReduceProd(const Output<Node>& arg,
                               const Output<Node>& reduction_axes,
                               bool keep_dims)
    : ArithmeticReduction(arg, reduction_axes)
    , m_keep_dims{keep_dims}
{
    constructor_validate_and_infer_types();
}

shared_ptr<Node> op::v1::ReduceProd::get_default_value() const
{
    return ngraph::make_constant_from_string("1", get_element_type(), get_shape());
}

shared_ptr<Node> op::v1::ReduceProd::copy_with_new_args(const NodeVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<ReduceProd>(new_args.at(0), new_args.at(1), m_keep_dims);
}

void op::v1::ReduceProd::validate_and_infer_types()
{
    if (m_keep_dims)
    {
        auto reduction_axes = get_reduction_axes();
        auto input_shape = get_input_partial_shape(0);
        auto input_rank = input_shape.rank();
        PartialShape result_shape{PartialShape::dynamic()};

        if (input_rank.is_static() && reduction_axes_constant())
        {
            std::vector<Dimension> dims;
            for (auto axis : reduction_axes)
            {
                NODE_VALIDATION_CHECK(this,
                                      axis < size_t(input_rank),
                                      "Reduction axis (",
                                      axis,
                                      ") is out of bounds ",
                                      "(argument shape: ",
                                      input_shape,
                                      ", reduction axes: ",
                                      reduction_axes,
                                      ")");
            }

            for (size_t i = 0; i < size_t(input_rank); i++)
            {
                if (reduction_axes.count(i) == 0)
                {
                    dims.push_back(input_shape[i]);
                }
                else
                {
                    dims.push_back(Dimension{1});
                }
            }
            result_shape = PartialShape(dims);
        }
        set_input_is_relevant_to_shape(1);
        set_output_type(0, get_input_element_type(0), result_shape);
    }
    else
    {
        ArithmeticReduction::validate_and_infer_types();
    }
}
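To make the keep_dims branch above concrete, a small worked example (a sketch, not commit content; the same behaviour is asserted by the type_prop tests at the end of this commit):

    // Shape inference implied by validate_and_infer_types() above, for input shape {3, 4, 5}
    // and reduction_axes {1, 2}:
    //   keep_dims == true  -> reduced axes become Dimension{1}: output shape {3, 1, 1}
    //   keep_dims == false -> base-class path eliminates the axes: output shape {3}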
src/ngraph/op/reduce_prod.hpp (new file, mode 100644)

//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include "ngraph/op/util/arithmetic_reduction.hpp"

namespace ngraph
{
    namespace op
    {
        namespace v1
        {
            /// \brief Product reduction operation.
            ///
            /// Reduces the tensor, eliminating the specified reduction axes by taking the product.
            class ReduceProd : public util::ArithmeticReduction
            {
            public:
                NGRAPH_API
                static constexpr NodeTypeInfo type_info{"Product", 1};
                const NodeTypeInfo& get_type_info() const override { return type_info; }
                /// \brief Constructs a product reduction operation.
                ReduceProd() = default;
                /// \brief Constructs a product reduction operation.
                ///
                /// \param arg The tensor to be reduced.
                /// \param reduction_axes The axis positions (0-based) to be eliminated.
                /// \param keep_dims If set to true it holds axes that are used for reduction.
                ReduceProd(const Output<Node>& arg,
                           const Output<Node>& reduction_axes,
                           bool keep_dims = false);

                void validate_and_infer_types() override;

                size_t get_version() const override { return 1; }
                /// \return If set to 1 it holds axes that are used for reduction.
                /// For each such axis, output dimension is equal to 1.
                bool get_keep_dims() const { return m_keep_dims; }
                /// \return The default value for Product.
                virtual std::shared_ptr<Node> get_default_value() const override;

                virtual std::shared_ptr<Node>
                    copy_with_new_args(const NodeVector& new_args) const override;

            private:
                bool m_keep_dims;
            };
        }
    }
}
src/ngraph/op/reduce_sum.cpp (new file, mode 100644)

//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include "ngraph/op/reduce_sum.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/broadcast.hpp"

using namespace std;
using namespace ngraph;

constexpr NodeTypeInfo op::v1::ReduceSum::type_info;

op::v1::ReduceSum::ReduceSum(const Output<Node>& arg,
                             const Output<Node>& reduction_axes,
                             bool keep_dims)
    : ArithmeticReduction(arg, reduction_axes)
    , m_keep_dims{keep_dims}
{
    constructor_validate_and_infer_types();
}

shared_ptr<Node> op::v1::ReduceSum::get_default_value() const
{
    return ngraph::make_constant_from_string("0", get_element_type(), get_shape());
}

shared_ptr<Node> op::v1::ReduceSum::copy_with_new_args(const NodeVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<ReduceSum>(new_args.at(0), new_args.at(1), m_keep_dims);
}

void op::v1::ReduceSum::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
{
    auto delta = deltas.at(0);

    auto x = input_value(0);
    auto& x_shape = x.get_shape();

    adjoints.add_delta(x, make_shared<op::Broadcast>(delta, x_shape, get_reduction_axes()));
}

void op::v1::ReduceSum::validate_and_infer_types()
{
    if (m_keep_dims)
    {
        auto reduction_axes = get_reduction_axes();
        auto input_shape = get_input_partial_shape(0);
        auto input_rank = input_shape.rank();
        PartialShape result_shape{PartialShape::dynamic()};

        if (input_rank.is_static() && reduction_axes_constant())
        {
            std::vector<Dimension> dims;
            for (auto axis : reduction_axes)
            {
                NODE_VALIDATION_CHECK(this,
                                      axis < size_t(input_rank),
                                      "Reduction axis (",
                                      axis,
                                      ") is out of bounds ",
                                      "(argument shape: ",
                                      input_shape,
                                      ", reduction axes: ",
                                      reduction_axes,
                                      ")");
            }

            for (size_t i = 0; i < size_t(input_rank); i++)
            {
                if (reduction_axes.count(i) == 0)
                {
                    dims.push_back(input_shape[i]);
                }
                else
                {
                    dims.push_back(Dimension{1});
                }
            }
            result_shape = PartialShape(dims);
        }
        set_input_is_relevant_to_shape(1);
        set_output_type(0, get_input_element_type(0), result_shape);
    }
    else
    {
        ArithmeticReduction::validate_and_infer_types();
    }
}
src/ngraph/op/reduce_sum.hpp (new file, mode 100644)

//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include "ngraph/axis_set.hpp"
#include "ngraph/op/util/arithmetic_reduction.hpp"

namespace ngraph
{
    namespace op
    {
        namespace v1
        {
            // clang-format off
            /// \brief Tensor sum operation.
            ///
            /// Element-wise sums the input tensor, eliminating the specified reduction axes.
            /// For example:
            ///
            /// \f[
            ///     \mathit{sum}\left(\{0\},
            ///         \left[ \begin{array}{ccc}
            ///                1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) =
            ///     \left[ (1 + 3 + 5), (2 + 4 + 6) \right] =
            ///     \left[ 9, 12 \right]~~~\text{(dimension 0 (rows) is eliminated)}
            /// \f]
            ///
            /// \f[
            ///     \mathit{sum}\left(\{1\},
            ///         \left[ \begin{array}{ccc}
            ///                1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) =
            ///     \left[ (1 + 2), (3 + 4), (5 + 6) \right] =
            ///     \left[ 3, 7, 11 \right]~~~\text{(dimension 1 (columns) is eliminated)}
            /// \f]
            ///
            /// \f[
            ///     \mathit{sum}\left(\{0,1\},
            ///         \left[ \begin{array}{ccc}
            ///                1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) =
            ///      (1 + 2) + (3 + 4) + (5 + 6) =
            ///      21~~~\text{(both dimensions (rows and columns) are eliminated)}
            /// \f]
            ///
            /// ## Parameters
            ///
            /// |                  | Description                                             |
            /// | ---------------- | ------------------------------------------------------- |
            /// | `reduction_axes` | The axes to eliminate through summation.                 |
            /// | `keep_dims`      | If set to 1 it holds axes that are used for reduction.   |
            ///
            /// ## Inputs
            ///
            /// |       | Type                              | Description                                             |
            /// | ----- | --------------------------------- | ------------------------------------------------------- |
            /// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape and numeric element type.  |
            ///
            /// ## Output
            ///
            /// | Type                                      | Description                                                                                                       |
            /// | ----------------------------------------- | ----------------------------------------------------------------------------------------------------------------- |
            /// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by summation.  |
            // clang-format off
            class ReduceSum : public util::ArithmeticReduction
            {
            public:
                NGRAPH_API
                static constexpr NodeTypeInfo type_info{"Sum", 1};
                const NodeTypeInfo& get_type_info() const override { return type_info; }
                /// \brief Constructs a summation operation.
                ReduceSum() = default;
                /// \brief Constructs a summation operation.
                ///
                /// \param arg The tensor to be summed.
                /// \param reduction_axes The axis positions (0-based) to be eliminated.
                /// \param keep_dims If set to 1 it holds axes that are used for reduction.
                ReduceSum(const Output<Node>& arg,
                          const Output<Node>& reduction_axes,
                          bool keep_dims = false);

                void validate_and_infer_types() override;

                size_t get_version() const override { return 1; }
                /// \return If set to 1 it holds axes that are used for reduction.
                /// For each such axis, output dimension is equal to 1.
                bool get_keep_dims() const { return m_keep_dims; }

                virtual std::shared_ptr<Node>
                    copy_with_new_args(const NodeVector& new_args) const override;

                /// \return The default value for Sum.
                virtual std::shared_ptr<Node> get_default_value() const override;

            protected:
                virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                               const NodeVector& deltas) override;

            private:
                bool m_keep_dims;
            };
        }
    }
}
src/ngraph/op/sum.cpp

@@ -21,15 +21,15 @@
 using namespace std;
 using namespace ngraph;

-constexpr NodeTypeInfo op::Sum::type_info;
+constexpr NodeTypeInfo op::v0::Sum::type_info;

-op::Sum::Sum(const Output<Node>& arg, const AxisSet& reduction_axes)
+op::v0::Sum::Sum(const Output<Node>& arg, const AxisSet& reduction_axes)
     : ArithmeticReduction(arg, reduction_axes)
 {
     constructor_validate_and_infer_types();
 }

-op::Sum::Sum(const Output<Node>& arg, const Output<Node>& reduction_axes)
+op::v0::Sum::Sum(const Output<Node>& arg, const Output<Node>& reduction_axes)
     : ArithmeticReduction(arg, reduction_axes)
 {
     constructor_validate_and_infer_types();

@@ -38,10 +38,10 @@ op::Sum::Sum(const Output<Node>& arg, const Output<Node>& reduction_axes)
 shared_ptr<Node> op::Sum::copy_with_new_args(const NodeVector& new_args) const
 {
     check_new_args_count(this, new_args);
-    return make_shared<Sum>(new_args.at(0), new_args.at(1));
+    return make_shared<op::v0::Sum>(new_args.at(0), new_args.at(1));
 }

-void op::Sum::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
+void op::v0::Sum::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
 {
     auto delta = deltas.at(0);

@@ -51,7 +51,7 @@ void op::Sum::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector&
     adjoints.add_delta(x, make_shared<op::Broadcast>(delta, x_shape, get_reduction_axes()));
 }

-shared_ptr<Node> op::Sum::get_default_value() const
+shared_ptr<Node> op::v0::Sum::get_default_value() const
 {
     return ngraph::make_constant_from_string("0", get_element_type(), get_shape());
 }
src/ngraph/op/sum.hpp

@@ -23,6 +23,8 @@
 namespace ngraph
 {
     namespace op
     {
+        namespace v0
+        {
         // clang-format off
         /// \brief Tensor sum operation.

@@ -76,7 +78,7 @@ namespace ngraph
         {
         public:
             NGRAPH_API
-            static constexpr NodeTypeInfo type_info{"Sum", 0};
+            static constexpr NodeTypeInfo type_info{"Sum", 0};
             const NodeTypeInfo& get_type_info() const override { return type_info; }
             /// \brief Constructs a summation operation.
             Sum() = default;

@@ -102,4 +104,7 @@ namespace ngraph
                                           const NodeVector& deltas) override;
             };
+        }
+        // default opset version
+        using v0::Sum;
     }
 }
src/ngraph/pass/opset1_upgrade.cpp

@@ -19,7 +19,11 @@
 #include "ngraph/op/gather.hpp"
 #include "ngraph/op/get_output_element.hpp"
 #include "ngraph/op/pad.hpp"
+#include "ngraph/op/product.hpp"
+#include "ngraph/op/reduce_prod.hpp"
+#include "ngraph/op/reduce_sum.hpp"
 #include "ngraph/op/softmax.hpp"
+#include "ngraph/op/sum.hpp"

 using namespace std;
 using namespace ngraph;

@@ -94,6 +98,24 @@ bool pass::Opset1Upgrade::run_on_node(shared_ptr<Node> node)
         modified = true;
         break;
     }
+    case OP_TYPEID::Product:
+    {
+        bool keep_dims = false;
+        auto replacement_node = make_shared<op::v1::ReduceProd>(
+            node->input(0).get_source_output(), node->input(1).get_source_output(), keep_dims);
+        replace_node(node, replacement_node);
+        modified = true;
+        break;
+    }
+    case OP_TYPEID::Sum:
+    {
+        bool keep_dims = false;
+        auto replacement_node = make_shared<op::v1::ReduceSum>(
+            node->input(0).get_source_output(), node->input(1).get_source_output(), keep_dims);
+        replace_node(node, replacement_node);
+        modified = true;
+        break;
+    }
     case OP_TYPEID::Pad:
     {
         auto tmp = dynamic_cast<const op::v0::Pad*>(node.get());
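For reference, a minimal sketch of how this upgrade pass is driven (the opset_pass tests added below do exactly this; `f` stands for a Function that contains v0 Sum/Product nodes):

    // Sketch only: running the Opset1Upgrade pass over an existing Function `f`.
    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<ngraph::pass::Opset1Upgrade>();
    pass_manager.run_passes(f); // v0::Sum -> v1::ReduceSum, v0::Product -> v1::ReduceProd (keep_dims = false)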
src/ngraph/runtime/cpu/cpu_emitter.hpp

@@ -23,6 +23,8 @@
 #include "ngraph/node.hpp"
 #include "ngraph/op/gather.hpp"
 #include "ngraph/op/pad.hpp"
+#include "ngraph/op/product.hpp"
+#include "ngraph/op/sum.hpp"
 #include "ngraph/runtime/cpu/cpu_external_function.hpp"
 #include "ngraph/runtime/cpu/cpu_tensor_view_wrapper.hpp"

@@ -77,7 +79,6 @@ namespace ngraph
             class Reshape;
             class Sign;
             class Slice;
-            class Sum;
             class Exp;
             class EmbeddingLookup;
             class Sin;

@@ -130,7 +131,6 @@ namespace ngraph
             class AvgPoolBackprop;
             class MaxPoolBackprop;
             class MaxPoolWithIndicesBackprop;
-            class Product;
             class Max;
             class Erf;
             class Min;
src/ngraph/serializer.cpp

@@ -122,6 +122,8 @@
 #include "ngraph/op/quantized_convolution.hpp"
 #include "ngraph/op/quantized_dot.hpp"
 #include "ngraph/op/recv.hpp"
+#include "ngraph/op/reduce_prod.hpp"
+#include "ngraph/op/reduce_sum.hpp"
 #include "ngraph/op/relu.hpp"
 #include "ngraph/op/replace_slice.hpp"
 #include "ngraph/op/reshape.hpp"

@@ -1629,9 +1631,20 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
             break;
         }
         case OP_TYPEID::Product:
         {
-            auto reduction_axes = deserialize_axis_set(node_js.at("reduction_axes"));
-            node = make_shared<op::Product>(args[0], reduction_axes);
+            if (op_version == 0)
+            {
+                auto reduction_axes = deserialize_axis_set(node_js.at("reduction_axes"));
+                if (reduction_axes.empty())
+                    node = make_shared<op::v0::Product>(args[0], args[1]);
+                else
+                    node = make_shared<op::v0::Product>(args[0], reduction_axes);
+            }
+            if (op_version == 1)
+            {
+                auto keep_dims = node_js.at("keep_dims").get<bool>();
+                node = make_shared<op::v1::ReduceProd>(args[0], args[1], keep_dims);
+            }
             break;
         }
         case OP_TYPEID::Quantize:

@@ -1918,9 +1931,20 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
             break;
         }
         case OP_TYPEID::Sum:
         {
-            auto reduction_axes = deserialize_axis_set(node_js.at("reduction_axes"));
-            node = make_shared<op::Sum>(args[0], reduction_axes);
+            if (op_version == 0)
+            {
+                auto reduction_axes = deserialize_axis_set(node_js.at("reduction_axes"));
+                if (reduction_axes.empty())
+                    node = make_shared<op::v0::Sum>(args[0], args[1]);
+                else
+                    node = make_shared<op::v0::Sum>(args[0], reduction_axes);
+            }
+            if (op_version == 1)
+            {
+                auto keep_dims = node_js.at("keep_dims").get<bool>();
+                node = make_shared<op::v1::ReduceSum>(args[0], args[1], keep_dims);
+            }
             break;
         }
         case OP_TYPEID::Tan:

@@ -2751,8 +2775,15 @@ json JSONSerializer::serialize_node(const Node& n)
     }
     case OP_TYPEID::Product:
     {
         auto tmp = dynamic_cast<const op::Product*>(&n);
         node["reduction_axes"] = serialize_axis_set(tmp->get_reduction_axes());
+        if (op_version == 0)
+        {
+            break;
+        }
+        if (op_version == 1)
+        {
+            auto tmp = dynamic_cast<const op::v1::ReduceProd*>(&n);
+            node["keep_dims"] = tmp->get_keep_dims();
+        }
         break;
     }
     case OP_TYPEID::Power:

@@ -2950,8 +2981,15 @@ json JSONSerializer::serialize_node(const Node& n)
     }
     case OP_TYPEID::Sum:
     {
         auto tmp = dynamic_cast<const op::Sum*>(&n);
         node["reduction_axes"] = serialize_axis_set(tmp->get_reduction_axes());
+        if (op_version == 0)
+        {
+            break;
+        }
+        if (op_version == 1)
+        {
+            auto tmp = dynamic_cast<const op::v1::ReduceSum*>(&n);
+            node["keep_dims"] = tmp->get_keep_dims();
+        }
         break;
     }
     case OP_TYPEID::Softmax:
test/CMakeLists.txt

@@ -69,6 +69,9 @@ set(SRC
     node_input_output.cpp
     nop_elimination.cpp
     op.cpp
+    opset_pass/sum_opset_pass.cpp
+    opset_pass/product_opset_pass.cpp
+    opset_pass/softmax_opset_pass.cpp
     opset_pass/softmax_opset_pass.cpp
     opset_pass/gather_opset_pass.cpp
     opset_pass/pad_opset_pass.cpp

@@ -153,6 +156,8 @@ set(SRC
     type_prop/squared_difference.cpp
     type_prop/squeeze.cpp
     type_prop/sum.cpp
+    type_prop/reduce_prod.cpp
+    type_prop/reduce_sum.cpp
     type_prop/tile.cpp
     type_prop/top_k.cpp
     type_prop/transpose.cpp
test/opset_pass/product_opset_pass.cpp (new file, mode 100644)

//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include "gmock/gmock.h"
#include "gtest/gtest.h"

#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset1_upgrade.hpp"
#include "util/type_prop.hpp"

using namespace std;
using namespace ngraph;

TEST(serialize, opset1_product_upgrade)
{
    const auto data = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
    const AxisSet reduction_axes{1, 2};

    const auto product_v0 = make_shared<op::Product>(data, reduction_axes);
    const auto result = make_shared<op::Result>(product_v0);
    auto f = make_shared<Function>(ResultVector{result}, ParameterVector{data});

    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<pass::Opset1Upgrade>();
    pass_manager.run_passes(f);

    const auto pass_replacement_node =
        f->get_result()->input(0).get_source_output().get_node_shared_ptr();
    const auto reduce_prod_v1 = static_pointer_cast<op::v1::ReduceProd>(pass_replacement_node);

    EXPECT_EQ(reduce_prod_v1->description(), "Product");
    EXPECT_EQ(reduce_prod_v1->get_version(), 1);
    EXPECT_EQ(reduce_prod_v1->get_keep_dims(), false);
}
test/opset_pass/sum_opset_pass.cpp (new file, mode 100644)

//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include "gmock/gmock.h"
#include "gtest/gtest.h"

#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset1_upgrade.hpp"
#include "util/type_prop.hpp"

using namespace std;
using namespace ngraph;

TEST(serialize, opset1_sum_upgrade)
{
    const auto data = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
    const AxisSet reduction_axes{1, 2};

    const auto sum_v0 = make_shared<op::Sum>(data, reduction_axes);
    const auto result = make_shared<op::Result>(sum_v0);
    auto f = make_shared<Function>(ResultVector{result}, ParameterVector{data});

    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<pass::Opset1Upgrade>();
    pass_manager.run_passes(f);

    const auto pass_replacement_node =
        f->get_result()->input(0).get_source_output().get_node_shared_ptr();
    const auto reduce_sum_v1 = static_pointer_cast<op::v1::ReduceProd>(pass_replacement_node);

    EXPECT_EQ(reduce_sum_v1->description(), "Sum");
    EXPECT_EQ(reduce_sum_v1->get_version(), 1);
    EXPECT_EQ(reduce_sum_v1->get_keep_dims(), false);
}
test/serialize.cpp

@@ -376,6 +376,48 @@ TEST(serialize, opset1_gather)
     EXPECT_EQ(g_gather->get_version(), 1);
 }

+TEST(serialize, opset1_product)
+{
+    auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
+    auto keep_dims = true;
+    auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
+
+    auto reduce_prod = make_shared<op::v1::ReduceProd>(arg, axes, keep_dims);
+    auto result = make_shared<op::Result>(reduce_prod);
+    auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
+
+    string s = serialize(f);
+    shared_ptr<Function> g = deserialize(s);
+    auto g_result = g->get_results().at(0);
+    auto g_red_prod = g_result->input(0).get_source_output().get_node_shared_ptr();
+
+    EXPECT_EQ(g_red_prod->description(), "Product");
+    EXPECT_EQ(g_red_prod->get_version(), 1);
+    EXPECT_EQ(dynamic_cast<const op::v1::ReduceProd*>(g_red_prod.get())->get_keep_dims(), 1);
+    EXPECT_EQ(dynamic_cast<const op::v1::ReduceProd*>(g_red_prod.get())->get_reduction_axes(),
+              AxisSet({1, 2}));
+}
+
+TEST(serialize, opset1_sum)
+{
+    auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
+    auto keep_dims = true;
+    auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
+
+    auto reduce_sum = make_shared<op::v1::ReduceSum>(arg, axes, keep_dims);
+    auto result = make_shared<op::Result>(reduce_sum);
+    auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
+
+    string s = serialize(f);
+    shared_ptr<Function> g = deserialize(s);
+    auto g_result = g->get_results().at(0);
+    auto g_red_sum = g_result->input(0).get_source_output().get_node_shared_ptr();
+
+    EXPECT_EQ(g_red_sum->description(), "Sum");
+    EXPECT_EQ(g_red_sum->get_version(), 1);
+    EXPECT_EQ(dynamic_cast<const op::v1::ReduceSum*>(g_red_sum.get())->get_keep_dims(), 1);
+    EXPECT_EQ(dynamic_cast<const op::v1::ReduceSum*>(g_red_sum.get())->get_reduction_axes(),
+              AxisSet({1, 2}));
+}
+
 TEST(serialize, opset1_pad)
 {
     auto arg = make_shared<op::Parameter>(element::f32, Shape{4, 5, 6});
test/type_prop/reduce_prod.cpp (new file, mode 100644)

//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"

using namespace std;
using namespace ngraph;

TEST(type_prop, reduce_prod_v1_axis_out_of_range)
{
    auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
    auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{2, 3});
    try
    {
        auto reduce_prod = make_shared<op::v1::ReduceProd>(arg, axes);
        // Should have thrown, so fail if it didn't
        FAIL() << "Incorrect axes values exception not thrown";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis ("));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, reduce_prod_v1_shape_if_keep_dims)
{
    auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
    auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
    auto keep_dims = true;
    auto reduce_prod = make_shared<op::v1::ReduceProd>(arg, axes, keep_dims);

    ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1}));
}

TEST(type_prop, reduce_prod_v1_shape_if_not_keep_dims)
{
    auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
    auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
    auto keep_dims = false;
    auto reduce_prod = make_shared<op::v1::ReduceProd>(arg, axes, keep_dims);

    ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3}));
}
test/type_prop/reduce_sum.cpp (new file, mode 100644)

//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"

using namespace std;
using namespace ngraph;

TEST(type_prop, reduce_sum_v1_axis_out_of_range)
{
    auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
    auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{2, 3});
    try
    {
        auto reduce_sum = make_shared<op::v1::ReduceSum>(arg, axes);
        // Should have thrown, so fail if it didn't
        FAIL() << "Incorrect axes values exception not thrown";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis ("));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, reduce_sum_v1_shape_if_keep_dims)
{
    auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
    auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
    auto keep_dims = true;
    auto reduce_prod = make_shared<op::v1::ReduceSum>(arg, axes, keep_dims);

    ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1}));
}

TEST(type_prop, reduce_sum_v1_shape_if_not_keep_dims)
{
    auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
    auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
    auto keep_dims = false;
    auto reduce_prod = make_shared<op::v1::ReduceSum>(arg, axes, keep_dims);

    ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3}));
}