Commit 3ee833b7 authored by Katarzyna Mitrus's avatar Katarzyna Mitrus Committed by Scott Cyphers

[ONNX] Add CumSum to ONNX importer (#3918)

* Register CumSum operator in onnx importer

* Missing whitespace

* Update CMakeLists.txt

* ONNX importer - CumSum op init

* Simple CumSum onnx model

* ONNX CumSum model simple test

* Default axis

* Axis input test

* Inputs variable

* Style apply

* Test 3d exclusive reverse

* Apply style

* Add memory header and std namespace

* Add model_cum_sum tests to plaidml unit_test.manifest

* Add model_cum_sum tests to plaidml unit_test.manifest

* Changed default axis type

* Test model update

* Style apply

* Add test for dynamic axis input
parent 6433a8f0
......@@ -75,6 +75,8 @@ add_library(onnx_import STATIC
op/conv_integer.hpp
op/conv_transpose.cpp
op/conv_transpose.hpp
op/cum_sum.cpp
op/cum_sum.hpp
op/depth_to_space.cpp
op/depth_to_space.hpp
op/dequantize_linear.cpp
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <memory>
#include "cum_sum.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/cum_sum.hpp"
namespace ngraph
{
    namespace onnx_import
    {
        namespace op
        {
            namespace set_1
            {
                NodeVector cum_sum(const Node& node)
                {
                    // Translate an ONNX CumSum node into the equivalent nGraph CumSum op.
                    const auto ng_inputs = node.get_ng_inputs();
                    const auto data = ng_inputs.at(0);

                    // ONNX encodes both flags as int64 attributes with a default of 0;
                    // any non-zero value enables the mode.
                    const bool is_exclusive =
                        node.get_attribute_value<std::int64_t>("exclusive", 0) != 0;
                    const bool is_reversed =
                        node.get_attribute_value<std::int64_t>("reverse", 0) != 0;

                    // The second input (a 0-D axis tensor) is optional;
                    // when absent the ONNX spec's default axis 0 is used.
                    const auto axis =
                        (ng_inputs.size() > 1)
                            ? ng_inputs.at(1)
                            : std::shared_ptr<ngraph::Node>{
                                  ngraph::op::Constant::create(element::i64, Shape{}, {0})};

                    return NodeVector{std::make_shared<ngraph::op::CumSum>(
                        data, axis, is_exclusive, is_reversed)};
                }
            } // namespace set_1
        }     // namespace op
    }         // namespace onnx_import
}             // namespace ngraph
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "core/node.hpp"
#include "ngraph/node.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace op
{
namespace set_1
{
/// \brief Converts an ONNX CumSum operation to an nGraph node.
///
/// Reads the ONNX node's optional `axis` input and its `exclusive`/`reverse`
/// attributes and produces the corresponding ngraph::op::CumSum.
///
/// \param node  The ONNX node representing the CumSum operation.
/// \return      A NodeVector holding the single resulting CumSum node.
NodeVector cum_sum(const Node& node);
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph
......@@ -45,6 +45,7 @@
#include "op/conv_transpose.hpp"
#include "op/cos.hpp"
#include "op/cosh.hpp"
#include "op/cum_sum.hpp"
#include "op/depth_to_space.hpp"
#include "op/dequantize_linear.hpp"
#include "op/div.hpp"
......@@ -258,6 +259,7 @@ namespace ngraph
REGISTER_OPERATOR("ConvTranspose", 1, conv_transpose);
REGISTER_OPERATOR("Cos", 1, cos);
REGISTER_OPERATOR("Cosh", 1, cosh);
REGISTER_OPERATOR("CumSum", 1, cum_sum);
REGISTER_OPERATOR("DepthToSpace", 1, depth_to_space);
REGISTER_OPERATOR("DequantizeLinear", 1, dequantize_linear);
REGISTER_OPERATOR("Div", 1, div);
......
......@@ -246,6 +246,10 @@ cum_sum_default
cum_sum_2dim
cum_sum_3d
cum_sum_2dim_allmodes
model_cum_sum_1d
model_cum_sum_2d_axis_input
model_cum_sum_2d_dynamic_axis_input
model_cum_sum_3d_exclusive_reverse
# onnx tests
model_quant_conv_linear_2d
......
ir_version: 5
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "x"
output: "y"
op_type: "CumSum"
attribute {
name: "exclusive"
i: 0
type: INT
}
attribute {
name: "reverse"
i: 0
type: INT
}
}
name: "test_cum_sum"
input {
name: "x"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 3
}
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 3
}
}
}
}
}
}
opset_import {
version: 11
}
ir_version: 5
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "x"
input: "axis"
output: "y"
op_type: "CumSum"
attribute {
name: "exclusive"
i: 0
type: INT
}
attribute {
name: "reverse"
i: 0
type: INT
}
}
name: "test_cum_sum"
input {
name: "x"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 2
}
dim {
dim_value: 3
}
}
}
}
}
initializer {
data_type: 7
name: "axis"
int64_data: 1
}
input {
name: "axis"
type {
tensor_type {
elem_type: 7
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 2
}
dim {
dim_value: 3
}
}
}
}
}
}
opset_import {
version: 11
}
ir_version: 5
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "x"
input: "axis"
output: "y"
op_type: "CumSum"
attribute {
name: "exclusive"
i: 0
type: INT
}
attribute {
name: "reverse"
i: 0
type: INT
}
}
name: "test_cum_sum"
input {
name: "x"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 2
}
dim {
dim_value: 3
}
}
}
}
}
input {
name: "axis"
type {
tensor_type {
elem_type: 6
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 2
}
dim {
dim_value: 3
}
}
}
}
}
}
opset_import {
version: 11
}
ir_version: 5
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "x"
output: "y"
op_type: "CumSum"
attribute {
name: "exclusive"
i: 1
type: INT
}
attribute {
name: "reverse"
i: 1
type: INT
}
}
name: "test_cum_sum"
input {
name: "x"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 2
}
dim {
dim_value: 3
}
dim {
dim_value: 4
}
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 2
}
dim {
dim_value: 3
}
dim {
dim_value: 4
}
}
}
}
}
}
opset_import {
version: 11
}
......@@ -436,6 +436,55 @@ NGRAPH_TEST(onnx_${BACKEND_NAME}, model_sum_one_input)
EXPECT_TRUE(test::all_close_f(expected_outputs.front(), outputs.front()));
}
NGRAPH_TEST(onnx_${BACKEND_NAME}, model_cum_sum_1d)
{
    // Default-attribute CumSum over a 1-D input: y[i] = x[0] + ... + x[i].
    const auto function = onnx_import::import_onnx_model(
        file_util::path_join(SERIALIZED_ZOO, "onnx/cum_sum_1d.prototxt"));

    auto test_case = ngraph::test::NgraphTestCase(function, "${BACKEND_NAME}");
    test_case.add_input<float>({1.f, 2.f, 3.f});
    test_case.add_expected_output<float>(Shape{3}, {1.f, 3.f, 6.f});
    test_case.run();
}
NGRAPH_TEST(onnx_${BACKEND_NAME}, model_cum_sum_2d_axis_input)
{
    // CumSum on a 2x3 input with the axis provided as a constant initializer
    // (axis = 1, i.e. cumulative sum along each row).
    const auto function = onnx_import::import_onnx_model(
        file_util::path_join(SERIALIZED_ZOO, "onnx/cum_sum_2d_axis_input.prototxt"));

    auto test_case = ngraph::test::NgraphTestCase(function, "${BACKEND_NAME}");
    test_case.add_input<float>({1.f, 2.f, 3.f, 4.f, 5.f, 6.f});
    test_case.add_expected_output<float>(Shape{2, 3}, {1.f, 3.f, 6.f, 4.f, 9.f, 15.f});
    test_case.run();
}
NGRAPH_TEST(onnx_${BACKEND_NAME}, model_cum_sum_2d_dynamic_axis_input)
{
    // CumSum on a 2x3 input where the axis is a runtime graph input
    // (fed here as int32 value 1) rather than a constant.
    const auto function = onnx_import::import_onnx_model(
        file_util::path_join(SERIALIZED_ZOO, "onnx/cum_sum_2d_dynamic_axis_input.prototxt"));

    auto test_case = ngraph::test::NgraphTestCase(function, "${BACKEND_NAME}");
    test_case.add_input<float>({1.f, 2.f, 3.f, 4.f, 5.f, 6.f});
    test_case.add_input<std::int32_t>({1});
    test_case.add_expected_output<float>(Shape{2, 3}, {1.f, 3.f, 6.f, 4.f, 9.f, 15.f});
    test_case.run();
}
NGRAPH_TEST(onnx_${BACKEND_NAME}, model_cum_sum_3d_exclusive_reverse)
{
    // CumSum on a 2x3x4 input with exclusive=1 and reverse=1:
    // each output element is the sum of the STRICTLY LATER elements along axis 0.
    const auto function = onnx_import::import_onnx_model(
        file_util::path_join(SERIALIZED_ZOO, "onnx/cum_sum_3d_exclusive_reverse.prototxt"));

    auto test_case = ngraph::test::NgraphTestCase(function, "${BACKEND_NAME}");
    test_case.add_input<float>({1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f,
                                9.f, 10.f, 11.f, 12.f, 13.f, 14.f, 15.f, 16.f,
                                17.f, 18.f, 19.f, 20.f, 21.f, 22.f, 23.f, 24.f});
    test_case.add_expected_output<float>(
        Shape{2, 3, 4}, {13.f, 14.f, 15.f, 16.f, 17.f, 18.f, 19.f, 20.f, 21.f, 22.f, 23.f, 24.f,
                         0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f});
    test_case.run();
}
NGRAPH_TEST(onnx_${BACKEND_NAME}, model_min_two_inputs)
{
auto function = onnx_import::import_onnx_model(
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment