Commit 1cdae06e authored by tsocha, committed by Robert Kimball

[ONNX] Shape operator (#1586)

* [ONNX] Shape operator

* Review fix pt. 1

* Style check
parent d81d0c93

@@ -67,6 +67,8 @@ add_library(onnx_import STATIC
     op/relu.hpp
     op/reshape.cpp
     op/reshape.hpp
+    op/shape.cpp
+    op/shape.hpp
     op/softmax.cpp
     op/softmax.hpp
     op/split.cpp

op/shape.cpp (new file)
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <memory>

#include "ngraph/node.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/type/element_type.hpp"

#include "shape.hpp"
namespace ngraph
{
    namespace onnx_import
    {
        namespace op
        {
            NodeVector shape(const Node& node)
            {
                auto data = node.get_ng_inputs().at(0);
                auto data_shape = data->get_shape();

                // The input's shape is fully known at import time, so the ONNX
                // Shape operator folds into a rank-1 i64 Constant of the extents.
                return {std::make_shared<ngraph::op::Constant>(
                    ngraph::element::i64, Shape{data_shape.size()}, data_shape)};
            }

        } // namespace op

    } // namespace onnx_import

} // namespace ngraph
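
The converter above works because nGraph graphs carry static shapes at import time, so ONNX's runtime shape query can be answered with a literal. A minimal standalone sketch of the same folding outside the importer (the helper name fold_shape is illustrative and not part of this commit):

#include <memory>

#include "ngraph/ngraph.hpp"

// Fold the statically known shape of `data` into an i64 Constant,
// mirroring what op::shape() emits for an ONNX Shape node.
std::shared_ptr<ngraph::Node> fold_shape(const std::shared_ptr<ngraph::Node>& data)
{
    ngraph::Shape extents = data->get_shape();
    return std::make_shared<ngraph::op::Constant>(
        ngraph::element::i64, ngraph::Shape{extents.size()}, extents);
}

// For a node of shape {3, 4, 5} this yields a rank-1 Constant holding
// the three int64 values 3, 4, 5.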

op/shape.hpp (new file)
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once

#include "ngraph/node_vector.hpp"

#include "core/node.hpp"

namespace ngraph
{
    namespace onnx_import
    {
        namespace op
        {
            /// \brief Convert an ONNX Shape node into an nGraph Constant
            ///        holding the input tensor's extents as int64 values.
            NodeVector shape(const Node& node);

        } // namespace op

    } // namespace onnx_import

} // namespace ngraph

@@ -43,6 +43,7 @@
 #include "op/reduce.hpp"
 #include "op/relu.hpp"
 #include "op/reshape.hpp"
+#include "op/shape.hpp"
 #include "op/softmax.hpp"
 #include "op/split.hpp"
 #include "op/sub.hpp"

@@ -132,6 +133,7 @@ namespace ngraph
                 std::bind(op::reduce_sum_square, std::placeholders::_1));
             m_map.emplace("Relu", std::bind(op::relu, std::placeholders::_1));
             m_map.emplace("Reshape", std::bind(op::reshape, std::placeholders::_1));
+            m_map.emplace("Shape", std::bind(op::shape, std::placeholders::_1));
             m_map.emplace("Softmax", std::bind(op::softmax, std::placeholders::_1));
             m_map.emplace("Split", std::bind(op::split, std::placeholders::_1));
             m_map.emplace("Sub", std::bind(op::sub, std::placeholders::_1));

onnx/shape.onnx (new binary file)
[Serialized ONNX model for the new test: a single Shape node taking input x and producing output y; binary protobuf content not shown.]

@@ -21,6 +21,7 @@
 #include "gtest/gtest.h"
 #include "ngraph/frontend/onnx_import/onnx.hpp"
 #include "ngraph/ngraph.hpp"
+#include "util/all_close.hpp"
 #include "util/all_close_f.hpp"
 #include "util/ndarray.hpp"
 #include "util/test_tools.hpp"

@@ -744,3 +745,22 @@ TEST(onnx, model_reshape_output_shape_as_input)
     Outputs outputs{execute(function, inputs, "INTERPRETER")};
     EXPECT_TRUE(test::all_close_f(expected_outputs.front(), outputs.front()));
 }
+
+TEST(onnx, model_shape)
+{
+    auto function =
+        onnx_import::import_onnx_function(file_util::path_join(SERIALIZED_ZOO, "onnx/shape.onnx"));
+
+    Inputs inputs;
+    inputs.emplace_back(test::NDArray<float, 3>(
+                            {{{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
+                             {{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
+                             {{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}})
+                            .get_vector());
+
+    std::vector<std::vector<int64_t>> expected_output{{3, 4, 5}};
+    std::vector<std::vector<int64_t>> outputs =
+        execute<float, int64_t>(function, inputs, "INTERPRETER");
+    EXPECT_TRUE(test::all_close(expected_output.front(), outputs.front()));
+}
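
Note that the input values are irrelevant here: Shape reads only the 3x4x5 extents of the NDArray. Since the expected output {3, 4, 5} is integer data, the test uses the exact all_close comparison rather than the float-tolerant all_close_f used elsewhere in this file.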

@@ -95,10 +95,10 @@ size_t count_ops_of_type(std::shared_ptr<ngraph::Function> f)
     return count;
 }

-template <typename T>
-std::vector<std::vector<T>> execute(const std::shared_ptr<ngraph::Function>& function,
-                                    std::vector<std::vector<T>> args,
-                                    const std::string& backend_id)
+template <typename T, typename T1 = T>
+std::vector<std::vector<T1>> execute(const std::shared_ptr<ngraph::Function>& function,
+                                     std::vector<std::vector<T>> args,
+                                     const std::string& backend_id)
 {
     auto backend = ngraph::runtime::Backend::create(backend_id);

@@ -128,10 +128,10 @@ std::vector<std::vector<T>> execute(const std::shared_ptr<ngraph::Function>& fun
     backend->call_with_validate(function, result_tensors, arg_tensors);

-    std::vector<std::vector<T>> result_vectors;
+    std::vector<std::vector<T1>> result_vectors;
     for (auto rt : result_tensors)
     {
-        result_vectors.push_back(read_vector<T>(rt));
+        result_vectors.push_back(read_vector<T1>(rt));
     }
     return result_vectors;
 }
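
The widened template is what lets the new test read outputs of a different element type than its inputs: T is the input element type and T1 the output element type, with T1 defaulting to T so every existing call site compiles unchanged. Usage, as in the tests above:

// Old single-type behavior is preserved (T1 defaults to T):
Outputs outputs{execute(function, inputs, "INTERPRETER")};

// The Shape test feeds float inputs and reads int64 shape values back:
std::vector<std::vector<int64_t>> shape_out =
    execute<float, int64_t>(function, inputs, "INTERPRETER");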