Commit 10c2a7fd authored by Adam Rogowiec, committed by Michał Karzyński

[ONNX] SpaceToDepth operator. (#2343)

parent c88999d6
......@@ -130,6 +130,8 @@ add_library(onnx_import STATIC
op/softplus.hpp
op/softsign.cpp
op/softsign.hpp
op/space_to_depth.cpp
op/space_to_depth.hpp
op/split.cpp
op/split.hpp
op/sqrt.hpp
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <cstddef>
#include <cstdint>
#include <memory>
#include "exceptions.hpp"
#include "ngraph/node.hpp"
#include "ngraph/shape.hpp"
#include "space_to_depth.hpp"
#include "utils/reshape.hpp"
namespace ngraph
{
    namespace onnx_import
    {
        namespace op
        {
            namespace set_1
            {
                NodeVector space_to_depth(const Node& node)
                {
                    auto data = node.get_ng_inputs().at(0);
                    const Shape& data_shape = data->get_shape();
                    std::int64_t block_size{node.get_attribute_value<std::int64_t>("blocksize")};

                    // Set default values for each dimension so that both 3D and 4D data can be handled.
                    std::size_t n{1}, c{1}, h{1}, w{1};

                    ASSERT_VALID_ARGUMENT(node, (data_shape.size() == 3 || data_shape.size() == 4))
                        << "The provided tensor shape: " << data_shape << " is not supported.";

                    // Assume NCHW data layout.
                    if (data_shape.size() == 4)
                    {
                        n = data_shape.at(0);
                        c = data_shape.at(1);
                        h = data_shape.at(2);
                        w = data_shape.at(3);
                    }
                    // Without batch dimension (CHW).
                    else if (data_shape.size() == 3)
                    {
                        c = data_shape.at(0);
                        h = data_shape.at(1);
                        w = data_shape.at(2);
                    }

                    ASSERT_VALID_ARGUMENT(
                        node, (h % block_size == 0 && w % block_size == 0 && block_size > 0))
                        << "The height and width axes sizes must be multiples of the positive "
                           "`blocksize` attribute value.";

                    std::size_t bs = static_cast<std::size_t>(block_size);
                    std::size_t w_flat = w / bs;
                    std::size_t h_flat = h / bs;
                    std::size_t c_high = c * bs * bs;

                    // First disperse the data along the height and width axes, then rearrange it
                    // so that the appropriate chunks of data are close to their destination place.
                    // Finally, collapse the data back into the target dimensions.
                    std::shared_ptr<ngraph::Node> flat_node =
                        reshape::reshape(data, ngraph::Shape{n, c, h_flat, bs, w_flat, bs});
                    flat_node = reshape::reorder_axes(flat_node, {0, 3, 5, 1, 2, 4});
                    return {reshape::reshape(flat_node, ngraph::Shape{n, c_high, h_flat, w_flat})};
                }

            } // namespace set_1
        }     // namespace op
    }         // namespace onnx_import
} // namespace ngraph
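For reference, below is a minimal standalone sketch (hypothetical helper in plain C++, not part of this commit; the name space_to_depth_reference and its signature are illustrative only) that applies the same blocks-to-depth permutation directly to a flat NCHW buffer. It mirrors the reshape → reorder_axes({0, 3, 5, 1, 2, 4}) → reshape composition above, so the output channel index works out to (bh * bs + bw) * C + c.

#include <cstddef>
#include <vector>

// Illustrative reference only (assumption: row-major, contiguous NCHW input buffer).
std::vector<float> space_to_depth_reference(const std::vector<float>& in,
                                            std::size_t n,
                                            std::size_t c,
                                            std::size_t h,
                                            std::size_t w,
                                            std::size_t bs)
{
    const std::size_t h_out = h / bs;
    const std::size_t w_out = w / bs;
    std::vector<float> out(in.size());
    for (std::size_t b = 0; b < n; ++b)
        for (std::size_t bh = 0; bh < bs; ++bh)
            for (std::size_t bw = 0; bw < bs; ++bw)
                for (std::size_t ci = 0; ci < c; ++ci)
                    for (std::size_t oh = 0; oh < h_out; ++oh)
                        for (std::size_t ow = 0; ow < w_out; ++ow)
                        {
                            // The {0, 3, 5, 1, 2, 4} reorder folds (bh, bw, ci) into the
                            // output channel dimension in exactly this order.
                            const std::size_t co = (bh * bs + bw) * c + ci;
                            const std::size_t src =
                                ((b * c + ci) * h + oh * bs + bh) * w + ow * bs + bw;
                            const std::size_t dst =
                                ((b * c * bs * bs + co) * h_out + oh) * w_out + ow;
                            out[dst] = in[src];
                        }
    return out;
}

For the 1x2x4x4 input with blocksize 2 used in the tests below, this reproduces the expected output: output channel 0 gathers input[0][0][0] = 0, input[0][0][2] = 2, input[0][2][0] = 8 and input[0][2][2] = 10, and so on.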
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once

#include "core/node.hpp"
#include "ngraph/node_vector.hpp"

namespace ngraph
{
    namespace onnx_import
    {
        namespace op
        {
            namespace set_1
            {
                /// \brief      Permutes input tensor blocks of spatial data into depth.
                ///
                /// \param[in]  node  The ONNX input node describing the operation.
                ///
                /// \return     NodeVector containing a tensor with shape:
                ///             [N, C * blocksize * blocksize, H / blocksize, W / blocksize]
                NodeVector space_to_depth(const Node& node);

            } // namespace set_1
        }     // namespace op
    }         // namespace onnx_import
} // namespace ngraph
......@@ -84,6 +84,7 @@
#include "op/softmax.hpp"
#include "op/softplus.hpp"
#include "op/softsign.hpp"
#include "op/space_to_depth.hpp"
#include "op/split.hpp"
#include "op/sqrt.hpp"
#include "op/squeeze.hpp"
......@@ -263,6 +264,7 @@ namespace ngraph
REGISTER_OPERATOR("Softmax", 1, softmax);
REGISTER_OPERATOR("Softplus", 1, softplus);
REGISTER_OPERATOR("Softsign", 1, softsign);
REGISTER_OPERATOR("SpaceToDepth", 1, space_to_depth);
REGISTER_OPERATOR("Split", 1, split);
REGISTER_OPERATOR("Sqrt", 1, sqrt);
REGISTER_OPERATOR("Squeeze", 1, squeeze);
......
[Binary ONNX test models added with this commit: space_to_depth.onnx, space_to_depth_chw.onnx, space_to_depth_bad_blocksize.onnx and space_to_depth_no_blocksize.onnx. Each contains a small compute_graph with a single SpaceToDepth node carrying a blocksize attribute (the no_blocksize model omits it); the binary protobuf payloads are not reproduced here.]
......@@ -1712,3 +1712,60 @@ TEST(onnx_${BACKEND_NAME}, is_op_supported)
});
EXPECT_TRUE(onnx_import::is_operator_supported("AddQ", 1, "com.intel.ai"));
}
TEST(onnx_${BACKEND_NAME}, model_space_to_depth)
{
    auto function = onnx_import::import_onnx_model(
        file_util::path_join(SERIALIZED_ZOO, "onnx/space_to_depth.onnx"));

    Inputs inputs;
    inputs.emplace_back(std::vector<float>{0.f,  1.f,  2.f,  3.f,  4.f,  5.f,  6.f,  7.f,
                                           8.f,  9.f,  10.f, 11.f, 12.f, 13.f, 14.f, 15.f,
                                           16.f, 17.f, 18.f, 19.f, 20.f, 21.f, 22.f, 23.f,
                                           24.f, 25.f, 26.f, 27.f, 28.f, 29.f, 30.f, 31.f});

    Outputs expected_output{std::vector<float>{
        0.f, 2.f, 8.f,  10.f, 16.f, 18.f, 24.f, 26.f, 1.f, 3.f, 9.f,  11.f, 17.f, 19.f, 25.f, 27.f,
        4.f, 6.f, 12.f, 14.f, 20.f, 22.f, 28.f, 30.f, 5.f, 7.f, 13.f, 15.f, 21.f, 23.f, 29.f, 31.f,
    }};

    Outputs outputs{execute(function, inputs, "${BACKEND_NAME}")};
    EXPECT_TRUE(test::all_close_f(expected_output.front(), outputs.front()));
}

TEST(onnx_${BACKEND_NAME}, model_space_to_depth_chw)
{
    auto function = onnx_import::import_onnx_model(
        file_util::path_join(SERIALIZED_ZOO, "onnx/space_to_depth_chw.onnx"));

    Inputs inputs;
    inputs.emplace_back(std::vector<float>{0.f,  1.f,  2.f,  3.f,  4.f,  5.f,  6.f,  7.f,
                                           8.f,  9.f,  10.f, 11.f, 12.f, 13.f, 14.f, 15.f,
                                           16.f, 17.f, 18.f, 19.f, 20.f, 21.f, 22.f, 23.f,
                                           24.f, 25.f, 26.f, 27.f, 28.f, 29.f, 30.f, 31.f});

    Outputs expected_output{std::vector<float>{
        0.f, 2.f, 8.f,  10.f, 16.f, 18.f, 24.f, 26.f, 1.f, 3.f, 9.f,  11.f, 17.f, 19.f, 25.f, 27.f,
        4.f, 6.f, 12.f, 14.f, 20.f, 22.f, 28.f, 30.f, 5.f, 7.f, 13.f, 15.f, 21.f, 23.f, 29.f, 31.f,
    }};

    Outputs outputs{execute(function, inputs, "${BACKEND_NAME}")};
    EXPECT_TRUE(test::all_close_f(expected_output.front(), outputs.front()));
}
TEST(onnx_${BACKEND_NAME}, model_space_to_depth_bad_blocksize)
{
    // This model fails to import since the height and width dimensions must be
    // multiples of the `blocksize` attribute value.
    EXPECT_THROW(onnx_import::import_onnx_model(file_util::path_join(
                     SERIALIZED_ZOO, "onnx/space_to_depth_bad_blocksize.onnx")),
                 std::runtime_error);
}
TEST(onnx_${BACKEND_NAME}, model_space_to_depth_no_blocksize)
{
    // This model fails to import since it lacks the required `blocksize` attribute.
    EXPECT_THROW(onnx_import::import_onnx_model(
                     file_util::path_join(SERIALIZED_ZOO, "onnx/space_to_depth_no_blocksize.onnx")),
                 std::runtime_error);
}