Unverified commit c62feaf0 authored by Ivan Tikhonov, committed by GitHub

[Fused] Add BatchToSpace, SpaceToBatch ops (#4367)

parent e7f7886e
...@@ -365,6 +365,8 @@ set (SRC
op/xor.hpp
op/fused/batch_mat_mul_transpose.cpp
op/fused/batch_mat_mul_transpose.hpp
op/fused/batch_to_space.cpp
op/fused/batch_to_space.hpp
op/fused/clamp.cpp
op/fused/clamp.hpp
op/fused/conv_fused.cpp
...@@ -421,6 +423,8 @@ set (SRC
op/fused/shuffle_channels.hpp
op/fused/softmax_crossentropy.cpp
op/fused/softmax_crossentropy.hpp
op/fused/space_to_batch.cpp
op/fused/space_to_batch.hpp
op/fused/space_to_depth.cpp
op/fused/space_to_depth.hpp
op/fused/split.cpp
...
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <cmath>
#include <cstddef>
#include <memory>
#include <ops.hpp>
#include "ngraph/builder/make_constant.hpp"
#include "ngraph/builder/reshape.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/fused/batch_to_space.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/shape.hpp"
using namespace std;
using namespace ngraph;
constexpr NodeTypeInfo op::v1::BatchToSpace::type_info;
ngraph::op::v1::BatchToSpace::BatchToSpace(const ngraph::Output<ngraph::Node>& data,
const ngraph::Output<ngraph::Node>& block_shape,
const ngraph::Output<ngraph::Node>& crops_begin,
const ngraph::Output<ngraph::Node>& crops_end)
: FusedOp({data, block_shape, crops_begin, crops_end})
{
constructor_validate_and_infer_types();
}
NodeVector op::v1::BatchToSpace::decompose_op() const
{
auto data = input_value(0);
auto block = input_value(1);
auto crops_begin = input_value(2);
auto crops_end = input_value(3);
const auto& data_shape = data.get_shape();
NODE_VALIDATION_CHECK(this,
(data_shape.size() >= 2),
"The data tensor with rank lower than 2 is not supported (data rank: ",
data_shape.size(),
")");
const auto block_const = as_type_ptr<op::Constant>(block.get_node_shared_ptr());
const auto crops_begin_const = as_type_ptr<op::Constant>(crops_begin.get_node_shared_ptr());
const auto crops_end_const = as_type_ptr<op::Constant>(crops_end.get_node_shared_ptr());
vector<int64_t> block_values, crops_end_values;
block_values = block_const->cast_vector<int64_t>();
crops_end_values = crops_end_const->cast_vector<int64_t>();
// First we have to disperse the data from the batch dimension, then rearrange them
// so that appropriate chunks of data are close to their destination place.
// Finally squeeze data from respective dimensions.
vector<int64_t> dispersed_shape;
int64_t b_dim_divider = 1;
for (const auto& el : block_values)
{
NODE_VALIDATION_CHECK(this, el > 0, "block_shape values must be greater than 0");
b_dim_divider *= el;
}
NODE_VALIDATION_CHECK(this,
data_shape.at(0) % b_dim_divider == 0,
"BatchToSpace: The input data's 'batch' axis size: ",
data_shape.at(0),
" must be a multiple of ",
" product of block_shape values: ",
b_dim_divider);
// note: B_0 is expected to be 1.
// x' = reshape(`data`, [B_1, ..., B_{N - 1}, batch / (B_1 * ... * B_{N - 1}), D_1, D_2, ...,
// D_{N - 1}]),
// where B_i = block_shape[i]
dispersed_shape.insert(dispersed_shape.begin(), block_values.begin() + 1, block_values.end());
dispersed_shape.push_back(data_shape.at(0) / b_dim_divider);
for (size_t i = 1; i < data_shape.size(); ++i)
{
dispersed_shape.push_back(data_shape.at(i));
}
const auto out_pattern_1 =
op::Constant::create(element::i64, Shape{dispersed_shape.size()}, dispersed_shape);
const bool special_zero = false;
auto flat_node = make_shared<ngraph::op::v1::Reshape>(data, out_pattern_1, special_zero)
->add_provenance_group_members_above({data});
// calculate axes to transpose
// x'' = transpose(x', [N - 1, N, 0, N + 1, 1, ..., 2 * N - 2, N - 2])
vector<size_t> axes_order{block_values.size() - 1};
for (size_t i = 0; i < block_values.size() - 1; ++i)
{
axes_order.push_back(i + block_values.size());
axes_order.push_back(i);
}
flat_node = builder::opset1::reorder_axes(flat_node, axes_order);
// x''' = reshape(x'', [batch / (B_1 * ... * B_{N - 1}), D_1 * B_1, D_2 * B_2, ... , D_{N - 1}
// * B_{N - 1}])
vector<int64_t> squeezed_shape;
squeezed_shape.push_back(data_shape.at(0) / b_dim_divider);
for (size_t i = 1; i < block_values.size(); ++i)
{
squeezed_shape.push_back(data_shape.at(i) * block_values.at(i));
}
const auto out_pattern_2 =
op::Constant::create(element::i64, Shape{squeezed_shape.size()}, squeezed_shape);
flat_node = make_shared<ngraph::op::v1::Reshape>(flat_node, out_pattern_2, special_zero)
->add_provenance_group_members_above({data});
// Crop the start and end of dimensions according to `crops_begin`, `crops_end` to produce
// the output of shape:
// note: `crops_begin[0], crops_end[0]` are expected to be 0.
// `y = [batch / (B_1 * ... * B_{N - 1}), crop(D_1 * B_1, crops_begin[1], crops_end[1]),
// crop(D_2 * B_2, crops_begin[2], crops_end[2]), ... ,
// crop(D_{N - 1} * B_{N - 1}, crops_begin[N - 1], crops_end[N - 1])]`
vector<int64_t> upperbounds_values;
auto flat_node_shape = flat_node->get_shape();
for (size_t i = 0; i < flat_node_shape.size(); ++i)
{
upperbounds_values.push_back(flat_node_shape.at(i) - crops_end_values.at(i));
}
const auto upperbounds = op::Constant::create(
crops_end.get_element_type(), Shape{upperbounds_values.size()}, upperbounds_values);
vector<int64_t> begin_mask(data_shape.size(), 0);
vector<int64_t> end_mask(data_shape.size(), 0);
flat_node = make_shared<op::v1::StridedSlice>(
flat_node, crops_begin_const, upperbounds, begin_mask, end_mask);
return NodeVector{flat_node};
}
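A worked trace of this decomposition may help; the numbers below are taken from the batch_to_space backend test later in this change (data {12, 1, 1, 2}, block_shape {1, 2, 3, 2}, crops_begin {0, 0, 1, 0}, crops_end {0, 0, 0, 1}):

// b_dim_divider = 2 * 3 * 2 = 12
// x'   : reshape to dispersed_shape {2, 3, 2, 12 / 12, 1, 1, 2} = {2, 3, 2, 1, 1, 1, 2}
// x''  : transpose with axes_order {3, 4, 0, 5, 1, 6, 2} -> shape {1, 1, 2, 1, 3, 2, 2}
// x''' : reshape to squeezed_shape {1, 1 * 2, 1 * 3, 2 * 2} = {1, 2, 3, 4}
// y    : StridedSlice with begin {0, 0, 1, 0} and upperbounds {1, 2, 3, 3} -> shape {1, 2, 2, 3}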
void ngraph::op::v1::BatchToSpace::pre_validate_and_infer_types()
{
PartialShape data_pshape = get_input_partial_shape(0);
auto data = input_value(0);
auto block = input_value(1);
auto crops_begin = input_value(2);
auto crops_end = input_value(3);
NGRAPH_CHECK(block.get_node_shared_ptr()->is_constant(),
"block_shape input node is expected to be a static constant");
NGRAPH_CHECK(crops_begin.get_node_shared_ptr()->is_constant(),
"crops_begin input node is expected to be a static constant");
NGRAPH_CHECK(crops_end.get_node_shared_ptr()->is_constant(),
"crops_end input node is expected to be a static constant");
const auto& data_type = get_input_element_type(0);
const auto& block_shape_type = get_input_element_type(1);
const auto& crops_begin_type = get_input_element_type(2);
const auto& crops_end_type = get_input_element_type(3);
NODE_VALIDATION_CHECK(this,
block_shape_type.is_integral_number(),
"block_shape must be an integral number but got (",
block_shape_type,
").");
NODE_VALIDATION_CHECK(this,
crops_begin_type.is_integral_number(),
"crops_begin must be an integral number but got (",
crops_begin_type,
").");
NODE_VALIDATION_CHECK(this,
crops_end_type.is_integral_number(),
"crops_end must be an integral number but got (",
crops_end_type,
").");
if (data_pshape.is_dynamic())
{
set_output_type(0, data_type, PartialShape::dynamic());
}
}
std::shared_ptr<ngraph::Node>
ngraph::op::v1::BatchToSpace::copy_with_new_args(const ngraph::NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<BatchToSpace>(
new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3));
}
bool ngraph::op::v1::BatchToSpace::visit_attributes(ngraph::AttributeVisitor& visitor)
{
return true;
}
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/node.hpp"
#include "ngraph/op/util/fused_op.hpp"
namespace ngraph
{
namespace op
{
namespace v1
{
/// \brief BatchToSpace permutes data from the batch dimension of the data tensor into
/// spatial dimensions.
///
/// \note Values from the batch dimension are moved into spatial block dimensions.
///
/// Output node produces a tensor with shape:
/// `[batch / (block_shape[0] * block_shape[1] * ... * block_shape[N - 1]),
/// D_1 * block_shape[1] - crops_begin[1] - crops_end[1],
/// D_2 * block_shape[2] - crops_begin[2] - crops_end[2], ...,
/// D_{N - 1} * block_shape[N - 1] - crops_begin[N - 1] - crops_end[N - 1]]`
/// of the same type as `data` input.
class NGRAPH_API BatchToSpace : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"BatchToSpace", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
BatchToSpace() = default;
/// \brief Constructs a BatchToSpace operation.
///
/// \param data Node producing the data tensor
/// \param block_shape The sizes of the block of values to be moved
/// \param crops_begin Specifies the amount to crop from the beginning along each
/// axis of `data` input
/// \param crops_end Specifies the amount to crop from the ending along each axis of
/// `data` input.
BatchToSpace(const Output<Node>& data,
const Output<Node>& block_shape,
const Output<Node>& crops_begin,
const Output<Node>& crops_end);
NodeVector decompose_op() const override;
void pre_validate_and_infer_types() override;
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
};
}
}
}
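For a concrete reading of the shape formula above (numbers borrowed from the batch_to_space_output_shape_2D type_prop test later in this change), a sketch:

// data: {10, 26}, block_shape: {1, 5}, crops_begin: {0, 2}, crops_end: {0, 0}
// output[0] = 10 / (1 * 5)   = 2
// output[1] = 26 * 5 - 2 - 0 = 128
// i.e. BatchToSpace yields a tensor of shape {2, 128}.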
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <cmath>
#include <cstddef>
#include <memory>
#include "ngraph/builder/make_constant.hpp"
#include "ngraph/builder/reshape.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/fused/space_to_batch.hpp"
#include "ngraph/op/pad.hpp"
#include "ngraph/shape.hpp"
using namespace std;
using namespace ngraph;
constexpr NodeTypeInfo op::v1::SpaceToBatch::type_info;
ngraph::op::v1::SpaceToBatch::SpaceToBatch(const ngraph::Output<ngraph::Node>& data,
const ngraph::Output<ngraph::Node>& block_shape,
const ngraph::Output<ngraph::Node>& pads_begin,
const ngraph::Output<ngraph::Node>& pads_end)
: FusedOp({data, block_shape, pads_begin, pads_end})
{
constructor_validate_and_infer_types();
}
NodeVector op::v1::SpaceToBatch::decompose_op() const
{
auto data = input_value(0);
auto block = input_value(1);
auto pads_begin = input_value(2);
auto pads_end = input_value(3);
const auto& data_shape = data.get_shape();
NODE_VALIDATION_CHECK(this,
(data_shape.size() >= 2),
"The data tensor with rank lower than 2 is not supported (data rank: ",
data_shape.size(),
")");
const auto block_const = as_type_ptr<op::Constant>(block.get_node_shared_ptr());
const auto pads_begin_const = as_type_ptr<op::Constant>(pads_begin.get_node_shared_ptr());
const auto pads_end_const = as_type_ptr<op::Constant>(pads_end.get_node_shared_ptr());
vector<int64_t> block_values;
block_values = block_const->cast_vector<int64_t>();
// Zero-pad the start and end of dimensions [D_0, ..., D_{N - 1}] of the input according to
// `pads_begin`
// and `pads_end`:
// note: P_0 for batch dimension is expected to be 0 (no-padding).
// x = [batch + P_0, D_1 + P_1, D_2 + P_2, ..., D_{N - 1} + P_{N - 1}], where P_i =
// pads_begin[i] + pads_end[i]
auto out = make_shared<op::v1::Pad>(data, pads_begin_const, pads_end_const, PadMode::CONSTANT);
auto out_shape = out->get_shape();
// First we have to disperse the data from the spatial dimensions, then
// rearrange them so that appropriate chunks of data are close to their
// destination place. Finally squeeze data from respective dimensions.
Shape dispersed_shape{out_shape.at(0)};
// note: B_0 for batch is ignored.
// x' = reshape(x, [batch, (D_1 + P_1) / B_1, B_1, (D_2 + P_2) / B_2, B_2, ...,
// (D_{N - 1} + P_{N - 1}) / B_{N - 1}, B_{N - 1}]), where B_i = block_shape[i]
for (size_t i = 1; i < block_values.size(); ++i)
{
NODE_VALIDATION_CHECK(
this, block_values.at(i) > 0, "block_shape values must be greater than 0");
NODE_VALIDATION_CHECK(this,
out_shape.at(i) % block_values.at(i) == 0,
"The dimension on position: ",
i,
" equal to: ",
out_shape.at(i),
" must be a multiple of block_values[i]: ",
block_values.at(i));
dispersed_shape.push_back(out_shape.at(i) / block_values.at(i));
dispersed_shape.push_back(block_values.at(i));
}
auto flat_node = builder::opset1::reshape(out, dispersed_shape);
// x'' = transpose(x', [2, 4, ..., 2 * (N - 1), 0, 1, 3, ..., 2 * N - 3])
vector<size_t> axes_order;
for (size_t i = 0, j = 2; i < block_values.size() - 1; ++i, j += 2)
{
axes_order.push_back(j);
}
axes_order.push_back(0);
for (size_t i = 0, j = 1; i < block_values.size() - 1; ++i, j += 2)
{
axes_order.push_back(j);
}
flat_node = builder::opset1::reorder_axes(flat_node, axes_order);
Shape squeezed_shape;
int64_t prod = 1;
for (const auto& el : block_values)
{
prod *= el;
}
// y = reshape(x'', [batch * B_1 * ... * B_{N - 1}, (D_1 + P_1) / B_1, (D_2 + P_2) / B_2,
//                   ..., (D_{N - 1} + P_{N - 1}) / B_{N - 1}])
squeezed_shape.push_back(out_shape.at(0) * prod);
for (size_t i = 1; i < block_values.size(); ++i)
{
squeezed_shape.push_back(out_shape.at(i) / block_values.at(i));
}
flat_node = builder::opset1::reshape(flat_node, squeezed_shape);
return NodeVector{flat_node};
}
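A worked trace of this decomposition may help; the numbers below are taken from the space_to_batch backend test later in this change (data {1, 2, 2, 3}, block_shape {1, 2, 3, 2}, pads_begin {0, 0, 1, 0}, pads_end {0, 0, 0, 1}):

// x    : Pad -> {1, 2, 2 + 1, 3 + 1} = {1, 2, 3, 4}
// x'   : reshape to dispersed_shape {1, 2 / 2, 2, 3 / 3, 3, 4 / 2, 2} = {1, 1, 2, 1, 3, 2, 2}
// x''  : transpose with axes_order {2, 4, 6, 0, 1, 3, 5} -> shape {2, 3, 2, 1, 1, 1, 2}
// y    : reshape to squeezed_shape {1 * 12, 2 / 2, 3 / 3, 4 / 2} = {12, 1, 1, 2}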
void ngraph::op::v1::SpaceToBatch::pre_validate_and_infer_types()
{
PartialShape data_pshape = get_input_partial_shape(0);
auto data = input_value(0);
auto block = input_value(1);
auto pads_begin = input_value(2);
auto pads_end = input_value(3);
NGRAPH_CHECK(block.get_node_shared_ptr()->is_constant(),
"block_shape input node is expected to be a static constant");
NGRAPH_CHECK(pads_begin.get_node_shared_ptr()->is_constant(),
"pads_begin input node is expected to be a static constant");
NGRAPH_CHECK(pads_end.get_node_shared_ptr()->is_constant(),
"pads_end input node is expected to be a static constant");
const auto& data_type = get_input_element_type(0);
const auto& block_shape_type = get_input_element_type(1);
const auto& pads_begin_type = get_input_element_type(2);
const auto& pads_end_type = get_input_element_type(3);
NODE_VALIDATION_CHECK(this,
block_shape_type.is_integral_number(),
"block_shape must be an integral number but got (",
block_shape_type,
").");
NODE_VALIDATION_CHECK(this,
pads_begin_type.is_integral_number(),
"pads_begin must be an integral number but got (",
pads_begin_type,
").");
NODE_VALIDATION_CHECK(this,
pads_end_type.is_integral_number(),
"pads_end must be an integral number but got (",
pads_end_type,
").");
if (data_pshape.is_dynamic())
{
set_output_type(0, data_type, PartialShape::dynamic());
}
}
std::shared_ptr<Node>
ngraph::op::v1::SpaceToBatch::copy_with_new_args(const ngraph::NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<SpaceToBatch>(
new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3));
}
bool ngraph::op::v1::SpaceToBatch::visit_attributes(ngraph::AttributeVisitor& visitor)
{
return true;
}
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/node.hpp"
#include "ngraph/op/util/fused_op.hpp"
namespace ngraph
{
namespace op
{
namespace v1
{
/// \brief SpaceToBatch permutes data tensor blocks of spatial data into batch
/// dimension.
///
/// \note Values from spatial block dimensions are moved into the batch dimension.
///
/// Output node produces a tensor with shape:
/// `[batch * block_shape[0] * block_shape[1] * ... * block_shape[N - 1],
/// (pads_begin[1] + D_1 + pads_end[1]) / block_shape[1],
/// (pads_begin[2] + D_2 + pads_end[2]) / block_shape[2], ...,
/// (pads_begin[N - 1] + D_{N - 1} + pads_end[N - 1]) / block_shape[N - 1]]`
/// of the same type as `data` input.
class NGRAPH_API SpaceToBatch : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"SpaceToBatch", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
SpaceToBatch() = default;
/// \brief Constructs a SpaceToBatch operation.
///
/// \param data Node producing the data tensor
/// \param block_shape The sizes of the block of values to be moved
/// \param pads_begin Specifies the padding for the beginning along each axis of
/// `data` input
/// \param pads_end Specifies the padding for the ending along each axis of `data`
/// input.
SpaceToBatch(const Output<Node>& data,
const Output<Node>& block_shape,
const Output<Node>& pads_begin,
const Output<Node>& pads_end);
NodeVector decompose_op() const override;
void pre_validate_and_infer_types() override;
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
};
}
using v1::SpaceToBatch;
}
}
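For a concrete reading of the shape formula above (numbers borrowed from the space_to_batch_output_shape_2D type_prop test later in this change), a sketch:

// data: {2, 128}, block_shape: {1, 5}, pads_begin: {0, 2}, pads_end: {0, 0}
// output[0] = 2 * (1 * 5)       = 10
// output[1] = (2 + 128 + 0) / 5 = 26
// i.e. SpaceToBatch yields a tensor of shape {10, 26}.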
...@@ -48,6 +48,7 @@ NGRAPH_OP(BatchMatMulTranspose, ngraph::op, 0)
NGRAPH_OP(BatchNormInference, ngraph::op, 0)
NGRAPH_OP(BatchNormTraining, ngraph::op, 0)
NGRAPH_OP(BatchNormTrainingBackprop, ngraph::op, 0)
NGRAPH_OP(BatchToSpace, ngraph::op::v1, 1)
NGRAPH_OP(BinaryConvolution, ngraph::op::v1, 1)
NGRAPH_OP(Broadcast, ngraph::op::v0, 0)
NGRAPH_OP(Broadcast, ngraph::op::v1, 1)
...@@ -225,6 +226,7 @@ NGRAPH_OP(Softmax, ngraph::op::v0, 0)
NGRAPH_OP(Softmax, ngraph::op::v1, 1)
NGRAPH_OP(SoftmaxCrossEntropy, ngraph::op::v0, 0)
NGRAPH_OP(SoftmaxCrossEntropyBackprop, ngraph::op::v0, 0)
NGRAPH_OP(SpaceToBatch, ngraph::op::v1, 1)
NGRAPH_OP(SpaceToDepth, ngraph::op::v0, 0)
NGRAPH_OP(Split, ngraph::op::v1, 1)
NGRAPH_OP(Split, ngraph::op::v0, 0)
...
...@@ -83,6 +83,7 @@
#include "ngraph/op/floor.hpp"
#include "ngraph/op/floor_mod.hpp"
#include "ngraph/op/fused/batch_mat_mul_transpose.hpp"
#include "ngraph/op/fused/batch_to_space.hpp"
#include "ngraph/op/fused/clamp.hpp"
#include "ngraph/op/fused/conv_fused.hpp"
#include "ngraph/op/fused/crossentropy.hpp"
...@@ -111,6 +112,7 @@
#include "ngraph/op/fused/selu.hpp"
#include "ngraph/op/fused/shuffle_channels.hpp"
#include "ngraph/op/fused/softmax_crossentropy.hpp"
#include "ngraph/op/fused/space_to_batch.hpp"
#include "ngraph/op/fused/space_to_depth.hpp"
#include "ngraph/op/fused/split.hpp"
#include "ngraph/op/fused/squared_difference.hpp"
...
...@@ -23,3 +23,5 @@
#include "opset1_tbl.hpp"
NGRAPH_OP(Gelu, ngraph::op::v0)
NGRAPH_OP(BatchToSpace, ngraph::op::v1)
NGRAPH_OP(SpaceToBatch, ngraph::op::v1)
...@@ -586,7 +586,16 @@ namespace
shared_ptr<Node> op_cast(shared_ptr<op::v1::Pad> node)
{
const auto pad_arg = node->input_value(0);
Output<Node> pad_value;
if (node->get_input_size() == 4)
{
pad_value = node->input_value(3);
}
else
{
pad_value =
make_shared<op::Constant>(pad_arg.get_element_type(), Shape{}, vector<float>{0.f});
}
auto replacement_node = make_shared<op::v0::Pad>(
pad_arg, pad_value, node->get_pads_begin(), node->get_pads_end(), node->get_pad_mode());
...
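The change above guards the v1-to-v0 Pad downgrade: SpaceToBatch::decompose_op builds a three-input v1::Pad (data, pads_begin, pads_end, plus the PadMode attribute) with no explicit pad-value input, so the cast now synthesizes a zero constant instead of unconditionally reading input 3. A minimal sketch of the case this handles (the shapes here are illustrative, not from the patch):

// Hypothetical three-input v1::Pad, as produced by SpaceToBatch::decompose_op;
// node->get_input_size() == 3, so reading input_value(3) would be invalid.
auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2, 3});
auto begin = op::Constant::create(element::i64, Shape{4}, vector<int64_t>{0, 0, 1, 0});
auto end = op::Constant::create(element::i64, Shape{4}, vector<int64_t>{0, 0, 0, 1});
auto pad_v1 = make_shared<op::v1::Pad>(arg, begin, end, op::PadMode::CONSTANT);
// The downgrade pass now maps pad_v1 to v0::Pad with a synthesized Constant 0.f pad value.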
...@@ -1706,6 +1706,11 @@ std::string runtime::gpu::GPU_Emitter::emit_v0_ConvolutionBias(EMIT_ARGS)
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_v1_SpaceToBatch(EMIT_ARGS)
{
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_v0_SpaceToDepth(EMIT_ARGS)
{
throw unsupported_op("Unsupported op '" + node->description() + "'");
...@@ -1761,6 +1766,11 @@ std::string runtime::gpu::GPU_Emitter::emit_v0_CTCGreedyDecoder(EMIT_ARGS)
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_v1_BatchToSpace(EMIT_ARGS)
{
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_v0_DepthToSpace(EMIT_ARGS)
{
throw unsupported_op("Unsupported op '" + node->description() + "'");
...
...@@ -481,6 +481,8 @@ space_to_depth_block_first
space_to_depth_depth_first
depth_to_space_block_first
depth_to_space_depth_first
space_to_batch
batch_to_space
normalize_across_chw_4d
normalize_across_empty_axes_input
normalize_across_h_4d
...
...@@ -976,6 +976,11 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
args[2], args[0], args[1], args[3], args[4], args[5], epsilon);
break;
}
case OP_TYPEID::BatchToSpace_v1:
{
node = make_shared<op::v1::BatchToSpace>(args[0], args[1], args[2], args[3]);
break;
}
case OP_TYPEID::BinaryConvolution_v1:
{
auto strides = node_js.at("strides").get<vector<size_t>>();
...@@ -2792,6 +2797,11 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
args[0], args[1], args[2], soft_label, ignore_index);
break;
}
case OP_TYPEID::SpaceToBatch_v1:
{
node = make_shared<op::v1::SpaceToBatch>(args[0], args[1], args[2], args[3]);
break;
}
case OP_TYPEID::SpaceToDepth:
{
auto block_size = node_js.at("block_size").get<size_t>();
...@@ -3293,6 +3303,8 @@ json JSONSerializer::serialize_node(const Node& n)
node["eps"] = tmp->get_eps_value();
break;
}
case OP_TYPEID::BatchToSpace_v1: { break;
}
case OP_TYPEID::BinaryConvolution_v1:
{
auto tmp = static_cast<const op::v1::BinaryConvolution*>(&n);
...@@ -4446,6 +4458,8 @@ json JSONSerializer::serialize_node(const Node& n)
node["ellipsis_mask"] = tmp->get_ellipsis_mask();
break;
}
case OP_TYPEID::SpaceToBatch_v1: { break;
}
case OP_TYPEID::SpaceToDepth:
{
auto tmp = static_cast<const op::SpaceToDepth*>(&n);
...
...@@ -114,6 +114,7 @@ set(SRC
type_prop/batch_mat_mul.cpp
type_prop/batch_mat_mul_transpose.cpp
type_prop/batch_norm.cpp
type_prop/batch_to_space.cpp
type_prop/binary_elementwise.cpp
type_prop/broadcast.cpp
type_prop/clamp.cpp
...@@ -179,6 +180,7 @@ set(SRC
type_prop/shape_of.cpp
type_prop/shuffle_channels.cpp
type_prop/slice.cpp
type_prop/space_to_batch.cpp
type_prop/space_to_depth.cpp
type_prop/split.cpp
type_prop/squared_difference.cpp
...
...@@ -716,6 +716,50 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_groups_included_in_shape)
EXPECT_EQ(expected, read_vector<float>(result0));
}
NGRAPH_TEST(${BACKEND_NAME}, space_to_batch)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2, 3});
auto block_shape =
make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 2, 3, 2});
auto pads_begin =
make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 0, 1, 0});
auto pads_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 0, 0, 1});
auto space_to_batch =
make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
auto function = make_shared<Function>(NodeVector{space_to_batch}, ParameterVector{data});
auto test_case = test::NgraphTestCase(function, "${BACKEND_NAME}");
test_case.add_input<float>({0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f});
test_case.add_expected_output<float>(Shape{12, 1, 1, 2},
{
0.f, 0.f, 0.f, 0.f, 0.f, 2.f, 1.f, 0.f,
3.f, 5.f, 4.f, 0.f, 0.f, 0.f, 0.f, 0.f,
6.f, 8.f, 7.f, 0.f, 9.f, 11.f, 10.f, 0.f,
});
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, batch_to_space)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{12, 1, 1, 2});
auto block_shape =
make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 2, 3, 2});
auto crops_begin =
make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 0, 1, 0});
auto crops_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 0, 0, 1});
auto batch_to_space =
make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
auto function = make_shared<Function>(NodeVector{batch_to_space}, ParameterVector{data});
auto test_case = test::NgraphTestCase(function, "${BACKEND_NAME}");
test_case.add_input<float>({
0.f, 0.f, 0.f, 0.f, 0.f, 2.f, 1.f, 0.f, 3.f, 5.f, 4.f, 0.f,
0.f, 0.f, 0.f, 0.f, 6.f, 8.f, 7.f, 0.f, 9.f, 11.f, 10.f, 0.f,
});
test_case.add_expected_output<float>(
Shape{1, 2, 2, 3}, {0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f});
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, space_to_depth_block_first)
{
auto A = make_shared<op::Parameter>(element::f32, Shape{1, 2, 4, 4});
...
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
TEST(type_prop, batch_to_space_output_shape_2D)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{10, 26});
auto block_shape = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 5});
auto crops_begin = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 2});
auto crops_end = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 0});
auto batch_to_space =
make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
ASSERT_EQ(batch_to_space->get_element_type(), element::f32);
ASSERT_EQ(batch_to_space->get_shape(), (Shape{10 / 5, 26 * 5 - 2}));
}
TEST(type_prop, batch_to_space_output_shape_4D)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{100, 7, 13, 3});
auto block_shape =
make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 10, 5, 1});
auto crops_begin =
make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 1, 0});
auto crops_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 0, 0});
auto batch_to_space =
make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
ASSERT_EQ(batch_to_space->get_element_type(), element::f32);
ASSERT_EQ(batch_to_space->get_shape(), (Shape{100 / (10 * 5), 7 * 10 - 3 - 3, 13 * 5 - 1, 3}));
}
TEST(type_prop, batch_to_space_output_shape_5D)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{960, 6, 13, 128, 16});
auto block_shape =
make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{1, 6, 5, 1, 16});
auto crops_begin =
make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{0, 2, 0, 0, 0});
auto crops_end =
make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{0, 2, 1, 0, 0});
auto batch_to_space =
make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
ASSERT_EQ(batch_to_space->get_element_type(), element::f32);
ASSERT_EQ(batch_to_space->get_shape(),
(Shape{960 / (6 * 5 * 16), 6 * 6 - 2 - 2, 13 * 5 - 1, 128, 16 * 16}));
}
TEST(type_prop, batch_to_space_and_space_to_batch)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{4800, 9, 11, 2});
auto block_shape =
make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 12, 100, 2});
auto pads_begin =
make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 38, 1});
auto pads_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 5, 38, 0});
auto batch_to_space =
make_shared<op::v1::BatchToSpace>(data, block_shape, pads_begin, pads_end);
ASSERT_EQ(batch_to_space->get_element_type(), element::f32);
ASSERT_EQ(batch_to_space->get_shape(),
(Shape{4800 / (12 * 100 * 2), 9 * 12 - 3 - 5, 11 * 100 - 38 - 38, 2 * 2 - 1}));
auto space_to_batch =
make_shared<op::v1::SpaceToBatch>(batch_to_space, block_shape, pads_begin, pads_end);
ASSERT_EQ(space_to_batch->get_element_type(), element::f32);
ASSERT_EQ(space_to_batch->get_shape(), (Shape{4800, 9, 11, 2}));
}
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
TEST(type_prop, space_to_batch_output_shape_2D)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 128});
auto block_shape = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 5});
auto pads_begin = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 2});
auto pads_end = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 0});
auto space_to_batch =
make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
ASSERT_EQ(space_to_batch->get_element_type(), element::f32);
ASSERT_EQ(space_to_batch->get_shape(), (Shape{2 * 5, (128 + 2) / 5}));
}
TEST(type_prop, space_to_batch_output_shape_4D)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 64, 64, 3});
auto block_shape =
make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 10, 5, 1});
auto pads_begin =
make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 1, 0});
auto pads_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 0, 0});
auto space_to_batch =
make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
ASSERT_EQ(space_to_batch->get_element_type(), element::f32);
ASSERT_EQ(space_to_batch->get_shape(), (Shape{2 * 10 * 5, (64 + 3 + 3) / 10, (64 + 1) / 5, 3}));
}
TEST(type_prop, space_to_batch_output_shape_5D)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 32, 64, 128, 256});
auto block_shape =
make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{1, 6, 5, 1, 16});
auto pads_begin =
make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{0, 2, 0, 0, 0});
auto pads_end =
make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{0, 2, 1, 0, 0});
auto space_to_batch =
make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
ASSERT_EQ(space_to_batch->get_element_type(), element::f32);
ASSERT_EQ(space_to_batch->get_shape(),
(Shape{2 * 6 * 5 * 16, (32 + 2 + 2) / 6, (64 + 1) / 5, 128, 256 / 16}));
}
TEST(type_prop, space_to_batch_and_batch_to_space)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 100, 1024, 3});
auto block_shape =
make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 12, 100, 2});
auto pads_begin =
make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 38, 1});
auto pads_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 5, 38, 0});
auto space_to_batch =
make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
ASSERT_EQ(space_to_batch->get_element_type(), element::f32);
ASSERT_EQ(space_to_batch->get_shape(),
(Shape{2 * 12 * 100 * 2, (100 + 3 + 5) / 12, (1024 + 38 + 38) / 100, (3 + 1) / 2}));
auto batch_to_space =
make_shared<op::v1::BatchToSpace>(space_to_batch, block_shape, pads_begin, pads_end);
ASSERT_EQ(batch_to_space->get_element_type(), element::f32);
ASSERT_EQ(batch_to_space->get_shape(), (Shape{2, 100, 1024, 3}));
}