Commit 3d57b025 authored by Mateusz Bencer, committed by Michał Karzyński

[Spec] Adjust fused op DepthToSpace to match specification (#3820)

parent 6e5b4cd0
@@ -1065,7 +1065,7 @@ def convert(node, new_type, name=None):  # type: (Node, NumericType, str) -> Node
 
 @nameable_op
-def depth_to_space(node, block_size, name=None):  # type: (Node, int, str) -> Node
+def depth_to_space(node, mode, block_size, name=None):  # type: (Node, str, int, str) -> Node
     """Rearranges input tensor from depth into blocks of spatial data.
 
     Values from the height and width dimensions are moved to the depth dimension.
@@ -1078,12 +1078,17 @@ def depth_to_space(node, block_size, name=None):  # type: (Node, int, str) -> Node
     [N, C * :code:`block_size` * :code:`block_size`, H / :code:`block_size`, W / :code:`block_size`]
 
     :param node: The node with input tensor data.
+    :param mode: Specifies how the input depth dimension is split to block coordinates
+
+                 blocks_first: The input is divided to [block_size, ..., block_size, new_depth]
+                 depth_first: The input is divided to [new_depth, block_size, ..., block_size]
     :param block_size: The size of the spatial block of values describing
                        how the tensor's data is to be rearranged.
     :param name: Optional output node name.
     :return: The new node performing an DepthToSpace operation on its input tensor.
     """
-    return DepthToSpace(node, block_size)
+    return DepthToSpace(node, mode, block_size)
 
 
 def gelu(node, name=None):  # type: (NodeInput, str) -> Node
...
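A minimal usage sketch of the new Python signature (the mode string now comes before block_size). This is illustrative only; it assumes the ngraph Python package with a runtime helper such as ng.runtime(), none of which is part of this diff:

import numpy as np
import ngraph as ng

# 'blocks_first' or 'depth_first' selects how the depth dimension is split.
runtime = ng.runtime()  # assumed helper; any backend runtime works the same way
parameter_data = ng.parameter([1, 4, 2, 3], name='Data', dtype=np.float32)
model = ng.depth_to_space(parameter_data, 'blocks_first', 2)
computation = runtime.computation(model, parameter_data)
result = computation(np.arange(24, dtype=np.float32).reshape(1, 4, 2, 3))
# With block_size 2, the four input channels fold into one output channel of
# doubled spatial size, so result.shape == (1, 1, 4, 6).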
@@ -27,5 +27,6 @@ void regclass_pyngraph_op_DepthToSpace(py::module m)
     py::class_<ngraph::op::DepthToSpace, std::shared_ptr<ngraph::op::DepthToSpace>, ngraph::op::Op>
         depthtospace(m, "DepthToSpace");
     depthtospace.doc() = "ngraph.impl.op.DepthToSpace wraps ngraph::op::DepthToSpace";
-    depthtospace.def(py::init<const std::shared_ptr<ngraph::Node>&, std::size_t&>());
+    depthtospace.def(
+        py::init<const std::shared_ptr<ngraph::Node>&, const std::string&, std::size_t&>());
 }
@@ -107,12 +107,13 @@ def test_depth_to_space():
                              [15, 16, 17]],
                             [[18, 19, 20],
                              [21, 22, 23]]]], dtype=np.float32)
+    mode = 'blocks_first'
     block_size = np.float32(2)
     data_shape = [1, 4, 2, 3]
     parameter_data = ng.parameter(data_shape, name='Data', dtype=np.float32)
 
-    model = ng.depth_to_space(parameter_data, block_size)
+    model = ng.depth_to_space(parameter_data, mode, block_size)
     computation = runtime.computation(model, parameter_data)
 
     result = computation(data_value)
...
@@ -28,8 +28,13 @@ namespace ngraph
                 NodeVector depth_to_space(const Node& node)
                 {
                     auto data = node.get_ng_inputs().at(0);
-                    std::size_t block_size = node.get_attribute_value<std::int64_t>("blocksize");
-                    return NodeVector{std::make_shared<ngraph::op::DepthToSpace>(data, block_size)};
+                    const auto mode = node.get_attribute_value<std::string>("mode", "DCR");
+                    const auto ngraph_mode =
+                        (mode == "DCR") ? ngraph::op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST
+                                        : ngraph::op::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST;
+                    const auto block_size = node.get_attribute_value<std::int64_t>("blocksize");
+                    return NodeVector{
+                        std::make_shared<ngraph::op::DepthToSpace>(data, ngraph_mode, block_size)};
                 }
 
             } // namespace set_1
...
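For reference, the ONNX attribute values map onto the nGraph modes as in the hypothetical lookup below, which mirrors the importer's ternary above; 'DCR' (depth-column-row) is the ONNX default, 'CRD' (column-row-depth) selects the depth-first layout:

# Hypothetical mapping table, equivalent to the (mode == "DCR") check above.
ONNX_MODE_TO_NGRAPH_MODE = {
    'DCR': 'blocks_first',  # default when the ONNX model omits the attribute
    'CRD': 'depth_first',
}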
@@ -26,13 +26,23 @@ using namespace ngraph;
 
 constexpr NodeTypeInfo op::DepthToSpace::type_info;
 
-op::DepthToSpace::DepthToSpace(const Output<Node>& data, const size_t block_size)
+op::DepthToSpace::DepthToSpace(const Output<Node>& data,
+                               const DepthToSpaceMode& mode,
+                               const size_t block_size)
     : FusedOp({data})
     , m_blocksize(block_size)
+    , m_mode(mode)
 {
     constructor_validate_and_infer_types();
 }
 
+op::DepthToSpace::DepthToSpace(const Output<Node>& data,
+                               const std::string& mode,
+                               const size_t block_size)
+    : DepthToSpace(data, mode_from_string(mode), block_size)
+{
+}
+
 NodeVector op::DepthToSpace::decompose_op() const
 {
     auto data = input_value(0);
@@ -72,8 +82,22 @@ NodeVector op::DepthToSpace::decompose_op() const
     // First we have to disperse the data from depth channel, then rearrange them
     // so as appropriate chunks of data where close to their destination place.
     // Finally squeeze data from respective dimensions.
-    shared_ptr<Node> flat_node = builder::reshape(data, Shape{n, bs, bs, c_flat, h, w});
-    flat_node = builder::reorder_axes(flat_node, {0, 3, 4, 1, 5, 2});
+    shared_ptr<Node> flat_node;
+    switch (m_mode)
+    {
+    case DepthToSpaceMode::DEPTH_FIRST:
+    {
+        flat_node = builder::reshape(data, Shape{n, c_flat, bs, bs, h, w});
+        flat_node = builder::reorder_axes(flat_node, {0, 1, 4, 2, 5, 3});
+        break;
+    }
+    case DepthToSpaceMode::BLOCKS_FIRST:
+    default:
+    {
+        flat_node = builder::reshape(data, Shape{n, bs, bs, c_flat, h, w});
+        flat_node = builder::reorder_axes(flat_node, {0, 3, 4, 1, 5, 2});
+    }
+    }
     return NodeVector{builder::reshape(flat_node, Shape{n, c_flat, h * bs, w * bs})};
 }
 
@@ -83,5 +107,17 @@ shared_ptr<Node> op::DepthToSpace::copy_with_new_args(const NodeVector& new_args) const
     {
         throw ngraph_error("Incorrect number of new arguments");
     }
-    return make_shared<DepthToSpace>(new_args.at(0), m_blocksize);
+    return make_shared<DepthToSpace>(new_args.at(0), m_mode, m_blocksize);
+}
+
+op::DepthToSpace::DepthToSpaceMode op::DepthToSpace::mode_from_string(const std::string& mode) const
+{
+    static const std::map<std::string, DepthToSpaceMode> allowed_values = {
+        {"blocks_first", DepthToSpaceMode::BLOCKS_FIRST},
+        {"depth_first", DepthToSpaceMode::DEPTH_FIRST}};
+
+    NODE_VALIDATION_CHECK(
+        this, allowed_values.count(mode) > 0, "Invalid 'depth_to_space_mode' value passed in.");
+
+    return allowed_values.at(mode);
 }
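To make the reshape/reorder_axes orders in decompose_op concrete, here is a numpy sketch of the same rearrangement (illustrative only; it assumes a 4-D [N, C, H, W] input whose channel count is divisible by block_size squared):

import numpy as np

def depth_to_space_reference(x, block_size, mode='blocks_first'):
    # Mirrors decompose_op: reshape to 6-D, permute axes, collapse back to 4-D.
    n, c, h, w = x.shape
    bs = block_size
    c_flat = c // (bs * bs)
    if mode == 'depth_first':
        t = x.reshape(n, c_flat, bs, bs, h, w).transpose(0, 1, 4, 2, 5, 3)
    else:  # 'blocks_first'
        t = x.reshape(n, bs, bs, c_flat, h, w).transpose(0, 3, 4, 1, 5, 2)
    return t.reshape(n, c_flat, h * bs, w * bs)

numpy's transpose takes the same axis-order argument as builder::reorder_axes, so the two permutations are copied verbatim from the switch branches above.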
@@ -35,17 +35,33 @@ namespace ngraph
         class DepthToSpace : public ngraph::op::util::FusedOp
         {
         public:
+            enum class DepthToSpaceMode
+            {
+                // The input depth is divided to [block_size, ..., block_size, new_depth]
+                BLOCKS_FIRST,
+                // The input depth is divided to [new_depth, block_size, ..., block_size]
+                DEPTH_FIRST
+            };
+
             NGRAPH_API
             static constexpr NodeTypeInfo type_info{"DepthToSpace", 0};
             const NodeTypeInfo& get_type_info() const override { return type_info; }
             DepthToSpace() = default;
             /// \brief Constructs a DepthToSpace operation.
             ///
-            /// \param data - Node producing the input tensor
-            /// \param block_size - the size of the block of values to be moved
-            DepthToSpace(const Output<Node>& data, std::size_t block_size);
+            /// \param data Node producing the input tensor
+            /// \param mode Specifies how the input depth dimension is split to block coordinates
+            /// \param block_size The size of the block of values to be moved
+            DepthToSpace(const Output<Node>& data,
+                         const DepthToSpaceMode& mode,
+                         std::size_t block_size = 1);
+
+            DepthToSpace(const Output<Node>& data,
+                         const std::string& mode,
+                         std::size_t block_size = 1);
+
             std::size_t get_block_size() const { return m_blocksize; }
+            DepthToSpaceMode get_mode() const { return m_mode; }
             virtual NodeVector decompose_op() const override;
 
             virtual std::shared_ptr<Node>
@@ -53,6 +69,8 @@ namespace ngraph
 
         protected:
             std::size_t m_blocksize;
+            DepthToSpaceMode m_mode;
+            DepthToSpaceMode mode_from_string(const std::string& mode) const;
         };
     }
 }
@@ -1213,8 +1213,9 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
         }
         case OP_TYPEID::DepthToSpace:
         {
+            auto mode = node_js.at("mode").get<op::DepthToSpace::DepthToSpaceMode>();
             auto block_size = node_js.at("block_size").get<size_t>();
-            node = make_shared<op::DepthToSpace>(args[0], block_size);
+            node = make_shared<op::DepthToSpace>(args[0], mode, block_size);
             break;
         }
         case OP_TYPEID::Dequantize:
@@ -2883,6 +2884,7 @@ json JSONSerializer::serialize_node(const Node& n)
     {
         auto tmp = static_cast<const op::DepthToSpace*>(&n);
         node["type"] = write_element_type(tmp->get_element_type());
+        node["mode"] = tmp->get_mode();
         node["block_size"] = tmp->get_block_size();
         break;
     }
...
@@ -587,10 +587,11 @@ NGRAPH_TEST(${BACKEND_NAME}, space_to_depth)
     test_case.run();
 }
 
-NGRAPH_TEST(${BACKEND_NAME}, depth_to_space)
+NGRAPH_TEST(${BACKEND_NAME}, depth_to_space_block_first)
 {
     auto A = make_shared<op::Parameter>(element::f32, Shape{1, 8, 2, 2});
-    auto depth_to_space = make_shared<op::DepthToSpace>(A, 2);
+    auto depth_to_space =
+        make_shared<op::DepthToSpace>(A, op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2);
     auto function = make_shared<Function>(NodeVector{depth_to_space}, ParameterVector{A});
 
     auto test_case = test::NgraphTestCase(function, "${BACKEND_NAME}");
@@ -605,6 +606,25 @@ NGRAPH_TEST(${BACKEND_NAME}, depth_to_space)
     test_case.run();
 }
 
+NGRAPH_TEST(${BACKEND_NAME}, depth_to_space_depth_first)
+{
+    auto A = make_shared<op::Parameter>(element::f32, Shape{1, 8, 2, 2});
+    auto depth_to_space =
+        make_shared<op::DepthToSpace>(A, op::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, 2);
+    auto function = make_shared<Function>(NodeVector{depth_to_space}, ParameterVector{A});
+
+    auto test_case = test::NgraphTestCase(function, "${BACKEND_NAME}");
+    test_case.add_input<float>({
+        0.f, 2.f, 8.f, 10.f, 16.f, 18.f, 24.f, 26.f, 1.f, 3.f, 9.f, 11.f, 17.f, 19.f, 25.f, 27.f,
+        4.f, 6.f, 12.f, 14.f, 20.f, 22.f, 28.f, 30.f, 5.f, 7.f, 13.f, 15.f, 21.f, 23.f, 29.f, 31.f,
+    });
+    test_case.add_expected_output<float>(
+        Shape{1, 2, 4, 4}, {0.f,  16.f, 2.f,  18.f, 1.f,  17.f, 3.f,  19.f, 8.f,  24.f, 10.f,
+                            26.f, 9.f,  25.f, 11.f, 27.f, 4.f,  20.f, 6.f,  22.f, 5.f,  21.f,
+                            7.f,  23.f, 12.f, 28.f, 14.f, 30.f, 13.f, 29.f, 15.f, 31.f});
+    test_case.run();
+}
+
 NGRAPH_TEST(${BACKEND_NAME}, normalize_across_chw_4d)
 {
     Shape data_shape{1, 2, 3, 4};
...
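The depth_first expected output above can be reproduced with the earlier reference sketch (reusing the hypothetical depth_to_space_reference helper; the values are copied from the test):

import numpy as np

data = np.array([0, 2, 8, 10, 16, 18, 24, 26, 1, 3, 9, 11, 17, 19, 25, 27,
                 4, 6, 12, 14, 20, 22, 28, 30, 5, 7, 13, 15, 21, 23, 29, 31],
                dtype=np.float32).reshape(1, 8, 2, 2)
out = depth_to_space_reference(data, block_size=2, mode='depth_first')
# out has shape (1, 2, 4, 4) and its flattened values start with
# 0, 16, 2, 18, 1, 17, 3, 19, ... matching depth_to_space_depth_first.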
@@ -10,6 +10,11 @@ graph {
       i: 2
       type: INT
     }
+    attribute {
+      name: "mode"
+      s: "blocks_first"
+      type: STRING
+    }
   }
   name: "compute_graph"
   input {
...
@@ -10,6 +10,11 @@ graph {
       i: 3
       type: INT
     }
+    attribute {
+      name: "mode"
+      s: "blocks_first"
+      type: STRING
+    }
   }
   name: "compute_graph"
   input {
...
@@ -10,6 +10,11 @@ graph {
      i: 2
      type: INT
    }
+    attribute {
+      name: "mode"
+      s: "blocks_first"
+      type: STRING
+    }
   }
   name: "compute_graph"
   input {
...
@@ -5,6 +5,11 @@ graph {
     input: "A"
     output: "B"
     op_type: "DepthToSpace"
+    attribute {
+      name: "mode"
+      s: "blocks_first"
+      type: STRING
+    }
   }
   name: "compute_graph"
   input {
...
@@ -519,3 +519,25 @@ TEST(serialize, opset1_binary_convolution)
     EXPECT_EQ(binary_conv_out->get_pad_value(), pad_value);
     EXPECT_EQ(binary_conv_out->get_auto_pad(), auto_pad);
 }
+
+TEST(serialize, depth_to_space)
+{
+    auto arg = make_shared<op::Parameter>(element::f32, Shape{4, 5, 6});
+    auto mode = op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST;
+    size_t block_size = 2;
+    auto depth_to_space_in = make_shared<op::DepthToSpace>(arg, mode, block_size);
+
+    auto result = make_shared<op::Result>(depth_to_space_in);
+    auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
+    string s = serialize(f);
+
+    shared_ptr<Function> g = deserialize(s);
+    auto g_result = g->get_results().at(0);
+    auto g_depth_to_space = g_result->input(0).get_source_output().get_node_shared_ptr();
+    auto depth_to_space_out = as_type_ptr<op::DepthToSpace>(g_depth_to_space);
+
+    EXPECT_EQ(depth_to_space_out->description(), "DepthToSpace");
+    EXPECT_EQ(depth_to_space_out->get_version(), 0);
+    EXPECT_EQ(depth_to_space_out->get_block_size(), block_size);
+    EXPECT_EQ(depth_to_space_out->get_mode(), mode);
+}
@@ -24,8 +24,28 @@ using namespace ngraph;
 TEST(type_prop, depth_to_space)
 {
     auto A = make_shared<op::Parameter>(element::f32, Shape{1, 128, 8, 8});
-    auto space_to_depth = make_shared<op::DepthToSpace>(A, 8);
+    auto space_to_depth =
+        make_shared<op::DepthToSpace>(A, op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 8);
 
     ASSERT_EQ(space_to_depth->get_element_type(), element::f32);
     ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 2, 64, 64}));
 }
+
+TEST(type_prop, depth_to_space_input_rank_not_supported)
+{
+    auto A = make_shared<op::Parameter>(element::f32, Shape{1, 8, 8, 8, 4});
+    try
+    {
+        auto space_to_depth =
+            make_shared<op::DepthToSpace>(A, op::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, 2);
+        FAIL() << "Not supported input shape for DepthToSpace exception not thrown";
+    }
+    catch (const ngraph_error& error)
+    {
+        EXPECT_HAS_SUBSTRING(error.what(), "The provided tensor shape: ");
+    }
+    catch (...)
+    {
+        FAIL() << "DepthToSpace decomposition failed for unexpected reason";
+    }
+}