Commit 55d33755 authored by Tomasz Socha, committed by Scott Cyphers

[SPEC] Adjust Tile op to the new specification (#3800)

* Rename input arg -> data

* Update node validation and shape propagation to allow the data rank and the number of repeats to differ.

* Add type_prop tests

* Add UT for new tile cases

* style

* Modify CPU backend to support the new case.

* Fix backend selection in test

* Add new line back.
parent f143bb13
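
In short, this change gives Tile numpy.tile-style semantics: the "repeats" vector and the "data" shape no longer need the same length; whichever is shorter is implicitly left-padded with 1s before the per-axis multiplication. A minimal standalone sketch of that rule (my restatement for illustration, not code from this commit):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Output-shape rule for the adjusted Tile op: left-pad the shorter of
// data_shape/repeats with 1s to a common rank, then multiply elementwise.
std::vector<int64_t> tile_output_shape(std::vector<int64_t> data_shape,
                                       std::vector<int64_t> repeats)
{
    const size_t out_rank = std::max(data_shape.size(), repeats.size());
    data_shape.insert(data_shape.begin(), out_rank - data_shape.size(), 1);
    repeats.insert(repeats.begin(), out_rank - repeats.size(), 1);

    std::vector<int64_t> out(out_rank);
    for (size_t i = 0; i < out_rank; ++i)
    {
        out[i] = data_shape[i] * repeats[i];
    }
    return out;
}

// tile_output_shape({3}, {2, 2, 1})    -> {2, 2, 3}  (small data rank)
// tile_output_shape({2, 1, 3}, {2, 1}) -> {2, 2, 3}  (few repeats)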
@@ -23,8 +23,8 @@ using namespace ngraph;
 constexpr NodeTypeInfo op::Tile::type_info;
 
-op::Tile::Tile(const Output<Node>& arg, const Output<Node>& repeats)
-    : Op({arg, repeats})
+op::Tile::Tile(const Output<Node>& data, const Output<Node>& repeats)
+    : Op({data, repeats})
 {
     constructor_validate_and_infer_types();
 }
@@ -41,36 +41,31 @@ void op::Tile::validate_and_infer_types()
                           repeats_et);
 
     auto arg_shape = get_input_partial_shape(0);
-    auto arg_rank = arg_shape.rank();
     auto repeats_shape = get_input_partial_shape(1);
     auto repeats_rank = repeats_shape.rank();
-    auto output_rank = Rank::dynamic();
 
     NODE_VALIDATION_CHECK(this, repeats_rank.compatible(1), "Shape of repeats must be of rank 1");
 
-    if (arg_rank.is_static())
-    {
-        // Repeats shapes should be of form {arg_rank} or dynamic
-        NODE_VALIDATION_CHECK(this,
-                              repeats_shape.compatible(PartialShape{arg_rank}),
-                              "Arg and repeats ranks mismatch");
-        output_rank = arg_rank;
-    }
-
-    auto out_shape = PartialShape::dynamic(output_rank);
+    auto out_shape = PartialShape::dynamic();
 
     if (auto const_repeats = as_type_ptr<op::Constant>(input_value(1).get_node_shared_ptr()))
    {
        if (arg_shape.is_static())
        {
-            auto shape = arg_shape.to_shape();
+            auto data_shape = arg_shape.to_shape();
+            auto data_rank = data_shape.size();
             auto repeats_val = const_repeats->get_vector<int64_t>();
+            auto repeats_rank = repeats_val.size();
+            auto output_rank = std::max(data_rank, repeats_rank);
+
+            // expand data shape and repeats to output rank
+            data_shape.insert(data_shape.begin(), output_rank - data_rank, 1);
+            repeats_val.insert(repeats_val.begin(), output_rank - repeats_rank, 1);
 
-            Shape output_shape(shape.size());
-            for (size_t i = 0; i < shape.size(); i++)
+            Shape output_shape(output_rank);
+            for (size_t i = 0; i < output_rank; i++)
             {
-                output_shape[i] = shape[i] * repeats_val[i];
+                output_shape[i] = data_shape[i] * repeats_val[i];
             }
             set_output_type(0, arg_et, output_shape);
         }
@@ -84,6 +79,7 @@ void op::Tile::validate_and_infer_types()
         set_output_type(0, arg_et, out_shape);
     }
 
     set_input_is_relevant_to_shape(0);
+    set_input_is_relevant_to_shape(1);
 }
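
One consequence of the hunk above: when "repeats" is not a Constant, the output rank can no longer be pinned to the data rank (repeats may be longer), so the output falls back to a fully dynamic PartialShape. A hedged usage sketch, assuming the nGraph APIs shown elsewhere in this diff:

auto data = make_shared<op::Parameter>(element::f32, Shape{2, 3});
auto repeats = make_shared<op::Parameter>(element::i64, PartialShape{Dimension::dynamic()});
auto tile = make_shared<op::Tile>(data, repeats);
// Even with a static data shape, the output rank stays dynamic here, because
// a non-constant repeats input may extend the rank beyond the data's rank 2.
ASSERT_TRUE(tile->get_output_partial_shape(0).rank().is_dynamic());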
......
@@ -33,9 +33,9 @@ namespace ngraph
             Tile() = default;
             /// \brief Perform dynamic tiling of a tensor
             ///
-            /// \param arg The node producing the input tensor to be tiled.
+            /// \param data The node producing the input tensor to be tiled.
             /// \param repeats The node producing the per-dimension replication factor
-            Tile(const Output<Node>& arg, const Output<Node>& repeats);
+            Tile(const Output<Node>& data, const Output<Node>& repeats);
 
             void validate_and_infer_types() override;
......
@@ -59,9 +59,11 @@ namespace ngraph
                 }
                 else
                 {
+                    auto out_rank = out_shape.size();
+                    arg_shape.insert(arg_shape.begin(), out_rank - arg_rank, 1);
                     std::function<decltype(runtime::cpu::kernel::tile<float, 2>)> kernel;
 
                     SELECT_KERNEL_ET_RANK(
-                        kernel, out[0].get_element_type(), arg_rank, runtime::cpu::kernel::tile);
+                        kernel, out[0].get_element_type(), out_rank, runtime::cpu::kernel::tile);
 
                     auto functor =
                         [&, kernel, arg_shape, out_shape, arg_buffer_index, out_buffer_index](
                             CPURuntimeContext* ctx, CPUExecutionContext* ectx) {
......
@@ -4513,9 +4513,11 @@ namespace ngraph
                 }
                 else
                 {
+                    auto out_rank = out_shape.size();
+                    arg_shape.insert(arg_shape.begin(), out_rank - arg_rank, 1);
                     writer.block_begin();
                     writer << "cpu::kernel::tile<" << et.c_type_string() << ", "
-                           << std::to_string(arg_rank) << ">(" << args[0].get_name() << ", "
+                           << std::to_string(out_rank) << ">(" << args[0].get_name() << ", "
                            << out[0].get_name() << ", {" << join(arg_shape) << "}, {"
                            << join(out_shape) << "}, 0);\n";
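
Both CPU paths (the builder and this codegen emitter) handle the relaxed spec the same way: rather than teaching the tile kernel about rank mismatches, they left-pad arg_shape with 1s up to out_rank and dispatch the kernel on out_rank, so the existing fixed-rank kernel is reused unchanged. A small illustration of that alignment (my restatement, not backend code):

Shape arg_shape{2, 3};     // input of rank 2
const size_t out_rank = 4; // output rank implied by repeats
arg_shape.insert(arg_shape.begin(), out_rank - arg_shape.size(), 1);
// arg_shape is now {1, 1, 2, 3}; tiling along a size-1 axis is a plain copy,
// so the rank-4 kernel produces the correct result for rank-2 data.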
......
@@ -18,3 +18,7 @@ convert_bf16_float32
 # ONNX TopK with dynamic K
 top_k_opset_10
 top_k_opset_11_const_k_smallest
+
+# Tile op cases where the number of elements in "repeats" and the rank of "data" differ
+tile_3d_small_data_rank
+tile_3d_few_repeats
@@ -309,5 +309,9 @@ convert_bf16_float32
 normalize_across_c_2x2_shape
 normalize_across_c_2x4_shape
 
+# Tile op cases where the number of elements in "repeats" and the rank of "data" differ
+tile_3d_small_data_rank
+tile_3d_few_repeats
+
 # dyn shape
 dyn_generate_mask
@@ -342,6 +342,7 @@ set(MULTI_TEST_SRC
     backend/tan.in.cpp
     backend/tanh.in.cpp
     backend/tensorview_custom_mem.in.cpp
+    backend/tile.in.cpp
     backend/topk.in.cpp
     backend/transpose.in.cpp
     backend/unhandled_op.in.cpp
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";

NGRAPH_TEST(${BACKEND_NAME}, tile_3d_small_data_rank)
{
Shape shape_a{3};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_re{3};
auto repeats = make_shared<op::Constant>(element::i64, shape_re, vector<int>{2, 2, 1});
Shape shape_r{2, 2, 3};
auto tile = make_shared<op::Tile>(A, repeats);
auto f = make_shared<Function>(tile, ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{1, 2, 3});
auto result = backend->create_tensor(element::f32, shape_r);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f(vector<float>{1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3},
read_vector<float>(result),
MIN_FLOAT_TOLERANCE_BITS));
}

NGRAPH_TEST(${BACKEND_NAME}, tile_3d_few_repeats)
{
Shape shape_a{2, 1, 3};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_re{2};
auto repeats = make_shared<op::Constant>(element::i64, shape_re, vector<int>{2, 1});
Shape shape_r{2, 2, 3};
auto tile = make_shared<op::Tile>(A, repeats);
auto f = make_shared<Function>(tile, ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
auto result = backend->create_tensor(element::f32, shape_r);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f(vector<float>{1, 2, 3, 1, 2, 3, 4, 5, 6, 4, 5, 6},
read_vector<float>(result),
MIN_FLOAT_TOLERANCE_BITS));
}
@@ -29,3 +29,21 @@ TEST(type_prop, tile)
     ASSERT_EQ(top->get_element_type(), element::f32);
     ASSERT_EQ(top->get_shape(), (Shape{18, 32, 10}));
 }
+
+TEST(type_prop, tile_small_data_rank)
+{
+    auto param0 = make_shared<op::Parameter>(element::f32, Shape{8, 10});
+    auto param1 = op::Constant::create(element::i64, Shape{3}, {3, 4, 1});
+    auto top = make_shared<op::Tile>(param0, param1);
+    ASSERT_EQ(top->get_element_type(), element::f32);
+    ASSERT_EQ(top->get_shape(), (Shape{3, 32, 10}));
+}
+
+TEST(type_prop, tile_few_repeats)
+{
+    auto param0 = make_shared<op::Parameter>(element::f32, Shape{6, 8, 10});
+    auto param1 = op::Constant::create(element::i64, Shape{2}, {4, 1});
+    auto top = make_shared<op::Tile>(param0, param1);
+    ASSERT_EQ(top->get_element_type(), element::f32);
+    ASSERT_EQ(top->get_shape(), (Shape{6, 32, 10}));
+}