Commit 5b349479 authored by Sergey Shalnov's avatar Sergey Shalnov Committed by Scott Cyphers

IntelGPU backend: ReverseSequence operation implementation (#2550)

parent 066037c2
......@@ -86,6 +86,7 @@
#include "ngraph/op/quantize.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/reverse.hpp"
#include "ngraph/op/reverse_sequence.hpp"
#include "ngraph/op/slice.hpp"
#include "ngraph/op/softmax.hpp"
#include "ngraph/op/sum.hpp"
......@@ -555,6 +556,7 @@ shared_ptr<runtime::Executable>
do_reverse_operation(topology,
get_input_name(op),
get_input_shape(op),
get_input_type(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
......@@ -562,6 +564,29 @@ shared_ptr<runtime::Executable>
}
break;
}
// ReverseSequence: reverses input0 along the sequence axis, where the number
// of elements to reverse for each batch entry is given by input1 (the
// per-batch sequence-length tensor).
case OP_TYPEID::ReverseSequence:
{
// Expects two inputs (data, sequence lengths) and one output.
arguments_check(op, 2, 1);
const shared_ptr<op::ReverseSequence> revseq_op =
static_pointer_cast<op::ReverseSequence>(op);
// Axis that indexes batch entries, and the axis to be reversed.
const size_t batch_axis = revseq_op->get_batch_axis();
const size_t seq_axis = revseq_op->get_sequence_axis();
// Emit a custom OpenCL kernel implementing the operation on the GPU.
do_reverse_sequence_operation(topology,
get_input_name(op, 0),
get_input_shape(op, 0),
get_input_type(op, 0),
get_input_name(op, 1),
get_input_shape(op, 1),
get_input_type(op, 1),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
seq_axis,
batch_axis);
break;
}
case OP_TYPEID::Convert:
{
arguments_check(op, 1, 1);
......@@ -1922,7 +1947,6 @@ shared_ptr<runtime::Executable>
case OP_TYPEID::QuantizedMaxPool:
case OP_TYPEID::ReplaceSlice:
case OP_TYPEID::GenerateMask:
case OP_TYPEID::ReverseSequence:
case OP_TYPEID::ScalarConstantLike:
case OP_TYPEID::ShapeOf:
case OP_TYPEID::StopGradient:
......
......@@ -1243,6 +1243,7 @@ void runtime::intelgpu::do_eltwise_kernel(cldnn::topology& topology,
void runtime::intelgpu::do_reverse_operation(cldnn::topology& topology,
const string& input_name,
const Shape& input_shape,
const element::Type& input_type,
const string& output_name,
const Shape& output_shape,
const element::Type& output_type,
......@@ -1253,7 +1254,12 @@ void runtime::intelgpu::do_reverse_operation(cldnn::topology& topology,
CodeWriter writer;
vector<size_t> gws;
gen_func_def(writer, entry_point_name, {"float"}, {input_shape}, "float", output_shape);
gen_func_def(writer,
entry_point_name,
{get_opencl_type_name(input_type)},
{input_shape},
get_opencl_type_name(output_type),
output_shape);
writer.block_begin();
{
......@@ -1277,6 +1283,100 @@ void runtime::intelgpu::do_reverse_operation(cldnn::topology& topology,
topology.add(op_reverse);
}
void runtime::intelgpu::do_reverse_sequence_operation(cldnn::topology& topology,
                                                      const string& input0_name,
                                                      const Shape& input0_shape,
                                                      const element::Type& input0_type,
                                                      const string& input1_name,
                                                      const Shape& input1_shape,
                                                      const element::Type& input1_type,
                                                      const string& output_name,
                                                      const Shape& output_shape,
                                                      const element::Type& output_type,
                                                      const size_t reversed_axis,
                                                      const size_t batch_axis)
{
    // Emits a custom OpenCL kernel that copies input0 into the output while
    // reversing the leading elements along 'reversed_axis'. The number of
    // elements reversed for each batch entry comes from input1 (the
    // sequence-length tensor), selected via the loop index of 'batch_axis'.
    const string kernel_name = "reverse_sequence_" + output_name;
    const cldnn::layout out_layout =
        IntelGPULayout::create_cldnn_layout(output_type, output_shape);
    const string len_type = get_opencl_type_name(input1_type);

    CodeWriter code;
    vector<size_t> global_ws;

    gen_func_def(code,
                 kernel_name,
                 {get_opencl_type_name(input0_type), len_type},
                 {input0_shape, input1_shape},
                 get_opencl_type_name(output_type),
                 output_shape);
    code.block_begin();
    {
        code << "//reversed_axis:" << reversed_axis << "\n";
        code << "//batch_axis:" << batch_axis << "\n\n";

        global_ws = generate_loops(code, output_shape, true);

        // Fetch the sequence length for the current batch element. A length
        // of zero is clamped to one so such elements are copied unchanged.
        code << len_type << " orig_seq_index = "
             << "input1[i" << batch_axis << "];\n";
        code << "if (orig_seq_index == 0)\n";
        code.block_begin();
        {
            code << "orig_seq_index = 1;\n";
        }
        code.block_end();

        // Indices inside the sequence length are mirrored; the remainder of
        // the axis passes through untouched.
        code << len_type << " sequence_index;\n";
        code << "if (i" << reversed_axis << " < orig_seq_index)\n";
        code.block_begin();
        {
            code << "sequence_index = orig_seq_index - i" << reversed_axis << " - 1;\n";
        }
        code.block_end();
        code << "else\n";
        code.block_begin();
        {
            code << "sequence_index = i" << reversed_axis << ";\n";
        }
        code.block_end();

        // Write the gathered element: all axes use the loop index except the
        // reversed axis, which uses the computed sequence_index.
        code << "output" << access_dims(output_shape) << " = input0";
        if (output_shape.empty())
        {
            // Scalar tensor: single-element buffer access.
            code << "[0]";
        }
        else
        {
            for (size_t dim = 0; dim < output_shape.size(); ++dim)
            {
                if (dim == reversed_axis)
                {
                    code << "[sequence_index]";
                }
                else
                {
                    code << "[i" << dim << "]";
                }
            }
        }
        code << ";\n";

        generate_loops(code, output_shape, false);
    }
    code.block_end();

    const cldnn::custom_gpu_primitive op_reverse_seq(output_name,
                                                     {input0_name, input1_name},
                                                     {code.get_code()},
                                                     kernel_name,
                                                     get_kernel_args(2, 1),
                                                     "",
                                                     out_layout,
                                                     global_ws);
    topology.add(op_reverse_seq);
}
void runtime::intelgpu::do_not_operation(cldnn::topology& topology,
const string& input_name,
const Shape& input_shape,
......
......@@ -139,11 +139,25 @@ namespace ngraph
void do_reverse_operation(cldnn::topology& topology,
const std::string& input_name,
const Shape& input_shape,
const element::Type& input_type,
const std::string& output_name,
const Shape& output_shape,
const element::Type& output_type,
const AxisSet& reversed_axes);
void do_reverse_sequence_operation(cldnn::topology& topology,
const std::string& input0_name,
const Shape& input0_shape,
const element::Type& input0_type,
const std::string& input1_name,
const Shape& input1_shape,
const element::Type& input1_type,
const std::string& output_name,
const Shape& output_shape,
const element::Type& output_type,
const size_t reversed_axis,
const size_t batch_axis);
void do_not_operation(cldnn::topology& topology,
const std::string& input_name,
const Shape& input_shape,
......
......@@ -4,8 +4,6 @@ backwards_dot_scalar_tensor
backwards_dot_tensor_scalar
backwards_dot_tensor_vector
backwards_replace_slice
backwards_reverse_sequence_n3_c2_h3
backwards_reverse_sequence_n4d2c3h2w2
backwards_slice
batch_norm_bprop_n4c3h2w2
divide_by_zero_int32
......@@ -20,9 +18,6 @@ replace_slice_matrix
replace_slice_matrix_inplace
replace_slice_scalar
replace_slice_vector
reverse_sequence_n2c3h4w2
reverse_sequence_n4c3h2w2
reverse_sequence_n4d2c3h2w2
shape_of_5d
shape_of_matrix
shape_of_scalar
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment