Commit 97aa1d7f authored by Anna Alberska, committed by Robert Kimball

IntelGPU backend: Not, Minimum, OneHot operations (#1414)

* IntelGPU backend: Minimum, Not, OneHot operations

* Code format update

* Refactor OneHot op

* PR1414: exclude one test

* Fix errors
parent 35dacf8c
@@ -53,6 +53,7 @@
#include "ngraph/op/max.hpp"
#include "ngraph/op/max_pool.hpp"
#include "ngraph/op/min.hpp"
#include "ngraph/op/one_hot.hpp"
#include "ngraph/op/pad.hpp"
#include "ngraph/op/parameter_vector.hpp"
#include "ngraph/op/product.hpp"
@@ -437,6 +438,10 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
        {
            do_eltwise_operation(topology, op, cldnn::eltwise_mode::max);
        }
        else if ("Minimum" == op->description())
        {
            do_eltwise_operation(topology, op, cldnn::eltwise_mode::min);
        }
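Note (editor's sketch, not part of the diff): cldnn::eltwise_mode::min computes the per-element minimum of its two inputs, so Minimum can reuse the same do_eltwise_operation path as Maximum. A minimal, self-contained C illustration of the semantics, with all names and data hypothetical:

    #include <stdio.h>

    int main(void)
    {
        const float a[4] = {1.0f, -2.0f, 3.0f, 0.5f};
        const float b[4] = {0.0f, 5.0f, 3.0f, -1.5f};
        float out[4];

        for (int n = 0; n < 4; ++n)
        {
            out[n] = a[n] < b[n] ? a[n] : b[n]; /* per-element minimum */
        }

        printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 0 -2 3 -1.5 */
        return 0;
    }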
else if ("Constant" == op->description())
{
arguments_check(op, 0, 1);
@@ -692,6 +697,17 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
        {
            do_unary_operation(topology, op, activation_logistic);
        }
        else if ("Not" == op->description())
        {
            arguments_check(op, 1, 1);

            do_not_operation(topology,
                             get_input_name(op),
                             get_input_shape(op),
                             get_output_name(op),
                             get_output_shape(op),
                             get_output_type(op));
        }
        else if ("Greater" == op->description())
        {
            do_logical_operation(topology, op, " > ");
@@ -958,6 +974,22 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
                                  axis,
                                  false);
        }
        else if ("OneHot" == op->description())
        {
            arguments_check(op, 1, 1);

            const shared_ptr<op::OneHot> one_hot_op = static_pointer_cast<op::OneHot>(op);
            const size_t one_hot_axis = one_hot_op->get_one_hot_axis();

            do_one_hot_operation(topology,
                                 get_input_name(op),
                                 get_input_shape(op),
                                 get_input_type(op),
                                 get_output_name(op),
                                 get_output_shape(op),
                                 get_output_type(op),
                                 one_hot_axis);
        }
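Note (editor's sketch, not part of the diff): OneHot writes 1 where the input value equals the coordinate along one_hot_axis and 0 elsewhere; an input value outside the axis bounds therefore yields an all-zero slice under the "== i ? 1 : 0" scheme the kernel below uses. A self-contained C illustration for a hypothetical input of shape {3} expanded along axis 1 into shape {3, 4}:

    #include <stdio.h>

    int main(void)
    {
        const int input[3] = {1, 0, 3};
        int output[3][4] = {{0}};

        for (int r = 0; r < 3; ++r)
        {
            for (int c = 0; c < 4; ++c)
            {
                output[r][c] = input[r] == c ? 1 : 0;
            }
        }

        /* Prints:
           0 1 0 0
           1 0 0 0
           0 0 0 1 */
        for (int r = 0; r < 3; ++r)
        {
            printf("%d %d %d %d\n", output[r][0], output[r][1], output[r][2], output[r][3]);
        }
        return 0;
    }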
        else
        {
            throw invalid_argument("IntelGPU: Unsupported operation \"" + op->description() + "\"");
......
@@ -914,6 +914,124 @@ void runtime::intelgpu::do_reverse_operation(cldnn::topology& topology,
    topology.add(op_reverse);
}

void runtime::intelgpu::do_not_operation(cldnn::topology& topology,
                                         const string& input_name,
                                         const Shape& input_shape,
                                         const string& output_name,
                                         const Shape& output_shape,
                                         const element::Type& output_type)
{
    const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(output_type, output_shape);
    const string entry_point_name = "logic_" + output_name;
    codegen::CodeWriter writer;

    // Logical Not operates on nGraph boolean (i8) data, hence the hardcoded "char"
    writer << "__kernel void " << entry_point_name << "(const __global char input"
           << array_dims(input_shape) << ", __global char output" << array_dims(output_shape)
           << ")\n";

    writer.block_begin();
    {
        // Emit one nested loop per output dimension
        size_t var_idx = 0;
        for (auto const& i : output_shape)
        {
            writer << "for (uint i" << var_idx << " = 0; i" << var_idx << " < " << i << "; ++i"
                   << var_idx << ")\n";
            writer.block_begin();
            ++var_idx;
        }

        writer << "output" << access_dims(output_shape) << " = !input" << access_dims(input_shape)
               << ";\n";

        // Close the loop scopes opened above, one per dimension
        for (auto const& i : output_shape)
        {
            writer.block_end();
        }
    }
    writer.block_end();

    const cldnn::custom_gpu_primitive op_not(output_name,
                                             {input_name},
                                             {writer.get_code()},
                                             entry_point_name,
                                             get_kernel_args(1, 1),
                                             "",
                                             layout,
                                             {1});
    topology.add(op_not);
}
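For reference, this is the OpenCL C the writer above would plausibly emit for a Not over shape {2, 3}; the output name "result0" (and thus the entry point "logic_result0") is hypothetical:

    __kernel void logic_result0(const __global char input[2][3], __global char output[2][3])
    {
        for (uint i0 = 0; i0 < 2; ++i0)
        {
            for (uint i1 = 0; i1 < 3; ++i1)
            {
                output[i0][i1] = !input[i0][i1];
            }
        }
    }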
void runtime::intelgpu::do_one_hot_operation(cldnn::topology& topology,
                                             const std::string& input_name,
                                             const Shape& input_shape,
                                             const element::Type& input_type,
                                             const std::string& output_name,
                                             const Shape& output_shape,
                                             const element::Type& output_type,
                                             const size_t one_hot_axis)
{
    const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(output_type, output_shape);
    const string entry_point_name = "one_hot_" + output_name;
    codegen::CodeWriter writer;

    writer << "__kernel void " << entry_point_name << "(const __global "
           << input_type.c_type_string() << " input" << array_dims(input_shape) << ", __global "
           << output_type.c_type_string() << " output" << array_dims(output_shape) << ")\n";

    writer.block_begin();
    {
        size_t var_idx = 0;

        // Outer loop runs over the one-hot axis of the output
        writer << "for (uint i = 0; i < " << output_shape.at(one_hot_axis) << "; ++i)\n";
        writer.block_begin();
        {
            // Emit one nested loop per input dimension
            for (auto const& i : input_shape)
            {
                writer << "for (uint i" << var_idx << " = 0; i" << var_idx << " < " << i
                       << "; ++i" << var_idx << ")\n";
                writer.block_begin();
                ++var_idx;
            }

            // Build the output index: the one-hot axis is indexed by "i";
            // every other axis reuses the input loop variables in order
            size_t current_input = 0;
            string buffer;
            const size_t output_shape_size = output_shape.size();
            for (size_t j = 0; j < output_shape_size; ++j)
            {
                if (j == one_hot_axis)
                {
                    buffer += "[i]";
                }
                else
                {
                    buffer += "[i" + to_string(current_input) + "]";
                    ++current_input;
                }
            }

            writer << "output" << buffer << " = input" << access_dims(input_shape)
                   << " == i ? 1 : 0;\n";

            // Close the per-dimension loop scopes
            for (auto const& i : input_shape)
            {
                writer.block_end();
            }
        }
        writer.block_end();
    }
    writer.block_end();

    const cldnn::custom_gpu_primitive op_one_hot(output_name,
                                                 {input_name},
                                                 {writer.get_code()},
                                                 entry_point_name,
                                                 get_kernel_args(1, 1),
                                                 "",
                                                 layout,
                                                 {1});
    topology.add(op_one_hot);
}
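For reference, this is the OpenCL C the writer above would plausibly emit for a OneHot with input shape {3} (element type i32), output shape {3, 4}, and one_hot_axis = 1; the name "result0" is hypothetical. Each output element is written exactly once, because the outer loop covers the one-hot axis:

    __kernel void one_hot_result0(const __global int input[3], __global int output[3][4])
    {
        for (uint i = 0; i < 4; ++i)
        {
            for (uint i0 = 0; i0 < 3; ++i0)
            {
                output[i0][i] = input[i0] == i ? 1 : 0;
            }
        }
    }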
void runtime::intelgpu::do_convert_operation(cldnn::topology& topology,
                                             const string& input_name,
                                             const Shape& input_shape,
......
@@ -104,6 +104,22 @@ namespace ngraph
                                      const element::Type& output_type,
                                      const AxisSet& reversed_axes);

            void do_not_operation(cldnn::topology& topology,
                                  const std::string& input_name,
                                  const Shape& input_shape,
                                  const std::string& output_name,
                                  const Shape& output_shape,
                                  const element::Type& output_type);

            void do_one_hot_operation(cldnn::topology& topology,
                                      const std::string& input_name,
                                      const Shape& input_shape,
                                      const element::Type& input_type,
                                      const std::string& output_name,
                                      const Shape& output_shape,
                                      const element::Type& output_type,
                                      const size_t one_hot_axis);

            void do_convert_operation(cldnn::topology& topology,
                                      const std::string& input_name,
                                      const Shape& input_shape,
......
@@ -17,6 +17,7 @@ backwards_dot_scalar_tensor
backwards_dot_tensor3_tensor3
backwards_dot_tensor_scalar
backwards_dot_tensor_vector
backwards_exp
backwards_floor
backwards_maxpool_n2_c1_hw5_3x3_str2_max
backwards_maxpool_n4_c1_hw4_2x2_max
@@ -45,22 +46,17 @@ floor
function_call
lrn
max_pool_3d
minimum
not
numeric_double_inf
numeric_double_nan
one_hot_matrix_0
one_hot_scalar_0_in_3
one_hot_scalar_1_in_3
one_hot_scalar_2_in_3
one_hot_scalar_fp_nonint_in_3
one_hot_scalar_oob_in_3
one_hot_vector_0
one_hot_vector_1
one_hot_vector_1_barely_oob
one_hot_vector_1_far_oob
one_hot_vector_1_fp
one_hot_vector_1_fp_nonint
reduce_3d_to_vector
reduce_matrix_cols_zero
reduce_matrix_columns
......