Commit 0866980a authored by Anna Alberska's avatar Anna Alberska Committed by Robert Kimball

IntelGPU backend: Atan, Ceiling, Floor, Sign, Tan operations (#1643)

parent 39700785
...@@ -759,6 +759,66 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -759,6 +759,66 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
{ {
do_eltwise_operation(topology, op, cldnn::eltwise_mode::pow); do_eltwise_operation(topology, op, cldnn::eltwise_mode::pow);
} }
// Unary element-wise operations with no native clDNN primitive. All five
// share one lowering path: generate a custom OpenCL kernel that applies the
// matching OpenCL built-in function element by element
// (see do_custom_eltwise_operation). Collapsed into a single branch to avoid
// repeating the identical call five times.
else if ("Atan" == op->description() || "Ceiling" == op->description() ||
         "Floor" == op->description() || "Sign" == op->description() ||
         "Tan" == op->description())
{
    arguments_check(op, 1, 1);

    // Map the nGraph op name onto the kernel-selector enum.
    const string& op_name = op->description();
    const CUSTOM_ELTWISE mode =
        ("Atan" == op_name)
            ? CUSTOM_ELTWISE::Atan
            : ("Ceiling" == op_name)
                  ? CUSTOM_ELTWISE::Ceil
                  : ("Floor" == op_name)
                        ? CUSTOM_ELTWISE::Floor
                        : ("Sign" == op_name) ? CUSTOM_ELTWISE::Sign : CUSTOM_ELTWISE::Tan;

    do_custom_eltwise_operation(topology,
                                get_input_name(op),
                                get_input_shape(op),
                                get_input_type(op),
                                get_output_name(op),
                                get_output_shape(op),
                                get_output_type(op),
                                mode);
}
else if ("Pad" == op->description()) else if ("Pad" == op->description())
{ {
arguments_check(op, 2, 1); arguments_check(op, 2, 1);
......
...@@ -1371,3 +1371,71 @@ void runtime::intelgpu::do_sigmoid_backprop_operation(cldnn::topology& topology, ...@@ -1371,3 +1371,71 @@ void runtime::intelgpu::do_sigmoid_backprop_operation(cldnn::topology& topology,
gws); gws);
topology.add(op_sigmoid_backprop); topology.add(op_sigmoid_backprop);
} }
// Generates a custom OpenCL kernel that applies one unary built-in function
// (atan, ceil, floor, sign or tan) to every element of the input tensor and
// registers the resulting custom_gpu_primitive with the topology.
//
// input_name/input_shape/input_type   describe the kernel's single input.
// output_name/output_shape/output_type describe the produced tensor; the
// primitive is published under output_name so later ops can consume it.
void runtime::intelgpu::do_custom_eltwise_operation(cldnn::topology& topology,
                                                    const string& input_name,
                                                    const Shape& input_shape,
                                                    const element::Type& input_type,
                                                    const string& output_name,
                                                    const Shape& output_shape,
                                                    const element::Type& output_type,
                                                    const CUSTOM_ELTWISE operation_name)
{
    // Entry point is made unique per node by embedding the output name.
    const string entry_point_name = "op_custom_eltwise_" + output_name;

    // Pick the OpenCL built-in implementing the requested operation.
    const char* opencl_func = "";
    switch (operation_name)
    {
    case CUSTOM_ELTWISE::Atan: opencl_func = "atan"; break;
    case CUSTOM_ELTWISE::Ceil: opencl_func = "ceil"; break;
    case CUSTOM_ELTWISE::Floor: opencl_func = "floor"; break;
    case CUSTOM_ELTWISE::Sign: opencl_func = "sign"; break;
    case CUSTOM_ELTWISE::Tan: opencl_func = "tan"; break;
    }

    codegen::CodeWriter writer;
    vector<size_t> gws;

    gen_func_def(writer,
                 entry_point_name,
                 {get_opencl_type_name(input_type)},
                 {input_shape},
                 get_opencl_type_name(output_type),
                 output_shape);

    writer.block_begin();
    {
        // One loop nest over the output shape; it also yields the global
        // work size for the kernel launch.
        gws = generate_loops(writer, output_shape, true);

        writer << "output" << access_dims(output_shape) << " = " << opencl_func << "(input0"
               << access_dims(input_shape) << ");\n";

        generate_loops(writer, output_shape, false);
    }
    writer.block_end();

    const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(output_type, output_shape);
    const cldnn::custom_gpu_primitive op_custom_eltwise(output_name,
                                                        {input_name},
                                                        {writer.get_code()},
                                                        entry_point_name,
                                                        get_kernel_args(1, 1),
                                                        "",
                                                        layout,
                                                        gws);
    topology.add(op_custom_eltwise);
}
...@@ -147,6 +147,24 @@ namespace ngraph ...@@ -147,6 +147,24 @@ namespace ngraph
const Shape& output_shape, const Shape& output_shape,
const element::Type& output_type); const element::Type& output_type);
// Unary element-wise operations that have no native clDNN primitive and are
// therefore lowered through a generated custom OpenCL kernel.
enum class CUSTOM_ELTWISE
{
Atan,
Ceil,
Floor,
Sign,
Tan
};
// Builds a custom OpenCL kernel applying the unary function selected by
// operation_name to each element of the input tensor, and adds it to the
// topology as a custom_gpu_primitive named output_name.
// input_*  describe the kernel's single input tensor;
// output_* describe the tensor the kernel produces.
void do_custom_eltwise_operation(cldnn::topology& topology,
const std::string& input_name,
const Shape& input_shape,
const element::Type& input_type,
const std::string& output_name,
const Shape& output_shape,
const element::Type& output_type,
const CUSTOM_ELTWISE operation_name);
// Helper functions used in cldnn::custom_gpu_primitive kernels // Helper functions used in cldnn::custom_gpu_primitive kernels
std::string get_opencl_type_name(const element::Type& ngraph_type); std::string get_opencl_type_name(const element::Type& ngraph_type);
std::vector<cldnn_arg> get_kernel_args(size_t input, size_t output); std::vector<cldnn_arg> get_kernel_args(size_t input, size_t output);
......
argmax_trivial argmax_trivial
argmin_trivial argmin_trivial
atan
avg_pool_2d_2channel_2image_padded_only_above avg_pool_2d_2channel_2image_padded_only_above
avg_pool_3d avg_pool_3d
backwards_abs
backwards_acos backwards_acos
backwards_atan
backwards_batch_norm_three_outputs backwards_batch_norm_three_outputs
backwards_ceiling backwards_ceiling
backwards_dot_scalar_tensor backwards_dot_scalar_tensor
...@@ -21,15 +18,12 @@ backwards_reverse_sequence_n3_c2_h3 ...@@ -21,15 +18,12 @@ backwards_reverse_sequence_n3_c2_h3
backwards_reverse_sequence_n4d2c3h2w2 backwards_reverse_sequence_n4d2c3h2w2
backwards_sign backwards_sign
backwards_slice backwards_slice
backwards_tan
backwards_tanh backwards_tanh
batch_norm_one_output batch_norm_one_output
batch_norm_three_outputs batch_norm_three_outputs
ceiling
concat_matrix_int64 concat_matrix_int64
divide_by_zero_int32 divide_by_zero_int32
dot_matrix_vector_int64 dot_matrix_vector_int64
floor
function_call function_call
lrn lrn
max_pool_3d max_pool_3d
...@@ -63,8 +57,6 @@ reverse_sequence_n4d2c3h2w2 ...@@ -63,8 +57,6 @@ reverse_sequence_n4d2c3h2w2
select_and_scatter_3d_without_overlap select_and_scatter_3d_without_overlap
select_and_scatter_with_overlap select_and_scatter_with_overlap
select_and_scatter_without_overlap select_and_scatter_without_overlap
sign
tan
topk_1d_max_all topk_1d_max_all
topk_1d_max_one topk_1d_max_one
topk_1d_max_partial topk_1d_max_partial
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment