Commit 11421efd authored by dmyershov's avatar dmyershov Committed by Robert Kimball

IntelGPU backend: GNMT training enabling: Added Negative operation functionality for i32 data type

IntelGPU backend: GNMT training enabling: Added Negative operation functionality for i32 data type (#1845)
parent 0563a3cf
......@@ -770,8 +770,25 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
}
case OP_TYPEID::Negative:
{
// clDNN's activation primitive does not implement i32, so negation of
// i32 tensors goes through a custom-generated OpenCL kernel instead.
if (get_input_type(op) == ngraph::element::i32)
{
// This is a workaround to enable GNMT in training mode.
// clDNN doesn't support i32 data type for activation primitive.
// Exception from clDNN: implementation_map for N5cldnn10activationE
// could not find any implementation to match key
do_negative_operation(topology,
get_input_name(op),
get_input_shape(op),
get_input_type(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op));
}
else
{
// Supported types: express negation as linear activation y = -1*x + 0.
const cldnn_activation_additional_params param = {-1.f, 0.f};
do_unary_operation(topology, op, activation_linear, param);
}
break;
}
case OP_TYPEID::Relu:
......
......@@ -1598,3 +1598,43 @@ void runtime::intelgpu::do_arg_max_min_operation(cldnn::topology& topology,
gws);
topology.add(op_arg_max_min);
}
// Emits a custom OpenCL kernel that negates every element of the input
// tensor and registers it with the topology as a custom_gpu_primitive.
// Exists because clDNN's activation primitive lacks support for some
// element types (e.g. i32) — see the Negative case in compile().
void runtime::intelgpu::do_negative_operation(cldnn::topology& topology,
                                              const string& input_name,
                                              const Shape& input_shape,
                                              const element::Type& input_type,
                                              const string& output_name,
                                              const Shape& output_shape,
                                              const element::Type& output_type)
{
    // Entry point is derived from the output name so it is unique per node.
    const string kernel_name = "negative_" + output_name;
    const cldnn::layout result_layout =
        IntelGPULayout::create_cldnn_layout(output_type, output_shape);

    codegen::CodeWriter code;
    vector<size_t> global_workset;

    // Kernel signature: one input of input_type, one output of output_type.
    gen_func_def(code,
                 kernel_name,
                 {get_opencl_type_name(input_type)},
                 {input_shape},
                 get_opencl_type_name(output_type),
                 output_shape);

    code.block_begin();
    {
        // Loop nest over the output shape; body negates element-wise.
        global_workset = generate_loops(code, output_shape, true);
        code << "output" << access_dims(output_shape) << " = - (input0"
             << access_dims(input_shape) << ");\n";
        generate_loops(code, output_shape, false);
    }
    code.block_end();

    const cldnn::custom_gpu_primitive op_negative(output_name,
                                                  {input_name},
                                                  {code.get_code()},
                                                  kernel_name,
                                                  get_kernel_args(1, 1),
                                                  "",
                                                  result_layout,
                                                  global_workset);
    topology.add(op_negative);
}
......@@ -186,6 +186,14 @@ namespace ngraph
const size_t reduction_axis,
const bool is_max);
// Adds a custom OpenCL kernel to the topology that computes element-wise
// negation of the input tensor. Workaround for element types (e.g. i32)
// that clDNN's activation primitive cannot handle.
void do_negative_operation(cldnn::topology& topology,
const std::string& input_name,
const Shape& input_shape,
const element::Type& input_type,
const std::string& output_name,
const Shape& output_shape,
const element::Type& output_type);
// Helper functions used in cldnn::custom_gpu_primitive kernels
std::string get_opencl_type_name(const element::Type& ngraph_type);
std::vector<cldnn_arg> get_kernel_args(size_t input, size_t output);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment