Commit 9248588c authored by dmyershov, committed by Sang Ik Lee

IntelGPU backend: Select operation fix (clDNN doesn't support int32 data type); Switched to clDNN 14.0 drop (#2702)
parent e67c9c60
@@ -22,7 +22,7 @@ include(ExternalProject)
#------------------------------------------------------------------------------
set(CLDNN_GIT_REPO_URL https://github.com/intel/clDNN.git)
-set(CLDNN_GIT_LABEL e0efde85b3cfdd776b53447500ce0bb2aa8bca54)
+set(CLDNN_GIT_LABEL a9197ba89553ad1f55ffda617cfc772b60af5bc2)
set(BOOST_VERSION 1.64.0)
set(OUT_DIR ${EXTERNAL_PROJECTS_ROOT}/cldnn/out)
@@ -99,8 +99,6 @@ using namespace ngraph;
using intelgpu_space = runtime::intelgpu::IntelGPULayout;
-#define USE_INTELGPU_CUSTOM_KERNELS 0
// This expands the op list in op_tbl.hpp into a list of enumerations that look like this:
// Abs,
// Acos,
@@ -521,25 +519,27 @@ shared_ptr<runtime::Executable>
{
arguments_check(op, 3, 1);
-// Leave it here for some time
-#if USE_INTELGPU_CUSTOM_KERNELS
-do_select_operation(topology,
-get_input_name(op, 0),
-get_input_shape(op, 0),
-get_input_name(op, 1),
-get_input_shape(op, 1),
-get_input_name(op, 2),
-get_input_shape(op, 2),
-get_output_name(op),
-get_output_shape(op),
-get_output_type(op));
-#else
-const cldnn::select cldnn_select(get_output_name(op),
-get_input_name(op, 1),
-get_input_name(op, 2),
-get_input_name(op));
-topology.add(cldnn_select);
-#endif
+if (get_output_type(op) != element::f32)
+{
+do_select_operation(topology,
+get_input_name(op, 0),
+get_input_shape(op, 0),
+get_input_name(op, 1),
+get_input_shape(op, 1),
+get_input_name(op, 2),
+get_input_shape(op, 2),
+get_output_name(op),
+get_output_shape(op),
+get_output_type(op));
+}
+else
+{
+const cldnn::select cldnn_select(get_output_name(op),
+get_input_name(op, 1),
+get_input_name(op, 2),
+get_input_name(op));
+topology.add(cldnn_select);
+}
break;
}
case OP_TYPEID::Reverse:
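For reference, the Select change above replaces a compile-time switch with a runtime type check: non-f32 outputs go through the backend's custom do_select_operation() kernel because, per the commit message, clDNN's select primitive does not support int32 data. In the cldnn::select call the backend passes the two data inputs (op inputs 1 and 2) first and the selector (op input 0) last. The snippet below is only a plain C++ sketch of the elementwise semantics both paths implement; the function and variable names are illustrative, not part of the backend.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

// Elementwise Select, the operation both code paths above implement:
//   out[i] = selector[i] ? if_true[i] : if_false[i]
static std::vector<int32_t> select_i32(const std::vector<uint8_t>& selector,
                                       const std::vector<int32_t>& if_true,
                                       const std::vector<int32_t>& if_false)
{
    assert(selector.size() == if_true.size() && if_true.size() == if_false.size());
    std::vector<int32_t> out(selector.size());
    for (size_t i = 0; i < out.size(); ++i)
    {
        out[i] = selector[i] ? if_true[i] : if_false[i];
    }
    return out;
}

int main()
{
    // Mirrors the argument roles above: op input 0 is the selector,
    // inputs 1 and 2 are the "true" and "false" data tensors.
    const std::vector<uint8_t> selector{1, 0, 1, 0};
    const std::vector<int32_t> if_true{10, 20, 30, 40};
    const std::vector<int32_t> if_false{-1, -2, -3, -4};
    for (int32_t v : select_i32(selector, if_true, if_false))
    {
        std::cout << v << ' '; // prints: 10 -2 30 -4
    }
    std::cout << '\n';
    return 0;
}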
@@ -1616,8 +1616,8 @@ shared_ptr<runtime::Executable>
if ((pad_below.at(0) != pad_above.at(0)) || (pad_below.at(1) != pad_above.at(1)))
{
// Different input padding for operation workarounded by adding aux layer
-const cldnn::tensor border_pad_above(0, 0, pad_below.at(1), pad_below.at(0));
-const cldnn::tensor border_pad_below(0, 0, pad_above.at(1), pad_above.at(0));
+const cldnn::tensor border_pad_above(0, 0, pad_below.at(1), pad_below.at(0), 0);
+const cldnn::tensor border_pad_below(0, 0, pad_above.at(1), pad_above.at(0), 0);
input_offset_x = 0;
input_offset_y = 0;
op_input_name = op_input_name + "_" + get_output_name(op) + "_bordered";
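This hunk and the remaining ones below all make the same mechanical change: every cldnn::tensor built for border paddings, input offsets and crops gains a trailing 0, presumably because the clDNN 14.0 drop expects one more coordinate in this constructor. The sketch below only annotates the call pattern as this backend uses it; the header path, the batch/feature labels and the meaning of the extra argument are assumptions, not confirmed clDNN documentation, and the variable names are illustrative placeholders.

#include <CPP/tensor.hpp> // assumed location of the clDNN C++ API tensor header

// Call pattern used throughout this commit, with the new trailing value:
// batch and feature stay 0 here because only spatial offsets are needed.
const int offset_x = 0; // illustrative placeholders, not backend variables
const int offset_y = 0;
const cldnn::tensor input_offset(0,        // batch
                                 0,        // feature
                                 offset_x, // spatial x
                                 offset_y, // spatial y
                                 0);       // extra coordinate added for the clDNN 14.0 drop (assumption)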
@@ -1630,7 +1630,7 @@ shared_ptr<runtime::Executable>
topology.add(cldnn_border);
}
-const cldnn::tensor input_offset(0, 0, input_offset_x, input_offset_y);
+const cldnn::tensor input_offset(0, 0, input_offset_x, input_offset_y, 0);
const cldnn::tensor strides(1, 1, win_stride.at(1), win_stride.at(0));
const cldnn::tensor dilation(1, 1, win_dilation.at(1), win_dilation.at(0));
@@ -1716,8 +1716,8 @@ shared_ptr<runtime::Executable>
if ((pad_below_x != pad_above_x) && (pad_below_y != pad_above_y))
{
// Different input padding for operation workarounded by adding aux layer
-const cldnn::tensor border_pad_above(0, 0, pad_above_x, pad_above_y);
-const cldnn::tensor border_pad_below(0, 0, pad_below_x, pad_below_y);
+const cldnn::tensor border_pad_above(0, 0, pad_above_x, pad_above_y, 0);
+const cldnn::tensor border_pad_below(0, 0, pad_below_x, pad_below_y, 0);
input_offset_x = 0;
input_offset_y = 0;
op_input_name = op_input_name + "_" + get_output_name(op) + "_bordered";
@@ -1729,7 +1729,7 @@ shared_ptr<runtime::Executable>
topology.add(cldnn_border);
}
-const cldnn::tensor input_offset(0, 0, input_offset_x, input_offset_y);
+const cldnn::tensor input_offset(0, 0, input_offset_x, input_offset_y, 0);
const cldnn::tensor strides(1, 1, win_dilation.at(1), win_dilation.at(0));
const cldnn::convolution_grad_weights conv_back_flt(get_output_name(op),
@@ -1797,8 +1797,8 @@ shared_ptr<runtime::Executable>
else
{
// Different input padding for operation workarounded by adding aux layer
-const cldnn::tensor crop_pad_below(0, 0, -pad_below.at(1), -pad_below.at(0));
-const cldnn::tensor crop_pad_above(0, 0, -pad_above.at(1), -pad_above.at(0));
+const cldnn::tensor crop_pad_below(0, 0, -pad_below.at(1), -pad_below.at(0), 0);
+const cldnn::tensor crop_pad_above(0, 0, -pad_above.at(1), -pad_above.at(0), 0);
op_input_name = op_input_name + "_" + get_output_name(op) + "_cropped";
const cldnn::crop cldnn_crop(op_input_name,
@@ -1809,7 +1809,7 @@ shared_ptr<runtime::Executable>
topology.add(cldnn_crop);
}
-const cldnn::tensor input_offset(0, 0, input_offset_xy, input_offset_xy);
+const cldnn::tensor input_offset(0, 0, input_offset_xy, input_offset_xy, 0);
const cldnn::tensor strides(1, 1, win_stride.at(1), win_stride.at(0));
const cldnn::convolution_grad_input cldnn_conv_back_data(get_output_name(op),
@@ -107,7 +107,7 @@ cldnn::tensor runtime::intelgpu::IntelGPULayout::create_cldnn_offset(const Shape
offset.at(ridx - 1) = -(*i);
}
-const cldnn::tensor input_offset(offset.at(0), offset.at(1), offset.at(3), offset.at(2));
+const cldnn::tensor input_offset(offset.at(0), offset.at(1), offset.at(3), offset.at(2), 0);
return input_offset;
}