Commit 62e1bc26 authored by Sergey Shalnov, committed by Scott Cyphers

IntelGPU backend: Double datatype workaround implemented (#2435)

parent f75e10c3
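
clDNN has no native 64-bit floating-point type, so the workaround takes two forms below: create_cldnn_layout() describes f64 (and u64) tensors to clDNN with an i64 layout of the same width, with u32 carried as i32, and the batch-norm builders fall back to the backend's custom do_batch_norm_operation() kernels whenever the input element type is not f32. A minimal sketch of the width-equivalence assumption behind the layout substitution (illustrative only, not part of the commit):

// Illustrative sketch: the substitution in create_cldnn_layout() is safe
// only because each stand-in clDNN type has the same byte width as the
// nGraph element type whose data it carries.
#include <cstdint>

static_assert(sizeof(double) == sizeof(std::int64_t), "f64 carried as i64");
static_assert(sizeof(std::uint64_t) == sizeof(std::int64_t), "u64 carried as i64");
static_assert(sizeof(std::uint32_t) == sizeof(std::int32_t), "u32 carried as i32");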
@@ -1332,7 +1332,8 @@ shared_ptr<runtime::Executable>
             arguments_check(op, 5, 1);
 
-            if (get_input_shape(op, 2).size() != 4)
+            if ((get_input_shape(op, 2).size() != 4) ||
+                (get_input_type(op) != ngraph::element::f32))
             {
                 do_batch_norm_operation(topology,
                                         get_output_name(op),
@@ -1364,7 +1365,8 @@ shared_ptr<runtime::Executable>
                 static_pointer_cast<op::BatchNormTraining>(op);
             const double eps = bnorm->get_eps_value();
 
-            if (get_input_shape(op, 2).size() != 4)
+            if ((get_input_shape(op, 2).size() != 4) ||
+                (get_input_type(op) != ngraph::element::f32))
             {
                 string mean_name;
                 string variance_name;
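
Both hunks extend the same guard: the clDNN batch-norm primitive is now used only for 4D f32 inputs, and any other element type, including f64, is routed to do_batch_norm_operation(), the backend's custom implementation.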
@@ -54,32 +54,19 @@ bool runtime::intelgpu::IntelGPULayout::
 cldnn::data_types
     runtime::intelgpu::IntelGPULayout::get_cldnn_type(const element::Type& element_type)
 {
-    if ((element_type == ngraph::element::i8) || (element_type == ngraph::element::boolean))
+    switch (element_type.get_type_enum())
     {
-        return cldnn::data_types::i8;
-    }
-    else if (element_type == ngraph::element::u8)
-    {
-        return cldnn::data_types::u8;
-    }
-    else if (element_type == ngraph::element::i32)
-    {
-        return cldnn::data_types::i32;
-    }
-    else if (element_type == ngraph::element::i64)
-    {
-        return cldnn::data_types::i64;
-    }
-    else if (element_type == ngraph::element::f32)
-    {
-        return cldnn::data_types::f32;
-    }
-    else
-    {
-        ostringstream os;
-        os << "IntelGPULayout::get_cldnn_type: Unknown type " << element_type;
-        throw invalid_argument(os.str());
+    case element::Type_t::i8:
+    case element::Type_t::boolean: return cldnn::data_types::i8;
+    case element::Type_t::u8: return cldnn::data_types::u8;
+    case element::Type_t::i32: return cldnn::data_types::i32;
+    case element::Type_t::i64: return cldnn::data_types::i64;
+    case element::Type_t::f32: return cldnn::data_types::f32;
     }
+
+    ostringstream os;
+    os << "IntelGPULayout::get_cldnn_type: Unknown type " << element_type;
+    throw invalid_argument(os.str());
 }
 
 cldnn::tensor runtime::intelgpu::IntelGPULayout::create_cldnn_tensor(const Shape& element_shape)
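
Recast as a switch over element::Type_t, get_cldnn_type() keeps a single failure exit: any element type without a case (f16 or f64, for example) falls through to the invalid_argument throw after the switch. The widths clDNN cannot name directly are intercepted one level up, in create_cldnn_layout() below.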
@@ -131,13 +118,27 @@ cldnn::layout runtime::intelgpu::IntelGPULayout::create_cldnn_layout(
     const cldnn::tensor tensor = create_cldnn_tensor(element_shape);
     cldnn::data_types data_type;
 
-    if ((element_type == ngraph::element::i16) || (element_type == ngraph::element::u16))
+    switch (element_type.get_type_enum())
     {
+    case element::Type_t::i16:
+    case element::Type_t::u16:
+    {
         data_type = cldnn::data_types::f16;
+        break;
     }
-    else
+    case element::Type_t::u32:
     {
-        data_type = get_cldnn_type(element_type);
+        data_type = cldnn::data_types::i32;
+        break;
     }
+    case element::Type_t::u64:
+    case element::Type_t::f64:
+    {
+        data_type = cldnn::data_types::i64;
+        break;
+    }
+    default: { data_type = get_cldnn_type(element_type);
+    }
+    }
 
     return cldnn::layout(data_type, format, tensor);
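
The widened switch is the heart of the workaround: element types clDNN cannot represent directly are described by a layout of equal byte width, and the tensor bytes are reinterpreted rather than converted.

nGraph element type    clDNN layout type    shared width
i16, u16               f16                  16 bit
u32                    i32                  32 bit
u64, f64               i64                  64 bit

All other types still resolve through get_cldnn_type(). The remaining hunks edit the backend's unit-test skip list (unit_test.manifest):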
 all_2x2x3_eliminate_dims_0_1
-argmin_trivial_in_double
 avg_pool_2d_2channel_2image_padded_only_above_do_not_include_in_computation
 avg_pool_2d_2channel_2image_padded_only_above_include_in_computation
 avg_pool_3d_uneven_strided_padded
@@ -30,10 +29,7 @@ embedding_lookup_10x1_arbitrary
 embedding_lookup_10x1_arbitrary_index_type_int
 embedding_lookup_4x5_reverse
 generate_mask
-max_3d_to_scalar_double
 max_pool_3d
-numeric_double_inf
-numeric_double_nan
 quantize
 quantize_axes
 quantize_clamp_int32
@@ -69,9 +65,6 @@ shape_of_matrix
 shape_of_scalar
 shape_of_vector
-softmax_axis_3d_double
 sum_stable_acc_double
-sum_stable_simple_double
-sum_trivial_in_double
 topk_1d_max_all
 topk_1d_max_one
 topk_1d_max_partial
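
The manifest hunks prune double-precision tests from the IntelGPU skip list now that f64 survives the layout translation: argmin_trivial_in_double, max_3d_to_scalar_double, numeric_double_inf, numeric_double_nan, softmax_axis_3d_double, sum_stable_simple_double, and sum_trivial_in_double are expected to pass again. The hunk counts (-69,9 +65,6) allow only three removals in the last hunk, so sum_stable_acc_double, a stable-accumulation precision test, appears to stay skipped.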