Commit 0e008cc5 authored by shssf, committed by Robert Kimball

IntelGPU backend: Datatype workaround for NCF model (#1729)

parent 8c82136a
......@@ -128,8 +128,17 @@ cldnn::layout runtime::intelgpu::IntelGPULayout::create_cldnn_layout(
     const ngraph::element::Type& element_type, const Shape& element_shape)
 {
     const cldnn::format::type format = cldnn::format::bfyx;
-    const cldnn::data_types data_type = get_cldnn_type(element_type);
     const cldnn::tensor tensor = create_cldnn_tensor(element_shape);
+    cldnn::data_types data_type;
+    if ((element_type == ngraph::element::i16) || (element_type == ngraph::element::u16))
+    {
+        data_type = cldnn::data_types::f16;
+    }
+    else
+    {
+        data_type = get_cldnn_type(element_type);
+    }
     return cldnn::layout(data_type, format, tensor);
 }
......
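The hunk above replaces the single get_cldnn_type() call with a special case: i16 and u16 tensors get a clDNN f16 layout (presumably just as a same-size, 2-byte carrier, since the integer values are later handled as short/ushort by the generated OpenCL kernels; see the next hunk), while every other element type still goes through get_cldnn_type(). A minimal standalone sketch of that selection logic, with hypothetical enums standing in for the real cldnn::data_types and ngraph::element types:

#include <iostream>

// Hypothetical stand-ins for cldnn::data_types and ngraph::element types;
// only the members needed to illustrate the selection are listed.
enum class cldnn_type { i8, u8, i32, i64, f16, f32 };
enum class elem_type { i8, u8, i16, u16, i32, i64, f32 };

// Stand-in for the pre-existing get_cldnn_type(): a 1:1 mapping with no
// 16-bit integer entries.
static cldnn_type get_cldnn_type(elem_type t)
{
    switch (t)
    {
    case elem_type::i8: return cldnn_type::i8;
    case elem_type::u8: return cldnn_type::u8;
    case elem_type::i32: return cldnn_type::i32;
    case elem_type::i64: return cldnn_type::i64;
    default: return cldnn_type::f32;
    }
}

// The workaround from the hunk: 16-bit integer tensors ride in an f16 layout,
// everything else keeps its natural clDNN data type.
static cldnn_type layout_data_type(elem_type t)
{
    if (t == elem_type::i16 || t == elem_type::u16)
    {
        return cldnn_type::f16;
    }
    return get_cldnn_type(t);
}

int main()
{
    std::cout << (layout_data_type(elem_type::u16) == cldnn_type::f16) << '\n'; // prints 1
    std::cout << (layout_data_type(elem_type::i32) == cldnn_type::i32) << '\n'; // prints 1
}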
......@@ -36,6 +36,22 @@ string runtime::intelgpu::get_opencl_type_name(const element::Type& ngraph_type)
     {
         return "int";
     }
+    else if (ngraph_type == ngraph::element::i16)
+    {
+        return "short";
+    }
+    else if (ngraph_type == ngraph::element::u16)
+    {
+        return "ushort";
+    }
+    else if (ngraph_type == ngraph::element::i8)
+    {
+        return "char";
+    }
+    else if (ngraph_type == ngraph::element::u8)
+    {
+        return "uchar";
+    }
     else
     {
         return ngraph_type.c_type_string();
......
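This hunk gives the OpenCL code generator proper scalar names for the 16- and 8-bit integer types, so generated kernel source can declare buffers as short/ushort/char/uchar instead of whatever c_type_string() would return. A hedged sketch of where such names could appear, using a string-keyed copy of the mapping and a made-up emit_convert_kernel() helper (the backend's real kernel writer is not part of this diff):

#include <iostream>
#include <string>

// String-keyed copy of the mapping above, for illustration only; the real
// function takes an ngraph element::Type and falls back to c_type_string().
static std::string opencl_type_name(const std::string& t)
{
    if (t == "i32") return "int";
    if (t == "i16") return "short";
    if (t == "u16") return "ushort";
    if (t == "i8") return "char";
    if (t == "u8") return "uchar";
    if (t == "f32") return "float"; // via c_type_string() in the real code
    return t;
}

// Hypothetical helper showing how the names might surface in generated
// OpenCL C for an element-wise conversion kernel.
static std::string emit_convert_kernel(const std::string& in_t, const std::string& out_t)
{
    return "__kernel void convert_op(const __global " + opencl_type_name(in_t) +
           "* input, __global " + opencl_type_name(out_t) + "* output)\n"
           "{\n"
           "    const uint i = get_global_id(0);\n"
           "    output[i] = (" + opencl_type_name(out_t) + ")input[i];\n"
           "}\n";
}

int main()
{
    // u16 -> f32, matching the convert_uint16_float32 test added below.
    std::cout << emit_convert_kernel("u16", "f32");
}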
......@@ -14,6 +14,9 @@ backwards_slice
 backwards_tanh
 batch_norm_one_output
 batch_norm_three_outputs
+dequantize
+dequantize_axes
+dequantize_int8
 divide_by_zero_int32
 function_call
 max_pool_3d
......@@ -21,8 +24,8 @@ numeric_double_inf
 numeric_double_nan
 quantize
 quantize_axes
-quantize_int8
 quantize_clamp
+quantize_int8
 reduce_3d_to_vector
 reduce_matrix_cols_zero
 reduce_matrix_columns
......@@ -49,8 +52,8 @@ reverse_sequence_n2c3h4w2
 reverse_sequence_n4c3h2w2
 reverse_sequence_n4d2c3h2w2
 select_and_scatter_3d_without_overlap
-select_and_scatter_with_overlap
 select_and_scatter_without_overlap
+select_and_scatter_with_overlap
 topk_1d_max_all
 topk_1d_max_one
 topk_1d_max_partial
......
......@@ -2431,6 +2431,24 @@ NGRAPH_TEST(${BACKEND_NAME}, convert_int32_float32)
     EXPECT_EQ((vector<float>{1, 2, 3, 4}), read_vector<float>(result));
 }
 
+NGRAPH_TEST(${BACKEND_NAME}, convert_uint16_float32)
+{
+    Shape shape{2, 2};
+    auto A = make_shared<op::Parameter>(element::u16, shape);
+    auto f =
+        make_shared<Function>(make_shared<op::Convert>(A, element::f32), op::ParameterVector{A});
+    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+
+    // Create some tensors for input/output
+    auto a = backend->create_tensor(element::u16, shape);
+    copy_data(a, vector<uint16_t>{1, 2, 3, 4});
+    auto result = backend->create_tensor(element::f32, shape);
+
+    backend->call_with_validate(f, {result}, {a});
+    EXPECT_EQ((vector<float>{1, 2, 3, 4}), read_vector<float>(result));
+}
+
 NGRAPH_TEST(${BACKEND_NAME}, convert_int32_bool)
 {
     Shape shape{2, 2};
......
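A side note on why the new test can demand exact equality: every uint16_t value is exactly representable in a 32-bit float (24-bit significand), so a u16 -> f32 Convert involves no rounding. A standalone check, not part of the commit:

#include <cassert>
#include <cstdint>

int main()
{
    // Every 16-bit unsigned integer round-trips through float exactly,
    // which is why convert_uint16_float32 can use EXPECT_EQ on the result.
    for (uint32_t v = 0; v <= 0xFFFF; ++v)
    {
        assert(static_cast<uint32_t>(static_cast<float>(v)) == v);
    }
    return 0;
}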