Commit 86394f10 authored by Ayan Moitra, committed by Scott Cyphers

Unit tests for relevant resnet50 integer ops (#2456)

* Int unit tests that fail with bfloat

* move tests out of single file

* style

* Incorporate Bob's comments

* edits

* Incorporate comments

* style

* edits

* Add failing test to intel gpu manifest

* comments incorporated
parent 77fb38bd
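The first bullet above ("Int unit tests that fail with bfloat") is the motivation for the constants used throughout this commit: they differ only in bits that a low-precision float type cannot keep. Below is a minimal sketch, not part of the commit, that emulates bfloat16 by truncating a float32 to its top 16 bits (8 exponent bits, 7 stored significand bits) and shows the kinds of values these tests use collapsing to the same number.

#include <cstdint>
#include <cstring>
#include <iostream>

// Illustrative only: emulate bfloat16 by truncating a float32 to its upper
// 16 bits (this ignores rounding details but is enough to show the collapse).
static float to_bfloat16_trunc(float f)
{
    uint32_t bits;
    std::memcpy(&bits, &f, sizeof(bits));
    bits &= 0xFFFF0000u; // keep sign, exponent, and 7 significand bits
    float out;
    std::memcpy(&out, &bits, sizeof(out));
    return out;
}

int main()
{
    // After a bfloat16 round trip all three inputs become 2^30 = 0x40000000.
    for (int32_t v : {0x40000001, 0x40000140, 0x40000170})
    {
        int32_t r = static_cast<int32_t>(to_bfloat16_trunc(static_cast<float>(v)));
        std::cout << std::hex << v << " -> " << r << "\n"; // r is 40000000 each time
    }
    return 0;
}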
@@ -91,3 +91,7 @@ all_2x2x3_eliminate_dims_0_1
all_2x2x3_eliminate_dims_0_2
all_2x2x3_eliminate_dims_1_2
all_2x2x3_eliminate_dims_0_1_2
# GPU backend uses floats to implement these ops for int32
floor_int32
divide_int32
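These two exclusions follow directly from the comment above: round-tripping the new floor_int32 and divide_int32 inputs through float32 (24-bit significand) already changes them, so a backend that lowers these int32 ops to float arithmetic cannot produce the expected results. A small standalone illustration in plain C++ (not nGraph code):

#include <cstdint>
#include <iostream>

int main()
{
    // The updated test inputs are deliberately not exactly representable in float32.
    for (int32_t v : {-136314888, 0x40000010, 0x40000001})
    {
        int32_t round_trip = static_cast<int32_t>(static_cast<float>(v));
        std::cout << v << " -> " << round_trip << "\n";
    }
    // Prints: -136314888 -> -136314880
    //         1073741840 -> 1073741824   (0x40000010 -> 0x40000000)
    //         1073741825 -> 1073741824   (0x40000001 -> 0x40000000)
    return 0;
}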
@@ -50,3 +50,4 @@ topk_3d_min_partial
topk_3d_single_output
topk_5d_max_partial
topk_int64
floor_int32
@@ -148,6 +148,28 @@ NGRAPH_TEST(${BACKEND_NAME}, divide)
EXPECT_EQ((vector<float>{2, 2, 2, 2}), read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, divide_int32)
{
Shape shape{2, 2};
auto A = make_shared<op::Parameter>(element::i32, shape);
auto B = make_shared<op::Parameter>(element::i32, shape);
auto f = make_shared<Function>(make_shared<op::Divide>(A, B), ParameterVector{A, B});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape);
copy_data(a, vector<int32_t>{0x40000140, 0x40000001, 8, 16});
auto b = backend->create_tensor(element::i32, shape);
copy_data(b, vector<int32_t>{2, 5, 4, 8});
auto result = backend->create_tensor(element::i32, shape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a, b});
EXPECT_EQ((vector<int32_t>{536871072, 214748365, 2, 2}), read_vector<int32_t>(result));
}
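For reference, the expected outputs above are exact truncating int32 quotients; a quick compile-time check (illustrative only, not part of the test file):

// Illustrative sanity check of the divide_int32 expectations.
static_assert(0x40000140 / 2 == 536871072, "1073742144 / 2");
static_assert(0x40000001 / 5 == 214748365, "1073741825 / 5 divides exactly");
static_assert(8 / 4 == 2 && 16 / 8 == 2, "small cases");

A backend that converts the operands to float32 first would already have rounded 0x40000140 and 0x40000001 to nearby representable values, so it could not reproduce these quotients.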
NGRAPH_TEST(${BACKEND_NAME}, divide_overload)
{
Shape shape{2, 2};
@@ -371,7 +393,7 @@ NGRAPH_TEST(${BACKEND_NAME}, minimum_int64)
NGRAPH_TEST(${BACKEND_NAME}, maximum_int32)
{
- Shape shape{2, 2, 2};
+ Shape shape{2, 2};
auto A = make_shared<op::Parameter>(element::i32, shape);
auto B = make_shared<op::Parameter>(element::i32, shape);
auto f = make_shared<Function>(make_shared<op::Maximum>(A, B), ParameterVector{A, B});
@@ -380,14 +402,14 @@ NGRAPH_TEST(${BACKEND_NAME}, maximum_int32)
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape);
- copy_data(a, vector<int32_t>{1, 8, -8, 17, -5, 67635216, 2, 1});
+ copy_data(a, vector<int32_t>{0x40000140, 0x40000001, -8, 17});
auto b = backend->create_tensor(element::i32, shape);
- copy_data(b, vector<int32_t>{1, 2, 4, 8, 0, 18448, 1, 6});
+ copy_data(b, vector<int32_t>{0x40000170, 0x40000000, 4, 8});
auto result = backend->create_tensor(element::i32, shape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a, b});
- EXPECT_EQ((vector<int32_t>{1, 8, 4, 17, 0, 67635216, 2, 6}), read_vector<int32_t>(result));
+ EXPECT_EQ((vector<int32_t>{0x40000170, 0x40000001, 4, 17}), read_vector<int32_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, maximum_int64)
@@ -101,6 +101,27 @@ NGRAPH_TEST(${BACKEND_NAME}, greater)
EXPECT_EQ((vector<char>{0, 1, 0, 1, 0, 1, 1, 0}), read_vector<char>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, greater_int64)
{
Shape shape{2, 2, 2};
auto A = make_shared<op::Parameter>(element::i64, shape);
auto B = make_shared<op::Parameter>(element::i64, shape);
auto f = make_shared<Function>(make_shared<op::Greater>(A, B), ParameterVector{A, B});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i64, shape);
copy_data(a, vector<int64_t>{0x4000000000000002, 0x4000000000000006, -8, 17, -5, 5, 2, 1});
auto b = backend->create_tensor(element::i64, shape);
copy_data(b, vector<int64_t>{0x4000000000000001, 0x4000000000000002, 4, 8, 0, 0, 1, 2});
auto result = backend->create_tensor(element::boolean, shape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a, b});
EXPECT_EQ((vector<char>{1, 1, 0, 1, 0, 1, 1, 0}), read_vector<char>(result));
}
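greater_int64 uses values just above 2^62 because double's 53-bit significand cannot separate them; a comparison carried out in double would report the first two pairs as equal, whereas the exact int64 comparison does not. A minimal illustration in plain C++ (not nGraph code):

#include <cstdint>
#include <iostream>

int main()
{
    // 2^62 + 2 and 2^62 + 1 differ only in the low bits; both round to 2^62
    // as doubles, so a double-based comparison loses the ordering.
    int64_t a = 0x4000000000000002;
    int64_t b = 0x4000000000000001;
    std::cout << (a > b) << "\n";                                           // 1
    std::cout << (static_cast<double>(a) > static_cast<double>(b)) << "\n"; // 0
    return 0;
}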
NGRAPH_TEST(${BACKEND_NAME}, greatereq)
{
Shape shape{2, 2, 2};
@@ -164,6 +185,27 @@ NGRAPH_TEST(${BACKEND_NAME}, lesseq)
EXPECT_EQ((vector<char>{1, 0, 1, 0, 1, 1, 0, 1}), read_vector<char>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, lesseq_int32)
{
Shape shape{2, 2};
auto A = make_shared<op::Parameter>(element::i32, shape);
auto B = make_shared<op::Parameter>(element::i32, shape);
auto f = make_shared<Function>(make_shared<op::LessEq>(A, B), ParameterVector{A, B});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape);
copy_data(a, vector<int32_t>{0x40000170, 0x40000005, 0x40000005, -5});
auto b = backend->create_tensor(element::i32, shape);
copy_data(b, vector<int32_t>{0x40000140, 0x40000001, 0x40000005, 0});
auto result = backend->create_tensor(element::boolean, shape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a, b});
EXPECT_EQ((vector<char>{0, 0, 1, 1}), read_vector<char>(result)); // NNP result {1, 1, 0, 1}
}
NGRAPH_TEST(${BACKEND_NAME}, lesseq_bool)
{
Shape shape{2, 2, 2};
@@ -403,6 +403,28 @@ NGRAPH_TEST(${BACKEND_NAME}, sum_3d_to_scalar)
read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, sum_3d_to_scalar_int32)
{
Shape shape_a{3, 3, 3};
auto A = make_shared<op::Parameter>(element::i32, shape_a);
Shape shape_rt{};
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0, 1, 2}), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape_a);
copy_data(a, vector<int32_t>{0x40000001, 10, 19, 4, 13, 22, 7, 16, 25, 2, 11, 20, 5, 14,
23, 8, 17, 26, 3, 12, 21, 6, 15, 24, 9, 18, 27});
auto result = backend->create_tensor(element::i32, shape_rt);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<int32_t>{0x40000001 + 10 + 19 + 4 + 13 + 22 + 7 + 16 + 25 + 2 + 11 + 20 + 5 +
14 + 23 + 8 + 17 + 26 + 3 + 12 + 21 + 6 + 15 + 24 + 9 + 18 + 27}),
read_vector<int32_t>(result));
}
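The expected value here is 0x40000001 plus the remaining entries 2 through 27, which add up to 377, so the exact total is 1073742202. That fits comfortably in int32, but it is not representable in float32 (adjacent float32 values are 128 apart at this magnitude), so a backend that accumulates the reduction in float32 cannot match it. A small check of that claim (illustrative only, not part of the commit):

#include <cstdint>
#include <iostream>

int main()
{
    int32_t exact = 0x40000001 + 377; // exact int32 total for this test
    std::cout << exact << "\n";       // 1073742202

    // Nearest float32 to the exact total; rounding pushes it up to 1073742208.
    float as_float = static_cast<float>(exact);
    std::cout << static_cast<int32_t>(as_float) << "\n"; // 1073742208
    return 0;
}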
NGRAPH_TEST(${BACKEND_NAME}, sum_3d_eliminate_zero_dim)
{
Shape shape_a{3, 0, 2};
@@ -1374,12 +1374,12 @@ NGRAPH_TEST(${BACKEND_NAME}, convert_int32_float32)
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape);
- copy_data(a, vector<int32_t>{1, 2, 3, 4});
+ copy_data(a, vector<int32_t>{281, 2, 3, 4});
auto result = backend->create_tensor(element::f32, shape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
- EXPECT_EQ((vector<float>{1, 2, 3, 4}), read_vector<float>(result));
+ EXPECT_EQ((vector<float>{281, 2, 3, 4}), read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, convert_uint16_float32)
@@ -1781,7 +1781,7 @@ NGRAPH_TEST(${BACKEND_NAME}, scalar_constant_float32)
NGRAPH_TEST(${BACKEND_NAME}, scalar_constant_int64)
{
- auto r = op::Constant::create(element::i64, Shape{}, {2112});
+ auto r = op::Constant::create(element::i64, Shape{}, {0x4000000000000001});
auto f = make_shared<Function>(r, ParameterVector{});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
@@ -1791,7 +1791,7 @@ NGRAPH_TEST(${BACKEND_NAME}, scalar_constant_int64)
auto handle = backend->compile(f);
handle->call_with_validate({result}, {});
- EXPECT_EQ(vector<int64_t>{2112}, read_vector<int64_t>(result));
+ EXPECT_EQ(vector<int64_t>{0x4000000000000001}, read_vector<int64_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_float32)
@@ -1812,18 +1812,16 @@ NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_float32)
NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_int64)
{
- Shape shape{2, 2};
- auto r = op::Constant::create(element::i64, shape, {2112, 1848, 1776, 1964});
+ Shape shape{2};
+ auto r = op::Constant::create(element::i64, shape, {0x4000000000000001, 0x4000000000000002});
auto f = make_shared<Function>(r, ParameterVector{});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto result = backend->create_tensor(element::i64, shape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {});
- EXPECT_EQ((vector<int64_t>{2112, 1848, 1776, 1964}), read_vector<int64_t>(result));
+ EXPECT_EQ((vector<int64_t>{0x4000000000000001, 0x4000000000000002}),
+           read_vector<int64_t>(result));
}
// TODO: Kahan sum only works in limited cases with CPU / Interpreter backend
@@ -256,12 +256,13 @@ NGRAPH_TEST(${BACKEND_NAME}, floor_int32)
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape);
- copy_data(a, vector<int32_t>{-2, -136314880, 0, 4});
+ copy_data(a, vector<int32_t>{-2, -136314888, 0x40000010, 0x40000001});
auto result = backend->create_tensor(element::i32, shape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
- EXPECT_EQ((vector<int32_t>{-2, -136314880, 0, 4}), read_vector<int32_t>(result));
+ EXPECT_EQ((vector<int32_t>{-2, -136314888, 0x40000010, 0x40000001}),
+           read_vector<int32_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, log)