Commit e2255fbd authored by Robert Kimball, committed by Scott Cyphers

Pool tests updated to check all backends (#1245)

* make pool tests check backends other than CPU

* more unit test cleanup
parent 7c7c5d62
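The commit moves the pool tests into the backend-parameterized suite: backend_test.in.cpp is instantiated once per backend, with ${BACKEND_NAME} substituted into NGRAPH_TEST and runtime::Backend::create, and each backend's output is compared against the INTERPRETER reference backend. Below is a minimal, self-contained sketch of that comparison pattern; the all_close helper here is a hypothetical stand-in, not nGraph's real test::all_close, which operates on tensor views.

#include <cassert>
#include <cmath>
#include <cstddef>
#include <vector>

// Hypothetical stand-in for nGraph's test::all_close: element-wise comparison
// with the relative/absolute tolerances (1.0e-4f) used by the hunks below.
static bool all_close(const std::vector<float>& a,
                      const std::vector<float>& b,
                      float rtol = 1.0e-4f,
                      float atol = 1.0e-4f)
{
    if (a.size() != b.size())
    {
        return false;
    }
    for (std::size_t i = 0; i < a.size(); i++)
    {
        if (std::abs(a[i] - b[i]) > atol + rtol * std::abs(b[i]))
        {
            return false;
        }
    }
    return true;
}

int main()
{
    std::vector<float> reference{0.0f, 16.0f}; // e.g. results from "INTERPRETER"
    std::vector<float> candidate{0.0f, 16.0f}; // e.g. results from the backend under test
    assert(all_close(candidate, reference));
    return 0;
}

In the hunks that follow, only the backend string changes, from the hard-coded "CPU" to "${BACKEND_NAME}", so the same test body exercises every configured backend.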
@@ -10,7 +10,6 @@ divide_by_zero_int32
#int64 is not supported by cuDNN
dot_matrix_vector_int64
#no mkldnn on GPU
mkldnn_layouts
#error throw is not the same on GPU, not supported yet
one_hot_scalar_fp_nonint_in_3
one_hot_scalar_oob_in_3
@@ -34,3 +33,5 @@ backwards_maxpool_n2_c1_hw5_3x3_str2_max
backwards_avgpool_n1_c1_hw2x2
backwards_avgpool_n1_c1_hw4x4
backwards_avgpool_n2_c2_hw4x4
max_pool_3d
avg_pool_3d
@@ -226,7 +226,6 @@ min_to_scalar
min_trivial
min_trivial_5d
min_vector_zero
mkldnn_layouts
multiple_backends
multiple_result
negative
@@ -4,4 +4,3 @@ batchnorm_fprop_b2c2h2w1
batchnorm_fprop_globalstats_b2c2w2h1
batchnorm_fprop_inference_b2c2h2w1
computation_reuse
mkldnn_layouts
@@ -4686,7 +4686,7 @@ NGRAPH_TEST(${BACKEND_NAME}, max_pool_3d)
args.push_back(tensor_val);
}
auto int_results = execute(int_f, args, "INTERPRETER");
auto cpu_results = execute(cpu_f, args, "CPU");
auto cpu_results = execute(cpu_f, args, "${BACKEND_NAME}");
for (size_t i = 0; i < cpu_results.size(); i++)
{
EXPECT_TRUE(test::all_close(cpu_results.at(i), int_results.at(i), 1.0e-4f, 1.0e-4f));
@@ -5836,52 +5836,6 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_outlining)
EXPECT_EQ(vector<float>{expected_result}, read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, mkldnn_layouts)
{
Shape shape_a{1, 16, 2, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_b{32, 16, 1, 1};
auto B = make_shared<op::Parameter>(element::f32, shape_b);
Shape shape_r{1, 32, 2, 2};
auto conv1 = make_shared<op::Convolution>(A,
B,
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{0, 0},
CoordinateDiff{0, 0},
Strides{1, 1});
Shape pool_shape{1, 1};
auto pool1 = make_shared<op::AvgPool>(conv1, pool_shape);
auto pool1_result = make_shared<op::Result>(pool1);
// Request result in default layout
pool1_result->set_needs_default_layout(true);
auto f = make_shared<Function>(ResultVector{pool1_result}, op::ParameterVector{A, B});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
vector<float> input(64, 1.0f);
vector<float> weights;
vector<float> rv(128);
for (int i = 0; i < 128; i++)
weights.push_back(0.0f);
for (int i = 0; i < 384; i++)
weights.push_back(1.0f);
auto a = backend->create_tensor(element::f32, shape_a, input.data());
auto b = backend->create_tensor(element::f32, shape_b, weights.data());
auto result = backend->create_tensor(element::f32, shape_r, rv.data());
vector<float> expected_result;
for (int i = 0; i < 32; i++)
expected_result.push_back(0.0f);
for (int i = 0; i < 96; i++)
expected_result.push_back(16.0f);
backend->call(f, {result}, {a, b});
EXPECT_EQ(vector<float>{expected_result}, rv);
}
NGRAPH_TEST(${BACKEND_NAME}, computation_reuse)
{
Shape shape_a{1, 16, 2, 2};
@@ -6463,7 +6417,7 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_3d)
args.push_back(tensor_val);
}
auto int_results = execute(int_f, args, "INTERPRETER");
auto cpu_results = execute(cpu_f, args, "CPU");
auto cpu_results = execute(cpu_f, args, "${BACKEND_NAME}");
for (size_t i = 0; i < cpu_results.size(); i++)
{
EXPECT_TRUE(test::all_close(cpu_results.at(i), int_results.at(i), 1.0e-4f, 1.0e-4f));
@@ -137,3 +137,57 @@ TEST(cpu_test, abc_tbb)
}
}
#endif // NGRAPH_TBB_ENABLE
TEST(cpu_test, mkldnn_layouts)
{
Shape shape_a{1, 16, 2, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_b{32, 16, 1, 1};
auto B = make_shared<op::Parameter>(element::f32, shape_b);
Shape shape_r{1, 32, 2, 2};
auto conv1 = make_shared<op::Convolution>(A,
B,
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{0, 0},
CoordinateDiff{0, 0},
Strides{1, 1});
Shape pool_shape{1, 1};
auto pool1 = make_shared<op::AvgPool>(conv1, pool_shape);
auto pool1_result = make_shared<op::Result>(pool1);
// Request result in default layout
pool1_result->set_needs_default_layout(true);
auto f = make_shared<Function>(ResultVector{pool1_result}, op::ParameterVector{A, B});
auto backend = runtime::Backend::create("CPU");
vector<float> input(64, 1.0f);
vector<float> weights;
vector<float> rv(128);
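// Weights for the 32x16x1x1 convolution (32 x 16 = 512 values): the first
// 8 output channels (8 x 16 = 128 weights) are all zero, the remaining
// 24 channels (24 x 16 = 384 weights) are all one.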
for (int i = 0; i < 128; i++)
{
weights.push_back(0.0f);
}
for (int i = 0; i < 384; i++)
{
weights.push_back(1.0f);
}
auto a = backend->create_tensor(element::f32, shape_a, input.data());
auto b = backend->create_tensor(element::f32, shape_b, weights.data());
auto result = backend->create_tensor(element::f32, shape_r, rv.data());
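// Each 1x1 convolution output sums 16 inputs of 1.0, and the 1x1 average
// pool is an identity, so the 8 zero-weight channels yield 0.0 and the
// 24 one-weight channels yield 16.0 at each of the 2x2 = 4 spatial
// positions: 32 zeros followed by 96 sixteens.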
vector<float> expected_result;
for (int i = 0; i < 32; i++)
{
expected_result.push_back(0.0f);
}
for (int i = 0; i < 96; i++)
{
expected_result.push_back(16.0f);
}
backend->call(f, {result}, {a, b});
EXPECT_EQ(vector<float>{expected_result}, rv);
}