Commit e2255fbd authored by Robert Kimball, committed by Scott Cyphers

Pool tests updated to check all backends (#1245)

* make pool test check backends other than CPU

* more unit test cleanup
parent 7c7c5d62
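In practice, the change replaces the hard-coded "CPU" backend string in the max_pool_3d and avg_pool_3d comparison tests with the ${BACKEND_NAME} placeholder that NGRAPH_TEST expands for each backend, and moves the CPU-only mkldnn_layouts test into cpu_test. A minimal sketch of the resulting comparison pattern, using the execute and test::all_close helpers that appear in the diff below (graph construction elided):

NGRAPH_TEST(${BACKEND_NAME}, max_pool_3d)
{
    // ... build identical functions int_f and cpu_f and the shared input args ...

    // Reference results always come from the INTERPRETER backend.
    auto int_results = execute(int_f, args, "INTERPRETER");

    // The backend under test is substituted for ${BACKEND_NAME} instead of being hard-coded to "CPU".
    auto cpu_results = execute(cpu_f, args, "${BACKEND_NAME}");

    for (size_t i = 0; i < cpu_results.size(); i++)
    {
        EXPECT_TRUE(test::all_close(cpu_results.at(i), int_results.at(i), 1.0e-4f, 1.0e-4f));
    }
}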
@@ -2,15 +2,14 @@
abc_int64
batch_norm_one_output
batch_norm_three_outputs
#need to check
computation_reuse
#int64 is not supported
concat_matrix_int64
divide_by_zero_int32
#int64 is not supported by cuDNN
dot_matrix_vector_int64
#no mkldnn on GPU
mkldnn_layouts
#error throw is not the same on GPU, not supported yet
one_hot_scalar_fp_nonint_in_3
one_hot_scalar_oob_in_3
@@ -23,7 +22,7 @@ select_and_scatter_with_overlap
select_and_scatter_without_overlap
#custom_mem is not implemented on GPU
tensorview_custom_mem
#sigmoid not implemented
sigmoid_n1c1h2w2
sigmoid_n1c1h4
sigmoid_bprop_n1c1h4
@@ -34,3 +33,5 @@ backwards_maxpool_n2_c1_hw5_3x3_str2_max
backwards_avgpool_n1_c1_hw2x2
backwards_avgpool_n1_c1_hw4x4
backwards_avgpool_n2_c2_hw4x4
max_pool_3d
avg_pool_3d
@@ -226,7 +226,6 @@ min_to_scalar
min_trivial
min_trivial_5d
min_vector_zero
mkldnn_layouts
multiple_backends
multiple_result
negative
...
@@ -4,4 +4,3 @@ batchnorm_fprop_b2c2h2w1
batchnorm_fprop_globalstats_b2c2w2h1
batchnorm_fprop_inference_b2c2h2w1
computation_reuse
mkldnn_layouts
@@ -4686,7 +4686,7 @@ NGRAPH_TEST(${BACKEND_NAME}, max_pool_3d)
args.push_back(tensor_val);
}
auto int_results = execute(int_f, args, "INTERPRETER");
auto cpu_results = execute(cpu_f, args, "CPU");
auto cpu_results = execute(cpu_f, args, "${BACKEND_NAME}");
for (size_t i = 0; i < cpu_results.size(); i++)
{
EXPECT_TRUE(test::all_close(cpu_results.at(i), int_results.at(i), 1.0e-4f, 1.0e-4f));
@@ -5836,52 +5836,6 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_outlining)
EXPECT_EQ(vector<float>{expected_result}, read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, mkldnn_layouts)
{
Shape shape_a{1, 16, 2, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_b{32, 16, 1, 1};
auto B = make_shared<op::Parameter>(element::f32, shape_b);
Shape shape_r{1, 32, 2, 2};
auto conv1 = make_shared<op::Convolution>(A,
B,
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{0, 0},
CoordinateDiff{0, 0},
Strides{1, 1});
Shape pool_shape{1, 1};
auto pool1 = make_shared<op::AvgPool>(conv1, pool_shape);
auto pool1_result = make_shared<op::Result>(pool1);
// Request result in default layout
pool1_result->set_needs_default_layout(true);
auto f = make_shared<Function>(ResultVector{pool1_result}, op::ParameterVector{A, B});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
vector<float> input(64, 1.0f);
vector<float> weights;
vector<float> rv(128);
for (int i = 0; i < 128; i++)
weights.push_back(0.0f);
for (int i = 0; i < 384; i++)
weights.push_back(1.0f);
auto a = backend->create_tensor(element::f32, shape_a, input.data());
auto b = backend->create_tensor(element::f32, shape_b, weights.data());
auto result = backend->create_tensor(element::f32, shape_r, rv.data());
vector<float> expected_result;
for (int i = 0; i < 32; i++)
expected_result.push_back(0.0f);
for (int i = 0; i < 96; i++)
expected_result.push_back(16.0f);
backend->call(f, {result}, {a, b});
EXPECT_EQ(vector<float>{expected_result}, rv);
}
NGRAPH_TEST(${BACKEND_NAME}, computation_reuse)
{
Shape shape_a{1, 16, 2, 2};
@@ -6463,7 +6417,7 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_3d)
args.push_back(tensor_val);
}
auto int_results = execute(int_f, args, "INTERPRETER");
auto cpu_results = execute(cpu_f, args, "CPU");
auto cpu_results = execute(cpu_f, args, "${BACKEND_NAME}");
for (size_t i = 0; i < cpu_results.size(); i++)
{
EXPECT_TRUE(test::all_close(cpu_results.at(i), int_results.at(i), 1.0e-4f, 1.0e-4f));
...
@@ -137,3 +137,57 @@ TEST(cpu_test, abc_tbb)
}
}
#endif // NGRAPH_TBB_ENABLE
TEST(cpu_test, mkldnn_layouts)
{
Shape shape_a{1, 16, 2, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_b{32, 16, 1, 1};
auto B = make_shared<op::Parameter>(element::f32, shape_b);
Shape shape_r{1, 32, 2, 2};
auto conv1 = make_shared<op::Convolution>(A,
B,
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{0, 0},
CoordinateDiff{0, 0},
Strides{1, 1});
Shape pool_shape{1, 1};
auto pool1 = make_shared<op::AvgPool>(conv1, pool_shape);
auto pool1_result = make_shared<op::Result>(pool1);
// Request result in default layout
pool1_result->set_needs_default_layout(true);
auto f = make_shared<Function>(ResultVector{pool1_result}, op::ParameterVector{A, B});
auto backend = runtime::Backend::create("CPU");
vector<float> input(64, 1.0f);
vector<float> weights;
vector<float> rv(128);
for (int i = 0; i < 128; i++)
{
weights.push_back(0.0f);
}
for (int i = 0; i < 384; i++)
{
weights.push_back(1.0f);
}
auto a = backend->create_tensor(element::f32, shape_a, input.data());
auto b = backend->create_tensor(element::f32, shape_b, weights.data());
auto result = backend->create_tensor(element::f32, shape_r, rv.data());
vector<float> expected_result;
for (int i = 0; i < 32; i++)
{
expected_result.push_back(0.0f);
}
for (int i = 0; i < 96; i++)
{
expected_result.push_back(16.0f);
}
backend->call(f, {result}, {a, b});
EXPECT_EQ(vector<float>{expected_result}, rv);
}