Commit b3b5b9fd authored by gcwenger, committed by Scott Cyphers

Support for parameterized testing combined with backend driven manifests (#1855)

* Support for parameterized testing combined with backend driven manifests.

* clang-format fix

* Improved nGraph variants of gtest fixture and parameterized testing macros

* Reverted mistaken change to src/ngraph/runtime/gpu/CMakeLists.txt
parent 02affea5
avg_pool_2d_2channel_2image_padded_only_above_do_not_include_in_computation
avg_pool_2d_2channel_2image_padded_only_above_include_in_computation
avg_pool_3d_strided_uneven_padded_do_not_include_in_computation
avg_pool_3d_uneven_strided_padded_include_in_computation
avg_pool_3d_uneven_strided_padded
backwards_batch_norm_three_outputs
backwards_dot_scalar_tensor
backwards_dot_tensor3_tensor3
@@ -237,11 +237,20 @@ NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_2channel_2image_asym_pad)
               read_vector<float>(result));
 }
 
-NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_1channel_1image_overpadded)
+// MaxPool2D1ChannelTests test fixture for test setup reuse
+class MaxPool2D1ChannelTests : public testing::Test
 {
+public:
     Shape shape_a{1, 1, 5, 5};
     Shape window_shape{2, 3};
-    auto window_movement_strides = Strides{1, 1};
+    Strides window_movement_strides{1, 1};
+
+protected:
+    virtual void SetUp() override {}
+};
+
+NGRAPH_TEST_F(${BACKEND_NAME}, MaxPool2D1ChannelTests, max_pool_2d_1channel_1image_overpadded)
+{
     Shape padding_below{2, 0};
     Shape padding_above{1, 2};
     auto A = make_shared<op::Parameter>(element::f32, shape_a);
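[Editor's note] The hunk above hoists per-test setup into a shared gtest fixture; NGRAPH_TEST_F presumably wraps gtest's TEST_F with the backend-manifest check. A minimal plain-gtest sketch of the same fixture pattern, with illustrative names that are not from the diff:

#include <gtest/gtest.h>
#include <vector>

// Shared setup lives in the fixture; gtest constructs a fresh
// instance of this class for every TEST_F that names it.
class MaxPoolFixtureSketch : public ::testing::Test
{
public:
    std::vector<size_t> shape_a{1, 1, 5, 5};
    std::vector<size_t> window_shape{2, 3};
};

// TEST_F bodies are members of a fixture subclass, so the shared
// members are directly visible without any boilerplate.
TEST_F(MaxPoolFixtureSketch, overpadded_sketch)
{
    EXPECT_EQ(shape_a.size(), 4u);
    EXPECT_EQ(window_shape.size(), 2u);
}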
@@ -277,11 +286,8 @@ NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_1channel_1image_overpadded)
               read_vector<float>(result)));
 }
 
-NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_1channel_1image_padded)
+NGRAPH_TEST_F(${BACKEND_NAME}, MaxPool2D1ChannelTests, max_pool_2d_1channel_1image_padded)
 {
-    Shape shape_a{1, 1, 5, 5};
-    Shape window_shape{2, 3};
-    auto window_movement_strides = Strides{1, 1};
     Shape padding_below{1, 0};
     Shape padding_above{1, 2};
     auto A = make_shared<op::Parameter>(element::f32, shape_a);
@@ -1183,7 +1189,16 @@ NGRAPH_TEST(${BACKEND_NAME},
               read_vector<float>(result)));
 }
 
-NGRAPH_TEST(${BACKEND_NAME}, avg_pool_3d_strided_uneven_padded_do_not_include_in_computation)
+// Params to drive avg_pool_3d testing variations
+class avg_pool_3d_params : public ::testing::TestWithParam<bool>
 {
+protected:
+    avg_pool_3d_params() { include_pad = GetParam(); }
+    bool include_pad;
+};
+
+// avg_pool_3d test code using params
+NGRAPH_TEST_P(${BACKEND_NAME}, avg_pool_3d_params, avg_pool_3d_uneven_strided_padded)
+{
     Shape shape_a{64, 3, 12, 13, 15};
     Shape window_shape{4, 5, 4};
@@ -1195,11 +1210,11 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_3d_strided_uneven_padded_do_not_include_in
     auto cpu_f = make_shared<Function>(
         make_shared<op::AvgPool>(
-            A, window_shape, move_strides, padding_below, padding_above, false),
+            A, window_shape, move_strides, padding_below, padding_above, include_pad),
         op::ParameterVector{A});
     auto int_f = make_shared<Function>(
         make_shared<op::AvgPool>(
-            B, window_shape, move_strides, padding_below, padding_above, false),
+            B, window_shape, move_strides, padding_below, padding_above, include_pad),
         op::ParameterVector{B});
 
     test::Uniform<float> rng(0.0f, 1.0f);
     vector<vector<float>> args;
@@ -1218,35 +1233,5 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_3d_strided_uneven_padded_do_not_include_in
     }
 }
 
-NGRAPH_TEST(${BACKEND_NAME}, avg_pool_3d_uneven_strided_padded_include_in_computation)
-{
-    Shape shape_a{64, 3, 7, 8, 10};
-    Shape window_shape{2, 3, 2};
-    auto move_strides = Strides{2, 3, 4};
-    Shape padding_below{5, 6, 4};
-    Shape padding_above{6, 4, 5};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto B = make_shared<op::Parameter>(element::f32, shape_a);
-
-    auto cpu_f = make_shared<Function>(
-        make_shared<op::AvgPool>(A, window_shape, move_strides, padding_below, padding_above, true),
-        op::ParameterVector{A});
-    auto int_f = make_shared<Function>(
-        make_shared<op::AvgPool>(B, window_shape, move_strides, padding_below, padding_above, true),
-        op::ParameterVector{B});
-
-    test::Uniform<float> rng(0.0f, 1.0f);
-    vector<vector<float>> args;
-    for (shared_ptr<op::Parameter> param : int_f->get_parameters())
-    {
-        vector<float> tensor_val(shape_size(param->get_shape()));
-        rng.initialize(tensor_val);
-        args.push_back(tensor_val);
-    }
-    auto int_results = execute(int_f, args, "INTERPRETER");
-    auto backend_results = execute(cpu_f, args, "${BACKEND_NAME}");
-    for (size_t i = 0; i < backend_results.size(); i++)
-    {
-        EXPECT_TRUE(test::all_close(backend_results.at(i), int_results.at(i), 1.0e-4f, 1.0e-4f));
-    }
-}
+// avg_pool_3d case generation
+NGRAPH_INSTANTIATE_TEST_CASE_P(${BACKEND_NAME}, include_pad, avg_pool_3d_params, testing::Bool());
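[Editor's note] The three hunks above merge two near-duplicate tests into one value-parameterized test: the include_pad flag becomes the parameter, and the instantiation generates both cases. The NGRAPH_TEST_P / NGRAPH_INSTANTIATE_TEST_CASE_P pair presumably mirrors gtest's TEST_P / INSTANTIATE_TEST_CASE_P. A minimal sketch of that underlying pattern, with illustrative names (plain gtest, no nGraph macros):

#include <gtest/gtest.h>

// One fixture parameterized over bool, as avg_pool_3d_params is above.
class IncludePadSketch : public ::testing::TestWithParam<bool>
{
protected:
    IncludePadSketch() { include_pad = GetParam(); }
    bool include_pad;
};

// TEST_P defines the body once; it runs once per generated parameter.
TEST_P(IncludePadSketch, runs_for_both_values)
{
    SUCCEED() << "include_pad = " << include_pad;
}

// testing::Bool() yields exactly two cases: false and true.
INSTANTIATE_TEST_CASE_P(include_pad, IncludePadSketch, ::testing::Bool());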
@@ -27,12 +27,12 @@ using namespace ngraph;
 static unordered_map<string, unordered_set<string>> s_blacklists;
 
-string ngraph::prepend_disabled(const string& test_case_name,
+string ngraph::prepend_disabled(const string& backend_name,
                                 const string& test_name,
                                 const string& manifest)
 {
     string rc = test_name;
 
-    unordered_set<string>& blacklist = s_blacklists[test_case_name];
+    unordered_set<string>& blacklist = s_blacklists[backend_name];
     if (blacklist.empty() && !manifest.empty())
     {
         ifstream f(manifest);
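[Editor's note] The rename makes the blacklist keyed by backend rather than by gtest test case name, which is what lets one manifest drive every fixture and parameterized suite for that backend. A hedged sketch of the assumed mechanism (the body below is an illustration, not the diff's code; prepending gtest's "DISABLED_" prefix is the conventional way to make gtest skip a test):

#include <fstream>
#include <string>
#include <unordered_set>

// Assumed semantics: the manifest lists one test name per line, and a
// blacklisted test is disabled by giving it gtest's "DISABLED_" prefix.
std::string prepend_disabled_sketch(const std::string& test_name,
                                    const std::string& manifest_path)
{
    std::unordered_set<std::string> blacklist;
    std::ifstream f(manifest_path);
    for (std::string line; std::getline(f, line);)
    {
        blacklist.insert(line);
    }
    return blacklist.count(test_name) ? "DISABLED_" + test_name : test_name;
}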
@@ -54,3 +54,16 @@ string ngraph::prepend_disabled(const string& test_case_name,
     }
     return rc;
 }
+
+string ngraph::combine_test_backend_and_case(const string& backend_name,
+                                             const string& test_casename)
+{
+    if (backend_name == test_casename)
+    {
+        return backend_name;
+    }
+    else
+    {
+        return backend_name + "_" + test_casename;
+    }
+}
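[Editor's note] The new helper builds the gtest case name for a backend-qualified fixture or suite: when the case name is just the backend (the plain NGRAPH_TEST path) it is used alone, otherwise the backend is prefixed. A standalone copy of the same logic with an illustrative check (names here are not from the diff):

#include <cassert>
#include <string>

// Same logic as ngraph::combine_test_backend_and_case above.
std::string combine(const std::string& backend, const std::string& test_case)
{
    return backend == test_case ? backend : backend + "_" + test_case;
}

int main()
{
    assert(combine("GPU", "GPU") == "GPU");
    assert(combine("GPU", "avg_pool_3d_params") == "GPU_avg_pool_3d_params");
}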