Commit 67844320 authored by gcwenger's avatar gcwenger Committed by Robert Kimball

Improved AvgPool unit test coverage. Fixed small bug that was revealed. (#1813)

* Improved AvgPool unit test coverage. Fixed small bug that was revealed.

* Renamed disabled unit tests to reflect new names.

* Ran clang-format on backend_test.in.cpp to fix format.

* Renamed cpu_results->backend_results in two unit tests.
parent 3fece336
...@@ -233,20 +233,19 @@ void runtime::gpu::GPU_Emitter::emit_AvgPool(EMIT_ARGS) ...@@ -233,20 +233,19 @@ void runtime::gpu::GPU_Emitter::emit_AvgPool(EMIT_ARGS)
{ {
auto& cuda_emitter = external_function->get_primitive_emitter()->get_cuda_emitter(); auto& cuda_emitter = external_function->get_primitive_emitter()->get_cuda_emitter();
index = cuda_emitter->build_avg_pool({{args[0].get_type(), out[0].get_type()}}, index =
cuda_emitter->build_avg_pool({{args[0].get_type(), out[0].get_type()}},
input_shape, input_shape,
result_shape, result_shape,
avg_pool->get_window_shape(), avg_pool->get_window_shape(),
avg_pool->get_window_movement_strides(), avg_pool->get_window_movement_strides(),
padding_below); padding_below,
avg_pool->get_include_padding_in_avg_computation());
} }
else if (input_shape.size() <= 5)
{
// 2d and 3d avg pool (NCHW) with either symmetric padding or no padding // 2d and 3d avg pool (NCHW) with either symmetric padding or no padding
if (input_shape.size() == 4 || input_shape.size() == 5) else if (input_shape.size() == 4 || input_shape.size() == 5)
{ {
auto& cudnn_emitter = auto& cudnn_emitter = external_function->get_primitive_emitter()->get_cudnn_emitter();
external_function->get_primitive_emitter()->get_cudnn_emitter();
auto cudnn_avg_type = avg_pool->get_include_padding_in_avg_computation() auto cudnn_avg_type = avg_pool->get_include_padding_in_avg_computation()
? CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING ? CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING
...@@ -262,7 +261,6 @@ void runtime::gpu::GPU_Emitter::emit_AvgPool(EMIT_ARGS) ...@@ -262,7 +261,6 @@ void runtime::gpu::GPU_Emitter::emit_AvgPool(EMIT_ARGS)
padding_below, padding_below,
padding_above); padding_above);
} }
}
else else
{ {
throw runtime_error("Pooling currently only supports up to 3 spatial dimensions."); throw runtime_error("Pooling currently only supports up to 3 spatial dimensions.");
......
...@@ -29,8 +29,6 @@ backwards_maxpool_n2_c1_hw5_3x3_str2_max ...@@ -29,8 +29,6 @@ backwards_maxpool_n2_c1_hw5_3x3_str2_max
backwards_avgpool_n1_c1_hw2x2 backwards_avgpool_n1_c1_hw2x2
backwards_avgpool_n1_c1_hw4x4 backwards_avgpool_n1_c1_hw4x4
backwards_avgpool_n2_c2_hw4x4 backwards_avgpool_n2_c2_hw4x4
max_pool_3d
avg_pool_3d
topk_1d_max_all topk_1d_max_all
topk_1d_max_partial topk_1d_max_partial
topk_1d_max_one topk_1d_max_one
......
avg_pool_2d_2channel_2image_padded_only_above avg_pool_2d_2channel_2image_padded_only_above_do_not_include_in_computation
avg_pool_3d avg_pool_2d_2channel_2image_padded_only_above_include_in_computation
avg_pool_3d_strided_uneven_padded_do_not_include_in_computation
avg_pool_3d_uneven_strided_padded_include_in_computation
backwards_batch_norm_three_outputs backwards_batch_norm_three_outputs
backwards_dot_scalar_tensor backwards_dot_scalar_tensor
backwards_dot_tensor3_tensor3 backwards_dot_tensor3_tensor3
......
...@@ -6888,7 +6888,7 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_1channel_1image_strided) ...@@ -6888,7 +6888,7 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_1channel_1image_strided)
read_vector<float>(result))); read_vector<float>(result)));
} }
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_1channel_1image_padded) NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_1channel_1image_padded_do_not_include_in_computation)
{ {
Shape shape_a{1, 1, 3, 3}; Shape shape_a{1, 1, 3, 3};
Shape window_shape{2, 2}; Shape window_shape{2, 2};
...@@ -6919,7 +6919,38 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_1channel_1image_padded) ...@@ -6919,7 +6919,38 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_1channel_1image_padded)
read_vector<float>(result))); read_vector<float>(result)));
} }
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded) NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_1channel_1image_padded_include_in_computation)
{
// AvgPool with include_padding_in_avg_computation = true (last ctor arg):
// window positions that overlap the zero padding still divide by the full
// window area (4) instead of shrinking the divisor to the unpadded overlap.
Shape shape_a{1, 1, 3, 3};
Shape window_shape{2, 2};
auto window_movement_strides = Strides{1, 1};
// Symmetric 1-pixel padding on both spatial dimensions.
Shape padding_below{1, 1};
Shape padding_above{1, 1};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
// 3x3 input + 1+1 padding, 2x2 window, stride 1 -> 4x4 output.
Shape shape_r{1, 1, 4, 4};
auto f = make_shared<Function>(
make_shared<op::AvgPool>(
A, window_shape, window_movement_strides, padding_below, padding_above, true),
op::ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, test::NDArray<float, 4>({{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}}}).get_vector());
auto result = backend->create_tensor(element::f32, shape_r);
backend->call_with_validate(f, {result}, {a});
// Every expected element is written as sum/4: the divisor is always the
// full 2x2 window area because the padding is counted in the average.
EXPECT_TRUE(
test::all_close(test::NDArray<float, 4>({{{{0.0f / 4, 1.0f / 4, 1.0f / 4, 0.0f / 4},
{0.0f / 4, 4.0f / 4, 6.0f / 4, 2.0f / 4},
{2.0f / 4, 5.0f / 4, 5.0f / 4, 2.0f / 4},
{2.0f / 4, 2.0f / 4, 0.0f / 4, 0.0f / 4}}}})
.get_vector(),
read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_do_not_include_in_computation)
{ {
Shape shape_a{2, 1, 3, 3}; Shape shape_a{2, 1, 3, 3};
Shape window_shape{2, 2}; Shape window_shape{2, 2};
...@@ -6957,7 +6988,46 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded) ...@@ -6957,7 +6988,46 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded)
read_vector<float>(result))); read_vector<float>(result)));
} }
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_only_below) NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_include_in_computation)
{
// Same as the 1-image padded include-in-computation test, but with a batch
// of two (shape {2,1,3,3}: two images, one channel each) to exercise the
// batch dimension. Padded windows divide by the full window area (4).
Shape shape_a{2, 1, 3, 3};
Shape window_shape{2, 2};
auto window_movement_strides = Strides{1, 1};
// Symmetric 1-pixel padding on both spatial dimensions.
Shape padding_below{1, 1};
Shape padding_above{1, 1};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
// 3x3 input + 1+1 padding, 2x2 window, stride 1 -> 4x4 output per image.
Shape shape_r{2, 1, 4, 4};
auto f = make_shared<Function>(
make_shared<op::AvgPool>(
A, window_shape, window_movement_strides, padding_below, padding_above, true),
op::ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a,
test::NDArray<float, 4>(
{{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}, {{3, 5, 2}, {2, 0, 9}, {3, 6, 5}}}})
.get_vector());
auto result = backend->create_tensor(element::f32, shape_r);
backend->call_with_validate(f, {result}, {a});
// Expected values are written as sum/4; the divisor never shrinks at the
// borders because padding is included in the average computation.
EXPECT_TRUE(
test::all_close(test::NDArray<float, 4>({{{{0.0f / 4, 1.0f / 4, 1.0f / 4, 0.0f / 4},
{0.0f / 4, 4.0f / 4, 6.0f / 4, 2.0f / 4},
{2.0f / 4, 5.0f / 4, 5.0f / 4, 2.0f / 4},
{2.0f / 4, 2.0f / 4, 0.0f / 4, 0.0f / 4}},
{{3.0f / 4, 8.0f / 4, 7.0f / 4, 2.0f / 4},
{5.0f / 4, 10.0f / 4, 16.0f / 4, 11.0f / 4},
{5.0f / 4, 11.0f / 4, 20.0f / 4, 14.0f / 4},
{3.0f / 4, 9.0f / 4, 11.0f / 4, 5.0f / 4}}}})
.get_vector(),
read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME},
avg_pool_2d_2channel_2image_padded_only_below_do_not_include_in_computation)
{ {
Shape shape_a{2, 1, 3, 3}; Shape shape_a{2, 1, 3, 3};
Shape window_shape{2, 2}; Shape window_shape{2, 2};
...@@ -6992,7 +7062,43 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_only_below) ...@@ -6992,7 +7062,43 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_only_below)
read_vector<float>(result))); read_vector<float>(result)));
} }
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_only_above) NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_only_below_include_in_computation)
{
// Asymmetric padding: 1 pixel below only, none above, with
// include_padding_in_avg_computation = true. Windows overlapping the
// below-padding still divide by the full window area (4).
Shape shape_a{2, 1, 3, 3};
Shape window_shape{2, 2};
auto window_movement_strides = Strides{1, 1};
Shape padding_below{1, 1};
Shape padding_above{0, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
// 3x3 input + 1+0 padding, 2x2 window, stride 1 -> 3x3 output per image.
Shape shape_r{2, 1, 3, 3};
auto f = make_shared<Function>(
make_shared<op::AvgPool>(
A, window_shape, window_movement_strides, padding_below, padding_above, true),
op::ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a,
test::NDArray<float, 4>(
{{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}, {{3, 5, 2}, {2, 0, 9}, {3, 6, 5}}}})
.get_vector());
auto result = backend->create_tensor(element::f32, shape_r);
backend->call_with_validate(f, {result}, {a});
// Expected output matches the top-left 3x3 of the symmetric-padding test:
// only the below/left edges see padding, and the divisor stays 4.
EXPECT_TRUE(test::all_close(test::NDArray<float, 4>({{{{0.0f / 4, 1.0f / 4, 1.0f / 4},
{0.0f / 4, 4.0f / 4, 6.0f / 4},
{2.0f / 4, 5.0f / 4, 5.0f / 4}},
{{3.0f / 4, 8.0f / 4, 7.0f / 4},
{5.0f / 4, 10.0f / 4, 16.0f / 4},
{5.0f / 4, 11.0f / 4, 20.0f / 4}}}})
.get_vector(),
read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME},
avg_pool_2d_2channel_2image_padded_only_above_do_not_include_in_computation)
{ {
Shape shape_a{2, 1, 3, 3}; Shape shape_a{2, 1, 3, 3};
Shape window_shape{2, 2}; Shape window_shape{2, 2};
...@@ -7027,7 +7133,42 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_only_above) ...@@ -7027,7 +7133,42 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_only_above)
read_vector<float>(result))); read_vector<float>(result)));
} }
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_3x3) NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_only_above_include_in_computation)
{
// Asymmetric padding: 1 pixel above only, none below, with
// include_padding_in_avg_computation = true. Windows overlapping the
// above-padding still divide by the full window area (4).
Shape shape_a{2, 1, 3, 3};
Shape window_shape{2, 2};
auto window_movement_strides = Strides{1, 1};
Shape padding_below{0, 0};
Shape padding_above{1, 1};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
// 3x3 input + 0+1 padding, 2x2 window, stride 1 -> 3x3 output per image.
Shape shape_r{2, 1, 3, 3};
auto f = make_shared<Function>(
make_shared<op::AvgPool>(
A, window_shape, window_movement_strides, padding_below, padding_above, true),
op::ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a,
test::NDArray<float, 4>(
{{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}, {{3, 5, 2}, {2, 0, 9}, {3, 6, 5}}}})
.get_vector());
auto result = backend->create_tensor(element::f32, shape_r);
backend->call_with_validate(f, {result}, {a});
// Expected output matches the bottom-right 3x3 of the symmetric-padding
// test: only the above/right edges see padding, and the divisor stays 4.
EXPECT_TRUE(test::all_close(test::NDArray<float, 4>({{{{4.0f / 4, 6.0f / 4, 2.0f / 4},
{5.0f / 4, 5.0f / 4, 2.0f / 4},
{2.0f / 4, 0.0f / 4, 0.0f / 4}},
{{10.0f / 4, 16.0f / 4, 11.0f / 4},
{11.0f / 4, 20.0f / 4, 14.0f / 4},
{9.0f / 4, 11.0f / 4, 5.0f / 4}}}})
.get_vector(),
read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_3x3_padded_do_not_include_in_computation)
{ {
Shape shape_a{2, 1, 3, 3}; Shape shape_a{2, 1, 3, 3};
Shape window_shape{3, 3}; Shape window_shape{3, 3};
...@@ -7067,7 +7208,48 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_3x3) ...@@ -7067,7 +7208,48 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_3x3)
read_vector<float>(result))); read_vector<float>(result)));
} }
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_3x3_strided) NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_3x3_padded_include_in_computation)
{
// Larger 3x3 window with 2-pixel symmetric padding and
// include_padding_in_avg_computation = true: every window divides by the
// full 3x3 area (9), even the corner windows that cover mostly padding.
Shape shape_a{2, 1, 3, 3};
Shape window_shape{3, 3};
auto window_movement_strides = Strides{1, 1};
Shape padding_below{2, 2};
Shape padding_above{2, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
// 3x3 input + 2+2 padding, 3x3 window, stride 1 -> 5x5 output per image.
Shape shape_r{2, 1, 5, 5};
auto f = make_shared<Function>(
make_shared<op::AvgPool>(
A, window_shape, window_movement_strides, padding_below, padding_above, true),
op::ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a,
test::NDArray<float, 4>(
{{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}, {{3, 5, 2}, {2, 0, 9}, {3, 6, 5}}}})
.get_vector());
auto result = backend->create_tensor(element::f32, shape_r);
backend->call_with_validate(f, {result}, {a});
// all_close_f (float-ULP comparison) is used here; every expected element
// is sum/9 because the padded cells count toward the divisor.
EXPECT_TRUE(test::all_close_f(
test::NDArray<float, 4>({{{{0.0f / 9, 1.0f / 9, 1.0f / 9, 1.0f / 9, 0.0f / 9},
{0.0f / 9, 4.0f / 9, 6.0f / 9, 6.0f / 9, 2.0f / 9},
{2.0f / 9, 6.0f / 9, 8.0f / 9, 6.0f / 9, 2.0f / 9},
{2.0f / 9, 5.0f / 9, 7.0f / 9, 5.0f / 9, 2.0f / 9},
{2.0f / 9, 2.0f / 9, 2.0f / 9, 0.0f / 9, 0.0f / 9}},
{{3.0f / 9, 8.0f / 9, 10.0f / 9, 7.0f / 9, 2.0f / 9},
{5.0f / 9, 10.0f / 9, 21.0f / 9, 16.0f / 9, 11.0f / 9},
{8.0f / 9, 19.0f / 9, 35.0f / 9, 27.0f / 9, 16.0f / 9},
{5.0f / 9, 11.0f / 9, 25.0f / 9, 20.0f / 9, 14.0f / 9},
{3.0f / 9, 9.0f / 9, 14.0f / 9, 11.0f / 9, 5.0f / 9}}}})
.get_vector(),
read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME},
avg_pool_2d_2channel_2image_3x3_strided_padded_do_not_include_in_computation)
{ {
Shape shape_a{2, 1, 3, 3}; Shape shape_a{2, 1, 3, 3};
Shape window_shape{3, 3}; Shape window_shape{3, 3};
...@@ -7102,7 +7284,43 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_3x3_strided) ...@@ -7102,7 +7284,43 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_3x3_strided)
read_vector<float>(result))); read_vector<float>(result)));
} }
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_3x3_strided_uneven) NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_3x3_strided_padded_include_in_computation)
{
// 3x3 window, stride 2 in both dims, 2-pixel symmetric padding,
// include_padding_in_avg_computation = true: the strided windows sample
// every other position of the padded input and always divide by 9.
Shape shape_a{2, 1, 3, 3};
Shape window_shape{3, 3};
auto window_movement_strides = Strides{2, 2};
Shape padding_below{2, 2};
Shape padding_above{2, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
// (3 + 2 + 2 - 3)/2 + 1 = 3 -> 3x3 output per image.
Shape shape_r{2, 1, 3, 3};
auto f = make_shared<Function>(
make_shared<op::AvgPool>(
A, window_shape, window_movement_strides, padding_below, padding_above, true),
op::ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a,
test::NDArray<float, 4>(
{{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}, {{3, 5, 2}, {2, 0, 9}, {3, 6, 5}}}})
.get_vector());
auto result = backend->create_tensor(element::f32, shape_r);
backend->call_with_validate(f, {result}, {a});
// Expected values are the stride-2 subsample of the unstrided 5x5 result,
// each written as sum/9 (padding included in the divisor).
EXPECT_TRUE(test::all_close_f(test::NDArray<float, 4>({{{{0.0f / 9, 1.0f / 9, 0.0f / 9},
{2.0f / 9, 8.0f / 9, 2.0f / 9},
{2.0f / 9, 2.0f / 9, 0.0f / 9}},
{{3.0f / 9, 10.0f / 9, 2.0f / 9},
{8.0f / 9, 35.0f / 9, 16.0f / 9},
{3.0f / 9, 14.0f / 9, 5.0f / 9}}}})
.get_vector(),
read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME},
avg_pool_2d_2channel_2image_3x3_strided_uneven_padded_do_not_include_in_computation)
{ {
Shape shape_a{2, 1, 3, 3}; Shape shape_a{2, 1, 3, 3};
Shape window_shape{3, 3}; Shape window_shape{3, 3};
...@@ -7135,7 +7353,76 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_3x3_strided_unev ...@@ -7135,7 +7353,76 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_3x3_strided_unev
read_vector<float>(result))); read_vector<float>(result)));
} }
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_3d) NGRAPH_TEST(${BACKEND_NAME},
avg_pool_2d_2channel_2image_3x3_strided_uneven_padded_include_in_computation)
{
// Uneven strides (2 rows, 3 cols) with 2-pixel symmetric padding and
// include_padding_in_avg_computation = true: exercises non-square striding;
// each sampled window still divides by the full 3x3 area (9).
Shape shape_a{2, 1, 3, 3};
Shape window_shape{3, 3};
auto window_movement_strides = Strides{2, 3};
Shape padding_below{2, 2};
Shape padding_above{2, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
// Rows: (3+4-3)/2 + 1 = 3; cols: (3+4-3)/3 + 1 = 2 -> 3x2 output per image.
Shape shape_r{2, 1, 3, 2};
auto f = make_shared<Function>(
make_shared<op::AvgPool>(
A, window_shape, window_movement_strides, padding_below, padding_above, true),
op::ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a,
test::NDArray<float, 4>(
{{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}, {{3, 5, 2}, {2, 0, 9}, {3, 6, 5}}}})
.get_vector());
auto result = backend->create_tensor(element::f32, shape_r);
backend->call_with_validate(f, {result}, {a});
// Expected values written as sum/9; padding counts toward the divisor.
EXPECT_TRUE(test::all_close_f(
test::NDArray<float, 4>(
{{{{0.0f / 9, 1.0f / 9}, {2.0f / 9, 6.0f / 9}, {2.0f / 9, 0.0f / 9}},
{{3.0f / 9, 7.0f / 9}, {8.0f / 9, 27.0f / 9}, {3.0f / 9, 11.0f / 9}}}})
.get_vector(),
read_vector<float>(result)));
}
// 3-D AvgPool with uneven strides and asymmetric padding,
// include_padding_in_avg_computation = false (last ctor arg). Instead of
// hand-computed expected values, the backend under test is cross-checked
// against the INTERPRETER reference backend on random input.
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_3d_strided_uneven_padded_do_not_include_in_computation)
{
// NCDHW input; spatial dims 12x13x15 with a 4x5x4 window.
Shape shape_a{64, 3, 12, 13, 15};
Shape window_shape{4, 5, 4};
// Different stride per spatial dimension.
auto move_strides = Strides{2, 3, 4};
// Asymmetric padding: below != above in every spatial dimension.
Shape padding_below{2, 3, 1};
Shape padding_above{3, 1, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
auto B = make_shared<op::Parameter>(element::f32, shape_a);
// Two structurally identical functions: cpu_f runs on ${BACKEND_NAME},
// int_f runs on the INTERPRETER reference.
auto cpu_f = make_shared<Function>(
make_shared<op::AvgPool>(
A, window_shape, move_strides, padding_below, padding_above, false),
op::ParameterVector{A});
auto int_f = make_shared<Function>(
make_shared<op::AvgPool>(
B, window_shape, move_strides, padding_below, padding_above, false),
op::ParameterVector{B});
// Fill every parameter with uniform random values in [0, 1).
test::Uniform<float> rng(0.0f, 1.0f);
vector<vector<float>> args;
for (shared_ptr<op::Parameter> param : int_f->get_parameters())
{
vector<float> tensor_val(shape_size(param->get_shape()));
rng.initialize(tensor_val);
args.push_back(tensor_val);
}
auto int_results = execute(int_f, args, "INTERPRETER");
auto backend_results = execute(cpu_f, args, "${BACKEND_NAME}");
// Compare with absolute/relative tolerance 1e-4 to absorb float
// accumulation-order differences between backends.
for (size_t i = 0; i < backend_results.size(); i++)
{
EXPECT_TRUE(test::all_close(backend_results.at(i), int_results.at(i), 1.0e-4f, 1.0e-4f));
}
}
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_3d_uneven_strided_padded_include_in_computation)
{ {
Shape shape_a{64, 3, 7, 8, 10}; Shape shape_a{64, 3, 7, 8, 10};
Shape window_shape{2, 3, 2}; Shape window_shape{2, 3, 2};
...@@ -7161,10 +7448,10 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_3d) ...@@ -7161,10 +7448,10 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_3d)
args.push_back(tensor_val); args.push_back(tensor_val);
} }
auto int_results = execute(int_f, args, "INTERPRETER"); auto int_results = execute(int_f, args, "INTERPRETER");
auto cpu_results = execute(cpu_f, args, "${BACKEND_NAME}"); auto backend_results = execute(cpu_f, args, "${BACKEND_NAME}");
for (size_t i = 0; i < cpu_results.size(); i++) for (size_t i = 0; i < backend_results.size(); i++)
{ {
EXPECT_TRUE(test::all_close(cpu_results.at(i), int_results.at(i), 1.0e-4f, 1.0e-4f)); EXPECT_TRUE(test::all_close(backend_results.at(i), int_results.at(i), 1.0e-4f, 1.0e-4f));
} }
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment