//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <cstdlib>
#include <string>
#include "gtest/gtest.h"

#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/serializer.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/test_control.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";

// clang-format off
#define TESTING_${BACKEND_NAME}_BACKEND 1
#ifdef TESTING_${BACKEND_NAME}_BACKEND // avoid macro is not used warning
#endif
// clang-format on

// Currently only used to test the GPU backend, but it is expected to be useful
// for testing other backends (except CPU, which is used as the reference backend)
#if defined(TESTING_GPU_BACKEND)
class serialized_graph_files : public ::testing::TestWithParam<string>
{
public:
    void compare_results(NodeVector& result_nodes,
                         vector<shared_ptr<runtime::Tensor>> ref_results,
                         vector<shared_ptr<runtime::Tensor>> bk_results,
                         vector<shared_ptr<runtime::Tensor>> bk_isolated_results,
                         stringstream& msg)
    {
        for (int i = 0; i < ref_results.size(); ++i)
        {
            const shared_ptr<runtime::Tensor>& ref_data = ref_results.at(i);
            const shared_ptr<runtime::Tensor>& bk_data = bk_results.at(i);
            const shared_ptr<runtime::Tensor>& bk_isolated_data = bk_isolated_results.at(i);

            std::shared_ptr<ngraph::Node> result_node = result_nodes.at(i);
            msg << "Comparing results for " << result_node->get_name() << "\n";
            if (result_node->get_arguments().size() > 0)
            {
                msg << "  inputs:"
                    << "\n";
                for (auto& p : result_node->get_arguments())
                {
                    msg << "    " << p->get_name() << " " << p->get_element_type() << "\n";
                }
            }

            // Ensure the element types and shapes match between reference and backend
            ASSERT_EQ(ref_data->get_element_type(), bk_data->get_element_type());
            ASSERT_EQ(ref_data->get_element_type(), bk_isolated_data->get_element_type());
            ASSERT_EQ(ref_data->get_element_count(), bk_data->get_element_count());
            ASSERT_EQ(ref_data->get_element_count(), bk_isolated_data->get_element_count());
            ASSERT_EQ(ref_data->get_shape(), bk_data->get_shape());
            ASSERT_EQ(ref_data->get_shape(), bk_isolated_data->get_shape());

            msg << "  output type:       " << ref_data->get_element_type() << "\n";
            msg << "  output shape:      " << ref_data->get_shape() << "\n";
            msg << "  output # elements: " << ref_data->get_element_count() << "\n";

            element::Type et = ref_data->get_element_type();
            if (et == element::boolean)
            {
                vector<char> ref_data_vector = read_vector<char>(ref_data);
                vector<char> bk_data_vector = read_vector<char>(bk_data);
                vector<char> bk_isolated_data_vector = read_vector<char>(bk_isolated_data);
                msg << "Test backed op run w/ original graph dependencies:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_data_vector);
                bool all_close_graph = test::all_close<char>(ref_data_vector, bk_data_vector);
                msg << "Test backed op run isolated w/ inputs from ref graph run:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_isolated_data_vector);
                bool all_close_isolated =
                    test::all_close<char>(ref_data_vector, bk_isolated_data_vector);
                EXPECT_TRUE(all_close_graph && all_close_isolated) << msg.str();
            }
            else if (et == element::f32)
            {
                vector<float> ref_data_vector = read_float_vector(ref_data);
                vector<float> bk_data_vector = read_float_vector(bk_data);
                vector<float> bk_isolated_data_vector = read_float_vector(bk_isolated_data);
                msg << "Test backed op run w/ original graph dependencies:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_data_vector);
                // Future work will better determine useful graph comparison thresholds.
                // For a very small sample of tested graphs, the initial criteria are:
                // * Comparison of ops using inputs from preceding ops (original
                //   graph dependencies) allows for a little better than 1/3 of
                //   the possible bits to match
                // * Isolated operation allows for 2/3 of the possible bits to match
                constexpr int one_third_of_available_bits = (MAX_FLOAT_BITS + 1) / 3;
                constexpr int in_graph_tolerance =
                    FLOAT_MANTISSA_BITS - one_third_of_available_bits;
                constexpr int isolated_tolerance =
                    FLOAT_MANTISSA_BITS - (one_third_of_available_bits * 2);
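                // A worked example (sketch only; assumes FLOAT_MANTISSA_BITS == 24 and
                // MAX_FLOAT_BITS == 23 from util/all_close_f.hpp):
                //   one_third_of_available_bits = (23 + 1) / 3 = 8
                //   in_graph_tolerance  = 24 - 8     = 16 bits allowed to mismatch (~1/3 must match)
                //   isolated_tolerance  = 24 - 8 * 2 =  8 bits allowed to mismatch (~2/3 must match)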
                ::testing::AssertionResult all_close_graph =
                    test::all_close_f(ref_data_vector, bk_data_vector, in_graph_tolerance);
                msg << "Test backed op run isolated w/ inputs from ref graph run:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_isolated_data_vector);
                ::testing::AssertionResult all_close_isolated =
                    test::all_close_f(ref_data_vector, bk_isolated_data_vector, isolated_tolerance);
                if (!all_close_graph || !all_close_isolated)
                {
                    cout << msg.str();
                }
                EXPECT_TRUE(all_close_graph);
                EXPECT_TRUE(all_close_isolated);
            }
            else if (et == element::f64)
            {
                vector<double> ref_data_vector = read_vector<double>(ref_data);
                vector<double> bk_data_vector = read_vector<double>(bk_data);
                vector<double> bk_isolated_data_vector = read_vector<double>(bk_isolated_data);
                msg << "Test backed op run w/ original graph dependencies:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_data_vector);

                // When testing with original graph dependencies, test w/ a loose f64 tolerance
                constexpr int tolerance_bits = 30;
                ::testing::AssertionResult all_close_graph =
                    test::all_close_f(ref_data_vector, bk_data_vector, tolerance_bits);
                msg << "Test backed op run isolated w/ inputs from ref graph run:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_isolated_data_vector);

                // When testing with isolated graph dependencies, test w/ the default (tight) f64
                // tolerance
                ::testing::AssertionResult all_close_isolated =
                    test::all_close_f(ref_data_vector, bk_isolated_data_vector);
                if (!all_close_graph || !all_close_isolated)
                {
                    cout << msg.str();
                }
                EXPECT_TRUE(all_close_graph);
                EXPECT_TRUE(all_close_isolated);
            }
            else if (et == element::i8)
            {
                vector<int8_t> ref_data_vector = read_vector<int8_t>(ref_data);
                vector<int8_t> bk_data_vector = read_vector<int8_t>(bk_data);
                vector<int8_t> bk_isolated_data_vector = read_vector<int8_t>(bk_isolated_data);
                msg << "Test backed op run w/ original graph dependencies:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_data_vector);
                bool all_close_graph = test::all_close<int8_t>(ref_data_vector, bk_data_vector);
                msg << "Test backed op run isolated w/ inputs from ref graph run:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_isolated_data_vector);
                bool all_close_isolated =
                    test::all_close<int8_t>(ref_data_vector, bk_isolated_data_vector);
                EXPECT_TRUE(all_close_graph && all_close_isolated) << msg.str();
            }
            else if (et == element::i16)
            {
                vector<int16_t> ref_data_vector = read_vector<int16_t>(ref_data);
                vector<int16_t> bk_data_vector = read_vector<int16_t>(bk_data);
                vector<int16_t> bk_isolated_data_vector = read_vector<int16_t>(bk_isolated_data);
                msg << "Test backed op run w/ original graph dependencies:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_data_vector);
                bool all_close_graph = test::all_close<int16_t>(ref_data_vector, bk_data_vector);
                msg << "Test backed op run isolated w/ inputs from ref graph run:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_isolated_data_vector);
                bool all_close_isolated =
                    test::all_close<int16_t>(ref_data_vector, bk_isolated_data_vector);
                EXPECT_TRUE(all_close_graph && all_close_isolated) << msg.str();
            }
            else if (et == element::i32)
            {
                vector<int32_t> ref_data_vector = read_vector<int32_t>(ref_data);
                vector<int32_t> bk_data_vector = read_vector<int32_t>(bk_data);
                vector<int32_t> bk_isolated_data_vector = read_vector<int32_t>(bk_isolated_data);
                msg << "Test backed op run w/ original graph dependencies:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_data_vector);
                bool all_close_graph = test::all_close<int32_t>(ref_data_vector, bk_data_vector);
                msg << "Test backed op run isolated w/ inputs from ref graph run:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_isolated_data_vector);
                bool all_close_isolated =
                    test::all_close<int32_t>(ref_data_vector, bk_isolated_data_vector);
                EXPECT_TRUE(all_close_graph && all_close_isolated) << msg.str();
            }
            else if (et == element::i64)
            {
                vector<int64_t> ref_data_vector = read_vector<int64_t>(ref_data);
                vector<int64_t> bk_data_vector = read_vector<int64_t>(bk_data);
                vector<int64_t> bk_isolated_data_vector = read_vector<int64_t>(bk_isolated_data);
                msg << "Test backed op run w/ original graph dependencies:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_data_vector);
                bool all_close_graph = test::all_close<int64_t>(ref_data_vector, bk_data_vector);
                msg << "Test backed op run isolated w/ inputs from ref graph run:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_isolated_data_vector);
                bool all_close_isolated =
                    test::all_close<int64_t>(ref_data_vector, bk_isolated_data_vector);
                EXPECT_TRUE(all_close_graph && all_close_isolated) << msg.str();
            }
            else if (et == element::u8)
            {
                vector<uint8_t> ref_data_vector = read_vector<uint8_t>(ref_data);
                vector<uint8_t> bk_data_vector = read_vector<uint8_t>(bk_data);
                vector<uint8_t> bk_isolated_data_vector = read_vector<uint8_t>(bk_isolated_data);
                msg << "Test backed op run w/ original graph dependencies:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_data_vector);
                bool all_close_graph = test::all_close<uint8_t>(ref_data_vector, bk_data_vector);
                msg << "Test backed op run isolated w/ inputs from ref graph run:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_isolated_data_vector);
                bool all_close_isolated =
                    test::all_close<uint8_t>(ref_data_vector, bk_isolated_data_vector);
                EXPECT_TRUE(all_close_graph && all_close_isolated) << msg.str();
            }
            else if (et == element::u16)
            {
                vector<uint16_t> ref_data_vector = read_vector<uint16_t>(ref_data);
                vector<uint16_t> bk_data_vector = read_vector<uint16_t>(bk_data);
                vector<uint16_t> bk_isolated_data_vector = read_vector<uint16_t>(bk_isolated_data);
                msg << "Test backed op run w/ original graph dependencies:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_data_vector);
                bool all_close_graph = test::all_close<uint16_t>(ref_data_vector, bk_data_vector);
                msg << "Test backed op run isolated w/ inputs from ref graph run:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_isolated_data_vector);
                bool all_close_isolated =
                    test::all_close<uint16_t>(ref_data_vector, bk_isolated_data_vector);
                EXPECT_TRUE(all_close_graph && all_close_isolated) << msg.str();
            }
            else if (et == element::u32)
            {
                vector<uint32_t> ref_data_vector = read_vector<uint32_t>(ref_data);
                vector<uint32_t> bk_data_vector = read_vector<uint32_t>(bk_data);
                vector<uint32_t> bk_isolated_data_vector = read_vector<uint32_t>(bk_isolated_data);
                msg << "Test backed op run w/ original graph dependencies:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_data_vector);
                bool all_close_graph = test::all_close<uint32_t>(ref_data_vector, bk_data_vector);
                msg << "Test backed op run isolated w/ inputs from ref graph run:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_isolated_data_vector);
                bool all_close_isolated =
                    test::all_close<uint32_t>(ref_data_vector, bk_isolated_data_vector);
                EXPECT_TRUE(all_close_graph && all_close_isolated) << msg.str();
            }
            else if (et == element::u64)
            {
                vector<uint64_t> ref_data_vector = read_vector<uint64_t>(ref_data);
                vector<uint64_t> bk_data_vector = read_vector<uint64_t>(bk_data);
                vector<uint64_t> bk_isolated_data_vector = read_vector<uint64_t>(bk_isolated_data);
                msg << "Test backed op run w/ original graph dependencies:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_data_vector);
                bool all_close_graph = test::all_close<uint64_t>(ref_data_vector, bk_data_vector);
                msg << "Test backed op run isolated w/ inputs from ref graph run:"
                    << "\n";
                msg << get_results_str(ref_data_vector, bk_isolated_data_vector);
                bool all_close_isolated =
                    test::all_close<uint64_t>(ref_data_vector, bk_isolated_data_vector);
                EXPECT_TRUE(all_close_graph && all_close_isolated) << msg.str();
            }
            else
            {
                throw runtime_error("unsupported type");
            }
        }
    }

protected:
    serialized_graph_files() { file_name = GetParam(); }
    string file_name;
};

NGRAPH_TEST_P(${BACKEND_NAME}, serialized_graph_files, compare_backends_with_graphs)
{
    // Compare against CPU for speed. Running large graphs against the
    // interpreter is too slow.
    auto ref = runtime::Backend::create("CPU");
    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    string frozen_graph_path;
    if (file_name[0] != '/')
    {
        // Path is relative to serialized zoo path
        frozen_graph_path = file_util::path_join(SERIALIZED_ZOO, file_name);
    }
    else
    {
        // Full path - useful for temporary testing to narrow down problem
        frozen_graph_path = file_name;
    }
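    // For example (hypothetical paths): "tensorflow/resnet8/foo.json" is resolved under
    // SERIALIZED_ZOO, while "/tmp/my_graph.json" is used as-is.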

    stringstream ss(frozen_graph_path);
    shared_ptr<Function> func = ngraph::deserialize(ss);

    // Collect set of results to run two back ends with intermediate results set as outputs
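    // Promoting every intermediate node to a result means a single call on each backend
    // produces per-op outputs that compare_results() can check node by node.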
    NodeVector new_results;
    for (auto n : func->get_ordered_ops())
    {
        // Don't include op::Results, otherwise the Function c-tor will complain
        if (!n->is_output() && !n->is_parameter() && !n->is_constant() &&
            !(n->get_output_size() > 1))
        {
            // place conditionals here if you want to make only certain ops output/result nodes
            new_results.push_back(n);
        }
    }

    // Collect set of parameters and results to run test backend with isolated
    // inputs which will be copied from the reference backend outputs
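    // For example (illustrative only): if the original graph computes C = Add(A, B) and
    // then D = Abs(C), the isolated copy of Abs reads C from a fresh Parameter, which is
    // later filled with the reference backend's computed value of C.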
    ParameterVector isolated_parameters = func->get_parameters();
    NodeVector isolated_results;
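    // Maps each newly created isolated Parameter to the original node whose
    // reference-backend output will later be copied into it.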
    unordered_map<ngraph::Node*, std::shared_ptr<ngraph::Node>> isolated_node_to_original_node;
    for (auto n : func->get_ordered_ops())
    {
        // Don't include op::Results, otherwise the Function c-tor will complain
        if (!n->is_output() && !n->is_parameter() && !n->is_constant() &&
            !(n->get_output_size() > 1))
        {
            OutputVector isolated_op_args;
            for (auto& input : n->inputs())
            {
                auto source_output = input.get_source_output();
                auto arg = source_output.get_node_shared_ptr();
                if (!arg->is_output() && !arg->is_parameter() && !arg->is_constant() &&
                    !(arg->get_output_size() > 1))
                {
                    // Create new isolated arg which we'll fill in with reference results
                    auto isolated_param =
                        make_shared<op::Parameter>(arg->get_element_type(), arg->get_shape());
                    isolated_op_args.push_back(isolated_param);
                    isolated_parameters.push_back(isolated_param);
                    isolated_node_to_original_node[isolated_param.get()] = arg;
                }
                else
                {
                    // It's not an output in the reference results - use the original arg
                    isolated_op_args.push_back(arg);
                }
            }
            auto isolated_op = n->copy_with_new_inputs(isolated_op_args);
            isolated_results.push_back(isolated_op);
        }
    }

    // No need to include the original results; they are subsumed by new_results
    auto new_func = make_shared<Function>(new_results, func->get_parameters());

    auto ref_func = clone_function(*new_func);
    auto bk_func = clone_function(*new_func);

    vector<shared_ptr<runtime::Tensor>> ref_args;
    vector<shared_ptr<runtime::Tensor>> bk_args;
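    // Fixed seed keeps the random input data reproducible across runs; the same host
    // buffer is written to both the reference and test backend tensors below.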
    default_random_engine engine(2112);
    for (shared_ptr<op::Parameter> param : new_func->get_parameters())
    {
        auto data =
            make_shared<ngraph::runtime::HostTensor>(param->get_element_type(), param->get_shape());
        random_init(data.get(), engine);
        auto ref_tensor = ref->create_tensor(param->get_element_type(), param->get_shape());
        auto bk_tensor = backend->create_tensor(param->get_element_type(), param->get_shape());
        ref_tensor->write(data->get_data_ptr(),
                          data->get_element_count() * data->get_element_type().size());
        bk_tensor->write(data->get_data_ptr(),
                         data->get_element_count() * data->get_element_type().size());
        ref_args.push_back(ref_tensor);
        bk_args.push_back(bk_tensor);
    }

    vector<shared_ptr<runtime::Tensor>> ref_results;
    vector<shared_ptr<runtime::Tensor>> bk_results;
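    // Maps each node in new_results to the reference tensor holding its output, so the
    // isolated-parameter setup below can locate reference data to copy.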
    unordered_map<ngraph::Node*, shared_ptr<runtime::Tensor>> arg_to_ref_result;

    ref_results.reserve(new_results.size());
    bk_results.reserve(new_results.size());

    for (shared_ptr<Node>& out : new_results)
    {
        auto ref_result = ref->create_tensor(out->get_element_type(), out->get_shape());
        ref_results.push_back(ref_result);
        arg_to_ref_result[out.get()] = ref_result;
        auto bk_result = backend->create_tensor(out->get_element_type(), out->get_shape());
        bk_results.push_back(bk_result);
    }

    if (ref_func->get_parameters().size() != ref_args.size())
    {
        throw ngraph::ngraph_error(
            "Number of ref runtime parameters and allocated arguments don't match");
    }
    if (bk_func->get_parameters().size() != bk_args.size())
    {
        throw ngraph::ngraph_error(
            "Number of backend runtime parameters and allocated arguments don't match");
    }
    if (ref_func->get_results().size() != ref_results.size())
    {
        throw ngraph::ngraph_error(
            "Number of ref runtime results and allocated results don't match");
    }
    if (bk_func->get_results().size() != bk_results.size())
    {
        throw ngraph::ngraph_error(
            "Number of backend runtime results and allocated results don't match");
    }
    auto ref_handle = ref->compile(ref_func);
    ref_handle->call_with_validate(ref_results, ref_args);
    auto handle = backend->compile(bk_func);
    handle->call_with_validate(bk_results, bk_args);

    // Now create an isolated function for the backend under test, in which each node of
    // the original graph is run with inputs copied from the reference backend rather
    // than from its original graph dependencies.
    // auto bk_isolated_func = clone_function(Function(isolated_results, isolated_parameters));
    auto bk_isolated_func = make_shared<Function>(isolated_results, isolated_parameters);

    // We'll leverage the bk_args we already used above (for same original parameter data values)
    // and then tack on new data copied (whenever possible) from the reference backend results.
    auto& isolated_params = bk_isolated_func->get_parameters();
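    // The first get_parameters().size() entries are the original graph parameters and are
    // already covered by bk_args; only the parameters appended for isolation need data
    // copied from the reference results, so iteration starts past the originals.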
    for (auto parameter_it = isolated_params.begin() + new_func->get_parameters().size();
         parameter_it != isolated_params.end();
         ++parameter_it)
    {
        auto param = *parameter_it;
        bool found_reference_data = false;
        auto original_node_it = isolated_node_to_original_node.find(param.get());
        if (original_node_it != isolated_node_to_original_node.end())
        {
            auto ref_tensor_it = arg_to_ref_result.find(original_node_it->second.get());
            if (ref_tensor_it != arg_to_ref_result.end())
            {
                found_reference_data = true;
                auto ref_tensor = ref_tensor_it->second;
                auto data = make_shared<ngraph::runtime::HostTensor>(param->get_element_type(),
                                                                     param->get_shape());
                auto bk_tensor =
                    backend->create_tensor(param->get_element_type(), param->get_shape());
                size_t size_in_bytes = ref_tensor->get_size_in_bytes();
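                // Round-trip through a host tensor: read the reference result into host
                // memory, then write that data into the test backend's input tensor.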
                ref_tensor->read(data->get_data_ptr(), size_in_bytes);
                bk_tensor->write(data->get_data_ptr(), size_in_bytes);
                bk_args.push_back(bk_tensor);
            }
        }
        ASSERT_TRUE(found_reference_data);
    }
    vector<shared_ptr<runtime::Tensor>> bk_isolated_results;
    bk_isolated_results.reserve(isolated_results.size());
    for (shared_ptr<Node>& out : isolated_results)
    {
        auto bk_result = backend->create_tensor(out->get_element_type(), out->get_shape());
        bk_isolated_results.push_back(bk_result);
    }
    handle = backend->compile(bk_isolated_func);
    handle->call_with_validate(bk_isolated_results, bk_args);

    // ss holds the path of the file under test; it will now also be used to
    // accumulate a log message in case of test failure
    ss << "\n";
    compare_results(new_results, ref_results, bk_results, bk_isolated_results, ss);
}

// The set of graphs tested is not currently significant. These graphs were
// chosen because they're already available and demonstrate the technique.
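// To compare another serialized graph, append its path (relative to SERIALIZED_ZOO, or an
// absolute path for ad-hoc debugging) to the testing::Values() list below, or add a
// separate NGRAPH_INSTANTIATE_TEST_CASE_P block for another model.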
NGRAPH_INSTANTIATE_TEST_CASE_P(
    ${BACKEND_NAME},
    tf_resnet8_files,
    serialized_graph_files,
    testing::Values("tensorflow/resnet8/"
                    "tf_function_cluster_12[_XlaCompiledKernel=true,_XlaNumConstantArgs=3,_"
                    "XlaNumResourceArgs=0].v23.json",
                    "tensorflow/resnet8/"
                    "tf_function_cluster_20[_XlaCompiledKernel=true,_XlaNumConstantArgs=3,_"
                    "XlaNumResourceArgs=0].v23.json",
                    "tensorflow/resnet8/"
                    "tf_function_cluster_22[_XlaCompiledKernel=true,_XlaNumConstantArgs=4,_"
                    "XlaNumResourceArgs=0].v24.json",
                    "tensorflow/resnet8/"
                    "tf_function_cluster_23[_XlaCompiledKernel=true,_XlaNumConstantArgs=1,_"
                    "XlaNumResourceArgs=0].v296.json",
                    "tensorflow/resnet8/"
                    "tf_function_cluster_28[_XlaCompiledKernel=true,_XlaNumConstantArgs=0,_"
                    "XlaNumResourceArgs=0].v13.json",
                    "tensorflow/resnet8/"
                    "tf_function_cluster_4[_XlaCompiledKernel=true,_XlaNumConstantArgs=1,_"
                    "XlaNumResourceArgs=0].v14.json",
                    "tensorflow/resnet8/"
                    "tf_function_cluster_8[_XlaCompiledKernel=true,_XlaNumConstantArgs=2,_"
                    "XlaNumResourceArgs=0].v28.json"));

#endif // skipping tests due to backend