//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

// Tool to benchmark any nGraph JSON model with a given backend.
// Compile and run with:
//   g++ ./nbench.cpp -std=c++11 -I$HOME/ngraph_dist/include -L$HOME/ngraph_dist/lib -lngraph -o nbench
//   env LD_LIBRARY_PATH=$HOME/ngraph_dist/lib env NGRAPH_INTERPRETER_EMIT_TIMING=1 ./nbench
// Sample models are under ../../test/models.

#include <algorithm>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <locale>
#include <map>
#include <memory>
#include <set>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>

#include "benchmark.hpp"
#include "ngraph/except.hpp"
#include "ngraph/file_util.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/visualize_tree.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/serializer.hpp"
#include "ngraph/util.hpp"

using namespace std;
using namespace ngraph;

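// A PerformanceCounter extended with the output shape of the node it
// measures, so timings can be grouped by op type and shape.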
class PerfShape : public ngraph::runtime::PerformanceCounter
{
public:
    PerfShape(const runtime::PerformanceCounter& p, Shape s)
        : PerformanceCounter(p)
        , shape(s)
    {
    }
    Shape shape;
};

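// Build a map from node name to node across the given function and any
// functions it calls, so performance counters can be matched to their nodes.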
unordered_map<string, shared_ptr<Node>> get_node_map(shared_ptr<Function> func)
{
    unordered_map<string, shared_ptr<Node>> node_map;
    vector<shared_ptr<Function>> fs;
    traverse_functions(func, [&](shared_ptr<Function> f) { fs.push_back(f); });
    for (shared_ptr<Function> f : fs)
    {
        for (shared_ptr<Node> node : f->get_ops())
        {
            node_map.insert({node->get_name(), node});
        }
    }
    return node_map;
}

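// Attach each node's first output shape to its performance counter.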
vector<PerfShape> to_perf_shape(shared_ptr<Function> f,
                                const vector<runtime::PerformanceCounter>& perf_data)
{
    vector<PerfShape> result;
    auto node_map = get_node_map(f);
    for (const runtime::PerformanceCounter& p : perf_data)
    {
        auto node = node_map[p.name()];
        Shape shape = node->get_outputs()[0].get_shape();
        result.push_back(PerfShape(p, shape));
    }
    return result;
}

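// Sum timings per op type and output shape. The returned multimap is keyed by
// total microseconds, so iterating it yields entries in time order.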
multimap<size_t, string> aggregate_timing_details(const vector<PerfShape>& perf_data)
{
    unordered_map<string, size_t> timing;
    unordered_map<string, size_t> count;
    for (const PerfShape& p : perf_data)
    {
        string op = p.name().substr(0, p.name().find('_'));
        string shape_name = " {" + join(p.shape) + "} ";
        timing[op + shape_name] += p.microseconds();
        count[op + shape_name] += 1;
    }

    multimap<size_t, string> rc;
    for (const pair<string, size_t>& t : timing)
    {
        rc.insert({t.second, t.first + to_string(count[t.first])});
    }
    return rc;
}

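// Sum timings per op type, keyed by total microseconds.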
multimap<size_t, string> aggregate_timing(const vector<PerfShape>& perf_data)
{
    unordered_map<string, size_t> timing;
    for (const PerfShape& p : perf_data)
    {
        string op = p.name().substr(0, p.name().find('_'));
        timing[op] += p.microseconds();
    }

    multimap<size_t, string> rc;
    for (const pair<string, size_t>& t : timing)
    {
        rc.insert({t.second, t.first});
    }
    return rc;
}

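// Print aggregated timings in aligned columns, slowest entries first.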
void print_times(const multimap<size_t, string>& timing)
{
    // set the column widths
    int name_width = 0;
    int time_width = 0;
    for (const pair<size_t, string>& p : timing)
    {
        name_width = max(name_width, static_cast<int>(p.second.size()));
        stringstream ss;
        ss.imbue(locale(""));
        ss << p.first;
        time_width = max(time_width, static_cast<int>(ss.str().size()));
    }
    for (auto it = timing.rbegin(); it != timing.rend(); it++)
    {
        cout << setw(name_width + 2) << left << it->second << " " << setw(time_width + 2) << right
             << it->first << "us\n";
    }
}

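// Print per-op-type and per-op-type/shape aggregates when timing_detail is set.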
void print_results(vector<PerfShape> perf_data, bool timing_detail)
{
    sort(perf_data.begin(), perf_data.end(), [](const PerfShape& p1, const PerfShape& p2) {
        return p1.total_microseconds() > p2.total_microseconds();
    });
    multimap<size_t, string> timing = aggregate_timing(perf_data);
    multimap<size_t, string> timing_details = aggregate_timing_details(perf_data);

    if (timing_detail)
    {
        cout << "\n---- Aggregate times per op type ----\n";
        print_times(timing);

        cout << "\n---- Aggregate times per op type/shape/count ----\n";
        print_times(timing_details);
    }
}

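// Return the element type an op computes on: the input type for Convert and
// for comparison ops (whose outputs are boolean), otherwise the type of the
// first output.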
element::Type get_op_element_type(const Node& op)
{
    element::Type type;
    if (op.description() == "Convert")
    {
        type = op.get_input_element_type(0);
    }
    else if (op.description() == "Equal" || op.description() == "Greater" ||
             op.description() == "GreaterEq" || op.description() == "Less" ||
             op.description() == "LessEq" || op.description() == "NotEqual")
    {
        // Get the type of the second input, not the first.
        // All BinaryElementwiseComparison ops have the same type for both inputs;
        // Select has bool for its first input and the type of interest for the second.
        type = op.get_input_element_type(1);
    }
    else
    {
        type = op.get_outputs().at(0).get_element_type();
    }
    return type;
}

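// Parse command-line options, then visualize, print statistics for, and/or
// benchmark each requested model.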
int main(int argc, char** argv)
{
    string model_arg;
    string backend = "CPU"; // match the documented default in the help text
    string directory;
    int iterations = 10;
    bool failed = false;
    bool statistics = false;
    bool timing_detail = false;
    bool visualize = false;
    int warmup_iterations = 1;
    bool copy_data = true;

    for (int i = 1; i < argc; i++)
    {
        string arg = argv[i];
        if (arg == "-f" || arg == "--file")
        {
            model_arg = argv[++i];
        }
        else if (arg == "-b" || arg == "--backend")
        {
            backend = argv[++i];
        }
        else if (arg == "-i" || arg == "--iterations")
        {
            try
            {
                iterations = stoi(argv[++i]);
            }
            catch (...)
            {
                cout << "Invalid argument\n";
                failed = true;
            }
        }
        else if (arg == "-s" || arg == "--statistics")
        {
            statistics = true;
        }
        else if (arg == "--timing_detail" || arg == "--timing-detail")
        {
            timing_detail = true;
        }
        else if (arg == "--no_copy_data")
        {
            copy_data = false;
        }
        else if (arg == "-v" || arg == "--visualize")
        {
            visualize = true;
        }
        else if (arg == "-d" || arg == "--directory")
        {
            directory = argv[++i];
        }
        else if (arg == "-w" || arg == "--warmup_iterations")
        {
            try
            {
                warmup_iterations = stoi(argv[++i]);
            }
            catch (...)
            {
                cout << "Invalid argument\n";
                failed = true;
            }
        }
        else
        {
            cout << "Unknown option: " << arg << endl;
            failed = true;
        }
    }
    if (!model_arg.empty() && !file_util::exists(model_arg))
    {
        cout << "File " << model_arg << " not found\n";
        failed = true;
    }
    else if (!directory.empty() && !file_util::exists(directory))
    {
        cout << "Directory " << directory << " not found\n";
        failed = true;
    }
    else if (directory.empty() && model_arg.empty())
    {
        cout << "Either a file or a directory must be specified\n";
        failed = true;
    }

    if (failed)
    {
        cout << R"###(
DESCRIPTION
    Benchmark an nGraph JSON model with a given backend.

SYNOPSIS
        nbench [-f <filename>] [-b <backend>] [-i <iterations>]

OPTIONS
        -f|--file                 Serialized model file
        -b|--backend              Backend to use (default: CPU)
        -d|--directory            Directory to scan for models. All models are benchmarked.
        -i|--iterations           Iterations (default: 10)
        -s|--statistics           Display op statistics
        -v|--visualize            Visualize a model (WARNING: requires GraphViz installed)
        --timing_detail           Gather detailed timing
        -w|--warmup_iterations    Number of warm-up iterations
        --no_copy_data            Disable copy of input/result data every iteration
)###";
        return 1;
    }

    vector<string> models;
    if (!directory.empty())
    {
        file_util::iterate_files(directory,
                                 [&](const string& file, bool is_dir) {
                                     if (!is_dir)
                                     {
                                         models.push_back(file);
                                     }
                                 },
                                 true);
    }
    else
    {
        // The missing-file error case was already handled above
        models.push_back(model_arg);
    }

    vector<PerfShape> aggregate_perf_data;
    for (const string& model : models)
    {
        cout << "\n";
        cout << "============================================================================\n";
        cout << "---- Processing '" << model << "'\n";
        cout << "============================================================================\n";
        try
        {
            if (visualize)
            {
                shared_ptr<Function> f = deserialize(model);
                auto model_file_name = ngraph::file_util::get_file_name(model) + std::string(".") +
                                       pass::VisualizeTree::get_file_ext();

                pass::Manager pass_manager;
                pass_manager.register_pass<pass::VisualizeTree>(model_file_name);
                pass_manager.run_passes(f);
            }

            if (statistics)
            {
                shared_ptr<Function> f = deserialize(model);

                cout << "\n---- Source Graph Statistics ----\n";
                cout << "Total nodes: " << f->get_ops().size() << endl;
                size_t total_constant_bytes = 0;
                unordered_map<string, size_t> op_list;
                set<string> type_list;
                for (shared_ptr<Node> node : f->get_ordered_ops())
                {
                    string name = node->get_name();
                    string op_name = name.substr(0, name.find('_'));
                    string shape_name = "{" + join(node->get_outputs()[0].get_shape()) + "}";
                    op_list[op_name + shape_name]++;
                    auto et = get_op_element_type(*node);
                    string type_string = et.c_type_string();
                    type_list.insert(type_string);

                    if (op_name == "Constant")
                    {
                        const Shape& shape = node->get_outputs()[0].get_shape();
                        size_t const_size = node->get_outputs()[0].get_element_type().size();
                        if (shape.size() == 0)
                        {
                            total_constant_bytes += const_size;
                        }
                        else
                        {
                            total_constant_bytes += (const_size * shape_size(shape));
                        }
                    }
                }
                cout << "--\n";
                cout << "Total Constant size: " << total_constant_bytes << " bytes\n";
                cout << "--\n";
                cout << "Types used:\n";
                for (const string& type : type_list)
                {
                    cout << "    " << type << "\n";
                }
                cout << "--\n";
                for (const pair<string, size_t>& op_info : op_list)
                {
                    cout << op_info.first << ": " << op_info.second << " ops" << endl;
                }
            }

            if (!backend.empty())
            {
                cout << "\n---- Benchmark ----\n";
                shared_ptr<Function> f = deserialize(model);
                auto perf_data = run_benchmark(
                    f, backend, iterations, timing_detail, warmup_iterations, copy_data);
                auto perf_shape = to_perf_shape(f, perf_data);
                aggregate_perf_data.insert(
                    aggregate_perf_data.end(), perf_shape.begin(), perf_shape.end());
                print_results(perf_shape, timing_detail);
            }
        }
        catch (const ngraph::unsupported_op& ue)
        {
            cout << "Unsupported op '" << ue.what() << "' in model " << model << endl;
        }
        catch (const exception& e)
        {
            cout << "Exception caught on '" << model << "'\n" << e.what() << endl;
        }
    }

    if (models.size() > 1)
    {
        cout << "\n";
        cout << "============================================================================\n";
        cout << "---- Aggregate over all models\n";
        cout << "============================================================================\n";
        print_results(aggregate_perf_data, timing_detail);
    }

    return 0;
}