benchmark.cpp
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <random>
#if defined(__x86_64__) || defined(__amd64__)
#include <xmmintrin.h>
#endif

#include "benchmark.hpp"
#include "ngraph/file_util.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/serializer.hpp"
#include "ngraph/util.hpp"

using namespace std;
using namespace ngraph;

static default_random_engine s_random_engine;

void set_denormals_flush_to_zero()
{
#if defined(__x86_64__) || defined(__amd64__)
    // Avoids perf impact from denormals while benchmarking with random data
    _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
    _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
#endif
}

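// Fill an integer-typed tensor with values drawn uniformly from [min, max].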
template <typename T>
void init_int_tv(shared_ptr<runtime::Tensor> tv, T min, T max)
{
    size_t size = tv->get_element_count();
    uniform_int_distribution<T> dist(min, max);
    vector<T> vec(size);
    for (T& element : vec)
    {
        element = dist(s_random_engine);
    }
    tv->write(vec.data(), 0, vec.size() * sizeof(T));
}

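// std::uniform_int_distribution is not defined for byte-sized types (the standard only
// requires short and wider), so the specializations below draw an int16_t and narrow it.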
template <>
void init_int_tv<char>(shared_ptr<runtime::Tensor> tv, char min, char max)
{
    size_t size = tv->get_element_count();
    uniform_int_distribution<int16_t> dist(static_cast<short>(min), static_cast<short>(max));
    vector<char> vec(size);
    for (char& element : vec)
    {
        element = static_cast<char>(dist(s_random_engine));
    }
    tv->write(vec.data(), 0, vec.size() * sizeof(char));
}

template <>
void init_int_tv<int8_t>(shared_ptr<runtime::Tensor> tv, int8_t min, int8_t max)
{
    size_t size = tv->get_element_count();
    uniform_int_distribution<int16_t> dist(static_cast<short>(min), static_cast<short>(max));
    vector<int8_t> vec(size);
    for (int8_t& element : vec)
    {
        element = static_cast<int8_t>(dist(s_random_engine));
    }
    tv->write(vec.data(), 0, vec.size() * sizeof(int8_t));
}

template <>
void init_int_tv<uint8_t>(shared_ptr<runtime::Tensor> tv, uint8_t min, uint8_t max)
{
    size_t size = tv->get_element_count();
    uniform_int_distribution<int16_t> dist(static_cast<short>(min), static_cast<short>(max));
    vector<uint8_t> vec(size);
    for (uint8_t& element : vec)
    {
        element = static_cast<uint8_t>(dist(s_random_engine));
    }
    tv->write(vec.data(), 0, vec.size() * sizeof(uint8_t));
}

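// Fill a floating-point tensor with values drawn uniformly from [min, max).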
template <typename T>
void init_real_tv(shared_ptr<runtime::Tensor> tv, T min, T max)
{
    size_t size = tv->get_element_count();
    uniform_real_distribution<T> dist(min, max);
    vector<T> vec(size);
    for (T& element : vec)
    {
        element = dist(s_random_engine);
    }
    tv->write(vec.data(), 0, vec.size() * sizeof(T));
}

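// Fill a tensor with small random values chosen according to its element type.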
static void random_init(shared_ptr<runtime::Tensor> tv)
{
    element::Type et = tv->get_element_type();
    switch (et.get_type_enum())
    {
    case element::Type_t::boolean: init_int_tv<char>(tv, 0, 1); break;
    case element::Type_t::f32: init_real_tv<float>(tv, -1, 1); break;
    case element::Type_t::f64: init_real_tv<double>(tv, -1, 1); break;
    case element::Type_t::i8: init_int_tv<int8_t>(tv, -1, 1); break;
    case element::Type_t::i16: init_int_tv<int16_t>(tv, -1, 1); break;
    case element::Type_t::i32: init_int_tv<int32_t>(tv, 0, 1); break;
    case element::Type_t::i64: init_int_tv<int64_t>(tv, -1, 1); break;
    case element::Type_t::u8: init_int_tv<uint8_t>(tv, 0, 1); break;
    case element::Type_t::u16: init_int_tv<uint16_t>(tv, 0, 1); break;
    case element::Type_t::u32: init_int_tv<uint32_t>(tv, 0, 1); break;
    case element::Type_t::u64: init_int_tv<uint64_t>(tv, 0, 1); break;
    default: throw runtime_error("unsupported type");
    }
}

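// Compile the given function on the named backend, run the requested warm-up calls
// untimed, then time the benchmark iterations and return the backend's per-op
// performance counters. With copy_data set, input/output tensor transfers are
// performed inside the timed loop, so their cost is included in the reported time.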
vector<runtime::PerformanceCounter> run_benchmark(shared_ptr<Function> f,
                                                  const string& backend_name,
                                                  size_t iterations,
                                                  bool timing_detail,
                                                  int warmup_iterations,
                                                  bool copy_data)
{
    stopwatch timer;
    timer.start();
    auto backend = runtime::Backend::create(backend_name);
    auto compiled_func = backend->compile(f, timing_detail);
    timer.stop();
    cout.imbue(locale(""));
    cout << "compile time: " << timer.get_milliseconds() << "ms" << endl;

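    // For each parameter, create a backend tensor plus a host-side copy filled with
    // random data, and initialize the backend tensor from it.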
    vector<shared_ptr<runtime::HostTensor>> arg_data;
    vector<shared_ptr<runtime::Tensor>> args;
    vector<bool> args_cacheable;
    for (shared_ptr<op::Parameter> param : f->get_parameters())
    {
        auto tensor = backend->create_tensor(param->get_element_type(), param->get_shape());
        auto tensor_data =
            make_shared<runtime::HostTensor>(param->get_element_type(), param->get_shape());
        random_init(tensor_data);
        tensor->write(tensor_data->get_data_ptr(),
                      0,
                      tensor_data->get_element_count() * tensor_data->get_element_type().size());
        args.push_back(tensor);
        arg_data.push_back(tensor_data);
        args_cacheable.push_back(param->get_cacheable());
    }
    set_denormals_flush_to_zero();

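    // For each result, create a backend tensor plus a host-side tensor used to read
    // the output back when copy_data is enabled.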
    vector<shared_ptr<runtime::HostTensor>> result_data;
    vector<shared_ptr<runtime::Tensor>> results;
    for (shared_ptr<Node> out : f->get_results())
    {
        auto result = backend->create_tensor(out->get_element_type(), out->get_shape());
        auto tensor_data =
            make_shared<runtime::HostTensor>(out->get_element_type(), out->get_shape());
        results.push_back(result);
        result_data.push_back(tensor_data);
    }

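    // Parameters reported as cacheable are marked not stale, so the copy_data path
    // below will not rewrite them each iteration (and backends may cache them).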
    for (size_t i = 0; i < args.size(); i++)
    {
        if (args_cacheable[i])
        {
            args[i]->set_stale(false);
        }
    }

    if (warmup_iterations)
    {
        for (int i = 0; i < warmup_iterations; i++)
        {
            compiled_func->call(results, args);
        }
    }

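    // Timed loop: optionally refresh stale inputs from their host copies, run the
    // compiled function, and optionally read the results back out.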
    stopwatch t1;
    t1.start();
    for (size_t i = 0; i < iterations; i++)
    {
        if (copy_data)
        {
            for (size_t arg_index = 0; arg_index < args.size(); arg_index++)
            {
                const shared_ptr<runtime::Tensor>& arg = args[arg_index];
                if (arg->get_stale())
                {
                    const shared_ptr<runtime::HostTensor>& data = arg_data[arg_index];
                    arg->write(data->get_data_ptr(),
                               0,
                               data->get_element_count() * data->get_element_type().size());
                }
            }
        }
        compiled_func->call(results, args);
        if (copy_data)
        {
            for (size_t result_index = 0; result_index < results.size(); result_index++)
            {
                const shared_ptr<runtime::HostTensor>& data = result_data[result_index];
                const shared_ptr<runtime::Tensor>& result = results[result_index];
                result->read(data->get_data_ptr(),
                             0,
                             data->get_element_count() * data->get_element_type().size());
            }
        }
    }
    t1.stop();
    float time = t1.get_milliseconds();
    cout << time / iterations << "ms per iteration" << endl;

    vector<runtime::PerformanceCounter> perf_data = compiled_func->get_performance_data();
    return perf_data;
}