//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include <exception>
#include <iomanip>
#include <iostream>
#include <list>
#include <memory>
#include <random>

#include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/file_util.hpp"
#include "ngraph/log.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/serializer.hpp"

namespace ngraph
{
    class Node;
    class Function;
}

bool validate_list(const std::list<std::shared_ptr<ngraph::Node>>& nodes);
std::shared_ptr<ngraph::Function> make_test_graph();

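// Copy the contents of `data` into the tensor `tv`, starting at byte offset 0.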
template <typename T>
void copy_data(std::shared_ptr<ngraph::runtime::Tensor> tv, const std::vector<T>& data)
{
    size_t data_size = data.size() * sizeof(T);
    tv->write(data.data(), 0, data_size);
}

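// Read the entire tensor back into a std::vector<T>; throws std::invalid_argument
// if T does not match the tensor's element type.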
template <typename T>
std::vector<T> read_vector(std::shared_ptr<ngraph::runtime::Tensor> tv)
{
    if (ngraph::element::from<T>() != tv->get_tensor_layout()->get_element_type())
    {
        throw std::invalid_argument("read_vector type must match Tensor type");
    }
    size_t element_count = ngraph::shape_size(tv->get_shape());
    size_t size = element_count * sizeof(T);
    std::vector<T> rc(element_count);
    tv->read(rc.data(), 0, size);
    return rc;
}

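// Read a tensor's contents into a std::vector<float> (defined out of line).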
std::vector<float> read_float_vector(std::shared_ptr<ngraph::runtime::Tensor> tv);

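// Write the contents of `values` into the tensor `tv`, starting at byte offset 0.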
template <typename T>
void write_vector(std::shared_ptr<ngraph::runtime::Tensor> tv, const std::vector<T>& values)
{
    tv->write(values.data(), 0, values.size() * sizeof(T));
}

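// Collect every op in `f` whose dynamic type is (or derives from) T, in traversal order.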
template <typename T>
std::vector<std::shared_ptr<T>> get_ops_of_type(std::shared_ptr<ngraph::Function> f)
{
    std::vector<std::shared_ptr<T>> ops;
    for (auto op : f->get_ops())
    {
        if (auto cop = std::dynamic_pointer_cast<T>(op))
        {
            ops.push_back(cop);
        }
    }

    return ops;
}

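// Count the ops in `f` whose dynamic type is (or derives from) T.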
template <typename T>
size_t count_ops_of_type(std::shared_ptr<ngraph::Function> f)
{
    size_t count = 0;
    for (auto op : f->get_ops())
    {
        if (std::dynamic_pointer_cast<T>(op))
        {
            count++;
        }
    }

    return count;
}

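// Fill an integer tensor with values drawn uniformly from [min, max] (inclusive).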
template <typename T>
void init_int_tv(ngraph::runtime::Tensor* tv, std::default_random_engine& engine, T min, T max)
{
    size_t size = tv->get_element_count();
    std::uniform_int_distribution<T> dist(min, max);
    std::vector<T> vec(size);
    for (T& element : vec)
    {
        element = dist(engine);
    }
    tv->write(vec.data(), 0, vec.size() * sizeof(T));
}

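// Fill a floating-point tensor with values drawn uniformly from [min, max).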
template <typename T>
void init_real_tv(ngraph::runtime::Tensor* tv, std::default_random_engine& engine, T min, T max)
{
    size_t size = tv->get_element_count();
    std::uniform_real_distribution<T> dist(min, max);
    std::vector<T> vec(size);
    for (T& element : vec)
    {
        element = dist(engine);
    }
    tv->write(vec.data(), 0, vec.size() * sizeof(T));
}

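// Randomly initialize `tv` according to its element type (defined out of line).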
void random_init(ngraph::runtime::Tensor* tv, std::default_random_engine& engine);

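// Compile and run `function` on the backend named by `backend_id`: each element of `args`
// is copied into an input tensor, the function is executed with validation, and every
// output tensor is read back as a std::vector<T1> (T1 defaults to the input type T).
//
// Minimal usage sketch, assuming an "INTERPRETER" backend is registered and `f` is a
// Function with two f32 parameters:
//
//   std::vector<std::vector<float>> args{{1.0f, 2.0f}, {3.0f, 4.0f}};
//   auto outputs = execute<float>(f, args, "INTERPRETER");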
template <typename T, typename T1 = T>
std::vector<std::vector<T1>> execute(const std::shared_ptr<ngraph::Function>& function,
                                     std::vector<std::vector<T>> args,
                                     const std::string& backend_id)
{
    auto backend = ngraph::runtime::Backend::create(backend_id);

    auto parms = function->get_parameters();

    if (parms.size() != args.size())
    {
        throw ngraph::ngraph_error("number of parameters and arguments don't match");
    }

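    // Create an input tensor for each parameter and copy the corresponding argument into it.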
    std::vector<std::shared_ptr<ngraph::runtime::Tensor>> arg_tensors(args.size());
    for (size_t i = 0; i < args.size(); i++)
    {
        auto t = backend->create_tensor(parms.at(i)->get_element_type(), parms.at(i)->get_shape());
        copy_data(t, args.at(i));
        arg_tensors.at(i) = t;
    }

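    // Allocate an output tensor for each of the function's results.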
    auto results = function->get_results();
    std::vector<std::shared_ptr<ngraph::runtime::Tensor>> result_tensors(results.size());

    for (size_t i = 0; i < results.size(); i++)
    {
        result_tensors.at(i) =
            backend->create_tensor(results.at(i)->get_element_type(), results.at(i)->get_shape());
    }

    backend->call_with_validate(function, result_tensors, arg_tensors);

    std::vector<std::vector<T1>> result_vectors;
    for (auto rt : result_tensors)
    {
        result_vectors.push_back(read_vector<T1>(rt));
    }
    return result_vectors;
}

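// Print up to `max_results` reference/actual value pairs side by side for debugging.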
template <typename T>
void print_results(std::vector<T>& ref_data, std::vector<T>& actual_data, size_t max_results = 16)
{
    size_t num_results = std::min(static_cast<size_t>(max_results), ref_data.size());
    std::cout << "First " << num_results << " results";
    for (size_t i = 0; i < num_results; ++i)
    {
        std::cout << "\n"
                  << std::setw(4) << i << " ref: " << std::setw(16) << std::left << ref_data[i]
                  << "  actual: " << std::setw(16) << std::left << actual_data[i];
    }
    std::cout << std::endl;
}

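// Specialization for char data (defined out of line).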
template <>
void print_results(std::vector<char>& ref_data, std::vector<char>& actual_data, size_t max_results);