• Scott Cyphers's avatar
    GetOutputElement removal preparation (#4425) · 0af33226
    Scott Cyphers authored
    * GetOutputElement removal preparation
    
    Not all outputs are used so don't force them to be connected in replace
    Add pattern that matches on any output
    Remove GOEs by default, allow to disable
    Fix failing core passes/tests with GOE dependency
    
    * Fix MLIR call
    
    * Fix value handle assignment
    
    * Cleanup
    
    * style
    
    * review comments
    
    * Fix onnx tests
    
    * Allow simplifications to work on multi-value nodes
    
    * Disable goe removal for MLIR test
    
    * null init of Output
    Co-authored-by: nmostafa <nagy.h.mostafa@intel.com>
    0af33226
test_tools.hpp 9.63 KB
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include <algorithm>
#include <cstdint>
#include <exception>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <list>
#include <memory>
#include <random>
#include <sstream>
#include <stdexcept>
#include <type_traits>
#include <vector>

#include "gtest/gtest.h"
#include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/file_util.hpp"
#include "ngraph/log.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/serializer.hpp"

// When building with MLIR support, prefix the test name with "DISABLED_" so
// that gtest skips it; otherwise pass the name through unchanged.
#ifdef NGRAPH_MLIR_ENABLE
#define MLIR_DISABLE_TEST(name) DISABLED_##name
#else
#define MLIR_DISABLE_TEST(name) name
#endif

// Forward declarations of the ngraph types referenced by the declarations
// below in this header.
namespace ngraph
{
    class Node;
    class Function;
}

// RAII guard that turns off GetOutputElement removal for its lifetime.
// The previous value of the remove-goe flag is captured on construction
// and restored by the destructor.
class DisableRemoveGOE
{
public:
    DisableRemoveGOE()
        : m_previous_remove_goe(ngraph::get_remove_goe())
    {
        ngraph::set_remove_goe(false);
    }
    ~DisableRemoveGOE() { ngraph::set_remove_goe(m_previous_remove_goe); }
private:
    // Flag value in effect before this guard was constructed.
    bool m_previous_remove_goe;
};

bool validate_list(const std::vector<std::shared_ptr<ngraph::Node>>& nodes);
std::shared_ptr<ngraph::Function> make_test_graph();
#ifndef NGRAPH_JSON_DISABLE
std::shared_ptr<ngraph::Function> make_function_from_file(const std::string& file_name);
#endif

// Write the entire contents of \p data into the backend tensor \p tv.
template <typename T>
void copy_data(std::shared_ptr<ngraph::runtime::Tensor> tv, const std::vector<T>& data)
{
    const size_t byte_count = sizeof(T) * data.size();
    tv->write(data.data(), byte_count);
}

template <>
void copy_data<bool>(std::shared_ptr<ngraph::runtime::Tensor> tv, const std::vector<bool>& data);

// Copy the contents of tensor \p tv into a std::vector<T>.
// Throws std::invalid_argument when T does not match the tensor's element type.
template <typename T>
std::vector<T> read_vector(std::shared_ptr<ngraph::runtime::Tensor> tv)
{
    if (tv->get_element_type() != ngraph::element::from<T>())
    {
        throw std::invalid_argument("read_vector type must match Tensor type");
    }
    const size_t element_count = ngraph::shape_size(tv->get_shape());
    std::vector<T> result(element_count);
    tv->read(result.data(), element_count * sizeof(T));
    return result;
}

std::vector<float> read_float_vector(std::shared_ptr<ngraph::runtime::Tensor> tv);

// Write the entire contents of \p values into the backend tensor \p tv.
template <typename T>
void write_vector(std::shared_ptr<ngraph::runtime::Tensor> tv, const std::vector<T>& values)
{
    const size_t byte_count = sizeof(T) * values.size();
    tv->write(values.data(), byte_count);
}

/// \brief Collect every op in \p f whose type is \p T, in the order returned
///        by Function::get_ops().
/// \return Vector of shared_ptrs to the matching ops (empty if none match).
template <typename T>
std::vector<std::shared_ptr<T>> get_ops_of_type(std::shared_ptr<ngraph::Function> f)
{
    std::vector<std::shared_ptr<T>> ops;
    // Iterate by const reference: copying each shared_ptr would cost an
    // atomic refcount increment/decrement per op for no benefit.
    for (const auto& op : f->get_ops())
    {
        if (auto cop = ngraph::as_type_ptr<T>(op))
        {
            ops.push_back(std::move(cop));
        }
    }

    return ops;
}

/// \brief Count the ops in \p f that are of type \p T.
/// \return The number of matching ops (0 if none).
template <typename T>
size_t count_ops_of_type(std::shared_ptr<ngraph::Function> f)
{
    size_t count = 0;
    // Iterate by const reference to avoid an atomic refcount bump for each
    // shared_ptr in the op list.
    for (const auto& op : f->get_ops())
    {
        if (ngraph::is_type<T>(op))
        {
            count++;
        }
    }

    return count;
}

/// \brief Fill \p tv with random integers drawn uniformly from [min, max].
///
/// \param tv     Tensor to fill; one value is generated per element.
/// \param engine Random engine providing the entropy (caller-owned state).
/// \param min    Inclusive lower bound of the generated values.
/// \param max    Inclusive upper bound of the generated values.
template <typename T>
void init_int_tv(ngraph::runtime::Tensor* tv, std::default_random_engine& engine, T min, T max)
{
    // std::uniform_int_distribution is undefined for char-sized types (the
    // standard only allows short/int/long/long long and unsigned variants),
    // so widen narrow types for the distribution and narrow on store.
    using dist_type =
        std::conditional_t<(sizeof(T) < sizeof(short)),
                           std::conditional_t<std::is_signed<T>::value, int32_t, uint32_t>,
                           T>;
    size_t size = tv->get_element_count();
    std::uniform_int_distribution<dist_type> dist(static_cast<dist_type>(min),
                                                  static_cast<dist_type>(max));
    std::vector<T> vec(size);
    for (T& element : vec)
    {
        element = static_cast<T>(dist(engine));
    }
    tv->write(vec.data(), vec.size() * sizeof(T));
}

/// \brief Fill \p tv with random reals drawn uniformly from [min, max).
///
/// \param tv     Tensor to fill; one value is generated per element.
/// \param engine Random engine providing the entropy (caller-owned state).
/// \param min    Inclusive lower bound of the generated values.
/// \param max    Exclusive upper bound of the generated values.
template <typename T>
void init_real_tv(ngraph::runtime::Tensor* tv, std::default_random_engine& engine, T min, T max)
{
    std::uniform_real_distribution<T> dist(min, max);
    std::vector<T> values(tv->get_element_count());
    for (auto& value : values)
    {
        value = dist(engine);
    }
    tv->write(values.data(), values.size() * sizeof(T));
}

void random_init(ngraph::runtime::Tensor* tv, std::default_random_engine& engine);

/// \brief Compile \p function on backend \p backend_id and run it once.
///
/// One input tensor is created per function parameter, filled in order: first
/// from the entries of \p t1args, then from the entries of \p t2args. One
/// output tensor is created per function result.
///
/// \throws ngraph::ngraph_error when t1args.size() + t2args.size() differs
///         from the function's parameter count.
/// \return The result tensors, one per function result, filled by the call.
template <typename T1, typename T2>
std::vector<std::shared_ptr<ngraph::runtime::Tensor>>
    prepare_and_run(const std::shared_ptr<ngraph::Function>& function,
                    const std::vector<std::vector<T1>>& t1args,
                    const std::vector<std::vector<T2>>& t2args,
                    const std::string& backend_id)
{
    auto backend = ngraph::runtime::Backend::create(backend_id);

    auto parms = function->get_parameters();

    if (parms.size() != t1args.size() + t2args.size())
    {
        throw ngraph::ngraph_error("number of parameters and arguments don't match");
    }

    std::vector<std::shared_ptr<ngraph::runtime::Tensor>> arg_tensors(t1args.size() +
                                                                      t2args.size());

    size_t total_arg_count = 0;
    for (size_t i = 0; i < t1args.size(); i++)
    {
        auto t = backend->create_tensor(parms.at(total_arg_count)->get_element_type(),
                                        parms.at(total_arg_count)->get_shape());
        // copy_data takes a const reference, so pass the argument directly
        // instead of making an intermediate copy of the data vector.
        copy_data(t, t1args.at(i));
        arg_tensors.at(total_arg_count) = t;
        total_arg_count++;
    }

    for (size_t i = 0; i < t2args.size(); i++)
    {
        auto t = backend->create_tensor(parms.at(total_arg_count)->get_element_type(),
                                        parms.at(total_arg_count)->get_shape());
        copy_data(t, t2args.at(i));
        arg_tensors.at(total_arg_count) = t;
        total_arg_count++;
    }

    auto results = function->get_results();
    std::vector<std::shared_ptr<ngraph::runtime::Tensor>> result_tensors(results.size());

    for (size_t i = 0; i < results.size(); i++)
    {
        result_tensors.at(i) =
            backend->create_tensor(results.at(i)->get_element_type(), results.at(i)->get_shape());
    }

    auto handle = backend->compile(function);
    handle->call_with_validate(result_tensors, arg_tensors);

    return result_tensors;
}

/// \brief Convenience overload of prepare_and_run for a single input type.
///
/// Arguments are taken by const reference to avoid a deep copy of all test
/// input data on every call.
template <typename T>
std::vector<std::shared_ptr<ngraph::runtime::Tensor>>
    prepare_and_run(const std::shared_ptr<ngraph::Function>& function,
                    const std::vector<std::vector<T>>& args,
                    const std::string& backend_id)
{
    // Empty second argument list; forwards to the two-type overload.
    std::vector<std::vector<T>> emptyargs;
    return prepare_and_run<T, T>(function, args, emptyargs, backend_id);
}

/// \brief Run \p function on \p backend_id and return each result as a
///        std::vector<TOUT>, in result order.
///
/// Arguments are taken by const reference to avoid copying all input data.
template <typename TIN1, typename TIN2, typename TOUT>
std::vector<std::vector<TOUT>> execute(const std::shared_ptr<ngraph::Function>& function,
                                       const std::vector<std::vector<TIN1>>& t1args,
                                       const std::vector<std::vector<TIN2>>& t2args,
                                       const std::string& backend_id)
{
    std::vector<std::shared_ptr<ngraph::runtime::Tensor>> result_tensors =
        prepare_and_run(function, t1args, t2args, backend_id);

    std::vector<std::vector<TOUT>> result_vectors;
    result_vectors.reserve(result_tensors.size());
    // Iterate by const reference to avoid shared_ptr refcount churn.
    for (const auto& rt : result_tensors)
    {
        result_vectors.push_back(read_vector<TOUT>(rt));
    }
    return result_vectors;
}

/// \brief Convenience overload of execute for a single input element type.
///
/// Arguments are taken by const reference to avoid copying all input data.
template <typename TIN, typename TOUT = TIN>
std::vector<std::vector<TOUT>> execute(const std::shared_ptr<ngraph::Function>& function,
                                       const std::vector<std::vector<TIN>>& args,
                                       const std::string& backend_id)
{
    // Empty second argument list; forwards to the three-type overload.
    std::vector<std::vector<TIN>> emptyargs;
    return execute<TIN, TIN, TOUT>(function, args, emptyargs, backend_id);
}

/// \brief Format the first \p max_results reference/actual value pairs side
///        by side, for use in test-failure messages.
///
/// The number of printed rows is clamped to the size of BOTH vectors, so a
/// shorter \p actual_data can no longer cause an out-of-bounds read.
///
/// \param ref_data    Expected values.
/// \param actual_data Observed values.
/// \param max_results Maximum number of rows to print (default 16).
/// \return Multi-line string, one row per compared element.
template <typename T>
std::string get_results_str(const std::vector<T>& ref_data,
                            const std::vector<T>& actual_data,
                            size_t max_results = 16)
{
    std::stringstream ss;
    size_t num_results = std::min({max_results, ref_data.size(), actual_data.size()});
    ss << "First " << num_results << " results";
    for (size_t i = 0; i < num_results; ++i)
    {
        ss << std::endl
           // use unary + operator to force integral values to be displayed as numbers
           << std::setw(4) << i << " ref: " << std::setw(16) << std::left << +ref_data[i]
           << "  actual: " << std::setw(16) << std::left << +actual_data[i];
    }
    ss << std::endl;

    return ss.str();
}

template <>
std::string get_results_str(const std::vector<char>& ref_data,
                            const std::vector<char>& actual_data,
                            size_t max_results);

/// \brief      Reads a binary file to a vector.
///
/// \param[in]  path  The path where the file is located.
///
/// \tparam     T     The type we want to interpret as the elements in binary file.
///
/// \return     Return vector of data read from input binary file.
///
/// \throws     std::runtime_error if the file cannot be opened, its size
///             cannot be determined, the size is not a multiple of sizeof(T),
///             or the read does not complete successfully.
template <typename T>
std::vector<T> read_binary_file(const std::string& path)
{
    std::vector<T> file_content;
    std::ifstream inputs_fs{path, std::ios::in | std::ios::binary};
    if (!inputs_fs)
    {
        throw std::runtime_error("Failed to open the file: " + path);
    }

    inputs_fs.seekg(0, std::ios::end);
    auto size = inputs_fs.tellg();
    inputs_fs.seekg(0, std::ios::beg);
    // tellg() reports -1 when the stream position could not be obtained.
    if (size < 0)
    {
        throw std::runtime_error("Failed to determine the size of the file: " + path);
    }
    if (size % sizeof(T) != 0)
    {
        throw std::runtime_error(
            "Error reading binary file content: Input file size (in bytes) "
            "is not a multiple of requested data type size.");
    }
    file_content.resize(size / sizeof(T));
    inputs_fs.read(reinterpret_cast<char*>(file_content.data()), size);
    // A short or failed read would otherwise silently yield garbage elements.
    if (!inputs_fs)
    {
        throw std::runtime_error("Failed to read the contents of the file: " + path);
    }
    return file_content;
}

testing::AssertionResult test_ordered_ops(std::shared_ptr<ngraph::Function> f,
                                          const ngraph::NodeVector& required_ops);