Commit 40bcfdf7 authored by gcwenger's avatar gcwenger Committed by Robert Kimball

Heterogeneous serialized graph testing across backends (#2020)

* Heterogeneous sub-graph comparison testing

* Print index for float differences

* Disabled compare_backends_with_graphs on most backends for now. Moved to new file. Added testing of unsigned values.

* Fixed element::boolean range. Added missing include.

* Switched use of shared_ptr as param to raw *. Moved to using namespace std in cpp. Fixed comment marker in unit_test.manifest files. Switched some EXPECT_EQ TO ASSERT_EQ. Fixed parameterized test disabling.

* Frozen naming -> serialized. Removed extraneous comments.

* Graph comparison unit test relies on CPU for reference, so only build when CPU is built.

* Reworked per backend disabling of compare_backends_with_graphs
parent 31e2765a
......@@ -138,6 +138,9 @@ set(MULTI_TEST_SRC
# Distributed tests are only meaningful when the distributed build is on.
if(NGRAPH_DISTRIBUTED_ENABLE)
list(APPEND MULTI_TEST_SRC distributed.in.cpp)
endif()
# The graph comparison test uses the CPU backend as its reference, so it can
# only be built when the CPU backend is enabled.
if (NGRAPH_CPU_ENABLE)
list(APPEND MULTI_TEST_SRC backend_graph_comparison.in.cpp)
endif()
foreach(BACKEND_NAME ${ACTIVE_BACKEND_LIST})
# Some---but not all---autodiff tests go through multiple iterations with
......
This diff is collapsed.
......@@ -34,10 +34,11 @@ namespace ngraph
/// \param atol Absolute tolerance
/// \returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|.
template <typename T>
bool all_close(const std::vector<T>& a,
const std::vector<T>& b,
T rtol = static_cast<T>(1e-5),
T atol = static_cast<T>(1e-8))
typename std::enable_if<std::is_signed<T>::value, bool>::type
all_close(const std::vector<T>& a,
const std::vector<T>& b,
T rtol = static_cast<T>(1e-5),
T atol = static_cast<T>(1e-8))
{
bool rc = true;
assert(a.size() == b.size());
......@@ -52,6 +53,33 @@ namespace ngraph
return rc;
}
/// \brief Same as numpy.allclose
/// \param a First tensor to compare
/// \param b Second tensor to compare
/// \param rtol Relative tolerance
/// \param atol Absolute tolerance
/// \returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|.
///
/// Overload selected for unsigned element types, where the difference must be
/// computed without wrapping below zero.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, bool>::type
    all_close(const std::vector<T>& a,
              const std::vector<T>& b,
              T rtol = static_cast<T>(1e-5),
              T atol = static_cast<T>(1e-8))
{
    assert(a.size() == b.size());
    bool all_ok = true;
    for (size_t idx = 0; idx < a.size(); ++idx)
    {
        // Unsigned subtraction would wrap around, so subtract smaller from larger.
        const T abs_diff = (b[idx] > a[idx]) ? (b[idx] - a[idx]) : (a[idx] - b[idx]);
        if (abs_diff > atol + rtol * b[idx])
        {
            NGRAPH_INFO << a[idx] << " is not close to " << b[idx] << " at index " << idx;
            all_ok = false;
        }
    }
    return all_ok;
}
/// \brief Same as numpy.allclose
/// \param a First tensor to compare
/// \param b Second tensor to compare
......
......@@ -72,7 +72,7 @@ bool test::all_close_f(const vector<float>& a,
bool is_close_f = close_f(a[i], b[i], mantissa_bits, tolerance_bits);
if (!is_close_f)
{
NGRAPH_INFO << a[i] << " is not close to " << b[i];
NGRAPH_INFO << a[i] << " is not close to " << b[i] << " at index " << i;
rc = false;
}
}
......
......@@ -120,7 +120,7 @@ namespace ngraph
::testing::internal::CodeLocation(__FILE__, __LINE__)) \
->AddTestPattern( \
#backend_name "/" #test_case_name, \
::ngraph::prepend_disabled(#test_case_name, #test_name, s_manifest).c_str(), \
::ngraph::prepend_disabled(#backend_name, #test_name, s_manifest).c_str(), \
new ::testing::internal::TestMetaFactory<NGRAPH_GTEST_TEST_CLASS_NAME_( \
backend_name, test_case_name, test_name)>()); \
return 0; \
......
......@@ -147,3 +147,119 @@ shared_ptr<Function> make_test_graph()
return f0;
}
// Specialization for char: std::uniform_int_distribution is not defined for
// narrow character types, so sample 16-bit values and narrow the results.
template <>
void init_int_tv<char>(ngraph::runtime::Tensor* tv,
                       std::default_random_engine& engine,
                       char min,
                       char max)
{
    std::uniform_int_distribution<int16_t> dist(static_cast<short>(min), static_cast<short>(max));
    std::vector<char> data(tv->get_element_count());
    for (size_t i = 0; i < data.size(); ++i)
    {
        data[i] = static_cast<char>(dist(engine));
    }
    tv->write(data.data(), 0, data.size() * sizeof(char));
}
// Specialization for int8_t: std::uniform_int_distribution is not defined for
// narrow character types, so sample 16-bit values and narrow the results.
template <>
void init_int_tv<int8_t>(ngraph::runtime::Tensor* tv,
                         std::default_random_engine& engine,
                         int8_t min,
                         int8_t max)
{
    std::uniform_int_distribution<int16_t> dist(static_cast<short>(min), static_cast<short>(max));
    std::vector<int8_t> data(tv->get_element_count());
    for (size_t i = 0; i < data.size(); ++i)
    {
        data[i] = static_cast<int8_t>(dist(engine));
    }
    tv->write(data.data(), 0, data.size() * sizeof(int8_t));
}
// Specialization for uint8_t: std::uniform_int_distribution is not defined for
// narrow character types, so sample 16-bit values and narrow the results.
template <>
void init_int_tv<uint8_t>(ngraph::runtime::Tensor* tv,
                          std::default_random_engine& engine,
                          uint8_t min,
                          uint8_t max)
{
    std::uniform_int_distribution<int16_t> dist(static_cast<short>(min), static_cast<short>(max));
    std::vector<uint8_t> data(tv->get_element_count());
    for (size_t i = 0; i < data.size(); ++i)
    {
        data[i] = static_cast<uint8_t>(dist(engine));
    }
    tv->write(data.data(), 0, data.size() * sizeof(uint8_t));
}
// Fill \p tv with random values appropriate for its element type:
// - boolean: 0 or 1
// - floating point: uniform in [numeric_limits<X>::min(), 1.0)
// - integral: small values around 0/1 (signed 8/16-bit may produce -1)
// Throws std::runtime_error for element types with no initializer.
void random_init(ngraph::runtime::Tensor* tv, std::default_random_engine& engine)
{
    element::Type et = tv->get_element_type();
    if (et == element::boolean)
    {
        init_int_tv<char>(tv, engine, 0, 1);
    }
    else if (et == element::f32)
    {
        init_real_tv<float>(tv, engine, numeric_limits<float>::min(), 1.0f);
    }
    else if (et == element::f64)
    {
        // BUGFIX: this branch previously passed numeric_limits<float>::min()
        // and 1.0f for f64 tensors (copy-paste from the f32 branch); use the
        // double-precision limit and literal instead.
        init_real_tv<double>(tv, engine, numeric_limits<double>::min(), 1.0);
    }
    else if (et == element::i8)
    {
        init_int_tv<int8_t>(tv, engine, -1, 1);
    }
    else if (et == element::i16)
    {
        init_int_tv<int16_t>(tv, engine, -1, 1);
    }
    else if (et == element::i32)
    {
        init_int_tv<int32_t>(tv, engine, 0, 1);
    }
    else if (et == element::i64)
    {
        init_int_tv<int64_t>(tv, engine, 0, 1);
    }
    else if (et == element::u8)
    {
        init_int_tv<uint8_t>(tv, engine, 0, 1);
    }
    else if (et == element::u16)
    {
        init_int_tv<uint16_t>(tv, engine, 0, 1);
    }
    else if (et == element::u32)
    {
        init_int_tv<uint32_t>(tv, engine, 0, 1);
    }
    else if (et == element::u64)
    {
        init_int_tv<uint64_t>(tv, engine, 0, 1);
    }
    else
    {
        throw runtime_error("unsupported type");
    }
}
// Specialization for char tensors: stream each value as an int so it renders
// as a number rather than a raw (possibly unprintable) character.
template <>
void print_results(std::vector<char>& ref_data, std::vector<char>& actual_data, size_t max_results)
{
    size_t shown = std::min(max_results, ref_data.size());
    std::cout << "First " << shown << " results";
    for (size_t idx = 0; idx < shown; ++idx)
    {
        std::cout << "\n"
                  << std::setw(4) << idx << " ref: " << std::setw(16) << std::left
                  << static_cast<int>(ref_data[idx]) << " actual: " << std::setw(16)
                  << std::left << static_cast<int>(actual_data[idx]);
    }
    std::cout << std::endl;
}
......@@ -17,8 +17,11 @@
#pragma once
#include <exception>
#include <iomanip>
#include <iostream>
#include <list>
#include <memory>
#include <random>
#include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/file_util.hpp"
......@@ -95,6 +98,34 @@ size_t count_ops_of_type(std::shared_ptr<ngraph::Function> f)
return count;
}
// Fill \p tv with integers drawn uniformly from [min, max].
// NOTE: T must be a type std::uniform_int_distribution supports — narrow char
// types go through the explicit specializations provided in the test sources.
template <typename T>
void init_int_tv(ngraph::runtime::Tensor* tv, std::default_random_engine& engine, T min, T max)
{
    std::uniform_int_distribution<T> dist(min, max);
    std::vector<T> data(tv->get_element_count());
    for (auto& value : data)
    {
        value = dist(engine);
    }
    tv->write(data.data(), 0, data.size() * sizeof(T));
}
// Fill \p tv with floating-point values drawn uniformly from [min, max).
template <typename T>
void init_real_tv(ngraph::runtime::Tensor* tv, std::default_random_engine& engine, T min, T max)
{
    std::uniform_real_distribution<T> dist(min, max);
    std::vector<T> data(tv->get_element_count());
    for (auto& value : data)
    {
        value = dist(engine);
    }
    tv->write(data.data(), 0, data.size() * sizeof(T));
}
// Fill \p tv with random values chosen according to its element type
// (defined in the matching .cpp; throws for unsupported element types).
void random_init(ngraph::runtime::Tensor* tv, std::default_random_engine& engine);
template <typename T, typename T1 = T>
std::vector<std::vector<T1>> execute(const std::shared_ptr<ngraph::Function>& function,
std::vector<std::vector<T>> args,
......@@ -135,3 +166,20 @@ std::vector<std::vector<T1>> execute(const std::shared_ptr<ngraph::Function>& fu
}
return result_vectors;
}
// Dump the first \p max_results reference/actual value pairs side by side to
// stdout, one indexed row per pair, for diagnosing mismatched tensor outputs.
template <typename T>
void print_results(std::vector<T>& ref_data, std::vector<T>& actual_data, size_t max_results = 16)
{
    const size_t shown = std::min(max_results, ref_data.size());
    std::cout << "First " << shown << " results";
    for (size_t idx = 0; idx < shown; ++idx)
    {
        std::cout << "\n"
                  << std::setw(4) << idx << " ref: " << std::setw(16) << std::left
                  << ref_data[idx] << " actual: " << std::setw(16) << std::left
                  << actual_data[idx];
    }
    std::cout << std::endl;
}
// Specialization for char tensors: values are cast to int before printing so
// they render as numbers rather than raw (possibly unprintable) characters.
template <>
void print_results(std::vector<char>& ref_data, std::vector<char>& actual_data, size_t max_results);
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment