Unverified Commit b8e28555 authored by Yixing Lao, committed by GitHub

Floating point comparison with ULP, adding close_f and all_close_f (#1068)

parent 680be054
......@@ -33,6 +33,7 @@ set (SRC
cse.cpp
element_type.cpp
file_util.cpp
all_close_f.cpp
inliner.cpp
input_output_assign.cpp
main.cpp
......
......@@ -1364,7 +1364,7 @@ NGRAPH_TEST(${BACKEND_NAME}, backwards_tan)
auto pi = 3.14159f;
// Stay away from the asymptotes at 6 and 12 o'clock.
auto slop = 0.1f;
auto slop = 0.2f;
test::Uniform<float> rng_r(-pi / 2 + slop, pi / 2 - slop);
test::Uniform<float> rng_l(pi / 2 + slop, (3 * pi) / 2 - slop);
......
......@@ -16,6 +16,7 @@
set (SRC
autodiff/backprop_function.cpp
all_close_f.cpp
test_tools.cpp
benchmark.cpp
test_control.cpp
......
......@@ -39,15 +39,17 @@ namespace ngraph
T rtol = static_cast<T>(1e-5),
T atol = static_cast<T>(1e-8))
{
bool rc = true;
assert(a.size() == b.size());
for (size_t i = 0; i < a.size(); ++i)
{
if (std::abs(a[i] - b[i]) > atol + rtol * std::abs(b[i]))
{
return false;
NGRAPH_INFO << a[i] << " is not close to " << b[i];
rc = false;
}
}
return true;
return rc;
}
/// @brief Same as numpy.allclose
......
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <cmath>
#include "util/all_close_f.hpp"
using namespace std;
using namespace ngraph;
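// Reinterpret a float's bit pattern as an unsigned integer so it can be compared as one.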
union FloatUnion {
float f;
uint32_t i;
};
bool test::close_f(float a, float b, int mantissa_bits, int tolerance_bits)
{
// isfinite(a) => !isinf(a) && !isnan(a)
if (!isfinite(a) || !isfinite(b))
{
return false;
}
FloatUnion a_fu{a};
FloatUnion b_fu{b};
uint32_t a_uint = a_fu.i;
uint32_t b_uint = b_fu.i;
// A trick to handle both positive and negative numbers, see https://goo.gl/YbdnFQ
// - If negative: convert to two's complement
// - If positive: mask with sign bit
uint32_t sign_mask = static_cast<uint32_t>(1U) << 31;
a_uint = (sign_mask & a_uint) ? (~a_uint + 1) : (sign_mask | a_uint);
b_uint = (sign_mask & b_uint) ? (~b_uint + 1) : (sign_mask | b_uint);
uint32_t distance = (a_uint >= b_uint) ? (a_uint - b_uint) : (b_uint - a_uint);
// e.g. for float with 24 bit mantissa, 2 bit accuracy, and hard-coded 8 bit exponent_bits
// tolerance_bit_shift = 32 -           (1 +  8 + (24 -     1         ) - 2             )
//                       float_length    sign exp  mantissa implicit 1    tolerance_bits
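// Evaluated: for the 24 bit example above, tolerance_bit_shift = 2, so tolerance = 2^2 = 4 ULP;
// for the defaults (mantissa_bits = 8, tolerance_bits = 2), tolerance_bit_shift = 18, so
// tolerance = 2^18, i.e. roughly the top 8 - 1 - 2 = 5 explicit mantissa bits must agree.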
uint32_t tolerance_bit_shift = 32 - (1 + 8 + (mantissa_bits - 1) - tolerance_bits);
uint32_t tolerance = static_cast<uint32_t>(1U) << tolerance_bit_shift;
return distance <= tolerance;
}
bool test::all_close_f(const vector<float>& a,
const vector<float>& b,
int mantissa_bits,
int tolerance_bits)
{
bool rc = true;
if (a.size() != b.size())
{
throw ngraph_error("a.size() != b.size() for all_close comparison.");
}
for (size_t i = 0; i < a.size(); ++i)
{
bool is_close_f = close_f(a[i], b[i], mantissa_bits, tolerance_bits);
if (!is_close_f)
{
NGRAPH_INFO << a[i] << " is not close to " << b[i];
rc = false;
}
}
return rc;
}
bool test::all_close_f(const std::shared_ptr<runtime::TensorView>& a,
const std::shared_ptr<runtime::TensorView>& b,
int mantissa_bits,
int tolerance_bits)
{
// Check that the layouts are compatible
if (*a->get_tensor_view_layout() != *b->get_tensor_view_layout())
{
throw ngraph_error("Cannot compare tensors with different layouts");
}
if (a->get_shape() != b->get_shape())
{
return false;
}
return test::all_close_f(
read_float_vector(a), read_float_vector(b), mantissa_bits, tolerance_bits);
}
bool test::all_close_f(const std::vector<std::shared_ptr<runtime::TensorView>>& as,
const std::vector<std::shared_ptr<runtime::TensorView>>& bs,
int mantissa_bits,
int tolerance_bits)
{
if (as.size() != bs.size())
{
return false;
}
for (size_t i = 0; i < as.size(); ++i)
{
if (!test::all_close_f(as[i], bs[i], mantissa_bits, tolerance_bits))
{
return false;
}
}
return true;
}
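For context, a minimal standalone sketch of how close_f behaves under the f32 defaults (illustrative only, not part of this change; it assumes the include path used above):
#include <cassert>
#include <cmath>
#include "util/all_close_f.hpp"
int main()
{
    float a = 1.0f;
    float b = std::nextafter(a, 2.0f); // exactly one ULP above 1.0f
    assert(ngraph::test::close_f(a, b, 24, 2));     // distance of 1 ULP is within 2^2 = 4 ULP
    assert(!ngraph::test::close_f(a, 2.0f, 24, 2)); // far outside the 4 ULP tolerance
    return 0;
}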
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include <memory>
#include <vector>
#include "test_tools.hpp"
namespace ngraph
{
namespace test
{
/// @brief Check if the two f32 numbers are close
/// @param a First number to compare
/// @param b Second number to compare
/// @param mantissa_bits The mantissa width of the underlying number before casting to float
/// @param tolerance_bits Bit tolerance error
/// @returns True iff the distance between a and b is within 2 ^ tolerance_bits ULP
///
/// References:
/// - https://en.wikipedia.org/wiki/Unit_in_the_last_place
/// - https://randomascii.wordpress.com/2012/01/23/stupid-float-tricks-2
/// - https://github.com/google/googletest/blob/master/googletest/docs/AdvancedGuide.md#floating-point-comparison
///
/// s e e e e e e e e m m m m m m m m m m m m m m m m m m m m m m m
/// |------------bfloat-----------|
/// |----------------------------float----------------------------|
///
/// bfloat (s1, e8, m7) has 7 + 1 = 8 bits of mantissa or bit_precision
/// float (s1, e8, m23) has 23 + 1 = 24 bits of mantissa or bit_precision
///
/// This function uses a hard-coded 8 bit exponent_bits value, so it is only valid for
/// bfloat and f32.
bool close_f(float a, float b, int mantissa_bits = 8, int tolerance_bits = 2);
/// @brief Check if the two floating point vectors are all close
/// @param a First vector of numbers to compare
/// @param b Second vector of numbers to compare
/// @param mantissa_bits The mantissa width of the underlying number before casting to float
/// @param tolerance_bits Bit tolerance error
/// @returns true iff the two floating point vectors are close
bool all_close_f(const std::vector<float>& a,
const std::vector<float>& b,
int mantissa_bits = 8,
int tolerance_bits = 2);
/// @brief Check if the two TensorViews are all close in float
/// @param a First TensorView to compare
/// @param b Second TensorView to compare
/// @param mantissa_bits The mantissa width of the underlying number before casting to float
/// @param tolerance_bits Bit tolerance error
/// @returns True iff the two TensorViews are all close in float
bool all_close_f(const std::shared_ptr<runtime::TensorView>& a,
const std::shared_ptr<runtime::TensorView>& b,
int mantissa_bits = 8,
int tolerance_bits = 2);
/// @brief Check if the two vectors of TensorViews are all close in float
/// @param as First vector of TensorView to compare
/// @param bs Second vector of TensorView to compare
/// @param mantissa_bits The mantissa width of the underlying number before casting to float
/// @param tolerance_bits Bit tolerance error
/// @returns True iff the two vectors of TensorViews are all close in float
bool all_close_f(const std::vector<std::shared_ptr<runtime::TensorView>>& as,
const std::vector<std::shared_ptr<runtime::TensorView>>& bs,
int mantissa_bits = 8,
int tolerance_bits = 2);
}
}
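A usage sketch for the vector overload (illustrative only; the test name and values are hypothetical, assuming the project's googletest-based test setup):
#include <vector>
#include "gtest/gtest.h"
#include "util/all_close_f.hpp"
TEST(all_close_f_example, vector_overload)
{
    std::vector<float> expected{1.0f, 2.5f, -3.75f};
    // Adding 1e-7f moves the first element by one ULP of 1.0f, which stays within tolerance.
    std::vector<float> computed{1.0f + 1e-7f, 2.5f, -3.75f};
    EXPECT_TRUE(ngraph::test::all_close_f(expected, computed, 24, 2));
    EXPECT_FALSE(ngraph::test::all_close_f(expected, {1.1f, 2.5f, -3.75f}, 24, 2));
}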
......@@ -21,6 +21,9 @@
#include "util/autodiff/numeric_derivative.hpp"
#include "util/test_tools.hpp"
// TODO: Consider removing the template since only <float> is used in tests and the numerical
// derivative does not work with int types
// TODO: Always compute the numerical derivatives in double
template <typename T>
bool autodiff_numeric_compare(const std::shared_ptr<ngraph::runtime::Backend>& backend,
std::function<std::shared_ptr<ngraph::Function>()> make_graph,
......@@ -28,7 +31,7 @@ bool autodiff_numeric_compare(const std::shared_ptr<ngraph::runtime::Backend>& b
T rtol,
T atol)
{
T delta = static_cast<T>(0.001);
T delta = static_cast<T>(0.0009765625f); // Binary-representable number near 0.001
// Use INTERPRETER to compute numerical derivatives
auto interpreter_backend = ngraph::runtime::Backend::create("INTERPRETER");
......
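To make that comment concrete, a minimal standalone check (illustrative only) that a power-of-two delta survives the perturbation exactly while 0.001f does not:
#include <cassert>
int main()
{
    float x = 1.0f;
    float delta = 0.0009765625f;         // 2^-10, exactly representable in binary
    assert((x + delta) - x == delta);    // the applied perturbation equals the nominal delta
    assert((x + 0.001f) - x != 0.001f);  // 0.001f picks up rounding error in the addition
    return 0;
}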
......@@ -23,6 +23,74 @@
using namespace std;
using namespace ngraph;
vector<float> read_float_vector(shared_ptr<runtime::TensorView> tv)
{
vector<float> float_vec;
element::Type element_type = tv->get_tensor_view_layout()->get_element_type();
if (element_type == element::boolean)
{
vector<char> vec = read_vector<char>(tv);
float_vec = vector<float>(vec.begin(), vec.end());
}
else if (element_type == element::f32)
{
vector<float> vec = read_vector<float>(tv);
float_vec = vector<float>(vec.begin(), vec.end());
}
else if (element_type == element::f64)
{
vector<double> vec = read_vector<double>(tv);
float_vec = vector<float>(vec.begin(), vec.end());
}
else if (element_type == element::i8)
{
vector<int8_t> vec = read_vector<int8_t>(tv);
float_vec = vector<float>(vec.begin(), vec.end());
}
else if (element_type == element::i16)
{
vector<int16_t> vec = read_vector<int16_t>(tv);
float_vec = vector<float>(vec.begin(), vec.end());
}
else if (element_type == element::i32)
{
vector<int32_t> vec = read_vector<int32_t>(tv);
float_vec = vector<float>(vec.begin(), vec.end());
}
else if (element_type == element::i64)
{
vector<int64_t> vec = read_vector<int64_t>(tv);
float_vec = vector<float>(vec.begin(), vec.end());
}
else if (element_type == element::u8)
{
vector<uint8_t> vec = read_vector<uint8_t>(tv);
float_vec = vector<float>(vec.begin(), vec.end());
}
else if (element_type == element::u16)
{
vector<uint16_t> vec = read_vector<uint16_t>(tv);
float_vec = vector<float>(vec.begin(), vec.end());
}
else if (element_type == element::u32)
{
vector<uint32_t> vec = read_vector<uint32_t>(tv);
float_vec = vector<float>(vec.begin(), vec.end());
}
else if (element_type == element::u64)
{
vector<uint64_t> vec = read_vector<uint64_t>(tv);
float_vec = vector<float>(vec.begin(), vec.end());
}
else
{
throw ngraph_error("Unsupported nGraph element type.");
}
return float_vec;
}
// This function traverses the list of ops and verifies that each op's dependencies (its inputs)
// are located earlier in the list. That is enough for the ordering to be valid
bool validate_list(const list<shared_ptr<Node>>& nodes)
......
......@@ -56,6 +56,8 @@ std::vector<T> read_vector(std::shared_ptr<ngraph::runtime::TensorView> tv)
return rc;
}
std::vector<float> read_float_vector(std::shared_ptr<ngraph::runtime::TensorView> tv);
template <typename T>
void write_vector(std::shared_ptr<ngraph::runtime::TensorView> tv, const std::vector<T>& values)
{
......