Unverified Commit b8e28555 authored by Yixing Lao's avatar Yixing Lao Committed by GitHub

Floating point comparison with ULP, adding close_f and all_close_f (#1068)

parent 680be054
......@@ -33,6 +33,7 @@ set (SRC
cse.cpp
element_type.cpp
file_util.cpp
all_close_f.cpp
inliner.cpp
input_output_assign.cpp
main.cpp
......
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <bitset>
#include <cmath>
#include <cstring>
#include <limits>
#include <sstream>

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close_f.hpp"
using namespace std;
using namespace ngraph;
// Helper union for viewing the raw bit pattern of a 32-bit float.
// NOTE(review): writing one member and reading the other is type punning;
// this is well-defined in C, but technically undefined behavior in C++
// (major compilers support it as an extension). std::memcpy is the
// portable alternative.
union FloatUnion {
    float f;
    uint32_t i;
};
// Returns the IEEE-754 bit pattern of `f` as a 32-character string of
// '0'/'1' characters, most-significant bit first.
//
// Uses memcpy rather than reading the inactive member of a union:
// union type punning is undefined behavior in C++ (it only works as a
// compiler extension), while memcpy of the object representation is
// well-defined and compiles to the same code.
std::string float_to_bits(float f)
{
    uint32_t bits;
    static_assert(sizeof(bits) == sizeof(f), "float must be 32 bits");
    std::memcpy(&bits, &f, sizeof(bits));
    std::stringstream ss;
    ss << std::bitset<32>(bits);
    return ss.str();
}
// Parses a 32-character string of '0'/'1' characters (most-significant
// bit first) into the float whose IEEE-754 bit pattern it denotes.
//
// Throws ngraph_error if the input is not exactly 32 characters long.
// (std::bitset itself throws std::invalid_argument for characters other
// than '0'/'1'.)
//
// Uses memcpy rather than a union write-then-read: union type punning
// is undefined behavior in C++; memcpy of the object representation is
// the well-defined equivalent.
float bits_to_float(const std::string& s)
{
    if (s.size() != 32)
    {
        throw ngraph_error("Input length must be 32");
    }
    std::bitset<32> bs(s);
    uint32_t bits = static_cast<uint32_t>(bs.to_ulong());
    float f;
    static_assert(sizeof(f) == sizeof(bits), "float must be 32 bits");
    std::memcpy(&f, &bits, sizeof(f));
    return f;
}
// Test the exact bounds near +0.f
//
// With mantissa_bits = 8, tolerance_bits = 2
//
// Targeted bit
// |
// v
// s e e e e e e e e m m m m m m m m m m m m m m m m m m m m m m m
// =>| 8 |
// | 2 |<=
//
// [Upper bound]
// Add 1 at this bit
// |
// v
// 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// + 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// ---------------------------------------------------------------
// 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
//
// [Lower bound]
// Minus 1 at this bit
// |
// v
// 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// - 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// ---------------------------------------------------------------
// 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
//
// Convert to 2's complement
// 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
//
// Mask the sign bit
// 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// Verifies the exact ULP bounds of close_f around +0.f with
// mantissa_bits = 8, tolerance_bits = 2 (derivation in the comment
// block above).
TEST(all_close_f, mantissa_8_near_0)
{
    // Ground truth: +0.f
    float expected = bits_to_float("00000000000000000000000000000000");

    // ~3.67342E-40, the exact upper bound
    EXPECT_TRUE(test::close_f(expected, bits_to_float("00000000000001000000000000000000"), 8, 2));
    // ~3.67343E-40, one representable value above the upper bound
    EXPECT_FALSE(test::close_f(expected, bits_to_float("00000000000001000000000000000001"), 8, 2));
    // ~-3.67342E-40, the exact lower bound
    EXPECT_TRUE(test::close_f(expected, bits_to_float("10000000000001000000000000000000"), 8, 2));
    // ~-3.67343E-40, one representable value below the lower bound
    EXPECT_FALSE(test::close_f(expected, bits_to_float("10000000000001000000000000000001"), 8, 2));
}
// Test the exact bounds near -0.f
//
// With mantissa_bits = 8, tolerance_bits = 2
//
// Targeted bit
// |
// v
// s e e e e e e e e m m m m m m m m m m m m m m m m m m m m m m m
// =>| 8 |
// | 2 |<=
//
// [Upper bound]
// Minus 1 at this bit
// |
// v
// 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// - 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// ---------------------------------------------------------------
// 0 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
//
// Convert to 2's complement
// 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
//
// Mask off sign bit
// 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
//
// [Lower bound]
// Add 1 at this bit
// |
// v
// 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// + 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// ---------------------------------------------------------------
// 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// Verifies the exact ULP bounds of close_f around negative zero with
// mantissa_bits = 8, tolerance_bits = 2. The bounds are the same values
// as for +0.f, demonstrating that +0.f and -0.f are treated alike.
TEST(all_close_f, mantissa_8_near_n0)
{
    // -0.f, the ground-truth value (note the sign bit is set)
    float expected = bits_to_float("10000000000000000000000000000000");
    float computed;
    // ~3.67342E-40, the exact upper bound
    computed = bits_to_float("00000000000001000000000000000000");
    EXPECT_TRUE(test::close_f(expected, computed, 8, 2));
    // ~3.67343E-40, the next representable number bigger than upper bound
    computed = bits_to_float("00000000000001000000000000000001");
    EXPECT_FALSE(test::close_f(expected, computed, 8, 2));
    // ~-3.67342E-40, the exact lower bound
    computed = bits_to_float("10000000000001000000000000000000");
    EXPECT_TRUE(test::close_f(expected, computed, 8, 2));
    // ~-3.67343E-40, the next representable number smaller than lower bound
    computed = bits_to_float("10000000000001000000000000000001");
    EXPECT_FALSE(test::close_f(expected, computed, 8, 2));
}
// Test the exact bounds near 1.f
//
// With mantissa_bits = 8, tolerance_bits = 2
//
// Targeted bit
// |
// v
// s e e e e e e e e m m m m m m m m m m m m m m m m m m m m m m m
// =>| 8 |
// | 2 |<=
//
// [Upper bound]
// Add 1 at this bit to get upper bound
// |
// v
// 0 0 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// + 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// ---------------------------------------------------------------
// 0 0 1 1 1 1 1 1 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
//
// [Lower bound]
// Minus 1 at this bit to get lower bound
// |
// v
// 0 0 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// - 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// ---------------------------------------------------------------
// 0 0 1 1 1 1 1 1 0 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// Verifies the exact ULP bounds of close_f around 1.f with
// mantissa_bits = 8, tolerance_bits = 2 (derivation in the comment
// block above).
TEST(all_close_f, mantissa_8_near_1)
{
    // Ground truth: 1.f
    float expected = bits_to_float("00111111100000000000000000000000");

    // 1.03125f, the exact upper bound
    EXPECT_TRUE(test::close_f(expected, bits_to_float("00111111100001000000000000000000"), 8, 2));
    // 1.031250119f, one representable value above the upper bound
    EXPECT_FALSE(test::close_f(expected, bits_to_float("00111111100001000000000000000001"), 8, 2));
    // 0.984375f, the exact lower bound
    EXPECT_TRUE(test::close_f(expected, bits_to_float("00111111011111000000000000000000"), 8, 2));
    // 0.9843749404f, one representable value below the lower bound
    EXPECT_FALSE(test::close_f(expected, bits_to_float("00111111011110111111111111111111"), 8, 2));
}
// Test the exact bounds near -1.f
//
// With mantissa_bits = 8, tolerance_bits = 2
//
// Targeted bit
// |
// v
// s e e e e e e e e m m m m m m m m m m m m m m m m m m m m m m m
// =>| 8 |
// | 2 |<=
//
// [Upper bound]
// Minus 1 at this bit
// |
// v
// 1 0 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// - 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// ---------------------------------------------------------------
// 1 0 1 1 1 1 1 1 0 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
//
// [Lower bound]
// Add 1 at this bit
// |
// v
// 1 0 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// + 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// ---------------------------------------------------------------
// 1 0 1 1 1 1 1 1 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// Verifies the exact ULP bounds of close_f around -1.f with
// mantissa_bits = 8, tolerance_bits = 2 (derivation in the comment
// block above).
TEST(all_close_f, mantissa_8_near_n1)
{
    // Ground truth: -1.f
    float expected = bits_to_float("10111111100000000000000000000000");

    // -0.984375f, the exact upper bound
    EXPECT_TRUE(test::close_f(expected, bits_to_float("10111111011111000000000000000000"), 8, 2));
    // -0.984374940395355224609375f, one representable value above the upper bound
    EXPECT_FALSE(test::close_f(expected, bits_to_float("10111111011110111111111111111111"), 8, 2));
    // -1.03125f, the exact lower bound
    EXPECT_TRUE(test::close_f(expected, bits_to_float("10111111100001000000000000000000"), 8, 2));
    // -1.03125011920928955078125f, one representable value below the lower bound
    EXPECT_FALSE(test::close_f(expected, bits_to_float("10111111100001000000000000000001"), 8, 2));
}
// For intuitive understanding of tightness of bounds in decimal
// Test bounds near 0, 1, 10, 100, 1000 with mantissa_bits = 8, tolerance_bits = 2
//
// Targeted bit
// |
// v
// s e e e e e e e e m m m m m m m m m m m m m m m m m m m m m m m
// =>| 8 |
// | 2 |<=
// Decimal intuition for the tightness of the bounds near 0, 1, 10, 100
// and 1000 with mantissa_bits = 8, tolerance_bits = 2. Each group checks
// the exact upper/lower bound (close) and the next representable value
// outside each bound (not close).
TEST(all_close_f, mantissa_8_near_0_1_10_100_1000)
{
    float expected;
    float upper_bound;
    float bigger_than_upper_bound;
    float lower_bound;
    float smaller_than_lower_bound;

    // Bounds around 0: 0 +- 3.67e-40
    expected = 0.f;                          // 00000000000000000000000000000000
    upper_bound = 3.67342e-40f;              // 00000000000001000000000000000000, approximated
    bigger_than_upper_bound = 3.67343e-40f;  // 00000000000001000000000000000001, approximated
    lower_bound = -3.67342e-40f;             // 10000000000001000000000000000000, approximated
    // BUGFIX: this value must be negative to match the bit pattern in the
    // comment; previously it was +3.67343e-40f, which merely re-tested the
    // positive out-of-bounds case instead of the negative one.
    smaller_than_lower_bound = -3.67343e-40f; // 10000000000001000000000000000001, approximated
    EXPECT_TRUE(test::close_f(expected, upper_bound, 8, 2));
    EXPECT_FALSE(test::close_f(expected, bigger_than_upper_bound, 8, 2));
    EXPECT_TRUE(test::close_f(expected, lower_bound, 8, 2));
    EXPECT_FALSE(test::close_f(expected, smaller_than_lower_bound, 8, 2));

    // Bounds around 1: 1 +- 0.03
    expected = 1.f;                           // 00111111100000000000000000000000
    upper_bound = 1.03125f;                   // 00111111100001000000000000000000
    bigger_than_upper_bound = 1.031250119f;   // 00111111100001000000000000000001
    lower_bound = 0.984375f;                  // 00111111011111000000000000000000
    smaller_than_lower_bound = 0.9843749404f; // 00111111011110111111111111111111
    EXPECT_TRUE(test::close_f(expected, upper_bound, 8, 2));
    EXPECT_FALSE(test::close_f(expected, bigger_than_upper_bound, 8, 2));
    EXPECT_TRUE(test::close_f(expected, lower_bound, 8, 2));
    EXPECT_FALSE(test::close_f(expected, smaller_than_lower_bound, 8, 2));

    // Bounds around 10: 10 +- 0.25
    expected = 10.f;                                      // 01000001001000000000000000000000
    upper_bound = 10.25f;                                 // 01000001001001000000000000000000
    bigger_than_upper_bound = 10.25000095367431640625f;   // 01000001001001000000000000000001
    lower_bound = 9.75f;                                  // 01000001000111000000000000000000
    smaller_than_lower_bound = 9.74999904632568359375f;   // 01000001000110111111111111111111
    EXPECT_TRUE(test::close_f(expected, upper_bound, 8, 2));
    EXPECT_FALSE(test::close_f(expected, bigger_than_upper_bound, 8, 2));
    EXPECT_TRUE(test::close_f(expected, lower_bound, 8, 2));
    EXPECT_FALSE(test::close_f(expected, smaller_than_lower_bound, 8, 2));

    // Bounds around 100: 100 +- 2
    expected = 100.f;                                   // 01000010110010000000000000000000
    upper_bound = 102.f;                                // 01000010110011000000000000000000
    bigger_than_upper_bound = 102.00000762939453125f;   // 01000010110011000000000000000001
    lower_bound = 98.0f;                                // 01000010110001000000000000000000
    smaller_than_lower_bound = 97.99999237060546875f;   // 01000010110000111111111111111111
    EXPECT_TRUE(test::close_f(expected, upper_bound, 8, 2));
    EXPECT_FALSE(test::close_f(expected, bigger_than_upper_bound, 8, 2));
    EXPECT_TRUE(test::close_f(expected, lower_bound, 8, 2));
    EXPECT_FALSE(test::close_f(expected, smaller_than_lower_bound, 8, 2));

    // Bounds around 1000: 1000 +- 16
    expected = 1000.f;                                // 01000100011110100000000000000000
    upper_bound = 1016.f;                             // 01000100011111100000000000000000
    bigger_than_upper_bound = 1016.00006103515625f;   // 01000100011111100000000000000001
    lower_bound = 984.0f;                             // 01000100011101100000000000000000
    smaller_than_lower_bound = 983.99993896484375f;   // 01000100011101011111111111111111
    EXPECT_TRUE(test::close_f(expected, upper_bound, 8, 2));
    EXPECT_FALSE(test::close_f(expected, bigger_than_upper_bound, 8, 2));
    EXPECT_TRUE(test::close_f(expected, lower_bound, 8, 2));
    EXPECT_FALSE(test::close_f(expected, smaller_than_lower_bound, 8, 2));
}
// For intuitive understanding of tightness of bounds in decimal
// Test bounds near 0, 1, 10, 100, 1000 with mantissa_bits = 24, tolerance_bits = 2
//
// Targeted bit
// |
// v
// s e e e e e e e e m m m m m m m m m m m m m m m m m m m m m m m
// =>| 24 |
// | 2 |<=
// Decimal intuition for the tightness of the bounds near 0, 1, 10, 100
// and 1000 with mantissa_bits = 24, tolerance_bits = 2. Each case checks
// the exact upper/lower bound (close) and the next representable value
// outside each bound (not close).
TEST(all_close_f, mantissa_24_near_0_1_10_100_1000)
{
    // Bit patterns (MSB first) for: exact upper bound, one value above
    // it, exact lower bound, one value below it.
    struct BoundsCase
    {
        float expected;
        const char* upper;
        const char* above_upper;
        const char* lower;
        const char* below_lower;
    };
    const BoundsCase cases[] = {
        // Bounds around 0: 0 +- 5.6e-45
        {0.f,
         "00000000000000000000000000000100",
         "00000000000000000000000000000101",
         "10000000000000000000000000000100",
         "10000000000000000000000000000101"},
        // Bounds around 1: 1 +- 4.77e-7
        {1.f,
         "00111111100000000000000000000100",
         "00111111100000000000000000000101",
         "00111111011111111111111111111100",
         "00111111011111111111111111111011"},
        // Bounds around 10: 10 +- 3.81e-6
        {10.f,
         "01000001001000000000000000000100",
         "01000001001000000000000000000101",
         "01000001000111111111111111111100",
         "01000001000111111111111111111011"},
        // Bounds around 100: 100 +- 3.05e-5
        {100.f,
         "01000010110010000000000000000100",
         "01000010110010000000000000000101",
         "01000010110001111111111111111100",
         "01000010110001111111111111111011"},
        // Bounds around 1000: 1000 +- 2.44e-4
        {1000.f,
         "01000100011110100000000000000100",
         "01000100011110100000000000000101",
         "01000100011110011111111111111100",
         "01000100011110011111111111111011"}};

    for (const BoundsCase& c : cases)
    {
        EXPECT_TRUE(test::close_f(c.expected, bits_to_float(c.upper), 24, 2));
        EXPECT_FALSE(test::close_f(c.expected, bits_to_float(c.above_upper), 24, 2));
        EXPECT_TRUE(test::close_f(c.expected, bits_to_float(c.lower), 24, 2));
        EXPECT_FALSE(test::close_f(c.expected, bits_to_float(c.below_lower), 24, 2));
    }
}
// Infinities and NaNs are never considered close by close_f -- not even
// when compared with themselves.
TEST(all_close_f, inf_nan)
{
    float zero = 0.f;
    float pos_inf = numeric_limits<float>::infinity();
    float neg_inf = -numeric_limits<float>::infinity();
    float qnan = numeric_limits<float>::quiet_NaN();
    float snan = numeric_limits<float>::signaling_NaN();

    for (float special : {pos_inf, neg_inf, qnan, snan})
    {
        // Not close to an ordinary value...
        EXPECT_FALSE(test::close_f(zero, special));
        // ...and not close to itself either.
        EXPECT_FALSE(test::close_f(special, special));
    }
}
......@@ -1364,7 +1364,7 @@ NGRAPH_TEST(${BACKEND_NAME}, backwards_tan)
auto pi = 3.14159f;
// Stay away from the asymptotes at 6 and 12 o'clock.
auto slop = 0.1f;
auto slop = 0.2f;
test::Uniform<float> rng_r(-pi / 2 + slop, pi / 2 - slop);
test::Uniform<float> rng_l(pi / 2 + slop, (3 * pi) / 2 - slop);
......
......@@ -28,6 +28,7 @@
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/serializer.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
......@@ -1335,17 +1336,19 @@ NGRAPH_TEST(${BACKEND_NAME}, log)
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
copy_data(
a, vector<float>{expf(1), expf(2), expf(3), expf(4), expf(5), expf(6), expf(7), expf(8)});
vector<float> loga;
for (auto elt : read_vector<float>(a))
{
loga.push_back(logf(elt));
}
copy_data(a, vector<float>{0.125f, 0.25f, 0.5f, 1.f, 2.f, 4.f, 8.f, 16.f});
vector<float> loga{-2.07944154f,
-1.38629436f,
-0.69314718f,
0.00000000f,
0.69314718f,
1.38629436f,
2.07944154f,
2.77258872f};
auto result = backend->create_tensor(element::f32, shape);
backend->call(f, {result}, {a});
EXPECT_TRUE(test::all_close(loga, read_vector<float>(result)));
EXPECT_TRUE(test::all_close_f(loga, read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, maximum)
......@@ -2480,73 +2483,91 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_6d)
NGRAPH_TEST(${BACKEND_NAME}, sin)
{
Shape shape{6};
Shape shape{11};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::Sin>(A), op::ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
float pi = acosf(-1);
auto a = backend->create_tensor(element::f32, shape);
vector<float> input{pi / 2, 0.0f, -0.0f, pi / 6, -pi, pi};
vector<float> input{0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f};
copy_data(a, input);
auto result = backend->create_tensor(element::f32, shape);
std::transform(
input.begin(), input.end(), input.begin(), [](float x) -> float { return sinf(x); });
backend->call(f, {result}, {a});
EXPECT_EQ(input, read_vector<float>(result));
EXPECT_TRUE(test::all_close_f(vector<float>{0.00000000f,
0.24740396f,
-0.24740396f,
0.47942554f,
-0.47942554f,
0.84147098f,
-0.84147098f,
0.90929743f,
-0.90929743f,
-0.75680250f,
0.75680250f},
read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, cos)
{
Shape shape{6};
Shape shape{11};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::Cos>(A), op::ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
float pi = acosf(-1);
auto a = backend->create_tensor(element::f32, shape);
vector<float> input{pi / 2, 0.0f, -0.0f, pi / 3, -pi, pi};
vector<float> input{0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f};
copy_data(a, input);
auto result = backend->create_tensor(element::f32, shape);
std::transform(
input.begin(), input.end(), input.begin(), [](float x) -> float { return cosf(x); });
backend->call(f, {result}, {a});
EXPECT_EQ(input, read_vector<float>(result));
EXPECT_TRUE(test::all_close_f(vector<float>{1.00000000f,
0.96891242f,
0.96891242f,
0.87758256f,
0.87758256f,
0.54030231f,
0.54030231f,
-0.41614684f,
-0.41614684f,
-0.65364362f,
-0.65364362f},
read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, tan)
{
Shape shape{6};
Shape shape{11};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::Tan>(A), op::ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
float pi = acosf(-1);
auto a = backend->create_tensor(element::f32, shape);
vector<float> input{pi / 4, 0.0f, -0.0f, 7 * pi / 4, 3 * pi / 4, 5 * pi / 4};
vector<float> input{0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f};
copy_data(a, input);
auto result = backend->create_tensor(element::f32, shape);
std::transform(
input.begin(), input.end(), input.begin(), [](float x) -> float { return tanf(x); });
backend->call(f, {result}, {a});
EXPECT_TRUE(test::all_close(input, read_vector<float>(result)));
EXPECT_TRUE(test::all_close_f(vector<float>{0.00000000f,
0.25534192f,
-0.25534192f,
0.54630249f,
-0.54630249f,
1.55740772f,
-1.55740772f,
-2.18503986f,
2.18503986f,
1.15782128f,
-1.15782128f},
read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, asin)
{
Shape shape{6};
Shape shape{11};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::Asin>(A), op::ParameterVector{A});
......@@ -2554,20 +2575,27 @@ NGRAPH_TEST(${BACKEND_NAME}, asin)
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
vector<float> input{1.0f, 0.0f, -0.0f, -1.0f, 0.5f, -0.5f};
vector<float> input{-1.f, -0.75f, -0.5f, -0.25f, -0.125f, 0.f, 0.125f, 0.25f, 0.5f, 0.75f, 1.f};
copy_data(a, input);
auto result = backend->create_tensor(element::f32, shape);
std::transform(
input.begin(), input.end(), input.begin(), [](float x) -> float { return asinf(x); });
backend->call(f, {result}, {a});
EXPECT_EQ(input, read_vector<float>(result));
EXPECT_TRUE(test::all_close_f(vector<float>{-1.57079633f,
-0.84806208f,
-0.52359878f,
-0.25268026f,
-0.12532783f,
0.00000000f,
0.12532783f,
0.25268026f,
0.52359878f,
0.84806208f,
1.57079633f},
read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, acos)
{
Shape shape{6};
Shape shape{11};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::Acos>(A), op::ParameterVector{A});
......@@ -2575,20 +2603,27 @@ NGRAPH_TEST(${BACKEND_NAME}, acos)
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
vector<float> input{1.0f, 0.0f, -0.0f, -1.0f, 0.5f, -0.5f};
vector<float> input{-1.f, -0.75f, -0.5f, -0.25f, -0.125f, 0.f, 0.125f, 0.25f, 0.5f, 0.75f, 1.f};
copy_data(a, input);
auto result = backend->create_tensor(element::f32, shape);
std::transform(
input.begin(), input.end(), input.begin(), [](float x) -> float { return acosf(x); });
backend->call(f, {result}, {a});
EXPECT_EQ(input, read_vector<float>(result));
EXPECT_TRUE(test::all_close_f(vector<float>{3.14159265f,
2.41885841f,
2.09439510f,
1.82347658f,
1.69612416f,
1.57079633f,
1.44546850f,
1.31811607f,
1.04719755f,
0.72273425f,
0.00000000f},
read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, atan)
{
Shape shape{6};
Shape shape{11};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::Atan>(A), op::ParameterVector{A});
......@@ -2596,15 +2631,22 @@ NGRAPH_TEST(${BACKEND_NAME}, atan)
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
vector<float> input{1.0f, 0.0f, -0.0f, -1.0f, 0.5f, -0.5f};
vector<float> input{-4.f, -2.f, -1.f, -0.5f, -0.25f, 0.f, 0.25f, 0.5f, 1.f, 2.f, 4.f};
copy_data(a, input);
auto result = backend->create_tensor(element::f32, shape);
std::transform(
input.begin(), input.end(), input.begin(), [](float x) -> float { return atanf(x); });
backend->call(f, {result}, {a});
EXPECT_EQ(input, read_vector<float>(result));
EXPECT_TRUE(test::all_close_f(vector<float>{-1.32581766f,
-1.10714872f,
-0.78539816f,
-0.46364761f,
-0.24497866f,
0.00000000f,
0.24497866f,
0.46364761f,
0.78539816f,
1.10714872f,
1.32581766f},
read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, sinh)
......@@ -2625,7 +2667,7 @@ NGRAPH_TEST(${BACKEND_NAME}, sinh)
input.begin(), input.end(), input.begin(), [](float x) -> float { return sinhf(x); });
backend->call(f, {result}, {a});
EXPECT_EQ(input, read_vector<float>(result));
EXPECT_TRUE(test::all_close_f(input, read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, cosh)
......@@ -2646,7 +2688,7 @@ NGRAPH_TEST(${BACKEND_NAME}, cosh)
input.begin(), input.end(), input.begin(), [](float x) -> float { return coshf(x); });
backend->call(f, {result}, {a});
EXPECT_TRUE(test::all_close(input, read_vector<float>(result)));
EXPECT_TRUE(test::all_close_f(input, read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, tanh)
......@@ -2667,7 +2709,7 @@ NGRAPH_TEST(${BACKEND_NAME}, tanh)
input.begin(), input.end(), input.begin(), [](float x) -> float { return tanhf(x); });
backend->call(f, {result}, {a});
EXPECT_TRUE(test::all_close(input, read_vector<float>(result)));
EXPECT_TRUE(test::all_close_f(input, read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, exp)
......@@ -2684,9 +2726,9 @@ NGRAPH_TEST(${BACKEND_NAME}, exp)
auto result = backend->create_tensor(element::f32, shape);
backend->call(f, {result}, {a});
EXPECT_EQ(
(vector<float>{expf(-4), expf(-3), expf(-2), expf(-1), expf(0), expf(1), expf(2), expf(3)}),
read_vector<float>(result));
EXPECT_TRUE(test::all_close_f(
vector<float>{expf(-4), expf(-3), expf(-2), expf(-1), expf(0), expf(1), expf(2), expf(3)},
read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, slice_scalar)
......@@ -2871,7 +2913,7 @@ NGRAPH_TEST(${BACKEND_NAME}, scalar_constant_int64)
NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_float32)
{
Shape shape{2, 2};
auto r = op::Constant::create(element::f32, shape, {4.75, 4.7, -5.3, 0.0});
auto r = op::Constant::create(element::f32, shape, {4.75, 4.5, -5.25, 0.0});
auto f = make_shared<Function>(r, op::ParameterVector{});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
......@@ -2880,7 +2922,7 @@ NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_float32)
auto result = backend->create_tensor(element::f32, shape);
backend->call(f, {result}, {});
EXPECT_EQ((vector<float>{4.75f, 4.7f, -5.3f, 0.0f}), read_vector<float>(result));
EXPECT_EQ((vector<float>{4.75f, 4.5f, -5.25f, 0.0f}), read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_int64)
......@@ -3216,7 +3258,8 @@ NGRAPH_TEST(${BACKEND_NAME}, sum_3d_eliminate_zero_dim)
EXPECT_EQ((vector<float>{0, 0, 0, 0, 0, 0}), read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, sum_to_scalar_stable)
// TODO: Kahan sum only works in limited cases with CPU / Interpreter backend
NGRAPH_TEST(${BACKEND_NAME}, kahan_sum_to_scalar)
{
Shape shape{2, 2};
auto A = make_shared<op::Parameter>(element::f32, shape);
......@@ -3225,16 +3268,17 @@ NGRAPH_TEST(${BACKEND_NAME}, sum_to_scalar_stable)
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
float epsilon = 9.5367431640625e-7f;
auto a = backend->create_tensor(element::f32, shape);
copy_data(a, vector<float>{1e-6f, -1, 0, 1});
copy_data(a, vector<float>{epsilon, -1.f, 0.f, 1.f});
auto result = backend->create_tensor(element::f32, Shape{});
backend->call(f, {result}, {a});
EXPECT_TRUE(test::all_close(read_vector<float>(result), vector<float>{1e-6f}, 5e-2f));
// EXPECT_EQ(vector<float>{1e-6}, read_vector<float>(result));
EXPECT_TRUE(test::all_close_f(vector<float>{epsilon}, read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, sum_3d_to_vector_stable)
// TODO: Kahan sum only works in limited cases with CPU / Interpreter backend
NGRAPH_TEST(${BACKEND_NAME}, kahan_sum_3d_to_vector)
{
Shape shape_a{3, 3, 3};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
......@@ -3245,13 +3289,17 @@ NGRAPH_TEST(${BACKEND_NAME}, sum_3d_to_vector_stable)
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{1, 1, 1, 1, 1, 1, 1e-4f, 1e-5f, 1e-6f, 1, 1, 1, 1, 1,
1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1});
float epsilon_a = 1.220703125e-4f;
float epsilon_b = 3.0517578125e-5f;
float epsilon_c = 7.62939453125e-6f;
copy_data(a, vector<float>{1, 1, 1, 1, 1, 1, epsilon_a, epsilon_b, epsilon_c,
1, 1, 1, 1, 1, 1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1});
auto result = backend->create_tensor(element::f32, shape_rt);
backend->call(f, {result}, {a});
EXPECT_TRUE(
test::all_close(read_vector<float>(result), vector<float>{1e-4f, 1e-5f, 1e-6f}, 5e-2f));
EXPECT_TRUE(test::all_close_f(vector<float>{epsilon_a, epsilon_b, epsilon_c},
read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, sum_5d_to_scalar)
......@@ -5709,20 +5757,20 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_1d_1channel_1image)
float denom = 3.0;
backend->call(f, {result}, {a});
EXPECT_TRUE(test::all_close(test::NDArray<float, 3>({{{1 / denom,
3 / denom,
3 / denom,
3 / denom,
4 / denom,
5 / denom,
5 / denom,
2 / denom,
2 / denom,
2 / denom,
2 / denom,
0 / denom}}})
.get_vector(),
read_vector<float>(result)));
EXPECT_TRUE(test::all_close_f(test::NDArray<float, 3>({{{1 / denom,
3 / denom,
3 / denom,
3 / denom,
4 / denom,
5 / denom,
5 / denom,
2 / denom,
2 / denom,
2 / denom,
2 / denom,
0 / denom}}})
.get_vector(),
read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_1d_1channel_2image)
......@@ -5747,32 +5795,32 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_1d_1channel_2image)
float denom = 3.0;
backend->call(f, {result}, {a});
EXPECT_TRUE(test::all_close(test::NDArray<float, 3>({{{1 / denom,
3 / denom,
3 / denom,
3 / denom,
4 / denom,
5 / denom,
5 / denom,
2 / denom,
2 / denom,
2 / denom,
2 / denom,
0 / denom}},
{{3 / denom,
4 / denom,
2 / denom,
1 / denom,
0 / denom,
2 / denom,
2 / denom,
3 / denom,
1 / denom,
1 / denom,
1 / denom,
3 / denom}}})
.get_vector(),
read_vector<float>(result)));
EXPECT_TRUE(test::all_close_f(test::NDArray<float, 3>({{{1 / denom,
3 / denom,
3 / denom,
3 / denom,
4 / denom,
5 / denom,
5 / denom,
2 / denom,
2 / denom,
2 / denom,
2 / denom,
0 / denom}},
{{3 / denom,
4 / denom,
2 / denom,
1 / denom,
0 / denom,
2 / denom,
2 / denom,
3 / denom,
1 / denom,
1 / denom,
1 / denom,
3 / denom}}})
.get_vector(),
read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_1d_2channel_2image)
......@@ -5800,57 +5848,57 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_1d_2channel_2image)
float denom = 3.0;
backend->call(f, {result}, {a});
EXPECT_TRUE(test::all_close(test::NDArray<float, 3>({{{1 / denom,
3 / denom,
3 / denom,
3 / denom,
4 / denom,
5 / denom,
5 / denom,
2 / denom,
2 / denom,
2 / denom,
2 / denom,
0 / denom},
{0 / denom,
2 / denom,
2 / denom,
2 / denom,
2 / denom,
5 / denom,
5 / denom,
4 / denom,
3 / denom,
3 / denom,
3 / denom,
1 / denom}},
{{3 / denom,
4 / denom,
2 / denom,
1 / denom,
0 / denom,
2 / denom,
2 / denom,
3 / denom,
1 / denom,
1 / denom,
1 / denom,
3 / denom},
{3 / denom,
1 / denom,
1 / denom,
1 / denom,
3 / denom,
2 / denom,
2 / denom,
0 / denom,
1 / denom,
2 / denom,
4 / denom,
3 / denom}}})
.get_vector(),
read_vector<float>(result)));
EXPECT_TRUE(test::all_close_f(test::NDArray<float, 3>({{{1 / denom,
3 / denom,
3 / denom,
3 / denom,
4 / denom,
5 / denom,
5 / denom,
2 / denom,
2 / denom,
2 / denom,
2 / denom,
0 / denom},
{0 / denom,
2 / denom,
2 / denom,
2 / denom,
2 / denom,
5 / denom,
5 / denom,
4 / denom,
3 / denom,
3 / denom,
3 / denom,
1 / denom}},
{{3 / denom,
4 / denom,
2 / denom,
1 / denom,
0 / denom,
2 / denom,
2 / denom,
3 / denom,
1 / denom,
1 / denom,
1 / denom,
3 / denom},
{3 / denom,
1 / denom,
1 / denom,
1 / denom,
3 / denom,
2 / denom,
2 / denom,
0 / denom,
1 / denom,
2 / denom,
4 / denom,
3 / denom}}})
.get_vector(),
read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image)
......@@ -5897,7 +5945,7 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image)
backend->call(f, {result}, {a});
EXPECT_TRUE(test::all_close(
EXPECT_TRUE(test::all_close_f(
test::NDArray<float, 4>({{{{6 / denom, 8 / denom, 5 / denom}, // img 0 chan 0
{7 / denom, 5 / denom, 3 / denom},
{5 / denom, 2 / denom, 5 / denom},
......@@ -5950,11 +5998,11 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_1channel_1image_strided)
float denom = 2 * 3;
backend->call(f, {result}, {a});
EXPECT_TRUE(test::all_close(test::NDArray<float, 4>({{{{6 / denom, 5 / denom, 4 / denom},
{6 / denom, 5 / denom, 8 / denom},
{6 / denom, 2 / denom, 4 / denom}}}})
.get_vector(),
read_vector<float>(result)));
EXPECT_TRUE(test::all_close_f(test::NDArray<float, 4>({{{{6 / denom, 5 / denom, 4 / denom},
{6 / denom, 5 / denom, 8 / denom},
{6 / denom, 2 / denom, 4 / denom}}}})
.get_vector(),
read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_1channel_1image_padded)
......@@ -6121,7 +6169,7 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_3x3)
auto result = backend->create_tensor(element::f32, shape_r);
backend->call(f, {result}, {a});
EXPECT_TRUE(test::all_close(
EXPECT_TRUE(test::all_close_f(
test::NDArray<float, 4>({{{{0.0f / 1, 1.0f / 2, 1.0f / 3, 1.0f / 2, 0.0f / 1},
{0.0f / 2, 4.0f / 4, 6.0f / 6, 6.0f / 4, 2.0f / 2},
{2.0f / 3, 6.0f / 6, 8.0f / 9, 6.0f / 6, 2.0f / 3},
......@@ -6161,14 +6209,14 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_3x3_strided)
auto result = backend->create_tensor(element::f32, shape_r);
backend->call(f, {result}, {a});
EXPECT_TRUE(test::all_close(test::NDArray<float, 4>({{{{0.0f / 1, 1.0f / 3, 0.0f / 1},
{2.0f / 3, 8.0f / 9, 2.0f / 3},
{2.0f / 1, 2.0f / 3, 0.0f / 1}},
{{3.0f / 1, 10.0f / 3, 2.0f / 1},
{8.0f / 3, 35.0f / 9, 16.0f / 3},
{3.0f / 1, 14.0f / 3, 5.0f / 1}}}})
.get_vector(),
read_vector<float>(result)));
EXPECT_TRUE(test::all_close_f(test::NDArray<float, 4>({{{{0.0f / 1, 1.0f / 3, 0.0f / 1},
{2.0f / 3, 8.0f / 9, 2.0f / 3},
{2.0f / 1, 2.0f / 3, 0.0f / 1}},
{{3.0f / 1, 10.0f / 3, 2.0f / 1},
{8.0f / 3, 35.0f / 9, 16.0f / 3},
{3.0f / 1, 14.0f / 3, 5.0f / 1}}}})
.get_vector(),
read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_3x3_strided_uneven)
......@@ -6196,7 +6244,7 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_3x3_strided_unev
auto result = backend->create_tensor(element::f32, shape_r);
backend->call(f, {result}, {a});
EXPECT_TRUE(test::all_close(
EXPECT_TRUE(test::all_close_f(
test::NDArray<float, 4>(
{{{{0.0f / 1, 1.0f / 2}, {2.0f / 3, 6.0f / 6}, {2.0f / 1, 0.0f / 2}},
{{3.0f / 1, 7.0f / 2}, {8.0f / 3, 27.0f / 6}, {3.0f / 1, 11.0f / 2}}}})
......@@ -7572,14 +7620,14 @@ NGRAPH_TEST(${BACKEND_NAME}, softmax_all)
backend->call(f, {result}, {a});
vector<float> expected{
expf(-3) / d, expf(-2) / d, expf(-1) / d, expf(0) / d, expf(1) / d, expf(2) / d};
EXPECT_TRUE(test::all_close(expected, read_vector<float>(result)));
EXPECT_TRUE(test::all_close_f(expected, read_vector<float>(result)));
// empty AxisSet is the same as "full" AxisSet
f = make_shared<Function>(make_shared<op::Softmax>(A, AxisSet{}), op::ParameterVector{A});
backend = runtime::Backend::create("${BACKEND_NAME}");
backend->call(f, {result}, {a});
EXPECT_TRUE(test::all_close(expected, read_vector<float>(result)));
EXPECT_TRUE(test::all_close_f(expected, read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, softmax_axis)
......@@ -7604,7 +7652,7 @@ NGRAPH_TEST(${BACKEND_NAME}, softmax_axis)
expf(-40) / d1,
expf(-50) / d1,
expf(-60) / d1};
EXPECT_TRUE(test::all_close(expected, read_vector<float>(result)));
EXPECT_TRUE(test::all_close_f(expected, read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, softmax_underflow)
......
......@@ -16,6 +16,7 @@
set (SRC
autodiff/backprop_function.cpp
all_close_f.cpp
test_tools.cpp
benchmark.cpp
test_control.cpp
......
......@@ -39,15 +39,17 @@ namespace ngraph
T rtol = static_cast<T>(1e-5),
T atol = static_cast<T>(1e-8))
{
bool rc = true;
assert(a.size() == b.size());
for (size_t i = 0; i < a.size(); ++i)
{
if (std::abs(a[i] - b[i]) > atol + rtol * std::abs(b[i]))
{
return false;
NGRAPH_INFO << a[i] << " is not close to " << b[i];
rc = false;
}
}
return true;
return rc;
}
/// @brief Same as numpy.allclose
......
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <cmath>
#include "util/all_close_f.hpp"
using namespace std;
using namespace ngraph;
// Gives access to a float's underlying bit pattern: store through `f`,
// read the raw bits back through `i`.
// NOTE(review): union type punning is well-defined in C but formally UB in
// C++ (major compilers support it); memcpy/bit_cast is the strictly-portable form.
union FloatUnion {
    float f;
    uint32_t i;
};
bool test::close_f(float a, float b, int mantissa_bits, int tolerance_bits)
{
// isfinite(a) => !isinf(a) && !isnan(a)
if (!isfinite(a) || !isfinite(b))
{
return false;
}
FloatUnion a_fu{a};
FloatUnion b_fu{b};
uint32_t a_uint = a_fu.i;
uint32_t b_uint = b_fu.i;
// A trick to handle both positive and negative numbers, see https://goo.gl/YbdnFQ
// - If negative: convert to two's complement
// - If positive: mask with sign bit
uint32_t sign_mask = static_cast<uint32_t>(1U) << 31;
a_uint = (sign_mask & a_uint) ? (~a_uint + 1) : (sign_mask | a_uint);
b_uint = (sign_mask & b_uint) ? (~b_uint + 1) : (sign_mask | b_uint);
uint32_t distance = (a_uint >= b_uint) ? (a_uint - b_uint) : (b_uint - a_uint);
// e.g. for float with 24 bit mantissa, 2 bit accuracy, and hard-coded 8 bit exponent_bits
// tolerance_bit_shift = 32 - (1 + 8 + (24 - 1 ) - 2 )
// float_length sign exp mantissa implicit 1 tolerance_bits
uint32_t tolerance_bit_shift = 32 - (1 + 8 + (mantissa_bits - 1) - tolerance_bits);
uint32_t tolerance = static_cast<uint32_t>(1U) << tolerance_bit_shift;
return distance <= tolerance;
}
bool test::all_close_f(const vector<float>& a,
const vector<float>& b,
int mantissa_bits,
int tolerance_bits)
{
bool rc = true;
if (a.size() != b.size())
{
throw ngraph_error("a.size() != b.size() for all_close comparison.");
}
for (size_t i = 0; i < a.size(); ++i)
{
bool is_close_f = close_f(a[i], b[i], mantissa_bits, tolerance_bits);
if (!is_close_f)
{
NGRAPH_INFO << a[i] << " is not close to " << b[i];
rc = false;
}
}
return rc;
}
// ULP comparison of two TensorViews: flattens both to float vectors and
// delegates to the vector overload. Throws ngraph_error on layout mismatch;
// a shape mismatch is reported as plain "not close" (false).
bool test::all_close_f(const std::shared_ptr<runtime::TensorView>& a,
                       const std::shared_ptr<runtime::TensorView>& b,
                       int mantissa_bits,
                       int tolerance_bits)
{
    // Tensors laid out differently in memory cannot be compared element-wise.
    if (*a->get_tensor_view_layout() != *b->get_tensor_view_layout())
    {
        throw ngraph_error("Cannot compare tensors with different layouts");
    }

    if (a->get_shape() != b->get_shape())
    {
        return false;
    }

    vector<float> a_vals = read_float_vector(a);
    vector<float> b_vals = read_float_vector(b);
    return test::all_close_f(a_vals, b_vals, mantissa_bits, tolerance_bits);
}
// ULP comparison of two lists of TensorViews: true iff the lists are the
// same length and every corresponding pair is all-close. Short-circuits on
// the first pair that fails.
bool test::all_close_f(const std::vector<std::shared_ptr<runtime::TensorView>>& as,
                       const std::vector<std::shared_ptr<runtime::TensorView>>& bs,
                       int mantissa_bits,
                       int tolerance_bits)
{
    // Mismatched list lengths can never be all-close.
    if (as.size() != bs.size())
    {
        return false;
    }

    for (size_t idx = 0; idx < as.size(); ++idx)
    {
        bool pair_close = test::all_close_f(as[idx], bs[idx], mantissa_bits, tolerance_bits);
        if (!pair_close)
        {
            return false;
        }
    }
    return true;
}
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include <memory>
#include <vector>
#include "test_tools.hpp"
namespace ngraph
{
    namespace test
    {
        /// @brief Check if the two f32 numbers are close
        /// @param a First number to compare
        /// @param b Second number to compare
        /// @param mantissa_bits The mantissa width of the underlying number before casting to float
        /// @param tolerance_bits Bit tolerance error
        /// @returns True iff the distance between a and b is within 2 ^ tolerance_bits ULP
        ///
        /// References:
        /// - https://en.wikipedia.org/wiki/Unit_in_the_last_place
        /// - https://randomascii.wordpress.com/2012/01/23/stupid-float-tricks-2
        /// - https://github.com/google/googletest/blob/master/googletest/docs/AdvancedGuide.md#floating-point-comparison
        ///
        /// s e e e e e e e e m m m m m m m m m m m m m m m m m m m m m m m
        /// |------------bfloat-----------|
        /// |----------------------------float----------------------------|
        ///
        /// bfloat (s1, e8, m7) has 7 + 1 = 8 bits of mantissa or bit_precision
        /// float (s1, e8, m23) has 23 + 1 = 24 bits of mantissa or bit_precision
        ///
        /// This function uses hard-coded value of 8 bit exponent_bits, so it's only valid for
        /// bfloat and f32.
        bool close_f(float a, float b, int mantissa_bits = 8, int tolerance_bits = 2);

        /// @brief Check if the two floating point vectors are all close
        /// @param a First vector to compare
        /// @param b Second vector to compare
        /// @param mantissa_bits The mantissa width of the underlying number before casting to float
        /// @param tolerance_bits Bit tolerance error
        /// @returns True iff every corresponding pair a[i], b[i] satisfies close_f
        /// @throws ngraph_error if a and b differ in size
        bool all_close_f(const std::vector<float>& a,
                         const std::vector<float>& b,
                         int mantissa_bits = 8,
                         int tolerance_bits = 2);

        /// @brief Check if the two TensorViews are all close in float
        /// @param a First TensorView to compare
        /// @param b Second TensorView to compare
        /// @param mantissa_bits The mantissa width of the underlying number before casting to float
        /// @param tolerance_bits Bit tolerance error
        /// @returns True iff the shapes match and all elements satisfy close_f
        /// @throws ngraph_error if the tensor layouts differ
        bool all_close_f(const std::shared_ptr<runtime::TensorView>& a,
                         const std::shared_ptr<runtime::TensorView>& b,
                         int mantissa_bits = 8,
                         int tolerance_bits = 2);

        /// @brief Check if the two vectors of TensorViews are all close in float
        /// @param as First vector of TensorView to compare
        /// @param bs Second vector of TensorView to compare
        /// @param mantissa_bits The mantissa width of the underlying number before casting to float
        /// @param tolerance_bits Bit tolerance error
        /// @returns True iff as and bs have the same length and every pair is all-close
        bool all_close_f(const std::vector<std::shared_ptr<runtime::TensorView>>& as,
                         const std::vector<std::shared_ptr<runtime::TensorView>>& bs,
                         int mantissa_bits = 8,
                         int tolerance_bits = 2);
    }
}
......@@ -21,6 +21,9 @@
#include "util/autodiff/numeric_derivative.hpp"
#include "util/test_tools.hpp"
// TODO: Consider removing template since only <float> is being used in tests and numerical
// derivative does not work with int types
// TODO: Always compute the numerical derivatives in double
template <typename T>
bool autodiff_numeric_compare(const std::shared_ptr<ngraph::runtime::Backend>& backend,
std::function<std::shared_ptr<ngraph::Function>()> make_graph,
......@@ -28,7 +31,7 @@ bool autodiff_numeric_compare(const std::shared_ptr<ngraph::runtime::Backend>& b
T rtol,
T atol)
{
T delta = static_cast<T>(0.001);
T delta = static_cast<T>(0.0009765625f); // Binary-representable number near 0.001
// Use INTERPRETER to compute numerical derivatives
auto interpreter_backend = ngraph::runtime::Backend::create("INTERPRETER");
......
......@@ -23,6 +23,74 @@
using namespace std;
using namespace ngraph;
// File-local helper: read the tensor's contents as element type T, then
// convert every element to float. Collapses the per-type branches of
// read_float_vector into one place.
template <typename T>
static vector<float> convert_to_float_vector(const shared_ptr<runtime::TensorView>& tv)
{
    vector<T> vec = read_vector<T>(tv);
    return vector<float>(vec.begin(), vec.end());
}

// Reads the contents of `tv` as a vector<float>, converting from whatever
// element type the tensor actually holds (boolean, f32/f64, i8-i64, u8-u64).
// Throws ngraph_error for any other element type.
vector<float> read_float_vector(shared_ptr<runtime::TensorView> tv)
{
    element::Type element_type = tv->get_tensor_view_layout()->get_element_type();

    // boolean tensors are stored as char in read_vector
    if (element_type == element::boolean)
    {
        return convert_to_float_vector<char>(tv);
    }
    else if (element_type == element::f32)
    {
        return convert_to_float_vector<float>(tv);
    }
    else if (element_type == element::f64)
    {
        return convert_to_float_vector<double>(tv);
    }
    else if (element_type == element::i8)
    {
        return convert_to_float_vector<int8_t>(tv);
    }
    else if (element_type == element::i16)
    {
        return convert_to_float_vector<int16_t>(tv);
    }
    else if (element_type == element::i32)
    {
        return convert_to_float_vector<int32_t>(tv);
    }
    else if (element_type == element::i64)
    {
        return convert_to_float_vector<int64_t>(tv);
    }
    else if (element_type == element::u8)
    {
        return convert_to_float_vector<uint8_t>(tv);
    }
    else if (element_type == element::u16)
    {
        return convert_to_float_vector<uint16_t>(tv);
    }
    else if (element_type == element::u32)
    {
        return convert_to_float_vector<uint32_t>(tv);
    }
    else if (element_type == element::u64)
    {
        return convert_to_float_vector<uint64_t>(tv);
    }

    throw ngraph_error("Unsupported nGraph element type.");
}
// This function traverses the list of ops and verifies that each op's dependencies (its inputs)
// is located earlier in the list. That is enough to be valid
bool validate_list(const list<shared_ptr<Node>>& nodes)
......
......@@ -56,6 +56,8 @@ std::vector<T> read_vector(std::shared_ptr<ngraph::runtime::TensorView> tv)
return rc;
}
std::vector<float> read_float_vector(std::shared_ptr<ngraph::runtime::TensorView> tv);
template <typename T>
void write_vector(std::shared_ptr<ngraph::runtime::TensorView> tv, const std::vector<T>& values)
{
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment