Commit c6a3a188 authored by Rostislav Vasilikhin, committed by Vadim Pisarevsky

SoftFloat integrated (#8668)

* everything is put into softfloat.cpp and softfloat.hpp

* WIP: try to integrate softfloat into OpenCV

* extra functions removed

* softfloat made stateless

* CV_EXPORTS added

* operators fixed

* exp added, log: WIP

* log32 fixed

* shorter names; a lot of TODOs

* log64 rewritten

* cbrt32 added

* minor changes, refactoring

* "inline" -> "CV_INLINE"

* cast to bool warnings fixed

* several warnings fixed

* fixed warning about unsigned unary minus

* fixed warnings on type cast

* inline -> CV_INLINE

* special cases processing added (NaNs, Infs, etc.)

* constants for NaN and Inf added

* more macros and helper functions added

* added (or fixed) tests for pow32, pow64, cbrt32

* exp-like functions fixed

* minor changes

* fixed random number generation for tests

* tests for exp32 and exp64: values are compared to SoftFloat-based naive implementation

* minor warning fix

* pow(f, i) 32/64: special cases handling added

* unused functions removed

* refactoring is in progress (not compiling)

* CV_inline added

* unions {uint_t, float_t} removed

* tests compilation fixed

* static const members -> static methods returning const

* reinterpret_cast

* warning fixed

* const-ness fixed

* all FP calculations (even compile-time) are done in SoftFloat + minor fixes

* pow(f, i) removed from interface (can cause incorrect cast) to internals of pow(f, f), tests fixed

* CV_INLINE -> inline

* internal constants moved to .cpp file

* toInt_minMag() methods merged into toInt() methods

* macros moved to .cpp file

* refactoring: types renamed to softfloat and softdouble; explicit constructors, etc.

* toFloat(), toDouble() -> operator float(), operator double()

* removed f32/f64 prefixes from functions names

* toType() methods removed, round() and trunc() functions added

* minor change

* minor changes

* MSVC: warnings fixed

* added int cvRound(), cvFloor(), cvCeil(), cvTrunc(), saturate_cast<T>()

* typo fixed

* type cast fixed
parent d54b1ad8
......@@ -58,6 +58,7 @@
#include "opencv2/core/types.hpp"
#include "opencv2/core/mat.hpp"
#include "opencv2/core/persistence.hpp"
#include "opencv2/core/softfloat.hpp"
/**
@defgroup core Core functionality
......
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/*============================================================================
This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic
Package, Release 3c, by John R. Hauser.
Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
#pragma once
#ifndef softfloat_h
#define softfloat_h 1
#include "cvdef.h"
namespace cv
{
struct softfloat;
struct softdouble;
struct CV_EXPORTS softfloat
{
public:
softfloat() { v = 0; }
softfloat( const softfloat& c) { v = c.v; }
softfloat& operator=( const softfloat& c )
{
if(&c != this) v = c.v;
return *this;
}
static softfloat fromRaw( const uint32_t a ) { softfloat x; x.v = a; return x; }
explicit softfloat( const uint32_t );
explicit softfloat( const uint64_t );
explicit softfloat( const int32_t );
explicit softfloat( const int64_t );
explicit softfloat( const float a ) { Cv32suf s; s.f = a; v = s.u; }
operator softdouble() const;
operator float() const { Cv32suf s; s.u = v; return s.f; }
softfloat operator + (const softfloat&) const;
softfloat operator - (const softfloat&) const;
softfloat operator * (const softfloat&) const;
softfloat operator / (const softfloat&) const;
softfloat operator % (const softfloat&) const;
softfloat operator - () const { softfloat x; x.v = v ^ (1U << 31); return x; }
softfloat& operator += (const softfloat& a) { *this = *this + a; return *this; }
softfloat& operator -= (const softfloat& a) { *this = *this - a; return *this; }
softfloat& operator *= (const softfloat& a) { *this = *this * a; return *this; }
softfloat& operator /= (const softfloat& a) { *this = *this / a; return *this; }
softfloat& operator %= (const softfloat& a) { *this = *this % a; return *this; }
bool operator == ( const softfloat& ) const;
bool operator != ( const softfloat& ) const;
bool operator > ( const softfloat& ) const;
bool operator >= ( const softfloat& ) const;
bool operator < ( const softfloat& ) const;
bool operator <= ( const softfloat& ) const;
bool isNaN() const { return (v & 0x7fffffff) > 0x7f800000; }
bool isInf() const { return (v & 0x7fffffff) == 0x7f800000; }
static softfloat zero() { return softfloat::fromRaw( 0 ); }
static softfloat inf() { return softfloat::fromRaw( 0xFF << 23 ); }
static softfloat nan() { return softfloat::fromRaw( 0x7fffffff ); }
static softfloat one() { return softfloat::fromRaw( 127 << 23 ); }
uint32_t v;
};
/*----------------------------------------------------------------------------
*----------------------------------------------------------------------------*/
struct CV_EXPORTS softdouble
{
public:
softdouble() { v = 0; }
softdouble( const softdouble& c) { v = c.v; }
softdouble& operator=( const softdouble& c )
{
if(&c != this) v = c.v;
return *this;
}
static softdouble fromRaw( const uint64_t a ) { softdouble x; x.v = a; return x; }
explicit softdouble( const uint32_t );
explicit softdouble( const uint64_t );
explicit softdouble( const int32_t );
explicit softdouble( const int64_t );
explicit softdouble( const double a ) { Cv64suf s; s.f = a; v = s.u; }
operator softfloat() const;
operator double() const { Cv64suf s; s.u = v; return s.f; }
softdouble operator + (const softdouble&) const;
softdouble operator - (const softdouble&) const;
softdouble operator * (const softdouble&) const;
softdouble operator / (const softdouble&) const;
softdouble operator % (const softdouble&) const;
softdouble operator - () const { softdouble x; x.v = v ^ (1ULL << 63); return x; }
softdouble& operator += (const softdouble& a) { *this = *this + a; return *this; }
softdouble& operator -= (const softdouble& a) { *this = *this - a; return *this; }
softdouble& operator *= (const softdouble& a) { *this = *this * a; return *this; }
softdouble& operator /= (const softdouble& a) { *this = *this / a; return *this; }
softdouble& operator %= (const softdouble& a) { *this = *this % a; return *this; }
bool operator == ( const softdouble& ) const;
bool operator != ( const softdouble& ) const;
bool operator > ( const softdouble& ) const;
bool operator >= ( const softdouble& ) const;
bool operator < ( const softdouble& ) const;
bool operator <= ( const softdouble& ) const;
bool isNaN() const { return (v & 0x7fffffffffffffff) > 0x7ff0000000000000; }
bool isInf() const { return (v & 0x7fffffffffffffff) == 0x7ff0000000000000; }
static softdouble zero() { return softdouble::fromRaw( 0 ); }
static softdouble inf() { return softdouble::fromRaw( (uint_fast64_t)(0x7FF) << 52 ); }
static softdouble nan() { return softdouble::fromRaw( CV_BIG_INT(0x7FFFFFFFFFFFFFFF) ); }
static softdouble one() { return softdouble::fromRaw( (uint_fast64_t)( 1023) << 52 ); }
uint64_t v;
};
/*----------------------------------------------------------------------------
*----------------------------------------------------------------------------*/
CV_EXPORTS softfloat mulAdd( const softfloat& a, const softfloat& b, const softfloat & c);
CV_EXPORTS softdouble mulAdd( const softdouble& a, const softdouble& b, const softdouble& c);
CV_EXPORTS softfloat sqrt( const softfloat& a );
CV_EXPORTS softdouble sqrt( const softdouble& a );
}
/*----------------------------------------------------------------------------
| Ported from OpenCV and added for usability
*----------------------------------------------------------------------------*/
CV_EXPORTS int cvTrunc(const cv::softfloat& a);
CV_EXPORTS int cvTrunc(const cv::softdouble& a);
CV_EXPORTS int cvRound(const cv::softfloat& a);
CV_EXPORTS int cvRound(const cv::softdouble& a);
CV_EXPORTS int cvFloor(const cv::softfloat& a);
CV_EXPORTS int cvFloor(const cv::softdouble& a);
CV_EXPORTS int cvCeil(const cv::softfloat& a);
CV_EXPORTS int cvCeil(const cv::softdouble& a);
namespace cv
{
template<typename _Tp> static inline _Tp saturate_cast(softfloat a) { return _Tp(a); }
template<typename _Tp> static inline _Tp saturate_cast(softdouble a) { return _Tp(a); }
template<> inline uchar saturate_cast<uchar>(softfloat a) { return (uchar)std::max(std::min(cvRound(a), (int)UCHAR_MAX), 0); }
template<> inline uchar saturate_cast<uchar>(softdouble a) { return (uchar)std::max(std::min(cvRound(a), (int)UCHAR_MAX), 0); }
template<> inline schar saturate_cast<schar>(softfloat a) { return (schar)std::min(std::max(cvRound(a), (int)SCHAR_MIN), (int)SCHAR_MAX); }
template<> inline schar saturate_cast<schar>(softdouble a) { return (schar)std::min(std::max(cvRound(a), (int)SCHAR_MIN), (int)SCHAR_MAX); }
template<> inline ushort saturate_cast<ushort>(softfloat a) { return (ushort)std::max(std::min(cvRound(a), (int)USHRT_MAX), 0); }
template<> inline ushort saturate_cast<ushort>(softdouble a) { return (ushort)std::max(std::min(cvRound(a), (int)USHRT_MAX), 0); }
template<> inline short saturate_cast<short>(softfloat a) { return (short)std::min(std::max(cvRound(a), (int)SHRT_MIN), (int)SHRT_MAX); }
template<> inline short saturate_cast<short>(softdouble a) { return (short)std::min(std::max(cvRound(a), (int)SHRT_MIN), (int)SHRT_MAX); }
template<> inline int saturate_cast<int>(softfloat a) { return cvRound(a); }
template<> inline int saturate_cast<int>(softdouble a) { return cvRound(a); }
// we intentionally do not clip negative numbers, to make -1 become 0xffffffff etc.
template<> inline unsigned saturate_cast<unsigned>(softfloat a) { return cvRound(a); }
template<> inline unsigned saturate_cast<unsigned>(softdouble a) { return cvRound(a); }
inline softfloat min(const softfloat& a, const softfloat& b) { return (a > b) ? b : a; }
inline softdouble min(const softdouble& a, const softdouble& b) { return (a > b) ? b : a; }
inline softfloat max(const softfloat& a, const softfloat& b) { return (a > b) ? a : b; }
inline softdouble max(const softdouble& a, const softdouble& b) { return (a > b) ? a : b; }
inline softfloat abs( softfloat a) { softfloat x; x.v = a.v & ((1U << 31) - 1); return x; }
inline softdouble abs( softdouble a) { softdouble x; x.v = a.v & ((1ULL << 63) - 1); return x; }
CV_EXPORTS softfloat exp( const softfloat& a);
CV_EXPORTS softdouble exp( const softdouble& a);
CV_EXPORTS softfloat log( const softfloat& a );
CV_EXPORTS softdouble log( const softdouble& a );
CV_EXPORTS softfloat pow( const softfloat& a, const softfloat& b);
CV_EXPORTS softdouble pow( const softdouble& a, const softdouble& b);
CV_EXPORTS softfloat cbrt(const softfloat& a);
}
#endif
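For orientation, a minimal usage sketch of the API declared in this header (illustrative only, not part of the commit; the values are made up, and it assumes the opencv2/core.hpp include added above so the softfloat header and the usual OpenCV typedefs are in scope):

#include <opencv2/core.hpp>   // after this commit, pulls in opencv2/core/softfloat.hpp
#include <cstdio>

int main()
{
    using namespace cv;

    // Construction is explicit, either from built-in types or bit-exact from raw IEEE-754 bits
    softfloat a(0.5f);
    softfloat b = softfloat::fromRaw(0x3fc00000);          // bit pattern of 1.5f
    softdouble d(2.0);

    // All arithmetic is done in software, so results are bit-exact and
    // reproducible across platforms regardless of FPU state
    softfloat s   = a + b;                                 // 2.0f
    softfloat fma = mulAdd(a, b, softfloat::one());        // fused multiply-add: 0.5*1.5 + 1 = 1.75

    // Special values and predicates
    std::printf("nan? %d  inf? %d\n", (int)softfloat::nan().isNaN(), (int)softfloat::inf().isInf());

    // Elementary functions added by the commit
    softdouble e = exp(softdouble::one());                 // ~2.71828
    softdouble l = log(d);                                 // ln(2)
    softfloat  c = cbrt(softfloat(27.0f));                 // 3
    softdouble p = pow(d, softdouble(10));                 // 1024

    // Rounding to int and saturating casts
    int   r = cvRound(softdouble(2.5));
    int   f = cvFloor(softfloat(-1.5f));                   // -2
    uchar u = saturate_cast<uchar>(softfloat(300.0f));     // clamped to 255
    unsigned w = saturate_cast<unsigned>(softfloat(-1.0f)); // not clamped: wraps to 0xffffffff

    // Conversion back to hardware floats is explicit via the cast operators
    std::printf("%g %g %g %g %g %g\n", (float)s, (float)fma, (double)e, (double)l, (float)c, (double)p);
    std::printf("%d %d %d %u\n", r, f, (int)u, w);
    return 0;
}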
......@@ -58,6 +58,8 @@
#include "opencv2/core/ocl.hpp"
#endif
#include "opencv2/core/softfloat.hpp"
#include <assert.h>
#include <ctype.h>
#include <float.h>
......
This source diff could not be displayed because it is too large.
......@@ -3068,4 +3068,523 @@ TEST(Core_QR_Solver, accuracy64f)
ASSERT_FALSE(solve(A, B, solutionQR, DECOMP_QR));
}
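// naiveExp() below is a reference implementation used by the exp() tests:
// it decomposes x into sign * m * 2^E with m in [1, 2) taken directly from the
// IEEE-754 bit pattern, evaluates exp(m) with a 20-term Taylor series, and then
// recovers exp(x) as exp(m)^(sign * 2^E) when E >= 0, or by taking |E| square
// roots (after a reciprocal for negative x) when E < 0.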
softdouble naiveExp(softdouble x)
{
int exponent = ((x.v >>52) & 0x7FF) - 1023;
int sign = (((uint64_t)(x.v) >> 63) != 0) ? -1 : 1;
if(sign < 0 && exponent >= 10) return softdouble::inf();
softdouble mantissa;
//mantissa.v = packToF64UI(0, 1023, fracF64UI(x.v));
mantissa.v = ((uint64_t)(1023)<<52) + (((x.v) & UINT64_C( 0x000FFFFFFFFFFFFF )));
//Taylor series for mantissa
uint64 fac[20] = {1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800,
39916800, 479001600, 6227020800, 87178291200, 1307674368000,
20922789888000, 355687428096000, 6402373705728000, 121645100408832000,
2432902008176640000};
softdouble sum = softdouble::one();
// 21! > (2 ** 64)
for(int i = 20; i > 0; i--)
sum += pow(mantissa, softdouble(i))/softdouble(fac[i-1]);
if(exponent >= 0)
{
exponent = (1 << exponent);
return pow(sum, softdouble(exponent*sign));
}
else
{
if(sign < 0) sum = softdouble::one()/sum;
exponent = -exponent;
for(int j = 0; j < exponent; j++)
sum = sqrt(sum);
return sum;
}
}
TEST(Core_SoftFloat, exp32)
{
//special cases
ASSERT_TRUE(exp( softfloat::nan()).isNaN());
ASSERT_TRUE(exp( softfloat::inf()).isInf());
ASSERT_EQ (exp(-softfloat::inf()), softfloat::zero());
//ln(FLT_MAX) ~ 88.722
const float ln_max = 88.722f;
vector<float> inputs;
RNG rng(0);
inputs.push_back(0);
inputs.push_back(1);
inputs.push_back(FLT_MIN);
for(int i = 0; i < 50000; i++)
{
Cv32suf x;
x.fmt.sign = rng() % 2;
x.fmt.exponent = rng() % (10 + 127); //bigger exponent will produce inf
x.fmt.significand = rng() % (1 << 23);
if(x.f > ln_max)
x.f = rng.uniform(0.0f, ln_max);
inputs.push_back(x.f);
}
for(size_t i = 0; i < inputs.size(); i++)
{
float xf = inputs[i];
softfloat x(xf);
softfloat y = exp(x);
ASSERT_TRUE(!y.isNaN());
ASSERT_TRUE(!y.isInf());
ASSERT_GE(y, softfloat::zero());
softfloat ygood = naiveExp(x);
softfloat diff = abs(ygood - y);
const softfloat eps(FLT_EPSILON);
if(diff > eps)
{
ASSERT_LE(diff/max(abs(y), abs(ygood)), eps);
}
}
}
TEST(Core_SoftFloat, exp64)
{
//special cases
ASSERT_TRUE(exp( softdouble::nan()).isNaN());
ASSERT_TRUE(exp( softdouble::inf()).isInf());
ASSERT_EQ (exp(-softdouble::inf()), softdouble::zero());
//ln(DBL_MAX) ~ 709.7827
const double ln_max = 709.7827;
vector<double> inputs;
RNG rng(0);
inputs.push_back(0);
inputs.push_back(1);
inputs.push_back(DBL_MIN);
for(int i = 0; i < 50000; i++)
{
Cv64suf x;
uint64 sign = rng() % 2;
uint64 exponent = rng() % (10 + 1023); //bigger exponent will produce inf
uint64 mantissa = (((long long int)((unsigned int)(rng)) << 32 ) | (unsigned int)(rng)) & ((1LL << 52) - 1);
x.u = (sign << 63) | (exponent << 52) | mantissa;
if(x.f > ln_max)
x.f = rng.uniform(0.0, ln_max);
inputs.push_back(x.f);
}
for(size_t i = 0; i < inputs.size(); i++)
{
double xf = inputs[i];
softdouble x(xf);
softdouble y = exp(x);
ASSERT_TRUE(!y.isNaN());
ASSERT_TRUE(!y.isInf());
ASSERT_GE(y, softdouble::zero());
softdouble ygood = naiveExp(x);
softdouble diff = abs(ygood - y);
const softdouble eps(DBL_EPSILON);
if(diff > eps)
{
ASSERT_LE(diff/max(abs(y), abs(ygood)), softdouble(8192)*eps);
}
}
}
TEST(Core_SoftFloat, log32)
{
const int nValues = 50000;
RNG rng(0);
//special cases
ASSERT_TRUE(log(softfloat::nan()).isNaN());
for(int i = 0; i < nValues; i++)
{
Cv32suf x;
x.fmt.sign = 1;
x.fmt.exponent = rng() % 255;
x.fmt.significand = rng() % (1 << 23);
softfloat x32(x.f);
ASSERT_TRUE(log(x32).isNaN());
}
ASSERT_TRUE(log(softfloat::zero()).isInf());
vector<float> inputs;
inputs.push_back(1);
inputs.push_back(std::exp(1.f));
inputs.push_back(FLT_MIN);
inputs.push_back(FLT_MAX);
for(int i = 0; i < nValues; i++)
{
Cv32suf x;
x.fmt.sign = 0;
x.fmt.exponent = rng() % 255;
x.fmt.significand = rng() % (1 << 23);
inputs.push_back(x.f);
}
for(size_t i = 0; i < inputs.size(); i++)
{
float xf = inputs[i];
softfloat x(xf);
softfloat y = log(x);
ASSERT_TRUE(!y.isNaN());
ASSERT_TRUE(!y.isInf());
softfloat ex = exp(y);
softfloat diff = abs(ex - x);
// 88 is approx estimate of max exp() argument
ASSERT_TRUE(!ex.isInf() || (y > softfloat(88)));
if(!ex.isInf() && diff > softfloat(FLT_EPSILON))
{
ASSERT_LT(diff/max(abs(ex), x), softfloat(0.00001f));
}
}
}
TEST(Core_SoftFloat, log64)
{
const int nValues = 50000;
RNG rng(0);
//special cases
ASSERT_TRUE(log(softdouble::nan()).isNaN());
for(int i = 0; i < nValues; i++)
{
Cv64suf x;
uint64 sign = 1;
uint64 exponent = rng() % 2047;
uint64 mantissa = (((long long int)((unsigned int)(rng)) << 32 ) | (unsigned int)(rng)) & ((1LL << 52) - 1);
x.u = (sign << 63) | (exponent << 52) | mantissa;
softdouble x64(x.f);
ASSERT_TRUE(log(x64).isNaN());
}
ASSERT_TRUE(log(softdouble::zero()).isInf());
vector<double> inputs;
inputs.push_back(1);
inputs.push_back(exp(softdouble::one()));
inputs.push_back(DBL_MIN);
inputs.push_back(DBL_MAX);
for(int i = 0; i < nValues; i++)
{
Cv64suf x;
uint64 sign = 0;
uint64 exponent = rng() % 2047;
uint64 mantissa = (((long long int)((unsigned int)(rng)) << 32 ) | (unsigned int)(rng)) & ((1LL << 52) - 1);
x.u = (sign << 63) | (exponent << 52) | mantissa;
inputs.push_back(abs(x.f));
}
for(size_t i = 0; i < inputs.size(); i++)
{
double xf = inputs[i];
softdouble x(xf);
softdouble y = log(x);
ASSERT_TRUE(!y.isNaN());
ASSERT_TRUE(!y.isInf());
softdouble ex = exp(y);
softdouble diff = abs(ex - x);
// 700 is approx estimate of max exp() argument
ASSERT_TRUE(!ex.isInf() || (y > softdouble(700)));
if(!ex.isInf() && diff > softdouble(DBL_EPSILON))
{
ASSERT_LT(diff/max(abs(ex), x), softdouble(1e-10));
}
}
}
TEST(Core_SoftFloat, cbrt32)
{
vector<float> inputs;
RNG rng(0);
inputs.push_back(0);
inputs.push_back(1);
inputs.push_back(FLT_MAX);
inputs.push_back(FLT_MIN);
for(int i = 0; i < 50000; i++)
{
Cv32suf x;
x.fmt.sign = rng() % 2;
x.fmt.exponent = rng() % 255;
x.fmt.significand = rng() % (1 << 23);
inputs.push_back(x.f);
}
for(size_t i = 0; i < inputs.size(); i++)
{
float xf = inputs[i];
softfloat x(xf);
softfloat y = cbrt(x);
ASSERT_TRUE(!y.isNaN());
ASSERT_TRUE(!y.isInf());
softfloat cube = y*y*y;
softfloat diff = abs(x - cube);
const softfloat eps(FLT_EPSILON);
if(diff > eps)
{
ASSERT_LT(diff/max(abs(x), abs(cube)), softfloat(4)*eps);
}
}
}
TEST(Core_SoftFloat, pow32)
{
const softfloat zero = softfloat::zero(), one = softfloat::one();
const softfloat inf = softfloat::inf(), nan = softfloat::nan();
const size_t nValues = 5000;
RNG rng(0);
//x ** nan == nan
for(size_t i = 0; i < nValues; i++)
{
Cv32suf x;
x.u = rng();
ASSERT_TRUE(pow(softfloat(x.f), nan).isNaN());
}
//x ** inf check
for(size_t i = 0; i < nValues; i++)
{
Cv32suf x;
x.u = rng();
softfloat x32(x.f);
softfloat ax = abs(x32);
if(x32.isNaN())
{
ASSERT_TRUE(pow(x32, inf).isNaN());
}
if(ax > one)
{
ASSERT_TRUE(pow(x32, inf).isInf());
ASSERT_EQ (pow(x32, -inf), zero);
}
if(ax < one && ax > zero)
{
ASSERT_TRUE(pow(x32, -inf).isInf());
ASSERT_EQ (pow(x32, inf), zero);
}
}
//+-1 ** inf
ASSERT_TRUE(pow( one, inf).isNaN());
ASSERT_TRUE(pow(-one, inf).isNaN());
// x ** 0 == 1
for(size_t i = 0; i < nValues; i++)
{
Cv32suf x;
x.u = rng();
ASSERT_EQ(pow(softfloat(x.f), zero), one);
}
// x ** 1 == x
for(size_t i = 0; i < nValues; i++)
{
Cv32suf x;
x.u = rng();
softfloat x32(x.f);
softfloat val = pow(x32, one);
// don't compare val and x32 directly because x != x if x is nan
ASSERT_EQ(val.v, x32.v);
}
// nan ** y == nan, if y != 0
for(size_t i = 0; i < nValues; i++)
{
Cv32suf x;
x.u = rng();
if(!x.u) x.f = FLT_MIN;
softfloat x32(x.f);
ASSERT_TRUE(pow(nan, x32).isNaN());
}
// nan ** 0 == 1
ASSERT_EQ(pow(nan, zero), one);
// inf ** y == 0, if y < 0
// inf ** y == inf, if y > 0
for(size_t i = 0; i < nValues; i++)
{
Cv32suf x;
x.fmt.sign = 0;
x.fmt.exponent = rng() % 255;
x.fmt.significand = rng() % (1 << 23);
softfloat x32 = softfloat(x.f);
ASSERT_TRUE(pow( inf, x32).isInf());
ASSERT_TRUE(pow(-inf, x32).isInf());
ASSERT_EQ(pow( inf, -x32), zero);
ASSERT_EQ(pow(-inf, -x32), zero);
}
// x ** y == (-x) ** y, if y % 2 == 0
// x ** y == - (-x) ** y, if y % 2 == 1
// x ** y == nan, if x < 0 and y is not integer
for(size_t i = 0; i < nValues; i++)
{
Cv32suf x;
x.fmt.sign = 1;
x.fmt.exponent = rng() % 255;
x.fmt.significand = rng() % (1 << 23);
softfloat x32(x.f);
Cv32suf y;
y.fmt.sign = rng() % 2;
//bigger exponent produces integer numbers only
y.fmt.exponent = rng() % (23 + 127);
y.fmt.significand = rng() % (1 << 23);
softfloat y32(y.f);
int yi = cvRound(y32);
if(y32 != softfloat(yi))
ASSERT_TRUE(pow(x32, y32).isNaN());
else if(yi % 2)
ASSERT_EQ(pow(-x32, y32), -pow(x32, y32));
else
ASSERT_EQ(pow(-x32, y32), pow(x32, y32));
}
// (0 ** 0) == 1
ASSERT_EQ(pow(zero, zero), one);
// 0 ** y == inf, if y < 0
// 0 ** y == 0, if y > 0
for(size_t i = 0; i < nValues; i++)
{
Cv32suf x;
x.fmt.sign = 0;
x.fmt.exponent = rng() % 255;
x.fmt.significand = rng() % (1 << 23);
softfloat x32(x.f);
ASSERT_TRUE(pow(zero, -x32).isInf());
if(x32 != one)
ASSERT_EQ(pow(zero, x32), zero);
}
}
TEST(Core_SoftFloat, pow64)
{
const softdouble zero = softdouble::zero(), one = softdouble::one();
const softdouble inf = softdouble::inf(), nan = softdouble::nan();
const size_t nValues = 5000;
RNG rng(0);
//x ** nan == nan
for(size_t i = 0; i < nValues; i++)
{
Cv64suf x;
x.u = ((long long int)((unsigned int)(rng)) << 32 ) | (unsigned int)(rng);
ASSERT_TRUE(pow(softdouble(x.f), nan).isNaN());
}
//x ** inf check
for(size_t i = 0; i < nValues; i++)
{
Cv64suf x;
x.u = ((long long int)((unsigned int)(rng)) << 32 ) | (unsigned int)(rng);
softdouble x64(x.f);
softdouble ax = abs(x64);
if(x64.isNaN())
{
ASSERT_TRUE(pow(x64, inf).isNaN());
}
if(ax > one)
{
ASSERT_TRUE(pow(x64, inf).isInf());
ASSERT_EQ(pow(x64, -inf), zero);
}
if(ax < one && ax > zero)
{
ASSERT_TRUE(pow(x64, -inf).isInf());
ASSERT_EQ(pow(x64, inf), zero);
}
}
//+-1 ** inf
ASSERT_TRUE(pow( one, inf).isNaN());
ASSERT_TRUE(pow(-one, inf).isNaN());
// x ** 0 == 1
for(size_t i = 0; i < nValues; i++)
{
Cv64suf x;
x.u = ((long long int)((unsigned int)(rng)) << 32 ) | (unsigned int)(rng);
ASSERT_EQ(pow(softdouble(x.f), zero), one);
}
// x ** 1 == x
for(size_t i = 0; i < nValues; i++)
{
Cv64suf x;
x.u = ((long long int)((unsigned int)(rng)) << 32 ) | (unsigned int)(rng);
softdouble x64(x.f);
softdouble val = pow(x64, one);
// don't compare val and x64 directly because x != x if x is nan
ASSERT_EQ(val.v, x64.v);
}
// nan ** y == nan, if y != 0
for(size_t i = 0; i < nValues; i++)
{
Cv64suf x;
x.u = ((long long int)((unsigned int)(rng)) << 32 ) | (unsigned int)(rng);
if(!x.u) x.f = DBL_MIN;
softdouble x64(x.f);
ASSERT_TRUE(pow(nan, x64).isNaN());
}
// nan ** 0 == 1
ASSERT_EQ(pow(nan, zero), one);
// inf ** y == 0, if y < 0
// inf ** y == inf, if y > 0
for(size_t i = 0; i < nValues; i++)
{
Cv64suf x;
uint64 sign = 0;
uint64 exponent = rng() % 2047;
uint64 mantissa = (((long long int)((unsigned int)(rng)) << 32 ) | (unsigned int)(rng)) & ((1LL << 52) - 1);
x.u = (sign << 63) | (exponent << 52) | mantissa;
softdouble x64(x.f);
ASSERT_TRUE(pow( inf, x64).isInf());
ASSERT_TRUE(pow(-inf, x64).isInf());
ASSERT_EQ(pow( inf, -x64), zero);
ASSERT_EQ(pow(-inf, -x64), zero);
}
// x ** y == (-x) ** y, if y % 2 == 0
// x ** y == - (-x) ** y, if y % 2 == 1
// x ** y == nan, if x < 0 and y is not integer
for(size_t i = 0; i < nValues; i++)
{
Cv64suf x;
uint64 sign = 1;
uint64 exponent = rng() % 2047;
uint64 mantissa = (((long long int)((unsigned int)(rng)) << 32 ) | (unsigned int)(rng)) & ((1LL << 52) - 1);
x.u = (sign << 63) | (exponent << 52) | mantissa;
softdouble x64(x.f);
Cv64suf y;
sign = rng() % 2;
//bigger exponent produces integer numbers only
//exponent = rng() % (52 + 1023);
//bigger exponent is too big
exponent = rng() % (23 + 1023);
mantissa = (((long long int)((unsigned int)(rng)) << 32 ) | (unsigned int)(rng)) & ((1LL << 52) - 1);
y.u = (sign << 63) | (exponent << 52) | mantissa;
softdouble y64(y.f);
int yi = cvRound(y64);
if(y64 != softdouble(yi))
ASSERT_TRUE(pow(x64, y64).isNaN());
else if(yi % 2)
ASSERT_EQ(pow(-x64, y64), -pow(x64, y64));
else
ASSERT_EQ(pow(-x64, y64), pow(x64, y64));
}
// (0 ** 0) == 1
ASSERT_EQ(pow(zero, zero), one);
// 0 ** y == inf, if y < 0
// 0 ** y == 0, if y > 0
for(size_t i = 0; i < nValues; i++)
{
Cv64suf x;
uint64 sign = 0;
uint64 exponent = rng() % 2047;
uint64 mantissa = (((long long int)((unsigned int)(rng)) << 32 ) | (unsigned int)(rng)) & ((1LL << 52) - 1);
x.u = (sign << 63) | (exponent << 52) | mantissa;
softdouble x64(x.f);
ASSERT_TRUE(pow(zero, -x64).isInf());
if(x64 != one)
ASSERT_EQ(pow(zero, x64), zero);
}
}
/* End of file. */
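The tests above repeatedly construct random IEEE-754 inputs by packing sign, exponent and significand fields into a Cv32suf/Cv64suf union. A standalone sketch of the same idiom (illustrative only; it uses std::mt19937_64 and memcpy instead of cv::RNG and the OpenCV unions):

#include <cstdint>
#include <cstring>
#include <cstdio>
#include <random>

// Compose a double from explicit IEEE-754 fields, as the tests do with Cv64suf.
static double makeDouble(uint64_t sign, uint64_t exponent, uint64_t mantissa)
{
    uint64_t bits = (sign << 63) | ((exponent & 0x7FF) << 52) | (mantissa & ((1ULL << 52) - 1));
    double d;
    std::memcpy(&d, &bits, sizeof(d));   // type-pun via memcpy to avoid aliasing issues
    return d;
}

int main()
{
    std::mt19937_64 rng(0);
    for(int i = 0; i < 5; i++)
    {
        // Keeping the biased exponent below 10 + 1023 bounds |x| by 2^10,
        // mirroring the "bigger exponent will produce inf" guard in the exp64 test.
        uint64_t sign     = rng() % 2;
        uint64_t exponent = rng() % (10 + 1023);
        uint64_t mantissa = rng() & ((1ULL << 52) - 1);
        std::printf("%g\n", makeDouble(sign, exponent, mantissa));
    }
    return 0;
}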
......@@ -10334,7 +10334,12 @@ class TypeWithoutFormatter<T, kConvertibleToInteger> {
// T is not an enum, printing it as an integer is the best we can do
// given that it has no user-defined printer.
static void PrintValue(const T& value, ::std::ostream* os) {
// MSVC warns about implicitly converting from double and float to int for
// possible loss of data, so we need to temporarily disable the
// warning.
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4244)
const internal::BiggestInt kBigInt = value;
GTEST_DISABLE_MSC_WARNINGS_POP_()
*os << kBigInt;
}
};
......