ngraph / Commits / e9e410aa

Commit e9e410aa (unverified)
Authored Aug 01, 2019 by Scott Cyphers, committed by GitHub on Aug 01, 2019
Parents: b0ba5d72, 23f838e5

    Merge branch 'master' into aprocter/cf-dyn-broadcast

Showing 18 changed files with 238 additions and 33 deletions (+238 / -33)
Changed files:

  doc/sphinx/source/python_api/_autosummary/ngraph.ops.rst   (+1 / -0)
  python/ngraph/__init__.py                                  (+1 / -0)
  python/ngraph/impl/op/__init__.py                          (+1 / -0)
  python/ngraph/ops.py                                       (+19 / -1)
  python/pyngraph/ops/fused/gelu.cpp                         (+30 / -0)
  python/pyngraph/ops/fused/gelu.hpp                         (+23 / -0)
  python/pyngraph/ops/regmodule_pyngraph_op.cpp              (+1 / -0)
  python/pyngraph/ops/regmodule_pyngraph_op.hpp              (+1 / -0)
  python/setup.py                                            (+1 / -0)
  python/test/ngraph/test_ops_fused.py                       (+34 / -1)
  src/ngraph/pass/constant_folding.cpp                       (+0 / -0)
  src/ngraph/pass/constant_folding.hpp                       (+12 / -8)
  src/ngraph/runtime/reference/max.hpp                       (+1 / -1)
  test/backend/fused_op.in.cpp                               (+1 / -2)
  test/constant_folding.cpp                                  (+104 / -0)
  test/onnx/onnx_import_rnn.in.cpp                           (+5 / -10)
  test/util/test_case.cpp                                    (+2 / -7)
  test/util/test_case.hpp                                    (+1 / -3)
doc/sphinx/source/python_api/_autosummary/ngraph.ops.rst

@@ -37,6 +37,7 @@ ngraph.ops
     equal
     exp
     floor
+    gelu
     get_output_element
     greater
     greater_eq
python/ngraph/__init__.py

@@ -50,6 +50,7 @@ from ngraph.ops import elu
 from ngraph.ops import equal
 from ngraph.ops import exp
 from ngraph.ops import floor
+from ngraph.ops import gelu
 from ngraph.ops import get_output_element
 from ngraph.ops import greater
 from ngraph.ops import greater_eq
python/ngraph/impl/op/__init__.py

@@ -74,6 +74,7 @@ from _pyngraph.op import Elu
 from _pyngraph.op import Equal
 from _pyngraph.op import Exp
 from _pyngraph.op import Floor
+from _pyngraph.op import Gelu
 from _pyngraph.op import GetOutputElement
 from _pyngraph.op import Greater
 from _pyngraph.op import GreaterEq
python/ngraph/ops.py

@@ -23,7 +23,7 @@ from ngraph.impl import AxisSet, AxisVector, Coordinate, CoordinateDiff, Functio
 from ngraph.impl.op import Abs, Acos, Add, And, Asin, ArgMax, ArgMin, Atan, AvgPool, \
     BatchNormTraining, BatchNormInference, Broadcast, Ceiling, Clamp, Concat, Constant, Convert, \
     Convolution, ConvolutionBackpropData, Cos, Cosh, Divide, Dot, Elu, Equal, Exp, Floor, \
-    GetOutputElement, Greater, GreaterEq, Less, LessEq, Log, LRN, Max, Maximum, MaxPool, \
+    Gelu, GetOutputElement, Greater, GreaterEq, Less, LessEq, Log, LRN, Max, Maximum, MaxPool, \
     Min, Minimum, Multiply, Negative, Not, NotEqual, OneHot, Or, Pad, Parameter, Product, \
     Power, Relu, ReplaceSlice, Reshape, Reverse, Select, Sign, Sin, Sinh, Slice, Softmax, \
     Sqrt, Subtract, Sum, Tan, Tanh, TopK

@@ -527,6 +527,24 @@ def convert(node, new_type, name=None):  # type: (Node, NumericType, str) -> Nod
     return Convert(node, new_element_type)


+@nameable_op
+def gelu(node, name=None):  # type: (NodeInput, str) -> Node
+    r"""Perform Gaussian Error Linear Unit operation element-wise on data from input node.
+
+    Computes GELU function:
+
+    .. math:: f(x) = 0.5\cdot x\cdot(1 + erf(\dfrac{x}{\sqrt{2}}))
+
+    For more information refer to:
+    `Gaussian Error Linear Unit (GELU) <https://arxiv.org/pdf/1606.08415.pdf>`_
+
+    :param node: Input tensor. One of: input node, array or scalar.
+    :param name: Optional output node name.
+    :return: The new node performing a GELU operation on its input data element-wise.
+    """
+    return Gelu(as_node(node))
+
+
 @nameable_op
 def select(selection_node, input_node1, input_node2, name=None):
     # type: (Node, Node, Node, str) -> Node
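As a sanity check, the docstring's formula reproduces the expected values used in the new Python tests further down. A minimal NumPy sketch, independent of ngraph and assuming scipy is available for erf:

    import numpy as np
    from scipy.special import erf

    def gelu_reference(x):
        # f(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
        return 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))

    x = np.array([[-5, 1], [-2, 3]], dtype=np.float32)
    print(gelu_reference(x))
    # approx. [[-1.4901161e-06, 8.4134471e-01], [-4.5500278e-02, 2.9959502e+00]]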
python/pyngraph/ops/fused/gelu.cpp  (new file, mode 100644)

+//*****************************************************************************
+// Copyright 2017-2019 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include "ngraph/op/fused/gelu.hpp"
+#include "pyngraph/ops/fused/gelu.hpp"
+
+namespace py = pybind11;
+
+void regclass_pyngraph_op_Gelu(py::module m)
+{
+    py::class_<ngraph::op::Gelu, std::shared_ptr<ngraph::op::Gelu>, ngraph::op::Op> gelu(m, "Gelu");
+    gelu.doc() = "ngraph.impl.op.Gelu wraps ngraph::op::Gelu";
+    gelu.def(py::init<const std::shared_ptr<ngraph::Node>&>());
+}
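Once this binding is registered (see regmodule_pyngraph_op.cpp below), the fused op should be reachable from Python both as the raw wrapped class and through the ng.gelu helper added in ngraph/ops.py. A hypothetical usage sketch, not taken from the commit:

    import numpy as np
    import ngraph as ng
    from ngraph.impl.op import Gelu

    # Parameter shape [2, 2] is an arbitrary example.
    parameter = ng.parameter([2, 2], name='Data', dtype=np.float32)
    node = Gelu(parameter)       # raw wrapped class; single-Node constructor bound above
    same_op = ng.gelu(parameter) # convenience wrapper from ngraph.ops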
python/pyngraph/ops/fused/gelu.hpp  (new file, mode 100644)

+//*****************************************************************************
+// Copyright 2017-2019 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include <pybind11/pybind11.h>
+
+namespace py = pybind11;
+
+void regclass_pyngraph_op_Gelu(py::module m);
python/pyngraph/ops/regmodule_pyngraph_op.cpp

@@ -54,6 +54,7 @@ void regmodule_pyngraph_op(py::module m_op)
     regclass_pyngraph_op_Equal(m_op);
     regclass_pyngraph_op_Exp(m_op);
     regclass_pyngraph_op_Floor(m_op);
+    regclass_pyngraph_op_Gelu(m_op);
     regclass_pyngraph_op_GetOutputElement(m_op);
     regclass_pyngraph_op_Greater(m_op);
     regclass_pyngraph_op_GreaterEq(m_op);
python/pyngraph/ops/regmodule_pyngraph_op.hpp

@@ -44,6 +44,7 @@
 #include "pyngraph/ops/floor.hpp"
 #include "pyngraph/ops/fused/clamp.hpp"
 #include "pyngraph/ops/fused/elu.hpp"
+#include "pyngraph/ops/fused/gelu.hpp"
 #include "pyngraph/ops/get_output_element.hpp"
 #include "pyngraph/ops/greater.hpp"
 #include "pyngraph/ops/greater_eq.hpp"
python/setup.py

@@ -184,6 +184,7 @@ sources = [
     'pyngraph/ops/equal.cpp',
     'pyngraph/ops/exp.cpp',
     'pyngraph/ops/floor.cpp',
+    'pyngraph/ops/fused/gelu.cpp',
     'pyngraph/ops/greater.cpp',
     'pyngraph/ops/greater_eq.cpp',
     'pyngraph/ops/less.cpp',
python/test/ngraph/test_ops_fused.py

@@ -19,7 +19,7 @@ import ngraph as ng
 from test.ngraph.util import get_runtime


-def test_elu_operator():
+def test_elu_operator_with_parameters():
     runtime = get_runtime()

     data_shape = [2, 2]

@@ -69,6 +69,38 @@ def test_elu_operator_with_scalar():
     assert np.allclose(result, expected)


+def test_gelu_operator_with_parameters():
+    runtime = get_runtime()
+
+    data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32)
+
+    data_shape = [2, 2]
+    parameter_data = ng.parameter(data_shape, name='Data', dtype=np.float32)
+
+    model = ng.gelu(parameter_data)
+    computation = runtime.computation(model, parameter_data)
+
+    result = computation(data_value)
+    expected = np.array([[-1.4901161e-06, 8.4134471e-01],
+                         [-4.5500278e-02, 2.9959502]],
+                        dtype=np.float32)
+    assert np.allclose(result, expected)
+
+
+def test_gelu_operator_with_array():
+    runtime = get_runtime()
+
+    data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32)
+
+    model = ng.gelu(data_value)
+    computation = runtime.computation(model)
+
+    result = computation()
+    expected = np.array([[-1.4901161e-06, 8.4134471e-01],
+                         [-4.5500278e-02, 2.9959502]],
+                        dtype=np.float32)
+    assert np.allclose(result, expected)
+
+
 def test_clamp_operator():
     runtime = get_runtime()

@@ -99,4 +131,5 @@ def test_clamp_operator_with_array():
     result = computation()
     expected = np.clip(data_value, min_value, max_value)
+
     assert np.allclose(result, expected)
src/ngraph/pass/constant_folding.cpp

(diff collapsed in the original page; not expanded here)
src/ngraph/pass/constant_folding.hpp

@@ -43,8 +43,8 @@ public:
         CONVERT,
         SHAPE_OF,
         REVERSE,
-        PRODUCT,
-        SUM,
+        ARITHMETIC_REDUCTION,
+        LOGICAL_REDUCTION,
         CONCAT,
         GATHER,
         SLICE,

@@ -70,8 +70,8 @@ public:
         construct_constant_convert();
         construct_constant_shape_of();
         construct_constant_reverse();
-        construct_constant_product();
-        construct_constant_sum();
+        construct_constant_arithmetic_reduction();
+        construct_constant_logical_reduction();
         construct_constant_concat();
         construct_constant_gather();
         construct_constant_slice();

@@ -104,8 +104,12 @@ public:
             case CFTransformations::CONVERT: construct_constant_convert(); break;
             case CFTransformations::SHAPE_OF: construct_constant_shape_of(); break;
             case CFTransformations::REVERSE: construct_constant_reverse(); break;
-            case CFTransformations::PRODUCT: construct_constant_product(); break;
-            case CFTransformations::SUM: construct_constant_sum(); break;
+            case CFTransformations::ARITHMETIC_REDUCTION:
+                construct_constant_arithmetic_reduction();
+                break;
+            case CFTransformations::LOGICAL_REDUCTION:
+                construct_constant_logical_reduction();
+                break;
             case CFTransformations::CONCAT: construct_constant_concat(); break;
             case CFTransformations::GATHER: construct_constant_gather(); break;
             case CFTransformations::SLICE: construct_constant_slice(); break;

@@ -130,8 +134,8 @@ private:
     void construct_constant_convert();
     void construct_constant_shape_of();
     void construct_constant_reverse();
-    void construct_constant_product();
-    void construct_constant_sum();
+    void construct_constant_arithmetic_reduction();
+    void construct_constant_logical_reduction();
     void construct_constant_concat();
     void construct_constant_gather();
     void construct_constant_slice();
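The PRODUCT and SUM transformations are generalized here into a single ARITHMETIC_REDUCTION transformation (with LOGICAL_REDUCTION covering ops like All and Any): one folding routine can dispatch on the reduction op instead of having a dedicated routine per op. A toy Python sketch of the idea, not the C++ pass itself:

    import numpy as np

    # Hypothetical dispatch table: one constant-folding routine covers every
    # arithmetic reduction instead of one construct_constant_* method per op.
    ARITHMETIC_REDUCTIONS = {'Sum': np.sum, 'Product': np.prod, 'Max': np.max, 'Min': np.min}

    def fold_arithmetic_reduction(op_name, constant_values, reduction_axes):
        # Precompute the reduction so the graph node can be replaced by a Constant.
        return ARITHMETIC_REDUCTIONS[op_name](constant_values, axis=reduction_axes)

    print(fold_arithmetic_reduction('Sum', np.arange(1, 10).reshape(3, 3), 1))  # [ 6 15 24]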
src/ngraph/runtime/reference/max.hpp

@@ -36,7 +36,7 @@ namespace ngraph
                      const AxisSet& reduction_axes)
            {
                T minval = std::numeric_limits<T>::has_infinity
-                              ? -std::numeric_limits<T>::infinity()
+                              ? T(-std::numeric_limits<T>::infinity())
                              : std::numeric_limits<T>::min();

                CoordinateTransform output_transform(out_shape);
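The T(...) wrap presumably forces the infinity branch of the conditional back to type T for element types (such as bfloat16) where unary minus promotes to a wider type. The seed value being chosen for the max reduction can be sketched in Python as:

    import numpy as np

    def max_reduction_identity(dtype):
        # Seed value for a max reduction: -infinity when the element type has
        # one, otherwise the smallest representable value (mirrors max.hpp).
        dtype = np.dtype(dtype)
        if np.issubdtype(dtype, np.floating):
            return dtype.type(-np.inf)  # explicit cast, like the T(...) wrap above
        return np.iinfo(dtype).min

    print(max_reduction_identity(np.float32))  # -inf
    print(max_reduction_identity(np.int32))    # -2147483648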
test/backend/fused_op.in.cpp

@@ -1540,8 +1540,7 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_transpose)
          -0.0270785f,  -0.00680824f, -0.06650258f, 0.08004665f,  0.07918708f,  -0.0724144f,
          0.06256775f,  -0.17838378f, -0.18863615f, 0.20064656f,  0.133717f,    -0.06876295f,
          -0.06398046f, -0.00864975f, 0.19289537f,  -0.01490572f, -0.13673618f, 0.01949645f});
-    test_case.set_tolerance(3);
-    test_case.run();
+    test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
 }

 NGRAPH_TEST(${BACKEND_NAME}, group_conv_transpose_output_shape)
test/constant_folding.cpp

@@ -461,6 +461,110 @@ TEST(constant_folding, const_sum)
     ASSERT_EQ(values_expected, values_out);
 }

+TEST(constant_folding, const_max)
+{
+    Shape input_shape{3, 3};
+
+    vector<int32_t> values_in{1, 2, 3, 4, 5, 6, 7, 8, 9};
+    auto constant = op::Constant::create(element::i32, input_shape, values_in);
+    auto convert = make_shared<op::Max>(constant, AxisSet{1});
+    auto f = make_shared<Function>(convert, ParameterVector{});
+
+    pass::Manager pass_manager;
+    pass_manager.register_pass<pass::ConstantFolding>();
+    pass_manager.run_passes(f);
+
+    ASSERT_EQ(count_ops_of_type<op::Max>(f), 0);
+    ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
+
+    auto new_const =
+        std::dynamic_pointer_cast<op::Constant>(f->get_results().at(0)->get_argument(0));
+    ASSERT_TRUE(new_const);
+    auto values_out = new_const->get_vector<int32_t>();
+
+    vector<int32_t> values_expected{3, 6, 9};
+    ASSERT_EQ(values_expected, values_out);
+}
+
+TEST(constant_folding, const_min)
+{
+    Shape input_shape{3, 3};
+
+    vector<int32_t> values_in{1, 2, 3, 4, 5, 6, 7, 8, 9};
+    auto constant = op::Constant::create(element::i32, input_shape, values_in);
+    auto convert = make_shared<op::Min>(constant, AxisSet{1});
+    auto f = make_shared<Function>(convert, ParameterVector{});
+
+    pass::Manager pass_manager;
+    pass_manager.register_pass<pass::ConstantFolding>();
+    pass_manager.run_passes(f);
+
+    ASSERT_EQ(count_ops_of_type<op::Min>(f), 0);
+    ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
+
+    auto new_const =
+        std::dynamic_pointer_cast<op::Constant>(f->get_results().at(0)->get_argument(0));
+    ASSERT_TRUE(new_const);
+    auto values_out = new_const->get_vector<int32_t>();
+
+    vector<int32_t> values_expected{1, 4, 7};
+    ASSERT_EQ(values_expected, values_out);
+}
+
+TEST(constant_folding, const_all)
+{
+    Shape input_shape{3, 3};
+
+    vector<char> values_in{0, 1, 1, 0, 1, 0, 1, 1, 1};
+    auto constant = op::Constant::create(element::boolean, input_shape, values_in);
+    auto convert = make_shared<op::All>(constant, AxisSet{1});
+    auto f = make_shared<Function>(convert, ParameterVector{});
+
+    pass::Manager pass_manager;
+    pass_manager.register_pass<pass::ConstantFolding>();
+    pass_manager.run_passes(f);
+
+    ASSERT_EQ(count_ops_of_type<op::All>(f), 0);
+    ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
+
+    auto new_const =
+        std::dynamic_pointer_cast<op::Constant>(f->get_results().at(0)->get_argument(0));
+    ASSERT_TRUE(new_const);
+    auto values_out = new_const->get_vector<char>();
+
+    vector<char> values_expected{0, 0, 1};
+    ASSERT_EQ(values_expected, values_out);
+}
+
+TEST(constant_folding, const_any)
+{
+    Shape input_shape{3, 3};
+
+    vector<char> values_in{1, 0, 0, 1, 0, 1, 0, 0, 0};
+    auto constant = op::Constant::create(element::boolean, input_shape, values_in);
+    auto convert = make_shared<op::Any>(constant, AxisSet{1});
+    auto f = make_shared<Function>(convert, ParameterVector{});
+
+    pass::Manager pass_manager;
+    pass_manager.register_pass<pass::ConstantFolding>();
+    pass_manager.run_passes(f);
+
+    ASSERT_EQ(count_ops_of_type<op::Any>(f), 0);
+    ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
+
+    auto new_const =
+        std::dynamic_pointer_cast<op::Constant>(f->get_results().at(0)->get_argument(0));
+    ASSERT_TRUE(new_const);
+    auto values_out = new_const->get_vector<char>();
+
+    vector<char> values_expected{1, 1, 0};
+    ASSERT_EQ(values_expected, values_out);
+}
+
 TEST(constant_folding, const_concat)
 {
     auto constant0 =
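The expected vectors in these four tests are just row-wise reductions of the 3x3 inputs; they can be reproduced with NumPy:

    import numpy as np

    values = np.arange(1, 10).reshape(3, 3)   # [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    print(values.max(axis=1))                 # [3 6 9]  -> const_max
    print(values.min(axis=1))                 # [1 4 7]  -> const_min

    all_in = np.array([0, 1, 1, 0, 1, 0, 1, 1, 1], bool).reshape(3, 3)
    print(all_in.all(axis=1).astype(int))     # [0 0 1]  -> const_all

    any_in = np.array([1, 0, 0, 1, 0, 1, 0, 0, 0], bool).reshape(3, 3)
    print(any_in.any(axis=1).astype(int))     # [1 1 0]  -> const_any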
test/onnx/onnx_import_rnn.in.cpp

@@ -104,8 +104,7 @@ NGRAPH_TEST(onnx_${BACKEND_NAME}, model_lstm_fwd_with_clip)

     // We have to enlarge tolerance bits to 3 - it's only one bit more than default value.
     // The discrepancies may occur at most on 7th decimal position.
-    test_case.set_tolerance(3);
-    test_case.run();
+    test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
 }

 NGRAPH_TEST(onnx_${BACKEND_NAME}, model_lstm_fwd_mixed_seq)

@@ -144,8 +143,7 @@ NGRAPH_TEST(onnx_${BACKEND_NAME}, model_lstm_fwd_mixed_seq)

     // We have to enlarge tolerance bits to 3 - it's only one bit more than default value.
     // The discrepancies may occur at most on 7th decimal position.
-    test_case.set_tolerance(3);
-    test_case.run();
+    test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
 }

 NGRAPH_TEST(onnx_${BACKEND_NAME}, model_lstm_fwd_hardsigmoid_activation)

@@ -201,8 +199,7 @@ NGRAPH_TEST(onnx_${BACKEND_NAME}, model_lstm_fwd_hardsigmoid_activation)
     test_case.add_expected_output<float>(Shape{1, 1, 2}, {0.19017234f, 0.00356848f});

     // The discrepancies occur at most at 18th mantissa bit - 8th decimal position.
-    test_case.set_tolerance(6);
-    test_case.run();
+    test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 4);
 }

 NGRAPH_TEST(onnx_${BACKEND_NAME}, model_lstm_fwd_large_batch_no_clip)

@@ -307,8 +304,7 @@ NGRAPH_TEST(onnx_${BACKEND_NAME}, model_lstm_bdir_short_input_seq)
     test_case.add_expected_output<float>(Shape{2, 1, 2},
                                          {-0.0251062f, 0.0561262f, -0.0318928f, 0.0762679f});

-    test_case.set_tolerance(DEFAULT_FLOAT_TOLERANCE_BITS + 3);
-    test_case.run();
+    test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 3);
 }

 NGRAPH_TEST(onnx_${BACKEND_NAME}, model_lstm_mixed_seq_reverse)

@@ -353,6 +349,5 @@ NGRAPH_TEST(onnx_${BACKEND_NAME}, model_lstm_mixed_seq_reverse)
         Shape{1, 2, 3},
         {0.52497941f, 0.54983425f, 0.5744428f, 1.34960834f, 1.54772296f, 1.65633056f});

-    test_case.set_tolerance(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
-    test_case.run();
+    test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
 }
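For orientation on the comments above: a float32 value carries 24 mantissa bits, so for values near 1.0 one mantissa bit of slack is about 2^-23 ≈ 1.2e-7, i.e. roughly the 7th decimal place. That is why relaxing the default tolerance by a single bit admits exactly the "7th decimal position" discrepancies these tests describe.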
test/util/test_case.cpp

@@ -19,8 +19,9 @@
 #include "gtest/gtest.h"
 #include "ngraph/assertion.hpp"

-void ngraph::test::NgraphTestCase::run()
+void ngraph::test::NgraphTestCase::run(size_t tolerance_bits)
 {
+    m_tolerance_bits = tolerance_bits;
     const auto& function_results = m_function->get_results();
     NGRAPH_CHECK(m_expected_outputs.size() == function_results.size(),
                  "Expected number of outputs is different from the function's number of results.");

@@ -52,12 +53,6 @@ void ngraph::test::NgraphTestCase::run()
     }
 }

-ngraph::test::NgraphTestCase& ngraph::test::NgraphTestCase::set_tolerance(int tolerance_bits)
-{
-    m_tolerance_bits = tolerance_bits;
-    return *this;
-}
-
 ngraph::test::NgraphTestCase& ngraph::test::NgraphTestCase::dump_results(bool dump)
 {
     m_dump_results = dump;
test/util/test_case.hpp

@@ -38,8 +38,6 @@ namespace ngraph
            {
            }

-            NgraphTestCase& set_tolerance(int tolerance_bits);
-
            /// \brief Makes the test case print the expected and computed values to the console. This should only be used for debugging purposes.
            ///
            /// Just before the assertion is done, the current test case will gather expected and computed values,

@@ -130,7 +128,7 @@ namespace ngraph
                add_expected_output(expected_shape, value);
            }

-            void run();
+            void run(size_t tolerance_bits = DEFAULT_FLOAT_TOLERANCE_BITS);

        private:
            template <typename T>
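The tolerance-bits comparison that run() now parameterizes directly (replacing the separate set_tolerance() call) can be illustrated with a minimal Python sketch; this is an approximation of the idea, not the library's implementation:

    def close_to_bits(a, b, tolerance_bits, mantissa_bits=24):
        # Treat two float32 values as equal if they agree in all but the last
        # `tolerance_bits` bits of the mantissa, i.e. their relative error is
        # below 2^-(mantissa_bits - tolerance_bits).
        if a == b:
            return True
        rel_err = abs(a - b) / max(abs(a), abs(b))
        return rel_err < 2.0 ** -(mantissa_bits - tolerance_bits)

    print(close_to_bits(1.0, 1.0 + 1e-7, 2))  # True: difference beyond the 22 matching bits
    print(close_to_bits(1.0, 1.01, 2))        # False: disagreement in high mantissa bits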