Commit 23f29e88 (unverified)
Authored Mar 01, 2019 by Michał Karzyński; committed by GitHub on Mar 01, 2019
[Py] Update to new pytest configuration method (#2520)
Parent: 544f13ad
Showing 12 changed files with 55 additions and 32 deletions (+55 -32):
  python/test/__init__.py                   +5   -0
  python/test/conftest.py                   +24  -8
  python/test/ngraph/test_basic.py          +5   -3
  python/test/ngraph/test_convolution.py    +2   -2
  python/test/ngraph/test_normalization.py  +1   -1
  python/test/ngraph/test_ops_matmul.py     +2   -2
  python/test/ngraph/test_ops_reshape.py    +5   -5
  python/test/ngraph/test_ops_unary.py      +2   -2
  python/test/ngraph/test_pooling.py        +2   -2
  python/test/ngraph/test_reduction.py      +4   -4
  python/test/ngraph/util.py                +3   -3
  python/test/test_ops.py                   +0   -0
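In summary: the old scheme hung skip decorators off the deprecated global `pytest.config` object (e.g. `@pytest.config.gpu_skip(reason=...)`); the new scheme uses plain markers such as `@pytest.mark.skip_on_gpu`, which a `pytest_collection_modifyitems` hook in `conftest.py` converts into skips when the matching backend is selected, and publishes the chosen backend as `test.BACKEND_NAME`. A minimal sketch of a test written against the new scheme follows; the marker name and helpers come from the diffs below, while the exact `--backend` command-line spelling is an assumption (only `config.getvalue('backend')` is visible in the diff):

    # Hypothetical test module using the marker-based skips from this commit.
    import numpy as np
    import pytest

    import ngraph as ng
    from test.ngraph.util import get_runtime


    @pytest.mark.skip_on_gpu  # turned into a skip only when the GPU backend is selected
    def test_parameter_addition():
        runtime = get_runtime()  # backend name now comes from test.BACKEND_NAME
        parameter_a = ng.parameter([2, 2], dtype=np.float32, name='A')
        parameter_b = ng.parameter([2, 2], dtype=np.float32, name='B')
        computation = runtime.computation(parameter_a + parameter_b,
                                          parameter_a, parameter_b)
        value_a = np.arange(4, dtype=np.float32).reshape(2, 2)
        value_b = np.ones((2, 2), dtype=np.float32)
        assert np.allclose(computation(value_a, value_b), value_a + value_b)

    # Presumed invocation: pytest python/test --backend=INTERPRETER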
python/test/__init__.py

@@ -13,3 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ******************************************************************************
+
+# test.BACKEND_NAME is a configuration variable determining which
+# nGraph backend tests will use. It's set during pytest configuration time.
+# See `pytest_configure` hook in `conftest.py` for more details.
+BACKEND_NAME = None
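The diffs below consistently use `import test` and read `test.BACKEND_NAME` as an attribute. That matters because the variable is reassigned after import: a `from test import BACKEND_NAME` would copy the placeholder `None` into the importing module and never observe the value assigned later in `pytest_configure`. A short illustration (hypothetical code, not from the commit):

    # Wrong: a `from`-import copies the binding, so NAME keeps whatever value
    # test.BACKEND_NAME had at import time and never sees the later assignment.
    from test import BACKEND_NAME as NAME

    # Right: import the module and read the attribute when it is needed.
    import test

    def current_backend():
        return test.BACKEND_NAME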
python/test/conftest.py

@@ -14,6 +14,7 @@
 # limitations under the License.
 # ******************************************************************************
 import pytest
+import test


 def pytest_addoption(parser):
@@ -22,13 +23,28 @@ def pytest_addoption(parser):
                      help='Select from available backends')


-def pass_method(*args, **kwargs):
-    pass
+def pytest_configure(config):
+    backend_name = config.getvalue('backend')
+    test.BACKEND_NAME = backend_name


-def pytest_configure(config):
-    config.gpu_skip = pytest.mark.skipif(config.getvalue('backend') == 'GPU')
-    config.cpu_skip = pytest.mark.skipif(config.getvalue('backend') == 'CPU')
-    config.nnp_skip = pytest.mark.skipif(config.getvalue('backend') == 'NNP')
-    config.interpreter_skip = pytest.mark.skipif(config.getvalue('backend') == 'INTERPRETER')
-    config.plaidml_skip = pytest.mark.skipif(config.getvalue('backend') == 'PlaidML')
+def pytest_collection_modifyitems(config, items):
+    backend_name = config.getvalue('backend')
+
+    gpu_skip = pytest.mark.skip(reason='Skipping test on the GPU backend.')
+    cpu_skip = pytest.mark.skip(reason='Skipping test on the CPU backend.')
+    nnp_skip = pytest.mark.skip(reason='Skipping test on the NNP backend.')
+    interpreter_skip = pytest.mark.skip(reason='Skipping test on the INTERPRETER backend.')
+    plaidml_skip = pytest.mark.skip(reason='Skipping test on the PlaidML backend.')
+
+    for item in items:
+        if backend_name == 'GPU' and 'skip_on_gpu' in item.keywords:
+            item.add_marker(gpu_skip)
+        if backend_name == 'CPU' and 'skip_on_cpu' in item.keywords:
+            item.add_marker(cpu_skip)
+        if backend_name == 'NNP' and 'skip_on_nnp' in item.keywords:
+            item.add_marker(nnp_skip)
+        if backend_name == 'INTERPRETER' and 'skip_on_interpreter' in item.keywords:
+            item.add_marker(interpreter_skip)
+        if backend_name == 'PlaidML' and 'skip_on_plaidml' in item.keywords:
+            item.add_marker(plaidml_skip)
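Context for the change: reading ad-hoc attributes through the global `pytest.config` object, as the removed `pytest_configure` set up, relies on a mechanism that pytest deprecated in the 4.x series and removed in 5.0; attaching standard `skip` markers during collection is the supported replacement. The five `if` blocks above repeat one pattern: when the selected backend matches, attach a skip marker to every item carrying the corresponding `skip_on_<backend>` keyword. The same hook could be written table-driven; a compact sketch under the same hook API (an alternative formulation, not the code this commit adds):

    import pytest

    _SKIP_KEYWORDS = {
        'GPU': 'skip_on_gpu',
        'CPU': 'skip_on_cpu',
        'NNP': 'skip_on_nnp',
        'INTERPRETER': 'skip_on_interpreter',
        'PlaidML': 'skip_on_plaidml',
    }


    def pytest_collection_modifyitems(config, items):
        backend_name = config.getvalue('backend')
        keyword = _SKIP_KEYWORDS.get(backend_name)
        if keyword is None:
            return  # no skip rules defined for this backend
        skip = pytest.mark.skip(reason='Skipping test on the %s backend.' % backend_name)
        for item in items:
            if keyword in item.keywords:
                item.add_marker(skip)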
python/test/ngraph/test_basic.py

@@ -18,14 +18,16 @@ import pytest
 import json

 import ngraph as ng
-from test.ngraph.util import get_runtime, run_op_node
 from ngraph.exceptions import UserInputError
+import test
+from test.ngraph.util import get_runtime, run_op_node


 @pytest.mark.parametrize('dtype', [np.float32, np.float64,
                                    np.int8, np.int16, np.int32, np.int64,
                                    np.uint8, np.uint16, np.uint32, np.uint64])
-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_simple_computation_on_ndarrays(dtype):
     runtime = get_runtime()
@@ -51,7 +53,7 @@ def test_simple_computation_on_ndarrays(dtype):
 def test_serialization():
     dtype = np.float32
-    backend_name = pytest.config.getoption('backend', default='CPU')
+    backend_name = test.BACKEND_NAME
     shape = [2, 2]
     parameter_a = ng.parameter(shape, dtype=dtype, name='A')
python/test/ngraph/test_convolution.py

@@ -21,7 +21,7 @@ import ngraph as ng
 from test.ngraph.util import get_runtime


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_convolution_2d():
     runtime = get_runtime()
     # input_x should have shape N(batch) x C x H x W
@@ -95,7 +95,7 @@ def test_convolution_2d():
                             dtype=np.float32))


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_convolution_backprop_data():
     runtime = get_runtime()
python/test/ngraph/test_normalization.py

@@ -21,7 +21,7 @@ import ngraph as ng
 from test.ngraph.util import get_runtime


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_lrn():
     input_image_shape = (2, 3, 2, 1)
     input_image = np.arange(int(np.prod(input_image_shape))).reshape(input_image_shape).astype('f')
python/test/ngraph/test_ops_matmul.py

@@ -38,7 +38,7 @@ from test.ngraph.util import run_op_node
     ([2, 3, 4, 5], [5, 2, 3], None, 1),
     ([2, 3, 4, 5], [4, 5, 2, 4], 2, 2),
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_dot(left_shape, right_shape, reduction_axes_count, numpy_axes):
     np.random.seed(133391)
     left_input = -100.0 + np.random.rand(*left_shape) * 200.0
@@ -49,7 +49,7 @@ def test_dot(left_shape, right_shape, reduction_axes_count, numpy_axes):
     assert np.allclose(result, expected)


-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_dot_tensor_scalar():
     np.random.seed(133391)
     left_input = 10.0
python/test/ngraph/test_ops_reshape.py

@@ -20,7 +20,7 @@ import ngraph as ng
 from test.ngraph.util import get_runtime, run_op_numeric_data


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_concat():
     a = np.array([[1, 2], [3, 4]])
     b = np.array([[5, 6]])
@@ -40,7 +40,7 @@ def test_concat():
     (bool, False),
     (bool, np.empty((2, 2), dtype=bool)),
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_constant_from_bool(val_type, value):
     expected = np.array(value, dtype=val_type)
     result = run_op_numeric_data(value, ng.constant, val_type)
@@ -59,7 +59,7 @@ def test_constant_from_bool(val_type, value):
     (np.uint32, np.uint32(123456)),
     (np.uint64, np.uint64(1234567)),
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_constant_from_scalar(val_type, value):
     expected = np.array(value, dtype=val_type)
     result = run_op_numeric_data(value, ng.constant, val_type)
@@ -70,7 +70,7 @@ def test_constant_from_scalar(val_type, value):
     np.float32,
     np.float64,
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_constant_from_float_array(val_type):
     np.random.seed(133391)
     input_data = np.array(-1 + np.random.rand(2, 3, 4) * 2, dtype=val_type)
@@ -88,7 +88,7 @@ def test_constant_from_float_array(val_type):
     (np.uint32, 0, 1024),
     (np.uint64, 0, 16383),
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_constant_from_integer_array(val_type, range_start, range_end):
     np.random.seed(133391)
     input_data = np.array(np.random.randint(range_start, range_end, size=(2, 2)), dtype=val_type)
python/test/ngraph/test_ops_unary.py

@@ -42,7 +42,7 @@ from test.ngraph.util import run_op_numeric_data, run_op_node
     (ng.tan, np.tan, -1., 1.),
     (ng.tanh, np.tanh, -100., 100.),
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
     np.random.seed(133391)
     input_data = range_start + np.random.rand(2, 3, 4) * (range_end - range_start)
@@ -77,7 +77,7 @@ def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
     (ng.tan, np.tan, np.float32(np.pi / 4.0)),
     (ng.tanh, np.tanh, np.float32(0.1234)),
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_unary_op_scalar(ng_api_fn, numpy_fn, input_data):
     expected = numpy_fn(input_data)
python/test/ngraph/test_pooling.py

@@ -25,7 +25,7 @@ def _ndarray_1x1x4x4():
     return np.arange(11, 27, dtype=np.float32).reshape(1, 1, 4, 4)


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_avg_pool_2d(_ndarray_1x1x4x4):
     runtime = get_runtime()
     input_data = _ndarray_1x1x4x4
@@ -74,7 +74,7 @@ def test_avg_pool_2d(_ndarray_1x1x4x4):
     assert np.allclose(result, expected)


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_avg_pooling_3d(_ndarray_1x1x4x4):
     rt = get_runtime()
     data = _ndarray_1x1x4x4
python/test/ngraph/test_reduction.py

@@ -34,7 +34,7 @@ from test.ngraph.util import run_op_node, get_runtime
     (ng.sum, np.sum, (0, 2)),
     (ng.prod, np.prod, (0, 2)),
 ])
-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes):
     shape = [2, 4, 3, 2]
     np.random.seed(133391)
@@ -45,7 +45,7 @@ def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes):
     assert np.allclose(result, expected)


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_argmax():
     runtime = get_runtime()
     input_x = ng.constant(np.array([[9, 2, 10],
@@ -58,7 +58,7 @@ def test_argmax():
                        np.array([1, 3, 0], dtype=np.int32))


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_argmin():
     runtime = get_runtime()
     input_x = ng.constant(np.array([[12, 2, 10],
@@ -71,7 +71,7 @@ def test_argmin():
                        np.array([3, 2, 1], dtype=np.int32))


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_topk():
     runtime = get_runtime()
     input_x = ng.constant(np.array([[9, 2, 10],
python/test/ngraph/util.py

@@ -15,11 +15,12 @@
 # ******************************************************************************
 import numpy as np
-import pytest

 import ngraph as ng
 from string import ascii_uppercase
+import test


 def _get_numpy_dtype(scalar):
     return np.array([scalar]).dtype
@@ -27,8 +28,7 @@ def _get_numpy_dtype(scalar):
 def get_runtime():
     """Return runtime object."""
-    backend_name = pytest.config.getoption('backend', default='CPU')
-    return ng.runtime(backend_name=backend_name)
+    return ng.runtime(backend_name=test.BACKEND_NAME)


 def run_op_node(input_data, op_fun, *args):
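One behavioral difference worth noting: the old `get_runtime` fell back to `'CPU'` via `getoption('backend', default='CPU')`, whereas the new version passes `test.BACKEND_NAME` through unchanged, so outside a pytest session (where `pytest_configure` never runs) it would hand `None` to `ng.runtime`. If the old fallback were wanted, a one-line guard would restore it (an illustrative suggestion, not part of the commit):

    import ngraph as ng
    import test


    def get_runtime():
        """Return runtime object, defaulting to CPU when no backend was configured."""
        return ng.runtime(backend_name=test.BACKEND_NAME or 'CPU')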
python/test/test_ops.py

This diff is collapsed in the original view and is not reproduced here.