Unverified Commit 23f29e88 authored by Michał Karzyński, committed by GitHub

[Py] Update to new pytest configuration method (#2520)

parent 544f13ad
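In short, the commit replaces the skip decorators built on the `pytest.config` global (which recent pytest releases deprecate) with plain custom markers that `conftest.py` resolves at collection time, and it publishes the selected backend as `test.BACKEND_NAME`. A condensed before/after sketch, using a hypothetical test name:

import pytest

# Before: skip decorator pulled from the pytest.config global.
# @pytest.config.gpu_skip(reason='Not implemented')

# After: a plain marker; conftest.py's pytest_collection_modifyitems turns it
# into a real skip when the selected backend is GPU.
@pytest.mark.skip_on_gpu
def test_example():  # hypothetical test name, not part of this commit
    pass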
@@ -13,3 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ******************************************************************************
+
+# test.BACKEND_NAME is a configuration variable determining which
+# nGraph backend tests will use. It's set during pytest configuration time.
+# See `pytest_configure` hook in `conftest.py` for more details.
+BACKEND_NAME = None
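One detail the diff relies on but does not state: because `BACKEND_NAME` starts out as `None` and is only filled in by the `pytest_configure` hook, consumers must read it through the module attribute, which is why the changed files use `import test` followed by `test.BACKEND_NAME`. A minimal sketch of the distinction:

import test

def current_backend():
    # Correct: the attribute lookup happens at call time, after pytest_configure
    # has stored the selected backend.
    return test.BACKEND_NAME

# Incorrect alternative: `from test import BACKEND_NAME` would bind the initial
# None at import time and never see the value set later.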
@@ -14,6 +14,7 @@
 # limitations under the License.
 # ******************************************************************************
 import pytest
+import test


 def pytest_addoption(parser):
@@ -22,13 +23,28 @@ def pytest_addoption(parser):
                      help='Select from available backends')


-def pass_method(*args, **kwargs):
-    pass
-
-
-def pytest_configure(config):
-    config.gpu_skip = pytest.mark.skipif(config.getvalue('backend') == 'GPU')
-    config.cpu_skip = pytest.mark.skipif(config.getvalue('backend') == 'CPU')
-    config.nnp_skip = pytest.mark.skipif(config.getvalue('backend') == 'NNP')
-    config.interpreter_skip = pytest.mark.skipif(config.getvalue('backend') == 'INTERPRETER')
-    config.plaidml_skip = pytest.mark.skipif(config.getvalue('backend') == 'PlaidML')
+def pytest_configure(config):
+    backend_name = config.getvalue('backend')
+    test.BACKEND_NAME = backend_name
+
+
+def pytest_collection_modifyitems(config, items):
+    backend_name = config.getvalue('backend')
+
+    gpu_skip = pytest.mark.skip(reason='Skipping test on the GPU backend.')
+    cpu_skip = pytest.mark.skip(reason='Skipping test on the CPU backend.')
+    nnp_skip = pytest.mark.skip(reason='Skipping test on the NNP backend.')
+    interpreter_skip = pytest.mark.skip(reason='Skipping test on the INTERPRETER backend.')
+    plaidml_skip = pytest.mark.skip(reason='Skipping test on the PlaidML backend.')
+
+    for item in items:
+        if backend_name == 'GPU' and 'skip_on_gpu' in item.keywords:
+            item.add_marker(gpu_skip)
+        if backend_name == 'CPU' and 'skip_on_cpu' in item.keywords:
+            item.add_marker(cpu_skip)
+        if backend_name == 'NNP' and 'skip_on_nnp' in item.keywords:
+            item.add_marker(nnp_skip)
+        if backend_name == 'INTERPRETER' and 'skip_on_interpreter' in item.keywords:
+            item.add_marker(interpreter_skip)
+        if backend_name == 'PlaidML' and 'skip_on_plaidml' in item.keywords:
+            item.add_marker(plaidml_skip)
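Taken together, a hypothetical test module under the new scheme would look like the sketch below (the exact spelling of the backend command-line option is an assumption, since the full `parser.addoption(...)` call is truncated in this diff):

import numpy as np
import pytest

import ngraph as ng
from test.ngraph.util import get_runtime


@pytest.mark.skip_on_gpu  # becomes a real skip when the GPU backend is selected
def test_parameter_addition():
    runtime = get_runtime()  # backend taken from test.BACKEND_NAME
    parameter_a = ng.parameter([2, 2], dtype=np.float32, name='A')
    parameter_b = ng.parameter([2, 2], dtype=np.float32, name='B')
    model = parameter_a + parameter_b
    computation = runtime.computation(model, parameter_a, parameter_b)
    value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
    value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)
    assert np.allclose(computation(value_a, value_b), value_a + value_b)

Running the suite with something like `pytest --backend=GPU` (assumed flag name) would then skip this test, while the other backends run it normally.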
@@ -18,14 +18,16 @@ import pytest
 import json

 import ngraph as ng
-from test.ngraph.util import get_runtime, run_op_node
 from ngraph.exceptions import UserInputError

+import test
+from test.ngraph.util import get_runtime, run_op_node
+

 @pytest.mark.parametrize('dtype', [np.float32, np.float64,
                                    np.int8, np.int16, np.int32, np.int64,
                                    np.uint8, np.uint16, np.uint32, np.uint64])
-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_simple_computation_on_ndarrays(dtype):
     runtime = get_runtime()
@@ -51,7 +53,7 @@ def test_simple_computation_on_ndarrays(dtype):

 def test_serialization():
     dtype = np.float32
-    backend_name = pytest.config.getoption('backend', default='CPU')
+    backend_name = test.BACKEND_NAME
     shape = [2, 2]
     parameter_a = ng.parameter(shape, dtype=dtype, name='A')
...
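One behavioural nuance in `test_serialization`: the old `pytest.config.getoption('backend', default='CPU')` carried an explicit `'CPU'` fallback, whereas the new code relies on whatever default the truncated `parser.addoption(...)` call in `conftest.py` provides. A defensive variant, purely a sketch and not part of this commit, could keep a local fallback:

import test

def backend_or_default(default='CPU'):
    # test.BACKEND_NAME is None until pytest_configure runs; fall back if unset.
    return test.BACKEND_NAME or default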
@@ -21,7 +21,7 @@ import ngraph as ng
 from test.ngraph.util import get_runtime


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_convolution_2d():
     runtime = get_runtime()
     # input_x should have shape N(batch) x C x H x W
@@ -95,7 +95,7 @@ def test_convolution_2d():
                               dtype=np.float32))


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_convolution_backprop_data():
     runtime = get_runtime()
...
@@ -21,7 +21,7 @@ import ngraph as ng
 from test.ngraph.util import get_runtime


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_lrn():
     input_image_shape = (2, 3, 2, 1)
     input_image = np.arange(int(np.prod(input_image_shape))).reshape(input_image_shape).astype('f')
...
@@ -38,7 +38,7 @@ from test.ngraph.util import run_op_node
     ([2, 3, 4, 5], [5, 2, 3], None, 1),
     ([2, 3, 4, 5], [4, 5, 2, 4], 2, 2),
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_dot(left_shape, right_shape, reduction_axes_count, numpy_axes):
     np.random.seed(133391)
     left_input = -100.0 + np.random.rand(*left_shape) * 200.0
@@ -49,7 +49,7 @@ def test_dot(left_shape, right_shape, reduction_axes_count, numpy_axes):
     assert np.allclose(result, expected)


-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_dot_tensor_scalar():
     np.random.seed(133391)
     left_input = 10.0
...
@@ -20,7 +20,7 @@ import ngraph as ng
 from test.ngraph.util import get_runtime, run_op_numeric_data


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_concat():
     a = np.array([[1, 2], [3, 4]])
     b = np.array([[5, 6]])
@@ -40,7 +40,7 @@ def test_concat():
     (bool, False),
     (bool, np.empty((2, 2), dtype=bool)),
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_constant_from_bool(val_type, value):
     expected = np.array(value, dtype=val_type)
     result = run_op_numeric_data(value, ng.constant, val_type)
@@ -59,7 +59,7 @@ def test_constant_from_bool(val_type, value):
     (np.uint32, np.uint32(123456)),
     (np.uint64, np.uint64(1234567)),
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_constant_from_scalar(val_type, value):
     expected = np.array(value, dtype=val_type)
     result = run_op_numeric_data(value, ng.constant, val_type)
@@ -70,7 +70,7 @@ def test_constant_from_scalar(val_type, value):
     np.float32,
     np.float64,
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_constant_from_float_array(val_type):
     np.random.seed(133391)
     input_data = np.array(-1 + np.random.rand(2, 3, 4) * 2, dtype=val_type)
@@ -88,7 +88,7 @@ def test_constant_from_float_array(val_type):
     (np.uint32, 0, 1024),
     (np.uint64, 0, 16383),
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_constant_from_integer_array(val_type, range_start, range_end):
     np.random.seed(133391)
     input_data = np.array(np.random.randint(range_start, range_end, size=(2, 2)), dtype=val_type)
...
@@ -42,7 +42,7 @@ from test.ngraph.util import run_op_numeric_data, run_op_node
     (ng.tan, np.tan, -1., 1.),
     (ng.tanh, np.tanh, -100., 100.),
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
     np.random.seed(133391)
     input_data = range_start + np.random.rand(2, 3, 4) * (range_end - range_start)
@@ -77,7 +77,7 @@ def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
     (ng.tan, np.tan, np.float32(np.pi / 4.0)),
     (ng.tanh, np.tanh, np.float32(0.1234)),
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_unary_op_scalar(ng_api_fn, numpy_fn, input_data):
     expected = numpy_fn(input_data)
...
@@ -25,7 +25,7 @@ def _ndarray_1x1x4x4():
     return np.arange(11, 27, dtype=np.float32).reshape(1, 1, 4, 4)


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_avg_pool_2d(_ndarray_1x1x4x4):
     runtime = get_runtime()
     input_data = _ndarray_1x1x4x4
@@ -74,7 +74,7 @@ def test_avg_pool_2d(_ndarray_1x1x4x4):
     assert np.allclose(result, expected)


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_avg_pooling_3d(_ndarray_1x1x4x4):
     rt = get_runtime()
     data = _ndarray_1x1x4x4
...
@@ -34,7 +34,7 @@ from test.ngraph.util import run_op_node, get_runtime
     (ng.sum, np.sum, (0, 2)),
     (ng.prod, np.prod, (0, 2)),
 ])
-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes):
     shape = [2, 4, 3, 2]
     np.random.seed(133391)
@@ -45,7 +45,7 @@ def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes):
     assert np.allclose(result, expected)


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_argmax():
     runtime = get_runtime()
     input_x = ng.constant(np.array([[9, 2, 10],
@@ -58,7 +58,7 @@ def test_argmax():
                           np.array([1, 3, 0], dtype=np.int32))


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_argmin():
     runtime = get_runtime()
     input_x = ng.constant(np.array([[12, 2, 10],
@@ -71,7 +71,7 @@ def test_argmin():
                           np.array([3, 2, 1], dtype=np.int32))


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_topk():
     runtime = get_runtime()
     input_x = ng.constant(np.array([[9, 2, 10],
...
@@ -15,11 +15,12 @@
 # ******************************************************************************
 import numpy as np
-import pytest

 import ngraph as ng
 from string import ascii_uppercase

+import test
+

 def _get_numpy_dtype(scalar):
     return np.array([scalar]).dtype
@@ -27,8 +28,7 @@ def _get_numpy_dtype(scalar):
 def get_runtime():
     """Return runtime object."""
-    backend_name = pytest.config.getoption('backend', default='CPU')
-    return ng.runtime(backend_name=backend_name)
+    return ng.runtime(backend_name=test.BACKEND_NAME)


 def run_op_node(input_data, op_fun, *args):
...
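Because `get_runtime()` now reads `test.BACKEND_NAME` instead of the `pytest.config` global, the helper is no longer tied to a running pytest session; setting the variable by hand is enough to reuse it elsewhere. A rough sketch under that assumption:

import numpy as np

import test
test.BACKEND_NAME = 'INTERPRETER'  # normally assigned by pytest_configure

import ngraph as ng
from test.ngraph.util import get_runtime

runtime = get_runtime()
parameter_a = ng.parameter([2, 2], dtype=np.float32, name='A')
model = parameter_a + parameter_a
computation = runtime.computation(model, parameter_a)
print(computation(np.ones((2, 2), dtype=np.float32)))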
This diff is collapsed.