Unverified Commit 23f29e88 authored by Michał Karzyński, committed by GitHub

[Py] Update to new pytest configuration method (#2520)

parent 544f13ad
......@@ -13,3 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
# test.BACKEND_NAME is a configuration variable that determines which
# nGraph backend the tests will use. It is set at pytest configuration time.
# See the `pytest_configure` hook in `conftest.py` for more details.
BACKEND_NAME = None
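
This module-level variable is the hand-off point between the pytest command line and the test code: `conftest.py` writes the selected backend name into the `test` package while pytest is being configured, and the helpers and test modules read it back afterwards. A minimal sketch of the read side (the `'CPU'` fallback is illustrative only; the hook always assigns a value before any test runs):

import test

def current_backend():
    # Filled in by the pytest_configure hook in conftest.py;
    # the fallback is for illustration only.
    return test.BACKEND_NAME or 'CPU'
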
......@@ -14,6 +14,7 @@
# limitations under the License.
# ******************************************************************************
import pytest
import test
def pytest_addoption(parser):
......@@ -22,13 +23,28 @@ def pytest_addoption(parser):
help='Select from available backends')
def pass_method(*args, **kwargs):
pass
def pytest_configure(config):
backend_name = config.getvalue('backend')
test.BACKEND_NAME = backend_name
def pytest_configure(config):
config.gpu_skip = pytest.mark.skipif(config.getvalue('backend') == 'GPU')
config.cpu_skip = pytest.mark.skipif(config.getvalue('backend') == 'CPU')
config.nnp_skip = pytest.mark.skipif(config.getvalue('backend') == 'NNP')
config.interpreter_skip = pytest.mark.skipif(config.getvalue('backend') == 'INTERPRETER')
config.plaidml_skip = pytest.mark.skipif(config.getvalue('backend') == 'PlaidML')
def pytest_collection_modifyitems(config, items):
backend_name = config.getvalue('backend')
gpu_skip = pytest.mark.skip(reason='Skipping test on the GPU backend.')
cpu_skip = pytest.mark.skip(reason='Skipping test on the CPU backend.')
nnp_skip = pytest.mark.skip(reason='Skipping test on the NNP backend.')
interpreter_skip = pytest.mark.skip(reason='Skipping test on the INTERPRETER backend.')
plaidml_skip = pytest.mark.skip(reason='Skipping test on the PlaidML backend.')
for item in items:
if backend_name == 'GPU' and 'skip_on_gpu' in item.keywords:
item.add_marker(gpu_skip)
if backend_name == 'CPU' and 'skip_on_cpu' in item.keywords:
item.add_marker(cpu_skip)
if backend_name == 'NNP' and 'skip_on_nnp' in item.keywords:
item.add_marker(nnp_skip)
if backend_name == 'INTERPRETER' and 'skip_on_interpreter' in item.keywords:
item.add_marker(interpreter_skip)
if backend_name == 'PlaidML' and 'skip_on_plaidml' in item.keywords:
item.add_marker(plaidml_skip)
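
With these hooks in place, a test opts out of a backend with a plain marker and the skip is applied at collection time, instead of each test file touching `pytest.config` at import time. A hedged sketch of how a test module and the command line fit together (the module and test names are hypothetical; `--backend` is the option registered in `pytest_addoption` above):

# hypothetical test module
import pytest

@pytest.mark.skip_on_gpu  # converted into a skip when --backend=GPU is selected
def test_feature_unsupported_on_gpu():
    assert 2 + 2 == 4

# assumed invocation from the repository root:
#   pytest --backend=GPU python/test

Recent pytest releases warn about unknown marks, so the `skip_on_*` names may also need to be declared under `markers` in the project's pytest configuration.
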
......@@ -18,14 +18,16 @@ import pytest
import json
import ngraph as ng
from test.ngraph.util import get_runtime, run_op_node
from ngraph.exceptions import UserInputError
import test
from test.ngraph.util import get_runtime, run_op_node
@pytest.mark.parametrize('dtype', [np.float32, np.float64,
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64])
@pytest.config.gpu_skip(reason='Not implemented')
@pytest.mark.skip_on_gpu
def test_simple_computation_on_ndarrays(dtype):
runtime = get_runtime()
......@@ -51,7 +53,7 @@ def test_simple_computation_on_ndarrays(dtype):
def test_serialization():
dtype = np.float32
backend_name = pytest.config.getoption('backend', default='CPU')
backend_name = test.BACKEND_NAME
shape = [2, 2]
parameter_a = ng.parameter(shape, dtype=dtype, name='A')
......
......@@ -21,7 +21,7 @@ import ngraph as ng
from test.ngraph.util import get_runtime
@pytest.config.gpu_skip(reason='Not implemented')
@pytest.mark.skip_on_gpu
def test_convolution_2d():
runtime = get_runtime()
# input_x should have shape N(batch) x C x H x W
......@@ -95,7 +95,7 @@ def test_convolution_2d():
dtype=np.float32))
@pytest.config.gpu_skip(reason='Not implemented')
@pytest.mark.skip_on_gpu
def test_convolution_backprop_data():
runtime = get_runtime()
......
......@@ -21,7 +21,7 @@ import ngraph as ng
from test.ngraph.util import get_runtime
@pytest.config.gpu_skip(reason='Not implemented')
@pytest.mark.skip_on_gpu
def test_lrn():
input_image_shape = (2, 3, 2, 1)
input_image = np.arange(int(np.prod(input_image_shape))).reshape(input_image_shape).astype('f')
......
......@@ -38,7 +38,7 @@ from test.ngraph.util import run_op_node
([2, 3, 4, 5], [5, 2, 3], None, 1),
([2, 3, 4, 5], [4, 5, 2, 4], 2, 2),
])
@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
@pytest.mark.skip_on_gpu # under investigation, runtime error is: function failed to compile
def test_dot(left_shape, right_shape, reduction_axes_count, numpy_axes):
np.random.seed(133391)
left_input = -100.0 + np.random.rand(*left_shape) * 200.0
......@@ -49,7 +49,7 @@ def test_dot(left_shape, right_shape, reduction_axes_count, numpy_axes):
assert np.allclose(result, expected)
@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
@pytest.mark.skip_on_gpu # under investigation, runtime error is: function failed to compile
def test_dot_tensor_scalar():
np.random.seed(133391)
left_input = 10.0
......
......@@ -20,7 +20,7 @@ import ngraph as ng
from test.ngraph.util import get_runtime, run_op_numeric_data
@pytest.config.gpu_skip(reason='Not implemented')
@pytest.mark.skip_on_gpu
def test_concat():
a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])
......@@ -40,7 +40,7 @@ def test_concat():
(bool, False),
(bool, np.empty((2, 2), dtype=bool)),
])
@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
@pytest.mark.skip_on_gpu # under investigation, runtime error is: function failed to compile
def test_constant_from_bool(val_type, value):
expected = np.array(value, dtype=val_type)
result = run_op_numeric_data(value, ng.constant, val_type)
......@@ -59,7 +59,7 @@ def test_constant_from_bool(val_type, value):
(np.uint32, np.uint32(123456)),
(np.uint64, np.uint64(1234567)),
])
@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
@pytest.mark.skip_on_gpu # under investigation, runtime error is: function failed to compile
def test_constant_from_scalar(val_type, value):
expected = np.array(value, dtype=val_type)
result = run_op_numeric_data(value, ng.constant, val_type)
......@@ -70,7 +70,7 @@ def test_constant_from_scalar(val_type, value):
np.float32,
np.float64,
])
@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
@pytest.mark.skip_on_gpu # under investigation, runtime error is: function failed to compile
def test_constant_from_float_array(val_type):
np.random.seed(133391)
input_data = np.array(-1 + np.random.rand(2, 3, 4) * 2, dtype=val_type)
......@@ -88,7 +88,7 @@ def test_constant_from_float_array(val_type):
(np.uint32, 0, 1024),
(np.uint64, 0, 16383),
])
@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
@pytest.mark.skip_on_gpu # under investigation, runtime error is: function failed to compile
def test_constant_from_integer_array(val_type, range_start, range_end):
np.random.seed(133391)
input_data = np.array(np.random.randint(range_start, range_end, size=(2, 2)), dtype=val_type)
......
......@@ -42,7 +42,7 @@ from test.ngraph.util import run_op_numeric_data, run_op_node
(ng.tan, np.tan, -1., 1.),
(ng.tanh, np.tanh, -100., 100.),
])
@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
@pytest.mark.skip_on_gpu # under investigation, runtime error is: function failed to compile
def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
np.random.seed(133391)
input_data = range_start + np.random.rand(2, 3, 4) * (range_end - range_start)
......@@ -77,7 +77,7 @@ def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
(ng.tan, np.tan, np.float32(np.pi / 4.0)),
(ng.tanh, np.tanh, np.float32(0.1234)),
])
@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
@pytest.mark.skip_on_gpu # under investigation, runtime error is: function failed to compile
def test_unary_op_scalar(ng_api_fn, numpy_fn, input_data):
expected = numpy_fn(input_data)
......
......@@ -25,7 +25,7 @@ def _ndarray_1x1x4x4():
return np.arange(11, 27, dtype=np.float32).reshape(1, 1, 4, 4)
@pytest.config.gpu_skip(reason='Not implemented')
@pytest.mark.skip_on_gpu
def test_avg_pool_2d(_ndarray_1x1x4x4):
runtime = get_runtime()
input_data = _ndarray_1x1x4x4
......@@ -74,7 +74,7 @@ def test_avg_pool_2d(_ndarray_1x1x4x4):
assert np.allclose(result, expected)
@pytest.config.gpu_skip(reason='Not implemented')
@pytest.mark.skip_on_gpu
def test_avg_pooling_3d(_ndarray_1x1x4x4):
rt = get_runtime()
data = _ndarray_1x1x4x4
......
......@@ -34,7 +34,7 @@ from test.ngraph.util import run_op_node, get_runtime
(ng.sum, np.sum, (0, 2)),
(ng.prod, np.prod, (0, 2)),
])
@pytest.config.gpu_skip(reason='Not implemented')
@pytest.mark.skip_on_gpu
def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes):
shape = [2, 4, 3, 2]
np.random.seed(133391)
......@@ -45,7 +45,7 @@ def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes):
assert np.allclose(result, expected)
@pytest.config.gpu_skip(reason='Not implemented')
@pytest.mark.skip_on_gpu
def test_argmax():
runtime = get_runtime()
input_x = ng.constant(np.array([[9, 2, 10],
......@@ -58,7 +58,7 @@ def test_argmax():
np.array([1, 3, 0], dtype=np.int32))
@pytest.config.gpu_skip(reason='Not implemented')
@pytest.mark.skip_on_gpu
def test_argmin():
runtime = get_runtime()
input_x = ng.constant(np.array([[12, 2, 10],
......@@ -71,7 +71,7 @@ def test_argmin():
np.array([3, 2, 1], dtype=np.int32))
@pytest.config.gpu_skip(reason='Not implemented')
@pytest.mark.skip_on_gpu
def test_topk():
runtime = get_runtime()
input_x = ng.constant(np.array([[9, 2, 10],
......
......@@ -15,11 +15,12 @@
# ******************************************************************************
import numpy as np
import pytest
import ngraph as ng
from string import ascii_uppercase
import test
def _get_numpy_dtype(scalar):
return np.array([scalar]).dtype
......@@ -27,8 +28,7 @@ def _get_numpy_dtype(scalar):
def get_runtime():
"""Return runtime object."""
backend_name = pytest.config.getoption('backend', default='CPU')
return ng.runtime(backend_name=backend_name)
return ng.runtime(backend_name=test.BACKEND_NAME)
def run_op_node(input_data, op_fun, *args):
......
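
Because `get_runtime()` now reads `test.BACKEND_NAME`, helper-based tests no longer need to know how the backend was selected. A usage sketch, assuming the `ng.parameter` and `runtime.computation` API these tests use:

import numpy as np
import ngraph as ng
from test.ngraph.util import get_runtime

def add_arrays(a, b):
    runtime = get_runtime()  # backend chosen via pytest --backend
    param_a = ng.parameter(a.shape, dtype=a.dtype, name='A')
    param_b = ng.parameter(b.shape, dtype=b.dtype, name='B')
    computation = runtime.computation(ng.add(param_a, param_b), param_a, param_b)
    return computation(a, b)

result = add_arrays(np.array([[1, 2], [3, 4]], dtype=np.float32),
                    np.array([[5, 6], [7, 8]], dtype=np.float32))
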
......@@ -35,6 +35,7 @@ from ngraph.impl.op import Concat, Select
from ngraph.impl.op import Reverse, MaxPool, ReplaceSlice, Slice
from ngraph.impl.op import Convolution, ConvolutionBackpropData, ConvolutionBackpropFilters
import test
def binary_op(op_str, a, b):
......@@ -116,7 +117,7 @@ def binary_op_exec(op_str):
B = Parameter(element_type, shape)
parameter_list = [A, B]
function = Function(NodeVector([binary_op(op_str, A, B)]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, shape)
b = backend.create_tensor(element_type, shape)
......@@ -146,7 +147,7 @@ def binary_op_comparison(op_str):
B = Parameter(element_type, shape)
parameter_list = [A, B]
function = Function(NodeVector([binary_op(op_str, A, B)]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, shape)
b = backend.create_tensor(element_type, shape)
......@@ -245,7 +246,7 @@ def test_add_with_mul():
C = Parameter(element_type, shape)
parameter_list = [A, B, C]
function = Function(NodeVector([Multiply(Add(A, B), C)]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, shape)
b = backend.create_tensor(element_type, shape)
......@@ -358,7 +359,7 @@ def unary_op_exec(op_str, input_list):
A = Parameter(element_type, shape)
parameter_list = [A]
function = Function(NodeVector([unary_op(op_str, A)]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, shape)
result = backend.create_tensor(element_type, shape)
......@@ -479,7 +480,7 @@ def test_tanh():
unary_op_exec(op_str, input_list)
@pytest.config.gpu_skip(reason="Not implemented")
@pytest.mark.skip_on_gpu
def test_reverse():
input_list = [[-1, 0], [0.5, 1]]
op_str = 'Reverse'
......@@ -492,7 +493,7 @@ def test_not():
A = Parameter(element_type, shape)
parameter_list = [A]
function = Function(NodeVector([Not(A)]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, shape)
result = backend.create_tensor(Type.boolean, shape)
......@@ -518,7 +519,7 @@ def test_sum():
A = Parameter(element_type, shape)
parameter_list = [A]
function = Function(NodeVector([Sum(A, AxisSet({1}))]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, shape)
result = backend.create_tensor(element_type, Shape([1]))
......@@ -545,7 +546,7 @@ def test_reshape():
A = Parameter(element_type, shape)
parameter_list = [A]
function = Function(NodeVector([Reshape(A, AxisVector([0, 1]), Shape([3, 2]))]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, shape)
result = backend.create_tensor(element_type, Shape([3, 2]))
......@@ -572,7 +573,7 @@ def test_convert():
parameter_list = [A]
# f32 to boolean
function = Function(NodeVector([Convert(A, Type.boolean)]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, shape)
result = backend.create_tensor(Type.boolean, shape)
......@@ -591,7 +592,7 @@ def test_convert():
# f32 to i32
function = Function(NodeVector([Convert(A, Type.i32)]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
result = backend.create_tensor(Type.i32, shape)
......@@ -615,7 +616,7 @@ def test_broadcast():
A = Parameter(element_type, Shape([3]))
parameter_list = [A]
function = Function(NodeVector([Broadcast(A, Shape([3, 3]), AxisSet({0}))]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, Shape([3]))
result = backend.create_tensor(element_type, Shape([3, 3]))
......@@ -641,7 +642,7 @@ def test_constant():
parameter_list = []
function = Function(NodeVector([Constant(element_type, Shape([3, 3]), list(range(9)))]),
parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
result = backend.create_tensor(element_type, Shape([3, 3]))
......@@ -662,7 +663,7 @@ def test_onehot():
A = Parameter(element_type, Shape([3]))
parameter_list = [A]
function = Function(NodeVector([OneHot(A, Shape([3, 3]), 0)]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, Shape([3]))
result = backend.create_tensor(element_type, Shape([3, 3]))
......@@ -681,7 +682,7 @@ def test_onehot():
assert np.allclose(result_arr, result_arr_ref)
@pytest.config.gpu_skip(reason="Not implemented")
@pytest.mark.skip_on_gpu
def test_concat():
element_type = Type.f32
......@@ -691,7 +692,7 @@ def test_concat():
parameter_list = [A, B, C]
axis = 0
function = Function(NodeVector([Concat(NodeVector([A, B, C]), axis)]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, Shape([1, 2]))
b = backend.create_tensor(element_type, Shape([1, 2]))
......@@ -716,7 +717,7 @@ def test_concat():
assert np.allclose(result_arr, result_arr_ref)
@pytest.config.gpu_skip(reason="Not implemented")
@pytest.mark.skip_on_gpu
def test_axisset():
set_axisset = AxisSet({1, 2, 3})
......@@ -733,7 +734,7 @@ def test_axisset():
assert set(tuple_axisset) == set(set_axisset)
@pytest.config.gpu_skip(reason="Not implemented")
@pytest.mark.skip_on_gpu
def test_select():
element_type = Type.f32
......@@ -743,7 +744,7 @@ def test_select():
parameter_list = [A, B, C]
function = Function(NodeVector([Select(A, B, C)]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(Type.boolean, Shape([1, 2]))
b = backend.create_tensor(element_type, Shape([1, 2]))
......@@ -765,7 +766,7 @@ def test_select():
assert np.allclose(result_arr, result_arr_ref)
@pytest.config.gpu_skip(reason="Not implemented")
@pytest.mark.skip_on_gpu
def test_slice():
element_type = Type.f32
......@@ -779,7 +780,7 @@ def test_slice():
function = Function(NodeVector([Slice(A, Coordinate(lower_bounds),
Coordinate(upper_bounds))]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, shape)
result = backend.create_tensor(element_type, Shape([4, 4]))
......@@ -802,7 +803,7 @@ def test_slice():
function = Function(NodeVector([Slice(A, Coordinate(lower_bounds), Coordinate(upper_bounds),
Strides(strides))]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
result = backend.create_tensor(element_type, Shape([4, 2]))
result_arr = np.zeros(8, dtype=np.float32).reshape(4, 2)
......@@ -817,7 +818,7 @@ def test_slice():
assert np.allclose(result_arr, result_arr_ref)
@pytest.config.gpu_skip(reason="Not implemented")
@pytest.mark.skip_on_gpu
def test_replace_slice():
element_type = Type.f32
......@@ -832,7 +833,7 @@ def test_replace_slice():
function = Function(NodeVector([ReplaceSlice(A, B, Coordinate(lower_bounds),
Coordinate(upper_bounds))]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, Shape([6, 4]))
b = backend.create_tensor(element_type, Shape([3, 2]))
......@@ -860,7 +861,7 @@ def test_replace_slice():
function = Function(NodeVector([ReplaceSlice(A, B, Coordinate(lower_bounds),
Coordinate(upper_bounds), Strides(strides))]),
parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
handle = backend.compile(function)
handle.call([result], [a, b])
......@@ -872,7 +873,7 @@ def test_replace_slice():
assert np.allclose(result_arr, result_arr_ref)
@pytest.config.gpu_skip(reason="Not implemented")
@pytest.mark.skip_on_gpu
def test_max_pool():
#test 1d
......@@ -885,7 +886,7 @@ def test_max_pool():
window_shape = [3]
function = Function(NodeVector([MaxPool(A, Shape(window_shape))]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, shape)
result = backend.create_tensor(element_type, Shape([1, 1, 8]))
......@@ -905,7 +906,7 @@ def test_max_pool():
strides = [2]
function = Function(NodeVector([MaxPool(A, Shape(window_shape), Strides(strides))]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
size = 4
result = backend.create_tensor(element_type, Shape([1, 1, size]))
......@@ -929,7 +930,7 @@ def test_max_pool():
window_shape = [3, 3]
function = Function(NodeVector([MaxPool(A, Shape(window_shape))]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, shape)
result = backend.create_tensor(element_type, Shape([1, 1, 8, 8]))
......@@ -949,7 +950,7 @@ def test_max_pool():
strides = [2, 2]
function = Function(NodeVector([MaxPool(A, Shape(window_shape), Strides(strides))]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
size = 4
result = backend.create_tensor(element_type, Shape([1, 1, size, size]))
......@@ -964,7 +965,7 @@ def test_max_pool():
assert np.allclose(result_arr, result_arr_ref)
@pytest.config.gpu_skip(reason="Not implemented")
@pytest.mark.skip_on_gpu
def convolution2d(image, filterit, strides=(1, 1), dilation=(1, 1), padding_below=(0, 0),
padding_above=(0, 0), data_dilation=(1, 1)):
......@@ -1007,7 +1008,7 @@ def convolution2d(image, filterit, strides=(1, 1), dilation=(1, 1), padding_belo
return result
@pytest.config.gpu_skip(reason="Not implemented")
@pytest.mark.skip_on_gpu
def test_convolution():
element_type = Type.f32
......@@ -1027,7 +1028,7 @@ def test_convolution():
result_arr = np.zeros(196, dtype=np.float32).reshape(1, 1, 14, 14)
function = Function(NodeVector([Convolution(A, B)]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, image_shape)
b = backend.create_tensor(element_type, filter_shape)
......@@ -1045,7 +1046,7 @@ def test_convolution():
assert np.allclose(result_arr, result_arr_ref)
@pytest.config.gpu_skip(reason="Not implemented")
@pytest.mark.skip_on_gpu
def test_convolution_with_strides():
element_type = Type.f32
......@@ -1061,7 +1062,7 @@ def test_convolution_with_strides():
strides = [2, 2]
function = Function(NodeVector([Convolution(A, B, Strides(strides))]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, image_shape)
b = backend.create_tensor(element_type, filter_shape)
......@@ -1080,7 +1081,7 @@ def test_convolution_with_strides():
assert np.allclose(result_arr, result_arr_ref)
@pytest.config.gpu_skip(reason="Not implemented")
@pytest.mark.skip_on_gpu
def test_convolution_with_filter_dilation():
element_type = Type.f32
......@@ -1096,7 +1097,7 @@ def test_convolution_with_filter_dilation():
dilation = [2, 2]
function = Function(NodeVector([Convolution(A, B, Strides(strides), Strides(dilation))]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, image_shape)
b = backend.create_tensor(element_type, filter_shape)
......@@ -1116,7 +1117,7 @@ def test_convolution_with_filter_dilation():
assert np.allclose(result_arr, result_arr_ref)
@pytest.config.gpu_skip(reason="Not implemented")
@pytest.mark.skip_on_gpu
def test_convolution_with_padding():
element_type = Type.f32
......@@ -1137,7 +1138,7 @@ def test_convolution_with_padding():
function = Function(NodeVector([Convolution(A, B, Strides(strides), Strides(dilation),
CoordinateDiff(padding_below), CoordinateDiff(padding_above))]),
parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, image_shape)
b = backend.create_tensor(element_type, filter_shape)
......@@ -1176,7 +1177,7 @@ def test_convolution_with_padding():
function = Function(NodeVector([Convolution(A, B, Strides(strides), Strides(dilation),
CoordinateDiff(padding_below), CoordinateDiff(padding_above))]),
parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, image_shape)
b = backend.create_tensor(element_type, filter_shape)
......@@ -1197,7 +1198,7 @@ def test_convolution_with_padding():
assert np.allclose(result_arr, result_arr_ref)
@pytest.config.gpu_skip(reason="Not implemented")
@pytest.mark.skip_on_gpu
def test_convolution_with_data_dilation():
element_type = Type.f32
......@@ -1218,7 +1219,7 @@ def test_convolution_with_data_dilation():
function = Function(NodeVector([Convolution(A, B, Strides(strides), Strides(dilation),
CoordinateDiff(padding_below), CoordinateDiff(padding_above),
Strides(data_dilation))]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, image_shape)
b = backend.create_tensor(element_type, filter_shape)
......@@ -1239,7 +1240,7 @@ def test_convolution_with_data_dilation():
assert np.allclose(result_arr, result_arr_ref)
@pytest.config.gpu_skip(reason="Not implemented")
@pytest.mark.skip_on_gpu
def test_convolutionBackpropData():
element_type = Type.f32
......@@ -1266,7 +1267,7 @@ def test_convolutionBackpropData():
function = Function(NodeVector([ConvolutionBackpropData(image_shape, A, B, Strides(window_strides), Strides(window_dilation),
CoordinateDiff(padding_below), CoordinateDiff(padding_above),
Strides(data_dilation))]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, filter_shape)
b = backend.create_tensor(element_type, output_shape)
......@@ -1295,7 +1296,7 @@ def test_convolutionBackpropData():
assert np.allclose(result_arr, result_arr_ref)
@pytest.config.gpu_skip(reason="Not implemented")
@pytest.mark.skip_on_gpu
def test_convolutionBackpropFilters():
element_type = Type.f32
......@@ -1322,7 +1323,7 @@ def test_convolutionBackpropFilters():
function = Function(NodeVector([ConvolutionBackpropFilters(A, filter_shape, B, Strides(window_strides), Strides(window_dilation),
CoordinateDiff(padding_below), CoordinateDiff(padding_above),
Strides(data_dilation))]), parameter_list, 'test')
backend = Backend.create(pytest.config.getoption('backend'))
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, image_shape)
b = backend.create_tensor(element_type, output_shape)
......
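
Every low-level `ngraph.impl` test above follows the same sequence: build a `Function`, create a `Backend` from `test.BACKEND_NAME`, allocate tensors, compile, and call. A condensed sketch of that sequence; the import locations of `Parameter`, `Add`, and `Backend` are assumed, since the diff shows only part of this file's import block:

import test
from ngraph.impl import Function, NodeVector, Shape, Type  # assumed import path
from ngraph.impl.op import Add, Parameter                  # assumed import path
from ngraph.impl.runtime import Backend                    # assumed import path

element_type = Type.f32
shape = Shape([2, 2])
A = Parameter(element_type, shape)
B = Parameter(element_type, shape)
function = Function(NodeVector([Add(A, B)]), [A, B], 'test')

backend = Backend.create(test.BACKEND_NAME)  # e.g. 'CPU', 'GPU', 'INTERPRETER'
a = backend.create_tensor(element_type, shape)
b = backend.create_tensor(element_type, shape)
result = backend.create_tensor(element_type, shape)

handle = backend.compile(function)           # same call pattern as the tests above
handle.call([result], [a, b])
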