ngraph / Commits / 23f29e88

[Py] Update to new pytest configuration method (#2520)

Unverified commit 23f29e88, authored 6 years ago by Michał Karzyński, committed by GitHub 6 years ago.
Parent: 544f13ad

Showing 12 changed files with 101 additions and 77 deletions (+101 / -77).
python/test/__init__.py                     +5    -0
python/test/conftest.py                     +24   -8
python/test/ngraph/test_basic.py            +5    -3
python/test/ngraph/test_convolution.py      +2    -2
python/test/ngraph/test_normalization.py    +1    -1
python/test/ngraph/test_ops_matmul.py       +2    -2
python/test/ngraph/test_ops_reshape.py      +5    -5
python/test/ngraph/test_ops_unary.py        +2    -2
python/test/ngraph/test_pooling.py          +2    -2
python/test/ngraph/test_reduction.py        +4    -4
python/test/ngraph/util.py                  +3    -3
python/test/test_ops.py                     +46   -45
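Taken together, the diffs below apply one mechanical change across the Python test suite: decorators of the form @pytest.config.gpu_skip(reason=...) become plain @pytest.mark.skip_on_* marks (resolved by the new pytest_collection_modifyitems hook in conftest.py), and every pytest.config.getoption('backend') lookup becomes a read of the module-level test.BACKEND_NAME set once in pytest_configure. A minimal sketch of the new pattern, assuming the package layout from this commit; test_example is a placeholder name, not part of the diff:

import pytest

import test                                # python/test/__init__.py, which now holds BACKEND_NAME
from test.ngraph.util import get_runtime   # helper updated in this commit


@pytest.mark.skip_on_gpu                   # turned into a real skip by pytest_collection_modifyitems
def test_example():
    backend_name = test.BACKEND_NAME       # replaces pytest.config.getoption('backend', default='CPU')
    runtime = get_runtime()                # also resolves the backend through test.BACKEND_NAME now
    assert runtime is not None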
python/test/__init__.py

@@ -13,3 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ******************************************************************************
+
+# test.BACKEND_NAME is a configuration variable determining which
+# nGraph backend tests will use. It's set during pytest configuration time.
+# See `pytest_configure` hook in `conftest.py` for more details.
+BACKEND_NAME = None
python/test/conftest.py

@@ -14,6 +14,7 @@
 # limitations under the License.
 # ******************************************************************************
 import pytest
+import test


 def pytest_addoption(parser):
@@ -22,13 +23,28 @@ def pytest_addoption(parser):
                      help='Select from available backends')


-def pass_method(*args, **kwargs):
-    pass
+def pytest_configure(config):
+    backend_name = config.getvalue('backend')
+    test.BACKEND_NAME = backend_name


-def pytest_configure(config):
-    config.gpu_skip = pytest.mark.skipif(config.getvalue('backend') == 'GPU')
-    config.cpu_skip = pytest.mark.skipif(config.getvalue('backend') == 'CPU')
-    config.nnp_skip = pytest.mark.skipif(config.getvalue('backend') == 'NNP')
-    config.interpreter_skip = pytest.mark.skipif(config.getvalue('backend') == 'INTERPRETER')
-    config.plaidml_skip = pytest.mark.skipif(config.getvalue('backend') == 'PlaidML')
+def pytest_collection_modifyitems(config, items):
+    backend_name = config.getvalue('backend')
+
+    gpu_skip = pytest.mark.skip(reason='Skipping test on the GPU backend.')
+    cpu_skip = pytest.mark.skip(reason='Skipping test on the CPU backend.')
+    nnp_skip = pytest.mark.skip(reason='Skipping test on the NNP backend.')
+    interpreter_skip = pytest.mark.skip(reason='Skipping test on the INTERPRETER backend.')
+    plaidml_skip = pytest.mark.skip(reason='Skipping test on the PlaidML backend.')
+
+    for item in items:
+        if backend_name == 'GPU' and 'skip_on_gpu' in item.keywords:
+            item.add_marker(gpu_skip)
+        if backend_name == 'CPU' and 'skip_on_cpu' in item.keywords:
+            item.add_marker(cpu_skip)
+        if backend_name == 'NNP' and 'skip_on_nnp' in item.keywords:
+            item.add_marker(nnp_skip)
+        if backend_name == 'INTERPRETER' and 'skip_on_interpreter' in item.keywords:
+            item.add_marker(interpreter_skip)
+        if backend_name == 'PlaidML' and 'skip_on_plaidml' in item.keywords:
+            item.add_marker(plaidml_skip)
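One practical note, an observation rather than part of this commit: newer pytest releases warn about (and, with --strict-markers, reject) unknown marks such as skip_on_gpu. If that ever becomes an issue, the hook above could also register the marks, for example:

# Hypothetical extension of the pytest_configure hook shown above (not in this
# commit): register the custom skip_on_* marks so newer pytest versions do not
# warn about unknown markers.
def pytest_configure(config):
    test.BACKEND_NAME = config.getvalue('backend')
    for name in ('skip_on_gpu', 'skip_on_cpu', 'skip_on_nnp',
                 'skip_on_interpreter', 'skip_on_plaidml'):
        config.addinivalue_line('markers', '{}: skip the test on the named backend'.format(name))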
python/test/ngraph/test_basic.py

@@ -18,14 +18,16 @@ import pytest
 import json

 import ngraph as ng
-from test.ngraph.util import get_runtime, run_op_node
 from ngraph.exceptions import UserInputError

+import test
+from test.ngraph.util import get_runtime, run_op_node
+

 @pytest.mark.parametrize('dtype', [np.float32, np.float64,
                                    np.int8, np.int16, np.int32, np.int64,
                                    np.uint8, np.uint16, np.uint32, np.uint64])
-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_simple_computation_on_ndarrays(dtype):
     runtime = get_runtime()
@@ -51,7 +53,7 @@ def test_simple_computation_on_ndarrays(dtype):
 def test_serialization():
     dtype = np.float32
-    backend_name = pytest.config.getoption('backend', default='CPU')
+    backend_name = test.BACKEND_NAME
     shape = [2, 2]
     parameter_a = ng.parameter(shape, dtype=dtype, name='A')
python/test/ngraph/test_convolution.py

@@ -21,7 +21,7 @@ import ngraph as ng
 from test.ngraph.util import get_runtime


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_convolution_2d():
     runtime = get_runtime()
     # input_x should have shape N(batch) x C x H x W
@@ -95,7 +95,7 @@ def test_convolution_2d():
                          dtype=np.float32))


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_convolution_backprop_data():
     runtime = get_runtime()
python/test/ngraph/test_normalization.py

@@ -21,7 +21,7 @@ import ngraph as ng
 from test.ngraph.util import get_runtime


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_lrn():
     input_image_shape = (2, 3, 2, 1)
     input_image = np.arange(int(np.prod(input_image_shape))).reshape(input_image_shape).astype('f')
python/test/ngraph/test_ops_matmul.py

@@ -38,7 +38,7 @@ from test.ngraph.util import run_op_node
     ([2, 3, 4, 5], [5, 2, 3], None, 1),
     ([2, 3, 4, 5], [4, 5, 2, 4], 2, 2),
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_dot(left_shape, right_shape, reduction_axes_count, numpy_axes):
     np.random.seed(133391)
     left_input = -100.0 + np.random.rand(*left_shape) * 200.0
@@ -49,7 +49,7 @@ def test_dot(left_shape, right_shape, reduction_axes_count, numpy_axes):
     assert np.allclose(result, expected)


-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_dot_tensor_scalar():
     np.random.seed(133391)
     left_input = 10.0
python/test/ngraph/test_ops_reshape.py

@@ -20,7 +20,7 @@ import ngraph as ng
 from test.ngraph.util import get_runtime, run_op_numeric_data


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_concat():
     a = np.array([[1, 2], [3, 4]])
     b = np.array([[5, 6]])
@@ -40,7 +40,7 @@ def test_concat():
     (bool, False),
     (bool, np.empty((2, 2), dtype=bool)),
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_constant_from_bool(val_type, value):
     expected = np.array(value, dtype=val_type)
     result = run_op_numeric_data(value, ng.constant, val_type)
@@ -59,7 +59,7 @@ def test_constant_from_bool(val_type, value):
     (np.uint32, np.uint32(123456)),
     (np.uint64, np.uint64(1234567)),
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_constant_from_scalar(val_type, value):
     expected = np.array(value, dtype=val_type)
     result = run_op_numeric_data(value, ng.constant, val_type)
@@ -70,7 +70,7 @@ def test_constant_from_scalar(val_type, value):
     np.float32,
     np.float64,
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_constant_from_float_array(val_type):
     np.random.seed(133391)
     input_data = np.array(-1 + np.random.rand(2, 3, 4) * 2, dtype=val_type)
@@ -88,7 +88,7 @@ def test_constant_from_float_array(val_type):
     (np.uint32, 0, 1024),
     (np.uint64, 0, 16383),
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_constant_from_integer_array(val_type, range_start, range_end):
     np.random.seed(133391)
     input_data = np.array(np.random.randint(range_start, range_end, size=(2, 2)), dtype=val_type)
python/test/ngraph/test_ops_unary.py

@@ -42,7 +42,7 @@ from test.ngraph.util import run_op_numeric_data, run_op_node
     (ng.tan, np.tan, -1., 1.),
     (ng.tanh, np.tanh, -100., 100.),
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
     np.random.seed(133391)
     input_data = range_start + np.random.rand(2, 3, 4) * (range_end - range_start)
@@ -77,7 +77,7 @@ def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
     (ng.tan, np.tan, np.float32(np.pi / 4.0)),
     (ng.tanh, np.tanh, np.float32(0.1234)),
 ])
-@pytest.config.gpu_skip(reason='under investigation, runtime error is: function failed to compile')
+@pytest.mark.skip_on_gpu  # under investigation, runtime error is: function failed to compile
 def test_unary_op_scalar(ng_api_fn, numpy_fn, input_data):
     expected = numpy_fn(input_data)
python/test/ngraph/test_pooling.py

@@ -25,7 +25,7 @@ def _ndarray_1x1x4x4():
     return np.arange(11, 27, dtype=np.float32).reshape(1, 1, 4, 4)


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_avg_pool_2d(_ndarray_1x1x4x4):
     runtime = get_runtime()
     input_data = _ndarray_1x1x4x4
@@ -74,7 +74,7 @@ def test_avg_pool_2d(_ndarray_1x1x4x4):
     assert np.allclose(result, expected)


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_avg_pooling_3d(_ndarray_1x1x4x4):
     rt = get_runtime()
     data = _ndarray_1x1x4x4
python/test/ngraph/test_reduction.py

@@ -34,7 +34,7 @@ from test.ngraph.util import run_op_node, get_runtime
     (ng.sum, np.sum, (0, 2)),
     (ng.prod, np.prod, (0, 2)),
 ])
-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes):
     shape = [2, 4, 3, 2]
     np.random.seed(133391)
@@ -45,7 +45,7 @@ def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes):
     assert np.allclose(result, expected)


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_argmax():
     runtime = get_runtime()
     input_x = ng.constant(np.array([[9, 2, 10],
@@ -58,7 +58,7 @@ def test_argmax():
                         np.array([1, 3, 0], dtype=np.int32))


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_argmin():
     runtime = get_runtime()
     input_x = ng.constant(np.array([[12, 2, 10],
@@ -71,7 +71,7 @@ def test_argmin():
                         np.array([3, 2, 1], dtype=np.int32))


-@pytest.config.gpu_skip(reason='Not implemented')
+@pytest.mark.skip_on_gpu
 def test_topk():
     runtime = get_runtime()
     input_x = ng.constant(np.array([[9, 2, 10],
python/test/ngraph/util.py

@@ -15,11 +15,12 @@
 # ******************************************************************************
 import numpy as np
-import pytest

 import ngraph as ng
 from string import ascii_uppercase

+import test
+

 def _get_numpy_dtype(scalar):
     return np.array([scalar]).dtype
@@ -27,8 +28,7 @@ def _get_numpy_dtype(scalar):
 def get_runtime():
     """Return runtime object."""
-    backend_name = pytest.config.getoption('backend', default='CPU')
-    return ng.runtime(backend_name=backend_name)
+    return ng.runtime(backend_name=test.BACKEND_NAME)


 def run_op_node(input_data, op_fun, *args):
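For context, a typical caller of get_runtime() after this change looks roughly as follows. The ng.parameter call appears in the diffs above; runtime.computation and the operator overloading on nodes are assumptions about the ngraph Python API of that period, not something shown in this commit:

import numpy as np
import ngraph as ng

from test.ngraph.util import get_runtime


def test_add_two_matrices():
    # get_runtime() now reads test.BACKEND_NAME instead of querying pytest.config.
    runtime = get_runtime()
    parameter_a = ng.parameter([2, 2], dtype=np.float32, name='A')
    parameter_b = ng.parameter([2, 2], dtype=np.float32, name='B')
    model = parameter_a + parameter_b                                   # assumed operator overloading
    computation = runtime.computation(model, parameter_a, parameter_b)  # assumed API of that era
    value_a = np.ones((2, 2), dtype=np.float32)
    value_b = np.ones((2, 2), dtype=np.float32)
    assert np.allclose(computation(value_a, value_b), value_a + value_b)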
python/test/test_ops.py

@@ -35,6 +35,7 @@ from ngraph.impl.op import Concat, Select
 from ngraph.impl.op import Reverse, MaxPool, ReplaceSlice, Slice
 from ngraph.impl.op import Convolution, ConvolutionBackpropData, ConvolutionBackpropFilters

+import test


 def binary_op(op_str, a, b):
@@ -116,7 +117,7 @@ def binary_op_exec(op_str):
     B = Parameter(element_type, shape)
     parameter_list = [A, B]
     function = Function(NodeVector([binary_op(op_str, A, B)]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, shape)
     b = backend.create_tensor(element_type, shape)
@@ -146,7 +147,7 @@ def binary_op_comparison(op_str):
     B = Parameter(element_type, shape)
     parameter_list = [A, B]
     function = Function(NodeVector([binary_op(op_str, A, B)]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, shape)
     b = backend.create_tensor(element_type, shape)
@@ -245,7 +246,7 @@ def test_add_with_mul():
     C = Parameter(element_type, shape)
     parameter_list = [A, B, C]
     function = Function(NodeVector([Multiply(Add(A, B), C)]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, shape)
     b = backend.create_tensor(element_type, shape)
@@ -358,7 +359,7 @@ def unary_op_exec(op_str, input_list):
     A = Parameter(element_type, shape)
     parameter_list = [A]
     function = Function(NodeVector([unary_op(op_str, A)]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, shape)
     result = backend.create_tensor(element_type, shape)
@@ -479,7 +480,7 @@ def test_tanh():
     unary_op_exec(op_str, input_list)


-@pytest.config.gpu_skip(reason="Not implemented")
+@pytest.mark.skip_on_gpu
 def test_reverse():
     input_list = [[-1, 0], [0.5, 1]]
     op_str = 'Reverse'
@@ -492,7 +493,7 @@ def test_not():
     A = Parameter(element_type, shape)
     parameter_list = [A]
     function = Function(NodeVector([Not(A)]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, shape)
     result = backend.create_tensor(Type.boolean, shape)
@@ -518,7 +519,7 @@ def test_sum():
     A = Parameter(element_type, shape)
     parameter_list = [A]
     function = Function(NodeVector([Sum(A, AxisSet({1}))]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, shape)
     result = backend.create_tensor(element_type, Shape([1]))
@@ -545,7 +546,7 @@ def test_reshape():
     A = Parameter(element_type, shape)
     parameter_list = [A]
     function = Function(NodeVector([Reshape(A, AxisVector([0, 1]), Shape([3, 2]))]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, shape)
     result = backend.create_tensor(element_type, Shape([3, 2]))
@@ -572,7 +573,7 @@ def test_convert():
     parameter_list = [A]

     # f32 to boolean
     function = Function(NodeVector([Convert(A, Type.boolean)]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, shape)
     result = backend.create_tensor(Type.boolean, shape)
@@ -591,7 +592,7 @@ def test_convert():
     # f32 to i32
     function = Function(NodeVector([Convert(A, Type.i32)]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     result = backend.create_tensor(Type.i32, shape)
@@ -615,7 +616,7 @@ def test_broadcast():
     A = Parameter(element_type, Shape([3]))
     parameter_list = [A]
     function = Function(NodeVector([Broadcast(A, Shape([3, 3]), AxisSet({0}))]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, Shape([3]))
     result = backend.create_tensor(element_type, Shape([3, 3]))
@@ -641,7 +642,7 @@ def test_constant():
     parameter_list = []
     function = Function(NodeVector([Constant(element_type, Shape([3, 3]), list(range(9)))]),
                         parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     result = backend.create_tensor(element_type, Shape([3, 3]))
@@ -662,7 +663,7 @@ def test_onehot():
     A = Parameter(element_type, Shape([3]))
     parameter_list = [A]
     function = Function(NodeVector([OneHot(A, Shape([3, 3]), 0)]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, Shape([3]))
     result = backend.create_tensor(element_type, Shape([3, 3]))
@@ -681,7 +682,7 @@ def test_onehot():
     assert np.allclose(result_arr, result_arr_ref)


-@pytest.config.gpu_skip(reason="Not implemented")
+@pytest.mark.skip_on_gpu
 def test_concat():
     element_type = Type.f32
@@ -691,7 +692,7 @@ def test_concat():
     parameter_list = [A, B, C]
     axis = 0
     function = Function(NodeVector([Concat(NodeVector([A, B, C]), axis)]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, Shape([1, 2]))
     b = backend.create_tensor(element_type, Shape([1, 2]))
@@ -716,7 +717,7 @@ def test_concat():
     assert np.allclose(result_arr, result_arr_ref)


-@pytest.config.gpu_skip(reason="Not implemented")
+@pytest.mark.skip_on_gpu
 def test_axisset():
     set_axisset = AxisSet({1, 2, 3})
@@ -733,7 +734,7 @@ def test_axisset():
     assert set(tuple_axisset) == set(set_axisset)


-@pytest.config.gpu_skip(reason="Not implemented")
+@pytest.mark.skip_on_gpu
 def test_select():
     element_type = Type.f32
@@ -743,7 +744,7 @@ def test_select():
     parameter_list = [A, B, C]
     function = Function(NodeVector([Select(A, B, C)]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(Type.boolean, Shape([1, 2]))
     b = backend.create_tensor(element_type, Shape([1, 2]))
@@ -765,7 +766,7 @@ def test_select():
     assert np.allclose(result_arr, result_arr_ref)


-@pytest.config.gpu_skip(reason="Not implemented")
+@pytest.mark.skip_on_gpu
 def test_slice():
     element_type = Type.f32
@@ -779,7 +780,7 @@ def test_slice():
     function = Function(NodeVector([Slice(A, Coordinate(lower_bounds),
                                           Coordinate(upper_bounds))]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, shape)
     result = backend.create_tensor(element_type, Shape([4, 4]))
@@ -802,7 +803,7 @@ def test_slice():
     function = Function(NodeVector([Slice(A, Coordinate(lower_bounds), Coordinate(upper_bounds),
                                           Strides(strides))]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     result = backend.create_tensor(element_type, Shape([4, 2]))
     result_arr = np.zeros(8, dtype=np.float32).reshape(4, 2)
@@ -817,7 +818,7 @@ def test_slice():
     assert np.allclose(result_arr, result_arr_ref)


-@pytest.config.gpu_skip(reason="Not implemented")
+@pytest.mark.skip_on_gpu
 def test_replace_slice():
     element_type = Type.f32
@@ -832,7 +833,7 @@ def test_replace_slice():
     function = Function(NodeVector([ReplaceSlice(A, B, Coordinate(lower_bounds),
                                                  Coordinate(upper_bounds))]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, Shape([6, 4]))
     b = backend.create_tensor(element_type, Shape([3, 2]))
@@ -860,7 +861,7 @@ def test_replace_slice():
     function = Function(NodeVector([ReplaceSlice(A, B, Coordinate(lower_bounds),
                                                  Coordinate(upper_bounds), Strides(strides))]),
                         parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     handle = backend.compile(function)
     handle.call([result], [a, b])
@@ -872,7 +873,7 @@ def test_replace_slice():
     assert np.allclose(result_arr, result_arr_ref)


-@pytest.config.gpu_skip(reason="Not implemented")
+@pytest.mark.skip_on_gpu
 def test_max_pool():
     #test 1d
@@ -885,7 +886,7 @@ def test_max_pool():
     window_shape = [3]
     function = Function(NodeVector([MaxPool(A, Shape(window_shape))]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, shape)
     result = backend.create_tensor(element_type, Shape([1, 1, 8]))
@@ -905,7 +906,7 @@ def test_max_pool():
     strides = [2]
     function = Function(NodeVector([MaxPool(A, Shape(window_shape), Strides(strides))]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     size = 4
     result = backend.create_tensor(element_type, Shape([1, 1, size]))
@@ -929,7 +930,7 @@ def test_max_pool():
     window_shape = [3, 3]
     function = Function(NodeVector([MaxPool(A, Shape(window_shape))]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, shape)
     result = backend.create_tensor(element_type, Shape([1, 1, 8, 8]))
@@ -949,7 +950,7 @@ def test_max_pool():
     strides = [2, 2]
     function = Function(NodeVector([MaxPool(A, Shape(window_shape), Strides(strides))]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     size = 4
     result = backend.create_tensor(element_type, Shape([1, 1, size, size]))
@@ -964,7 +965,7 @@ def test_max_pool():
     assert np.allclose(result_arr, result_arr_ref)


-@pytest.config.gpu_skip(reason="Not implemented")
+@pytest.mark.skip_on_gpu
 def convolution2d(image, filterit, strides=(1, 1), dilation=(1, 1), padding_below=(0, 0),
                   padding_above=(0, 0), data_dilation=(1, 1)):
@@ -1007,7 +1008,7 @@ def convolution2d(image, filterit, strides=(1, 1), dilation=(1, 1), padding_belo
     return result


-@pytest.config.gpu_skip(reason="Not implemented")
+@pytest.mark.skip_on_gpu
 def test_convolution():
     element_type = Type.f32
@@ -1027,7 +1028,7 @@ def test_convolution():
     result_arr = np.zeros(196, dtype=np.float32).reshape(1, 1, 14, 14)
     function = Function(NodeVector([Convolution(A, B)]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, image_shape)
     b = backend.create_tensor(element_type, filter_shape)
@@ -1045,7 +1046,7 @@ def test_convolution():
     assert np.allclose(result_arr, result_arr_ref)


-@pytest.config.gpu_skip(reason="Not implemented")
+@pytest.mark.skip_on_gpu
 def test_convolution_with_strides():
     element_type = Type.f32
@@ -1061,7 +1062,7 @@ def test_convolution_with_strides():
     strides = [2, 2]
     function = Function(NodeVector([Convolution(A, B, Strides(strides))]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, image_shape)
     b = backend.create_tensor(element_type, filter_shape)
@@ -1080,7 +1081,7 @@ def test_convolution_with_strides():
     assert np.allclose(result_arr, result_arr_ref)


-@pytest.config.gpu_skip(reason="Not implemented")
+@pytest.mark.skip_on_gpu
 def test_convolution_with_filter_dilation():
     element_type = Type.f32
@@ -1096,7 +1097,7 @@ def test_convolution_with_filter_dilation():
     dilation = [2, 2]
     function = Function(NodeVector([Convolution(A, B, Strides(strides), Strides(dilation))]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, image_shape)
     b = backend.create_tensor(element_type, filter_shape)
@@ -1116,7 +1117,7 @@ def test_convolution_with_filter_dilation():
     assert np.allclose(result_arr, result_arr_ref)


-@pytest.config.gpu_skip(reason="Not implemented")
+@pytest.mark.skip_on_gpu
 def test_convolution_with_padding():
     element_type = Type.f32
@@ -1137,7 +1138,7 @@ def test_convolution_with_padding():
     function = Function(NodeVector([Convolution(A, B, Strides(strides), Strides(dilation),
                                                 CoordinateDiff(padding_below), CoordinateDiff(padding_above))]),
                         parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, image_shape)
     b = backend.create_tensor(element_type, filter_shape)
@@ -1176,7 +1177,7 @@ def test_convolution_with_padding():
     function = Function(NodeVector([Convolution(A, B, Strides(strides), Strides(dilation),
                                                 CoordinateDiff(padding_below), CoordinateDiff(padding_above))]),
                         parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, image_shape)
     b = backend.create_tensor(element_type, filter_shape)
@@ -1197,7 +1198,7 @@ def test_convolution_with_padding():
     assert np.allclose(result_arr, result_arr_ref)


-@pytest.config.gpu_skip(reason="Not implemented")
+@pytest.mark.skip_on_gpu
 def test_convolution_with_data_dilation():
     element_type = Type.f32
@@ -1218,7 +1219,7 @@ def test_convolution_with_data_dilation():
     function = Function(NodeVector([Convolution(A, B, Strides(strides), Strides(dilation),
                                                 CoordinateDiff(padding_below), CoordinateDiff(padding_above),
                                                 Strides(data_dilation))]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, image_shape)
     b = backend.create_tensor(element_type, filter_shape)
@@ -1239,7 +1240,7 @@ def test_convolution_with_data_dilation():
     assert np.allclose(result_arr, result_arr_ref)


-@pytest.config.gpu_skip(reason="Not implemented")
+@pytest.mark.skip_on_gpu
 def test_convolutionBackpropData():
     element_type = Type.f32
@@ -1266,7 +1267,7 @@ def test_convolutionBackpropData():
     function = Function(NodeVector([ConvolutionBackpropData(image_shape, A, B, Strides(window_strides), Strides(window_dilation),
                                                             CoordinateDiff(padding_below), CoordinateDiff(padding_above),
                                                             Strides(data_dilation))]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, filter_shape)
     b = backend.create_tensor(element_type, output_shape)
@@ -1295,7 +1296,7 @@ def test_convolutionBackpropData():
     assert np.allclose(result_arr, result_arr_ref)


-@pytest.config.gpu_skip(reason="Not implemented")
+@pytest.mark.skip_on_gpu
 def test_convolutionBackpropFilters():
     element_type = Type.f32
@@ -1322,7 +1323,7 @@ def test_convolutionBackpropFilters():
     function = Function(NodeVector([ConvolutionBackpropFilters(A, filter_shape, B, Strides(window_strides), Strides(window_dilation),
                                                                CoordinateDiff(padding_below), CoordinateDiff(padding_above),
                                                                Strides(data_dilation))]), parameter_list, 'test')
-    backend = Backend.create(pytest.config.getoption('backend'))
+    backend = Backend.create(test.BACKEND_NAME)
     a = backend.create_tensor(element_type, image_shape)
     b = backend.create_tensor(element_type, output_shape)
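For orientation, the low-level pattern that repeats throughout test_ops.py reduces to the sketch below. Backend.create, create_tensor, compile and call are taken verbatim from the hunks above; the import paths and the choice of the Add op are filled in as assumptions, since the diff only shows a handful of the file's imports:

import test                                                   # provides BACKEND_NAME after pytest_configure
from ngraph.impl import Function, NodeVector, Shape, Type     # assumed import path
from ngraph.impl.op import Add, Parameter                     # assumed import path
from ngraph.impl.runtime import Backend                       # assumed import path

element_type = Type.f32
shape = Shape([2, 2])
A = Parameter(element_type, shape)
B = Parameter(element_type, shape)
parameter_list = [A, B]
function = Function(NodeVector([Add(A, B)]), parameter_list, 'test')

# The only line this commit touches in each test: the backend now comes from
# test.BACKEND_NAME instead of pytest.config.getoption('backend').
backend = Backend.create(test.BACKEND_NAME)
a = backend.create_tensor(element_type, shape)
b = backend.create_tensor(element_type, shape)
result = backend.create_tensor(element_type, shape)

handle = backend.compile(function)
handle.call([result], [a, b])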