Commit 9aa63947 authored by arogowie-intel's avatar arogowie-intel Committed by Scott Cyphers

[Py] Python wrappers for nGraph ops. (#760)

* Update avg_pool signature to be consistent with ngraph AvgPool.

- Change parameter order and use more appropriate parameter names.
- Add docstring.

* Single file with test for pooling operations.

- Add test for avg_pool for 2D case.

* Code refactoring.

- Rename parameters to be more descriptive.
- Change function return type to enable general usage.

* Add UT for ceil, ceiling and abs.

* Update docstrings and type annotations.

* Add UT for broadcast operation.

* Add UT for concat operation.

- Minor change: add an optional node name parameter.

* Code formatting.

* UT for constant and convert operations.

- Move broadcast test to test_basic.py file.

* Update function signature.

- Update to be consistent with nGraph object API.

* Review fix.

- Update type annotations.
- Update docstring.
- Change local variables names to be consistent.

* Refactoring - put some commonly used functions into util.py.

* Update convolution operation signature.

- Add docstring.
- Update to be consistent with respective nGraph object API.
- Formatting in UT.

* Fix import statements under py27.

* Correct quotation marks.

* Revert changes: use AxisSet as a return type.

* Review fix.

- Update parameter names to be consistent with respective parameters of nGraph object constructors.

* Review fix

- Set seed for random number generation to be repeatable.
- Use numpy.allclose.

* Change serialize routine

* Change serialize routine call for onnx

* Run clang-format on constant.cpp

* Update function type annotation.
parent ac9bd2ef
......@@ -246,7 +246,14 @@ Node.__ge__ = greater_eq
# Custom ops
@nameable_op
def broadcast(node, new_shape, axis=None, name=None): # type: (Node, TensorShape, int, str) -> Node
"""Return node which broadcasts input node values to specified shape."""
"""Return node which broadcasts input node values to specified shape.
:param node: The node with input tensor data.
:param new_shape: The new shape we want to broadcast the tensor to.
:param axis: The axis along which we perform broadcasting.
:param name: Optional new name for output node.
:return: New node with broadcasted shape.
"""
return Broadcast(node, Shape(new_shape), get_broadcast_axes(new_shape, node.shape, axis))
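For reference, a minimal usage sketch of the new signature (it only builds graph nodes; the shapes mirror test_broadcast further below):

import numpy as np
import ngraph as ng

param = ng.parameter([3], name='A', dtype=np.float32)
rows = ng.broadcast(param, [3, 3])      # default: each row of the output is a copy of the input
cols = ng.broadcast(param, [3, 3], 0)   # axis=0: each column is a copy instead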
......@@ -274,49 +281,78 @@ def dot(left_node, right_node, name=None):
# convpool ops
@nameable_op
def convolution(x, # type: Node
weights, # type: Node
strides=None, # type: List[int]
dilation=None, # type: List[int]
padding_above=None, # type: List[int]
padding_below=None, # type: List[int]
name=None, # type: str
def convolution(data_batch, # type: Node
filter_weights, # type: Node
filter_strides=None, # type: List[int]
filter_dilation_strides=None, # type: List[int]
padding_below=None, # type: List[int]
padding_above=None, # type: List[int]
data_dilation_strides=None, # type: List[int]
name=None, # type: str
):
# type: (...) -> Node
"""Return convolution node."""
if strides is None:
strides = [1] * (len(x.shape) - 2) # Default to as many 1s as spatial dimensions of input.
if dilation is None:
dilation = [1] * (len(x.shape) - 2)
"""Return node performing batched convolution operation.
:param data_batch: The node providing data batch tensor.
:param filter_weights: The node providing filters tensor.
:param filter_strides: The kernel window movement strides.
:param filter_dilation_strides: The filters dilation strides.
:param padding_below: The number of zero-padding elements to add on each axis below the 0
coordinate.
:param padding_above: The number of zero-padding elements to add on each axis above the max
coordinate.
:param data_dilation_strides: The data batch dilation strides.
:param name: The optional new name for output node.
:return: New node performing batched convolution operation.
"""
spatial_dim_count = len(data_batch.shape) - 2
if filter_strides is None:
filter_strides = [1] * spatial_dim_count
if filter_dilation_strides is None:
filter_dilation_strides = [1] * spatial_dim_count
if padding_above is None:
padding_above = [0] * (len(x.shape) - 2)
padding_above = [0] * spatial_dim_count
if padding_below is None:
padding_below = [0] * (len(x.shape) - 2)
padding_below = [0] * spatial_dim_count
if data_dilation_strides is None:
data_dilation_strides = [1] * spatial_dim_count
return Convolution(x, weights, Strides(strides), Strides(dilation),
CoordinateDiff(padding_above), CoordinateDiff(padding_below))
return Convolution(data_batch, filter_weights, Strides(filter_strides),
Strides(filter_dilation_strides), CoordinateDiff(padding_below),
CoordinateDiff(padding_above), Strides(data_dilation_strides))
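A usage sketch of the renamed parameters, assuming a 1 x 1 x 9 x 9 data batch and a 1 x 1 x 3 x 3 filter as in test_convolution_2d further below:

import numpy as np
import ngraph as ng

data = ng.parameter([1, 1, 9, 9], name='data', dtype=np.float32)        # N x C x H x W
filters = ng.parameter([1, 1, 3, 3], name='filters', dtype=np.float32)

conv = ng.convolution(data, filters)                                     # defaults: stride 1, no padding
strided = ng.convolution(data, filters, filter_strides=[2, 2])
dilated = ng.convolution(data, filters, filter_dilation_strides=[2, 2])
padded = ng.convolution(data, filters, padding_below=[1, 1], padding_above=[1, 1])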
@nameable_op
def avg_pool(x, # type: Node
def avg_pool(data_batch, # type: Node
window_shape, # type: TensorShape
strides=None, # type: List[int]
padding_above=None, # type: List[int]
padding_below=None, # type: List[int]
zero_pad=True, # type: bool
window_strides=None, # type: List[int]
padding_below=None, # type: TensorShape
padding_above=None, # type: TensorShape
include_padding=False, # type: bool
name=None, # type: str
):
# type: (...) -> Node
"""Return average pooling node."""
if strides is None:
strides = [1] * len(window_shape) # Default to as many 1s as spatial dimensions of input.
"""Return average pooling node.
:param data_batch: The input node providing data.
:param window_shape: The pooling window shape.
:param window_strides: The window movement strides.
:param padding_below: The optional padding below the input data, filled with zeros.
:param padding_above: The optional padding above the input data, filled with zeros.
:param include_padding: Whether or not to include zero padding in average computations.
:param name: Optional name for the new output node.
:return: New node with AvgPool operation applied on its data.
"""
spatial_dim_count = len(window_shape)
if window_strides is None:
window_strides = [1] * spatial_dim_count
if padding_above is None:
padding_above = [0] * len(window_shape)
padding_above = [0] * spatial_dim_count
if padding_below is None:
padding_below = [0] * len(window_shape)
padding_below = [0] * spatial_dim_count
return AvgPool(x, Shape(window_shape), Strides(strides),
Shape(padding_above), Shape(padding_below), zero_pad)
return AvgPool(data_batch, Shape(window_shape), Strides(window_strides), Shape(padding_below),
Shape(padding_above), include_padding)
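A corresponding sketch for the updated avg_pool signature (same 1 x 1 x 4 x 4 input as in test_avg_pool_2d below; it only builds the nodes):

import numpy as np
import ngraph as ng

data = ng.parameter([1, 1, 4, 4], name='data', dtype=np.float32)
pooled = ng.avg_pool(data, [2, 2], [2, 2])                        # 2 x 2 window, stride 2
padded = ng.avg_pool(data, [2, 2], [2, 2], padding_below=[1, 1],
                     padding_above=[1, 1], include_padding=True)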
@nameable_op
......@@ -413,11 +449,12 @@ def slice(node, lower_bounds, upper_bounds, strides=None, name=None):
@nameable_op
def concat(nodes, axis): # type: (List[Node], int) -> Node
def concat(nodes, axis, name=None): # type: (List[Node], int, str) -> Node
"""Concatenate input nodes into single new node along specified axis.
:param nodes: The nodes we want concatenate into single new node.
:param axis: The axis along which we want to concatenate input nodes.
:param name: The optional new name for output node.
:return: Return new node that is a concatenation of input nodes.
"""
return Concat(NodeVector(nodes), axis)
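A short usage sketch for the optional name parameter added here (shapes as in test_concat below):

import numpy as np
import ngraph as ng

a = ng.parameter([2, 2], name='A', dtype=np.float32)
b = ng.parameter([1, 2], name='B', dtype=np.float32)
joined = ng.concat([a, b], 0, name='joined')   # result shape: (3, 2)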
......
......@@ -93,14 +93,13 @@ class Computation:
result_arr = result_arr.reshape(result_shape)
return result_arr
def serialize(self, indent=0, bin_const_data=False): # type: (int, bool) -> str
def serialize(self, indent=0): # type: (int) -> str
"""Serialize function (compute graph) to a JSON string.
:param indent: The indentation level of the serialized output.
:param bin_const_data: constant data should be binary or not
:return: The serialized model.
"""
return serialize(self.function, indent, bin_const_data)
return serialize(self.function, indent)
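A sketch of the updated call, following test_serialization below (the 'CPU' backend name is an assumption for illustration):

import json
import numpy as np
import ngraph as ng

runtime = ng.runtime(manager_name='CPU')
a = ng.parameter([2, 2], name='A', dtype=np.float32)
b = ng.parameter([2, 2], name='B', dtype=np.float32)
computation = runtime.computation(a + b, a, b)

graph = json.loads(computation.serialize(indent=2))
print(graph[0]['name'], len(graph[0]['ops']))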
@staticmethod
def _get_buffer_size(element_type, element_count): # type: (TensorViewType, int) -> int
......
......@@ -14,32 +14,35 @@
# limitations under the License.
# ******************************************************************************
import logging
from typing import Optional, List
from typing import List
import ngraph as ng
from ngraph.impl import AxisSet, Node
from ngraph.impl import Node, AxisSet
from ngraph.utils.types import TensorShape, get_dtype, make_constant_node, NodeInput
log = logging.getLogger(__file__)
def get_broadcast_axes(left_shape, right_shape, axis):
# type: (TensorShape, TensorShape, Optional[int]) -> AxisSet
def get_broadcast_axes(output_shape, input_shape, axis=None):
# type: (TensorShape, TensorShape, int) -> AxisSet
"""Generate a list of broadcast axes for ngraph++ broadcast.
Informally, a broadcast "adds" axes to the input tensor,
replicating elements from the input tensor as needed to fill the new dimensions.
Function calculate which of the output axes is being so added.
For example, an output shape of `{2,5,6,2,8}` and input shape of `{2,6}` means
that the broadcast axes must be `{1,3,4}`.
The function calculates which of the output axes are added in this way.
:param output_shape: The new shape for the output tensor.
:param input_shape: The shape of input tensor.
:param axis: The axis along which we want to replicate elements.
:return: The indices of added axes.
"""
axes_indexes = list(range(0, len(left_shape)))
axes_indexes = list(range(0, len(output_shape)))
if axis is None:
right_begin = len(left_shape) - len(right_shape)
output_begin = len(output_shape) - len(input_shape)
else:
right_begin = axis
right_axes_indexes = list(range(right_begin, right_begin + len(right_shape)))
output_begin = axis
right_axes_indexes = list(range(output_begin, output_begin + len(input_shape)))
for index in reversed(right_axes_indexes):
del axes_indexes[index]
return AxisSet(set(axes_indexes))
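As a quick sanity check, a pure-Python mirror of the logic above (returning a plain set instead of AxisSet, an assumption made only for illustration) reproduces the trailing-alignment default exercised by test_broadcast:

def _broadcast_axes(output_shape, input_shape, axis=None):
    # Same algorithm as get_broadcast_axes, but with plain Python sets.
    axes = list(range(len(output_shape)))
    begin = len(output_shape) - len(input_shape) if axis is None else axis
    for index in reversed(range(begin, begin + len(input_shape))):
        del axes[index]
    return set(axes)

assert _broadcast_axes([3, 4, 2, 4], [4]) == {0, 1, 2}  # numpy-style trailing alignment
assert _broadcast_axes([3, 3], [3], axis=0) == {1}      # input pinned to axis 0, replicated along axis 1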
......
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
......
......@@ -26,12 +26,10 @@ void regclass_pyngraph_Serializer(py::module m)
{
/*
std::string serialize(std::shared_ptr<ngraph::Function>,
size_t indent = 0,
bool binary_constant_data = false);
size_t indent = 0);
*/
m.def("serialize",
(std::string(*)(std::shared_ptr<ngraph::Function>, size_t, bool)) & ngraph::serialize,
(std::string(*)(std::shared_ptr<ngraph::Function>, size_t)) & ngraph::serialize,
py::arg(),
py::arg("indent") = 0,
py::arg());
py::arg("indent") = 0);
}
......@@ -13,32 +13,3 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import print_function, division
import numpy as np
import pytest
import ngraph as ng
def test_avg_pooling_3d():
manager_name = pytest.config.getoption('backend', default='CPU')
rt = ng.runtime(manager_name=manager_name)
data = np.arange(11, 27, dtype=np.float32)
data = data.reshape((1, 1, 4, 4))
data = np.broadcast_to(data, (1, 1, 4, 4, 4))
param = ng.parameter(data.shape)
avgpool = ng.avg_pool(param,
[2, 2, 2],
[2, 2, 2])
comp = rt.computation(avgpool, param)
result = comp(data)
result_ref = [[[[[13.5, 15.5],
[21.5, 23.5]],
[[13.5, 15.5],
[21.5, 23.5]]]]]
np.testing.assert_allclose(result, result_ref, rtol=0.001)
# ******************************************************************************
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
......@@ -13,26 +13,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import pytest
import json
import ngraph as ng
from test.ngraph.util import get_runtime, run_op_node
@pytest.mark.parametrize('dtype', [np.float32, np.float64,
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64])
def test_simple_computation_on_ndarrays(dtype):
manager_name = pytest.config.getoption('backend', default='CPU')
runtime = get_runtime()
shape = [2, 2]
parameter_a = ng.parameter(shape, dtype=dtype, name='A')
parameter_b = ng.parameter(shape, dtype=dtype, name='B')
parameter_c = ng.parameter(shape, dtype=dtype, name='C')
model = (parameter_a + parameter_b) * parameter_c
runtime = ng.runtime(manager_name=manager_name)
computation = runtime.computation(model, parameter_a, parameter_b, parameter_c)
value_a = np.array([[1, 2], [3, 4]], dtype=dtype)
......@@ -56,5 +55,79 @@ def test_serialization():
serialized = computation.serialize(2)
serial_json = json.loads(serialized)
assert serial_json[0]["name"] != ''
assert 10 == len(serial_json[0]["ops"])
\ No newline at end of file
assert serial_json[0]['name'] != ''
assert 10 == len(serial_json[0]['ops'])
def test_broadcast():
input_data = np.array([1, 2, 3])
new_shape = [3, 3]
expected = [[1, 2, 3],
[1, 2, 3],
[1, 2, 3]]
result = run_op_node(input_data, ng.broadcast, new_shape)
assert np.allclose(result, expected)
axis = 0
expected = [[1, 1, 1],
[2, 2, 2],
[3, 3, 3]]
result = run_op_node(input_data, ng.broadcast, new_shape, axis)
assert np.allclose(result, expected)
input_data = np.arange(4)
new_shape = [3, 4, 2, 4]
expected = np.broadcast_to(input_data, new_shape)
result = run_op_node(input_data, ng.broadcast, new_shape)
assert np.allclose(result, expected)
@pytest.mark.parametrize('val_type, input_data', [
(bool, np.zeros((2, 2), dtype=int)),
])
def test_convert_to_bool(val_type, input_data):
expected = np.array(input_data, dtype=val_type)
result = run_op_node(input_data, ng.convert, val_type)
assert np.allclose(result, expected)
@pytest.mark.parametrize('val_type, range_start, range_end, in_dtype', [
(np.float32, -8, 8, np.int32),
(np.float64, -16383, 16383, np.int64),
])
def test_convert_to_float(val_type, range_start, range_end, in_dtype):
np.random.seed(133391)
input_data = np.random.randint(range_start, range_end, size=(2, 2), dtype=in_dtype)
expected = np.array(input_data, dtype=val_type)
result = run_op_node(input_data, ng.convert, val_type)
assert np.allclose(result, expected)
@pytest.mark.parametrize('val_type', [
np.int8,
np.int16,
np.int32,
np.int64,
])
def test_convert_to_int(val_type):
np.random.seed(133391)
input_data = np.ceil(-8 + np.random.rand(2, 3, 4) * 16)
expected = np.array(input_data, dtype=val_type)
result = run_op_node(input_data, ng.convert, val_type)
assert np.allclose(result, expected)
@pytest.mark.parametrize('val_type', [
np.uint8,
np.uint16,
np.uint32,
np.uint64,
])
def test_convert_to_uint(val_type):
np.random.seed(133391)
input_data = np.ceil(np.random.rand(2, 3, 4) * 16)
expected = np.array(input_data, dtype=val_type)
result = run_op_node(input_data, ng.convert, val_type)
assert np.allclose(result, expected)
......@@ -15,14 +15,13 @@
# ******************************************************************************
import numpy as np
import pytest
import ngraph as ng
from test.ngraph.util import get_runtime
def test_default_arguments_convolution_2d():
manager_name = pytest.config.getoption('backend', default='CPU')
runtime = ng.runtime(manager_name=manager_name)
def test_convolution_2d():
runtime = get_runtime()
# input_x should have shape N(batch) x C x H x W
input_x = ng.constant(np.array([
[0., 0., 5., 5., 0., 0., 0., 0., 0.],
......@@ -46,48 +45,49 @@ def test_default_arguments_convolution_2d():
padding_above=[1, 1], padding_below=[1, 1]))
result = model()
assert np.array_equal(result,
np.array([[[[0., -15., -15., 15., 15., 0., 0., 0., 0.],
[0., -20., -20., 20., 20., 0., 0., 0., 0.],
[0., -20., -20., 20., 20., 0., 0., 0., 0.],
[0., -20., -20., 20., 20., 0., 0., 0., 0.],
[0., -20., -20., 20., 20., 0., 0., 0., 0.],
[0., -20., -20., 20., 20., 0., 0., 0., 0.],
[0., -20., -20., 20., 20., 0., 0., 0., 0.],
[0., -20., -20., 20., 20., 0., 0., 0., 0.],
[0., -15., -15., 15., 15., 0., 0., 0., 0.]]]],
dtype=np.float32))
assert np.allclose(result,
np.array([[[[0., -15., -15., 15., 15., 0., 0., 0., 0.],
[0., -20., -20., 20., 20., 0., 0., 0., 0.],
[0., -20., -20., 20., 20., 0., 0., 0., 0.],
[0., -20., -20., 20., 20., 0., 0., 0., 0.],
[0., -20., -20., 20., 20., 0., 0., 0., 0.],
[0., -20., -20., 20., 20., 0., 0., 0., 0.],
[0., -20., -20., 20., 20., 0., 0., 0., 0.],
[0., -20., -20., 20., 20., 0., 0., 0., 0.],
[0., -15., -15., 15., 15., 0., 0., 0., 0.]]]],
dtype=np.float32))
# convolution with padding=0 should produce 7 x 7 output:
model = runtime.computation(ng.convolution(input_x, input_filter))
result = model()
assert np.array_equal(result,
np.array([[[[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0]]]],
dtype=np.float32))
assert np.allclose(result,
np.array([[[[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0]]]],
dtype=np.float32))
# convolution with strides=2 should produce 4 x 4 output:
model = runtime.computation(ng.convolution(input_x, input_filter, strides=[2, 2]))
model = runtime.computation(ng.convolution(input_x, input_filter, filter_strides=[2, 2]))
result = model()
assert np.array_equal(result,
np.array([[[[-20., 20., 0., 0.],
[-20., 20., 0., 0.],
[-20., 20., 0., 0.],
[-20., 20., 0., 0.]]]],
dtype=np.float32))
assert np.allclose(result,
np.array([[[[-20., 20., 0., 0.],
[-20., 20., 0., 0.],
[-20., 20., 0., 0.],
[-20., 20., 0., 0.]]]],
dtype=np.float32))
# convolution with dilation=2 should produce 5 x 5 output:
model = runtime.computation(ng.convolution(input_x, input_filter, dilation=(2, 2)))
model = runtime.computation(ng.convolution(input_x, input_filter,
filter_dilation_strides=(2, 2)))
result = model()
assert np.array_equal(result,
np.array([[[[0, 0, 20, 20, 0],
[0, 0, 20, 20, 0],
[0, 0, 20, 20, 0],
[0, 0, 20, 20, 0],
[0, 0, 20, 20, 0]]]],
dtype=np.float32))
assert np.allclose(result,
np.array([[[[0, 0, 20, 20, 0],
[0, 0, 20, 20, 0],
[0, 0, 20, 20, 0],
[0, 0, 20, 20, 0],
[0, 0, 20, 20, 0]]]],
dtype=np.float32))
......@@ -19,6 +19,7 @@ import numpy as np
import pytest
import ngraph as ng
from test.ngraph.util import get_runtime
@pytest.mark.parametrize('ng_api_helper,numpy_function', [
......@@ -36,8 +37,7 @@ import ngraph as ng
(ng.less_eq, np.less_equal),
])
def test_binary_op(ng_api_helper, numpy_function):
manager_name = pytest.config.getoption('backend', default='CPU')
runtime = ng.runtime(manager_name=manager_name)
runtime = get_runtime()
shape = [2, 2]
parameter_a = ng.parameter(shape, name='A', dtype=np.float32)
......@@ -69,8 +69,7 @@ def test_binary_op(ng_api_helper, numpy_function):
(ng.less_eq, np.less_equal),
])
def test_binary_op_with_scalar(ng_api_helper, numpy_function):
manager_name = pytest.config.getoption('backend', default='CPU')
runtime = ng.runtime(manager_name=manager_name)
runtime = get_runtime()
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)
......@@ -99,8 +98,7 @@ def test_binary_op_with_scalar(ng_api_helper, numpy_function):
(operator.le, np.less_equal),
])
def test_binary_operators(operator, numpy_function):
manager_name = pytest.config.getoption('backend', default='CPU')
runtime = ng.runtime(manager_name=manager_name)
runtime = get_runtime()
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
value_b = np.array([[4, 5], [1, 7]], dtype=np.float32)
......@@ -129,8 +127,7 @@ def test_binary_operators(operator, numpy_function):
(operator.le, np.less_equal),
])
def test_binary_operators_with_scalar(operator, numpy_function):
manager_name = pytest.config.getoption('backend', default='CPU')
runtime = ng.runtime(manager_name=manager_name)
runtime = get_runtime()
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)
......
# ******************************************************************************
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import pytest
import ngraph as ng
from test.ngraph.util import get_runtime, run_op_numeric_data
def test_concat():
a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])
axis = 0
expected = np.concatenate((a, b), axis=0)
runtime = get_runtime()
parameter_a = ng.parameter(list(a.shape), name='A', dtype=np.float32)
parameter_b = ng.parameter(list(b.shape), name='B', dtype=np.float32)
node = ng.concat([parameter_a, parameter_b], axis)
computation = runtime.computation(node, parameter_a, parameter_b)
result = computation(a, b)
assert np.allclose(result, expected)
@pytest.mark.parametrize('val_type, value', [
(bool, False),
(bool, np.empty((2, 2), dtype=bool)),
])
def test_constant_from_bool(val_type, value):
expected = np.array(value, dtype=val_type)
result = run_op_numeric_data(value, ng.constant, val_type)
assert np.allclose(result, expected)
@pytest.mark.parametrize('val_type, value', [
(np.float32, np.float32(0.1234)),
(np.float64, np.float64(0.1234)),
(np.int8, np.int8(-63)),
(np.int16, np.int16(-12345)),
(np.int32, np.int32(-123456)),
(np.int64, np.int64(-1234567)),
(np.uint8, np.uint8(63)),
(np.uint16, np.uint16(12345)),
(np.uint32, np.uint32(123456)),
(np.uint64, np.uint64(1234567)),
])
def test_constant_from_scalar(val_type, value):
expected = np.array(value, dtype=val_type)
result = run_op_numeric_data(value, ng.constant, val_type)
assert np.allclose(result, expected)
@pytest.mark.parametrize('val_type', [
np.float32,
np.float64,
])
def test_constant_from_float_array(val_type):
np.random.seed(133391)
input_data = np.array(-1 + np.random.rand(2, 3, 4) * 2, dtype=val_type)
result = run_op_numeric_data(input_data, ng.constant, val_type)
assert np.allclose(result, input_data)
@pytest.mark.parametrize('val_type, range_start, range_end', [
(np.int8, -8, 8),
(np.int16, -64, 64),
(np.int32, -1024, 1024),
(np.int64, -16383, 16383),
(np.uint8, 0, 8),
(np.uint16, 0, 64),
(np.uint32, 0, 1024),
(np.uint64, 0, 16383),
])
def test_constant_from_integer_array(val_type, range_start, range_end):
np.random.seed(133391)
input_data = np.array(np.random.randint(range_start, range_end, size=(2, 2)), dtype=val_type)
result = run_op_numeric_data(input_data, ng.constant, val_type)
assert np.allclose(result, input_data)
......@@ -17,43 +17,44 @@ import numpy as np
import pytest
import ngraph as ng
from test.ngraph.util import run_op_numeric_data, run_op_node
def _get_runtime():
manager_name = pytest.config.getoption('backend', default='CPU')
return ng.runtime(manager_name=manager_name)
def _run_unary_op_node(input_data, unary_op):
runtime = _get_runtime()
parameter_a = ng.parameter(input_data.shape, name='A', dtype=np.float32)
node = unary_op(parameter_a)
computation = runtime.computation(node, parameter_a)
return computation(input_data)
@pytest.mark.parametrize('ng_api_fn, numpy_fn, range_start, range_end', [
(ng.absolute, np.abs, -1, 1),
(ng.abs, np.abs, -1, 1),
(ng.acos, np.arccos, -1, 1),
(ng.asin, np.arcsin, -1, 1),
(ng.atan, np.arctan, -100, 100),
(ng.ceiling, np.ceil, -100, 100),
(ng.ceil, np.ceil, -100, 100),
])
def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
np.random.seed(133391)
input_data = range_start + np.random.rand(2, 3, 4) * (range_end - range_start)
expected = numpy_fn(input_data)
result = run_op_node(input_data, ng_api_fn)
assert np.allclose(result, expected)
def _run_unary_op_numeric_data(input_data, unary_op):
runtime = _get_runtime()
node = unary_op(input_data)
computation = runtime.computation(node)
return computation()
result = run_op_numeric_data(input_data, ng_api_fn)
assert np.allclose(result, expected)
@pytest.mark.parametrize('ng_api_fn, numpy_fn, input_data', [
(ng.absolute, np.abs, -1 + np.random.rand(2, 3, 4) * 2),
(ng.absolute, np.abs, np.float32(-3)),
(ng.acos, np.arccos, -1 + np.random.rand(2, 3, 4) * 2),
(ng.abs, np.abs, np.float32(-3)),
(ng.acos, np.arccos, np.float32(-0.5)),
(ng.asin, np.arcsin, -1 + np.random.rand(2, 3, 4) * 2),
(ng.asin, np.arcsin, np.float32(-0.5)),
(ng.atan, np.arctan, -100 + np.random.rand(2, 3, 4) * 200),
(ng.atan, np.arctan, np.float32(-0.5)),
(ng.ceiling, np.ceil, np.float32(1.5)),
(ng.ceil, np.ceil, np.float32(1.5)),
])
def test_unary_op(ng_api_fn, numpy_fn, input_data):
def test_unary_op_scalar(ng_api_fn, numpy_fn, input_data):
expected = numpy_fn(input_data)
result = _run_unary_op_node(input_data, ng_api_fn)
result = run_op_node(input_data, ng_api_fn)
assert np.allclose(result, expected)
result = _run_unary_op_numeric_data(input_data, ng_api_fn)
result = run_op_numeric_data(input_data, ng_api_fn)
assert np.allclose(result, expected)
# ******************************************************************************
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import pytest
import ngraph as ng
from test.ngraph.util import get_runtime
@pytest.fixture
def _ndarray_1x1x4x4():
return np.arange(11, 27, dtype=np.float32).reshape(1, 1, 4, 4)
def test_avg_pool_2d(_ndarray_1x1x4x4):
runtime = get_runtime()
input_data = _ndarray_1x1x4x4
param = ng.parameter(input_data.shape, name='A', dtype=np.float32)
window_shape = [2, 2]
strides = [2, 2]
expected = [[[[13.5, 15.5],
[21.5, 23.5]]]]
avg_pool_node = ng.avg_pool(param, window_shape, strides)
computation = runtime.computation(avg_pool_node, param)
result = computation(input_data)
assert np.allclose(result, expected)
expected = [[[[13.5, 14.5, 15.5],
[17.5, 18.5, 19.5],
[21.5, 22.5, 23.5]]]]
avg_pool_node = ng.avg_pool(param, window_shape)
computation = runtime.computation(avg_pool_node, param)
result = computation(input_data)
assert np.allclose(result, expected)
padding_below = [1, 1]
padding_above = [1, 1]
strides = [2, 2]
include_pad = False
expected = [[[[11.0, 12.5, 14.0],
[17.0, 18.5, 20.0],
[23.0, 24.5, 26.0]]]]
avg_pool_node = ng.avg_pool(param, window_shape, strides, padding_below, padding_above,
include_pad)
computation = runtime.computation(avg_pool_node, param)
result = computation(input_data)
assert np.allclose(result, expected)
include_pad = True
expected = [[[[2.75, 6.25, 3.5],
[8.5, 18.5, 10.0],
[5.75, 12.25, 6.5]]]]
avg_pool_node = ng.avg_pool(param, window_shape, strides, padding_below, padding_above,
include_pad)
computation = runtime.computation(avg_pool_node, param)
result = computation(input_data)
assert np.allclose(result, expected)
def test_avg_pooling_3d(_ndarray_1x1x4x4):
rt = get_runtime()
data = _ndarray_1x1x4x4
data = np.broadcast_to(data, (1, 1, 4, 4, 4))
param = ng.parameter(list(data.shape))
window_shape = [2, 2, 2]
strides = [2, 2, 2]
avgpool = ng.avg_pool(param, window_shape, strides)
comp = rt.computation(avgpool, param)
result = comp(data)
result_ref = [[[[[13.5, 15.5],
[21.5, 23.5]],
[[13.5, 15.5],
[21.5, 23.5]]]]]
assert np.allclose(result, result_ref)
......@@ -17,6 +17,7 @@ import numpy as np
import pytest
import ngraph as ng
from test.ngraph.util import run_op_node
@pytest.mark.parametrize('ng_api_helper, numpy_function, reduction_axes', [
......@@ -34,17 +35,10 @@ import ngraph as ng
(ng.prod, np.prod, (0, 2)),
])
def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes):
manager_name = pytest.config.getoption('backend', default='CPU')
runtime = ng.runtime(manager_name=manager_name)
shape = [2, 4, 3, 2]
parameter_a = ng.parameter(shape, name='A', dtype=np.float32)
model = ng_api_helper(parameter_a, reduction_axes)
computation = runtime.computation(model, parameter_a)
value_a = np.random.randn(*shape).astype(np.float32)
np.random.seed(133391)
input_data = np.random.randn(*shape).astype(np.float32)
result = computation(value_a)
expected = numpy_function(value_a, axis=reduction_axes)
expected = numpy_function(input_data, axis=reduction_axes)
result = run_op_node(input_data, ng_api_helper, reduction_axes)
assert np.allclose(result, expected)
# ******************************************************************************
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import pytest
import ngraph as ng
def get_runtime():
"""Return runtime object."""
manager_name = pytest.config.getoption('backend', default='CPU')
return ng.runtime(manager_name=manager_name)
def run_op_node(input_data, op_fun, *args):
"""Run computation on node performing `op_fun`.
`op_fun` needs to accept a node as an argument.
:param input_data: The input data for the computation being performed.
:param op_fun: The function handler for the operation we want to carry out.
:param args: The arguments passed to the operation we want to carry out.
:return: The result of the computation.
"""
runtime = get_runtime()
parameter_a = ng.parameter(input_data.shape, name='A', dtype=np.float32)
node = op_fun(parameter_a, *args)
computation = runtime.computation(node, parameter_a)
return computation(input_data)
def run_op_numeric_data(input_data, op_fun, *args):
"""Run computation on node performing `op_fun`.
`op_fun` needs to accept a scalar or an array.
:param input_data: The input data for the computation being performed.
:param op_fun: The function handler for the operation we want to carry out.
:param args: The arguments passed to the operation we want to carry out.
:return: The result of the computation.
"""
runtime = get_runtime()
node = op_fun(input_data, *args)
computation = runtime.computation(node)
return computation()
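Usage sketch for the two helpers (the import path assumes the test package layout used in this commit):

import numpy as np
import ngraph as ng
from test.ngraph.util import run_op_node, run_op_numeric_data

broadcasted = run_op_node(np.array([1., 2., 3.], dtype=np.float32), ng.broadcast, [3, 3])
absolute = run_op_numeric_data(np.float32(-3), ng.abs)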
......@@ -14,6 +14,8 @@
# limitations under the License.
# ******************************************************************************
# flake8: noqa
from __future__ import absolute_import
import pytest
import numpy as np
......