Commit 86fd6a53 authored by arogowie-intel, committed by Scott Cyphers

[Py] Python wrappers for nGraph operations. (#793)

* Add python wrappers for nGraph Cos, Cosh operations.

- Update docstrings.

* Enable the auxiliary test function that runs computations to accept multiple input nodes.

* Python wrapper for nGraph Dot function with UT.

* Update python wrappers for nGraph Exp and Equal operations.

- Update docstrings.
- Add UT for exp.

* Update python wrappers for nGraph Floor, Greater, GreaterEq, Less, LessEq operations.

- Update docstrings.
- Add UT for ng.floor.

* Update python wrapper for nGraph Log operation.

- Update docstring.
- Add UT.
parent 1a7ab108
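
For orientation before the diffs, here is a minimal sketch of how the newly wrapped operations compose into a graph; the shapes and parameter names are the editor's own, not taken from the commit:

import numpy as np
import ngraph as ng

# Build (but do not yet execute) a small graph from the new wrappers.
a = ng.parameter([2, 4], name='A', dtype=np.float32)
b = ng.parameter([4, 3], name='B', dtype=np.float32)
graph = ng.log(ng.exp(ng.dot(a, b)))  # matrix product, then element-wise exp and log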
......@@ -29,6 +29,8 @@ from ngraph.ops import concat
from ngraph.ops import constant
from ngraph.ops import convert
from ngraph.ops import convolution
from ngraph.ops import cos
from ngraph.ops import cosh
from ngraph.ops import divide
from ngraph.ops import dot
from ngraph.ops import equal
......
......@@ -21,9 +21,9 @@ from ngraph.impl import AxisSet, AxisVector, Coordinate, CoordinateDiff, Node, N
Shape, Strides
from ngraph.impl.op import Abs, Acos, Add, Asin, Atan, AvgPool, Broadcast, Ceiling, Concat, \
Constant, Convert, Convolution, Divide, Dot, Equal, Exp, Floor, Greater, GreaterEq, Less, \
LessEq, Log, Max, Maximum, MaxPool, Min, Minimum, Multiply, Negative, Not, NotEqual, Parameter,\
Product, Reshape, Slice, Softmax, Sqrt, Subtract, Sum, Tanh
Constant, Convert, Convolution, Cos, Cosh, Divide, Dot, Equal, Exp, Floor, Greater, GreaterEq, \
Less, LessEq, Log, Max, Maximum, MaxPool, Min, Minimum, Multiply, Negative, Not, NotEqual, \
Parameter, Product, Reshape, Slice, Softmax, Sqrt, Subtract, Sum, Tanh
from typing import Iterable, List
......@@ -47,7 +47,13 @@ def parameter(shape, dtype=np.float32, name=None):
@nameable_op
def constant(value, dtype=None, name=None): # type: (NumericData, NumericType, str) -> Constant
"""Return an ngraph Constant object with the specified value."""
"""Create a Constant node from provided value.
:param value: One of: array of values or scalar to initialize the node with.
:param dtype: The data type of provided data.
:param name: Optional name for output node.
:return: The Constant node initialized with provided data.
"""
return make_constant_node(value, dtype)
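
A quick, hypothetical illustration of the constant() wrapper documented above (values are the editor's, not part of the commit):

import numpy as np
import ngraph as ng

half = ng.constant(0.5, np.float32)          # scalar constant
table = ng.constant(np.arange(4), np.int32)  # Constant node built from an array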
......@@ -96,6 +102,28 @@ def atan(node, name=None): # type: (NodeInput, str) -> Node
return Atan(node)
@unary_op
def cos(node, name=None): # type: (NodeInput, str) -> Node
"""Apply cosine function on the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with the cos operation applied to it.
"""
return Cos(node)
@unary_op
def cosh(node, name=None): # type: (NodeInput, str) -> Node
"""Apply hyperbolic cosine function on the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with the cosh operation applied to it.
"""
return Cosh(node)
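# Editor's sketch (illustrative, not part of the commit): the unary wrappers build new
# nodes and compose freely; via @unary_op they also accept plain arrays or scalars in
# place of nodes. Assumes `import numpy as np` and `import ngraph as ng`.
x = ng.parameter([2, 3], name='X', dtype=np.float32)
y = ng.cosh(ng.cos(x))  # element-wise cos followed by element-wise cosh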
@unary_op
def sqrt(node, name=None): # type: (NodeInput, str) -> Node
"""Return node which applies square root to the input node elementwise."""
......@@ -104,13 +132,23 @@ def sqrt(node, name=None): # type: (NodeInput, str) -> Node
@unary_op
def exp(node, name=None): # type: (NodeInput, str) -> Node
"""Return node which applies exp to the input node elementwise."""
"""Return node which applies exp to the input node element-wise.
:param node: The node providing data for operation.
:param name: The optional name for new output node.
:return: The new node performing the natural exponential operation.
"""
return Exp(node)
@unary_op
def log(node, name=None): # type: (NodeInput, str) -> Node
"""Return node which applies natural logarithm to the input node elementwise."""
"""Return node which applies natural logarithm to the input node element-wise.
:param node: The input node providing data for operation.
:param name: The optional new name for output node.
:return: The new node performing log operation element-wise.
"""
return Log(node)
......@@ -122,13 +160,23 @@ def negative(node, name=None): # type: (NodeInput, str) -> Node
@unary_op
def floor(node, name=None): # type: (NodeInput, str) -> Node
"""Return node which applies floor to the input node elementwise."""
"""Return node which applies floor to the input node element-wise.
:param node: The input node providing data.
:param name: The optional name for new output node.
:return: The node performing element-wise floor operation.
"""
return Floor(node)
@unary_op
def ceiling(node, name=None): # type: (NodeInput, str) -> Node
"""Return node which applies ceiling to the input node elementwise."""
"""Return node which applies ceiling to the input node element-wise.
:param node: The node providing data to ceiling operation.
:param name: Optional name for output node.
:return: The node performing element-wise ceiling.
"""
return Ceiling(node)
......@@ -147,7 +195,13 @@ def reshape(node, input_order, output_shape, name=None):
# Binary ops
@binary_op
def divide(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which applies f(x) = A/B to the input nodes elementwise."""
"""Return node which applies f(x) = A/B to the input nodes element-wise.
:param left_node: The node providing dividend data.
:param right_node: The node providing divisor data.
:param name: Optional name for output node.
:return: The node performing element-wise division.
"""
return Divide(left_node, right_node)
......@@ -184,7 +238,13 @@ def maximum(left_node, right_node, name=None): # type: (NodeInput, NodeInput, s
# Logical ops
@binary_op
def equal(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which checks if input nodes are equal elementwise."""
"""Return node which checks if input nodes are equal element-wise.
:param left_node: The first input node for equal operation.
:param right_node: The second input node for equal operation.
:param name: The optional name for the new output node.
:return: The node performing element-wise equality check.
"""
return Equal(left_node, right_node)
......@@ -196,25 +256,51 @@ def not_equal(left_node, right_node, name=None): # type: (NodeInput, NodeInput,
@binary_op
def greater(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which checks if left input node is greater than the right node elementwise."""
"""Return node which checks if left input node is greater than the right node element-wise.
:param left_node: The first input node providing data.
:param right_node: The second input node providing data.
:param name: The optional new name for output node.
:return: The node performing element-wise check whether left_node is greater than right_node.
"""
return Greater(left_node, right_node)
@binary_op
def greater_eq(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which checks if left node is greater or equal to the right node elementwise."""
"""Return node which checks if left node is greater or equal to the right node element-wise.
:param left_node: The first input node providing data.
:param right_node: The second input node providing data.
:param name: The optional new name for output node.
:return: The node performing element-wise check whether left_node is greater than or equal
to right_node.
"""
return GreaterEq(left_node, right_node)
@binary_op
def less(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which checks if left input node is less than the right node elementwise."""
"""Return node which checks if left input node is less than the right node element-wise.
:param left_node: The first input node providing data.
:param right_node: The second input node providing data.
:param name: The optional new name for output node.
:return: The node performing element-wise check whether left_node is less than the right_node.
"""
return Less(left_node, right_node)
@binary_op
def less_eq(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which checks if left node is less or equal to the right node elementwise."""
"""Return node which checks if left input node is less or equal the right node element-wise.
:param left_node: The first input node providing data.
:param right_node: The second input node providing data.
:param name: The optional new name for output node.
:return: The node performing element-wise check whether left_node is less than or equal to
the right_node.
"""
return LessEq(left_node, right_node)
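
The comparison wrappers produce nodes whose results are boolean tensors. A short, hypothetical sketch using the test helper run_op_node from test.ngraph.util (inputs are the editor's):

import numpy as np
import ngraph as ng
from test.ngraph.util import run_op_node

a = np.array([1.0, 2.0, 3.0], dtype=np.float32)
b = np.array([3.0, 2.0, 1.0], dtype=np.float32)
result = run_op_node([a, b], ng.greater)  # expected element-wise result: [False, False, True]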
......@@ -273,10 +359,23 @@ def tanh(node, name=None): # type: (Node, str) -> Node
# matmul ops
@nameable_op
def dot(left_node, right_node, name=None):
# type: (Node, Node, str) -> Node
"""Return node which performs matrix multiplication of two input nodes."""
def dot(left_node, right_node, reduction_axes_count=None, name=None):
# type: (Node, Node, int, str) -> Node
"""Return node which performs generalized dot product of two input nodes.
This operation is capable of performing scalar-tensor product, matrix-vector product and
matrix multiplication.
:param left_node: The node providing left hand side data.
:param right_node: The node providing right hand side data.
:param reduction_axes_count: The number of axes to reduce during dot-product.
:param name: The optional name for output node.
:return: The new node performing the dot-product of the two input nodes.
"""
if reduction_axes_count is None:
    return Dot(left_node, right_node)
else:
    return Dot(left_node, right_node, reduction_axes_count)
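# Editor's sketch (illustrative, not part of the commit): reduction_axes_count plays the
# role of numpy.tensordot's `axes` argument. Assumes `import numpy as np` and
# `import ngraph as ng`.
a = ng.parameter([2, 4], name='A', dtype=np.float32)
b = ng.parameter([4, 3], name='B', dtype=np.float32)
matmul = ng.dot(a, b)  # default: reduce one axis, result shape [2, 3]
c = ng.parameter([2, 4], name='C', dtype=np.float32)
full = ng.dot(a, c, 2)  # reduce both axes, result is a scalar, like np.tensordot(x, y, 2)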
# convpool ops
......@@ -395,7 +494,7 @@ def max(node, reduction_axes=None, name=None):
:param node: The tensor we want to max-reduce.
:param reduction_axes: The axes to eliminate through max operation.
:param name: Optional name for input node.
:param name: Optional name for output node.
"""
reduction_axes = get_reduction_axes(node, reduction_axes)
return Max(node, AxisSet(reduction_axes))
......@@ -408,7 +507,7 @@ def min(node, reduction_axes=None, name=None):
:param node: The tensor we want to min-reduce.
:param reduction_axes: The axes to eliminate through min operation.
:param name: Optional name for input node.
:param name: Optional name for output node.
"""
reduction_axes = get_reduction_axes(node, reduction_axes)
return Min(node, AxisSet(reduction_axes))
......@@ -421,7 +520,7 @@ def prod(node, reduction_axes=None, name=None):
:param node: The tensor we want to product-reduce.
:param reduction_axes: The axes to eliminate through product operation.
:param name: Optional name for input node.
:param name: Optional name for output node.
"""
reduction_axes = get_reduction_axes(node, reduction_axes)
return Product(node, AxisSet(reduction_axes))
......
......@@ -58,15 +58,13 @@ def test_serialization():
assert serial_json[0]['name'] != ''
assert 10 == len(serial_json[0]['ops'])
def test_broadcast():
input_data = np.array([1, 2, 3])
new_shape = [3, 3]
expected = [[1, 2, 3],
[1, 2, 3],
[1, 2, 3]]
result = run_op_node(input_data, ng.broadcast, new_shape)
result = run_op_node([input_data], ng.broadcast, new_shape)
assert np.allclose(result, expected)
axis = 0
......@@ -74,13 +72,13 @@ def test_broadcast():
[2, 2, 2],
[3, 3, 3]]
result = run_op_node(input_data, ng.broadcast, new_shape, axis)
result = run_op_node([input_data], ng.broadcast, new_shape, axis)
assert np.allclose(result, expected)
input_data = np.arange(4)
new_shape = [3, 4, 2, 4]
expected = np.broadcast_to(input_data, new_shape)
result = run_op_node(input_data, ng.broadcast, new_shape)
result = run_op_node([input_data], ng.broadcast, new_shape)
assert np.allclose(result, expected)
......@@ -89,7 +87,7 @@ def test_broadcast():
])
def test_convert_to_bool(val_type, input_data):
expected = np.array(input_data, dtype=val_type)
result = run_op_node(input_data, ng.convert, val_type)
result = run_op_node([input_data], ng.convert, val_type)
assert np.allclose(result, expected)
......@@ -101,7 +99,7 @@ def test_convert_to_float(val_type, range_start, range_end, in_dtype):
np.random.seed(133391)
input_data = np.random.randint(range_start, range_end, size=(2, 2), dtype=in_dtype)
expected = np.array(input_data, dtype=val_type)
result = run_op_node(input_data, ng.convert, val_type)
result = run_op_node([input_data], ng.convert, val_type)
assert np.allclose(result, expected)
......@@ -115,7 +113,7 @@ def test_convert_to_int(val_type):
np.random.seed(133391)
input_data = np.ceil(-8 + np.random.rand(2, 3, 4) * 16)
expected = np.array(input_data, dtype=val_type)
result = run_op_node(input_data, ng.convert, val_type)
result = run_op_node([input_data], ng.convert, val_type)
assert np.allclose(result, expected)
......@@ -129,5 +127,5 @@ def test_convert_to_uint(val_type):
np.random.seed(133391)
input_data = np.ceil(np.random.rand(2, 3, 4) * 16)
expected = np.array(input_data, dtype=val_type)
result = run_op_node(input_data, ng.convert, val_type)
result = run_op_node([input_data], ng.convert, val_type)
assert np.allclose(result, expected)
# ******************************************************************************
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import pytest
import ngraph as ng
from test.ngraph.util import run_op_node
@pytest.mark.parametrize('left_shape, right_shape, reduction_axes_count, numpy_axes', [
# matrix, vector
([2, 4], [4], None, 1),
([4], [4, 2], None, 1),
# matrix, matrix
([2, 4], [4, 2], None, 1),
# result is a scalar
([2, 4], [2, 4], 2, 2),
# tensor, vector
([2, 4, 5], [5], None, 1),
([5], [5, 4, 2], None, 1),
# tensor, matrix
([2, 4, 5], [5, 4], None, 1),
([5, 4], [4, 5, 2], None, 1),
# tensor, tensor
([2, 3, 4, 5], [5, 2, 3], None, 1),
([2, 3, 4, 5], [4, 5, 2, 4], 2, 2),
])
def test_dot(left_shape, right_shape, reduction_axes_count, numpy_axes):
np.random.seed(133391)
left_input = -100.0 + np.random.rand(*left_shape) * 200.0
right_input = -100.0 + np.random.rand(*right_shape) * 200.0
expected = np.tensordot(left_input, right_input, numpy_axes)
result = run_op_node([left_input, right_input], ng.dot, reduction_axes_count)
assert np.allclose(result, expected)
def test_dot_tensor_scalar():
np.random.seed(133391)
left_input = 10.0
right_input = -100.0 + np.random.rand(2, 3, 4) * 200.0
expected = left_input * right_input
result = run_op_node([left_input, right_input], ng.dot)
assert np.allclose(result, expected)
result = run_op_node([right_input, left_input], ng.dot)
assert np.allclose(result, expected)
......@@ -28,13 +28,18 @@ from test.ngraph.util import run_op_numeric_data, run_op_node
(ng.atan, np.arctan, -100, 100),
(ng.ceiling, np.ceil, -100, 100),
(ng.ceil, np.ceil, -100, 100),
(ng.cos, np.cos, -np.pi, np.pi),
(ng.cosh, np.cosh, -np.pi, np.pi),
(ng.exp, np.exp, -100, 100),
(ng.floor, np.floor, -100, 100),
(ng.log, np.log, 0, 100),
])
def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
np.random.seed(133391)
input_data = range_start + np.random.rand(2, 3, 4) * (range_end - range_start)
expected = numpy_fn(input_data)
result = run_op_node(input_data, ng_api_fn)
result = run_op_node([input_data], ng_api_fn)
assert np.allclose(result, expected)
result = run_op_numeric_data(input_data, ng_api_fn)
......@@ -49,11 +54,16 @@ def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
(ng.atan, np.arctan, np.float32(-0.5)),
(ng.ceiling, np.ceil, np.float32(1.5)),
(ng.ceil, np.ceil, np.float32(1.5)),
(ng.cos, np.cos, np.float32(np.pi / 4.0)),
(ng.cosh, np.cosh, np.float32(np.pi / 4.0)),
(ng.exp, np.exp, np.float32(1.5)),
(ng.floor, np.floor, np.float32(1.5)),
(ng.log, np.log, np.float32(1.5)),
])
def test_unary_op_scalar(ng_api_fn, numpy_fn, input_data):
expected = numpy_fn(input_data)
result = run_op_node(input_data, ng_api_fn)
result = run_op_node([input_data], ng_api_fn)
assert np.allclose(result, expected)
result = run_op_numeric_data(input_data, ng_api_fn)
......
......@@ -40,5 +40,5 @@ def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes):
input_data = np.random.randn(*shape).astype(np.float32)
expected = numpy_function(input_data, axis=reduction_axes)
result = run_op_node(input_data, ng_api_helper, reduction_axes)
result = run_op_node([input_data], ng_api_helper, reduction_axes)
assert np.allclose(result, expected)
......@@ -16,9 +16,14 @@
import numpy as np
import pytest
import ngraph as ng
from string import ascii_uppercase
def _get_numpy_dtype(scalar):
return np.array([scalar]).dtype
def get_runtime():
"""Return runtime object."""
......@@ -37,10 +42,21 @@ def run_op_node(input_data, op_fun, *args):
:return: The result from computations.
"""
runtime = get_runtime()
parameter_a = ng.parameter(input_data.shape, name='A', dtype=np.float32)
node = op_fun(parameter_a, *args)
computation = runtime.computation(node, parameter_a)
return computation(input_data)
comp_args = []
op_fun_args = []
comp_inputs = []
for idx, data in enumerate(input_data):
    if np.isscalar(data):
        op_fun_args.append(ng.constant(data, _get_numpy_dtype(data)))
    else:
        node = ng.parameter(data.shape, name=ascii_uppercase[idx], dtype=data.dtype)
        op_fun_args.append(node)
        comp_args.append(node)
        comp_inputs.append(data)
op_fun_args.extend(args)
node = op_fun(*op_fun_args)
computation = runtime.computation(node, *comp_args)
return computation(*comp_inputs)
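# Editor's sketch (illustrative, not part of the commit): with the rework above, scalar
# inputs become Constant nodes baked into the graph, while array inputs become Parameter
# nodes ('A', 'B', ...) that are fed in when the computation is called.
example_data = -100.0 + np.random.rand(2, 3, 4) * 200.0
example_result = run_op_node([10.0, example_data], ng.dot)  # 10.0 -> Constant, data -> Parameter 'B'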
def run_op_numeric_data(input_data, op_fun, *args):
......