ngraph commit 64da3641, authored Jul 25, 2019 by Ewa21
[Py] Added gelu operator to Python API.
parent c1220108
Showing 11 changed files with 135 additions and 4 deletions.
doc/sphinx/source/python_api/_autosummary/ngraph.ops.rst   +1  -0
python/ngraph/__init__.py                                  +1  -0
python/ngraph/impl/op/__init__.py                          +1  -0
python/ngraph/ops.py                                       +19 -1
python/pyngraph/ops/fused/elu.hpp                          +23 -0
python/pyngraph/ops/fused/gelu.cpp                         +30 -0
python/pyngraph/ops/fused/gelu.hpp                         +23 -0
python/pyngraph/ops/regmodule_pyngraph_op.cpp              +1  -0
python/pyngraph/ops/regmodule_pyngraph_op.hpp              +2  -1
python/setup.py                                            +2  -1
python/test/ngraph/test_ops_fused.py                       +32 -1
doc/sphinx/source/python_api/_autosummary/ngraph.ops.rst
@@ -36,6 +36,7 @@ ngraph.ops
     equal
     exp
     floor
+    gelu
     get_output_element
     greater
     greater_eq
python/ngraph/__init__.py
@@ -49,6 +49,7 @@ from ngraph.ops import elu
 from ngraph.ops import equal
 from ngraph.ops import exp
 from ngraph.ops import floor
+from ngraph.ops import gelu
 from ngraph.ops import get_output_element
 from ngraph.ops import greater
 from ngraph.ops import greater_eq
python/ngraph/impl/op/__init__.py
@@ -73,6 +73,7 @@ from _pyngraph.op import Elu
 from _pyngraph.op import Equal
 from _pyngraph.op import Exp
 from _pyngraph.op import Floor
+from _pyngraph.op import Gelu
 from _pyngraph.op import GetOutputElement
 from _pyngraph.op import Greater
 from _pyngraph.op import GreaterEq
python/ngraph/ops.py
@@ -23,7 +23,7 @@ from ngraph.impl import AxisSet, AxisVector, Coordinate, CoordinateDiff, Functio
 from ngraph.impl.op import Abs, Acos, Add, And, Asin, ArgMax, ArgMin, Atan, AvgPool, \
     BatchNormTraining, BatchNormInference, Broadcast, Ceiling, Concat, Constant, Convert, \
     Convolution, ConvolutionBackpropData, Cos, Cosh, Divide, Dot, Elu, Equal, Exp, Floor, \
-    GetOutputElement, Greater, GreaterEq, Less, LessEq, Log, LRN, Max, Maximum, MaxPool, \
+    Gelu, GetOutputElement, Greater, GreaterEq, Less, LessEq, Log, LRN, Max, Maximum, MaxPool, \
     Min, Minimum, Multiply, Negative, Not, NotEqual, OneHot, Or, Pad, Parameter, Product, \
     Power, Relu, ReplaceSlice, Reshape, Reverse, Select, Sign, Sin, Sinh, Slice, Softmax, \
     Sqrt, Subtract, Sum, Tan, Tanh, TopK
@@ -527,6 +527,24 @@ def convert(node, new_type, name=None):  # type: (Node, NumericType, str) -> Nod
     return Convert(node, new_element_type)


+@nameable_op
+def gelu(node, name=None):  # type: (NodeInput, str) -> Node
+    r"""Perform Gaussian Error Linear Unit operation element-wise on data from input node.
+
+    Computes the Gaussian error linear function:
+
+    .. math:: f(x) = 0.5\cdot x\cdot(1 + erf(\dfrac{x}{\sqrt{2}}))
+
+    For more information refer to:
+    `Gaussian Error Linear Unit (GELU) <https://arxiv.org/pdf/1606.08415.pdf>`_
+
+    :param node: Input tensor. One of: input node, array or scalar.
+    :param name: Optional output node name.
+    :return: The new node performing a GELU operation on its input data element-wise.
+    """
+    return Gelu(as_node(node))
+
+
 @nameable_op
 def select(selection_node, input_node1, input_node2, name=None):
     # type: (Node, Node, Node, str) -> Node
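A quick way to sanity-check the docstring formula is to evaluate it with plain NumPy. The sketch below is editorial, not part of the commit; reference_gelu is a hypothetical helper that mirrors f(x) = 0.5 * x * (1 + erf(x / sqrt(2))) and reproduces the expected values used in the new tests further down:

import math

import numpy as np


def reference_gelu(x):
    # Element-wise GELU exactly as in the docstring above.
    erf = np.vectorize(math.erf)
    return 0.5 * x * (1.0 + erf(x / math.sqrt(2.0)))


print(reference_gelu(np.array([[-5, 1], [-2, 3]], dtype=np.float32)))
# approx. [[-1.4901161e-06  8.4134471e-01]
#          [-4.5500278e-02  2.9959502e+00]]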
python/pyngraph/ops/fused/elu.hpp (new file, mode 100644)

//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include <pybind11/pybind11.h>

namespace py = pybind11;

void regclass_pyngraph_op_Elu(py::module m);
python/pyngraph/ops/fused/gelu.cpp (new file, mode 100644)

//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <pybind11/pybind11.h>
#include <pybind11/stl.h>

#include "ngraph/op/fused/gelu.hpp"
#include "pyngraph/ops/fused/gelu.hpp"

namespace py = pybind11;

void regclass_pyngraph_op_Gelu(py::module m)
{
    py::class_<ngraph::op::Gelu, std::shared_ptr<ngraph::op::Gelu>, ngraph::op::Op> gelu(m, "Gelu");
    gelu.doc() = "ngraph.impl.op.Gelu wraps ngraph::op::Gelu";
    gelu.def(py::init<const std::shared_ptr<ngraph::Node>&>());
}
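Once this registration is wired into the module (see regmodule_pyngraph_op.cpp below), the class becomes reachable from Python as ngraph.impl.op.Gelu, taking a single node argument per the py::init signature. A minimal sketch, where input_node stands for a hypothetical, previously constructed ngraph node:

from ngraph.impl.op import Gelu

# input_node: any existing node, e.g. a Parameter (assumed, not shown here).
gelu_node = Gelu(input_node)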
python/pyngraph/ops/fused/gelu.hpp (new file, mode 100644)

//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include <pybind11/pybind11.h>

namespace py = pybind11;

void regclass_pyngraph_op_Gelu(py::module m);
python/pyngraph/ops/regmodule_pyngraph_op.cpp
@@ -53,6 +53,7 @@ void regmodule_pyngraph_op(py::module m_op)
     regclass_pyngraph_op_Equal(m_op);
     regclass_pyngraph_op_Exp(m_op);
     regclass_pyngraph_op_Floor(m_op);
+    regclass_pyngraph_op_Gelu(m_op);
     regclass_pyngraph_op_GetOutputElement(m_op);
     regclass_pyngraph_op_Greater(m_op);
     regclass_pyngraph_op_GreaterEq(m_op);
python/pyngraph/ops/regmodule_pyngraph_op.hpp
@@ -39,10 +39,11 @@
 #include "pyngraph/ops/cosh.hpp"
 #include "pyngraph/ops/divide.hpp"
 #include "pyngraph/ops/dot.hpp"
-#include "pyngraph/ops/elu.hpp"
 #include "pyngraph/ops/equal.hpp"
 #include "pyngraph/ops/exp.hpp"
 #include "pyngraph/ops/floor.hpp"
+#include "pyngraph/ops/fused/elu.hpp"
+#include "pyngraph/ops/fused/gelu.hpp"
 #include "pyngraph/ops/get_output_element.hpp"
 #include "pyngraph/ops/greater.hpp"
 #include "pyngraph/ops/greater_eq.hpp"
python/setup.py
@@ -179,10 +179,11 @@ sources = [
                'pyngraph/ops/ceiling.cpp',
                'pyngraph/ops/divide.cpp',
                'pyngraph/ops/dot.cpp',
-               'pyngraph/ops/elu.cpp',
+               'pyngraph/ops/fused/elu.cpp',
                'pyngraph/ops/equal.cpp',
                'pyngraph/ops/exp.cpp',
                'pyngraph/ops/floor.cpp',
+               'pyngraph/ops/fused/gelu.cpp',
                'pyngraph/ops/greater.cpp',
                'pyngraph/ops/greater_eq.cpp',
                'pyngraph/ops/less.cpp',
python/test/ngraph/test_ops_fused.py
@@ -19,7 +19,7 @@ import ngraph as ng
 from test.ngraph.util import get_runtime


-def test_elu_operator():
+def test_elu_operator_with_parameters():
     runtime = get_runtime()

     data_shape = [2, 2]
@@ -67,3 +67,34 @@ def test_elu_operator_with_scalar():
     result = computation(data_value)
     expected = np.array([[-2.9797862, 1.], [-2.5939941, 3.]], dtype=np.float32)
     assert np.allclose(result, expected)
+
+
+def test_gelu_operator_with_parameters():
+    runtime = get_runtime()
+
+    data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32)
+
+    data_shape = [2, 2]
+    parameter_data = ng.parameter(data_shape, name='Data', dtype=np.float32)
+
+    model = ng.gelu(parameter_data)
+    computation = runtime.computation(model, parameter_data)
+
+    result = computation(data_value)
+    expected = np.array([[-1.4901161e-06, 8.4134471e-01],
+                         [-4.5500278e-02, 2.9959502]], dtype=np.float32)
+    assert np.allclose(result, expected)
+
+
+def test_gelu_operator_with_array():
+    runtime = get_runtime()
+
+    data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32)
+
+    model = ng.gelu(data_value)
+    computation = runtime.computation(model)
+
+    result = computation()
+    expected = np.array([[-1.4901161e-06, 8.4134471e-01],
+                         [-4.5500278e-02, 2.9959502]], dtype=np.float32)
+    assert np.allclose(result, expected)
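The docstring also allows a plain scalar input ("input node, array or scalar"), which these tests do not exercise. A hedged sketch of that path, following the same pattern as test_gelu_operator_with_array and assuming the same get_runtime() helper the tests use; GELU(1.0) is approximately 0.8413447:

import numpy as np
import ngraph as ng
from test.ngraph.util import get_runtime

runtime = get_runtime()
model = ng.gelu(np.float32(1.0))  # scalar is wrapped by as_node() inside ng.gelu
computation = runtime.computation(model)
result = computation()
assert np.allclose(result, np.float32(0.8413447))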