ngraph: Commit adf849e5 (Unverified)
Authored Sep 04, 2019 by Scott Cyphers; committed by GitHub on Sep 04, 2019
Merge branch 'master' into cyphers/typename

Parents: 32fb97d1, 7809effd
Showing 18 changed files with 865 additions and 244 deletions
doc/sphinx/source/python_api/_autosummary/ngraph.ops.rst    +4    -0
doc/sphinx/source/python_api/_autosummary/ngraph.rst        +0    -1
python/ngraph/__init__.py                                   +4    -0
python/ngraph/impl/op/__init__.py                           +4    -0
python/ngraph/ops.py                                        +189  -4
python/pyngraph/ops/dequantize.cpp                          +35   -0
python/pyngraph/ops/dequantize.hpp                          +23   -0
python/pyngraph/ops/quantize.cpp                            +50   -0
python/pyngraph/ops/quantize.hpp                            +23   -0
python/pyngraph/ops/quantized_convolution.cpp               +50   -0
python/pyngraph/ops/quantized_convolution.hpp               +23   -0
python/pyngraph/ops/quantized_dot.cpp                       +43   -0
python/pyngraph/ops/quantized_dot.hpp                       +23   -0
python/pyngraph/ops/regmodule_pyngraph_op.cpp               +4    -0
python/pyngraph/ops/regmodule_pyngraph_op.hpp               +4    -0
python/setup.py                                             +4    -0
python/test/ngraph/test_ops_quantized.py                    +229  -0
src/ngraph/runtime/plaidml/unit_test.manifest               +153  -239
doc/sphinx/source/python_api/_autosummary/ngraph.ops.rst
@@ -32,6 +32,7 @@ ngraph.ops
 cos
 cosh
 depth_to_space
+dequantize
 divide
 dot
 elu
@@ -68,6 +69,9 @@ ngraph.ops
 power
 prelu
 prod
+quantize
+quantized_convolution
+quantized_dot
 relu
 replace_slice
 reshape
doc/sphinx/source/python_api/_autosummary/ngraph.rst
@@ -33,4 +33,3 @@ ngraph.runtime module
:undoc-members:
:show-inheritance:
python/ngraph/__init__.py
@@ -45,6 +45,7 @@ from ngraph.ops import convolution_backprop_data
 from ngraph.ops import cos
 from ngraph.ops import cosh
 from ngraph.ops import depth_to_space
+from ngraph.ops import dequantize
 from ngraph.ops import divide
 from ngraph.ops import dot
 from ngraph.ops import elu
@@ -81,6 +82,9 @@ from ngraph.ops import parameter
 from ngraph.ops import power
 from ngraph.ops import prod
 from ngraph.ops import prelu
+from ngraph.ops import quantize
+from ngraph.ops import quantized_convolution
+from ngraph.ops import quantized_dot
 from ngraph.ops import relu
 from ngraph.ops import replace_slice
 from ngraph.ops import reshape
python/ngraph/impl/op/__init__.py
@@ -69,6 +69,7 @@ from _pyngraph.op import ConvolutionBackpropFilters
 from _pyngraph.op import Cos
 from _pyngraph.op import Cosh
 from _pyngraph.op import DepthToSpace
+from _pyngraph.op import Dequantize
 from _pyngraph.op import Divide
 from _pyngraph.op import Dot
 from _pyngraph.op import Elu
@@ -106,6 +107,9 @@ from _pyngraph.op import Parameter
 from _pyngraph.op import Power
 from _pyngraph.op import PRelu
 from _pyngraph.op import Product
+from _pyngraph.op import Quantize
+from _pyngraph.op import QuantizedConvolution
+from _pyngraph.op import QuantizedDot
 from _pyngraph.op import Relu
 from _pyngraph.op import ReluBackprop
 from _pyngraph.op import ReplaceSlice
python/ngraph/ops.py
@@ -22,10 +22,11 @@ from ngraph.impl import AxisSet, AxisVector, Coordinate, CoordinateDiff, Functio
 from ngraph.impl.op import Abs, Acos, Add, And, Asin, ArgMax, ArgMin, Atan, AvgPool, \
     BatchNormTraining, BatchNormInference, Broadcast, Ceiling, Clamp, Concat, Constant, Convert, \
-    Convolution, ConvolutionBackpropData, Cos, Cosh, DepthToSpace, Divide, Dot, Elu, FakeQuantize, \
-    Equal, Exp, Floor, Gelu, Gemm, GetOutputElement, Greater, GreaterEq, GRN, HardSigmoid, Less, \
-    LessEq, Log, LRN, Max, Maximum, MaxPool, Min, Minimum, Multiply, MVN, Negative, Not, NotEqual, \
-    OneHot, Or, Pad, Parameter, Product, Power, PRelu, Relu, ReplaceSlice, Reshape, Reverse, \
+    Convolution, ConvolutionBackpropData, Cos, Cosh, DepthToSpace, Dequantize, Divide, Dot, Elu, \
+    FakeQuantize, Equal, Exp, Floor, Gelu, Gemm, GetOutputElement, Greater, GreaterEq, GRN, \
+    HardSigmoid, Less, LessEq, Log, LRN, Max, Maximum, MaxPool, Min, Minimum, Multiply, MVN, \
+    Negative, Not, NotEqual, OneHot, Or, Pad, Parameter, Product, Power, Quantize, \
+    QuantizedConvolution, QuantizedDot, PRelu, Relu, ReplaceSlice, Reshape, Reverse, \
     ScaleShift, Select, ShuffleChannels, Sign, Sin, Sinh, Slice, Softmax, SpaceToDepth, Sqrt, \
     SquaredDifference, Squeeze, Subtract, Sum, Tan, Tanh, TopK, Unsqueeze
@@ -243,6 +244,190 @@ def mvn(data, axes, normalize_variance, eps, name=None):
    return MVN(data, AxisSet(axes), normalize_variance, eps)
@nameable_op
def quantize(data, scale, zero_point, new_type, axes, round_mode, name=None):
    # type: (Node, Node, Node, NumericType, Set[int], Quantize.RoundMode, str) -> Node
    r"""Perform quantize operation on data from input node.

    Computes quantize on the input tensor:

    .. math:: output = ROUND((input / scale) + zero\_point)

    :param data: The node with data tensor.
    :param scale: Scale used for mapping.
    :param zero_point: Zero point used for mapping.
    :param new_type: Output element type.
    :param axes: Set of axes over which the scale and zero point apply.
    :param round_mode: Rule describing how to perform the ROUND function.
                       ROUND_NEAREST_TOWARD_INFINITY: Round to nearest integer. In case of two
                       equidistant integers round away from zero e.g. 2.5 -> 3, -3.5 -> -4
                       ROUND_NEAREST_TOWARD_ZERO: Round to nearest integer. In case of two equidistant
                       integers round toward zero e.g. 2.5 -> 2, -3.5 -> -3
                       ROUND_NEAREST_UPWARD: Round to nearest integer. In case of two equidistant
                       integers round up e.g. 2.5 -> 3, -3.5 -> -3
                       ROUND_NEAREST_DOWNWARD: Round to nearest integer. In case of two equidistant
                       integers round down e.g. 2.5 -> 2, -3.5 -> -4
                       ROUND_NEAREST_TOWARD_EVEN: Round to nearest integer. In case of two equidistant
                       integers round to the nearest even one e.g. 2.5 -> 2, -3.5 -> -4
                       ROUND_TOWARD_INFINITY: Round non-integral values away from zero.
                       ROUND_TOWARD_ZERO: Round non-integral values toward zero.
                       ROUND_UP: Round toward positive infinity (ceiling).
                       ROUND_DOWN: Round toward negative infinity (floor).
    :param name: Optional output node name.
    :return: The new node performing a quantize operation on input tensor.
    """
    new_element_type = get_element_type(new_type)
    return Quantize(data, scale, zero_point, new_element_type, AxisSet(axes), round_mode)
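
As a quick check of the formula above: with scale 2, zero point 128, and ROUND_NEAREST_TOWARD_INFINITY, a minimal NumPy sketch (not part of this commit; the saturation to the uint8 range is an assumption about the op's clamping behaviour) reproduces the values exercised by test_quantize_operator later in this diff:

import numpy as np

# Reference model of quantize with ROUND_NEAREST_TOWARD_INFINITY:
# round half away from zero, then saturate to the uint8 range (assumed).
def quantize_ref(x, scale, zero_point):
    q = x / scale + zero_point                   # map into the quantized domain
    q = np.floor(np.abs(q) + 0.5) * np.sign(q)   # round half away from zero
    return np.clip(q, 0, 255).astype(np.uint8)   # saturate to uint8

x = np.array([0, 2, 3, 1000, -254, -1000], dtype=np.float32)
print(quantize_ref(x, np.float32(2), 128))  # -> [128 129 130 255   1   0]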
@nameable_op
def dequantize(data, scale, zero_point, element_type, axes, name=None):
    # type: (Node, Node, Node, NumericType, Set[int], str) -> Node
    r"""Perform dequantize operation on data from input node.

    Computes dequantize on the input tensor:

    .. math:: output = (input - zero\_point) * scale

    :param data: The node with data tensor.
    :param scale: Scale used for mapping.
    :param zero_point: Zero point used for mapping.
    :param element_type: Output element type.
    :param axes: Set of axes over which the scale and zero point apply.
    :param name: Optional output node name.
    :return: The new node performing a dequantize operation on input tensor.
    """
    new_element_type = get_element_type(element_type)
    return Dequantize(data, scale, zero_point, new_element_type, AxisSet(axes))
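
Dequantize applies the inverse mapping. A NumPy-only sketch (not part of this commit) reproduces the values exercised by test_dequantize_operator later in this diff:

import numpy as np

# Reference model of dequantize: output = (input - zero_point) * scale.
def dequantize_ref(q, scale, zero_point):
    return ((q.astype(np.int32) - np.int32(zero_point)) * scale).astype(np.float32)

q = np.array([1, 1, 2, -1, 3, -1, 4, -3, 5, -3, 6, -5], dtype=np.int8)
print(dequantize_ref(q, np.float32(2), 1))
# -> [  0.   0.   2.  -4.   4.  -4.   6.  -8.   8.  -8.  10. -12.]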
@nameable_op
def quantized_convolution(data,                     # type: Node
                          filters,                  # type: Node
                          window_movement_strides,  # type: List[int]
                          window_dilation_strides,  # type: List[int]
                          padding_below,            # type: List[int]
                          padding_above,            # type: List[int]
                          data_dilation_strides,    # type: List[int]
                          input_scale,              # type: Node
                          input_zero_point,         # type: Node
                          filter_scale,             # type: Node
                          filter_zero_point,        # type: Node
                          output_scale,             # type: Node
                          output_zero_point,        # type: Node
                          output_type,              # type: NumericType
                          input_axes,               # type: Set[int]
                          filter_axes,              # type: Set[int]
                          output_axes,              # type: Set[int]
                          name=None,                # type: str
                          ):
    # type: (...) -> Node
    r"""Perform quantized convolution operation on data from input node.

    :param data: The node producing the input data batch tensor.
    :param filters: The node producing the filters tensor.
    :param window_movement_strides: The window movement strides.
    :param window_dilation_strides: The window dilation strides.
    :param padding_below: The padding-below sizes.
    :param padding_above: The padding-above sizes.
    :param data_dilation_strides: The data dilation strides.
    :param input_scale: Scale to transform the input.
    :param input_zero_point: Zero point used for mapping.
    :param filter_scale: Scale to transform the filters.
    :param filter_zero_point: Zero point used for mapping.
    :param output_scale: Scale to transform the output.
    :param output_zero_point: Zero point used for mapping.
    :param output_type: Output element type.
    :param input_axes: Input axes set for channel-wise quantization.
    :param filter_axes: Filter axes set for channel-wise quantization.
    :param output_axes: Output axes set for channel-wise quantization.
    :param name: Optional output node name.
    :return: The new node performing a quantized convolution operation on input tensor.
    """
    new_output_type = get_element_type(output_type)
    return QuantizedConvolution(data,
                                filters,
                                Strides(window_movement_strides),
                                Strides(window_dilation_strides),
                                CoordinateDiff(padding_below),
                                CoordinateDiff(padding_above),
                                Strides(data_dilation_strides),
                                input_scale,
                                input_zero_point,
                                filter_scale,
                                filter_zero_point,
                                output_scale,
                                output_zero_point,
                                new_output_type,
                                AxisSet(input_axes),
                                AxisSet(filter_axes),
                                AxisSet(output_axes))
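
With all scales equal to 1 and all zero points equal to 0, quantized convolution reduces to a plain integer cross-correlation, which is exactly what test_quantized_convolution_operator later in this diff exercises. A reference computation (not part of this commit; assumes SciPy, whose mode='same' for a 3x3 kernel matches the pad-1 used in the test) reproduces the expected values:

import numpy as np
from scipy.signal import correlate2d

data = np.array([[1, 2, 3, 4], [5, 6, 7, 0], [1, 2, 3, 4]])
filt = np.array([[1, 2, 3], [4, 5, 0], [0, 1, 2]])

# nGraph Convolution is a cross-correlation; zero padding of 1 on each side
# of a 3x4 input with a 3x3 kernel corresponds to mode='same'.
print(correlate2d(data, filt, mode='same', boundary='fill', fillvalue=0))
# -> [[22 34 30 32]
#     [38 72 90 43]
#     [33 52 43 39]]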
@nameable_op
def quantized_dot(input0,                # type: Node
                  input1,                # type: Node
                  reduction_axes_count,  # type: int
                  input0_scale,          # type: Node
                  input0_zero_point,     # type: Node
                  input1_scale,          # type: Node
                  input1_zero_point,     # type: Node
                  output_scale,          # type: Node
                  output_zero_point,     # type: Node
                  output_type,           # type: NumericType
                  input0_axes,           # type: Set[int]
                  input1_axes,           # type: Set[int]
                  output_axes,           # type: Set[int]
                  name=None,             # type: str
                  ):
    # type: (...) -> Node
    r"""Perform quantized dot operation on data from input node.

    :param input0: The node producing the first input tensor.
    :param input1: The node producing the second input tensor.
    :param reduction_axes_count: Number of reduction axes.
    :param input0_scale: Scale to transform the first input.
    :param input0_zero_point: Zero point used for mapping.
    :param input1_scale: Scale to transform the second input.
    :param input1_zero_point: Zero point used for mapping.
    :param output_scale: Scale to transform the output.
    :param output_zero_point: Zero point used for mapping.
    :param output_type: Output element type.
    :param input0_axes: Input0 axes set for channel-wise quantization.
    :param input1_axes: Input1 axes set for channel-wise quantization.
    :param output_axes: Output axes set for channel-wise quantization.
    :param name: Optional output node name.
    :return: The new node performing a quantized dot operation on input tensor.
    """
    new_output_type = get_element_type(output_type)
    return QuantizedDot(input0,
                        input1,
                        reduction_axes_count,
                        input0_scale,
                        input0_zero_point,
                        input1_scale,
                        input1_zero_point,
                        output_scale,
                        output_zero_point,
                        new_output_type,
                        AxisSet(input0_axes),
                        AxisSet(input1_axes),
                        AxisSet(output_axes))
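
The requantization arithmetic here is: dequantize both inputs, take the real-valued dot product, then divide by the output scale. A NumPy-only sketch (not part of this commit) reproduces the values exercised by test_quantized_dot_operator later in this diff:

import numpy as np

a = np.array([[2, 3]])                # uint8 input0: scale 2, zero point 0
b = np.array([[0, 2, 4], [1, 3, 5]])  # uint8 input1: scale 1, zero point 0

real = (2.0 * a) @ (1.0 * b)  # dequantized dot product -> [[ 6. 26. 46.]]
print(real / 2.0)             # requantize with output scale 2 -> [[ 3. 13. 23.]]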
# Unary ops
@unary_op
def absolute(node, name=None):
    # type: (NodeInput, str) -> Node
python/pyngraph/ops/dequantize.cpp
0 → 100644
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include "ngraph/op/dequantize.hpp"
#include "pyngraph/ops/dequantize.hpp"
namespace py = pybind11;

void regclass_pyngraph_op_Dequantize(py::module m)
{
    py::class_<ngraph::op::Dequantize,
               std::shared_ptr<ngraph::op::Dequantize>,
               ngraph::op::Op>
        dequantize(m, "Dequantize");
    dequantize.doc() = "ngraph.impl.op.Dequantize wraps ngraph::op::Dequantize";
    dequantize.def(py::init<const std::shared_ptr<ngraph::Node>&,
                            const std::shared_ptr<ngraph::Node>&,
                            const std::shared_ptr<ngraph::Node>&,
                            const ngraph::element::Type&,
                            const ngraph::AxisSet&>());
}
python/pyngraph/ops/dequantize.hpp
0 → 100644
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;

void regclass_pyngraph_op_Dequantize(py::module m);
python/pyngraph/ops/quantize.cpp
0 → 100644
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include "ngraph/op/quantize.hpp"
#include "pyngraph/ops/quantize.hpp"
namespace py = pybind11;

void regclass_pyngraph_op_Quantize(py::module m)
{
    py::class_<ngraph::op::Quantize,
               std::shared_ptr<ngraph::op::Quantize>,
               ngraph::op::Op>
        quantize(m, "Quantize");
    quantize.doc() = "ngraph.impl.op.Quantize wraps ngraph::op::Quantize";
    quantize.def(py::init<const std::shared_ptr<ngraph::Node>&,
                          const std::shared_ptr<ngraph::Node>&,
                          const std::shared_ptr<ngraph::Node>&,
                          const ngraph::element::Type&,
                          const ngraph::AxisSet&,
                          ngraph::op::Quantize::RoundMode>());

    py::enum_<ngraph::op::Quantize::RoundMode>(quantize, "RoundMode", py::arithmetic())
        .value("ROUND_NEAREST_TOWARD_INFINITY",
               ngraph::op::Quantize::RoundMode::ROUND_NEAREST_TOWARD_INFINITY)
        .value("ROUND_NEAREST_TOWARD_ZERO",
               ngraph::op::Quantize::RoundMode::ROUND_NEAREST_TOWARD_ZERO)
        .value("ROUND_NEAREST_UPWARD", ngraph::op::Quantize::RoundMode::ROUND_NEAREST_UPWARD)
        .value("ROUND_NEAREST_DOWNWARD", ngraph::op::Quantize::RoundMode::ROUND_NEAREST_DOWNWARD)
        .value("ROUND_NEAREST_TOWARD_EVEN",
               ngraph::op::Quantize::RoundMode::ROUND_NEAREST_TOWARD_EVEN)
        .value("ROUND_TOWARD_INFINITY", ngraph::op::Quantize::RoundMode::ROUND_TOWARD_INFINITY)
        .value("ROUND_TOWARD_ZERO", ngraph::op::Quantize::RoundMode::ROUND_TOWARD_ZERO)
        .value("ROUND_UP", ngraph::op::Quantize::RoundMode::ROUND_UP)
        .value("ROUND_DOWN", ngraph::op::Quantize::RoundMode::ROUND_DOWN)
        .export_values();
}
python/pyngraph/ops/quantize.hpp
0 → 100644
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;

void regclass_pyngraph_op_Quantize(py::module m);
python/pyngraph/ops/quantized_convolution.cpp
0 → 100644
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include "ngraph/op/quantized_convolution.hpp"
#include "pyngraph/ops/quantized_convolution.hpp"
namespace py = pybind11;

void regclass_pyngraph_op_QuantizedConvolution(py::module m)
{
    py::class_<ngraph::op::QuantizedConvolution,
               std::shared_ptr<ngraph::op::QuantizedConvolution>,
               ngraph::op::Op>
        quantizedconvolution(m, "QuantizedConvolution");
    quantizedconvolution.doc() =
        "ngraph.impl.op.QuantizedConvolution wraps ngraph::op::QuantizedConvolution";
    quantizedconvolution.def(py::init<const std::shared_ptr<ngraph::Node>&,
                                      const std::shared_ptr<ngraph::Node>&,
                                      const ngraph::Strides&,
                                      const ngraph::Strides&,
                                      const ngraph::CoordinateDiff&,
                                      const ngraph::CoordinateDiff&,
                                      const ngraph::Strides&,
                                      const std::shared_ptr<ngraph::Node>&,
                                      const std::shared_ptr<ngraph::Node>&,
                                      const std::shared_ptr<ngraph::Node>&,
                                      const std::shared_ptr<ngraph::Node>&,
                                      const std::shared_ptr<ngraph::Node>&,
                                      const std::shared_ptr<ngraph::Node>&,
                                      const ngraph::element::Type&,
                                      const ngraph::AxisSet&,
                                      const ngraph::AxisSet&,
                                      const ngraph::AxisSet&>());
}
python/pyngraph/ops/quantized_convolution.hpp
0 → 100644
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;

void regclass_pyngraph_op_QuantizedConvolution(py::module m);
python/pyngraph/ops/quantized_dot.cpp
0 → 100644
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include "ngraph/op/quantized_dot.hpp"
#include "pyngraph/ops/quantized_dot.hpp"
namespace py = pybind11;

void regclass_pyngraph_op_QuantizedDot(py::module m)
{
    py::class_<ngraph::op::QuantizedDot,
               std::shared_ptr<ngraph::op::QuantizedDot>,
               ngraph::op::Op>
        quantizeddot(m, "QuantizedDot");
    quantizeddot.doc() = "ngraph.impl.op.QuantizedDot wraps ngraph::op::QuantizedDot";
    quantizeddot.def(py::init<const std::shared_ptr<ngraph::Node>&,
                              const std::shared_ptr<ngraph::Node>&,
                              const int,
                              const std::shared_ptr<ngraph::Node>&,
                              const std::shared_ptr<ngraph::Node>&,
                              const std::shared_ptr<ngraph::Node>&,
                              const std::shared_ptr<ngraph::Node>&,
                              const std::shared_ptr<ngraph::Node>&,
                              const std::shared_ptr<ngraph::Node>&,
                              const ngraph::element::Type&,
                              const ngraph::AxisSet&,
                              const ngraph::AxisSet&,
                              const ngraph::AxisSet&>());
}
python/pyngraph/ops/quantized_dot.hpp
0 → 100644
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;

void regclass_pyngraph_op_QuantizedDot(py::module m);
python/pyngraph/ops/regmodule_pyngraph_op.cpp
@@ -49,6 +49,7 @@ void regmodule_pyngraph_op(py::module m_op)
     regclass_pyngraph_op_Cos(m_op);
     regclass_pyngraph_op_Cosh(m_op);
     regclass_pyngraph_op_DepthToSpace(m_op);
+    regclass_pyngraph_op_Dequantize(m_op);
     regclass_pyngraph_op_Divide(m_op);
     regclass_pyngraph_op_Dot(m_op);
     regclass_pyngraph_op_Elu(m_op);
@@ -86,6 +87,9 @@ void regmodule_pyngraph_op(py::module m_op)
     regclass_pyngraph_op_Power(m_op);
     regclass_pyngraph_op_PRelu(m_op);
     regclass_pyngraph_op_Product(m_op);
+    regclass_pyngraph_op_Quantize(m_op);
+    regclass_pyngraph_op_QuantizedConvolution(m_op);
+    regclass_pyngraph_op_QuantizedDot(m_op);
     regclass_pyngraph_op_Relu(m_op);
     regclass_pyngraph_op_ReluBackprop(m_op);
     regclass_pyngraph_op_ReplaceSlice(m_op);
python/pyngraph/ops/regmodule_pyngraph_op.hpp
@@ -37,6 +37,7 @@
#include "pyngraph/ops/convolution.hpp"
#include "pyngraph/ops/cos.hpp"
#include "pyngraph/ops/cosh.hpp"
#include "pyngraph/ops/dequantize.hpp"
#include "pyngraph/ops/divide.hpp"
#include "pyngraph/ops/dot.hpp"
#include "pyngraph/ops/equal.hpp"
@@ -81,6 +82,9 @@
#include "pyngraph/ops/passthrough.hpp"
#include "pyngraph/ops/power.hpp"
#include "pyngraph/ops/product.hpp"
#include "pyngraph/ops/quantize.hpp"
#include "pyngraph/ops/quantized_convolution.hpp"
#include "pyngraph/ops/quantized_dot.hpp"
#include "pyngraph/ops/relu.hpp"
#include "pyngraph/ops/replace_slice.hpp"
#include "pyngraph/ops/reshape.hpp"
python/setup.py
@@ -179,6 +179,7 @@ sources = [
     'pyngraph/ops/cosh.cpp',
     'pyngraph/ops/ceiling.cpp',
     'pyngraph/ops/fused/depth_to_space.cpp',
+    'pyngraph/ops/dequantize.cpp',
     'pyngraph/ops/divide.cpp',
     'pyngraph/ops/dot.cpp',
     'pyngraph/ops/fused/elu.cpp',
@@ -214,6 +215,9 @@ sources = [
     'pyngraph/ops/passthrough.cpp',
     'pyngraph/ops/power.cpp',
     'pyngraph/ops/fused/prelu.cpp',
+    'pyngraph/ops/quantize.cpp',
+    'pyngraph/ops/quantized_convolution.cpp',
+    'pyngraph/ops/quantized_dot.cpp',
     'pyngraph/ops/regmodule_pyngraph_op.cpp',
     'pyngraph/ops/relu.cpp',
     'pyngraph/ops/replace_slice.cpp',
python/test/ngraph/test_ops_quantized.py
0 → 100644
# ******************************************************************************
# Copyright 2017-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np

import ngraph as ng
from test.ngraph.util import get_runtime
from ngraph.impl.op import Quantize
def test_quantize_operator():
    runtime = get_runtime()

    data_shape = [6]
    scale_shape = []
    zero_point_shape = []

    data_value = np.array([0, 2, 3, 1000, -254, -1000]).astype(np.float32)
    scale_value = np.float32(2)
    zero_point_value = np.uint8(128)
    new_type = np.uint8
    axis_set = []

    parameter_data = ng.parameter(data_shape, name='Data', dtype=np.float32)
    parameter_scale = ng.parameter(scale_shape, name='Scale', dtype=np.float32)
    parameter_zero_point = ng.parameter(zero_point_shape, name='Zero_Point', dtype=np.uint8)

    model = ng.quantize(parameter_data,
                        parameter_scale,
                        parameter_zero_point,
                        new_type,
                        axis_set,
                        Quantize.RoundMode.ROUND_NEAREST_TOWARD_INFINITY)
    computation = runtime.computation(model,
                                      parameter_data,
                                      parameter_scale,
                                      parameter_zero_point)

    result = computation(data_value, scale_value, zero_point_value)
    expected = np.array([128, 129, 130, 255, 1, 0]).astype(np.uint8)
    assert np.allclose(result, expected)
def test_quantized_convolution_operator():
    runtime = get_runtime()

    data_shape = [1, 1, 3, 4]
    filters_shape = [1, 1, 3, 3]
    result_shape = [1, 1, 3, 4]
    shape = []

    data_value = np.array([1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4]).astype(
        np.uint8).reshape(data_shape)
    filters_value = np.array([1, 2, 3, 4, 5, 0, 0, 1, 2]).astype(
        np.uint8).reshape(filters_shape)
    window_movement_strides = [1, 1]
    window_dilation_strides = [1, 1]
    padding_below = [1, 1]
    padding_above = [1, 1]
    data_dilation_strides = [1, 1]
    input_scale_value = 1
    input_zero_point_value = 0
    filter_scale_value = 1
    filter_zero_point_value = 0
    output_scale_value = 1
    output_zero_point_value = 0
    output_type = np.int32
    input_axes = []
    filter_axes = []
    output_axes = []

    parameter_data = ng.parameter(data_shape, name='Data', dtype=np.uint8)
    parameter_filters = ng.parameter(filters_shape, name='Filters', dtype=np.uint8)
    parameter_input_scale = ng.parameter(shape, name='Input_scale', dtype=np.float32)
    parameter_input_zero_point = ng.parameter(shape, name='Input_zero_point', dtype=np.uint8)
    parameter_filter_scale = ng.parameter(shape, name='Filter_scale', dtype=np.float32)
    parameter_filter_zero_point = ng.parameter(shape, name='Filter_zero_point', dtype=np.uint8)
    parameter_output_scale = ng.parameter(shape, name='Output_scale', dtype=np.float32)
    parameter_output_zero_point = ng.parameter(shape, name='Output_zero_point', dtype=np.int32)

    model = ng.quantized_convolution(parameter_data,
                                     parameter_filters,
                                     window_movement_strides,
                                     window_dilation_strides,
                                     padding_below,
                                     padding_above,
                                     data_dilation_strides,
                                     parameter_input_scale,
                                     parameter_input_zero_point,
                                     parameter_filter_scale,
                                     parameter_filter_zero_point,
                                     parameter_output_scale,
                                     parameter_output_zero_point,
                                     output_type,
                                     input_axes,
                                     filter_axes,
                                     output_axes)
    computation = runtime.computation(model,
                                      parameter_data,
                                      parameter_filters,
                                      parameter_input_scale,
                                      parameter_input_zero_point,
                                      parameter_filter_scale,
                                      parameter_filter_zero_point,
                                      parameter_output_scale,
                                      parameter_output_zero_point)

    result = computation(data_value,
                         filters_value,
                         input_scale_value,
                         input_zero_point_value,
                         filter_scale_value,
                         filter_zero_point_value,
                         output_scale_value,
                         output_zero_point_value)
    expected = np.array([22, 34, 30, 32, 38, 72, 90, 43, 33, 52, 43, 39]).astype(
        np.int32).reshape(result_shape)
    assert np.allclose(result, expected)
def test_quantized_dot_operator():
    runtime = get_runtime()

    input0_shape = [1, 2]
    input1_shape = [2, 3]
    result_shape = [1, 3]
    shape = []

    input0_value = np.array([2, 3]).astype(np.uint8).reshape(input0_shape)
    input1_value = np.array([0, 2, 4, 1, 3, 5]).astype(np.uint8).reshape(input1_shape)
    reduction_axes_count = 1
    input0_scale_value = 2
    input0_zero_point_value = 0
    input1_scale_value = 1
    input1_zero_point_value = 0
    output_scale_value = 2
    output_zero_point_value = 0
    output_type = np.uint8
    input0_axes = []
    input1_axes = []
    output_axes = []

    parameter_input0 = ng.parameter(input0_shape, name='Input0', dtype=np.uint8)
    parameter_input1 = ng.parameter(input1_shape, name='Input1', dtype=np.uint8)
    parameter_input0_scale = ng.parameter(shape, name='Input0_scale', dtype=np.float32)
    parameter_input0_zero_point = ng.parameter(shape, name='Input0_zero_point', dtype=np.uint8)
    parameter_input1_scale = ng.parameter(shape, name='Input1_scale', dtype=np.float32)
    parameter_input1_zero_point = ng.parameter(shape, name='Input1_zero_point', dtype=np.uint8)
    parameter_output_scale = ng.parameter(shape, name='Output_scale', dtype=np.float32)
    parameter_output_zero_point = ng.parameter(shape, name='Output_zero_point', dtype=np.uint8)

    model = ng.quantized_dot(parameter_input0,
                             parameter_input1,
                             reduction_axes_count,
                             parameter_input0_scale,
                             parameter_input0_zero_point,
                             parameter_input1_scale,
                             parameter_input1_zero_point,
                             parameter_output_scale,
                             parameter_output_zero_point,
                             output_type,
                             input0_axes,
                             input1_axes,
                             output_axes)
    computation = runtime.computation(model,
                                      parameter_input0,
                                      parameter_input1,
                                      parameter_input0_scale,
                                      parameter_input0_zero_point,
                                      parameter_input1_scale,
                                      parameter_input1_zero_point,
                                      parameter_output_scale,
                                      parameter_output_zero_point)

    result = computation(input0_value,
                         input1_value,
                         input0_scale_value,
                         input0_zero_point_value,
                         input1_scale_value,
                         input1_zero_point_value,
                         output_scale_value,
                         output_zero_point_value)
    expected = np.array([3, 13, 23]).astype(np.uint8).reshape(result_shape)
    assert np.allclose(result, expected)
def test_dequantize_operator():
    runtime = get_runtime()

    data_shape = [4, 3]
    scale_shape = []
    zero_point_shape = []
    result_shape = [4, 3]

    data_value = np.array([1, 1, 2, -1, 3, -1, 4, -3, 5, -3, 6, -5]).astype(
        np.int8).reshape(data_shape)
    scale_value = np.float32(2)
    zero_point_value = np.int8(1)
    element_type = np.float32
    axis_set = []

    parameter_data = ng.parameter(data_shape, name='Data', dtype=np.int8)
    parameter_scale = ng.parameter(scale_shape, name='Scale', dtype=np.float32)
    parameter_zero_point = ng.parameter(zero_point_shape, name='Zero_Point', dtype=np.int8)

    model = ng.dequantize(parameter_data,
                          parameter_scale,
                          parameter_zero_point,
                          element_type,
                          axis_set)
    computation = runtime.computation(model,
                                      parameter_data,
                                      parameter_scale,
                                      parameter_zero_point)

    result = computation(data_value, scale_value, zero_point_value)
    expected = np.array([0, 0, 2, -4, 4, -4, 6, -8, 8, -8, 10, -12]).astype(
        np.float32).reshape(result_shape)
    assert np.allclose(result, expected)
src/ngraph/runtime/plaidml/unit_test.manifest
@@ -39,88 +39,79 @@ topk_2d_min_one # No plans to implement TopK
topk_int64 # No plans to implement TopK
topk_5d_max_partial # No plans to implement TopK
topk_1d_i32_max_all # No plans to implement TopK
topk_2d_max_one_with_equal_values # No plans to implement TopK
model_top_k # No plans to implement TopK
# unsupported op: `Erf`
erf
gelu_f32
gelu_f64
gelu_backprop_factor_f32
gelu_backprop_factor_f64
backwards_gelu_f32
backwards_gelu_f64
model_erf
model_erf_int32
# Tests that PlaidML might be able to run at some point.
backwards_maxpool_n2_c1_hw5_3x3_str2_max_pad1x2_2x3
backwards_maxpool_n4c1h4w4_kh2kw2_sh1sw1
backwards_maxpool_n2c1h5w5_kh3kw3_sh2sw2
backwards_maxpool_n4_c1_hw4_2x2_max
backwards_maxpool_n2_c1_hw5_3x3_str2_max
backwards_slice
batchnorm_fprop_bprop # To debug
batchnorm_fprop_bprop_2step # To debug
batch_norm_inference_0eps_f64
batch_norm_inference_f64
batch_norm_training_0eps_f64
softmax_axis_3d_double # To debug
replace_slice_matrix_inplace
max_pool_2d_1channel_1image_overpadded
max_pool_3d
maxpool_bprop_larger_than_cache
generate_mask
generate_mask2
avg_pool_3d
avg_pool_3d_uneven_strided_padded_include_in_computation
quantize_clamp_int32 # Requires fp64 inputs, which won't work on GPUs
numeric_float_nan
numeric_double_nan
shape_of_scalar
shape_of_vector
shape_of_matrix
shape_of_5d
sum_stable_acc_double # To debug: precision errors
# unsupported ops: `BroadcastDistributed`
broadcastdistributed
# unsupported ops: 'QuantizedConvolution', 'QuantizedDot', 'EmbeddingLookup'
model_quant_conv_linear
model_conv_integer_no_zero_point
model_matmul_integer_no_zero_point
model_matmul_integer_4d_no_zero_point
model_qlinear_matmul
model_qlinear_matmul_3d
model_matmul_integer
model_matmul_integer_zero_point_zero
model_matmul_integer_scalar
model_matmul_integer_4d
model_matmul_integer_4d_zero_point
model_hardmax
quantized_convolution
quantized_conv_int32_output
quantized_dot_u8u8
quantized_dot_int32_output
embedding_lookup_4x5_reverse
embedding_lookup_10x1_arbitrary
embedding_lookup_10x1_arbitrary_index_type_int
embedding_lookup_10x1_arbitrary_index_type_int64
floor_int32
gather_nd_scalar_from_2d
gather_nd_1d_from_2d
gather_nd_scalar_from_3d
gather_nd_1d_from_3d
gather_nd_2d_from_3d
gather_nd_batch_scalar_from_2d
gather_nd_batch_1d_from_2d
gather_nd_batch_scalar_from_3d
gather_nd_batch_1d_from_3d
gather_nd_batch_2d_from_3d
gather_nd_single_indices
gather_4d_indices_no_axis_uint8
gather_scalar_indices_axis_1_2d_input
gather_1d_indices_axis_2_4d_input
gather_2d_indices_axis_1_2d_input
gather_scalar_indices_no_axis_2d_input
gather_1d_indices_no_axis_1d_input
gather_2d_indices_no_axis_2d_input
gather_3d_indices_no_axis_2d_input
gather_4d_indices_no_axis_2d_input
scatter_add_4d_indices
scatter_add_3d_indices
scatter_add_2d_indices
scatter_add_1d_indices
scatter_add_scalar_indices
scatter_nd_add_batch_2d_to_3d
scatter_nd_add_2d_to_3d
# To be triaged -- bad kernels, numerical accuracy, edge conditions,
# unimplemented functionality, &c
cos
erf
sin
tan
not
abc_int64
concat_matrix_int64
select_double
# unsupported op: `ReverseSequence`
model_lstm_bdir_short_input_seq
model_lstm_mixed_seq_reverse
model_reverse_sequence_0_batch_1
model_reverse_sequence_1_batch_0
# result mismatch
model_dequantize_linear_scalar_zero_scale_int8
model_softmax
avg_pool_3d_uneven_strided_padded
rnn_cell_activation_function
gru_cell_bias_clip
gru_cell_linear_before_reset
softmax_axis_3d
relu_2Dfprop_i32
avg_pool_uint8
avg_pool_int8
one_hot_vector_many_categories
conv_bias_1d
conv_bias_2d
conv_bias_3d
conv_bias_add_2d
normalize_across_hw_4d
divide_python_rounding_int32
convert_int32_bool
convert_float32_bool
tensor_constant_int64
constant_equality_bool
numeric_float_inf
numeric_double_inf
computation_reuse
pad_negative_exterior_1d_check_limits
batch_norm_inference_0eps_f64
batch_norm_inference_f64
batch_norm_training_0eps_f64
batch_norm_inference_parameters_duplication
batch_norm_fprop_b1c2h2w2
batch_norm_fprop_b2c2h2w1
batch_norm_fprop_b2c2d2h1w1
batch_norm_fprop_inference_b2c2h2w1
pad_edge_1d
pad_edge_1d_top_neg
pad_edge_1d_top_neg_bigger_than_tensor
@@ -136,29 +127,84 @@ pad_reflect_1d_bottom_neg_bigger_than_tensor
pad_reflect_1d_multi_reflect
pad_reflect_2d
pad_reflect_2d_with_neg
pad_negative_exterior_2d
pad_negative_exterior_2d_all_negative
pad_negative_exterior_4d
pad_symmetric
max_trivial_int8
max_trivial_5d_int32
# No double precision FP support in PlaidML
sum_trivial_in_double
sum_stable_acc_double
sum_stable_simple_double
softmax_axis_3d_double
select_double
numeric_double_nan
numeric_double_inf
max_3d_to_scalar_double
softmax_axis_3d
logical_and
logical_or
batch_norm_inference_parameters_duplication
batch_norm_fprop_b1c2h2w2
batch_norm_fprop_b2c2h2w1
batch_norm_fprop_b2c2d2h1w1
batch_norm_fprop_inference_b2c2h2w1
argmin_trivial_in_double
# unsupported op: `ShapeOf`
shape_of_vector
shape_of_matrix
shape_of_5d
# unsupported ops: `ScatterAdd` `ScatterNDAdd`
scatter_add_3d_indices
scatter_add_2d_indices
scatter_add_1d_indices
scatter_add_scalar_indices
scatter_nd_add_batch_2d_to_3d
scatter_nd_add_2d_to_3d
# c++ runtime exception
replace_slice_matrix_inplace
quantize_clamp_int32
pad_negative_exterior_1d_check_limits
backwards_slice
backwards_softmax_3d
argmin_3D_i32
argmin_3D_i64
argmax_3D_i32
argmax_3D_i64
argmax_3D_axis_0
argmax_3D_axis_1
argmax_3D_axis_2
argmin_trivial_in_double
topk_2d_max_one_with_equal_values
sum_trivial_in_double
sum_stable_simple_double
one_hot_vector_many_categories
argmin_4D_i64
argmax_4D_i64
# PlaidML doesn't support over-padded MaxPool
max_pool_3d
backwards_maxpool_n2_c1_hw5_3x3_str2_max_pad1x2_2x3
# c++ compilation failure
max_trivial_int8
max_trivial_5d_int32
floor_int32
any_trivial
any_2x2x3_eliminate_dim_0
# unsupported op: `GenerateMask`
generate_mask
generate_mask2
# unsupported op: `Gather`
gather_4d_indices_no_axis_uint8
gather_4d_indices_no_axis_2d_input
gather_3d_indices_no_axis_2d_input
gather_2d_indices_no_axis_2d_input
gather_1d_indices_no_axis_1d_input
gather_scalar_indices_no_axis_2d_input
gather_2d_indices_axis_1_2d_input
gather_1d_indices_axis_2_4d_input
gather_scalar_indices_axis_1_2d_input
gather_nd_single_indices
gather_nd_scalar_from_2d
gather_nd_1d_from_2d
gather_nd_scalar_from_3d
gather_nd_1d_from_3d
gather_nd_2d_from_3d
gather_nd_batch_scalar_from_2d
gather_nd_batch_1d_from_2d
gather_nd_batch_scalar_from_3d
gather_nd_batch_1d_from_3d
gather_nd_batch_2d_from_3d
gather_no_axis_int8
gather_no_axis_int16
gather_no_axis_int32
@@ -168,93 +214,24 @@ gather_no_axis_uint16
gather_no_axis_uint32
gather_no_axis_uint64
gather_no_axis_bool
elu
elu_negative_alpha
prelu
hardsigmoid
prelu_shared_slope
prelu_negative_slope
relu_2Dfprop_i32
conv_bias_1d
conv_bias_2d
conv_bias_3d
conv_bias_bprop_2d
conv_bias_add_2d
space_to_depth
depth_to_space
normalize_across_chw_4d
normalize_across_chw_4d_max_bias
normalize_across_chw_3d
normalize_across_chw_2d
normalize_across_hw_4d
gemm
fused_clamp
mvn_mean_normalization
mvn_mean_normalization_split_channels
mvn_mean_variance_normalization
mvn_mean_variance_normalization_split_channels
grn_4d
grn_2d_with_bias
scale_shift_no_broadcast
scale_shift
shuffle_channels_simple
shuffle_channels_negative_axis
shuffle_channels_float
squeeze
squeeze_default_axes
squared_difference
squared_difference_broadcast
fake_quantize
fake_quantize_with_clip
fake_quantize_with_clip_across_channels
dot_0_0
dot_2x0_0
equal
notequal
greater
greater_int64
greatereq
less
lesseq
lesseq_int32
lesseq_bool
broadcast_vector_rowwise_int64
minimum_int64
maximum_int64
auto_bcast_binary_elementwise
any_trivial
any_2x2x3_eliminate_dim_0
backwards_acos
backwards_asin
backwards_atan
backwards_softmax_all
backwards_softmax_axis
backwards_softmax_underflow
backwards_softmax_3d
batch_mat_mul_forward
dot_matrix_2x0_0x2
max_pool_uint8
max_pool_int8
avg_pool_uint8
avg_pool_int8
# Need erf
gelu_f32
gelu_f64
gelu_backprop_factor_f32
gelu_backprop_factor_f64
backwards_gelu_f32
backwards_gelu_f64
# unsupported op: `BatchMatMul`
batch_mat_mul_forward
backwards_batchmatmul_tensor2_tensor2
-# From onnx tests
+# onnx tests
model_quant_conv_linear_2d
model_quant_conv_linear_3d
model_conv_integer
model_conv_integer_zero_point_zero
model_conv_integer_pads
model_lstm_fwd_hardsigmoid_activation
model_lstm_fwd_with_clip
model_lstm_fwd_mixed_seq
model_lstm_fwd_large_batch_no_clip
model_global_lp_pool_p3
model_argmin_no_keepdims
model_reduce_log_sum_exp
model_elu
model_selu
model_sigmoid
...
...
@@ -264,81 +241,18 @@ model_argmin_int32
model_lp_norm_default
model_instance_normalization
# failings on plaidml_nGPU
argmin_4D_i64
argmax_4D_i64
# dgkutnic ww24.5: these tests are to be triaged by the PlaidML team
# ww25.2: re-scrubbed this list of tests after fixing check_inputs
# initial debug points to some of these failing due to precision issues
sqrt
batch_norm_inference_0eps_f32
batch_norm_inference_f32
batch_norm_training_0eps_f32
argmin_trivial
argmax_trivial
argmin_trivial_in_i32
argmin_3D_i32
argmin_3D_i64
argmax_3D_i32
argmax_3D_i64
sum_large_1d_to_scalar
sum_stable_acc
one_hot_scalar_2_in_3
one_hot_scalar_1_in_3
one_hot_scalar_0_in_3
# passing locally, fails closeness checks in CI which may be too strict
elu
elu_negative_alpha
lstm_cell_no_bias_no_peepholes
lstm_cell_bias_peepholes
lstm_cell_bias_peepholes_clip_input_forget
lstm_cell_activaction_functions
divide_python_rounding_int32
backwards_batchmatmul_tensor2_tensor2
# unsupported ops: `BroadcastDistributed`
broadcastdistributed
# unsupported ops: 'QuantizedConvolution', 'QuantizedDot', 'TopK', 'Erf', 'EmbeddingLookup'
model_quant_conv_linear
model_conv_integer_no_zero_point
model_matmul_integer_no_zero_point
model_matmul_integer_4d_no_zero_point
model_qlinear_matmul
model_qlinear_matmul_3d
model_matmul_integer
model_matmul_integer_zero_point_zero
model_matmul_integer_scalar
model_matmul_integer_4d
model_matmul_integer_4d_zero_point
model_top_k
model_erf
model_erf_int32
model_hardmax
quantized_convolution
quantized_conv_int32_output
quantized_dot_u8u8
quantized_dot_int32_output
# unsupported op: `ReverseSequence`
model_lstm_bdir_short_input_seq
model_lstm_mixed_seq_reverse
model_reverse_sequence_0_batch_1
model_reverse_sequence_1_batch_0
# node validation error: "Argument shapes are inconsistent."
model_lstm_fwd_with_clip
model_lstm_fwd_mixed_seq
model_lstm_fwd_hardsigmoid_activation
model_reduce_log_sum
model_reduce_log_sum_exp
model_reduce_mean
# result mismatch
model_dequantize_linear_scalar_zero_scale_int8
model_softmax
avg_pool_3d_uneven_strided_padded
rnn_cell_activation_function
gru_cell_bias_clip
gru_cell_linear_before_reset
dot_0_0
dot_matrix_2x0_0x2
dot_2x0_0
auto_bcast_binary_elementwise
max_pool_2d_1channel_1image_overpadded
# axes input param not supported
lrn_across_h