submodule / ngraph / Commits / 886e87d4

Commit 886e87d4 (unverified), authored Aug 08, 2019 by Adam Procter, committed by GitHub on Aug 08, 2019.
    Merge branch 'master' into aprocter/s-barannikov-unused_includes

Parents: 2d868b2c, c83a98c3
Showing 53 changed files with 333 additions and 90 deletions.
CMakeLists.txt                                               +4   -0
CODEOWNERS                                                   +1   -1
doc/sphinx/source/python_api/_autosummary/ngraph.ops.rst     +1   -0
python/ngraph/__init__.py                                    +1   -0
python/ngraph/impl/op/__init__.py                            +1   -0
python/ngraph/ops.py                                         +35  -3
python/pyngraph/ops/fused/fake_quantize.cpp                  +36  -0
python/pyngraph/ops/fused/fake_quantize.hpp                  +23  -0
python/pyngraph/ops/regmodule_pyngraph_op.cpp                +1   -0
python/pyngraph/ops/regmodule_pyngraph_op.hpp                +1   -0
python/setup.py                                              +1   -0
python/test/ngraph/test_ops_fused.py                         +47  -0
src/ngraph/CMakeLists.txt                                    +8   -0
src/ngraph/op/experimental/batch_mat_mul.cpp                 +2   -2
src/ngraph/op/experimental/batch_mat_mul.hpp                 +2   -1
src/ngraph/op/experimental/compiled_kernel.cpp               +7   -0
src/ngraph/op/experimental/compiled_kernel.hpp               +4   -0
src/ngraph/op/experimental/dyn_broadcast.cpp                 +4   -4
src/ngraph/op/experimental/dyn_broadcast.hpp                 +4   -3
src/ngraph/op/experimental/dyn_pad.cpp                       +6   -5
src/ngraph/op/experimental/dyn_pad.hpp                       +5   -4
src/ngraph/op/experimental/dyn_replace_slice.cpp             +6   -6
src/ngraph/op/experimental/dyn_replace_slice.hpp             +6   -5
src/ngraph/op/experimental/dyn_reshape.cpp                   +2   -4
src/ngraph/op/experimental/dyn_reshape.hpp                   +3   -2
src/ngraph/op/experimental/dyn_slice.cpp                     +5   -5
src/ngraph/op/experimental/dyn_slice.hpp                     +5   -4
src/ngraph/op/experimental/generate_mask.cpp                 +0   -5
src/ngraph/op/experimental/generate_mask.hpp                 +1   -1
src/ngraph/op/experimental/quantized_concat.cpp              +7   -0
src/ngraph/op/experimental/quantized_concat.hpp              +7   -0
src/ngraph/op/experimental/quantized_conv_bias.hpp           +1   -0
src/ngraph/op/experimental/quantized_conv_relu.hpp           +1   -0
src/ngraph/op/experimental/quantized_dot.hpp                 +1   -0
src/ngraph/op/experimental/quantized_dot_bias.hpp            +1   -0
src/ngraph/op/experimental/quantized_max_pool.hpp            +1   -0
src/ngraph/op/experimental/range.cpp                         +0   -4
src/ngraph/op/experimental/range.hpp                         +1   -1
src/ngraph/op/experimental/shape_of.cpp                      +2   -2
src/ngraph/op/experimental/shape_of.hpp                      +2   -1
src/ngraph/op/experimental/tile.cpp                          +2   -2
src/ngraph/op/experimental/tile.hpp                          +2   -1
src/ngraph/op/experimental/transpose.cpp                     +2   -2
src/ngraph/op/experimental/transpose.hpp                     +2   -1
src/ngraph/pass/manager.hpp                                  +19  -15
src/ngraph/runtime/backend.cpp                               +4   -0
src/ngraph/runtime/backend_manager.cpp                       +17  -6
src/ngraph/runtime/cpu/builder/quantized_concat.cpp          +3   -0
src/ngraph/runtime/cpu/builder/quantized_max_pool.cpp        +3   -0
src/ngraph/runtime/cpu/cpu_builder_registry.cpp              +2   -0
src/ngraph/runtime/cpu/cpu_builder_registry.hpp              +2   -0
src/ngraph/runtime/plaidml/unit_test.manifest                +3   -0
test/pass_manager.cpp                                        +26  -0
CMakeLists.txt (View file @ 886e87d4)

@@ -398,6 +398,10 @@ if (NGRAPH_MLIR_ENABLE)
     set(NGRAPH_MLIR_SOURCE_DIR ${CMAKE_SOURCE_DIR}/src/contrib/mlir)
 endif()
 
+if (NGRAPH_STATIC_LIB_ENABLE)
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DNGRAPH_STATIC_LIB_ENABLE")
+endif()
+
 if (NGRAPH_PLAIDML_ENABLE)
     find_package(PlaidML CONFIG)
     if (NOT PLAIDML_FOUND)
CODEOWNERS (View file @ 886e87d4)

@@ -37,7 +37,7 @@ project/doc-contributor-README.rst @indie
 /src/resource/              @rkimballn1
 /src/tools/                 @rkimballn1
 /src/ngraph/autodiff/       @diyessi
-/src/ngraph/builder/        @sfvaroglu
+/src/ngraph/builder/        @mbrookhart
 /src/ngraph/codegen/        @rkimballn1
 /src/ngraph/distributed.*   @wenzhe-nrv @diyessi
 /src/ngraph/frontend/       @postrational
doc/sphinx/source/python_api/_autosummary/ngraph.ops.rst (View file @ 886e87d4)

@@ -37,6 +37,7 @@ ngraph.ops
    elu
    equal
    exp
+   fake_quantize
    floor
    gelu
    gemm
python/ngraph/__init__.py (View file @ 886e87d4)

@@ -50,6 +50,7 @@ from ngraph.ops import dot
 from ngraph.ops import elu
 from ngraph.ops import equal
 from ngraph.ops import exp
+from ngraph.ops import fake_quantize
 from ngraph.ops import floor
 from ngraph.ops import gelu
 from ngraph.ops import gemm
python/ngraph/impl/op/__init__.py (View file @ 886e87d4)

@@ -74,6 +74,7 @@ from _pyngraph.op import Dot
 from _pyngraph.op import Elu
 from _pyngraph.op import Equal
 from _pyngraph.op import Exp
+from _pyngraph.op import FakeQuantize
 from _pyngraph.op import Floor
 from _pyngraph.op import Gelu
 from _pyngraph.op import Gemm
python/ngraph/ops.py (View file @ 886e87d4)

@@ -22,9 +22,9 @@ from ngraph.impl import AxisSet, AxisVector, Coordinate, CoordinateDiff, Functio
 from ngraph.impl.op import Abs, Acos, Add, And, Asin, ArgMax, ArgMin, Atan, AvgPool, \
     BatchNormTraining, BatchNormInference, Broadcast, Ceiling, Clamp, Concat, Constant, Convert, \
-    Convolution, ConvolutionBackpropData, Cos, Cosh, DepthToSpace, Divide, Dot, Elu, Equal, Exp, \
-    Floor, Gelu, Gemm, GetOutputElement, Greater, GreaterEq, GRN, Less, LessEq, Log, LRN, Max, \
-    Maximum, MaxPool, Min, Minimum, Multiply, Negative, Not, NotEqual, OneHot, Or, Pad, \
+    Convolution, ConvolutionBackpropData, Cos, Cosh, DepthToSpace, Divide, Dot, Elu, FakeQuantize, \
+    Equal, Exp, Floor, Gelu, Gemm, GetOutputElement, Greater, GreaterEq, GRN, Less, LessEq, Log, \
+    LRN, Max, Maximum, MaxPool, Min, Minimum, Multiply, Negative, Not, NotEqual, OneHot, Or, Pad, \
     Parameter, Product, Power, Relu, ReplaceSlice, Reshape, Reverse, Select, Sign, Sin, Sinh, \
     Slice, Softmax, Sqrt, Subtract, Sum, Tan, Tanh, TopK

@@ -537,6 +537,38 @@ def broadcast_to(node, new_shape, axis=None, name=None):
+@nameable_op
+def fake_quantize(data, input_low, input_high, output_low, output_high, levels, name=None):
+    # type: (Node, Node, Node, Node, Node, int, str) -> Node
+    r"""Perform an element-wise linear quantization on input data.
+
+    Input floating point values are quantized into a discrete set of floating point values.
+
+    .. code-block:: python
+        if x <= input_low:
+            output = output_low
+        if x > input_high:
+            output = output_high
+        else:
+            output = fake_quantize(output)
+
+    Fake quantize uses the following logic:
+
+    .. math:: output =
+            \dfrac{round( \dfrac{data - input\_low}{(input\_high - input\_low)\cdot (levels-1)})}
+            {(levels-1)\cdot (output\_high - output\_low)} + output\_low
+
+    :param data: The node with data tensor.
+    :param input_low: The node with the minimum for input values.
+    :param input_high: The node with the maximum for input values.
+    :param output_low: The node with the minimum quantized value.
+    :param output_high: The node with the maximum quantized value.
+    :param levels: The number of quantization levels. Integer value.
+    :return: New node with quantized value.
+    """
+    return FakeQuantize(data, input_low, input_high, output_low, output_high, levels)
+
+
 def gemm(A,  # type: Node
          B,  # type: Node
          C,  # type: Node
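Editor's note: as a usage illustration of the new ng.fake_quantize wrapper above (not part of the commit), here is a minimal sketch mirroring the unit test further down. The ng.runtime(backend_name='CPU') helper and the availability of a CPU backend are assumptions about the build.

    import numpy as np
    import ngraph as ng

    # Graph inputs: a (1, 2, 3, 4) data tensor and four scalar range bounds.
    data = ng.parameter([1, 2, 3, 4], name='data', dtype=np.float32)
    in_low = ng.parameter([], name='input_low', dtype=np.float32)
    in_high = ng.parameter([], name='input_high', dtype=np.float32)
    out_low = ng.parameter([], name='output_low', dtype=np.float32)
    out_high = ng.parameter([], name='output_high', dtype=np.float32)

    # Quantize the input range onto 4 discrete levels spread over the output range.
    model = ng.fake_quantize(data, in_low, in_high, out_low, out_high, levels=4)

    runtime = ng.runtime(backend_name='CPU')  # assumption: CPU backend is built
    quantize = runtime.computation(model, data, in_low, in_high, out_low, out_high)
    result = quantize(np.arange(24.0, dtype=np.float32).reshape(1, 2, 3, 4),
                      np.float32(0), np.float32(23), np.float32(2), np.float32(16))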
python/pyngraph/ops/fused/fake_quantize.cpp (new file, 0 → 100644; View file @ 886e87d4)

//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <pybind11/pybind11.h>
#include <pybind11/stl.h>

#include "ngraph/op/fused/fake_quantize.hpp"
#include "pyngraph/ops/fused/fake_quantize.hpp"

namespace py = pybind11;

void regclass_pyngraph_op_FakeQuantize(py::module m)
{
    py::class_<ngraph::op::FakeQuantize,
               std::shared_ptr<ngraph::op::FakeQuantize>,
               ngraph::op::Op>
        fakequantize(m, "FakeQuantize");
    fakequantize.doc() = "ngraph.impl.op.FakeQuantize wraps ngraph::op::FakeQuantize";
    fakequantize.def(py::init<const std::shared_ptr<ngraph::Node>&,
                              const std::shared_ptr<ngraph::Node>&,
                              const std::shared_ptr<ngraph::Node>&,
                              const std::shared_ptr<ngraph::Node>&,
                              const std::shared_ptr<ngraph::Node>&,
                              int&>());
}
python/pyngraph/ops/fused/fake_quantize.hpp (new file, 0 → 100644; View file @ 886e87d4)

//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include <pybind11/pybind11.h>

namespace py = pybind11;

void regclass_pyngraph_op_FakeQuantize(py::module m);
python/pyngraph/ops/regmodule_pyngraph_op.cpp (View file @ 886e87d4)

@@ -54,6 +54,7 @@ void regmodule_pyngraph_op(py::module m_op)
     regclass_pyngraph_op_Elu(m_op);
     regclass_pyngraph_op_Equal(m_op);
     regclass_pyngraph_op_Exp(m_op);
+    regclass_pyngraph_op_FakeQuantize(m_op);
     regclass_pyngraph_op_Floor(m_op);
     regclass_pyngraph_op_Gelu(m_op);
     regclass_pyngraph_op_Gemm(m_op);
python/pyngraph/ops/regmodule_pyngraph_op.hpp (View file @ 886e87d4)

@@ -45,6 +45,7 @@
 #include "pyngraph/ops/fused/clamp.hpp"
 #include "pyngraph/ops/fused/depth_to_space.hpp"
 #include "pyngraph/ops/fused/elu.hpp"
+#include "pyngraph/ops/fused/fake_quantize.hpp"
 #include "pyngraph/ops/fused/gelu.hpp"
 #include "pyngraph/ops/fused/gemm.hpp"
 #include "pyngraph/ops/fused/grn.hpp"
python/setup.py (View file @ 886e87d4)

@@ -184,6 +184,7 @@ sources = [
            'pyngraph/ops/fused/elu.cpp',
            'pyngraph/ops/equal.cpp',
            'pyngraph/ops/exp.cpp',
+           'pyngraph/ops/fused/fake_quantize.cpp',
            'pyngraph/ops/floor.cpp',
            'pyngraph/ops/fused/gelu.cpp',
            'pyngraph/ops/fused/gemm.cpp',
python/test/ngraph/test_ops_fused.py (View file @ 886e87d4)

@@ -69,6 +69,52 @@ def test_elu_operator_with_scalar():
     assert np.allclose(result, expected)
 
 
+def test_fake_quantize():
+    runtime = get_runtime()
+
+    data_value = np.arange(24.0, dtype=np.float32).reshape(1, 2, 3, 4)
+    input_low_value = np.float32(0)
+    input_high_value = np.float32(23)
+    output_low_value = np.float32(2)
+    output_high_value = np.float32(16)
+    levels = np.float32(4)
+
+    data_shape = [1, 2, 3, 4]
+    bound_shape = []
+    parameter_data = ng.parameter(data_shape, name='data', dtype=np.float32)
+    parameter_input_low = ng.parameter(bound_shape, name='input_low', dtype=np.float32)
+    parameter_input_high = ng.parameter(bound_shape, name='input_high', dtype=np.float32)
+    parameter_output_low = ng.parameter(bound_shape, name='output_low', dtype=np.float32)
+    parameter_output_high = ng.parameter(bound_shape, name='output_high', dtype=np.float32)
+
+    model = ng.fake_quantize(parameter_data,
+                             parameter_input_low,
+                             parameter_input_high,
+                             parameter_output_low,
+                             parameter_output_high,
+                             levels)
+    computation = runtime.computation(model,
+                                      parameter_data,
+                                      parameter_input_low,
+                                      parameter_input_high,
+                                      parameter_output_low,
+                                      parameter_output_high)
+
+    result = computation(data_value,
+                         input_low_value,
+                         input_high_value,
+                         output_low_value,
+                         output_high_value)
+
+    expected = np.array([[[[[2., 2., 2., 2.],
+                            [6.6666669, 6.6666669, 6.6666669, 6.6666669],
+                            [6.6666669, 6.6666669, 6.6666669, 6.6666669]],
+                           [[11.33333301, 11.33333301, 11.33333301, 11.33333301],
+                            [11.33333301, 11.33333301, 11.33333301, 11.33333301],
+                            [16., 16., 16., 16.]]]]],
+                        dtype=np.float32)
+    assert np.allclose(result, expected)
+
+
 def test_depth_to_space():
     runtime = get_runtime()

@@ -219,4 +265,5 @@ def test_grn_operator():
                          [[0.9970545, 0.98994946, 0.9805807, 0.97014254],
                           [0.9593655, 0.9486833, 0.9383431, 0.9284767],
                           [0.91914505, 0.9103665, 0.9021342, 0.8944272]]]],
                         dtype=np.float32)
     assert np.allclose(result, expected)
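Editor's note on the expected values in test_fake_quantize above (not part of the commit): with input range [0, 23], output range [2, 16] and levels = 4, the quantization the test exercises is, for an element x,

    \text{output}(x) = 2 + \frac{\operatorname{round}\!\left(\frac{x - 0}{23 - 0}\,(4 - 1)\right)}{4 - 1}\,(16 - 2)

so, for example, x = 7 gives round(7/23 * 3) = 1 and an output of 2 + (1/3) * 14, approximately 6.6667, matching the second row of the expected array.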
src/ngraph/CMakeLists.txt (View file @ 886e87d4)

@@ -519,6 +519,14 @@ if(NOT NGRAPH_JSON_ENABLE)
     target_compile_definitions(ngraph PUBLIC NGRAPH_JSON_DISABLE)
 endif()
 
+if (NGRAPH_INTERPRETER_STATIC_LIB_ENABLE)
+    target_compile_definitions(ngraph PUBLIC NGRAPH_INTERPRETER_STATIC_LIB_ENABLE)
+endif()
+
+if (NGRAPH_CPU_STATIC_LIB_ENABLE)
+    target_compile_definitions(ngraph PUBLIC NGRAPH_CPU_STATIC_LIB_ENABLE)
+endif()
+
 if (NGRAPH_DISTRIBUTED_ENABLE)
     if (NGRAPH_DISTRIBUTED_MLSL_ENABLE)
         target_include_directories(ngraph SYSTEM PRIVATE libmlsl)
src/ngraph/op/experimental/batch_mat_mul.cpp (View file @ 886e87d4)

@@ -24,8 +24,8 @@ using namespace ngraph;
 const string op::BatchMatMul::type_name{"BatchMatMul"};
 
-op::BatchMatMul::BatchMatMul(const shared_ptr<Node>& arg0, const shared_ptr<Node>& arg1)
-    : Op(check_single_output_args({arg0, arg1}))
+op::BatchMatMul::BatchMatMul(const Output<Node>& arg0, const Output<Node>& arg1)
+    : Op({arg0, arg1})
 {
     constructor_validate_and_infer_types();
 }
src/ngraph/op/experimental/batch_mat_mul.hpp (View file @ 886e87d4)

@@ -35,11 +35,12 @@ namespace ngraph
             NGRAPH_API
             static const std::string type_name;
             const std::string& description() const override { return type_name; }
+            BatchMatMul() = default;
             /// \brief Constructs a batch of matmul product operation.
             ///
             /// \param arg0 The node producing the first argument.
             /// \param arg1 The node producing the second argument.
-            BatchMatMul(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1);
+            BatchMatMul(const Output<Node>& arg0, const Output<Node>& arg1);
 
             virtual void validate_and_infer_types() override;
src/ngraph/op/experimental/compiled_kernel.cpp (View file @ 886e87d4)

@@ -62,6 +62,13 @@ shared_ptr<Node> ngraph::op::CompiledKernel::copy_with_new_args(const NodeVector
     return std::make_shared<CompiledKernel>(new_node_list, new_outputs, new_args);
 }
 
+ngraph::op::CompiledKernel::CompiledKernel(const OutputVector& node_list,
+                                           const OutputVector& outputs,
+                                           const OutputVector& args)
+    : CompiledKernel(as_node_vector(node_list), as_node_vector(outputs), as_node_vector(args))
+{
+}
+
 ngraph::op::CompiledKernel::CompiledKernel(const NodeVector& node_list,
                                            const NodeVector& outputs,
                                            const NodeVector& args)
src/ngraph/op/experimental/compiled_kernel.hpp (View file @ 886e87d4)

@@ -35,9 +35,13 @@ namespace ngraph
             NGRAPH_API
             static const std::string type_name;
             const std::string& description() const override { return type_name; }
+            CompiledKernel() = default;
             CompiledKernel(const NodeVector& node_list,
                            const NodeVector& outputs,
                            const NodeVector& args);
+            CompiledKernel(const OutputVector& node_list,
+                           const OutputVector& outputs,
+                           const OutputVector& args);
 
             virtual std::shared_ptr<Node>
                 copy_with_new_args(const NodeVector& new_args) const override;
src/ngraph/op/experimental/dyn_broadcast.cpp (View file @ 886e87d4)

@@ -22,10 +22,10 @@ using namespace ngraph;
 const string op::DynBroadcast::type_name{"DynBroadcast"};
 
-op::DynBroadcast::DynBroadcast(const shared_ptr<Node>& arg,
-                               const shared_ptr<Node>& shape,
-                               const shared_ptr<Node>& broadcast_axes)
-    : Op(check_single_output_args({arg, shape, broadcast_axes}))
+op::DynBroadcast::DynBroadcast(const Output<Node>& arg,
+                               const Output<Node>& shape,
+                               const Output<Node>& broadcast_axes)
+    : Op({arg, shape, broadcast_axes})
 {
     constructor_validate_and_infer_types();
 }
src/ngraph/op/experimental/dyn_broadcast.hpp (View file @ 886e87d4)

@@ -31,15 +31,16 @@ namespace ngraph
             NGRAPH_API
             static const std::string type_name;
             const std::string& description() const override { return type_name; }
+            DynBroadcast() = default;
             /// \brief Constructs a dynamic broadcast operation.
             ///
             /// \param arg Node that produces the input tensor to be broadcast.
             /// \param shape Node that produces shape of the output tensor.
             /// \param broadcast_axes Node that produces the axis positions (0-based) in the result that are being broadcast. The
             ///  remaining axes in shape must be the same as the shape of arg.
-            DynBroadcast(const std::shared_ptr<Node>& arg,
-                         const std::shared_ptr<Node>& shape,
-                         const std::shared_ptr<Node>& broadcast_axes);
+            DynBroadcast(const Output<Node>& arg,
+                         const Output<Node>& shape,
+                         const Output<Node>& broadcast_axes);
 
             void validate_and_infer_types() override;
src/ngraph/op/experimental/dyn_pad.cpp (View file @ 886e87d4)

@@ -21,12 +21,13 @@ using namespace ngraph;
 const string op::DynPad::type_name{"DynPad"};
 
-op::DynPad::DynPad(const std::shared_ptr<Node>& arg,
-                   const std::shared_ptr<Node>& padding_below,
-                   const std::shared_ptr<Node>& padding_above,
-                   const std::shared_ptr<Node>& padding_value,
-    : Op(check_single_output_args({arg, padding_below, padding_above, padding_value}))
+op::DynPad::DynPad(const Output<Node>& arg,
+                   const Output<Node>& padding_below,
+                   const Output<Node>& padding_above,
+                   const Output<Node>& padding_value,
+                   op::PadMode pad_mode)
+    : Op({arg, padding_below, padding_above, padding_value})
     , m_pad_mode(pad_mode)
 {
     constructor_validate_and_infer_types();
 }
src/ngraph/op/experimental/dyn_pad.hpp (View file @ 886e87d4)

@@ -30,6 +30,7 @@ namespace ngraph
             NGRAPH_API
             static const std::string type_name;
             const std::string& description() const override { return type_name; }
+            DynPad() = default;
             /// \brief Perform dynamic padding of a tensor
             ///
             /// \param arg The node producing input tensor to be padded.
@@ -37,10 +38,10 @@ namespace ngraph
             /// \param padding_above The node producing the padding-above widths.
             /// \param padding_value The value to be used for padding. Must be scalar.
             /// \param pad_mode The padding mode: CONSTANT(default), EDGE or REFLECT.
-            DynPad(const std::shared_ptr<Node>& arg,
-                   const std::shared_ptr<Node>& padding_below,
-                   const std::shared_ptr<Node>& padding_above,
-                   const std::shared_ptr<Node>& padding_value,
+            DynPad(const Output<Node>& arg,
+                   const Output<Node>& padding_below,
+                   const Output<Node>& padding_above,
+                   const Output<Node>& padding_value,
                    PadMode pad_mode = PadMode::CONSTANT);
 
             PadMode get_pad_mode() const { return m_pad_mode; }
src/ngraph/op/experimental/dyn_replace_slice.cpp (View file @ 886e87d4)

@@ -26,17 +26,17 @@ using namespace ngraph;
 const string op::DynReplaceSlice::type_name{"DynReplaceSlice"};
 
-op::DynReplaceSlice::DynReplaceSlice(const shared_ptr<Node>& arg,
-                                     const shared_ptr<Node>& replacement,
-                                     const shared_ptr<Node>& lower_bounds,
-                                     const shared_ptr<Node>& upper_bounds,
-                                     const shared_ptr<Node>& strides,
+op::DynReplaceSlice::DynReplaceSlice(const Output<Node>& arg,
+                                     const Output<Node>& replacement,
+                                     const Output<Node>& lower_bounds,
+                                     const Output<Node>& upper_bounds,
+                                     const Output<Node>& strides,
                                      const AxisSet& lower_bounds_mask,
                                      const AxisSet& upper_bounds_mask,
                                      const AxisSet& new_axis,
                                      const AxisSet& shrink_axis,
                                      const AxisSet& ellipsis_mask)
-    : Op(check_single_output_args({arg, replacement, lower_bounds, upper_bounds, strides}))
+    : Op({arg, replacement, lower_bounds, upper_bounds, strides})
     , m_lower_bounds_mask(lower_bounds_mask)
     , m_upper_bounds_mask(upper_bounds_mask)
     , m_new_axis(new_axis)
src/ngraph/op/experimental/dyn_replace_slice.hpp (View file @ 886e87d4)

@@ -30,6 +30,7 @@ namespace ngraph
             NGRAPH_API
             static const std::string type_name;
             const std::string& description() const override { return type_name; }
+            DynReplaceSlice() = default;
             /// \brief Constructs a dynamic tensor replace-slice operation.
             ///
             /// \param arg The tensor in which to replace the slice.
@@ -43,11 +44,11 @@ namespace ngraph
             /// \param new_axis Add dimension one axis at the set positions
             /// \param shrink_axis Delete dimensions at the set positions
             /// \param ellipsis_mask Inserts missing dimensions on the set position
-            DynReplaceSlice(const std::shared_ptr<Node>& arg,
-                            const std::shared_ptr<Node>& replacement,
-                            const std::shared_ptr<Node>& lower_bounds,
-                            const std::shared_ptr<Node>& upper_bounds,
-                            const std::shared_ptr<Node>& strides,
+            DynReplaceSlice(const Output<Node>& arg,
+                            const Output<Node>& replacement,
+                            const Output<Node>& lower_bounds,
+                            const Output<Node>& upper_bounds,
+                            const Output<Node>& strides,
                             const AxisSet& lower_bounds_mask = AxisSet{},
                             const AxisSet& upper_bounds_mask = AxisSet{},
                             const AxisSet& new_axis = AxisSet{},
src/ngraph/op/experimental/dyn_reshape.cpp (View file @ 886e87d4)

@@ -25,10 +25,8 @@ using namespace ngraph;
 const string op::DynReshape::type_name{"DynReshape"};
 
-op::DynReshape::DynReshape(const shared_ptr<Node>& arg,
-                           const shared_ptr<Node>& pattern,
-                           bool zero_flag)
-    : Op(check_single_output_args({arg, pattern}))
+op::DynReshape::DynReshape(const Output<Node>& arg, const Output<Node>& pattern, bool zero_flag)
+    : Op({arg, pattern})
     , m_zero_flag(zero_flag)
 {
     constructor_validate_and_infer_types();
src/ngraph/op/experimental/dyn_reshape.hpp (View file @ 886e87d4)

@@ -34,6 +34,7 @@ namespace ngraph
             NGRAPH_API
             static const std::string type_name;
             const std::string& description() const override { return type_name; }
+            DynReshape() = default;
             /// \brief Constructs a dynamic reshape operation. This operation does not perform transpose.
             ///
             /// \param arg The tensor to be reshaped.
@@ -44,8 +45,8 @@ namespace ngraph
             ///        size is inferred based on element count of input tensor.
             /// \param zero_flag Treats zeros in `pattern` as wildcard flags indicating a copy from input
             ///        shape at the same index.
-            DynReshape(const std::shared_ptr<Node>& arg,
-                       const std::shared_ptr<Node>& pattern,
+            DynReshape(const Output<Node>& arg,
+                       const Output<Node>& pattern,
                        bool zero_flag = false);
 
             void validate_and_infer_types() override;
src/ngraph/op/experimental/dyn_slice.cpp (View file @ 886e87d4)

@@ -26,16 +26,16 @@ using namespace ngraph;
 const string op::DynSlice::type_name{"DynSlice"};
 
-op::DynSlice::DynSlice(const shared_ptr<Node>& arg,
-                       const shared_ptr<Node>& lower_bounds,
-                       const shared_ptr<Node>& upper_bounds,
-                       const shared_ptr<Node>& strides,
+op::DynSlice::DynSlice(const Output<Node>& arg,
+                       const Output<Node>& lower_bounds,
+                       const Output<Node>& upper_bounds,
+                       const Output<Node>& strides,
                        const AxisSet& lower_bounds_mask,
                        const AxisSet& upper_bounds_mask,
                        const AxisSet& new_axis,
                        const AxisSet& shrink_axis,
                        const AxisSet& ellipsis_mask)
-    : Op(check_single_output_args({arg, lower_bounds, upper_bounds, strides}))
+    : Op({arg, lower_bounds, upper_bounds, strides})
     , m_lower_bounds_mask(lower_bounds_mask)
     , m_upper_bounds_mask(upper_bounds_mask)
     , m_new_axis(new_axis)
src/ngraph/op/experimental/dyn_slice.hpp (View file @ 886e87d4)

@@ -30,6 +30,7 @@ namespace ngraph
             NGRAPH_API
             static const std::string type_name;
             const std::string& description() const override { return type_name; }
+            DynSlice() = default;
             /// \brief Constructs a dynamic tensor slice operation.
             ///
             /// \param arg The tensor to be sliced.
@@ -42,10 +43,10 @@ namespace ngraph
             /// \param new_axis Add dimension one axis at the set positions
             /// \param shrink_axis Delete dimensions at the set positions
             /// \param ellipsis_mask Inserts missing dimensions on the set position
-            DynSlice(const std::shared_ptr<Node>& arg,
-                     const std::shared_ptr<Node>& lower_bounds,
-                     const std::shared_ptr<Node>& upper_bounds,
-                     const std::shared_ptr<Node>& strides,
+            DynSlice(const Output<Node>& arg,
+                     const Output<Node>& lower_bounds,
+                     const Output<Node>& upper_bounds,
+                     const Output<Node>& strides,
                      const AxisSet& lower_bounds_mask = AxisSet{},
                      const AxisSet& upper_bounds_mask = AxisSet{},
                      const AxisSet& new_axis = AxisSet{},
src/ngraph/op/experimental/generate_mask.cpp (View file @ 886e87d4)

@@ -21,11 +21,6 @@ using namespace ngraph;
 const string op::GenerateMask::type_name{"GenerateMask"};
 
-op::GenerateMask::GenerateMask()
-    : Op()
-{
-}
-
 #if 0
 // Not supported until all transformers use nodes instead of attributes
 op::GenerateMask::GenerateMask(const Output<Node>& training,
src/ngraph/op/experimental/generate_mask.hpp (View file @ 886e87d4)

@@ -34,7 +34,7 @@ namespace ngraph
             const std::string& description() const override { return type_name; }
 
             /// \brief Constructs a GenerateMask node with a given shape, seed,
             /// probability and training/inference mode
-            GenerateMask();
+            GenerateMask() = default;
 
 #if 0
             /// Switch to dynamic arguments when all transformers have switched to using the node values
src/ngraph/op/experimental/quantized_concat.cpp (View file @ 886e87d4)

@@ -24,6 +24,13 @@ using namespace ngraph;
 const string op::QuantizedConcat::type_name{"QuantizedConcat"};
 
+op::QuantizedConcat::QuantizedConcat(const OutputVector& args, size_t concatenation_axis)
+    : Op(args)
+    , m_concatenation_axis(concatenation_axis)
+{
+    constructor_validate_and_infer_types();
+}
+
 op::QuantizedConcat::QuantizedConcat(const NodeVector& args, size_t concatenation_axis)
     : Op(check_single_output_args(args))
     , m_concatenation_axis(concatenation_axis)
src/ngraph/op/experimental/quantized_concat.hpp (View file @ 886e87d4)

@@ -31,12 +31,19 @@ namespace ngraph
             NGRAPH_API
             static const std::string type_name;
             const std::string& description() const override { return type_name; }
+            QuantizedConcat() = default;
             /// \brief Constructs a concatenation operation.
             ///
             /// \param args               The nodes producing the input tensors.
             /// \param concatenation_axis The axis along which to concatenate the input tensors.
             QuantizedConcat(const NodeVector& args, size_t concatenation_axis);
+            /// \brief Constructs a concatenation operation.
+            ///
+            /// \param args               The nodes producing the input tensors.
+            /// \param concatenation_axis The axis along which to concatenate the input tensors.
+            QuantizedConcat(const OutputVector& args, size_t concatenation_axis);
 
             void validate_and_infer_types() override;
 
             virtual std::shared_ptr<Node>
src/ngraph/op/experimental/quantized_conv_bias.hpp (View file @ 886e87d4)

@@ -30,6 +30,7 @@ namespace ngraph
             NGRAPH_API
             static const std::string type_name;
             const std::string& description() const override { return type_name; }
+            QuantizedConvolutionBias() = default;
             QuantizedConvolutionBias(const Output<Node>& data_batch,
                                      const Output<Node>& filters,
                                      const Output<Node>& bias,
src/ngraph/op/experimental/quantized_conv_relu.hpp (View file @ 886e87d4)

@@ -30,6 +30,7 @@ namespace ngraph
             NGRAPH_API
             static const std::string type_name;
             const std::string& description() const override { return type_name; }
+            QuantizedConvolutionRelu() = default;
             QuantizedConvolutionRelu(const Output<Node>& data_batch,
                                      const Output<Node>& filters,
                                      const Strides& window_movement_strides,
src/ngraph/op/experimental/quantized_dot.hpp (View file @ 886e87d4)

@@ -30,6 +30,7 @@ namespace ngraph
             NGRAPH_API
             static const std::string type_name;
             const std::string& description() const override { return type_name; }
+            QuantizedDot() = default;
             QuantizedDot(const Output<Node>& data,
                          const Output<Node>& weights,
                          const Output<Node>& scale,
src/ngraph/op/experimental/quantized_dot_bias.hpp (View file @ 886e87d4)

@@ -30,6 +30,7 @@ namespace ngraph
             NGRAPH_API
             static const std::string type_name;
             const std::string& description() const override { return type_name; }
+            QuantizedDotBias() = default;
             QuantizedDotBias(const Output<Node>& data,
                              const Output<Node>& weights,
                              const Output<Node>& bias,
src/ngraph/op/experimental/quantized_max_pool.hpp (View file @ 886e87d4)

@@ -29,6 +29,7 @@ namespace ngraph
             NGRAPH_API
             static const std::string type_name;
             const std::string& description() const override { return type_name; }
+            QuantizedMaxPool() = default;
             /// \brief Constructs a batched max pooling operation.
             ///
             /// \param arg The node producing the input data batch tensor.
src/ngraph/op/experimental/range.cpp (View file @ 886e87d4)

@@ -24,10 +24,6 @@ using namespace ngraph;
 const string op::Range::type_name = "Range";
 
-op::Range::Range()
-{
-}
-
 op::Range::Range(const Output<Node>& start, const Output<Node>& stop, const Output<Node>& step)
     : Op({start, stop, step})
 {
src/ngraph/op/experimental/range.hpp (View file @ 886e87d4)

@@ -31,7 +31,7 @@ namespace ngraph
             static const std::string type_name;
             const std::string& description() const override { return type_name; }
 
             /// \brief Constructs an unitialized range operation.
-            Range();
+            Range() = default;
 
             /// \brief Constructs a range operation.
             ///
src/ngraph/op/experimental/shape_of.cpp (View file @ 886e87d4)

@@ -21,8 +21,8 @@ using namespace ngraph;
 const string op::ShapeOf::type_name{"ShapeOf"};
 
-op::ShapeOf::ShapeOf(const shared_ptr<Node>& arg)
-    : Op(check_single_output_args({arg}))
+op::ShapeOf::ShapeOf(const Output<Node>& arg)
+    : Op({arg})
 {
     constructor_validate_and_infer_types();
 }
src/ngraph/op/experimental/shape_of.hpp (View file @ 886e87d4)

@@ -29,8 +29,9 @@ namespace ngraph
             NGRAPH_API
             static const std::string type_name;
             const std::string& description() const override { return type_name; }
+            ShapeOf() = default;
             /// \brief Constructs a shape-of operation.
-            ShapeOf(const std::shared_ptr<Node>& arg);
+            ShapeOf(const Output<Node>& arg);
 
             virtual std::shared_ptr<Node>
                 copy_with_new_args(const NodeVector& new_args) const override;
src/ngraph/op/experimental/tile.cpp (View file @ 886e87d4)

@@ -23,8 +23,8 @@ using namespace ngraph;
 const string op::Tile::type_name{"Tile"};
 
-op::Tile::Tile(const std::shared_ptr<Node>& arg, const std::shared_ptr<Node>& repeats)
-    : Op(check_single_output_args({arg, repeats}))
+op::Tile::Tile(const Output<Node>& arg, const Output<Node>& repeats)
+    : Op({arg, repeats})
 {
     constructor_validate_and_infer_types();
 }
src/ngraph/op/experimental/tile.hpp (View file @ 886e87d4)

@@ -30,11 +30,12 @@ namespace ngraph
             NGRAPH_API
             static const std::string type_name;
             const std::string& description() const override { return type_name; }
+            Tile() = default;
             /// \brief Perform dynamic padding of a tensor
             ///
             /// \param arg The node producing input tensor to be padded.
             /// \param repeats The node producing the per-dimension replication factor
-            Tile(const std::shared_ptr<Node>& arg, const std::shared_ptr<Node>& repeats);
+            Tile(const Output<Node>& arg, const Output<Node>& repeats);
 
             void validate_and_infer_types() override;
src/ngraph/op/experimental/transpose.cpp (View file @ 886e87d4)

@@ -24,8 +24,8 @@ using namespace ngraph;
 const string op::Transpose::type_name{"Transpose"};
 
-op::Transpose::Transpose(const shared_ptr<Node>& arg, const shared_ptr<Node>& input_order)
-    : Op(check_single_output_args({arg, input_order}))
+op::Transpose::Transpose(const Output<Node>& arg, const Output<Node>& input_order)
+    : Op({arg, input_order})
 {
     constructor_validate_and_infer_types();
 }
src/ngraph/op/experimental/transpose.hpp (View file @ 886e87d4)

@@ -31,6 +31,7 @@ namespace ngraph
             NGRAPH_API
             static const std::string type_name;
             const std::string& description() const override { return type_name; }
+            Transpose() = default;
             /// \brief Constructs a transpose operation.
             ///
             /// \param arg Node producing the tensor to be transposed.
@@ -38,7 +39,7 @@ namespace ngraph
             ///        input shape. Must be a vector of element type element::i64,
             ///        with shape [n], where n is the rank of arg. The tensor's
             ///        value must contain every integer in the range [0,n-1].
-            Transpose(const std::shared_ptr<Node>& arg, const std::shared_ptr<Node>& input_order);
+            Transpose(const Output<Node>& arg, const Output<Node>& input_order);
 
             void validate_and_infer_types() override;
src/ngraph/pass/manager.hpp (View file @ 886e87d4)

@@ -43,6 +43,25 @@ public:
     template <typename T, class... Args>
     void register_pass(Args&&... args)
     {
+        push_pass<T>(std::forward<Args>(args)...);
+        if (m_per_pass_validation)
+        {
+            push_pass<Validate>();
+        }
+    }
+
+    void run_passes(std::shared_ptr<Function>, bool transitive = true);
+
+    ManagerState& get_state();
+    PassConfig& get_pass_config() { return m_pass_config; }
+    void set_pass_config(const PassConfig& pass_config) { m_pass_config = pass_config; }
+    void set_pass_visualization(bool new_state) { m_visualize = new_state; }
+    void set_pass_serialization(bool new_state) { m_serialize = new_state; }
+    void set_per_pass_validation(bool new_state) { m_per_pass_validation = new_state; }
+
+private:
+    template <typename T, class... Args>
+    void push_pass(Args&&... args)
+    {
         static_assert(std::is_base_of<pass::PassBase, T>::value, "pass not derived from pass base");
         auto pass = std::make_shared<T>(std::forward<Args>(args)...);

@@ -61,23 +80,8 @@ public:
             m_pass_names.push_back(typeid(T).name());
 #endif
         }
-        if (m_per_pass_validation)
-        {
-            auto validate = std::make_shared<Validate>();
-            auto validate_base = std::static_pointer_cast<PassBase>(validate);
-            m_pass_list.push_back(validate_base);
-        }
     }
-
-    void run_passes(std::shared_ptr<Function>, bool transitive = true);
-    ManagerState& get_state();
-    PassConfig& get_pass_config() { return m_pass_config; }
-    void set_pass_config(const PassConfig& pass_config) { m_pass_config = pass_config; }
-    void set_pass_visualization(bool new_state) { m_visualize = new_state; }
-    void set_pass_serialization(bool new_state) { m_serialize = new_state; }
-    void set_per_pass_validation(bool new_state) { m_per_pass_validation = new_state; }
-
-private:
     std::vector<std::string> m_pass_names;
     std::vector<std::shared_ptr<PassBase>> m_pass_list;
     ManagerState m_state;
src/ngraph/runtime/backend.cpp (View file @ 886e87d4)

@@ -37,6 +37,9 @@ std::string runtime::Backend::s_backend_shared_library_search_directory;
 // This finds the full path of the containing shared library
 static string find_my_pathname()
 {
+#ifdef NGRAPH_STATIC_LIB_ENABLE
+    return "";
+#else
 #ifdef _WIN32
     HMODULE hModule = GetModuleHandleW(L"ngraph.dll");
     WCHAR wpath[MAX_PATH];
@@ -52,6 +55,7 @@ static string find_my_pathname()
     dladdr(reinterpret_cast<void*>(find_my_pathname), &dl_info);
     return dl_info.dli_fname;
 #endif
+#endif
 }
 
 runtime::Backend::~Backend()
src/ngraph/runtime/backend_manager.cpp (View file @ 886e87d4)

@@ -32,12 +32,18 @@
 using namespace std;
 using namespace ngraph;
 
+#ifdef NGRAPH_STATIC_LIB_ENABLE
+#define DLERROR() ""
+#else
 #ifdef _WIN32
 #define CLOSE_LIBRARY(a) FreeLibrary(a)
 #define DLSYM(a, b) GetProcAddress(a, b)
+#define DLERROR() ""
 #else
 #define CLOSE_LIBRARY(a) dlclose(a)
 #define DLSYM(a, b) dlsym(a, b)
+#define DLERROR() dlerror()
 #endif
+#endif
 
 unordered_map<string, runtime::BackendConstructor*>& runtime::BackendManager::get_registry()

@@ -101,19 +107,19 @@ shared_ptr<runtime::Backend> runtime::BackendManager::create_backend(const std::
     }
     else
     {
+#ifndef NGRAPH_STATIC_LIB_ENABLE
         DL_HANDLE handle = open_shared_library(type);
         if (!handle)
        {
             stringstream ss;
             ss << "Backend '" << type << "' not registered. Error:";
 #ifndef _WIN32
-            ss << dlerror();
+            ss << DLERROR();
 #endif
             throw runtime_error(ss.str());
         }
 #ifndef _WIN32
-        dlerror(); // Clear any pending errors
+        DLERROR(); // Clear any pending errors
 #endif
         function<runtime::BackendConstructor*()> get_backend_constructor_pointer =
             reinterpret_cast<runtime::BackendConstructor* (*)()>(

@@ -127,7 +133,7 @@ shared_ptr<runtime::Backend> runtime::BackendManager::create_backend(const std::
         {
             string error;
 #ifndef _WIN32
-            const char* err = dlerror();
+            const char* err = DLERROR();
             error = (err ? err : "");
 #endif
             CLOSE_LIBRARY(handle);

@@ -136,6 +142,7 @@ shared_ptr<runtime::Backend> runtime::BackendManager::create_backend(const std::
                                 "library.\nError='" + error + "'");
         }
+#endif
     }
     return backend;
 }

@@ -146,6 +153,7 @@ DL_HANDLE runtime::BackendManager::open_shared_library(string type)
     string lib_suffix = SHARED_LIB_SUFFIX;
 
     DL_HANDLE handle = nullptr;
+#ifndef NGRAPH_STATIC_LIB_ENABLE
 
     // strip off attributes, IE:CPU becomes IE
     auto colon = type.find(":");

@@ -163,9 +171,9 @@ DL_HANDLE runtime::BackendManager::open_shared_library(string type)
     SetDllDirectory((LPCSTR)my_directory.c_str());
     handle = LoadLibrary(library_path.c_str());
 #else
-    dlerror(); // Clear any pending errors
+    DLERROR(); // Clear any pending errors
     handle = dlopen(library_path.c_str(), RTLD_NOW | RTLD_GLOBAL);
-    const char* err = dlerror();
+    const char* err = DLERROR();
     error = (err ? err : "");
 #endif
     if (!handle)

@@ -175,12 +183,14 @@ DL_HANDLE runtime::BackendManager::open_shared_library(string type)
         ss << "\nOpen error message '" << error << "'";
         throw runtime_error(ss.str());
     }
+#endif
     return handle;
 }
 
 map<string, string> runtime::BackendManager::get_registered_device_map()
 {
     map<string, string> rc;
+#ifndef NGRAPH_STATIC_LIB_ENABLE
     string my_directory =
         file_util::get_directory(Backend::get_backend_shared_library_search_directory());
     vector<string> backend_list;

@@ -197,6 +207,7 @@ map<string, string> runtime::BackendManager::get_registered_device_map()
         }
     };
     file_util::iterate_files(my_directory, f, false, true);
+#endif
     return rc;
 }
src/ngraph/runtime/cpu/builder/quantized_concat.cpp (View file @ 886e87d4)

@@ -95,6 +95,9 @@ namespace ngraph
             }
         }
 
         REGISTER_OP_BUILDER(QuantizedConcat);
+#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
+        void register_builders_quantized_concat_cpp() {}
+#endif
     }
 }
}
src/ngraph/runtime/cpu/builder/quantized_max_pool.cpp (View file @ 886e87d4)

@@ -69,6 +69,9 @@ namespace ngraph
             }
         }
 
         REGISTER_OP_BUILDER(QuantizedMaxPool);
+#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
+        void register_builders_quantized_max_pool_cpp() {}
+#endif
     }
 }
}
src/ngraph/runtime/cpu/cpu_builder_registry.cpp (View file @ 886e87d4)

@@ -77,6 +77,8 @@ namespace ngraph
             register_builders_tile_cpp();
             register_builders_topk_cpp();
             register_builders_update_slice_cpp();
+            register_builders_quantized_concat_cpp();
+            register_builders_quantized_max_pool_cpp();
         }
     }
 }
src/ngraph/runtime/cpu/cpu_builder_registry.hpp (View file @ 886e87d4)

@@ -76,6 +76,8 @@ namespace ngraph
         void register_builders_tile_cpp();
         void register_builders_topk_cpp();
         void register_builders_update_slice_cpp();
+        void register_builders_quantized_concat_cpp();
+        void register_builders_quantized_max_pool_cpp();
     }
 }
}
src/ngraph/runtime/plaidml/unit_test.manifest (View file @ 886e87d4)

@@ -280,6 +280,9 @@ lstm_cell_activaction_functions
 divide_python_rounding_int32
 backwards_batchmatmul_tensor2_tensor2
 
+# unsupported ops: `BroadcastDistributed`
+broadcastdistributed
+
 # unsupported ops: 'QuantizedConvolution', 'QuantizedDot', 'TopK', 'Erf', 'EmbeddingLookup'
 model_quant_conv_linear
 model_conv_integer_no_zero_point
test/pass_manager.cpp (View file @ 886e87d4)

@@ -41,3 +41,29 @@ TEST(pass_manager, add)
     EXPECT_EQ(node_count, sorted.size());
     EXPECT_TRUE(validate_list(sorted));
 }
+
+namespace
+{
+    class DummyPass : public pass::FunctionPass
+    {
+    public:
+        DummyPass()
+            : FunctionPass()
+        {
+        }
+        bool run_on_function(std::shared_ptr<ngraph::Function> f) override { return false; }
+    };
+}
+
+// Regression test: We've had an issue in the past where enabling per-pass validation and
+// per-pass serialization at the same time causes a crash.
+TEST(pass_manager, serialize_with_revalidate_does_not_crash)
+{
+    pass::Manager pass_manager;
+    pass_manager.set_per_pass_validation(true);
+    pass_manager.set_pass_serialization(true);
+    pass_manager.register_pass<DummyPass>();
+
+    auto graph = make_test_graph();
+    pass_manager.run_passes(graph);
+}