Unverified commit 886e87d4, authored by Adam Procter, committed by GitHub

Merge branch 'master' into aprocter/s-barannikov-unused_includes

parents 2d868b2c c83a98c3
@@ -398,6 +398,10 @@ if (NGRAPH_MLIR_ENABLE)
    set(NGRAPH_MLIR_SOURCE_DIR ${CMAKE_SOURCE_DIR}/src/contrib/mlir)
endif()

if (NGRAPH_STATIC_LIB_ENABLE)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DNGRAPH_STATIC_LIB_ENABLE")
endif()

if (NGRAPH_PLAIDML_ENABLE)
    find_package(PlaidML CONFIG)
    if (NOT PLAIDML_FOUND)
......
@@ -37,7 +37,7 @@ project/doc-contributor-README.rst @indie
/src/resource/ @rkimballn1
/src/tools/ @rkimballn1
/src/ngraph/autodiff/ @diyessi
-/src/ngraph/builder/ @sfvaroglu
+/src/ngraph/builder/ @mbrookhart
/src/ngraph/codegen/ @rkimballn1
/src/ngraph/distributed.* @wenzhe-nrv @diyessi
/src/ngraph/frontend/ @postrational
......
@@ -37,6 +37,7 @@ ngraph.ops
    elu
    equal
    exp
    fake_quantize
    floor
    gelu
    gemm
......
@@ -50,6 +50,7 @@ from ngraph.ops import dot
from ngraph.ops import elu
from ngraph.ops import equal
from ngraph.ops import exp
from ngraph.ops import fake_quantize
from ngraph.ops import floor
from ngraph.ops import gelu
from ngraph.ops import gemm
......
@@ -74,6 +74,7 @@ from _pyngraph.op import Dot
from _pyngraph.op import Elu
from _pyngraph.op import Equal
from _pyngraph.op import Exp
from _pyngraph.op import FakeQuantize
from _pyngraph.op import Floor
from _pyngraph.op import Gelu
from _pyngraph.op import Gemm
......
@@ -22,9 +22,9 @@ from ngraph.impl import AxisSet, AxisVector, Coordinate, CoordinateDiff, Functio
from ngraph.impl.op import Abs, Acos, Add, And, Asin, ArgMax, ArgMin, Atan, AvgPool, \
    BatchNormTraining, BatchNormInference, Broadcast, Ceiling, Clamp, Concat, Constant, Convert, \
-    Convolution, ConvolutionBackpropData, Cos, Cosh, DepthToSpace, Divide, Dot, Elu, Equal, Exp, \
-    Floor, Gelu, Gemm, GetOutputElement, Greater, GreaterEq, GRN, Less, LessEq, Log, LRN, Max, \
-    Maximum, MaxPool, Min, Minimum, Multiply, Negative, Not, NotEqual, OneHot, Or, Pad, \
+    Convolution, ConvolutionBackpropData, Cos, Cosh, DepthToSpace, Divide, Dot, Elu, FakeQuantize, \
+    Equal, Exp, Floor, Gelu, Gemm, GetOutputElement, Greater, GreaterEq, GRN, Less, LessEq, Log, \
+    LRN, Max, Maximum, MaxPool, Min, Minimum, Multiply, Negative, Not, NotEqual, OneHot, Or, Pad, \
    Parameter, Product, Power, Relu, ReplaceSlice, Reshape, Reverse, Select, Sign, Sin, Sinh, \
    Slice, Softmax, Sqrt, Subtract, Sum, Tan, Tanh, TopK
@@ -537,6 +537,38 @@ def broadcast_to(node, new_shape, axis=None, name=None):

@nameable_op
def fake_quantize(data, input_low, input_high, output_low, output_high, levels, name=None):
    # type: (Node, Node, Node, Node, Node, int, str) -> Node
    r"""Perform an element-wise linear quantization on input data.

    Input floating point values are quantized into a discrete set of floating point values.

    .. code-block:: python

        if x <= input_low:
            output = output_low
        elif x > input_high:
            output = output_high
        else:
            output = quantize(x)  # linear quantization, see the formula below

    Fake quantize uses the following logic:

    .. math:: output =
            \dfrac{round( \dfrac{data - input\_low}{input\_high - input\_low} \cdot (levels-1))}
            {levels-1} \cdot (output\_high - output\_low) + output\_low

    :param data: The node with data tensor.
    :param input_low: The node with the minimum for input values.
    :param input_high: The node with the maximum for input values.
    :param output_low: The node with the minimum quantized value.
    :param output_high: The node with the maximum quantized value.
    :param levels: The number of quantization levels. Integer value.
    :param name: Optional name for the output node.
    :return: New node with quantized value.
    """
    return FakeQuantize(data, input_low, input_high, output_low, output_high, levels)
def gemm(A,  # type: Node
         B,  # type: Node
         C,  # type: Node
......
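The docstring's formula is easy to sanity-check outside of nGraph. Below is a minimal NumPy sketch of the same element-wise math, assuming the saturation behavior described in the docstring; `fake_quantize_ref` is a hypothetical helper for illustration, not part of the nGraph API:

import numpy as np

def fake_quantize_ref(data, input_low, input_high, output_low, output_high, levels):
    """NumPy model of element-wise linear 'fake' quantization."""
    # Normalize into [0, 1], snap to one of `levels` grid points, then rescale.
    normalized = (data - input_low) / (input_high - input_low)
    quantized = np.round(normalized * (levels - 1)) / (levels - 1)
    out = quantized * (output_high - output_low) + output_low
    # Values outside the input range saturate at the output bounds.
    return np.clip(out, output_low, output_high)

x = np.arange(24.0, dtype=np.float32)
print(fake_quantize_ref(x, 0.0, 23.0, 2.0, 16.0, 4))  # values in {2, 6.67, 11.33, 16}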
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include "ngraph/op/fused/fake_quantize.hpp"
#include "pyngraph/ops/fused/fake_quantize.hpp"
namespace py = pybind11;
void regclass_pyngraph_op_FakeQuantize(py::module m)
{
py::class_<ngraph::op::FakeQuantize, std::shared_ptr<ngraph::op::FakeQuantize>, ngraph::op::Op>
fakequantize(m, "FakeQuantize");
fakequantize.doc() = "ngraph.impl.op.FakeQuantize wraps ngraph::op::FakeQuantize";
fakequantize.def(py::init<const std::shared_ptr<ngraph::Node>&,
const std::shared_ptr<ngraph::Node>&,
const std::shared_ptr<ngraph::Node>&,
const std::shared_ptr<ngraph::Node>&,
const std::shared_ptr<ngraph::Node>&,
int&>());
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;
void regclass_pyngraph_op_FakeQuantize(py::module m);
@@ -54,6 +54,7 @@ void regmodule_pyngraph_op(py::module m_op)
    regclass_pyngraph_op_Elu(m_op);
    regclass_pyngraph_op_Equal(m_op);
    regclass_pyngraph_op_Exp(m_op);
    regclass_pyngraph_op_FakeQuantize(m_op);
    regclass_pyngraph_op_Floor(m_op);
    regclass_pyngraph_op_Gelu(m_op);
    regclass_pyngraph_op_Gemm(m_op);
......
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
#include "pyngraph/ops/fused/clamp.hpp" #include "pyngraph/ops/fused/clamp.hpp"
#include "pyngraph/ops/fused/depth_to_space.hpp" #include "pyngraph/ops/fused/depth_to_space.hpp"
#include "pyngraph/ops/fused/elu.hpp" #include "pyngraph/ops/fused/elu.hpp"
#include "pyngraph/ops/fused/fake_quantize.hpp"
#include "pyngraph/ops/fused/gelu.hpp" #include "pyngraph/ops/fused/gelu.hpp"
#include "pyngraph/ops/fused/gemm.hpp" #include "pyngraph/ops/fused/gemm.hpp"
#include "pyngraph/ops/fused/grn.hpp" #include "pyngraph/ops/fused/grn.hpp"
......
@@ -184,6 +184,7 @@ sources = [
    'pyngraph/ops/fused/elu.cpp',
    'pyngraph/ops/equal.cpp',
    'pyngraph/ops/exp.cpp',
    'pyngraph/ops/fused/fake_quantize.cpp',
    'pyngraph/ops/floor.cpp',
    'pyngraph/ops/fused/gelu.cpp',
    'pyngraph/ops/fused/gemm.cpp',
......
@@ -69,6 +69,52 @@ def test_elu_operator_with_scalar():
    assert np.allclose(result, expected)
def test_fake_quantize():
    runtime = get_runtime()

    data_value = np.arange(24.0, dtype=np.float32).reshape(1, 2, 3, 4)
    input_low_value = np.float32(0)
    input_high_value = np.float32(23)
    output_low_value = np.float32(2)
    output_high_value = np.float32(16)
    levels = np.float32(4)

    data_shape = [1, 2, 3, 4]
    bound_shape = []
    parameter_data = ng.parameter(data_shape, name='data', dtype=np.float32)
    parameter_input_low = ng.parameter(bound_shape, name='input_low', dtype=np.float32)
    parameter_input_high = ng.parameter(bound_shape, name='input_high', dtype=np.float32)
    parameter_output_low = ng.parameter(bound_shape, name='output_low', dtype=np.float32)
    parameter_output_high = ng.parameter(bound_shape, name='output_high', dtype=np.float32)

    model = ng.fake_quantize(parameter_data,
                             parameter_input_low,
                             parameter_input_high,
                             parameter_output_low,
                             parameter_output_high,
                             levels)
    computation = runtime.computation(model,
                                      parameter_data,
                                      parameter_input_low,
                                      parameter_input_high,
                                      parameter_output_low,
                                      parameter_output_high)

    result = computation(data_value,
                         input_low_value,
                         input_high_value,
                         output_low_value,
                         output_high_value)

    expected = np.array([[[[[2., 2., 2., 2.],
                            [6.6666669, 6.6666669, 6.6666669, 6.6666669],
                            [6.6666669, 6.6666669, 6.6666669, 6.6666669]],
                           [[11.33333301, 11.33333301, 11.33333301, 11.33333301],
                            [11.33333301, 11.33333301, 11.33333301, 11.33333301],
                            [16., 16., 16., 16.]]]]], dtype=np.float32)
    assert np.allclose(result, expected)
def test_depth_to_space():
    runtime = get_runtime()
@@ -219,4 +265,5 @@ def test_grn_operator():
        [[0.9970545, 0.98994946, 0.9805807, 0.97014254],
         [0.9593655, 0.9486833, 0.9383431, 0.9284767],
         [0.91914505, 0.9103665, 0.9021342, 0.8944272]]]], dtype=np.float32)
    assert np.allclose(result, expected)
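As a sanity check on the expected array in test_fake_quantize above: with levels = 4, input range [0, 23] and output range [2, 16], the only representable outputs are 2 + k * 14 / 3 for k in {0, 1, 2, 3}. A few lines of NumPy reproduce the expected values (an illustrative check, not part of the test suite):

import numpy as np

x = np.arange(24.0, dtype=np.float32)
# Round to one of 4 grid points in [0, 1], then rescale into [2, 16].
expected = np.round(x / 23 * 3) / 3 * 14 + 2
print(np.unique(expected))  # -> [ 2.  6.6666667  11.333333  16. ]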
@@ -519,6 +519,14 @@ if(NOT NGRAPH_JSON_ENABLE)
    target_compile_definitions(ngraph PUBLIC NGRAPH_JSON_DISABLE)
endif()

if(NGRAPH_INTERPRETER_STATIC_LIB_ENABLE)
    target_compile_definitions(ngraph PUBLIC NGRAPH_INTERPRETER_STATIC_LIB_ENABLE)
endif()

if(NGRAPH_CPU_STATIC_LIB_ENABLE)
    target_compile_definitions(ngraph PUBLIC NGRAPH_CPU_STATIC_LIB_ENABLE)
endif()

if(NGRAPH_DISTRIBUTED_ENABLE)
    if(NGRAPH_DISTRIBUTED_MLSL_ENABLE)
        target_include_directories(ngraph SYSTEM PRIVATE libmlsl)
......
@@ -24,8 +24,8 @@ using namespace ngraph;
const string op::BatchMatMul::type_name{"BatchMatMul"};

-op::BatchMatMul::BatchMatMul(const shared_ptr<Node>& arg0, const shared_ptr<Node>& arg1)
-    : Op(check_single_output_args({arg0, arg1}))
+op::BatchMatMul::BatchMatMul(const Output<Node>& arg0, const Output<Node>& arg1)
+    : Op({arg0, arg1})
{
    constructor_validate_and_infer_types();
}
......
@@ -35,11 +35,12 @@ namespace ngraph
    NGRAPH_API
    static const std::string type_name;
    const std::string& description() const override { return type_name; }
    BatchMatMul() = default;
    /// \brief Constructs a batch of matmul product operation.
    ///
    /// \param arg0 The node producing the first argument.
    /// \param arg1 The node producing the second argument.
-    BatchMatMul(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1);
+    BatchMatMul(const Output<Node>& arg0, const Output<Node>& arg1);

    virtual void validate_and_infer_types() override;
......
@@ -62,6 +62,13 @@ shared_ptr<Node> ngraph::op::CompiledKernel::copy_with_new_args(const NodeVector
    return std::make_shared<CompiledKernel>(new_node_list, new_outputs, new_args);
}

ngraph::op::CompiledKernel::CompiledKernel(const OutputVector& node_list,
                                           const OutputVector& outputs,
                                           const OutputVector& args)
    : CompiledKernel(as_node_vector(node_list), as_node_vector(outputs), as_node_vector(args))
{
}

ngraph::op::CompiledKernel::CompiledKernel(const NodeVector& node_list,
                                           const NodeVector& outputs,
                                           const NodeVector& args)
......
@@ -35,9 +35,13 @@ namespace ngraph
    NGRAPH_API
    static const std::string type_name;
    const std::string& description() const override { return type_name; }
    CompiledKernel() = default;
    CompiledKernel(const NodeVector& node_list,
                   const NodeVector& outputs,
                   const NodeVector& args);
    CompiledKernel(const OutputVector& node_list,
                   const OutputVector& outputs,
                   const OutputVector& args);

    virtual std::shared_ptr<Node>
        copy_with_new_args(const NodeVector& new_args) const override;
......
@@ -22,10 +22,10 @@ using namespace ngraph;
const string op::DynBroadcast::type_name{"DynBroadcast"};

-op::DynBroadcast::DynBroadcast(const shared_ptr<Node>& arg,
-                               const shared_ptr<Node>& shape,
-                               const shared_ptr<Node>& broadcast_axes)
-    : Op(check_single_output_args({arg, shape, broadcast_axes}))
+op::DynBroadcast::DynBroadcast(const Output<Node>& arg,
+                               const Output<Node>& shape,
+                               const Output<Node>& broadcast_axes)
+    : Op({arg, shape, broadcast_axes})
{
    constructor_validate_and_infer_types();
}
......
@@ -31,15 +31,16 @@ namespace ngraph
    NGRAPH_API
    static const std::string type_name;
    const std::string& description() const override { return type_name; }
    DynBroadcast() = default;
    /// \brief Constructs a dynamic broadcast operation.
    ///
    /// \param arg Node that produces the input tensor to be broadcast.
    /// \param shape Node that produces shape of the output tensor.
    /// \param broadcast_axes Node that produces the axis positions (0-based) in the result that are being broadcast. The
    ///                       remaining axes in shape must be the same as the shape of arg.
-    DynBroadcast(const std::shared_ptr<Node>& arg,
-                 const std::shared_ptr<Node>& shape,
-                 const std::shared_ptr<Node>& broadcast_axes);
+    DynBroadcast(const Output<Node>& arg,
+                 const Output<Node>& shape,
+                 const Output<Node>& broadcast_axes);

    void validate_and_infer_types() override;
......
@@ -21,12 +21,13 @@ using namespace ngraph;
const string op::DynPad::type_name{"DynPad"};

-op::DynPad::DynPad(const std::shared_ptr<Node>& arg,
-                   const std::shared_ptr<Node>& padding_below,
-                   const std::shared_ptr<Node>& padding_above,
-                   const std::shared_ptr<Node>& padding_value,
+op::DynPad::DynPad(const Output<Node>& arg,
+                   const Output<Node>& padding_below,
+                   const Output<Node>& padding_above,
+                   const Output<Node>& padding_value,
                   op::PadMode pad_mode)
-    : Op(check_single_output_args({arg, padding_below, padding_above, padding_value}))
+    : Op({arg, padding_below, padding_above, padding_value})
+    , m_pad_mode(pad_mode)
{
    constructor_validate_and_infer_types();
}
......
@@ -30,6 +30,7 @@ namespace ngraph
    NGRAPH_API
    static const std::string type_name;
    const std::string& description() const override { return type_name; }
    DynPad() = default;
    /// \brief Perform dynamic padding of a tensor
    ///
    /// \param arg The node producing input tensor to be padded.
@@ -37,10 +38,10 @@ namespace ngraph
    /// \param padding_above The node producing the padding-above widths.
    /// \param padding_value The value to be used for padding. Must be scalar.
    /// \param pad_mode The padding mode: CONSTANT(default), EDGE or REFLECT.
-    DynPad(const std::shared_ptr<Node>& arg,
-           const std::shared_ptr<Node>& padding_below,
-           const std::shared_ptr<Node>& padding_above,
-           const std::shared_ptr<Node>& padding_value,
+    DynPad(const Output<Node>& arg,
+           const Output<Node>& padding_below,
+           const Output<Node>& padding_above,
+           const Output<Node>& padding_value,
           PadMode pad_mode = PadMode::CONSTANT);

    PadMode get_pad_mode() const { return m_pad_mode; }
......
@@ -26,17 +26,17 @@ using namespace ngraph;
const string op::DynReplaceSlice::type_name{"DynReplaceSlice"};

-op::DynReplaceSlice::DynReplaceSlice(const shared_ptr<Node>& arg,
-                                     const shared_ptr<Node>& replacement,
-                                     const shared_ptr<Node>& lower_bounds,
-                                     const shared_ptr<Node>& upper_bounds,
-                                     const shared_ptr<Node>& strides,
+op::DynReplaceSlice::DynReplaceSlice(const Output<Node>& arg,
+                                     const Output<Node>& replacement,
+                                     const Output<Node>& lower_bounds,
+                                     const Output<Node>& upper_bounds,
+                                     const Output<Node>& strides,
                                     const AxisSet& lower_bounds_mask,
                                     const AxisSet& upper_bounds_mask,
                                     const AxisSet& new_axis,
                                     const AxisSet& shrink_axis,
                                     const AxisSet& ellipsis_mask)
-    : Op(check_single_output_args({arg, replacement, lower_bounds, upper_bounds, strides}))
+    : Op({arg, replacement, lower_bounds, upper_bounds, strides})
    , m_lower_bounds_mask(lower_bounds_mask)
    , m_upper_bounds_mask(upper_bounds_mask)
    , m_new_axis(new_axis)
......
@@ -30,6 +30,7 @@ namespace ngraph
    NGRAPH_API
    static const std::string type_name;
    const std::string& description() const override { return type_name; }
    DynReplaceSlice() = default;
    /// \brief Constructs a dynamic tensor replace-slice operation.
    ///
    /// \param arg The tensor in which to replace the slice.
@@ -43,11 +44,11 @@ namespace ngraph
    /// \param new_axis Add dimension one axis at the set positions
    /// \param shrink_axis Delete dimensions at the set positions
    /// \param ellipsis_mask Inserts missing dimensions on the set position
-    DynReplaceSlice(const std::shared_ptr<Node>& arg,
-                    const std::shared_ptr<Node>& replacement,
-                    const std::shared_ptr<Node>& lower_bounds,
-                    const std::shared_ptr<Node>& upper_bounds,
-                    const std::shared_ptr<Node>& strides,
+    DynReplaceSlice(const Output<Node>& arg,
+                    const Output<Node>& replacement,
+                    const Output<Node>& lower_bounds,
+                    const Output<Node>& upper_bounds,
+                    const Output<Node>& strides,
                    const AxisSet& lower_bounds_mask = AxisSet{},
                    const AxisSet& upper_bounds_mask = AxisSet{},
                    const AxisSet& new_axis = AxisSet{},
......
@@ -25,10 +25,8 @@ using namespace ngraph;
const string op::DynReshape::type_name{"DynReshape"};

-op::DynReshape::DynReshape(const shared_ptr<Node>& arg,
-                           const shared_ptr<Node>& pattern,
-                           bool zero_flag)
-    : Op(check_single_output_args({arg, pattern}))
+op::DynReshape::DynReshape(const Output<Node>& arg, const Output<Node>& pattern, bool zero_flag)
+    : Op({arg, pattern})
    , m_zero_flag(zero_flag)
{
    constructor_validate_and_infer_types();
......
@@ -34,6 +34,7 @@ namespace ngraph
    NGRAPH_API
    static const std::string type_name;
    const std::string& description() const override { return type_name; }
    DynReshape() = default;
    /// \brief Constructs a dynamic reshape operation. This operation does not perform transpose.
    ///
    /// \param arg The tensor to be reshaped.
@@ -44,8 +45,8 @@ namespace ngraph
    ///        size is inferred based on element count of input tensor.
    /// \param zero_flag Treats zeros in `pattern` as wildcard flags indicating a copy from input
    ///                  shape at the same index.
-    DynReshape(const std::shared_ptr<Node>& arg,
-               const std::shared_ptr<Node>& pattern,
+    DynReshape(const Output<Node>& arg,
+               const Output<Node>& pattern,
               bool zero_flag = false);

    void validate_and_infer_types() override;
......
@@ -26,16 +26,16 @@ using namespace ngraph;
const string op::DynSlice::type_name{"DynSlice"};

-op::DynSlice::DynSlice(const shared_ptr<Node>& arg,
-                       const shared_ptr<Node>& lower_bounds,
-                       const shared_ptr<Node>& upper_bounds,
-                       const shared_ptr<Node>& strides,
+op::DynSlice::DynSlice(const Output<Node>& arg,
+                       const Output<Node>& lower_bounds,
+                       const Output<Node>& upper_bounds,
+                       const Output<Node>& strides,
                       const AxisSet& lower_bounds_mask,
                       const AxisSet& upper_bounds_mask,
                       const AxisSet& new_axis,
                       const AxisSet& shrink_axis,
                       const AxisSet& ellipsis_mask)
-    : Op(check_single_output_args({arg, lower_bounds, upper_bounds, strides}))
+    : Op({arg, lower_bounds, upper_bounds, strides})
    , m_lower_bounds_mask(lower_bounds_mask)
    , m_upper_bounds_mask(upper_bounds_mask)
    , m_new_axis(new_axis)
......
@@ -30,6 +30,7 @@ namespace ngraph
    NGRAPH_API
    static const std::string type_name;
    const std::string& description() const override { return type_name; }
    DynSlice() = default;
    /// \brief Constructs a dynamic tensor slice operation.
    ///
    /// \param arg The tensor to be sliced.
@@ -42,10 +43,10 @@ namespace ngraph
    /// \param new_axis Add dimension one axis at the set positions
    /// \param shrink_axis Delete dimensions at the set positions
    /// \param ellipsis_mask Inserts missing dimensions on the set position
-    DynSlice(const std::shared_ptr<Node>& arg,
-             const std::shared_ptr<Node>& lower_bounds,
-             const std::shared_ptr<Node>& upper_bounds,
-             const std::shared_ptr<Node>& strides,
+    DynSlice(const Output<Node>& arg,
+             const Output<Node>& lower_bounds,
+             const Output<Node>& upper_bounds,
+             const Output<Node>& strides,
             const AxisSet& lower_bounds_mask = AxisSet{},
             const AxisSet& upper_bounds_mask = AxisSet{},
             const AxisSet& new_axis = AxisSet{},
......
@@ -21,11 +21,6 @@ using namespace ngraph;
const string op::GenerateMask::type_name{"GenerateMask"};

-op::GenerateMask::GenerateMask()
-    : Op()
-{
-}

#if 0
// Not supported until all transformers use nodes instead of attributes
op::GenerateMask::GenerateMask(const Output<Node>& training,
......
@@ -34,7 +34,7 @@ namespace ngraph
    const std::string& description() const override { return type_name; }
    /// \brief Constructs a GenerateMask node with a given shape, seed,
    ///        probability and training/inference mode
-    GenerateMask();
+    GenerateMask() = default;

#if 0
    /// Switch to dynamic arguments when all transformers have switched to using the node values
......
@@ -24,6 +24,13 @@ using namespace ngraph;
const string op::QuantizedConcat::type_name{"QuantizedConcat"};

op::QuantizedConcat::QuantizedConcat(const OutputVector& args, size_t concatenation_axis)
    : Op(args)
    , m_concatenation_axis(concatenation_axis)
{
    constructor_validate_and_infer_types();
}

op::QuantizedConcat::QuantizedConcat(const NodeVector& args, size_t concatenation_axis)
    : Op(check_single_output_args(args))
    , m_concatenation_axis(concatenation_axis)
......
@@ -31,12 +31,19 @@ namespace ngraph
    NGRAPH_API
    static const std::string type_name;
    const std::string& description() const override { return type_name; }
    QuantizedConcat() = default;
    /// \brief Constructs a concatenation operation.
    ///
    /// \param args The nodes producing the input tensors.
    /// \param concatenation_axis The axis along which to concatenate the input tensors.
    QuantizedConcat(const NodeVector& args, size_t concatenation_axis);
    /// \brief Constructs a concatenation operation.
    ///
    /// \param args The nodes producing the input tensors.
    /// \param concatenation_axis The axis along which to concatenate the input tensors.
    QuantizedConcat(const OutputVector& args, size_t concatenation_axis);

    void validate_and_infer_types() override;

    virtual std::shared_ptr<Node>
......
@@ -30,6 +30,7 @@ namespace ngraph
    NGRAPH_API
    static const std::string type_name;
    const std::string& description() const override { return type_name; }
    QuantizedConvolutionBias() = default;
    QuantizedConvolutionBias(const Output<Node>& data_batch,
                             const Output<Node>& filters,
                             const Output<Node>& bias,
......
@@ -30,6 +30,7 @@ namespace ngraph
    NGRAPH_API
    static const std::string type_name;
    const std::string& description() const override { return type_name; }
    QuantizedConvolutionRelu() = default;
    QuantizedConvolutionRelu(const Output<Node>& data_batch,
                             const Output<Node>& filters,
                             const Strides& window_movement_strides,
......
@@ -30,6 +30,7 @@ namespace ngraph
    NGRAPH_API
    static const std::string type_name;
    const std::string& description() const override { return type_name; }
    QuantizedDot() = default;
    QuantizedDot(const Output<Node>& data,
                 const Output<Node>& weights,
                 const Output<Node>& scale,
......
@@ -30,6 +30,7 @@ namespace ngraph
    NGRAPH_API
    static const std::string type_name;
    const std::string& description() const override { return type_name; }
    QuantizedDotBias() = default;
    QuantizedDotBias(const Output<Node>& data,
                     const Output<Node>& weights,
                     const Output<Node>& bias,
......
@@ -29,6 +29,7 @@ namespace ngraph
    NGRAPH_API
    static const std::string type_name;
    const std::string& description() const override { return type_name; }
    QuantizedMaxPool() = default;
    /// \brief Constructs a batched max pooling operation.
    ///
    /// \param arg The node producing the input data batch tensor.
......
@@ -24,10 +24,6 @@ using namespace ngraph;
const string op::Range::type_name = "Range";

-op::Range::Range()
-{
-}

op::Range::Range(const Output<Node>& start, const Output<Node>& stop, const Output<Node>& step)
    : Op({start, stop, step})
{
......
@@ -31,7 +31,7 @@ namespace ngraph
    static const std::string type_name;
    const std::string& description() const override { return type_name; }
    /// \brief Constructs an uninitialized range operation.
-    Range();
+    Range() = default;

    /// \brief Constructs a range operation.
    ///
......
@@ -21,8 +21,8 @@ using namespace ngraph;
const string op::ShapeOf::type_name{"ShapeOf"};

-op::ShapeOf::ShapeOf(const shared_ptr<Node>& arg)
-    : Op(check_single_output_args({arg}))
+op::ShapeOf::ShapeOf(const Output<Node>& arg)
+    : Op({arg})
{
    constructor_validate_and_infer_types();
}
......
@@ -29,8 +29,9 @@ namespace ngraph
    NGRAPH_API
    static const std::string type_name;
    const std::string& description() const override { return type_name; }
    ShapeOf() = default;
    /// \brief Constructs a shape-of operation.
-    ShapeOf(const std::shared_ptr<Node>& arg);
+    ShapeOf(const Output<Node>& arg);

    virtual std::shared_ptr<Node>
        copy_with_new_args(const NodeVector& new_args) const override;
......
@@ -23,8 +23,8 @@ using namespace ngraph;
const string op::Tile::type_name{"Tile"};

-op::Tile::Tile(const std::shared_ptr<Node>& arg, const std::shared_ptr<Node>& repeats)
-    : Op(check_single_output_args({arg, repeats}))
+op::Tile::Tile(const Output<Node>& arg, const Output<Node>& repeats)
+    : Op({arg, repeats})
{
    constructor_validate_and_infer_types();
}
......
@@ -30,11 +30,12 @@ namespace ngraph
    NGRAPH_API
    static const std::string type_name;
    const std::string& description() const override { return type_name; }
    Tile() = default;
    /// \brief Constructs a dynamic tile operation that replicates the input tensor
    ///
    /// \param arg The node producing the input tensor to be tiled.
    /// \param repeats The node producing the per-dimension replication factor
-    Tile(const std::shared_ptr<Node>& arg, const std::shared_ptr<Node>& repeats);
+    Tile(const Output<Node>& arg, const Output<Node>& repeats);

    void validate_and_infer_types() override;
......
@@ -24,8 +24,8 @@ using namespace ngraph;
const string op::Transpose::type_name{"Transpose"};

-op::Transpose::Transpose(const shared_ptr<Node>& arg, const shared_ptr<Node>& input_order)
-    : Op(check_single_output_args({arg, input_order}))
+op::Transpose::Transpose(const Output<Node>& arg, const Output<Node>& input_order)
+    : Op({arg, input_order})
{
    constructor_validate_and_infer_types();
}
......
@@ -31,6 +31,7 @@ namespace ngraph
    NGRAPH_API
    static const std::string type_name;
    const std::string& description() const override { return type_name; }
    Transpose() = default;
    /// \brief Constructs a transpose operation.
    ///
    /// \param arg Node producing the tensor to be transposed.
@@ -38,7 +39,7 @@ namespace ngraph
    ///        input shape. Must be a vector of element type element::i64,
    ///        with shape [n], where n is the rank of arg. The tensor's
    ///        value must contain every integer in the range [0,n-1].
-    Transpose(const std::shared_ptr<Node>& arg, const std::shared_ptr<Node>& input_order);
+    Transpose(const Output<Node>& arg, const Output<Node>& input_order);

    void validate_and_infer_types() override;
......
@@ -43,6 +43,25 @@ public:
    template <typename T, class... Args>
    void register_pass(Args&&... args)
+    {
+        push_pass<T>(std::forward<Args>(args)...);
+        if (m_per_pass_validation)
+        {
+            push_pass<Validate>();
+        }
+    }
+
+    void run_passes(std::shared_ptr<Function>, bool transitive = true);
+    ManagerState& get_state();
+    PassConfig& get_pass_config() { return m_pass_config; }
+    void set_pass_config(const PassConfig& pass_config) { m_pass_config = pass_config; }
+    void set_pass_visualization(bool new_state) { m_visualize = new_state; }
+    void set_pass_serialization(bool new_state) { m_serialize = new_state; }
+    void set_per_pass_validation(bool new_state) { m_per_pass_validation = new_state; }
+
+private:
+    template <typename T, class... Args>
+    void push_pass(Args&&... args)
    {
        static_assert(std::is_base_of<pass::PassBase, T>::value, "pass not derived from pass base");
        auto pass = std::make_shared<T>(std::forward<Args>(args)...);
@@ -61,23 +80,8 @@ public:
        m_pass_names.push_back(typeid(T).name());
#endif
    }
-        if (m_per_pass_validation)
-        {
-            auto validate = std::make_shared<Validate>();
-            auto validate_base = std::static_pointer_cast<PassBase>(validate);
-            m_pass_list.push_back(validate_base);
-        }
-    }
-
-    void run_passes(std::shared_ptr<Function>, bool transitive = true);
-    ManagerState& get_state();
-    PassConfig& get_pass_config() { return m_pass_config; }
-    void set_pass_config(const PassConfig& pass_config) { m_pass_config = pass_config; }
-    void set_pass_visualization(bool new_state) { m_visualize = new_state; }
-    void set_pass_serialization(bool new_state) { m_serialize = new_state; }
-    void set_per_pass_validation(bool new_state) { m_per_pass_validation = new_state; }
-
-private:
    std::vector<std::string> m_pass_names;
    std::vector<std::shared_ptr<PassBase>> m_pass_list;
    ManagerState m_state;
......
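The hunk above inverts the old structure: register_pass used to append the pass and, inline, a Validate pass; now both go through a single private push_pass helper, so the bookkeeping (name recording, pass-list insertion) lives in one place. A toy Python sketch of the resulting shape (names here are illustrative, not the nGraph API):

class ValidatePass:
    def run(self, graph):
        pass  # re-check graph invariants between passes

class ToyManager:
    def __init__(self, per_pass_validation=True):
        self.per_pass_validation = per_pass_validation
        self.pass_list = []

    def register_pass(self, pass_obj):
        self._push_pass(pass_obj)
        if self.per_pass_validation:
            self._push_pass(ValidatePass())

    def _push_pass(self, pass_obj):
        # Single entry point for every pass, including the validation steps.
        self.pass_list.append(pass_obj)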
@@ -37,6 +37,9 @@ std::string runtime::Backend::s_backend_shared_library_search_directory;
// This finds the full path of the containing shared library
static string find_my_pathname()
{
#ifdef NGRAPH_STATIC_LIB_ENABLE
    return "";
#else
#ifdef _WIN32
    HMODULE hModule = GetModuleHandleW(L"ngraph.dll");
    WCHAR wpath[MAX_PATH];
@@ -52,6 +55,7 @@ static string find_my_pathname()
    dladdr(reinterpret_cast<void*>(find_my_pathname), &dl_info);
    return dl_info.dli_fname;
#endif
#endif
}

runtime::Backend::~Backend()
......
@@ -32,12 +32,18 @@
using namespace std;
using namespace ngraph;

#ifdef NGRAPH_STATIC_LIB_ENABLE
#define DLERROR() ""
#else
#ifdef _WIN32
#define CLOSE_LIBRARY(a) FreeLibrary(a)
#define DLSYM(a, b) GetProcAddress(a, b)
#define DLERROR() ""
#else
#define CLOSE_LIBRARY(a) dlclose(a)
#define DLSYM(a, b) dlsym(a, b)
#define DLERROR() dlerror()
#endif
#endif

unordered_map<string, runtime::BackendConstructor*>& runtime::BackendManager::get_registry()
@@ -101,19 +107,19 @@ shared_ptr<runtime::Backend> runtime::BackendManager::create_backend(const std::
    }
    else
    {
#ifndef NGRAPH_STATIC_LIB_ENABLE
        DL_HANDLE handle = open_shared_library(type);
        if (!handle)
        {
            stringstream ss;
            ss << "Backend '" << type << "' not registered. Error:";
#ifndef _WIN32
-            ss << dlerror();
+            ss << DLERROR();
#endif
            throw runtime_error(ss.str());
        }
#ifndef _WIN32
-        dlerror(); // Clear any pending errors
+        DLERROR(); // Clear any pending errors
#endif
        function<runtime::BackendConstructor*()> get_backend_constructor_pointer =
            reinterpret_cast<runtime::BackendConstructor* (*)()>(
@@ -127,7 +133,7 @@ shared_ptr<runtime::Backend> runtime::BackendManager::create_backend(const std::
        {
            string error;
#ifndef _WIN32
-            const char* err = dlerror();
+            const char* err = DLERROR();
            error = (err ? err : "");
#endif
            CLOSE_LIBRARY(handle);
@@ -136,6 +142,7 @@ shared_ptr<runtime::Backend> runtime::BackendManager::create_backend(const std::
                "library.\nError='" +
                error + "'");
        }
#endif
    }
    return backend;
}
@@ -146,6 +153,7 @@ DL_HANDLE runtime::BackendManager::open_shared_library(string type)
    string lib_suffix = SHARED_LIB_SUFFIX;
    DL_HANDLE handle = nullptr;
#ifndef NGRAPH_STATIC_LIB_ENABLE

    // strip off attributes, IE:CPU becomes IE
    auto colon = type.find(":");
@@ -163,9 +171,9 @@ DL_HANDLE runtime::BackendManager::open_shared_library(string type)
    SetDllDirectory((LPCSTR)my_directory.c_str());
    handle = LoadLibrary(library_path.c_str());
#else
-    dlerror(); // Clear any pending errors
+    DLERROR(); // Clear any pending errors
    handle = dlopen(library_path.c_str(), RTLD_NOW | RTLD_GLOBAL);
-    const char* err = dlerror();
+    const char* err = DLERROR();
    error = (err ? err : "");
#endif
    if (!handle)
@@ -175,12 +183,14 @@ DL_HANDLE runtime::BackendManager::open_shared_library(string type)
        ss << "\nOpen error message '" << error << "'";
        throw runtime_error(ss.str());
    }
#endif
    return handle;
}

map<string, string> runtime::BackendManager::get_registered_device_map()
{
    map<string, string> rc;
#ifndef NGRAPH_STATIC_LIB_ENABLE
    string my_directory =
        file_util::get_directory(Backend::get_backend_shared_library_search_directory());
    vector<string> backend_list;
@@ -197,6 +207,7 @@ map<string, string> runtime::BackendManager::get_registered_device_map()
        }
    };
    file_util::iterate_files(my_directory, f, false, true);
#endif
    return rc;
}
......
@@ -95,6 +95,9 @@ namespace ngraph
            }
        }
        REGISTER_OP_BUILDER(QuantizedConcat);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
        void register_builders_quantized_concat_cpp() {}
#endif
    }
  }
}
@@ -69,6 +69,9 @@ namespace ngraph
            }
        }
        REGISTER_OP_BUILDER(QuantizedMaxPool);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
        void register_builders_quantized_max_pool_cpp() {}
#endif
    }
  }
}
@@ -77,6 +77,8 @@ namespace ngraph
            register_builders_tile_cpp();
            register_builders_topk_cpp();
            register_builders_update_slice_cpp();
            register_builders_quantized_concat_cpp();
            register_builders_quantized_max_pool_cpp();
        }
    }
}
......
@@ -76,6 +76,8 @@ namespace ngraph
            void register_builders_tile_cpp();
            void register_builders_topk_cpp();
            void register_builders_update_slice_cpp();
            void register_builders_quantized_concat_cpp();
            void register_builders_quantized_max_pool_cpp();
        }
    }
}
......
@@ -280,6 +280,9 @@ lstm_cell_activaction_functions
divide_python_rounding_int32
backwards_batchmatmul_tensor2_tensor2

# unsupported ops: `BroadcastDistributed`
broadcastdistributed

# unsupported ops: 'QuantizedConvolution', 'QuantizedDot', 'TopK', 'Erf', 'EmbeddingLookup'
model_quant_conv_linear
model_conv_integer_no_zero_point
......
@@ -41,3 +41,29 @@ TEST(pass_manager, add)
    EXPECT_EQ(node_count, sorted.size());
    EXPECT_TRUE(validate_list(sorted));
}
namespace
{
    class DummyPass : public pass::FunctionPass
    {
    public:
        DummyPass()
            : FunctionPass()
        {
        }
        bool run_on_function(std::shared_ptr<ngraph::Function> f) override { return false; }
    };
}

// Regression test: We've had an issue in the past where enabling per-pass validation and
// per-pass serialization at the same time causes a crash.
TEST(pass_manager, serialize_with_revalidate_does_not_crash)
{
    pass::Manager pass_manager;
    pass_manager.set_per_pass_validation(true);
    pass_manager.set_pass_serialization(true);
    pass_manager.register_pass<DummyPass>();
    auto graph = make_test_graph();
    pass_manager.run_passes(graph);
}