Unverified commit 18708e0c authored by Scott Cyphers, committed by GitHub

Merge branch 'master' into silee2/pragma

parents d1d27d9e c0ab7d42
......@@ -182,6 +182,8 @@ option(NGRAPH_DISTRIBUTED_ENABLE "Enable distributed training using MLSL/OpenMPI
option(NGRAPH_FAST_MATH_ENABLE "Enable fast math" ON)
option(NGRAPH_JSON_ENABLE "Enable JSON based serialization and tracing features" TRUE)
option(NGRAPH_STATIC_LIB_ENABLE "Enable building nGraph static library" FALSE)
option(NGRAPH_INTERPRETER_STATIC_LIB_ENABLE "Enable building INTERPRETER backend static library" FALSE)
option(NGRAPH_CPU_STATIC_LIB_ENABLE "Enable building CPU backend static library" FALSE)
if (NGRAPH_CPU_ENABLE
AND
......@@ -257,29 +259,35 @@ NORMALIZE_BOOL(NGRAPH_PYTHON_BUILD_ENABLE)
NORMALIZE_BOOL(NGRAPH_USE_PREBUILT_LLVM)
NORMALIZE_BOOL(NGRAPH_PLAIDML_ENABLE)
NORMALIZE_BOOL(NGRAPH_JSON_ENABLE)
message(STATUS "NGRAPH_UNIT_TEST_ENABLE: ${NGRAPH_UNIT_TEST_ENABLE}")
message(STATUS "NGRAPH_TOOLS_ENABLE: ${NGRAPH_TOOLS_ENABLE}")
message(STATUS "NGRAPH_CPU_ENABLE: ${NGRAPH_CPU_ENABLE}")
message(STATUS "NGRAPH_MLIR_ENABLE: ${NGRAPH_MLIR_ENABLE}")
message(STATUS "NGRAPH_INTELGPU_ENABLE: ${NGRAPH_INTELGPU_ENABLE}")
message(STATUS "NGRAPH_GPU_ENABLE: ${NGRAPH_GPU_ENABLE}")
message(STATUS "NGRAPH_INTERPRETER_ENABLE: ${NGRAPH_INTERPRETER_ENABLE}")
message(STATUS "NGRAPH_NOP_ENABLE: ${NGRAPH_NOP_ENABLE}")
message(STATUS "NGRAPH_GPUH_ENABLE: ${NGRAPH_GPUH_ENABLE}")
message(STATUS "NGRAPH_GENERIC_CPU_ENABLE: ${NGRAPH_GENERIC_CPU_ENABLE}")
message(STATUS "NGRAPH_DEBUG_ENABLE: ${NGRAPH_DEBUG_ENABLE}")
message(STATUS "NGRAPH_DEPRECATED_ENABLE: ${NGRAPH_DEPRECATED_ENABLE}")
message(STATUS "NGRAPH_ONNX_IMPORT_ENABLE: ${NGRAPH_ONNX_IMPORT_ENABLE}")
message(STATUS "NGRAPH_DEX_ONLY: ${NGRAPH_DEX_ONLY}")
message(STATUS "NGRAPH_ENABLE_CPU_CONV_AUTO: ${NGRAPH_ENABLE_CPU_CONV_AUTO}")
message(STATUS "NGRAPH_CODE_COVERAGE_ENABLE: ${NGRAPH_CODE_COVERAGE_ENABLE}")
message(STATUS "NGRAPH_LIB_VERSIONING_ENABLE: ${NGRAPH_LIB_VERSIONING_ENABLE}")
message(STATUS "NGRAPH_PYTHON_BUILD_ENABLE: ${NGRAPH_PYTHON_BUILD_ENABLE}")
message(STATUS "NGRAPH_USE_PREBUILT_LLVM: ${NGRAPH_USE_PREBUILT_LLVM}")
message(STATUS "NGRAPH_PLAIDML_ENABLE: ${NGRAPH_PLAIDML_ENABLE}")
message(STATUS "NGRAPH_DISTRIBUTED_ENABLE: ${NGRAPH_DISTRIBUTED_ENABLE}")
message(STATUS "NGRAPH_JSON_ENABLE: ${NGRAPH_JSON_ENABLE}")
NORMALIZE_BOOL(NGRAPH_STATIC_LIB_ENABLE)
NORMALIZE_BOOL(NGRAPH_INTERPRETER_STATIC_LIB_ENABLE)
NORMALIZE_BOOL(NGRAPH_CPU_STATIC_LIB_ENABLE)
message(STATUS "NGRAPH_UNIT_TEST_ENABLE: ${NGRAPH_UNIT_TEST_ENABLE}")
message(STATUS "NGRAPH_TOOLS_ENABLE: ${NGRAPH_TOOLS_ENABLE}")
message(STATUS "NGRAPH_CPU_ENABLE: ${NGRAPH_CPU_ENABLE}")
message(STATUS "NGRAPH_MLIR_ENABLE: ${NGRAPH_MLIR_ENABLE}")
message(STATUS "NGRAPH_INTELGPU_ENABLE: ${NGRAPH_INTELGPU_ENABLE}")
message(STATUS "NGRAPH_GPU_ENABLE: ${NGRAPH_GPU_ENABLE}")
message(STATUS "NGRAPH_INTERPRETER_ENABLE: ${NGRAPH_INTERPRETER_ENABLE}")
message(STATUS "NGRAPH_NOP_ENABLE: ${NGRAPH_NOP_ENABLE}")
message(STATUS "NGRAPH_GPUH_ENABLE: ${NGRAPH_GPUH_ENABLE}")
message(STATUS "NGRAPH_GENERIC_CPU_ENABLE: ${NGRAPH_GENERIC_CPU_ENABLE}")
message(STATUS "NGRAPH_DEBUG_ENABLE: ${NGRAPH_DEBUG_ENABLE}")
message(STATUS "NGRAPH_DEPRECATED_ENABLE: ${NGRAPH_DEPRECATED_ENABLE}")
message(STATUS "NGRAPH_ONNX_IMPORT_ENABLE: ${NGRAPH_ONNX_IMPORT_ENABLE}")
message(STATUS "NGRAPH_DEX_ONLY: ${NGRAPH_DEX_ONLY}")
message(STATUS "NGRAPH_ENABLE_CPU_CONV_AUTO: ${NGRAPH_ENABLE_CPU_CONV_AUTO}")
message(STATUS "NGRAPH_CODE_COVERAGE_ENABLE: ${NGRAPH_CODE_COVERAGE_ENABLE}")
message(STATUS "NGRAPH_LIB_VERSIONING_ENABLE: ${NGRAPH_LIB_VERSIONING_ENABLE}")
message(STATUS "NGRAPH_PYTHON_BUILD_ENABLE: ${NGRAPH_PYTHON_BUILD_ENABLE}")
message(STATUS "NGRAPH_USE_PREBUILT_LLVM: ${NGRAPH_USE_PREBUILT_LLVM}")
message(STATUS "NGRAPH_PLAIDML_ENABLE: ${NGRAPH_PLAIDML_ENABLE}")
message(STATUS "NGRAPH_DISTRIBUTED_ENABLE: ${NGRAPH_DISTRIBUTED_ENABLE}")
message(STATUS "NGRAPH_JSON_ENABLE: ${NGRAPH_JSON_ENABLE}")
message(STATUS "NGRAPH_STATIC_LIB_ENABLE: ${NGRAPH_STATIC_LIB_ENABLE}")
message(STATUS "NGRAPH_INTERPRETER_STATIC_LIB_ENABLE: ${NGRAPH_INTERPRETER_STATIC_LIB_ENABLE}")
message(STATUS "NGRAPH_CPU_STATIC_LIB_ENABLE: ${NGRAPH_CPU_STATIC_LIB_ENABLE}")
#-----------------------------------------------------------------------------------------------
# Installation logic...
......
......@@ -30,6 +30,11 @@
using namespace std;
using namespace ngraph;
const string op::ConvolutionBias::type_name{"ConvolutionBias"};
const string op::ConvolutionBiasBackpropFiltersBias::type_name{
"ConvolutionBiasBackpropFiltersBias"};
const string op::ConvolutionBiasAdd::type_name{"ConvolutionBiasAdd"};
static void validate_convbias_shapes(const Node* node,
element::Type et_filters,
element::Type et_bias,
......@@ -75,7 +80,7 @@ op::ConvolutionBias::ConvolutionBias(const shared_ptr<Node>& data_batch,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const bool with_relu)
: FusedOp("ConvolutionBias", check_single_output_args({data_batch, filters, bias}))
: FusedOp(check_single_output_args({data_batch, filters, bias}))
, m_window_movement_strides(window_movement_strides)
, m_window_dilation_strides(window_dilation_strides)
, m_padding_below(padding_below)
......@@ -280,8 +285,7 @@ op::ConvolutionBiasBackpropFiltersBias::ConvolutionBiasBackpropFiltersBias(
const CoordinateDiff& padding_below_forward,
const CoordinateDiff& padding_above_forward,
const Strides& data_dilation_strides_forward)
: FusedOp("ConvolutionBiasBackpropFiltersBias",
check_single_output_args({data_batch, output_delta}))
: FusedOp(check_single_output_args({data_batch, output_delta}))
, m_filters_shape(filters_shape)
, m_bias_shape(bias_shape)
, m_window_movement_strides_forward(window_movement_strides_forward)
......@@ -368,8 +372,7 @@ op::ConvolutionBiasAdd::ConvolutionBiasAdd(const std::shared_ptr<Node>& data_bat
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
bool with_relu)
: FusedOp("ConvolutionBiasAdd",
check_single_output_args({data_batch, filters, bias, add_input}))
: FusedOp(check_single_output_args({data_batch, filters, bias, add_input}))
, m_window_movement_strides(window_movement_strides)
, m_window_dilation_strides(window_dilation_strides)
, m_padding_below(padding_below)
......
......@@ -28,6 +28,9 @@ namespace ngraph
class ConvolutionBias : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
ConvolutionBias(const std::shared_ptr<op::Convolution>& conv,
const std::shared_ptr<Node>& bias,
const bool with_relu = false);
......@@ -79,6 +82,9 @@ namespace ngraph
class ConvolutionBiasBackpropFiltersBias : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
ConvolutionBiasBackpropFiltersBias(const std::shared_ptr<Node>& data_batch,
const Shape& filters_shape,
const Shape& bias_shape,
......@@ -169,6 +175,9 @@ namespace ngraph
class ConvolutionBiasAdd : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
ConvolutionBiasAdd(const std::shared_ptr<op::ConvolutionBias>& conv,
const std::shared_ptr<Node>& sum_input,
bool with_relu = false);
......
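Every fused-op hunk in this commit repeats the same two-part pattern seen above: the header gains an exported static type_name and a description() override returning it, and the constructor stops passing a name string to the FusedOp base, which now receives only check_single_output_args(...). A hedged sketch of an op written against the new convention (MyFusedOp is a made-up example, not an op from this diff):

// my_fused_op.hpp -- hypothetical op following the new header pattern
class MyFusedOp : public ngraph::op::util::FusedOp
{
public:
    NGRAPH_API
    static const std::string type_name;
    const std::string& description() const override { return type_name; }
    MyFusedOp(const std::shared_ptr<Node>& data);
    // decompose_op() and copy_with_new_args() overrides elided for brevity
};

// my_fused_op.cpp
const std::string op::MyFusedOp::type_name{"MyFusedOp"};

op::MyFusedOp::MyFusedOp(const shared_ptr<Node>& data)
    // the base class no longer takes a name string; the op name now comes
    // from the static type_name member via description()
    : FusedOp(check_single_output_args({data}))
{
    constructor_validate_and_infer_types();
}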
......@@ -25,8 +25,10 @@
using namespace std;
using namespace ngraph;
const string op::DepthToSpace::type_name{"DepthToSpace"};
op::DepthToSpace::DepthToSpace(const shared_ptr<Node>& data, const size_t block_size)
: FusedOp("DepthToSpace", {data})
: FusedOp(check_single_output_args({data}))
, m_blocksize(block_size)
{
constructor_validate_and_infer_types();
......
......@@ -34,6 +34,9 @@ namespace ngraph
class DepthToSpace : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a DepthToSpace operation.
///
/// \param data - Node producing the input tensor
......
......@@ -28,8 +28,10 @@
using namespace std;
using namespace ngraph;
const string op::Elu::type_name{"Elu"};
op::Elu::Elu(const shared_ptr<Node>& data, const shared_ptr<Node>& alpha)
: FusedOp("Elu", {data, alpha})
: FusedOp(check_single_output_args({data, alpha}))
{
constructor_validate_and_infer_types();
}
......
......@@ -31,6 +31,9 @@ namespace ngraph
class Elu : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs an Elu operation.
///
/// \param data Input tensor
......
......@@ -36,13 +36,15 @@
using namespace std;
using namespace ngraph;
const string op::FakeQuantize::type_name{"FakeQuantize"};
op::FakeQuantize::FakeQuantize(const shared_ptr<Node>& data,
const shared_ptr<Node>& input_low,
const shared_ptr<Node>& input_high,
const shared_ptr<Node>& output_low,
const shared_ptr<Node>& output_high,
size_t levels)
: FusedOp("FakeQuantize", {data, input_low, input_high, output_low, output_high})
: FusedOp(check_single_output_args({data, input_low, input_high, output_low, output_high}))
, m_levels(levels)
{
constructor_validate_and_infer_types();
......
......@@ -38,6 +38,9 @@ namespace ngraph
class FakeQuantize : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
///
/// \brief Constructs a FakeQuantize operation node.
///
......
......@@ -27,8 +27,10 @@
using namespace std;
using namespace ngraph;
const string op::Gelu::type_name{"Gelu"};
op::Gelu::Gelu(const shared_ptr<Node>& data)
: FusedOp("Gelu", {data})
: FusedOp(check_single_output_args({data}))
{
constructor_validate_and_infer_types();
}
......
......@@ -32,6 +32,9 @@ namespace ngraph
class Gelu : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a Gelu operation.
///
/// \param data Input tensor
......
......@@ -25,6 +25,8 @@
using namespace std;
using namespace ngraph;
const string op::Gemm::type_name{"Gemm"};
op::Gemm::Gemm(const std::shared_ptr<ngraph::Node>& A,
const std::shared_ptr<ngraph::Node>& B,
const std::shared_ptr<ngraph::Node>& C,
......@@ -32,7 +34,7 @@ op::Gemm::Gemm(const std::shared_ptr<ngraph::Node>& A,
double beta,
bool transA,
bool transB)
: FusedOp("Gemm", {A, B, C})
: FusedOp(check_single_output_args({A, B, C}))
, m_alpha{alpha}
, m_beta{beta}
, m_transA{transA}
......
......@@ -36,6 +36,9 @@ namespace ngraph
class Gemm : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a Gemm operation.
///
/// \param A Input tensor A
......
......@@ -27,8 +27,10 @@
using namespace std;
using namespace ngraph;
const string op::GRN::type_name{"GRN"};
op::GRN::GRN(const shared_ptr<Node>& data, float bias)
: FusedOp("GRN", {data})
: FusedOp(check_single_output_args({data}))
, m_bias(bias)
{
constructor_validate_and_infer_types();
......
......@@ -30,6 +30,9 @@ namespace ngraph
class GRN : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a GRN operation.
///
/// \param data - Node producing the input tensor
......
......@@ -27,6 +27,8 @@
using namespace std;
using namespace ngraph;
const string op::GroupConvolutionTranspose::type_name{"GroupConvolutionTranspose"};
op::GroupConvolutionTranspose::GroupConvolutionTranspose(const shared_ptr<Node>& data,
const shared_ptr<Node>& filters,
const Strides& strides,
......@@ -37,7 +39,7 @@ op::GroupConvolutionTranspose::GroupConvolutionTranspose(const shared_ptr<Node>&
const size_t groups,
const PadType& pad_type,
const Shape& output_shape)
: FusedOp("GroupConvolutionTranspose", check_single_output_args({data, filters}))
: FusedOp(check_single_output_args({data, filters}))
, m_strides(strides)
, m_dilations(dilations)
, m_padding_begin(padding_begin)
......
......@@ -35,6 +35,9 @@ namespace ngraph
class GroupConvolutionTranspose : public util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
///
/// \brief Constructs a GroupConvolutionTranspose operation.
///
......
......@@ -31,6 +31,8 @@
using namespace std;
using namespace ngraph;
const string op::GRUCell::type_name{"GRUCell"};
op::GRUCell::GRUCell(const shared_ptr<Node>& X,
const shared_ptr<Node>& W,
const shared_ptr<Node>& R,
......@@ -59,7 +61,7 @@ op::GRUCell::GRUCell(const shared_ptr<Node>& X,
const vector<float>& activation_beta,
float clip,
bool linear_before_reset)
: FusedOp("GRUCell", {X, W, R, H_t})
: FusedOp(check_single_output_args({X, W, R, H_t}))
, RNNCellBase(hidden_size, clip, activations, activation_alpha, activation_beta)
, m_activation_f{get_activation_function(0)}
, m_activation_g{get_activation_function(1)}
......@@ -80,7 +82,7 @@ op::GRUCell::GRUCell(const shared_ptr<Node>& X,
const vector<float>& activation_beta,
float clip,
bool linear_before_reset)
: FusedOp("GRUCell", {X, W, R, H_t, B})
: FusedOp(check_single_output_args({X, W, R, H_t, B}))
, RNNCellBase(hidden_size, clip, activations, activation_alpha, activation_beta)
, m_activation_f{get_activation_function(0)}
, m_activation_g{get_activation_function(1)}
......
......@@ -41,6 +41,9 @@ namespace ngraph
class GRUCell : public util::FusedOp, public util::RNNCellBase
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
///
/// \brief Constructs a GRUCell node.
///
......
......@@ -27,8 +27,10 @@
using namespace std;
using namespace ngraph;
const string op::HardSigmoid::type_name{"HardSigmoid"};
op::HardSigmoid::HardSigmoid(const shared_ptr<Node>& data, float alpha, float beta)
: FusedOp("HardSigmoid", {data})
: FusedOp(check_single_output_args({data}))
, m_alpha(alpha)
, m_beta(beta)
{
......
......@@ -30,6 +30,9 @@ namespace ngraph
class HardSigmoid : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a HardSigmoid operation.
///
/// \param data Input tensor.
......
......@@ -23,8 +23,10 @@
using namespace std;
using namespace ngraph;
const string op::LeakyRelu::type_name{"LeakyRelu"};
op::LeakyRelu::LeakyRelu(const shared_ptr<Node>& data, const shared_ptr<Node>& alpha)
: FusedOp("LeakyRelu", {data, alpha})
: FusedOp(check_single_output_args({data, alpha}))
{
constructor_validate_and_infer_types();
}
......
......@@ -30,6 +30,9 @@ namespace ngraph
class LeakyRelu : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
LeakyRelu(const std::shared_ptr<ngraph::Node>& data,
const std::shared_ptr<ngraph::Node>& alpha);
......
......@@ -31,6 +31,8 @@
using namespace std;
using namespace ngraph;
const string op::LSTMCell::type_name{"LSTMCell"};
op::LSTMCell::LSTMCell(const shared_ptr<Node>& X,
const shared_ptr<Node>& W,
const shared_ptr<Node>& R,
......@@ -62,7 +64,7 @@ op::LSTMCell::LSTMCell(const shared_ptr<Node>& X,
const vector<float>& activation_beta,
float clip,
bool input_forget)
: FusedOp("LSTMCell", {X, W, R, H_t, C_t})
: FusedOp(check_single_output_args({X, W, R, H_t, C_t}))
, RNNCellBase(hidden_size, clip, activations, activation_alpha, activation_beta)
, m_activation_f{get_activation_function(0)}
, m_activation_g{get_activation_function(1)}
......@@ -87,7 +89,7 @@ op::LSTMCell::LSTMCell(const shared_ptr<Node>& X,
const vector<float>& activation_beta,
float clip,
bool input_forget)
: FusedOp("LSTMCell", {X, W, R, H_t, C_t, B, P})
: FusedOp(check_single_output_args({X, W, R, H_t, C_t, B, P}))
, RNNCellBase(hidden_size, clip, activations, activation_alpha, activation_beta)
, m_activation_f{get_activation_function(0)}
, m_activation_g{get_activation_function(1)}
......
......@@ -42,6 +42,9 @@ namespace ngraph
class LSTMCell : public util::FusedOp, public util::RNNCellBase
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
///
/// \brief Constructs an LSTMCell node.
///
......
......@@ -27,11 +27,13 @@
using namespace std;
using namespace ngraph;
const string op::MVN::type_name{"MVN"};
op::MVN::MVN(const std::shared_ptr<Node>& data,
bool across_channels,
bool normalize_variance,
double eps)
: FusedOp("MVN", {data})
: FusedOp(check_single_output_args({data}))
, m_eps{eps}
, m_across_channels{across_channels}
, m_normalize_variance{normalize_variance}
......@@ -52,7 +54,7 @@ op::MVN::MVN(const std::shared_ptr<Node>& data,
AxisSet reduction_axes,
bool normalize_variance,
double eps)
: FusedOp("MVN", {data})
: FusedOp(check_single_output_args({data}))
, m_eps{eps}
, m_across_channels{false}
, m_normalize_variance{normalize_variance}
......
......@@ -29,6 +29,9 @@ namespace ngraph
class MVN : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs an MVN operation.
///
/// \param data Input tensor with data
......
......@@ -26,12 +26,14 @@
using namespace std;
using namespace ngraph;
const string op::Normalize::type_name{"Normalize"};
op::Normalize::Normalize(const shared_ptr<ngraph::Node>& data,
const shared_ptr<ngraph::Node>& scale,
bool across_spatial,
bool channel_shared,
float eps)
: FusedOp("Normalize", {data, scale})
: FusedOp(check_single_output_args({data, scale}))
, m_across_spatial{across_spatial}
, m_channel_shared{channel_shared}
, m_eps{eps}
......
......@@ -30,6 +30,9 @@ namespace ngraph
class Normalize : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
///
/// \brief Constructs a Normalize operation.
///
......
......@@ -29,8 +29,10 @@
using namespace std;
using namespace ngraph;
const string op::PRelu::type_name{"PRelu"};
op::PRelu::PRelu(const shared_ptr<Node>& data, const shared_ptr<Node>& slope)
: FusedOp("PRelu", {data, slope})
: FusedOp(check_single_output_args({data, slope}))
{
constructor_validate_and_infer_types();
}
......
......@@ -31,6 +31,9 @@ namespace ngraph
class PRelu : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a PRelu operation.
///
/// \param data Input tensor
......
......@@ -31,6 +31,8 @@
using namespace std;
using namespace ngraph;
const string op::RNNCell::type_name{"RNNCell"};
op::RNNCell::RNNCell(const shared_ptr<Node>& X,
const shared_ptr<Node>& W,
const shared_ptr<Node>& R,
......@@ -50,7 +52,7 @@ op::RNNCell::RNNCell(const shared_ptr<Node>& X,
const vector<float>& activation_alpha,
const vector<float>& activation_beta,
float clip)
: FusedOp("RNNCell", {X, W, R, H_t})
: FusedOp(check_single_output_args({X, W, R, H_t}))
, RNNCellBase(hidden_size, clip, activations, activation_alpha, activation_beta)
, m_activation_f{get_activation_function(0)}
{
......@@ -68,7 +70,7 @@ op::RNNCell::RNNCell(const shared_ptr<Node>& X,
const vector<float>& activation_alpha,
const vector<float>& activation_beta,
float clip)
: FusedOp("RNNCell", {X, W, R, H_t, B})
: FusedOp(check_single_output_args({X, W, R, H_t, B}))
, RNNCellBase(hidden_size, clip, activations, activation_alpha, activation_beta)
, m_activation_f{get_activation_function(0)}
{
......
......@@ -41,6 +41,9 @@ namespace ngraph
class RNNCell : public util::FusedOp, public util::RNNCellBase
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
///
/// \brief Constructs an RNNCell node.
///
......
......@@ -21,10 +21,12 @@
using namespace std;
using namespace ngraph;
const string op::ScaleShift::type_name{"ScaleShift"};
op::ScaleShift::ScaleShift(const std::shared_ptr<ngraph::Node>& data,
const std::shared_ptr<ngraph::Node>& scale,
const std::shared_ptr<ngraph::Node>& shift)
: FusedOp("ScaleShift", {data, scale, shift})
: FusedOp(check_single_output_args({data, scale, shift}))
{
constructor_validate_and_infer_types();
}
......
......@@ -31,6 +31,9 @@ namespace ngraph
class ScaleShift : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a ScaleShift operation.
///
/// \param data Input tensor
......
......@@ -21,10 +21,12 @@
using namespace std;
using namespace ngraph;
const string op::ShuffleChannels::type_name{"ShuffleChannels"};
op::ShuffleChannels::ShuffleChannels(const shared_ptr<Node>& data,
const int axis,
const size_t groups)
: FusedOp("ShuffleChannels", {data})
: FusedOp(check_single_output_args({data}))
, m_axis(axis)
, m_groups{groups}
{
......
......@@ -29,6 +29,9 @@ namespace ngraph
class ShuffleChannels : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a ShuffleChannels node.
///
/// \param data - Node producing the input tensor
......
......@@ -24,8 +24,10 @@
using namespace std;
using namespace ngraph;
const string op::SpaceToDepth::type_name{"SpaceToDepth"};
op::SpaceToDepth::SpaceToDepth(const shared_ptr<Node>& data, const size_t block_size)
: FusedOp("SpaceToDepth", {data})
: FusedOp(check_single_output_args({data}))
, m_blocksize(block_size)
{
constructor_validate_and_infer_types();
......
......@@ -32,6 +32,9 @@ namespace ngraph
class SpaceToDepth : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a SpaceToDepth operation.
///
/// \param data - Node producing the input tensor
......
......@@ -21,8 +21,10 @@
using namespace std;
using namespace ngraph;
const string op::Split::type_name{"Split"};
op::Split::Split(const shared_ptr<Node>& data, const int axis, const size_t num_split)
: FusedOp("Split", {data})
: FusedOp(check_single_output_args({data}))
, m_split_evenly{true}
, m_axis{axis}
, m_num_split{num_split}
......@@ -33,7 +35,7 @@ op::Split::Split(const shared_ptr<Node>& data, const int axis, const size_t num_
op::Split::Split(const std::shared_ptr<ngraph::Node>& data,
const int axis,
const std::vector<size_t>& splits)
: FusedOp("Split", {data})
: FusedOp(check_single_output_args({data}))
, m_split_evenly{false}
, m_axis{axis}
, m_splits{splits}
......
......@@ -30,6 +30,9 @@ namespace ngraph
class Split : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a Split op that evenly divides the input tensor.
///
/// \param data - Node producing the input tensor
......
......@@ -24,8 +24,10 @@
using namespace std;
using namespace ngraph;
const string op::SquaredDifference::type_name{"SquaredDifference"};
op::SquaredDifference::SquaredDifference(const shared_ptr<Node>& x1, const shared_ptr<Node>& x2)
: FusedOp("SquaredDifference", {x1, x2})
: FusedOp(check_single_output_args({x1, x2}))
{
constructor_validate_and_infer_types();
}
......
......@@ -30,6 +30,9 @@ namespace ngraph
class SquaredDifference : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs the squared difference operation.
///
/// \param x1 First input tensor
......
......@@ -26,8 +26,10 @@
using namespace std;
using namespace ngraph;
const string op::Squeeze::type_name{"Squeeze"};
op::Squeeze::Squeeze(const shared_ptr<Node>& data, const shared_ptr<Node>& axes)
: FusedOp("Squeeze", {data, axes})
: FusedOp(check_single_output_args({data, axes}))
{
constructor_validate_and_infer_types();
}
......
......@@ -30,6 +30,9 @@ namespace ngraph
class Squeeze : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
Squeeze(const std::shared_ptr<ngraph::Node>& data,
const std::shared_ptr<ngraph::Node>& axes);
......
......@@ -26,8 +26,10 @@
using namespace std;
using namespace ngraph;
const string op::Unsqueeze::type_name{"Unsqueeze"};
op::Unsqueeze::Unsqueeze(const shared_ptr<Node>& data, const shared_ptr<Node>& axes)
: FusedOp("Unsqueeze", {data, axes})
: FusedOp(check_single_output_args({data, axes}))
{
constructor_validate_and_infer_types();
}
......
......@@ -30,6 +30,9 @@ namespace ngraph
class Unsqueeze : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
Unsqueeze(const std::shared_ptr<ngraph::Node>& data,
const std::shared_ptr<ngraph::Node>& axes);
......
......@@ -14,6 +14,12 @@
// limitations under the License.
//*****************************************************************************
#ifdef _WIN32
#include <windows.h>
#elif defined(__linux) || defined(__APPLE__)
#include <dlfcn.h>
#endif
#include <sstream>
#include "ngraph/file_util.hpp"
......@@ -25,6 +31,29 @@
using namespace std;
using namespace ngraph;
std::mutex runtime::Backend::m_mtx;
std::string runtime::Backend::s_backend_shared_library_search_directory;
// This finds the full path of the containing shared library
static string find_my_pathname()
{
#ifdef _WIN32
HMODULE hModule = GetModuleHandleW(L"ngraph.dll");
WCHAR wpath[MAX_PATH];
GetModuleFileNameW(hModule, wpath, MAX_PATH);
wstring ws(wpath);
string path(ws.begin(), ws.end());
replace(path.begin(), path.end(), '\\', '/');
path = file_util::get_directory(path);
path += "/";
return path;
#elif defined(__linux) || defined(__APPLE__)
Dl_info dl_info;
dladdr(reinterpret_cast<void*>(find_my_pathname), &dl_info);
return dl_info.dli_fname;
#endif
}
runtime::Backend::~Backend()
{
}
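The POSIX branch of find_my_pathname() above ignores dladdr()'s return value; here is a minimal standalone sketch of the same technique with the error handling made explicit (the function name is illustrative):

#include <dlfcn.h>
#include <string>

// Resolve the path of the shared object containing probe_symbol. dladdr()
// returns nonzero on success and fills dli_fname with the object's path;
// a failed lookup yields an empty string here.
static std::string library_path_of(void* probe_symbol)
{
    Dl_info info;
    if (dladdr(probe_symbol, &info) != 0 && info.dli_fname != nullptr)
    {
        return std::string(info.dli_fname);
    }
    return std::string();
}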
......@@ -86,6 +115,11 @@ void runtime::Backend::remove_compiled_function(std::shared_ptr<Executable> exec
{
}
std::shared_ptr<runtime::Executable> runtime::Backend::load(istream& input_stream)
{
throw runtime_error("load operation unimplemented.");
}
bool runtime::Backend::is_device_memory(void* ptr)
{
// override this method for each supported backend to determine if the passed pointer is in
......@@ -93,9 +127,19 @@ bool runtime::Backend::is_device_memory(void* ptr)
return false;
}
std::shared_ptr<runtime::Executable> runtime::Backend::load(istream& input_stream)
void runtime::Backend::set_backend_shared_library_search_directory(const string& path)
{
throw runtime_error("load opertion unimplemented.");
std::lock_guard<std::mutex> lock(runtime::Backend::m_mtx);
s_backend_shared_library_search_directory = path;
}
const string& runtime::Backend::get_backend_shared_library_search_directory()
{
if (s_backend_shared_library_search_directory.empty())
{
s_backend_shared_library_search_directory = find_my_pathname();
}
return s_backend_shared_library_search_directory;
}
bool runtime::Backend::set_config(const map<string, string>& config, string& error)
......
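A hedged usage sketch of the new hooks; the directory below is made up, and Backend::create() is the existing factory. Setting the directory before the first backend lookup means discovery never needs the find_my_pathname() fallback:

#include "ngraph/runtime/backend.hpp"

int main()
{
    // "/opt/ngraph/backends" is a hypothetical path
    ngraph::runtime::Backend::set_backend_shared_library_search_directory(
        "/opt/ngraph/backends");
    auto backend = ngraph::runtime::Backend::create("INTERPRETER");
    return backend != nullptr ? 0 : 1;
}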
......@@ -17,6 +17,7 @@
#pragma once
#include <memory>
#include <mutex>
#include "ngraph/function.hpp"
#include "ngraph/pass/pass_config.hpp"
......@@ -142,6 +143,18 @@ public:
/// \returns a shared pointer to the op if found, else nullptr
virtual std::shared_ptr<ngraph::Node> get_backend_op(const std::string& op_name, ...);
/// \brief Allows sending backend-specific configuration. The map contains key, value
/// pairs specific to a particular backend; the set of valid keys is
/// defined by each backend.
/// \param config The configuration map sent to the backend
/// \param error An error string describing any error encountered
/// \returns true if the configuration is supported, false otherwise. On false the error
/// parameter value is valid.
virtual bool set_config(const std::map<std::string, std::string>& config, std::string& error);
static void set_backend_shared_library_search_directory(const std::string& path);
static const std::string& get_backend_shared_library_search_directory();
/// \brief Returns memory allocator used by backend for host allocations
virtual Allocator* get_host_memory_allocator() { return nullptr; }
/// \brief Set the host memory allocator to be used by the backend
......@@ -159,12 +172,8 @@ public:
/// \param ptr pointer to the memory to determine if its in device memory or not
virtual bool is_device_memory(void* ptr);
/// \brief Allows sending backend-specific configuration. The map contains key, value
/// pairs specific to a particular backend; the set of valid keys is
/// defined by each backend.
/// \param config The configuration map sent to the backend
/// \param error An error string describing any error encountered
/// \returns true if the configuration is supported, false otherwise. On false the error
/// parameter value is valid.
virtual bool set_config(const std::map<std::string, std::string>& config, std::string& error);
private:
// mutex to modify s_backend_shared_library_search_directory thread safe
static std::mutex m_mtx;
static std::string s_backend_shared_library_search_directory;
};
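With set_config's documentation now beside its declaration, a short usage sketch may help. The key below is hypothetical (valid keys are backend-defined, as the comment says), and the function simply reports a rejected configuration:

#include <iostream>
#include <map>
#include <string>

#include "ngraph/runtime/backend.hpp"

// "device_id" is a made-up key; each backend defines its own valid keys.
void configure(ngraph::runtime::Backend& backend)
{
    std::map<std::string, std::string> config{{"device_id", "0"}};
    std::string error;
    if (!backend.set_config(config, error))
    {
        std::cerr << "backend rejected config: " << error << std::endl;
    }
}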
......@@ -25,6 +25,8 @@
#include "ngraph/file_util.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/backend_manager.hpp"
#include "ngraph/runtime/cpu/static_initialize.hpp"
#include "ngraph/runtime/interpreter/static_initialize.hpp"
#include "ngraph/util.hpp"
using namespace std;
......@@ -68,6 +70,14 @@ vector<string> runtime::BackendManager::get_registered_backends()
shared_ptr<runtime::Backend> runtime::BackendManager::create_backend(const std::string& config)
{
#ifdef NGRAPH_INTERPRETER_STATIC_LIB_ENABLE
runtime::interpreter::static_initialize();
#endif
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
runtime::cpu::static_initialize();
#endif
shared_ptr<runtime::Backend> backend;
string type = config;
......@@ -107,6 +117,7 @@ shared_ptr<runtime::Backend> runtime::BackendManager::create_backend(const std::
if (get_backend_constructor_pointer)
{
backend = get_backend_constructor_pointer()->create(config);
register_backend(type, get_backend_constructor_pointer());
}
else
{
......@@ -125,26 +136,6 @@ shared_ptr<runtime::Backend> runtime::BackendManager::create_backend(const std::
return backend;
}
// This doodad finds the full path of the containing shared library
static string find_my_file()
{
#ifdef _WIN32
HMODULE hModule = GetModuleHandleW(L"ngraph.dll");
WCHAR wpath[MAX_PATH];
GetModuleFileNameW(hModule, wpath, MAX_PATH);
wstring ws(wpath);
string path(ws.begin(), ws.end());
replace(path.begin(), path.end(), '\\', '/');
path = file_util::get_directory(path);
path += "/";
return path;
#else
Dl_info dl_info;
dladdr(reinterpret_cast<void*>(find_my_file), &dl_info);
return dl_info.dli_fname;
#endif
}
DL_HANDLE runtime::BackendManager::open_shared_library(string type)
{
string lib_prefix = SHARED_LIB_PREFIX;
......@@ -160,7 +151,8 @@ DL_HANDLE runtime::BackendManager::open_shared_library(string type)
}
string library_name = lib_prefix + to_lower(type) + "_backend" + lib_suffix;
string my_directory = file_util::get_directory(find_my_file());
string my_directory =
file_util::get_directory(Backend::get_backend_shared_library_search_directory());
string library_path = file_util::path_join(my_directory, library_name);
string error;
#ifdef _WIN32
......@@ -185,7 +177,8 @@ DL_HANDLE runtime::BackendManager::open_shared_library(string type)
map<string, string> runtime::BackendManager::get_registered_device_map()
{
map<string, string> rc;
string my_directory = file_util::get_directory(find_my_file());
string my_directory =
file_util::get_directory(Backend::get_backend_shared_library_search_directory());
vector<string> backend_list;
auto f = [&](const string& file, bool is_dir) {
......
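Two behavioral changes land in this file: a static build calls each backend's static_initialize() entry point up front, since no shared-library load will ever run its registration code, and a backend found through the shared-library path is now cached with register_backend() so subsequent creates skip the search. A self-contained sketch of that caching idea, with illustrative names:

#include <functional>
#include <map>
#include <memory>
#include <string>

struct Backend
{
    virtual ~Backend() = default;
};

using Factory = std::function<std::shared_ptr<Backend>()>;
static std::map<std::string, Factory> s_registry;

void register_backend(const std::string& type, Factory f)
{
    s_registry.emplace(type, std::move(f));
}

std::shared_ptr<Backend> create_backend(const std::string& type)
{
    auto it = s_registry.find(type);
    if (it == s_registry.end())
    {
        return nullptr; // the real code would try open_shared_library(type) here
    }
    return it->second(); // cached constructor: no library search needed
}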
......@@ -16,9 +16,16 @@
include(FindOpenMP)
if (NGRAPH_CPU_STATIC_LIB_ENABLE)
set(LIBRARY_TYPE STATIC)
else()
set(LIBRARY_TYPE SHARED)
endif()
set(SRC
cpu_backend.cpp
cpu_builder.cpp
cpu_builder_registry.cpp
cpu_call_frame.cpp
cpu_executor.cpp
cpu_external_function.cpp
......@@ -124,7 +131,6 @@ set(SRC
pass/cpu_post_layout_optimizations.cpp
pass/cpu_rnn_fusion.cpp
pass/cpu_workspace_insertion.cpp
ngraph_version.cpp
)
if (NOT NGRAPH_DEX_ONLY)
......@@ -156,7 +162,10 @@ endif()
if (NGRAPH_CPU_ENABLE)
set(NGRAPH_CPU_DEBUGINFO_ENABLE 0 CACHE STRING "Enable debuginfo in the CPU backend")
add_library(cpu_backend SHARED ${SRC})
add_library(cpu_backend ${LIBRARY_TYPE} ${SRC})
if (NGRAPH_CPU_STATIC_LIB_ENABLE)
target_compile_definitions(cpu_backend PRIVATE "NGRAPH_CPU_STATIC_LIB_ENABLE")
endif()
if(NGRAPH_LIB_VERSIONING_ENABLE)
set_target_properties(cpu_backend PROPERTIES
VERSION ${NGRAPH_VERSION}
......
......@@ -79,6 +79,10 @@ namespace ngraph
}
REGISTER_OP_BUILDER(Add);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_add_cpp() {}
#endif
}
}
}
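This empty register_builders_add_cpp() and its many siblings below implement a linker-anchor idiom worth spelling out. REGISTER_OP_BUILDER expands to a file-scope object whose constructor registers the builder at load time, but when the backend is archived as a static library the linker drops any object file that nothing references, and those registrations silently never run. Exporting one trivial symbol per translation unit and calling it from a central registry (cpu_builder_registry.cpp in the sources list above) forces each object file into the link. A self-contained sketch of the idiom, with illustrative names:

#include <iostream>

// add_builder.cpp -- one translation unit per op builder
namespace
{
    // What REGISTER_OP_BUILDER amounts to, conceptually: a static object
    // whose constructor registers the builder when this object file is linked in.
    struct Registrar
    {
        Registrar() { std::cout << "Add builder registered\n"; }
    };
    Registrar s_add_registrar;
}

// Empty anchor symbol; referencing it pulls this object file, and with it
// s_add_registrar's constructor, into a statically linked binary.
void register_builders_add_cpp()
{
}

// builder_registry.cpp -- the central list
void register_builders()
{
    register_builders_add_cpp(); // one call per builder translation unit
}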
......@@ -69,6 +69,10 @@ namespace ngraph
}
REGISTER_OP_BUILDER(AllReduce);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_allreduce_cpp() {}
#endif
}
}
}
......@@ -210,6 +210,10 @@ namespace ngraph
}
REGISTER_OP_BUILDER(ArgMax);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_argmax_cpp() {}
#endif
}
}
}
......@@ -210,6 +210,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(ArgMin);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_argmin_cpp() {}
#endif
}
}
}
......@@ -197,6 +197,10 @@ namespace ngraph
}
REGISTER_OP_BUILDER(AvgPool);
REGISTER_OP_BUILDER(AvgPoolBackprop);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_avg_pool_cpp() {}
#endif
}
}
}
......@@ -516,6 +516,10 @@ namespace ngraph
REGISTER_OP_BUILDER(BatchNormTrainingRelu);
REGISTER_OP_BUILDER(BatchNormInferenceRelu);
REGISTER_OP_BUILDER(BatchNormTrainingBackprop);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_batch_norm_cpp() {}
#endif
}
}
}
......@@ -88,6 +88,9 @@ namespace ngraph
}
}
REGISTER_OP_BUILDER(BoundedRelu);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_bounded_relu_cpp() {}
#endif
}
}
}
......@@ -232,6 +232,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(Broadcast);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_broadcast_cpp() {}
#endif
}
}
}
......@@ -44,6 +44,9 @@ namespace ngraph
functors.emplace_back(functor);
}
REGISTER_OP_BUILDER(BroadcastDistributed);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_broadcast_distributed_cpp() {}
#endif
}
}
}
......@@ -170,6 +170,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(Concat);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_concat_cpp() {}
#endif
}
}
}
......@@ -115,6 +115,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(Convert);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_convert_cpp() {}
#endif
}
}
}
......@@ -104,6 +104,9 @@ namespace ngraph
functors.emplace_back(functor);
}
REGISTER_CPU_OP_BUILDER(ConvertLayout);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_convert_layout_cpp() {}
#endif
}
}
}
......@@ -832,6 +832,10 @@ namespace ngraph
REGISTER_OP_BUILDER(ConvolutionAdd);
REGISTER_OP_BUILDER(GroupConvolutionBias);
REGISTER_OP_BUILDER(DeconvolutionBias)
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_convolution_cpp() {}
#endif
} // namespace cpu
} // namespace runtime
} // namespace ngraph
......@@ -258,6 +258,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(Dot);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_dot_cpp() {}
#endif
}
}
}
......@@ -127,6 +127,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(Dropout);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_dropout_cpp() {}
#endif
}
}
}
......@@ -245,6 +245,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(EmbeddingLookup);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_embedding_lookup_cpp() {}
#endif
}
}
}
......@@ -72,6 +72,9 @@ namespace ngraph
}
}
REGISTER_OP_BUILDER(Erf);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_erf_cpp() {}
#endif
}
}
}
......@@ -237,6 +237,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(Gather);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_gather_cpp() {}
#endif
} // namespace cpu
} // namespace runtime
} // namespace ngraph
......@@ -139,6 +139,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(GatherND);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_gather_nd_cpp() {}
#endif
}
}
}
......@@ -48,6 +48,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(GetOutputElement);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_get_output_element_cpp() {}
#endif
}
}
}
......@@ -86,6 +86,9 @@ namespace ngraph
}
}
REGISTER_OP_BUILDER(CPULeakyRelu);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_leaky_relu_cpp() {}
#endif
}
}
}
......@@ -123,6 +123,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(LRN);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_lrn_cpp() {}
#endif
}
}
}
......@@ -108,6 +108,9 @@ namespace ngraph
functors.emplace_back(functor);
}
REGISTER_OP_BUILDER(Lstm);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_lstm_cpp() {}
#endif
}
}
}
......@@ -396,6 +396,9 @@ namespace ngraph
REGISTER_OP_BUILDER(MatmulBias);
REGISTER_OP_BUILDER(BatchMatMul);
REGISTER_OP_BUILDER(BatchMatMulTranspose);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_matmul_bias_cpp() {}
#endif
}
}
}
......@@ -38,6 +38,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(Max);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_max_cpp() {}
#endif
}
}
}
......@@ -337,6 +337,9 @@ namespace ngraph
REGISTER_OP_BUILDER(MaxPoolBackprop);
REGISTER_OP_BUILDER(MaxPoolWithIndices);
REGISTER_OP_BUILDER(MaxPoolWithIndicesBackprop);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_max_pool_cpp() {}
#endif
}
}
}
......@@ -38,6 +38,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(Min);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_min_cpp() {}
#endif
}
}
}
......@@ -109,6 +109,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(OneHot);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_one_hot_cpp() {}
#endif
}
}
}
......@@ -171,6 +171,9 @@ namespace ngraph
}
}
REGISTER_CF_BUILDER(Pad);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_pad_cpp() {}
#endif
}
}
}
......@@ -38,6 +38,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(Product);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_product_cpp() {}
#endif
}
}
}
......@@ -585,6 +585,9 @@ namespace ngraph
REGISTER_OP_BUILDER(Dequantize);
REGISTER_OP_BUILDER(Quantize);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_quantization_cpp() {}
#endif
}
}
}
......@@ -69,6 +69,9 @@ namespace ngraph
}
}
REGISTER_OP_BUILDER(QuantizedAvgPool);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_quantized_avg_pool_cpp() {}
#endif
}
}
}
......@@ -708,6 +708,9 @@ namespace ngraph
REGISTER_OP_BUILDER(QuantizedConvolutionBias);
REGISTER_OP_BUILDER(QuantizedConvolutionBiasAdd);
REGISTER_OP_BUILDER(QuantizedConvolutionBiasSignedAdd);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_quantized_conv_cpp() {}
#endif
}
}
}
......@@ -163,6 +163,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(QuantizedDotBias);
REGISTER_OP_BUILDER(QuantizedDot);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_quantized_dot_cpp() {}
#endif
}
}
}
......@@ -103,6 +103,9 @@ namespace ngraph
}
}
REGISTER_OP_BUILDER(QuantizedMatmul);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_quantized_matmul_cpp() {}
#endif
}
}
}
......@@ -84,6 +84,9 @@ namespace ngraph
REGISTER_OP_BUILDER(Any);
REGISTER_OP_BUILDER(All);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_reduce_function_cpp() {}
#endif
}
}
}
......@@ -134,6 +134,9 @@ namespace ngraph
REGISTER_OP_BUILDER(Relu);
REGISTER_OP_BUILDER(ReluBackprop);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_relu_cpp() {}
#endif
}
}
}
......@@ -135,6 +135,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(ReplaceSlice);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_replace_slice_cpp() {}
#endif
}
}
}
......@@ -248,6 +248,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(Reshape);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_reshape_cpp() {}
#endif
}
}
}
......@@ -63,6 +63,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(Reverse);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_reverse_cpp() {}
#endif
}
}
}
......@@ -80,6 +80,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(ReverseSequence);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_reverse_sequence_cpp() {}
#endif
}
}
}
......@@ -103,6 +103,9 @@ namespace ngraph
functors.emplace_back(functor);
}
REGISTER_OP_BUILDER(Rnn);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_rnn_cpp() {}
#endif
}
}
}
......@@ -140,6 +140,9 @@ namespace ngraph
}
}
REGISTER_OP_BUILDER(ScatterAdd);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_scatter_add_cpp() {}
#endif
}
}
}
......@@ -157,6 +157,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(ScatterNDAdd);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_scatter_nd_add_cpp() {}
#endif
}
}
}
......@@ -63,6 +63,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(Select);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_select_cpp() {}
#endif
}
}
}
......@@ -189,6 +189,9 @@ namespace ngraph
REGISTER_OP_BUILDER(SigmoidBackprop);
REGISTER_OP_BUILDER(SigmoidMultiply);
REGISTER_OP_BUILDER(SigmoidMultiplyBackprop);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_sigmoid_cpp() {}
#endif
}
}
}
......@@ -188,6 +188,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(Slice);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_slice_cpp() {}
#endif
}
}
}
......@@ -196,6 +196,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(Softmax);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_softmax_cpp() {}
#endif
}
}
}
......@@ -140,6 +140,9 @@ namespace ngraph
}
REGISTER_OP_BUILDER(GenerateMask);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_state_cpp() {}
#endif
}
}
}