Commit f6bddf08 authored by Jayaram Bobba, committed by Scott Cyphers

Opset1 Definition (#3813)

* Opset1

* Added opset1.hpp

* Added more ops to opset0 and opset1

* Move opset1.hpp up and remove opset0.hpp

* Add versioning to more ops

* Revert to older pass names to maintain compatibility with external components

* Fix compilation errors with codegen

* merge

* Added compile-time check for opset

* Added opset1 tbl

* Add op_version table of all ops

* Create factories from op_version_tbl

* Reorganize unsupported ops in the INTERPRETER backend

* Added temporary alias for GreaterEqual

* Add missing case to interpreter enumeration

* Finish opset serializer cleanup (#3939)

* Opset-based opset conversion (#3937)

* Opset-based opset conversion

* Add other opset conversion

* Use ops.hpp

* Update opset0_tbl.hpp

* Switch interpreter to opset0 + a few extras (#3941)

* Switch interpreter, gcpu to opset0

* Remove unused files

* Give interpreter its own opset

* style

* Fix namespace

* Fix rounding type conversion

* Work-around for a clang 3.9 bug

* Work-around
parent d2482523
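The bullets above describe the core mechanism: every op now lives in a versioned namespace (op::v0, op::v1, ...), and an opset is a named, stable collection of those versioned classes. Below is a minimal sketch of how a consumer could pin itself to opset1 under this scheme; the opset1 namespace contents are assumed from the headers this commit adds, not quoted from it.

    #include <memory>
    #include "ngraph/ngraph.hpp"
    #include "ngraph/opsets/opset1.hpp"

    using namespace ngraph;

    int main()
    {
        // opset1::Add is assumed to alias op::v1::Add, so this keeps
        // compiling with the same semantics even if a later opset changes
        // the default Add.
        auto a = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});
        auto b = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});
        auto sum = std::make_shared<opset1::Add>(a, b);
        auto f = std::make_shared<Function>(NodeVector{sum}, ParameterVector{a, b});
        return 0;
    }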
......@@ -436,6 +436,7 @@ set (SRC
op/util/unary_elementwise_arithmetic.cpp
op/util/unary_elementwise_arithmetic.hpp
ops.hpp
opsets/opset.cpp
partial_shape.cpp
partial_shape.hpp
pass/algebraic_simplification.cpp
......
......@@ -18,22 +18,7 @@
#include "ngraph/factory.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/abs.hpp"
#include "ngraph/op/acos.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/all.hpp"
#include "ngraph/op/allreduce.hpp"
#include "ngraph/op/and.hpp"
#include "ngraph/op/any.hpp"
#include "ngraph/op/argmax.hpp"
#include "ngraph/op/argmin.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "ngraph/op/batch_norm.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/broadcast_distributed.hpp"
#include "ngraph/op/ceiling.hpp"
#include "ngraph/op/concat.hpp"
#include "ngraph/op/parameter.hpp"
#include "ngraph/ops.hpp"
using namespace std;
......@@ -58,31 +43,9 @@ namespace ngraph
lock_guard<mutex> guard(init_guard);
if (registry.m_factory_map.size() == 0)
{
registry.register_factory<op::Abs>();
registry.register_factory<op::Acos>();
registry.register_factory<op::v0::Add>();
registry.register_factory<op::v1::Add>();
registry.register_factory<op::All>();
registry.register_factory<op::AllReduce>();
registry.register_factory<op::And>();
registry.register_factory<op::Any>();
registry.register_factory<op::ArgMax>();
registry.register_factory<op::ArgMin>();
registry.register_factory<op::v0::AvgPool>();
registry.register_factory<op::v0::AvgPoolBackprop>();
registry.register_factory<op::v1::AvgPool>();
registry.register_factory<op::v1::AvgPoolBackprop>();
registry.register_factory<op::BatchNormInference>();
registry.register_factory<op::BatchNormTraining>();
registry.register_factory<op::BatchNormTrainingBackprop>();
registry.register_factory<op::BroadcastDistributed>();
registry.register_factory<op::v0::Broadcast>();
registry.register_factory<op::v0::BroadcastLike>();
registry.register_factory<op::v1::Broadcast>();
registry.register_factory<op::Ceiling>();
registry.register_factory<op::Concat>();
registry.register_factory<op::v1::LogicalAnd>();
registry.register_factory<op::Parameter>();
#define NGRAPH_OP(NAME, NAMESPACE, VERSION) registry.register_factory<NAMESPACE::NAME>();
#include "ngraph/op/op_version_tbl.hpp"
#undef NGRAPH_OP
}
}
return registry;
......
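This hunk replaces the hand-maintained list of register_factory<>() calls with one X-macro expansion over op_version_tbl.hpp: the table names each op class and its versioned namespace exactly once, and each consumer defines NGRAPH_OP to expand the entries for its own purpose (factory registration here, opset membership elsewhere). A standalone sketch of the pattern follows, with an illustrative three-entry table rather than the real one.

    #include <iostream>

    // Stand-in for op_version_tbl.hpp: one entry per (op, namespace) pair.
    #define OP_VERSION_TBL   \
        NGRAPH_OP(Abs, v0)   \
        NGRAPH_OP(Add, v0)   \
        NGRAPH_OP(Add, v1)

    int main()
    {
        // Expand the table into a printout; the real code expands the same
        // table into registry.register_factory<NAMESPACE::NAME>() calls.
    #define NGRAPH_OP(NAME, NS) std::cout << #NS "::" #NAME "\n";
        OP_VERSION_TBL
    #undef NGRAPH_OP
        return 0;
    }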
......@@ -38,7 +38,7 @@ namespace ngraph
ASSERT_IS_SUPPORTED(node, fmod == 1)
<< "Only 'fmod=1' mode is supported for mod operator.";
return {std::make_shared<ngraph::op::Mod>(dividend, divisor)};
return {std::make_shared<ngraph::op::v1::Mod>(dividend, divisor)};
}
} // namespace set_1
......
......@@ -45,7 +45,7 @@ namespace ngraph
auto gamma_node = std::make_shared<ngraph::op::Constant>(
data->get_element_type(), data->get_shape(), std::vector<double>{gamma});
return {std::make_shared<ngraph::op::v1::Selu>(data, alpha_node, gamma_node)};
return {std::make_shared<ngraph::op::v0::Selu>(data, alpha_node, gamma_node)};
}
} // namespace set_1
......
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Elementwise absolute value operation.
///
......@@ -50,4 +52,6 @@ namespace ngraph
const NodeVector& deltas) override;
};
}
using v0::Abs;
}
}
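The Abs hunk above is the template repeated across dozens of headers in this commit: the class body moves into namespace v0 and a using declaration re-exports it, so existing code that names op::Abs still compiles. A self-contained sketch showing that both spellings denote the same type (class body elided; names illustrative):

    #include <type_traits>

    namespace op
    {
        namespace v0
        {
            class Abs
            {
            };
        }
        using v0::Abs; // backward-compatible alias, not a new type
    }

    static_assert(std::is_same<op::Abs, op::v0::Abs>::value,
                  "the alias re-exports the identical class");

    int main()
    {
        return 0;
    }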
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Elementwise inverse cosine (arccos) operation.
///
......@@ -49,4 +51,6 @@ namespace ngraph
const NodeVector& deltas) override;
};
}
using v0::Acos;
}
}
......@@ -54,6 +54,7 @@ namespace ngraph
const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec());
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
virtual bool is_commutative() const override { return true; }
protected:
......@@ -101,8 +102,8 @@ namespace ngraph
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
} // namespace v1
using v0::Add;
} // namespace op
......
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Logical "all" reduction operation.
class NGRAPH_API All : public util::LogicalReduction
......@@ -47,4 +49,6 @@ namespace ngraph
virtual std::shared_ptr<Node> get_default_value() const override;
};
}
using v0::All;
}
}
......@@ -22,6 +22,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
class NGRAPH_API AllReduce : public Op
{
......@@ -29,7 +31,8 @@ namespace ngraph
static constexpr NodeTypeInfo type_info{"AllReduce", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
AllReduce() = default;
AllReduce(const Output<Node>& arg, reduction::Type reduce_type = reduction::Type::SUM);
AllReduce(const Output<Node>& arg,
reduction::Type reduce_type = reduction::Type::SUM);
void validate_and_infer_types() override;
......@@ -42,4 +45,6 @@ namespace ngraph
reduction::Type m_reduce_type{reduction::Type::SUM};
};
}
using v0::AllReduce;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Computes maximum index along a specified axis for a given tensor
class NGRAPH_API ArgMax : public op::util::IndexReduction
......@@ -36,7 +38,9 @@ namespace ngraph
/// \param axis The axis along which to compute an index for maximum
/// \param index_element_type The element type of the produced indices. Currently,
/// only int64 or int32 are supported
ArgMax(const Output<Node>& arg, size_t axis, const element::Type& index_element_type);
ArgMax(const Output<Node>& arg,
size_t axis,
const element::Type& index_element_type);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
......@@ -44,4 +48,6 @@ namespace ngraph
virtual std::shared_ptr<Node> get_default_value() const override;
};
}
using v0::ArgMax;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Computes minimum index along a specified axis for a given tensor
class NGRAPH_API ArgMin : public op::util::IndexReduction
......@@ -37,7 +39,9 @@ namespace ngraph
/// \param axis The axis along which to compute an index for minimum
/// \param index_element_type The element type of the produced indices. Currently,
/// only int64 or int32 are supported
ArgMin(const Output<Node>& arg, size_t axis, const element::Type& index_element_type);
ArgMin(const Output<Node>& arg,
size_t axis,
const element::Type& index_element_type);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
......@@ -45,4 +49,6 @@ namespace ngraph
virtual std::shared_ptr<Node> get_default_value() const override;
};
}
using v0::ArgMin;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Elementwise inverse sine (arcsin) operation.
///
......@@ -50,4 +52,6 @@ namespace ngraph
const NodeVector& deltas) override;
};
}
using v0::Asin;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Elementwise inverse tangent (arctan) operation.
///
......@@ -51,4 +53,6 @@ namespace ngraph
const NodeVector& deltas) override;
};
}
using v0::Atan;
}
}
......@@ -25,6 +25,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Batchnorm for training operation
class NGRAPH_API BatchNormTraining : public Op
......@@ -59,7 +61,8 @@ namespace ngraph
/// SHAPE DETAILS:
/// gamma: must have rank 1, with the same span as input's channel axis.
/// beta: must have rank 1, with the same span as input's channel axis.
/// input: must have rank >= 2. The second dimension represents the channel axis
/// input: must have rank >= 2. The second dimension represents the channel
/// axis
/// and must have a span of at least 1.
/// output[0]: shall have the same shape as 'input'.
/// output[1]: shall have rank 1, with the same span as input's channel axis.
......@@ -123,7 +126,8 @@ namespace ngraph
/// SHAPE DETAILS:
/// gamma: must have rank 1, with the same span as input's channel axis.
/// beta: must have rank 1, with the same span as input's channel axis.
/// input: must have rank >= 2. The second dimension represents the channel axis
/// input: must have rank >= 2. The second dimension represents the channel
/// axis
/// and must have a span of at least 1.
/// mean: must have rank 1, with the same span as input's channel axis.
/// variance: must have rank 1, with the same span as input's channel axis.
......@@ -174,6 +178,8 @@ namespace ngraph
const Output<Node>& delta,
double epsilon);
bool visit_attributes(AttributeVisitor& visitor) override;
NGRAPH_DEPRECATED_DOC
NGRAPH_DEPRECATED("Use another constructor")
BatchNormTrainingBackprop(double epsilon,
......@@ -185,7 +191,6 @@ namespace ngraph
const Output<Node>& delta);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
double get_eps_value() const { return m_epsilon; }
void set_eps_value(double epsilon) { m_epsilon = epsilon; }
......@@ -203,4 +208,8 @@ namespace ngraph
double m_epsilon;
};
}
using v0::BatchNormInference;
using v0::BatchNormTraining;
using v0::BatchNormTrainingBackprop;
}
}
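For reference, this is standard batch normalization (the formula below is background, not text from the header): with per-channel batch statistics $\mu_c$ and $\sigma_c^2$ and learned parameters $\gamma_c$ (gamma) and $\beta_c$ (beta), both variants compute

    $y_{n,c,\ldots} = \gamma_c \cdot \dfrac{x_{n,c,\ldots} - \mu_c}{\sqrt{\sigma_c^2 + \epsilon}} + \beta_c$

BatchNormTraining derives $\mu_c$ and $\sigma_c^2$ from the current batch and returns them as outputs 1 and 2 (the rank-1, channel-span outputs described above), while BatchNormInference takes them as inputs.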
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
class NGRAPH_API BroadcastDistributed : public Op
{
......@@ -43,4 +45,6 @@ namespace ngraph
int64_t m_root_id;
};
}
using v0::BroadcastDistributed;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Elementwise ceiling operation.
class NGRAPH_API Ceiling : public util::UnaryElementwiseArithmetic
......@@ -34,9 +36,12 @@ namespace ngraph
///
/// \param arg Node that produces the input tensor.
Ceiling(const Output<Node>& arg);
bool visit_attributes(AttributeVisitor& visitor) override { return true; }
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::Ceiling;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Concatenation operation.
class NGRAPH_API Concat : public Op
......@@ -43,6 +45,7 @@ namespace ngraph
/// \param args The nodes producing the input tensors.
/// \param axis The axis along which to concatenate the input tensors.
Concat(const NodeVector& args, int64_t axis);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
......@@ -67,4 +70,6 @@ namespace ngraph
int64_t m_concat_axis = -1;
};
}
using v0::Concat;
}
}
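The visit_attributes declaration added here is the hook behind the opset serializer cleanup mentioned in the commit message: each op walks its attributes through a generic AttributeVisitor instead of having a hand-written serializer case. A plausible implementation sketched from the declaration; the real body lives in concat.cpp and may differ:

    bool op::v0::Concat::visit_attributes(AttributeVisitor& visitor)
    {
        // One stable name per attribute; serializers, deserializers and
        // hashers all see the same schema through the visitor.
        visitor.on_attribute("axis", m_concat_axis);
        return true;
    }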
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Elementwise type conversion operation.
class NGRAPH_API Convert : public Op
......@@ -40,7 +42,6 @@ namespace ngraph
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
const element::Type& get_destination_type() const { return m_destination_type; }
void set_destination_type(const element::Type& destination_type)
{
......@@ -59,4 +60,6 @@ namespace ngraph
const NodeVector& deltas) override;
};
}
using v0::Convert;
}
}
......@@ -62,7 +62,6 @@ namespace ngraph
const Strides& dilations,
const PadType& auto_pad = PadType::EXPLICIT);
size_t get_version() const override { return 1; }
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
......@@ -121,7 +120,6 @@ namespace ngraph
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end);
size_t get_version() const override { return 1; }
void validate_and_infer_types() override;
void generate_adjoints(autodiff::Adjoints& adjoints,
......@@ -180,7 +178,6 @@ namespace ngraph
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end);
size_t get_version() const override { return 1; }
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
......
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Elementwise cosine operation.
class NGRAPH_API Cos : public util::UnaryElementwiseArithmetic
......@@ -43,4 +45,6 @@ namespace ngraph
const NodeVector& deltas) override;
};
}
using v0::Cos;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Elementwise hyperbolic cosine (cosh) operation.
class NGRAPH_API Cosh : public util::UnaryElementwiseArithmetic
......@@ -43,4 +45,6 @@ namespace ngraph
const NodeVector& deltas) override;
};
}
using v0::Cosh;
}
}
......@@ -22,7 +22,7 @@ namespace ngraph
{
namespace op
{
class CropAndResize : public Op
class NGRAPH_API CropAndResize : public Op
{
public:
enum class ResizeMethod
......@@ -32,7 +32,6 @@ namespace ngraph
nearest
};
NGRAPH_API
static constexpr NodeTypeInfo type_info{"CropAndResize", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a crop and resize operation.
......
......@@ -22,6 +22,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Tensor cumulative sum operation.
///
......@@ -32,9 +34,11 @@ namespace ngraph
/// | | Description |
/// | -------------------- |
/// --------------------------------------------------------------------------------------------------|
/// | `exclusive` | If set to 1 will return exclusive sum in which the top element
/// | `exclusive` | If set to 1 will return exclusive sum in which the top
/// element
/// is not included. |
/// | | In other terms, if set to 1, the j-th output element would be
/// | | In other terms, if set to 1, the j-th output element
/// would be
/// the
/// sum of the first (j-1) elements.|
/// | | Otherwise, it would be the sum of the first j elements.
......@@ -53,19 +57,21 @@ namespace ngraph
/// | | Description |
/// | ----- |
/// ------------------------------------------------------------------------------------------------|
/// | `axis`| zero dimension tensor specifying axis position along which cumulative sum must
/// | `axis`| zero dimension tensor specifying axis position along which cumulative sum
/// must
/// be performed. |
///
/// ## Output
///
/// | Description |
/// | ------------------------------------------------------------------------------------|
/// | Output tensor of the same type as `arg` with cumulative sums of the arg's elements |
/// |
/// ------------------------------------------------------------------------------------|
/// | Output tensor of the same type as `arg` with cumulative sums of the arg's elements
/// |
class CumSum : public Op
class NGRAPH_API CumSum : public Op
{
public:
NGRAPH_API
static constexpr NodeTypeInfo type_info{"CumSum", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a cumulative summation operation.
......@@ -74,7 +80,8 @@ namespace ngraph
/// \brief Constructs a cumulative summation operation.
///
/// \param arg The tensor to be summed.
/// \param axis zero dimension tensor specifying axis position along which cumulative
/// \param axis zero dimension tensor specifying axis position along which
/// cumulative
/// sum must be performed
CumSum(const Output<Node>& arg,
const Output<Node>& axis,
......@@ -97,4 +104,6 @@ namespace ngraph
bool m_reverse;
};
}
using v0::CumSum;
}
}
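A worked example may help untangle the re-wrapped attribute table above. Assuming the ONNX-style semantics the attributes describe, for input [1, 2, 3, 4] summed along axis 0:

    exclusive=0, reverse=0: [1, 3, 6, 10]
    exclusive=1, reverse=0: [0, 1, 3, 6]
    exclusive=0, reverse=1: [10, 9, 7, 4]
    exclusive=1, reverse=1: [9, 7, 4, 0]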
......@@ -24,7 +24,10 @@ namespace ngraph
{
namespace op
{
/// \brief Generalized dot product operation, including scalar-tensor product, matrix-vector
namespace v0
{
/// \brief Generalized dot product operation, including scalar-tensor product,
/// matrix-vector
/// product, and matrix multiplication.
class NGRAPH_API Dot : public Op
{
......@@ -43,13 +46,17 @@ namespace ngraph
size_t reduction_axes_count,
bool has_reduction_axes_count = true);
/// \brief Constructs a dot product operation with default dot-axis selection depending
/// \brief Constructs a dot product operation with default dot-axis selection
/// depending
/// on the inputs.
///
/// If `arg0` or `arg1` is a scalar, there are no dot-axes. Else, there is one dot-axis.
/// If `arg0` or `arg1` is a scalar, there are no dot-axes. Else, there is one
/// dot-axis.
///
/// (Note that in particular, this results in scalar-tensor products where one or the
/// other argument is a scalar, matrix-vector products where `arg0` is a matrix and
/// (Note that in particular, this results in scalar-tensor products where one or
/// the
/// other argument is a scalar, matrix-vector products where `arg0` is a matrix
/// and
/// `arg1` is a vector, and a matrix multiplication where `arg0` and `arg1` are both
/// matrices.)
///
......@@ -87,4 +94,6 @@ namespace ngraph
const NodeVector& deltas) override;
};
}
using v0::Dot;
}
}
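To make the dot-axis defaults concrete: with reduction_axes_count = 1, arg0 of shape (2, 3) and arg1 of shape (3, 4) contract over the shared axis of length 3 and yield a (2, 4) matrix product; if either argument is a scalar, the default reduction_axes_count is 0 and the result is the scalar-tensor product with the other argument's shape.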
......@@ -22,6 +22,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Returns embeddings for given indices
class NGRAPH_API EmbeddingLookup : public Op
......@@ -38,7 +40,8 @@ namespace ngraph
///
/// \param data The input indices for tokens to be translated into embeddings
/// \param weights is a dense matrix [N,M] where each row 0..N
/// corresponds to an embedding (i.e. typically, a vector of real numbers) of length M
/// corresponds to an embedding (i.e. typically, a vector of real numbers) of length
/// M
EmbeddingLookup(const Output<Node>& data, const Output<Node>& weights)
: Op({data, weights})
{
......@@ -57,4 +60,6 @@ namespace ngraph
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::EmbeddingLookup;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
class NGRAPH_API Erf : public util::UnaryElementwiseArithmetic
{
......@@ -34,4 +36,6 @@ namespace ngraph
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::Erf;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Elementwise natural exponential (exp) operation.
class NGRAPH_API Exp : public util::UnaryElementwiseArithmetic
......@@ -42,4 +44,6 @@ namespace ngraph
const NodeVector& deltas) override;
};
}
using v0::Exp;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
class NGRAPH_API CTCGreedyDecoder : public Op
{
......@@ -47,4 +49,6 @@ namespace ngraph
bool m_ctc_merge_repeated;
};
}
using v0::CTCGreedyDecoder;
}
}
......@@ -42,6 +42,8 @@ namespace ngraph
float objectness_score = 0;
} DetectionOutputAttrs;
namespace v0
{
/// \brief Layer which performs non-max suppression to
/// generate detection output using location and confidence predictions
class NGRAPH_API DetectionOutput : public Op
......@@ -75,4 +77,6 @@ namespace ngraph
DetectionOutputAttrs m_attrs;
};
}
using v0::DetectionOutput;
}
}
......@@ -32,6 +32,8 @@ namespace ngraph
std::vector<size_t> pads_end;
} InterpolateAttrs;
namespace v0
{
/// \brief Layer which performs bilinear interpolation
class NGRAPH_API Interpolate : public Op
{
......@@ -58,4 +60,6 @@ namespace ngraph
InterpolateAttrs m_attrs;
};
}
using v0::Interpolate;
}
}
......@@ -44,6 +44,8 @@ namespace ngraph
bool scale_all_sizes = false;
};
namespace v0
{
/// \brief Layer which generates prior boxes of specified sizes
/// normalized to input image size
class NGRAPH_API PriorBox : public Op
......@@ -71,4 +73,6 @@ namespace ngraph
PriorBoxAttrs m_attrs;
};
}
using v0::PriorBox;
}
}
......@@ -40,6 +40,8 @@ namespace ngraph
std::vector<float> variances;
};
namespace v0
{
/// \brief Layer which generates prior boxes of specified sizes
/// normalized to input image size
class NGRAPH_API PriorBoxClustered : public Op
......@@ -67,4 +69,6 @@ namespace ngraph
PriorBoxClusteredAttrs m_attrs;
};
}
using v0::PriorBoxClustered;
}
}
......@@ -54,6 +54,8 @@ namespace ngraph
std::string framework;
};
namespace v0
{
class NGRAPH_API Proposal : public Op
{
public:
......@@ -81,4 +83,6 @@ namespace ngraph
ProposalAttrs m_attrs;
};
}
using v0::Proposal;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
class NGRAPH_API PSROIPooling : public Op
{
......@@ -35,8 +37,10 @@ namespace ngraph
/// \param output_dim Output channel number
/// \param group_size Number of groups to encode position-sensitive scores
/// \param spatial_scale Ratio of input feature map over input image size
/// \param spatial_bins_x Number of bins to divide the input feature maps over width
/// \param spatial_bins_y Number of bins to divide the input feature maps over height
/// \param spatial_bins_x Number of bins to divide the input feature maps over
/// width
/// \param spatial_bins_y Number of bins to divide the input feature maps over
/// height
/// \param mode Mode of pooling - Avg or Bilinear
PSROIPooling(const Output<Node>& input,
const Output<Node>& coords,
......@@ -67,4 +71,6 @@ namespace ngraph
std::string m_mode;
};
}
using v0::PSROIPooling;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
class NGRAPH_API RegionYolo : public Op
{
......@@ -39,7 +41,8 @@ namespace ngraph
/// \param[in] mask Mask
/// \param[in] axis Axis to begin softmax on
/// \param[in] end_axis Axis to end softmax on
/// \param[in] anchors A flattened list of pairs `[width, height]` that describes
/// \param[in] anchors A flattened list of pairs `[width, height]` that
/// describes
/// prior box sizes.
///
RegionYolo(const Output<Node>& input,
......@@ -76,4 +79,6 @@ namespace ngraph
int m_end_axis;
};
}
using v0::RegionYolo;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
class NGRAPH_API ReorgYolo : public Op
{
......@@ -44,4 +46,6 @@ namespace ngraph
Strides m_strides;
};
}
using v0::ReorgYolo;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
class NGRAPH_API ROIPooling : public Op
{
......@@ -55,4 +57,6 @@ namespace ngraph
std::string m_method;
};
}
using v0::ROIPooling;
}
}
......@@ -22,6 +22,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Range operation, analogous to `range()` in Python.
class NGRAPH_API Range : public Op
......@@ -40,7 +42,9 @@ namespace ngraph
/// element type, and same element type as `start` and `step`.
/// \param step The tensor producing the step value. Must be a scalar of integer
/// element type, and same element type as `start` and `stop`.
Range(const Output<Node>& start, const Output<Node>& stop, const Output<Node>& step);
Range(const Output<Node>& start,
const Output<Node>& stop,
const Output<Node>& step);
void validate_and_infer_types() override;
......@@ -48,4 +52,6 @@ namespace ngraph
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::Range;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Operation that returns the shape of its input argument as a tensor.
class NGRAPH_API ShapeOf : public Op
......@@ -38,4 +40,6 @@ namespace ngraph
void validate_and_infer_types() override;
};
}
using v0::ShapeOf;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Dynamic Tiling operation which repeats a tensor multiple times
/// along each dimension
......@@ -46,4 +48,6 @@ namespace ngraph
const NodeVector& deltas) override;
};
}
using v0::Tile;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Tensor transpose operation.
class NGRAPH_API Transpose : public Op
......@@ -50,4 +52,6 @@ namespace ngraph
const NodeVector& deltas) override;
};
}
using v0::Transpose;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Elementwise floor operation.
class NGRAPH_API Floor : public util::UnaryElementwiseArithmetic
......@@ -39,4 +41,6 @@ namespace ngraph
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::Floor;
}
}
......@@ -33,10 +33,9 @@ namespace ngraph
/// For example, for `a` with shape `(batch_size, n, k)`, and `b` with
/// shape `(batch_size, k, m)`, the result of BatchMatMul will have shape
/// `(batch_size, n, m)`, and `BatchMatMulTranspose(a, b)[i] = Dot(a[i], b[i])`.
class BatchMatMulTranspose : public ngraph::op::util::FusedOp
class NGRAPH_API BatchMatMulTranspose : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static constexpr NodeTypeInfo type_info{"BatchMatMulTranspose", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
BatchMatMulTranspose() = default;
......
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Performs a clipping operation on all elements of the input node
///
......@@ -56,4 +58,6 @@ namespace ngraph
double m_max;
};
}
using v0::Clamp;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Convolution + bias forward prop for batched convolution operation.
class NGRAPH_API ConvolutionBias : public ngraph::op::util::FusedOp
......@@ -49,8 +51,14 @@ namespace ngraph
const Output<Node>& filters,
const Output<Node>& bias);
const Strides& get_window_movement_strides() const { return m_window_movement_strides; }
const Strides& get_window_dilation_strides() const { return m_window_dilation_strides; }
const Strides& get_window_movement_strides() const
{
return m_window_movement_strides;
}
const Strides& get_window_dilation_strides() const
{
return m_window_dilation_strides;
}
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
......@@ -178,7 +186,7 @@ namespace ngraph
static constexpr NodeTypeInfo type_info{"ConvolutionBiasAdd", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
ConvolutionBiasAdd() = default;
ConvolutionBiasAdd(const std::shared_ptr<op::ConvolutionBias>& conv,
ConvolutionBiasAdd(const std::shared_ptr<op::v0::ConvolutionBias>& conv,
const Output<Node>& sum_input,
bool with_relu = false);
......@@ -193,8 +201,14 @@ namespace ngraph
const Strides& data_dilation_strides,
bool with_relu = false);
const Strides& get_window_movement_strides() const { return m_window_movement_strides; }
const Strides& get_window_dilation_strides() const { return m_window_dilation_strides; }
const Strides& get_window_movement_strides() const
{
return m_window_movement_strides;
}
const Strides& get_window_dilation_strides() const
{
return m_window_dilation_strides;
}
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
......@@ -217,4 +231,8 @@ namespace ngraph
bool m_with_relu;
};
}
using v0::ConvolutionBias;
using v0::ConvolutionBiasBackpropFiltersBias;
using v0::ConvolutionBiasAdd;
}
}
......@@ -24,10 +24,9 @@ namespace ngraph
{
namespace op
{
class CrossEntropy : public ngraph::op::util::FusedOp
class NGRAPH_API CrossEntropy : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static constexpr NodeTypeInfo type_info{"CrossEntropy", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
CrossEntropy() = default;
......@@ -57,10 +56,9 @@ namespace ngraph
int64_t m_ignore_index;
};
class CrossEntropyBackprop : public util::FusedOp
class NGRAPH_API CrossEntropyBackprop : public util::FusedOp
{
public:
NGRAPH_API
static constexpr NodeTypeInfo type_info{"CrossEntropyBackprop", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
CrossEntropyBackprop() = default;
......
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief DepthToSpace permutes data from the depth dimension of the input blob into
/// spatial dimensions.
......@@ -49,7 +51,8 @@ namespace ngraph
/// \brief Constructs a DepthToSpace operation.
///
/// \param data Node producing the input tensor
/// \param mode Specifies how the input depth dimension is split to block coordinates
/// \param mode Specifies how the input depth dimension is split to block
/// coordinates
/// \param block_size The size of the block of values to be moved
DepthToSpace(const Output<Node>& data,
const DepthToSpaceMode& mode,
......@@ -72,4 +75,6 @@ namespace ngraph
DepthToSpaceMode mode_from_string(const std::string& mode) const;
};
}
using v0::DepthToSpace;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Exponential Linear Unit
/// x < 0 => f(x) = alpha * (exp(x) - 1.)
......@@ -49,5 +51,7 @@ namespace ngraph
private:
double m_alpha;
};
}
using v0::Elu;
} // namespace op
} // namespace ngraph
......@@ -24,6 +24,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
///
/// \brief Class performing element-wise linear quantization.
......@@ -84,4 +86,6 @@ namespace ngraph
AutoBroadcastSpec m_auto_broadcast;
};
}
using v0::FakeQuantize;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Gaussian Error Linear Unit
/// f(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
......@@ -67,4 +69,7 @@ namespace ngraph
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::Gelu;
using v0::GeluBackpropFactor;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Operator performing General Matrix multiplication.
///
......@@ -73,4 +75,6 @@ namespace ngraph
bool m_transB;
};
}
using v0::Gemm;
}
}
......@@ -24,6 +24,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Global Response Normalization with L2 norm (across channels only).
///
......@@ -51,4 +53,6 @@ namespace ngraph
float m_bias = 1.0f;
};
}
using v0::GRN;
}
}
......@@ -24,6 +24,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Group Convolution
class NGRAPH_API GroupConvolution : public ngraph::op::util::FusedOp
......@@ -52,8 +54,14 @@ namespace ngraph
const Strides& data_dilation_strides,
const PadType& pad_type = PadType::EXPLICIT);
Shape get_weights_dimensions() const;
const Strides& get_window_movement_strides() const { return m_window_movement_strides; }
const Strides& get_window_dilation_strides() const { return m_window_dilation_strides; }
const Strides& get_window_movement_strides() const
{
return m_window_movement_strides;
}
const Strides& get_window_dilation_strides() const
{
return m_window_dilation_strides;
}
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
......@@ -85,4 +93,6 @@ namespace ngraph
bool has_groups_in_filters_shape() const;
};
}
using v0::GroupConvolution;
}
}
......@@ -30,6 +30,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Group Transpose Convolution (Deconvolution)
class NGRAPH_API GroupConvolutionTranspose : public util::FusedOp
......@@ -45,9 +47,11 @@ namespace ngraph
/// \param[in] filters The node producing filters data.
/// \param[in] strides The strides along each feature axis.
/// \param[in] dilations The dilations along each feature axis.
/// \param[in] padding_begin The padding added at the beginning of each feature axis.
/// \param[in] padding_begin The padding added at the beginning of each feature
/// axis.
/// \param[in] padding_end The padding added at the end of each feature axis.
/// \param[in] output_padding The zero-padding (adjustment) added to one side of the
/// \param[in] output_padding The zero-padding (adjustment) added to one side of
/// the
/// output.
/// \param[in] groups The number of groups the input channels and output
/// channels are divided into.
......@@ -71,7 +75,8 @@ namespace ngraph
///
/// \param[in] data The node producing input data.
/// \param[in] filters The node producing filters data.
/// \param[in] groups The number of groups the input channels and output channels
/// \param[in] groups The number of groups the input channels and output
/// channels
/// are divided into.
///
GroupConvolutionTranspose(const Output<Node>& data,
......@@ -85,7 +90,8 @@ namespace ngraph
/// \param[in] filters The node producing filters data.
/// \param[in] strides The strides along each feature axis.
/// \param[in] dilations The dilations along each feature axis.
/// \param[in] output_padding The zero-padding (adjustment) added to one side of the
/// \param[in] output_padding The zero-padding (adjustment) added to one side of
/// the
/// output.
/// \param[in] output_shape The output shape. When provided padding values are
/// automatically inferred.
......@@ -152,4 +158,6 @@ namespace ngraph
Shape m_output_shape;
};
}
using v0::GroupConvolutionTranspose;
}
}
......@@ -29,6 +29,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
///
/// \brief Class for GRU cell node.
......@@ -53,7 +55,8 @@ namespace ngraph
/// [gates_count * hidden_size, input_size].
/// \param[in] R The recurrence weight tensor with shape:
/// [gates_count * hidden_size, hidden_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step with
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with
/// shape: [batch_size, hidden_size].
/// \param[in] hidden_size The number of hidden units for recurrent cell.
///
......@@ -72,16 +75,20 @@ namespace ngraph
/// [gates_count * hidden_size, input_size].
/// \param[in] R The recurrence weight tensor with shape:
/// [gates_count * hidden_size, hidden_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step with
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with
/// shape: [batch_size, hidden_size].
/// \param[in] hidden_size The number of hidden units for recurrent cell.
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activations_alpha The vector of alpha parameters for activation
/// functions in order respective to activation list.
/// functions in order respective to activation
/// list.
/// \param[in] activations_beta The vector of beta parameters for activation
/// functions in order respective to activation list.
/// \param[in] clip The value defining clipping range [-clip, clip] on
/// functions in order respective to activation
/// list.
/// \param[in] clip The value defining clipping range [-clip,
/// clip] on
/// input of activation functions.
///
GRUCell(const Output<Node>& X,
......@@ -104,7 +111,8 @@ namespace ngraph
/// hidden_size, input_size].
/// \param[in] R The recurrence weight tensor with shape:
/// [gates_count * hidden_size, hidden_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step with
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with
/// shape: [batch_size, hidden_size].
/// \param[in] hidden_size The number of hidden units for recurrent cell.
/// \param[in] B The bias tensor for input gate with shape:
......@@ -112,12 +120,16 @@ namespace ngraph
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activations_alpha The vector of alpha parameters for activation
/// functions in order respective to activation list.
/// functions in order respective to activation
/// list.
/// \param[in] activations_beta The vector of beta parameters for activation
/// functions in order respective to activation list.
/// \param[in] clip The value defining clipping range [-clip, clip] on
/// functions in order respective to activation
/// list.
/// \param[in] clip The value defining clipping range [-clip,
/// clip] on
/// input of activation functions.
/// \param[in] linear_before_reset Whether or not to apply the linear transformation
/// \param[in] linear_before_reset Whether or not to apply the linear
/// transformation
/// before multiplying by the output of the reset
/// gate.
///
......@@ -157,10 +169,13 @@ namespace ngraph
///
/// \brief Control whether or not apply the linear transformation.
///
/// \note The linear transformation may be applied when computing the output of hidden
/// \note The linear transformation may be applied when computing the output of
/// hidden
/// gate. It's done before multiplying by the output of the reset gate.
///
bool m_linear_before_reset;
};
}
using v0::GRUCell;
}
}
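For context on m_linear_before_reset (standard GRU background, not quoted from this header): with reset gate $r_t$, previous hidden state $h_{t-1}$, and candidate-gate weights $W_h$, $R_h$, the flag selects between

    false: $\tilde h_t = \tanh(W_h x_t + R_h (r_t \odot h_{t-1}) + b_h)$
    true:  $\tilde h_t = \tanh(W_h x_t + r_t \odot (R_h h_{t-1} + b_{Rh}) + b_{Wh})$

i.e. whether the reset gate is applied before or after the recurrent linear transformation.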
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Parameterized, bounded sigmoid-like, piecewise linear
/// function. min(max(alpha*x + beta, 0), 1)
......@@ -50,4 +52,6 @@ namespace ngraph
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::HardSigmoid;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Layer Normalization
///
......@@ -135,4 +137,7 @@ namespace ngraph
double m_epsilon{1e-5};
};
}
using v0::LayerNorm;
using v0::LayerNormBackprop;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief LogSoftmax operation
class NGRAPH_API LogSoftmax : public ngraph::op::util::FusedOp
......@@ -46,5 +48,7 @@ namespace ngraph
protected:
int64_t m_axis;
};
}
using v0::LogSoftmax;
} // namespace op
} // namespace ngraph
......@@ -40,6 +40,8 @@ namespace ngraph
IOFC, // ONNX
};
namespace v0
{
///
/// \brief Class for single lstm cell node.
///
......@@ -61,7 +63,8 @@ namespace ngraph
/// (.) - is a Hadamard product (element-wise),
/// f, g, h - are activation functions.
///
/// \note This class represents only single *cell* (for current time step) and not the
/// \note This class represents only single *cell* (for current time step) and not
/// the
/// whole LSTM Sequence layer
///
/// \sa LSTMSequence, RNNCell, GRUCell
......@@ -77,9 +80,11 @@ namespace ngraph
///
/// \param[in] X The input tensor with shape: [batch_size,
/// input_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step with
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with
/// shape: [batch_size, hidden_size].
/// \param[in] initial_cell_state The cell state tensor at current time step with
/// \param[in] initial_cell_state The cell state tensor at current time step
/// with
/// shape: [batch_size, hidden_size].
/// \param[in] W The gate weights tensor with shape:
/// [4*hidden_size,
......@@ -87,15 +92,19 @@ namespace ngraph
/// \param[in] R The recurrence weights tensor with shape:
/// [4*hidden_size, hidden_size].
/// \param[in] hidden_size The number of hidden units for recurrent cell.
/// \param[in] weights_format The order of gates in weights tensors. The default
/// \param[in] weights_format The order of gates in weights tensors. The
/// default
/// format is IFCO since it is used by DNNL.
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activations_alpha The vector of alpha parameters for activation
/// functions in order respective to activation list.
/// functions in order respective to activation
/// list.
/// \param[in] activations_beta The vector of beta parameters for activation
/// functions in order respective to activation list.
/// \param[in] clip The value defining clipping range [-clip, clip] on
/// functions in order respective to activation
/// list.
/// \param[in] clip The value defining clipping range [-clip,
/// clip] on
/// input of activation functions.
/// \param[in] input_forget Controls coupling input and forget gates.
///
......@@ -118,9 +127,11 @@ namespace ngraph
///
/// \param[in] X The input tensor with shape: [batch_size,
/// input_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step with
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with
/// shape: [batch_size, hidden_size].
/// \param[in] initial_cell_state The cell state tensor at current time step with
/// \param[in] initial_cell_state The cell state tensor at current time step
/// with
/// shape: [batch_size, hidden_size].
/// \param[in] W The weight tensor with shape: [4*hidden_size,
/// input_size].
......@@ -129,15 +140,19 @@ namespace ngraph
/// \param[in] B The bias tensor for gates with shape:
/// [4*hidden_size].
/// \param[in] hidden_size The number of hidden units for recurrent cell.
/// \param[in] weights_format The order of gates in weights tensors. The default
/// \param[in] weights_format The order of gates in weights tensors. The
/// default
/// format is IFCO since it is used by DNNL.
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activations_alpha The vector of alpha parameters for activation
/// functions in order respective to activation list.
/// functions in order respective to activation
/// list.
/// \param[in] activations_beta The vector of beta parameters for activation
/// functions in order respective to activation list.
/// \param[in] clip The value defining clipping range [-clip, clip] on
/// functions in order respective to activation
/// list.
/// \param[in] clip The value defining clipping range [-clip,
/// clip] on
/// input of activation functions.
/// \param[in] input_forget Controls coupling input and forget gates.
///
......@@ -161,9 +176,11 @@ namespace ngraph
///
/// \param[in] X The input tensor with shape: [batch_size,
/// input_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step with
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with
/// shape: [batch_size, hidden_size].
/// \param[in] initial_cell_state The cell state tensor at current time step with
/// \param[in] initial_cell_state The cell state tensor at current time step
/// with
/// shape: [batch_size, hidden_size].
/// \param[in] W The weight tensor with shape: [4*hidden_size,
/// input_size].
......@@ -175,15 +192,19 @@ namespace ngraph
/// [3*hidden_size] - 3 equals to only iof gates.
/// The order is: input, output, forget gates.
/// \param[in] hidden_size The number of hidden units for recurrent cell.
/// \param[in] weights_format The order of gates in weights tensors. The default
/// \param[in] weights_format The order of gates in weights tensors. The
/// default
/// format is IFCO since it is used by DNNL.
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activations_alpha The vector of alpha parameters for activation
/// functions in order respective to activation list.
/// functions in order respective to activation
/// list.
/// \param[in] activations_beta The vector of beta parameters for activation
/// functions in order respective to activation list.
/// \param[in] clip The value defining clipping range [-clip, clip] on
/// functions in order respective to activation
/// list.
/// \param[in] clip The value defining clipping range [-clip,
/// clip] on
/// input of activation functions.
/// \param[in] input_forget Controls coupling input and forget gates.
///
......@@ -260,5 +281,7 @@ namespace ngraph
static constexpr std::size_t s_gates_count{4};
static constexpr std::size_t s_peepholes_count{3};
};
}
using v0::LSTMCell;
} // namespace op
} // namespace ngraph
......@@ -30,6 +30,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
///
/// \brief Class for lstm sequence node.
......@@ -72,8 +74,14 @@ namespace ngraph
"tanh"},
const float clip_threshold = 0,
const bool input_forget = false)
: FusedOp(
{X, initial_hidden_state, initial_cell_state, sequence_lengths, W, R, B, P})
: FusedOp({X,
initial_hidden_state,
initial_cell_state,
sequence_lengths,
W,
R,
B,
P})
, m_activations_alpha(activations_alpha)
, m_activations_beta(activations_beta)
, m_activations(activations)
......@@ -103,7 +111,8 @@ namespace ngraph
"tanh"},
const float clip_threshold = 0,
const bool input_forget = false)
: LSTMSequence(X,
: LSTMSequence(
X,
initial_hidden_state,
initial_cell_state,
sequence_lengths,
......@@ -173,5 +182,7 @@ namespace ngraph
bool m_input_forget;
LSTMWeightsFormat m_weights_format;
};
}
using v0::LSTMSequence;
} // namespace op
} // namespace ngraph
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Operator performing Matrix Multiplication.
class NGRAPH_API MatMul : public ngraph::op::util::FusedOp
......@@ -55,5 +57,7 @@ namespace ngraph
bool m_transpose_a;
bool m_transpose_b;
};
}
using v0::MatMul;
} // namespace op
} // namespace ngraph
......@@ -25,15 +25,17 @@
using namespace std;
using namespace ngraph;
constexpr NodeTypeInfo op::Mod::type_info;
constexpr NodeTypeInfo op::v1::Mod::type_info;
op::Mod::Mod(const Output<Node>& A, const Output<Node>& B, const AutoBroadcastSpec& auto_broadcast)
op::v1::Mod::Mod(const Output<Node>& A,
const Output<Node>& B,
const AutoBroadcastSpec& auto_broadcast)
: FusedOp({A, B})
, m_auto_broadcast(auto_broadcast)
{
}
NodeVector op::Mod::decompose_op() const
NodeVector op::v1::Mod::decompose_op() const
{
const auto dividend = make_shared<op::Abs>(input_value(0));
const auto dividend_sign = make_shared<op::Sign>(input_value(0));
......@@ -53,7 +55,7 @@ NodeVector op::Mod::decompose_op() const
return {make_shared<op::v1::Multiply>(dividend_sign, mod, m_auto_broadcast)};
}
shared_ptr<Node> op::Mod::copy_with_new_args(const NodeVector& new_args) const
shared_ptr<Node> op::v1::Mod::copy_with_new_args(const NodeVector& new_args) const
{
return make_shared<Mod>(new_args.at(0), new_args.at(1), m_auto_broadcast);
}
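Spelled out, the decomposition above computes truncated (fmod-style) modulo, which is why the ONNX importer hunk earlier maps only fmod=1 onto this op:

    $A \bmod B = \operatorname{sign}(A) \cdot \left( |A| - \left\lfloor |A| / |B| \right\rfloor \cdot |B| \right)$

so the result always carries the sign of the dividend, matching C's fmod.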
......@@ -23,15 +23,17 @@
namespace ngraph
{
namespace op
{
namespace v1
{
/// \brief Mod returns an element-wise division reminder with two given tensors applying
/// multi-directional broadcast rules.
class Mod : public ngraph::op::util::FusedOp
class NGRAPH_API Mod : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static constexpr NodeTypeInfo type_info{"Mod", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Mod() = default;
/// \brief Constructs a Mod node.
///
/// \param A - Dividend tensor
......@@ -51,4 +53,10 @@ namespace ngraph
AutoBroadcastSpec m_auto_broadcast;
};
}
namespace v0
{
using v1::Mod;
}
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Operator performing Mean Variance Normalization
///
......@@ -37,7 +39,8 @@ namespace ngraph
/// \param data Input tensor with data
/// \param normalize_variance flag that denotes whether to perform variance
/// normalization.
/// \param across_channels flag that denotes if mean values are shared across channels.
/// \param across_channels flag that denotes if mean values are shared across
/// channels.
/// \param eps the number to be added to the variance to avoid division by zero when
/// normalizing the value
///
......@@ -77,5 +80,7 @@ namespace ngraph
bool m_normalize_variance;
AxisSet m_reduction_axes;
};
}
using v0::MVN;
} // namespace op
} // namespace ngraph
......@@ -25,6 +25,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Normalization input tensor with L2 norm.
///
......@@ -41,7 +43,8 @@ namespace ngraph
/// \param axes - Node indicating axes along which reduction is
/// calculated
/// \param eps - The epsilon added to L2 norm.
/// \param eps_mode - Specifies how eps is combined with L2 value calculated
/// \param eps_mode - Specifies how eps is combined with L2 value
/// calculated
/// before division
///
NormalizeL2(const Output<Node>& data,
......@@ -63,4 +66,6 @@ namespace ngraph
EpsMode m_eps_mode;
};
}
using v0::NormalizeL2;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief pdpd slice op
///
......@@ -104,4 +106,7 @@ namespace ngraph
std::vector<int64_t> m_upper_bounds;
};
}
using v0::PartialSlice;
using v0::PartialSliceBackprop;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Parametrized Relu
/// x < 0 => f(x) = x * slope
......@@ -46,4 +48,6 @@ namespace ngraph
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::PRelu;
}
}
......@@ -29,6 +29,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
///
/// \brief Class for single RNN cell node.
......@@ -43,7 +45,8 @@ namespace ngraph
/// * - Is a dot product,
/// f - is activation functions.
///
/// \note This class represents only single *cell* (for current time step) and not the
/// \note This class represents only single *cell* (for current time step) and not
/// the
/// whole LSTM Sequence layer
///
/// \sa LSTMSequence, LSTMCell, GRUCell
......@@ -59,7 +62,8 @@ namespace ngraph
///
/// \param[in] X The input tensor with shape: [batch_size,
/// input_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step with
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with
/// shape: [batch_size, hidden_size].
/// \param[in] W The weight tensor with shape: [hidden_size,
/// input_size].
......@@ -69,13 +73,17 @@ namespace ngraph
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activations_alpha The vector of alpha parameters for activation
/// functions in order respective to activation list.
/// functions in order respective to activation
/// list.
/// \param[in] activations_beta The vector of beta parameters for activation
/// functions in order respective to activation list.
/// \param[in] clip The value defining clipping range [-clip, clip] on
/// functions in order respective to activation
/// list.
/// \param[in] clip The value defining clipping range [-clip,
/// clip] on
/// input of activation functions.
///
RNNCell(const Output<Node>& X,
RNNCell(
const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& W,
const Output<Node>& R,
......@@ -90,7 +98,8 @@ namespace ngraph
///
/// \param[in] X The input tensor with shape: [batch_size,
/// input_size].
/// \param[in] initial_hidden_state The hidden state tensor at current time step with
/// \param[in] initial_hidden_state The hidden state tensor at current time step
/// with
/// shape: [batch_size, hidden_size].
/// \param[in] W The weight tensor with shape: [hidden_size,
/// input_size].
......@@ -102,13 +111,17 @@ namespace ngraph
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activations_alpha The vector of alpha parameters for activation
/// functions in order respective to activation list.
/// functions in order respective to activation
/// list.
/// \param[in] activations_beta The vector of beta parameters for activation
/// functions in order respective to activation list.
/// \param[in] clip The value defining clipping range [-clip, clip] on
/// functions in order respective to activation
/// list.
/// \param[in] clip The value defining clipping range [-clip,
/// clip] on
/// input of activation functions.
///
RNNCell(const Output<Node>& X,
RNNCell(
const Output<Node>& X,
const Output<Node>& initial_hidden_state,
const Output<Node>& W,
const Output<Node>& R,
......@@ -139,5 +152,7 @@ namespace ngraph
static constexpr std::size_t s_gates_count{1};
};
}
using v0::RNNCell;
} // namespace op
} // namespace ngraph
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Operator performing Scale Shift transformation.
///
......@@ -49,4 +51,6 @@ namespace ngraph
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::ScaleShift;
}
}
......@@ -26,15 +26,15 @@
using namespace std;
using namespace ngraph;
constexpr NodeTypeInfo op::v1::Selu::type_info;
constexpr NodeTypeInfo op::v0::Selu::type_info;
op::v1::Selu::Selu(const Output<Node>& data, const Output<Node>& alpha, const Output<Node>& lambda)
op::v0::Selu::Selu(const Output<Node>& data, const Output<Node>& alpha, const Output<Node>& lambda)
: FusedOp({data, alpha, lambda})
{
constructor_validate_and_infer_types();
}
NodeVector op::v1::Selu::decompose_op() const
NodeVector op::v0::Selu::decompose_op() const
{
const auto data = input_value(0);
const auto alpha = input_value(1);
......@@ -47,8 +47,8 @@ NodeVector op::v1::Selu::decompose_op() const
alpha)};
}
shared_ptr<Node> op::v1::Selu::copy_with_new_args(const NodeVector& new_args) const
shared_ptr<Node> op::v0::Selu::copy_with_new_args(const NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<v1::Selu>(new_args.at(0), new_args.at(1), new_args.at(2));
return make_shared<v0::Selu>(new_args.at(0), new_args.at(1), new_args.at(2));
}
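For reference, the decomposition evaluates the standard SELU activation, with alpha and lambda supplied as tensor inputs rather than compile-time constants:

    $\operatorname{selu}(x) = \lambda \cdot \begin{cases} x & x > 0 \\ \alpha \, (e^{x} - 1) & x \le 0 \end{cases}$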
......@@ -24,13 +24,13 @@ namespace ngraph
{
namespace op
{
namespace v1
namespace v0
{
/// \brief Performs a SELU activation function on all elements of the input node
class NGRAPH_API Selu : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"Selu", 1};
static constexpr NodeTypeInfo type_info{"Selu", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Selu() = default;
/// \brief Constructs a Selu node.
......@@ -48,6 +48,6 @@ namespace ngraph
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v1::Selu;
using v0::Selu;
} // namespace op
} // namespace ngraph
......@@ -24,6 +24,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Permutes data in the channel dimension of the input
class NGRAPH_API ShuffleChannels : public ngraph::op::util::FusedOp
......@@ -36,9 +38,11 @@ namespace ngraph
///
/// \param data - Node producing the input tensor
/// \param axis - channel dimension index in the data tensor. A negative value means
/// that the index should be calculated from the back of the input data
/// that the index should be calculated from the back of the input
/// data
/// shape.
/// \param groups - number of groups the channel dimension specified by axis should be
/// \param groups - number of groups the channel dimension specified by axis should
/// be
/// split into
ShuffleChannels(const Output<Node>& data,
const int axis = 1,
......@@ -66,4 +70,6 @@ namespace ngraph
size_t m_groups;
};
}
using v0::ShuffleChannels;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
class NGRAPH_API SoftmaxCrossEntropy : public ngraph::op::util::FusedOp
{
......@@ -33,9 +35,11 @@ namespace ngraph
/// \brief Softmax + CrossEntropy for numerical stabilization
/// \param arg1 Node that produces the tensor to normalize
/// \param arg2 Node that produces ground truth labels for the input
/// \param soft_label flag indicating whether to interpret the given labels as soft
/// \param soft_label flag indicating whether to interpret the given labels as
/// soft
/// labels
/// \param ignore_index Specifies a target value that is ignored and does not contribute
/// \param ignore_index Specifies a target value that is ignored and does not
/// contribute
/// to the input gradient. Only valid if soft_label is set to False
SoftmaxCrossEntropy(const Output<Node>& arg1,
const Output<Node>& arg2,
......@@ -65,9 +69,11 @@ namespace ngraph
/// \param delta Node that produces the delta during bprop
/// \param softmax Node that produces softmax from fprop
/// \param labels Node that produces ground truth labels for input
/// \param soft_label flag indicating whether to interpret the given labels as soft
/// \param soft_label flag indicating whether to interpret the given labels as
/// soft
/// labels
/// \param ignore_index Specifies a target value that is ignored and does not contribute
/// \param ignore_index Specifies a target value that is ignored and does not
/// contribute
/// to the input gradient. Only valid if soft_label is set to False
SoftmaxCrossEntropyBackprop(const Output<Node>& delta,
const Output<Node>& softmax,
......@@ -87,5 +93,8 @@ namespace ngraph
bool m_soft_label;
int64_t m_ignore_index;
};
}
using v0::SoftmaxCrossEntropy;
using v0::SoftmaxCrossEntropyBackprop;
} // namespace op
} // namespace ngraph
......@@ -23,7 +23,10 @@ namespace ngraph
{
namespace op
{
/// \brief SpaceToDepth permutes input tensor blocks of spatial data into depth dimension.
namespace v0
{
/// \brief SpaceToDepth permutes input tensor blocks of spatial data into depth
/// dimension.
///
/// \note Values from the height and width dimensions are moved to the depth dimension.
///
......@@ -70,4 +73,6 @@ namespace ngraph
SpaceToDepthMode mode_from_string(const std::string& mode) const;
};
}
using v0::SpaceToDepth;
}
}
......@@ -25,6 +25,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Splits the input tensor into a list of smaller tensors ("pieces")
class NGRAPH_API Split : public ngraph::op::util::FusedOp
......@@ -49,7 +51,8 @@ namespace ngraph
/// \param axis - indicates an axis along which the input tensor should be split.
/// Negative values mean counting from the back of the input tensor's
/// shape.
/// \param splits - a list of lengths that the input tensor should be split into. Use this
/// \param splits - a list of lengths that the input tensor should be split into. Use
/// this
/// constructor to split the input tensor into variable-length chunks.
Split(const Output<Node>& data, const int axis, const std::vector<size_t>& splits);
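// Illustrative usage (a hypothetical sketch, not part of this header): splitting a
// tensor of shape {6, 4} along axis 0 into pieces of lengths {2, 4} yields outputs
// of shapes {2, 4} and {4, 4}:
//
//     auto pieces = std::make_shared<op::v0::Split>(data, 0, std::vector<size_t>{2, 4});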
......@@ -65,7 +68,8 @@ namespace ngraph
private:
/// \brief Adjusts the axis for negative values
///
/// \note Negative values mean that the API consumer wants to point the axis location
/// \note Negative values mean that the API consumer wants to point the axis
/// location
/// from the back of the tensor. This is similar to the way NumPy works.
///
/// \param axis - original axis value; negative values are accepted
......@@ -82,4 +86,6 @@ namespace ngraph
std::vector<size_t> m_splits;
};
}
using v0::Split;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Calculates an element-wise squared difference between two tensors
///
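/// That is (illustrative): output[i] = (x1[i] - x2[i])^2, with the auto-broadcast
/// specification applied to the inputs first.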
......@@ -38,7 +40,8 @@ namespace ngraph
/// \param x1 First input tensor
/// \param x2 Second input tensor
/// \param auto_broadcast Auto broadcast specification
SquaredDifference(const Output<Node>& x1,
SquaredDifference(
const Output<Node>& x1,
const Output<Node>& x2,
const AutoBroadcastSpec& auto_broadcast = AutoBroadcastType::NUMPY);
......@@ -56,5 +59,7 @@ namespace ngraph
private:
AutoBroadcastSpec m_autobroadcast;
};
}
using v0::SquaredDifference;
} // namespace op
} // namespace ngraph
......@@ -26,6 +26,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
class NGRAPH_API Squeeze : public ngraph::op::util::FusedOp
{
......@@ -41,4 +43,6 @@ namespace ngraph
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::Squeeze;
}
}
......@@ -26,6 +26,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
class NGRAPH_API Unsqueeze : public ngraph::op::util::FusedOp
{
......@@ -42,4 +44,6 @@ namespace ngraph
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::Unsqueeze;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Gather slices from params with shapes given by indices
class NGRAPH_API GatherND : public Op
......@@ -49,4 +51,6 @@ namespace ngraph
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::GatherND;
}
}
......@@ -24,6 +24,8 @@ namespace ngraph
{
NodeVector get_output_elements(const std::shared_ptr<Node>& mon);
namespace v0
{
/// \brief Operation to get an output from a node.
class NGRAPH_API GetOutputElement : public Op
{
......@@ -54,6 +56,8 @@ namespace ngraph
size_t m_n;
};
}
using v0::GetOutputElement;
}
inline std::shared_ptr<Node> get_output_element(const Output<Node>& output,
bool for_get_output_element = false)
......
......@@ -39,9 +39,9 @@ shared_ptr<Node> op::v0::GreaterEq::copy_with_new_args(const NodeVector& new_arg
//---------------------------------- v1 ----------------------------------------
constexpr NodeTypeInfo op::v1::GreaterEq::type_info;
constexpr NodeTypeInfo op::v1::GreaterEqual::type_info;
op::v1::GreaterEq::GreaterEq(const Output<Node>& arg0,
op::v1::GreaterEqual::GreaterEqual(const Output<Node>& arg0,
const Output<Node>& arg1,
const AutoBroadcastSpec& auto_broadcast)
: BinaryElementwiseComparison(arg0, arg1, auto_broadcast)
......@@ -49,8 +49,8 @@ op::v1::GreaterEq::GreaterEq(const Output<Node>& arg0,
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::v1::GreaterEq::copy_with_new_args(const NodeVector& new_args) const
shared_ptr<Node> op::v1::GreaterEqual::copy_with_new_args(const NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<op::v1::GreaterEq>(new_args.at(0), new_args.at(1), this->get_autob());
return make_shared<op::v1::GreaterEqual>(new_args.at(0), new_args.at(1), this->get_autob());
}
......@@ -49,19 +49,19 @@ namespace ngraph
namespace v1
{
/// \brief Elementwise greater-than-or-equal operation.
class NGRAPH_API GreaterEq : public util::BinaryElementwiseComparison
class NGRAPH_API GreaterEqual : public util::BinaryElementwiseComparison
{
public:
static constexpr NodeTypeInfo type_info{"GreaterEq", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a greater-than-or-equal operation.
GreaterEq() = default;
GreaterEqual() = default;
/// \brief Constructs a greater-than-or-equal operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
/// \param auto_broadcast Auto broadcast specification
GreaterEq(const Output<Node>& arg0,
GreaterEqual(const Output<Node>& arg0,
const Output<Node>& arg1,
const AutoBroadcastSpec& auto_broadcast =
AutoBroadcastSpec(AutoBroadcastType::NUMPY));
......@@ -70,6 +70,9 @@ namespace ngraph
copy_with_new_args(const NodeVector& new_args) const override;
size_t get_version() const override { return 1; }
};
// DO NOT USE. Will be removed once users switch to GreaterEqual
using GreaterEq = GreaterEqual;
} // namespace v1
using v0::GreaterEq;
......
......@@ -58,7 +58,6 @@ namespace ngraph
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a less-than-or-equal operation.
LessEq() = default;
/// \brief Constructs a less-than-or-equal operation.
///
/// \param arg0 Node that produces the first input tensor.
......
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Elementwise natural log operation.
class NGRAPH_API Log : public util::UnaryElementwiseArithmetic
......@@ -42,4 +44,6 @@ namespace ngraph
const NodeVector& deltas) override;
};
}
using v0::Log;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
// clang-format off
/// \brief Elementwise Local Response Normalization (LRN) operation.
......@@ -80,4 +82,6 @@ namespace ngraph
size_t m_size;
};
}
using v0::LRN;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Elementwise negative operation.
class NGRAPH_API Negative : public util::UnaryElementwiseArithmetic
......@@ -43,6 +45,8 @@ namespace ngraph
const NodeVector& deltas) override;
};
}
using v0::Negative;
}
NGRAPH_API
std::shared_ptr<Node> operator-(const Output<Node>& arg0);
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
// This collection contains one entry for each op. If an op is added it must be
// added to this list.
//
// To use this list, define a macro named exactly NGRAPH_OP before including this
// file, and #undef it when you are done.
// As an example, if you wanted to make a list of all op names as strings, you could do this:
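//
//     (a sketch; `all_op_names` is an illustrative name, not part of nGraph)
//     #define NGRAPH_OP(NAME, NAMESPACE, VERSION) #NAME,
//     static const char* all_op_names[] = {
//     #include "ngraph/op/op_version_tbl.hpp"
//     };
//     #undef NGRAPH_OP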
#ifndef NGRAPH_OP
#warning "NGRAPH_OP not defined"
#define NGRAPH_OP(NAME, NAMESPACE, VERSION)
#endif
NGRAPH_OP(Abs, ngraph::op::v0, 0)
NGRAPH_OP(Acos, ngraph::op::v0, 0)
NGRAPH_OP(Add, ngraph::op::v0, 0)
NGRAPH_OP(Add, ngraph::op::v1, 1)
NGRAPH_OP(All, ngraph::op::v0, 0)
NGRAPH_OP(AllReduce, ngraph::op::v0, 0)
NGRAPH_OP(And, ngraph::op::v0, 0)
NGRAPH_OP(Any, ngraph::op, 0)
NGRAPH_OP(ArgMax, ngraph::op::v0, 0)
NGRAPH_OP(ArgMin, ngraph::op::v0, 0)
NGRAPH_OP(Asin, ngraph::op::v0, 0)
NGRAPH_OP(Atan, ngraph::op, 0)
NGRAPH_OP(Atan2, ngraph::op, 0)
NGRAPH_OP(AvgPool, ngraph::op::v0, 0)
NGRAPH_OP(AvgPool, ngraph::op::v1, 1)
NGRAPH_OP(AvgPoolBackprop, ngraph::op::v0, 0)
NGRAPH_OP(AvgPoolBackprop, ngraph::op::v1, 1)
NGRAPH_OP(BatchMatMul, ngraph::op, 0)
NGRAPH_OP(BatchMatMulTranspose, ngraph::op, 0)
NGRAPH_OP(BatchNormInference, ngraph::op, 0)
NGRAPH_OP(BatchNormTraining, ngraph::op, 0)
NGRAPH_OP(BatchNormTrainingBackprop, ngraph::op, 0)
NGRAPH_OP(BinaryConvolution, ngraph::op::v1, 1)
NGRAPH_OP(Broadcast, ngraph::op::v0, 0)
NGRAPH_OP(Broadcast, ngraph::op::v1, 1)
NGRAPH_OP(BroadcastDistributed, ngraph::op::v0, 0)
NGRAPH_OP(BroadcastLike, ngraph::op::v0, 0)
NGRAPH_OP(CTCGreedyDecoder, ngraph::op::v0, 0)
NGRAPH_OP(Ceiling, ngraph::op::v0, 0)
NGRAPH_OP(Clamp, ngraph::op::v0, 0)
NGRAPH_OP(CompiledKernel, ngraph::op, 0)
NGRAPH_OP(Concat, ngraph::op::v0, 0)
NGRAPH_OP(Constant, ngraph::op, 0)
NGRAPH_OP(Convert, ngraph::op, 0)
NGRAPH_OP(Convolution, ngraph::op::v0, 0)
NGRAPH_OP(Convolution, ngraph::op::v1, 1)
NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v0, 0)
NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v1, 1)
NGRAPH_OP(ConvolutionBackpropFilters, ngraph::op::v0, 0)
NGRAPH_OP(ConvolutionBackpropFilters, ngraph::op::v1, 1)
NGRAPH_OP(ConvolutionBias, ngraph::op::v0, 0)
NGRAPH_OP(ConvolutionBiasAdd, ngraph::op::v0, 0)
NGRAPH_OP(ConvolutionBiasBackpropFiltersBias, ngraph::op::v0, 0)
NGRAPH_OP(Cos, ngraph::op, 0)
NGRAPH_OP(Cosh, ngraph::op, 0)
NGRAPH_OP(CropAndResize, ngraph::op, 0)
NGRAPH_OP(CrossEntropy, ngraph::op, 0)
NGRAPH_OP(CrossEntropyBackprop, ngraph::op, 0)
NGRAPH_OP(CumSum, ngraph::op::v0, 0)
NGRAPH_OP(DepthToSpace, ngraph::op::v0, 0)
NGRAPH_OP(Dequantize, ngraph::op, 0)
NGRAPH_OP(DetectionOutput, ngraph::op::v0, 0)
NGRAPH_OP(Divide, ngraph::op::v0, 0)
NGRAPH_OP(Divide, ngraph::op::v1, 1)
NGRAPH_OP(Dot, ngraph::op::v0, 0)
NGRAPH_OP(DynBroadcast, ngraph::op, 0)
NGRAPH_OP(DynPad, ngraph::op, 0)
NGRAPH_OP(DynReplaceSlice, ngraph::op, 0)
NGRAPH_OP(DynReshape, ngraph::op::v0, 0)
NGRAPH_OP(DynSlice, ngraph::op, 0)
NGRAPH_OP(Elu, ngraph::op::v0, 0)
NGRAPH_OP(EmbeddingLookup, ngraph::op::v0, 0)
NGRAPH_OP(Equal, ngraph::op::v0, 0)
NGRAPH_OP(Equal, ngraph::op::v1, 1)
NGRAPH_OP(Erf, ngraph::op::v0, 0)
NGRAPH_OP(Exp, ngraph::op::v0, 0)
NGRAPH_OP(FakeQuantize, ngraph::op::v0, 0)
NGRAPH_OP(Floor, ngraph::op::v0, 0)
NGRAPH_OP(FloorMod, ngraph::op::v1, 1)
NGRAPH_OP(GRN, ngraph::op::v0, 0)
NGRAPH_OP(GRUCell, ngraph::op::v0, 0)
NGRAPH_OP(Gather, ngraph::op::v0, 0)
NGRAPH_OP(Gather, ngraph::op::v1, 1)
NGRAPH_OP(GatherND, ngraph::op::v0, 0)
NGRAPH_OP(Gelu, ngraph::op::v0, 0)
NGRAPH_OP(GeluBackpropFactor, ngraph::op::v0, 0)
NGRAPH_OP(Gemm, ngraph::op::v0, 0)
NGRAPH_OP(GenerateMask, ngraph::op::v0, 0)
NGRAPH_OP(GenerateMask, ngraph::op::v1, 1)
NGRAPH_OP(GetOutputElement, ngraph::op::v0, 0)
NGRAPH_OP(Greater, ngraph::op::v0, 0)
NGRAPH_OP(Greater, ngraph::op::v1, 1)
NGRAPH_OP(GreaterEq, ngraph::op::v0, 0)
NGRAPH_OP(GreaterEqual, ngraph::op::v1, 1)
NGRAPH_OP(GroupConvolution, ngraph::op::v0, 0)
NGRAPH_OP(GroupConvolutionTranspose, ngraph::op::v0, 0)
NGRAPH_OP(HardSigmoid, ngraph::op::v0, 0)
NGRAPH_OP(Interpolate, ngraph::op::v0, 0)
NGRAPH_OP(LRN, ngraph::op::v0, 0)
NGRAPH_OP(LSTMCell, ngraph::op::v0, 0)
NGRAPH_OP(LSTMSequence, ngraph::op::v0, 0)
NGRAPH_OP(LayerNorm, ngraph::op::v0, 0)
NGRAPH_OP(LayerNormBackprop, ngraph::op::v0, 0)
NGRAPH_OP(Less, ngraph::op::v0, 0)
NGRAPH_OP(Less, ngraph::op::v1, 1)
NGRAPH_OP(LessEq, ngraph::op::v0, 0)
NGRAPH_OP(LessEqual, ngraph::op::v1, 1)
NGRAPH_OP(Log, ngraph::op, 0)
NGRAPH_OP(LogSoftmax, ngraph::op::v0, 0)
NGRAPH_OP(LogicalAnd, ngraph::op::v1, 1)
NGRAPH_OP(LogicalNot, ngraph::op::v1, 1)
NGRAPH_OP(LogicalOr, ngraph::op::v1, 1)
NGRAPH_OP(LogicalXor, ngraph::op::v1, 1)
NGRAPH_OP(MVN, ngraph::op::v0, 0)
NGRAPH_OP(MatMul, ngraph::op::v0, 0)
NGRAPH_OP(Max, ngraph::op::v0, 0)
NGRAPH_OP(MaxPool, ngraph::op::v0, 0)
NGRAPH_OP(MaxPool, ngraph::op::v1, 1)
NGRAPH_OP(MaxPoolBackprop, ngraph::op::v0, 0)
NGRAPH_OP(MaxPoolBackprop, ngraph::op::v1, 1)
NGRAPH_OP(Maximum, ngraph::op::v0, 0)
NGRAPH_OP(Maximum, ngraph::op::v1, 1)
NGRAPH_OP(Min, ngraph::op::v0, 0)
NGRAPH_OP(Minimum, ngraph::op::v0, 0)
NGRAPH_OP(Minimum, ngraph::op::v1, 1)
NGRAPH_OP(Mod, ngraph::op::v1, 1)
NGRAPH_OP(Multiply, ngraph::op::v0, 0)
NGRAPH_OP(Multiply, ngraph::op::v1, 1)
NGRAPH_OP(Negative, ngraph::op, 0)
NGRAPH_OP(NormalizeL2, ngraph::op::v0, 0)
NGRAPH_OP(Not, ngraph::op::v0, 0)
NGRAPH_OP(NotEqual, ngraph::op::v0, 0)
NGRAPH_OP(NotEqual, ngraph::op::v1, 1)
NGRAPH_OP(OneHot, ngraph::op::v0, 0)
NGRAPH_OP(Or, ngraph::op::v0, 0)
NGRAPH_OP(PRelu, ngraph::op::v0, 0)
NGRAPH_OP(PSROIPooling, ngraph::op::v0, 0)
NGRAPH_OP(Pad, ngraph::op::v0, 0)
NGRAPH_OP(Pad, ngraph::op::v1, 1)
NGRAPH_OP(Parameter, ngraph::op, 0)
NGRAPH_OP(PartialSlice, ngraph::op::v0, 0)
NGRAPH_OP(PartialSliceBackprop, ngraph::op::v0, 0)
NGRAPH_OP(Passthrough, ngraph::op, 0)
NGRAPH_OP(Power, ngraph::op::v0, 0)
NGRAPH_OP(Power, ngraph::op::v1, 1)
NGRAPH_OP(PriorBox, ngraph::op::v0, 0)
NGRAPH_OP(PriorBoxClustered, ngraph::op::v0, 0)
NGRAPH_OP(Product, ngraph::op, 0)
NGRAPH_OP(Proposal, ngraph::op::v0, 0)
NGRAPH_OP(Quantize, ngraph::op::v0, 0)
NGRAPH_OP(QuantizedConvolution, ngraph::op::v0, 0)
NGRAPH_OP(QuantizedConvolutionBias, ngraph::op, 0)
NGRAPH_OP(QuantizedConvolutionBiasAdd, ngraph::op, 0)
NGRAPH_OP(QuantizedConvolutionBiasSignedAdd, ngraph::op, 0)
NGRAPH_OP(QuantizedConvolutionRelu, ngraph::op, 0)
NGRAPH_OP(QuantizedDot, ngraph::op::v0, 0)
NGRAPH_OP(QuantizedDotBias, ngraph::op, 0)
NGRAPH_OP(RNNCell, ngraph::op::v0, 0)
NGRAPH_OP(ROIPooling, ngraph::op::v0, 0)
NGRAPH_OP(RandomUniform, ngraph::op, 0)
NGRAPH_OP(Range, ngraph::op, 0)
NGRAPH_OP(Reciprocal, ngraph::op, 0)
NGRAPH_OP(Recv, ngraph::op::v0, 0)
NGRAPH_OP(ReduceMax, ngraph::op::v1, 1)
NGRAPH_OP(ReduceMean, ngraph::op::v1, 1)
NGRAPH_OP(ReduceMin, ngraph::op::v1, 1)
NGRAPH_OP(ReduceProd, ngraph::op::v1, 1)
NGRAPH_OP(ReduceSum, ngraph::op::v1, 1)
NGRAPH_OP(RegionYolo, ngraph::op::v0, 0)
NGRAPH_OP(Relu, ngraph::op::v0, 0)
NGRAPH_OP(ReluBackprop, ngraph::op::v0, 0)
NGRAPH_OP(ReorgYolo, ngraph::op::v0, 0)
NGRAPH_OP(ReplaceSlice, ngraph::op::v0, 0)
NGRAPH_OP(Reshape, ngraph::op::v0, 0)
NGRAPH_OP(Reshape, ngraph::op::v1, 1)
NGRAPH_OP(Result, ngraph::op, 0)
NGRAPH_OP(Reverse, ngraph::op::v0, 0)
NGRAPH_OP(Reverse, ngraph::op::v1, 1)
NGRAPH_OP(ReverseSequence, ngraph::op::v0, 0)
NGRAPH_OP(ScalarConstantLikeBase, ngraph::op, 0)
NGRAPH_OP(ScaleShift, ngraph::op::v0, 0)
NGRAPH_OP(ScatterAdd, ngraph::op::v0, 0)
NGRAPH_OP(ScatterNDAdd, ngraph::op::v0, 0)
NGRAPH_OP(Select, ngraph::op::v0, 0)
NGRAPH_OP(Selu, ngraph::op::v0, 0)
NGRAPH_OP(Send, ngraph::op::v0, 0)
NGRAPH_OP(ShapeOf, ngraph::op::v0, 0)
NGRAPH_OP(ShuffleChannels, ngraph::op::v0, 0)
NGRAPH_OP(Sigmoid, ngraph::op::v0, 0)
NGRAPH_OP(SigmoidBackprop, ngraph::op::v0, 0)
NGRAPH_OP(Sign, ngraph::op::v0, 0)
NGRAPH_OP(Sin, ngraph::op::v0, 0)
NGRAPH_OP(Sinh, ngraph::op::v0, 0)
NGRAPH_OP(Slice, ngraph::op::v0, 0)
NGRAPH_OP(Softmax, ngraph::op::v0, 0)
NGRAPH_OP(Softmax, ngraph::op::v1, 1)
NGRAPH_OP(SoftmaxCrossEntropy, ngraph::op::v0, 0)
NGRAPH_OP(SoftmaxCrossEntropyBackprop, ngraph::op::v0, 0)
NGRAPH_OP(SpaceToDepth, ngraph::op::v0, 0)
NGRAPH_OP(Split, ngraph::op::v0, 0)
NGRAPH_OP(Sqrt, ngraph::op, 0)
NGRAPH_OP(SquaredDifference, ngraph::op::v0, 0)
NGRAPH_OP(Squeeze, ngraph::op::v0, 0)
NGRAPH_OP(StopGradient, ngraph::op::v0, 0)
NGRAPH_OP(StridedSlice, ngraph::op::v1, 1)
NGRAPH_OP(Subtract, ngraph::op::v0, 0)
NGRAPH_OP(Sum, ngraph::op::v0, 0)
NGRAPH_OP(Tan, ngraph::op::v0, 0)
NGRAPH_OP(Tanh, ngraph::op::v0, 0)
NGRAPH_OP(TensorIterator, ngraph::op::v0, 0)
NGRAPH_OP(Tile, ngraph::op::v0, 0)
NGRAPH_OP(TopK, ngraph::op::v0, 0)
NGRAPH_OP(TopK, ngraph::op::v1, 1)
NGRAPH_OP(Transpose, ngraph::op::v0, 0)
NGRAPH_OP(Unsqueeze, ngraph::op::v0, 0)
NGRAPH_OP(VariadicSplit, ngraph::op::v1, 1)
NGRAPH_OP(Xor, ngraph::op::v0, 0)
......@@ -25,6 +25,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// An op directly representing backend-specific code.
///
......@@ -33,9 +35,11 @@ namespace ngraph
/// operation language in certain modes.
class Passthrough;
}
using v0::Passthrough;
}
}
class NGRAPH_API ngraph::op::Passthrough final : public Op
class NGRAPH_API ngraph::op::v0::Passthrough final : public Op
{
public:
static constexpr NodeTypeInfo type_info{"Passthrough", 0};
......
......@@ -23,9 +23,12 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Quantize operation
/// Maps real input (r) to quantized output (q) using scale (s), zero point (o) and
/// Maps real input (r) to quantized output (q) using scale (s), zero point (o)
/// and
/// round mode: q = ROUND(r / s) + o
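/// For instance (an illustrative sketch, assuming round-half-away-from-zero):
/// with s = 0.5 and o = 3, r = 2.2 maps to q = ROUND(2.2 / 0.5) + 3 = 4 + 3 = 7.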
class NGRAPH_API Quantize : public ngraph::op::Op
{
......@@ -110,4 +113,6 @@ namespace ngraph
RoundMode m_round_mode;
};
}
using v0::Quantize;
}
}
......@@ -22,6 +22,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
class NGRAPH_API QuantizedConvolution : public Op
{
......@@ -67,8 +69,14 @@ namespace ngraph
QuantizedConvolution() = default;
const Strides& get_window_movement_strides() const { return m_window_movement_strides; }
const Strides& get_window_dilation_strides() const { return m_window_dilation_strides; }
const Strides& get_window_movement_strides() const
{
return m_window_movement_strides;
}
const Strides& get_window_dilation_strides() const
{
return m_window_dilation_strides;
}
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
......@@ -97,4 +105,6 @@ namespace ngraph
ngraph::AxisSet m_output_axes;
};
}
using v0::QuantizedConvolution;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
class NGRAPH_API QuantizedDot : public Op
{
......@@ -83,4 +85,6 @@ namespace ngraph
ngraph::AxisSet m_output_axes;
};
}
using v0::QuantizedDot;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
class NGRAPH_API Recv : public Op
{
......@@ -47,4 +49,6 @@ namespace ngraph
int m_src_id;
};
}
using v0::Recv;
}
}
......@@ -30,7 +30,7 @@ namespace ngraph
class NGRAPH_API ReduceProd : public util::ArithmeticReductionKeepDims
{
public:
static constexpr NodeTypeInfo type_info{"Product", 1};
static constexpr NodeTypeInfo type_info{"ReduceProd", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a product reduction operation.
ReduceProd() = default;
......
......@@ -77,7 +77,7 @@ namespace ngraph
class NGRAPH_API ReduceSum : public util::ArithmeticReductionKeepDims
{
public:
static constexpr NodeTypeInfo type_info{"Sum", 1};
static constexpr NodeTypeInfo type_info{"ReduceSum", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a summation operation.
ReduceSum() = default;
......
......@@ -26,6 +26,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Elementwise Relu operation.
///
......@@ -61,10 +63,14 @@ namespace ngraph
/// \brief Constructs a ReluBackprop operation.
///
/// \param arg Node that produces the relu forward input tensor.
ReluBackprop(std::shared_ptr<ngraph::Node> arg, std::shared_ptr<ngraph::Node> delta);
ReluBackprop(std::shared_ptr<ngraph::Node> arg,
std::shared_ptr<ngraph::Node> delta);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::Relu;
using v0::ReluBackprop;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
// clang-format off
/// \brief Takes two input tensors of identical rank, with the second tensor no larger than
......@@ -62,7 +64,8 @@ namespace ngraph
/// \param arg1 The tensor to write into `arg0`.
/// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
/// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
/// \param strides The slicing strides; for example, strides of `{n,m}` means to take
/// \param strides The slicing strides; for example, strides of `{n,m}` means to
/// take
/// every nth row and every mth column of `arg0` as part of the
/// slice to be replaced.
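/// For instance (an illustrative sketch): with `arg0` of shape {4, 4}, lower
/// bounds {0, 0}, upper bounds {4, 4} and strides {2, 2}, `arg1` must have
/// shape {2, 2} and replaces the four elements at even row/column coordinates.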
ReplaceSlice(const Output<Node>& arg0,
......@@ -89,7 +92,10 @@ namespace ngraph
/// \return The inclusive lower-bound coordinates.
const Coordinate& get_lower_bounds() const { return m_lower_bounds; }
void set_lower_bounds(const Coordinate& lower_bounds) { m_lower_bounds = lower_bounds; }
void set_lower_bounds(const Coordinate& lower_bounds)
{
m_lower_bounds = lower_bounds;
}
/// \return The exclusive upper-bound coordinates.
const Coordinate& get_upper_bounds() const { return m_upper_bounds; }
void set_uppper_bounds(const Coordinate& upper_bounds)
......@@ -108,4 +114,6 @@ namespace ngraph
Strides m_strides;
};
}
using v0::ReplaceSlice;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
// clang-format off
/// \brief Tensor reshape operation.
......@@ -73,7 +75,8 @@ namespace ngraph
///
/// \param arg The tensor to be reshaped.
/// \param input_order The order in which to iterate over input axes. This must be a
/// permutation of the sequence \f$(0,\dots,n-1)\f$ where \f$n\f$ is
/// permutation of the sequence \f$(0,\dots,n-1)\f$ where \f$n\f$
/// is
/// the rank of the input tensor.
/// \param output_shape The output shape. If the input shape is
/// \f$(a_0,\dots,a_{k-1})\f$ then the output shape must
......@@ -104,6 +107,7 @@ namespace ngraph
Shape m_output_shape;
bool m_is_transpose{false};
};
}
namespace v1
{
......@@ -115,7 +119,7 @@ namespace ngraph
class NGRAPH_API Reshape : public Op
{
public:
static constexpr NodeTypeInfo type_info{"DynReshape", 1};
static constexpr NodeTypeInfo type_info{"Reshape", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Reshape() = default;
/// \brief Constructs a dynamic reshape operation. This operation does not perform
......@@ -150,5 +154,6 @@ namespace ngraph
bool m_zero_flag;
};
}
using v0::Reshape;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
class NGRAPH_API ReverseSequence : public Op
{
......@@ -54,4 +56,6 @@ namespace ngraph
size_t m_seq_axis{0};
};
}
using v0::ReverseSequence;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Add updates to slices from inputs addressed by indices
class NGRAPH_API ScatterAdd : public Op
......@@ -52,4 +54,6 @@ namespace ngraph
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::ScatterAdd;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Add updates to slices from inputs addressed by indices
class NGRAPH_API ScatterNDAdd : public Op
......@@ -52,4 +54,6 @@ namespace ngraph
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::ScatterNDAdd;
}
}