Commit f6bddf08 authored by Jayaram Bobba's avatar Jayaram Bobba Committed by Scott Cyphers

Opset1 Definition (#3813)

* Opset1

* Added opset1.hpp

* Added more ops to opset0 and opset1

* Move opset1.hpp up and remove opset0.hpp

* Add versioning to more ops

* Revert to older pass names to keep compatibility for external components

* Fix compilation errors with codegen

* merge

* Added compile-time check for opset

* Added opset1 tbl

* Add op_version table of all ops

* Create factories from op_version_tbl

* reorg unsupported ops in int backend

* Added temporary alias for GreaterEqual

* Add missing case to interpreter enumeration

* Finish opset serializer cleanup (#3939)

* Opset-based opset conversion (#3937)

* Opset-based opset conversion

* Add other opset conversion

* Use ops.hpp

* Update opset0_tbl.hpp

* Switch interpreter to opset0 + a few extras (#3941)

* Switch interpreter, gcpu to opset0

* Remove unused files

* Give interpreter its own opset

* style

* Fix namespace

* Fix rounding type conversion

* Work-around for bad clang3.9 bug

* Work-around
parent d2482523
......@@ -436,6 +436,7 @@ set (SRC
op/util/unary_elementwise_arithmetic.cpp
op/util/unary_elementwise_arithmetic.hpp
ops.hpp
opsets/opset.cpp
partial_shape.cpp
partial_shape.hpp
pass/algebraic_simplification.cpp
......
......@@ -18,22 +18,7 @@
#include "ngraph/factory.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/abs.hpp"
#include "ngraph/op/acos.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/all.hpp"
#include "ngraph/op/allreduce.hpp"
#include "ngraph/op/and.hpp"
#include "ngraph/op/any.hpp"
#include "ngraph/op/argmax.hpp"
#include "ngraph/op/argmin.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "ngraph/op/batch_norm.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/broadcast_distributed.hpp"
#include "ngraph/op/ceiling.hpp"
#include "ngraph/op/concat.hpp"
#include "ngraph/op/parameter.hpp"
#include "ngraph/ops.hpp"
using namespace std;
......@@ -58,31 +43,9 @@ namespace ngraph
lock_guard<mutex> guard(init_guard);
if (registry.m_factory_map.size() == 0)
{
registry.register_factory<op::Abs>();
registry.register_factory<op::Acos>();
registry.register_factory<op::v0::Add>();
registry.register_factory<op::v1::Add>();
registry.register_factory<op::All>();
registry.register_factory<op::AllReduce>();
registry.register_factory<op::And>();
registry.register_factory<op::Any>();
registry.register_factory<op::ArgMax>();
registry.register_factory<op::ArgMin>();
registry.register_factory<op::v0::AvgPool>();
registry.register_factory<op::v0::AvgPoolBackprop>();
registry.register_factory<op::v1::AvgPool>();
registry.register_factory<op::v1::AvgPoolBackprop>();
registry.register_factory<op::BatchNormInference>();
registry.register_factory<op::BatchNormTraining>();
registry.register_factory<op::BatchNormTrainingBackprop>();
registry.register_factory<op::BroadcastDistributed>();
registry.register_factory<op::v0::Broadcast>();
registry.register_factory<op::v0::BroadcastLike>();
registry.register_factory<op::v1::Broadcast>();
registry.register_factory<op::Ceiling>();
registry.register_factory<op::Concat>();
registry.register_factory<op::v1::LogicalAnd>();
registry.register_factory<op::Parameter>();
#define NGRAPH_OP(NAME, NAMESPACE, VERSION) registry.register_factory<NAMESPACE::NAME>();
#include "ngraph/op/op_version_tbl.hpp"
#undef NGRAPH_OP
}
}
return registry;
......
......@@ -38,7 +38,7 @@ namespace ngraph
ASSERT_IS_SUPPORTED(node, fmod == 1)
<< "Only 'fmod=1' mode is supported for mod operator.";
return {std::make_shared<ngraph::op::Mod>(dividend, divisor)};
return {std::make_shared<ngraph::op::v1::Mod>(dividend, divisor)};
}
} // namespace set_1
......
......@@ -45,7 +45,7 @@ namespace ngraph
auto gamma_node = std::make_shared<ngraph::op::Constant>(
data->get_element_type(), data->get_shape(), std::vector<double>{gamma});
return {std::make_shared<ngraph::op::v1::Selu>(data, alpha_node, gamma_node)};
return {std::make_shared<ngraph::op::v0::Selu>(data, alpha_node, gamma_node)};
}
} // namespace set_1
......
......@@ -24,30 +24,34 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise absolute value operation.
///
class NGRAPH_API Abs : public util::UnaryElementwiseArithmetic
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Abs", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an absolute value operation.
Abs() = default;
bool visit_attributes(AttributeVisitor& visitor) override { return true; }
/// \brief Constructs an absolute value operation.
/// \brief Elementwise absolute value operation.
///
/// \param arg Output that produces the input tensor.<br>
/// `[d1, ...]`
///
/// Output `[d1, ...]`
///
Abs(const Output<Node>& arg);
class NGRAPH_API Abs : public util::UnaryElementwiseArithmetic
{
public:
static constexpr NodeTypeInfo type_info{"Abs", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an absolute value operation.
Abs() = default;
bool visit_attributes(AttributeVisitor& visitor) override { return true; }
/// \brief Constructs an absolute value operation.
///
/// \param arg Output that produces the input tensor.<br>
/// `[d1, ...]`
///
/// Output `[d1, ...]`
///
Abs(const Output<Node>& arg);
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
}
using v0::Abs;
}
}
......@@ -24,29 +24,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise inverse cosine (arccos) operation.
///
class NGRAPH_API Acos : public util::UnaryElementwiseArithmetic
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Acos", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an arccos operation.
Acos() = default;
/// \brief Constructs an arccos operation.
/// \brief Elementwise inverse cosine (arccos) operation.
///
/// \param arg Output that produces the input tensor.<br>
/// `[d1, ...]`
///
/// Output `[d1, ...]`
///
Acos(const Output<Node>& arg);
bool visit_attributes(AttributeVisitor& visitor) override { return true; }
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
class NGRAPH_API Acos : public util::UnaryElementwiseArithmetic
{
public:
static constexpr NodeTypeInfo type_info{"Acos", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an arccos operation.
Acos() = default;
/// \brief Constructs an arccos operation.
///
/// \param arg Output that produces the input tensor.<br>
/// `[d1, ...]`
///
/// Output `[d1, ...]`
///
Acos(const Output<Node>& arg);
bool visit_attributes(AttributeVisitor& visitor) override { return true; }
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
}
using v0::Acos;
}
}
......@@ -54,6 +54,7 @@ namespace ngraph
const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec());
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
virtual bool is_commutative() const override { return true; }
protected:
......@@ -101,8 +102,8 @@ namespace ngraph
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
} // namespace v1
} // namespace v1
using v0::Add;
} // namespace op
......
......@@ -22,29 +22,33 @@ namespace ngraph
{
namespace op
{
/// \brief Logical "all" reduction operation.
class NGRAPH_API All : public util::LogicalReduction
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"All", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an "all" reduction operation.
All() = default;
/// \brief Constructs an "all" reduction operation.
///
/// \param arg The tensor to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
All(const Output<Node>& arg, const AxisSet& reduction_axes);
/// \brief Constructs an "all" reduction operation.
///
/// \param arg The tensor to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
All(const Output<Node>& arg, const Output<Node>& reduction_axes);
bool visit_attributes(AttributeVisitor& visitor) override { return true; }
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
/// \brief Logical "all" reduction operation.
class NGRAPH_API All : public util::LogicalReduction
{
public:
static constexpr NodeTypeInfo type_info{"All", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an "all" reduction operation.
All() = default;
/// \brief Constructs an "all" reduction operation.
///
/// \param arg The tensor to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
All(const Output<Node>& arg, const AxisSet& reduction_axes);
/// \brief Constructs an "all" reduction operation.
///
/// \param arg The tensor to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
All(const Output<Node>& arg, const Output<Node>& reduction_axes);
bool visit_attributes(AttributeVisitor& visitor) override { return true; }
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
/// \return The default value for All.
virtual std::shared_ptr<Node> get_default_value() const override;
};
/// \return The default value for All.
virtual std::shared_ptr<Node> get_default_value() const override;
};
}
using v0::All;
}
}
......@@ -23,23 +23,28 @@ namespace ngraph
{
namespace op
{
class NGRAPH_API AllReduce : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"AllReduce", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
AllReduce() = default;
AllReduce(const Output<Node>& arg, reduction::Type reduce_type = reduction::Type::SUM);
class NGRAPH_API AllReduce : public Op
{
public:
static constexpr NodeTypeInfo type_info{"AllReduce", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
AllReduce() = default;
AllReduce(const Output<Node>& arg,
reduction::Type reduce_type = reduction::Type::SUM);
void validate_and_infer_types() override;
void validate_and_infer_types() override;
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
reduction::Type get_reduce_type() const;
void set_reduce_type(reduction::Type reduce_type);
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
reduction::Type get_reduce_type() const;
void set_reduce_type(reduction::Type reduce_type);
bool visit_attributes(AttributeVisitor& visitor) override;
private:
reduction::Type m_reduce_type{reduction::Type::SUM};
};
private:
reduction::Type m_reduce_type{reduction::Type::SUM};
};
}
using v0::AllReduce;
}
}
......@@ -22,26 +22,32 @@ namespace ngraph
{
namespace op
{
/// \brief Computes minimum index along a specified axis for a given tensor
class NGRAPH_API ArgMax : public op::util::IndexReduction
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"ArgMax", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a ArgMax operation.
ArgMax() = default;
/// \brief Constructs a ArgMax operation.
///
/// \param arg The input tensor
/// \param axis The axis along which to compute an index for maximum
/// \param index_element_type produce indices. Currently, only int64 or int32 are
/// supported
ArgMax(const Output<Node>& arg, size_t axis, const element::Type& index_element_type);
/// \brief Computes maximum index along a specified axis for a given tensor
class NGRAPH_API ArgMax : public op::util::IndexReduction
{
public:
static constexpr NodeTypeInfo type_info{"ArgMax", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a ArgMax operation.
ArgMax() = default;
/// \brief Constructs a ArgMax operation.
///
/// \param arg The input tensor
/// \param axis The axis along which to compute an index for maximum
/// \param index_element_type produce indices. Currently, only int64 or int32 are
/// supported
ArgMax(const Output<Node>& arg,
size_t axis,
const element::Type& index_element_type);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
virtual std::shared_ptr<Node> get_default_value() const override;
};
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
virtual std::shared_ptr<Node> get_default_value() const override;
};
}
using v0::ArgMax;
}
}
......@@ -22,27 +22,33 @@ namespace ngraph
{
namespace op
{
/// \brief Computes minimum index along a specified axis for a given tensor
class NGRAPH_API ArgMin : public op::util::IndexReduction
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"ArgMin", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a ArgMin operation.
ArgMin() = default;
/// \brief Computes minimum index along a specified axis for a given tensor
class NGRAPH_API ArgMin : public op::util::IndexReduction
{
public:
static constexpr NodeTypeInfo type_info{"ArgMin", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a ArgMin operation.
ArgMin() = default;
/// \brief Constructs a ArgMin operation.
///
/// \param arg The input tensor
/// \param axis The axis along which to compute an index for minimum
/// \param index_element_type produce indices. Currently, only int64 or int32 are
/// supported
ArgMin(const Output<Node>& arg, size_t axis, const element::Type& index_element_type);
/// \brief Constructs a ArgMin operation.
///
/// \param arg The input tensor
/// \param axis The axis along which to compute an index for minimum
/// \param index_element_type produce indices. Currently, only int64 or int32 are
/// supported
ArgMin(const Output<Node>& arg,
size_t axis,
const element::Type& index_element_type);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
virtual std::shared_ptr<Node> get_default_value() const override;
};
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
virtual std::shared_ptr<Node> get_default_value() const override;
};
}
using v0::ArgMin;
}
}
......@@ -24,30 +24,34 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise inverse sine (arcsin) operation.
///
class NGRAPH_API Asin : public util::UnaryElementwiseArithmetic
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Asin", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an arcsin operation.
Asin() = default;
/// \brief Constructs an arcsin operation.
/// \brief Elementwise inverse sine (arcsin) operation.
///
/// \param arg Output that produces the input tensor.<br>
/// `[d1, ...]`
///
/// Output `[d1, ...]`
///
Asin(const Output<Node>& arg);
class NGRAPH_API Asin : public util::UnaryElementwiseArithmetic
{
public:
static constexpr NodeTypeInfo type_info{"Asin", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an arcsin operation.
Asin() = default;
/// \brief Constructs an arcsin operation.
///
/// \param arg Output that produces the input tensor.<br>
/// `[d1, ...]`
///
/// Output `[d1, ...]`
///
Asin(const Output<Node>& arg);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override { return true; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override { return true; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
}
using v0::Asin;
}
}
......@@ -24,31 +24,35 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise inverse tangent (arctan) operation.
///
class NGRAPH_API Atan : public util::UnaryElementwiseArithmetic
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Atan", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an arctan operation.
Atan() = default;
/// \brief Constructs an arctan operation.
///
/// \param arg Output that produces the input tensor.<br>
/// `[d1, ...]`
/// \brief Elementwise inverse tangent (arctan) operation.
///
/// Output `[d1, ...]`
///
Atan(const Output<Node>& arg);
class NGRAPH_API Atan : public util::UnaryElementwiseArithmetic
{
public:
static constexpr NodeTypeInfo type_info{"Atan", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an arctan operation.
Atan() = default;
/// \brief Constructs an arctan operation.
///
/// \param arg Output that produces the input tensor.<br>
/// `[d1, ...]`
///
/// Output `[d1, ...]`
///
Atan(const Output<Node>& arg);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override { return true; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override { return true; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
}
using v0::Atan;
}
}
This diff is collapsed.
......@@ -24,23 +24,27 @@ namespace ngraph
{
namespace op
{
class NGRAPH_API BroadcastDistributed : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"BroadcastDistributed", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
BroadcastDistributed() = default;
BroadcastDistributed(const Output<Node>& arg, int64_t root_id = 0);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
class NGRAPH_API BroadcastDistributed : public Op
{
public:
static constexpr NodeTypeInfo type_info{"BroadcastDistributed", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
BroadcastDistributed() = default;
BroadcastDistributed(const Output<Node>& arg, int64_t root_id = 0);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
int64_t get_root_id() const;
void set_root_id(int64_t root_id);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
int64_t get_root_id() const;
void set_root_id(int64_t root_id);
private:
int64_t m_root_id;
};
private:
int64_t m_root_id;
};
}
using v0::BroadcastDistributed;
}
}
......@@ -22,21 +22,26 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise ceiling operation.
class NGRAPH_API Ceiling : public util::UnaryElementwiseArithmetic
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Ceiling", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a ceiling operation.
Ceiling() = default;
/// \brief Constructs a ceiling operation.
///
/// \param arg Node that produces the input tensor.
Ceiling(const Output<Node>& arg);
bool visit_attributes(AttributeVisitor& visitor) override { return true; }
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
/// \brief Elementwise ceiling operation.
class NGRAPH_API Ceiling : public util::UnaryElementwiseArithmetic
{
public:
static constexpr NodeTypeInfo type_info{"Ceiling", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a ceiling operation.
Ceiling() = default;
/// \brief Constructs a ceiling operation.
///
/// \param arg Node that produces the input tensor.
Ceiling(const Output<Node>& arg);
bool visit_attributes(AttributeVisitor& visitor) override { return true; }
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::Ceiling;
}
}
......@@ -24,47 +24,52 @@ namespace ngraph
{
namespace op
{
/// \brief Concatenation operation.
class NGRAPH_API Concat : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Concat", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a concatenation operation.
Concat() = default;
/// \brief Constructs a concatenation operation.
///
/// \param args The outputs producing the input tensors.
/// \param axis The axis along which to concatenate the input tensors.
Concat(const OutputVector& args, int64_t axis);
/// \brief Concatenation operation.
class NGRAPH_API Concat : public Op
{
public:
static constexpr NodeTypeInfo type_info{"Concat", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a concatenation operation.
Concat() = default;
/// \brief Constructs a concatenation operation.
///
/// \param args The outputs producing the input tensors.
/// \param axis The axis along which to concatenate the input tensors.
Concat(const OutputVector& args, int64_t axis);
/// \brief Constructs a concatenation operation.
///
/// \param args The nodes producing the input tensors.
/// \param axis The axis along which to concatenate the input tensors.
Concat(const NodeVector& args, int64_t axis);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
/// \brief Constructs a concatenation operation.
///
/// \param args The nodes producing the input tensors.
/// \param axis The axis along which to concatenate the input tensors.
Concat(const NodeVector& args, int64_t axis);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
/// \return The concatenation axis.
int64_t get_concatenation_axis() const { return m_concat_axis; }
void set_concatenation_axis(int64_t concatenation_axis)
{
m_concat_axis = concatenation_axis;
}
/// \return The concatenation axis.
int64_t get_axis() const { return m_axis; }
void set_axis(int64_t axis) { m_axis = axis; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
/// \brief m_axis stores default value for all iterations
int64_t m_axis;
/// \brief m_concat_axis stores m_axis plus the number of rank for each iteration
int64_t m_concat_axis = -1;
};
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
/// \return The concatenation axis.
int64_t get_concatenation_axis() const { return m_concat_axis; }
void set_concatenation_axis(int64_t concatenation_axis)
{
m_concat_axis = concatenation_axis;
}
/// \return The concatenation axis.
int64_t get_axis() const { return m_axis; }
void set_axis(int64_t axis) { m_axis = axis; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
/// \brief m_axis stores default value for all iterations
int64_t m_axis;
/// \brief m_concat_axis stores m_axis plus the number of rank for each iteration
int64_t m_concat_axis = -1;
};
}
using v0::Concat;
}
}
......@@ -22,41 +22,44 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise type conversion operation.
class NGRAPH_API Convert : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Convert", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a conversion operation.
Convert() = default;
/// \brief Constructs a conversion operation.
///
/// \param arg Node that produces the input tensor.
/// \param destination_type Element type for the output tensor.
Convert(const Output<Node>& arg, const ngraph::element::Type& destination_type);
void validate_and_infer_types() override;
/// \brief Elementwise type conversion operation.
class NGRAPH_API Convert : public Op
{
public:
static constexpr NodeTypeInfo type_info{"Convert", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a conversion operation.
Convert() = default;
/// \brief Constructs a conversion operation.
///
/// \param arg Node that produces the input tensor.
/// \param destination_type Element type for the output tensor.
Convert(const Output<Node>& arg, const ngraph::element::Type& destination_type);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
void validate_and_infer_types() override;
const element::Type& get_destination_type() const { return m_destination_type; }
void set_destination_type(const element::Type& destination_type)
{
m_destination_type = destination_type;
}
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
const element::Type& get_destination_type() const { return m_destination_type; }
void set_destination_type(const element::Type& destination_type)
{
m_destination_type = destination_type;
}
const element::Type& get_convert_element_type() const { return m_destination_type; }
void set_convert_element_type(const element::Type& destination_type)
{
m_destination_type = destination_type;
}
const element::Type& get_convert_element_type() const { return m_destination_type; }
void set_convert_element_type(const element::Type& destination_type)
{
m_destination_type = destination_type;
}
protected:
ngraph::element::Type m_destination_type;
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
protected:
ngraph::element::Type m_destination_type;
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
}
using v0::Convert;
}
}
......@@ -62,7 +62,6 @@ namespace ngraph
const Strides& dilations,
const PadType& auto_pad = PadType::EXPLICIT);
size_t get_version() const override { return 1; }
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
......@@ -121,7 +120,6 @@ namespace ngraph
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end);
size_t get_version() const override { return 1; }
void validate_and_infer_types() override;
void generate_adjoints(autodiff::Adjoints& adjoints,
......@@ -180,7 +178,6 @@ namespace ngraph
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end);
size_t get_version() const override { return 1; }
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
......
......@@ -22,25 +22,29 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise cosine operation.
class NGRAPH_API Cos : public util::UnaryElementwiseArithmetic
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Cos", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a cosine operation.
Cos() = default;
/// \brief Constructs a cosine operation.
///
/// \param arg Node that produces the input tensor.
Cos(const Output<Node>& arg);
/// \brief Elementwise cosine operation.
class NGRAPH_API Cos : public util::UnaryElementwiseArithmetic
{
public:
static constexpr NodeTypeInfo type_info{"Cos", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a cosine operation.
Cos() = default;
/// \brief Constructs a cosine operation.
///
/// \param arg Node that produces the input tensor.
Cos(const Output<Node>& arg);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
}
using v0::Cos;
}
}
......@@ -22,25 +22,29 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise hyperbolic cosine (cosh) operation.
class NGRAPH_API Cosh : public util::UnaryElementwiseArithmetic
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Cosh", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a hyperbolic cosine operation.
Cosh() = default;
/// \brief Constructs a hyperbolic cosine operation.
///
/// \param arg Node that produces the input tensor.
Cosh(const Output<Node>& arg);
/// \brief Elementwise hyperbolic cosine (cosh) operation.
class NGRAPH_API Cosh : public util::UnaryElementwiseArithmetic
{
public:
static constexpr NodeTypeInfo type_info{"Cosh", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a hyperbolic cosine operation.
Cosh() = default;
/// \brief Constructs a hyperbolic cosine operation.
///
/// \param arg Node that produces the input tensor.
Cosh(const Output<Node>& arg);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
}
using v0::Cosh;
}
}
......@@ -22,7 +22,7 @@ namespace ngraph
{
namespace op
{
class CropAndResize : public Op
class NGRAPH_API CropAndResize : public Op
{
public:
enum class ResizeMethod
......@@ -32,7 +32,6 @@ namespace ngraph
nearest
};
NGRAPH_API
static constexpr NodeTypeInfo type_info{"CropAndResize", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a crop and resize operation.
......
......@@ -23,78 +23,87 @@ namespace ngraph
{
namespace op
{
/// \brief Tensor cumulative sum operation.
///
/// Compute the cumulative sum of the input tensor along the axis specified.
///
/// ## Parameters
///
/// | | Description |
/// | -------------------- |
/// --------------------------------------------------------------------------------------------------|
/// | `exclusive` | If set to 1 will return exclusive sum in which the top element
/// is not included. |
/// | | In other terms, if set to 1, the j-th output element would be
/// the
/// sum of the first (j-1) elements.|
/// | | Otherwise, it would be the sum of the first j elements.
/// |
///
/// | | Description |
/// | -------------------- | -------------------------------------------------- |
/// | `reverse` | if set to 1, performs the sum in reverse direction |
///
/// ## Inputs
///
/// | | Description |
/// | ----- | ------------------------------------------------------ |
/// | `arg` | An input tensor of any shape and numeric element type. |
///
/// | | Description |
/// | ----- |
/// ------------------------------------------------------------------------------------------------|
/// | `axis`| zero dimension tensor specifying axis position along which cumulative sum must
/// be performed. |
///
/// ## Output
///
/// | Description |
/// | ------------------------------------------------------------------------------------|
/// | Output tensor of the same type as `arg` with cumulative sums of the arg's elements |
class CumSum : public Op
namespace v0
{
public:
NGRAPH_API
static constexpr NodeTypeInfo type_info{"CumSum", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a cumulative summation operation.
CumSum() = default;
/// \brief Constructs a cumulative summation operation.
/// \brief Tensor cumulative sum operation.
///
/// Compute the cumulative sum of the input tensor along the axis specified.
///
/// ## Parameters
///
/// | | Description |
/// | -------------------- |
/// --------------------------------------------------------------------------------------------------|
/// | `exclusive` | If set to 1 will return exclusive sum in which the top
/// element
/// is not included. |
/// | | In other terms, if set to 1, the j-th output element
/// would be
/// the
/// sum of the first (j-1) elements.|
/// | | Otherwise, it would be the sum of the first j elements.
/// |
///
/// | | Description |
/// | -------------------- | -------------------------------------------------- |
/// | `reverse` | if set to 1, performs the sum in reverse direction |
///
/// \param arg The tensor to be summed.
/// \param axis zero dimension tensor specifying axis position along which cumulative
/// sum must be performed
CumSum(const Output<Node>& arg,
const Output<Node>& axis,
const bool exclusive = false,
const bool reverse = false);
/// ## Inputs
///
/// | | Description |
/// | ----- | ------------------------------------------------------ |
/// | `arg` | An input tensor of any shape and numeric element type. |
///
/// | | Description |
/// | ----- |
/// ------------------------------------------------------------------------------------------------|
/// | `axis`| zero dimension tensor specifying axis position along which cumulative sum
/// must
/// be performed. |
///
/// ## Output
///
/// | Description |
/// |
/// ------------------------------------------------------------------------------------|
/// | Output tensor of the same type as `arg` with cumulative sums of the arg's elements
/// |
class NGRAPH_API CumSum : public Op
{
public:
static constexpr NodeTypeInfo type_info{"CumSum", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a cumulative summation operation.
CumSum() = default;
/// \brief Constructs a cumulative summation operation.
///
/// \param arg The tensor to be summed.
/// \param axis zero dimension tensor specifying axis position along which
/// cumulative
/// sum must be performed
CumSum(const Output<Node>& arg,
const Output<Node>& axis,
const bool exclusive = false,
const bool reverse = false);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
/// \return The default value for CumSum.
virtual std::shared_ptr<Node> get_default_value() const override;
bool is_exclusive() const { return m_exclusive; }
bool is_reverse() const { return m_reverse; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
/// \return The default value for CumSum.
virtual std::shared_ptr<Node> get_default_value() const override;
bool is_exclusive() const { return m_exclusive; }
bool is_reverse() const { return m_reverse; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
private:
bool m_exclusive;
bool m_reverse;
};
private:
bool m_exclusive;
bool m_reverse;
};
}
using v0::CumSum;
}
}
......@@ -24,67 +24,76 @@ namespace ngraph
{
namespace op
{
/// \brief Generalized dot product operation, including scalar-tensor product, matrix-vector
/// product, and matrix multiplication.
class NGRAPH_API Dot : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Dot", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a dot product operation.
Dot() = default;
/// \brief Constructs a dot product operation.
///
/// \param arg0 The node producing the first argument.
/// \param arg1 The node producing the second argument.
/// \param reduction_axes_count The number of axes to dot.
Dot(const Output<Node>& arg0,
const Output<Node>& arg1,
size_t reduction_axes_count,
bool has_reduction_axes_count = true);
/// \brief Generalized dot product operation, including scalar-tensor product,
/// matrix-vector
/// product, and matrix multiplication.
class NGRAPH_API Dot : public Op
{
public:
static constexpr NodeTypeInfo type_info{"Dot", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a dot product operation.
Dot() = default;
/// \brief Constructs a dot product operation.
///
/// \param arg0 The node producing the first argument.
/// \param arg1 The node producing the second argument.
/// \param reduction_axes_count The number of axes to dot.
Dot(const Output<Node>& arg0,
const Output<Node>& arg1,
size_t reduction_axes_count,
bool has_reduction_axes_count = true);
/// \brief Constructs a dot product operation with default dot-axis selection depending
/// on the inputs.
///
/// If `arg0` or `arg1` is a scalar, there are no dot-axes. Else, there is one dot-axis.
///
/// (Note that in particular, this results in scalar-tensor products where one or the
/// other argument is a scalar, a matrix-vector products where `arg0` is a matrix and
/// `arg1` is a vector, and a matrix multiplication where `arg0` and `arg1` are both
/// matrices.)
///
/// \param arg0 The node producing the first argument.
/// \param arg1 The node producing the second argument.
Dot(const Output<Node>& arg0, const Output<Node>& arg1);
/// \brief Constructs a dot product operation with default dot-axis selection
/// depending
/// on the inputs.
///
/// If `arg0` or `arg1` is a scalar, there are no dot-axes. Else, there is one
/// dot-axis.
///
/// (Note that in particular, this results in scalar-tensor products where one or
/// the
/// other argument is a scalar, a matrix-vector products where `arg0` is a matrix
/// and
/// `arg1` is a vector, and a matrix multiplication where `arg0` and `arg1` are both
/// matrices.)
///
/// \param arg0 The node producing the first argument.
/// \param arg1 The node producing the second argument.
Dot(const Output<Node>& arg0, const Output<Node>& arg1);
void validate_and_infer_types() override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node> get_default_value() const override;
virtual std::shared_ptr<Node> get_default_value() const override;
size_t get_reduction_axes_count() const { return m_reduction_axes_count; }
void set_reduction_axes_count(size_t reduction_axes_count)
{
m_reduction_axes_count = reduction_axes_count;
}
bool get_has_reduction_axes_count() const { return m_has_reduction_axes_count; }
void set_has_reduction_axes_count(bool has_reduction_axes_count)
{
m_has_reduction_axes_count = has_reduction_axes_count;
}
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override
{
check_new_args_count(this, new_args);
return std::make_shared<Dot>(
new_args.at(0), new_args.at(1), m_reduction_axes_count);
}
size_t get_reduction_axes_count() const { return m_reduction_axes_count; }
void set_reduction_axes_count(size_t reduction_axes_count)
{
m_reduction_axes_count = reduction_axes_count;
}
bool get_has_reduction_axes_count() const { return m_has_reduction_axes_count; }
void set_has_reduction_axes_count(bool has_reduction_axes_count)
{
m_has_reduction_axes_count = has_reduction_axes_count;
}
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override
{
check_new_args_count(this, new_args);
return std::make_shared<Dot>(
new_args.at(0), new_args.at(1), m_reduction_axes_count);
}
protected:
size_t m_reduction_axes_count;
bool m_has_reduction_axes_count;
protected:
size_t m_reduction_axes_count;
bool m_has_reduction_axes_count;
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
}
using v0::Dot;
}
}
......@@ -23,38 +23,43 @@ namespace ngraph
{
namespace op
{
/// \brief Returns embeddings for given indices
class NGRAPH_API EmbeddingLookup : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"EmbeddingLookup", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a EmbeddingLookup operation.
EmbeddingLookup() = default;
/// \brief Constructs a EmbeddingLookup operation.
///
/// EmbeddingLookup constructs an output tensor by replacing every index in a given
/// input tensor with a row (from the weights matrix) at that index
///
/// \param data The input indices for tokens to be translated into embeddings
/// \param weights is a dense matrix [N,M] where each row 0..N
/// corresponds to an embedding (i.e. typically, a vector of real numbers) of length M
EmbeddingLookup(const Output<Node>& data, const Output<Node>& weights)
: Op({data, weights})
/// \brief Returns embeddings for given indices
class NGRAPH_API EmbeddingLookup : public Op
{
constructor_validate_and_infer_types();
}
public:
static constexpr NodeTypeInfo type_info{"EmbeddingLookup", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a EmbeddingLookup operation.
EmbeddingLookup() = default;
/// \brief Constructs a EmbeddingLookup operation.
///
/// EmbeddingLookup constructs an output tensor by replacing every index in a given
/// input tensor with a row (from the weights matrix) at that index
///
/// \param data The input indices for tokens to be translated into embeddings
/// \param weights is a dense matrix [N,M] where each row 0..N
/// corresponds to an embedding (i.e. typically, a vector of real numbers) of length
/// M
EmbeddingLookup(const Output<Node>& data, const Output<Node>& weights)
: Op({data, weights})
{
constructor_validate_and_infer_types();
}
void validate_and_infer_types() override;
void validate_and_infer_types() override;
void generate_adjoints(autodiff::Adjoints& /* adjoints */,
const NodeVector& /* deltas */) override
{
throw ngraph_error("Not yet implemented");
}
void generate_adjoints(autodiff::Adjoints& /* adjoints */,
const NodeVector& /* deltas */) override
{
throw ngraph_error("Not yet implemented");
}
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::EmbeddingLookup;
}
}
......@@ -22,16 +22,20 @@ namespace ngraph
{
namespace op
{
class NGRAPH_API Erf : public util::UnaryElementwiseArithmetic
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Erf", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Erf() = default;
Erf(const Output<Node>& arg);
class NGRAPH_API Erf : public util::UnaryElementwiseArithmetic
{
public:
static constexpr NodeTypeInfo type_info{"Erf", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Erf() = default;
Erf(const Output<Node>& arg);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::Erf;
}
}
......@@ -22,24 +22,28 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise natural exponential (exp) operation.
class NGRAPH_API Exp : public util::UnaryElementwiseArithmetic
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Exp", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an exponential operation.
Exp() = default;
/// \brief Constructs an exponential operation.
///
/// \param arg Node that produces the input tensor.
Exp(const Output<Node>& arg);
/// \brief Elementwise natural exponential (exp) operation.
class NGRAPH_API Exp : public util::UnaryElementwiseArithmetic
{
public:
static constexpr NodeTypeInfo type_info{"Exp", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an exponential operation.
Exp() = default;
/// \brief Constructs an exponential operation.
///
/// \param arg Node that produces the input tensor.
Exp(const Output<Node>& arg);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
}
using v0::Exp;
}
}
......@@ -22,29 +22,33 @@ namespace ngraph
{
namespace op
{
class NGRAPH_API CTCGreedyDecoder : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"CTCGreedyDecoder", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
CTCGreedyDecoder() = default;
/// \brief Constructs a CTCGreedyDecoder operation
///
/// \param input Logits on which greedy decoding is performed
/// \param seq_len Sequence lengths
/// \param ctc_merge_repeated Whether to merge repeated labels
CTCGreedyDecoder(const Output<Node>& input,
const Output<Node>& seq_len,
const bool ctc_merge_repeated);
class NGRAPH_API CTCGreedyDecoder : public Op
{
public:
static constexpr NodeTypeInfo type_info{"CTCGreedyDecoder", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
CTCGreedyDecoder() = default;
/// \brief Constructs a CTCGreedyDecoder operation
///
/// \param input Logits on which greedy decoding is performed
/// \param seq_len Sequence lengths
/// \param ctc_merge_repeated Whether to merge repeated labels
CTCGreedyDecoder(const Output<Node>& input,
const Output<Node>& seq_len,
const bool ctc_merge_repeated);
void validate_and_infer_types() override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
bool get_ctc_merge_repeated() const { return m_ctc_merge_repeated; }
private:
bool m_ctc_merge_repeated;
};
bool get_ctc_merge_repeated() const { return m_ctc_merge_repeated; }
private:
bool m_ctc_merge_repeated;
};
}
using v0::CTCGreedyDecoder;
}
}
......@@ -42,37 +42,41 @@ namespace ngraph
float objectness_score = 0;
} DetectionOutputAttrs;
/// \brief Layer which performs non-max suppression to
/// generate detection output using location and confidence predictions
class NGRAPH_API DetectionOutput : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"DetectionOutput", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
DetectionOutput() = default;
/// \brief Constructs a DetectionOutput operation
///
/// \param box_logits Box logits
/// \param class_preds Class predictions
/// \param proposals Proposals
/// \param aux_class_preds Auxilary class predictions
/// \param aux_box_preds Auxilary box predictions
/// \param attrs Detection Output attributes
DetectionOutput(const Output<Node>& box_logits,
const Output<Node>& class_preds,
const Output<Node>& proposals,
const Output<Node>& aux_class_preds,
const Output<Node>& aux_box_preds,
const DetectionOutputAttrs& attrs);
/// \brief Layer which performs non-max suppression to
/// generate detection output using location and confidence predictions
class NGRAPH_API DetectionOutput : public Op
{
public:
static constexpr NodeTypeInfo type_info{"DetectionOutput", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
DetectionOutput() = default;
/// \brief Constructs a DetectionOutput operation
///
/// \param box_logits Box logits
/// \param class_preds Class predictions
/// \param proposals Proposals
/// \param aux_class_preds Auxilary class predictions
/// \param aux_box_preds Auxilary box predictions
/// \param attrs Detection Output attributes
DetectionOutput(const Output<Node>& box_logits,
const Output<Node>& class_preds,
const Output<Node>& proposals,
const Output<Node>& aux_class_preds,
const Output<Node>& aux_box_preds,
const DetectionOutputAttrs& attrs);
void validate_and_infer_types() override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
const DetectionOutputAttrs& get_attrs() const { return m_attrs; }
private:
DetectionOutputAttrs m_attrs;
};
const DetectionOutputAttrs& get_attrs() const { return m_attrs; }
private:
DetectionOutputAttrs m_attrs;
};
}
using v0::DetectionOutput;
}
}
......@@ -32,30 +32,34 @@ namespace ngraph
std::vector<size_t> pads_end;
} InterpolateAttrs;
/// \brief Layer which performs bilinear interpolation
class NGRAPH_API Interpolate : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Interpolate", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Interpolate() = default;
/// \brief Constructs a Interpolate operation
///
/// \param image Input image
/// \param output_shape Output shape of spatial axes
/// \param attrs Interpolation attributes
Interpolate(const Output<Node>& image,
const Output<Node>& output_shape,
const InterpolateAttrs& attrs);
/// \brief Layer which performs bilinear interpolation
class NGRAPH_API Interpolate : public Op
{
public:
static constexpr NodeTypeInfo type_info{"Interpolate", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Interpolate() = default;
/// \brief Constructs a Interpolate operation
///
/// \param image Input image
/// \param output_shape Output shape of spatial axes
/// \param attrs Interpolation attributes
Interpolate(const Output<Node>& image,
const Output<Node>& output_shape,
const InterpolateAttrs& attrs);
void validate_and_infer_types() override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
const InterpolateAttrs& get_attrs() const { return m_attrs; }
private:
InterpolateAttrs m_attrs;
};
const InterpolateAttrs& get_attrs() const { return m_attrs; }
private:
InterpolateAttrs m_attrs;
};
}
using v0::Interpolate;
}
}
......@@ -44,31 +44,35 @@ namespace ngraph
bool scale_all_sizes = false;
};
/// \brief Layer which generates prior boxes of specified sizes
/// normalized to input image size
class NGRAPH_API PriorBox : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"PriorBox", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
PriorBox() = default;
/// \brief Constructs a PriorBox operation
///
/// \param layer_shape Shape of layer for which prior boxes are computed
/// \param image_shape Shape of image to which prior boxes are scaled
/// \param attrs PriorBox attributes
PriorBox(const Output<Node>& layer_shape,
const Output<Node>& image_shape,
const PriorBoxAttrs& attrs);
/// \brief Layer which generates prior boxes of specified sizes
/// normalized to input image size
class NGRAPH_API PriorBox : public Op
{
public:
static constexpr NodeTypeInfo type_info{"PriorBox", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
PriorBox() = default;
/// \brief Constructs a PriorBox operation
///
/// \param layer_shape Shape of layer for which prior boxes are computed
/// \param image_shape Shape of image to which prior boxes are scaled
/// \param attrs PriorBox attributes
PriorBox(const Output<Node>& layer_shape,
const Output<Node>& image_shape,
const PriorBoxAttrs& attrs);
void validate_and_infer_types() override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
const PriorBoxAttrs& get_attrs() const { return m_attrs; }
private:
PriorBoxAttrs m_attrs;
};
const PriorBoxAttrs& get_attrs() const { return m_attrs; }
private:
PriorBoxAttrs m_attrs;
};
}
using v0::PriorBox;
}
}
......@@ -40,31 +40,35 @@ namespace ngraph
std::vector<float> variances;
};
/// \brief Layer which generates prior boxes of specified sizes
/// normalized to input image size
class NGRAPH_API PriorBoxClustered : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"PriorBoxClustered", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
PriorBoxClustered() = default;
/// \brief Constructs a PriorBoxClustered operation
///
/// \param layer_shape Shape of layer for which prior boxes are computed
/// \param image_shape Shape of image to which prior boxes are scaled
/// \param attrs PriorBoxClustered attributes
PriorBoxClustered(const Output<Node>& layer_shape,
const Output<Node>& image_shape,
const PriorBoxClusteredAttrs& attrs);
/// \brief Layer which generates prior boxes of specified sizes
/// normalized to input image size
class NGRAPH_API PriorBoxClustered : public Op
{
public:
static constexpr NodeTypeInfo type_info{"PriorBoxClustered", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
PriorBoxClustered() = default;
/// \brief Constructs a PriorBoxClustered operation
///
/// \param layer_shape Shape of layer for which prior boxes are computed
/// \param image_shape Shape of image to which prior boxes are scaled
/// \param attrs PriorBoxClustered attributes
PriorBoxClustered(const Output<Node>& layer_shape,
const Output<Node>& image_shape,
const PriorBoxClusteredAttrs& attrs);
void validate_and_infer_types() override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
const PriorBoxClusteredAttrs& get_attrs() const { return m_attrs; }
private:
PriorBoxClusteredAttrs m_attrs;
};
const PriorBoxClusteredAttrs& get_attrs() const { return m_attrs; }
private:
PriorBoxClusteredAttrs m_attrs;
};
}
using v0::PriorBoxClustered;
}
}
......@@ -54,31 +54,35 @@ namespace ngraph
std::string framework;
};
class NGRAPH_API Proposal : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Proposal", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Proposal() = default;
/// \brief Constructs a Proposal operation
///
/// \param class_probs Class probability scores
/// \param class_logits Class prediction logits
/// \param image_shape Shape of image
/// \param attrs Proposal op attributes
Proposal(const Output<Node>& class_probs,
const Output<Node>& class_logits,
const Output<Node>& image_shape,
const ProposalAttrs& attrs);
class NGRAPH_API Proposal : public Op
{
public:
static constexpr NodeTypeInfo type_info{"Proposal", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Proposal() = default;
/// \brief Constructs a Proposal operation
///
/// \param class_probs Class probability scores
/// \param class_logits Class prediction logits
/// \param image_shape Shape of image
/// \param attrs Proposal op attributes
Proposal(const Output<Node>& class_probs,
const Output<Node>& class_logits,
const Output<Node>& image_shape,
const ProposalAttrs& attrs);
void validate_and_infer_types() override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
const ProposalAttrs& get_attrs() const { return m_attrs; }
private:
ProposalAttrs m_attrs;
};
const ProposalAttrs& get_attrs() const { return m_attrs; }
private:
ProposalAttrs m_attrs;
};
}
using v0::Proposal;
}
}
......@@ -22,49 +22,55 @@ namespace ngraph
{
namespace op
{
class NGRAPH_API PSROIPooling : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"PSROIPooling", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
PSROIPooling() = default;
/// \brief Constructs a PSROIPooling operation
///
/// \param input Input feature map {N, C, ...}
/// \param coords Coordinates of bounding boxes
/// \param output_dim Output channel number
/// \param group_size Number of groups to encode position-sensitive scores
/// \param spatial_scale Ratio of input feature map over input image size
/// \param spatial_bins_x Numbers of bins to divide the input feature maps over width
/// \param spatial_bins_y Numbers of bins to divide the input feature maps over height
/// \param mode Mode of pooling - Avg or Bilinear
PSROIPooling(const Output<Node>& input,
const Output<Node>& coords,
const size_t output_dim,
const size_t group_size,
const float spatial_scale,
int spatial_bins_x,
int spatial_bins_y,
const std::string& mode);
class NGRAPH_API PSROIPooling : public Op
{
public:
static constexpr NodeTypeInfo type_info{"PSROIPooling", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
PSROIPooling() = default;
/// \brief Constructs a PSROIPooling operation
///
/// \param input Input feature map {N, C, ...}
/// \param coords Coordinates of bounding boxes
/// \param output_dim Output channel number
/// \param group_size Number of groups to encode position-sensitive scores
/// \param spatial_scale Ratio of input feature map over input image size
/// \param spatial_bins_x Numbers of bins to divide the input feature maps over
/// width
/// \param spatial_bins_y Numbers of bins to divide the input feature maps over
/// height
/// \param mode Mode of pooling - Avg or Bilinear
PSROIPooling(const Output<Node>& input,
const Output<Node>& coords,
const size_t output_dim,
const size_t group_size,
const float spatial_scale,
int spatial_bins_x,
int spatial_bins_y,
const std::string& mode);
void validate_and_infer_types() override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
size_t get_output_dim() const { return m_output_dim; }
size_t get_group_size() const { return m_group_size; }
float get_spatial_scale() const { return m_spatial_scale; }
int get_spatial_bins_x() const { return m_spatial_bins_x; }
int get_spatial_bins_y() const { return m_spatial_bins_y; }
const std::string& get_mode() const { return m_mode; }
private:
size_t m_output_dim;
size_t m_group_size;
float m_spatial_scale;
int m_spatial_bins_x;
int m_spatial_bins_y;
std::string m_mode;
};
size_t get_output_dim() const { return m_output_dim; }
size_t get_group_size() const { return m_group_size; }
float get_spatial_scale() const { return m_spatial_scale; }
int get_spatial_bins_x() const { return m_spatial_bins_x; }
int get_spatial_bins_y() const { return m_spatial_bins_y; }
const std::string& get_mode() const { return m_mode; }
private:
size_t m_output_dim;
size_t m_group_size;
float m_spatial_scale;
int m_spatial_bins_x;
int m_spatial_bins_y;
std::string m_mode;
};
}
using v0::PSROIPooling;
}
}
......@@ -22,58 +22,63 @@ namespace ngraph
{
namespace op
{
class NGRAPH_API RegionYolo : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"RegionYolo", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
RegionYolo() = default;
///
/// \brief Constructs a RegionYolo operation
///
/// \param[in] input Input
/// \param[in] num_coords Number of coordinates for each region
/// \param[in] num_classes Number of classes for each region
/// \param[in] num_regions Number of regions
/// \param[in] do_softmax Compute softmax
/// \param[in] mask Mask
/// \param[in] axis Axis to begin softmax on
/// \param[in] end_axis Axis to end softmax on
/// \param[in] anchors A flattened list of pairs `[width, height]` that describes
/// prior box sizes.
///
RegionYolo(const Output<Node>& input,
const size_t num_coords,
const size_t num_classes,
const size_t num_regions,
const bool do_softmax,
const std::vector<int64_t>& mask,
const int axis,
const int end_axis,
const std::vector<float>& anchors = std::vector<float>{});
class NGRAPH_API RegionYolo : public Op
{
public:
static constexpr NodeTypeInfo type_info{"RegionYolo", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
RegionYolo() = default;
///
/// \brief Constructs a RegionYolo operation
///
/// \param[in] input Input
/// \param[in] num_coords Number of coordinates for each region
/// \param[in] num_classes Number of classes for each region
/// \param[in] num_regions Number of regions
/// \param[in] do_softmax Compute softmax
/// \param[in] mask Mask
/// \param[in] axis Axis to begin softmax on
/// \param[in] end_axis Axis to end softmax on
/// \param[in] anchors A flattened list of pairs `[width, height]` that
/// describes
/// prior box sizes.
///
RegionYolo(const Output<Node>& input,
const size_t num_coords,
const size_t num_classes,
const size_t num_regions,
const bool do_softmax,
const std::vector<int64_t>& mask,
const int axis,
const int end_axis,
const std::vector<float>& anchors = std::vector<float>{});
void validate_and_infer_types() override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
size_t get_num_coords() const { return m_num_coords; }
size_t get_num_classes() const { return m_num_classes; }
size_t get_num_regions() const { return m_num_regions; }
bool get_do_softmax() const { return m_do_softmax; }
const std::vector<int64_t>& get_mask() const { return m_mask; }
const std::vector<float>& get_anchors() const { return m_anchors; }
int get_axis() const { return m_axis; }
int get_end_axis() const { return m_end_axis; }
private:
size_t m_num_coords;
size_t m_num_classes;
size_t m_num_regions;
bool m_do_softmax;
std::vector<int64_t> m_mask;
std::vector<float> m_anchors{};
int m_axis;
int m_end_axis;
};
size_t get_num_coords() const { return m_num_coords; }
size_t get_num_classes() const { return m_num_classes; }
size_t get_num_regions() const { return m_num_regions; }
bool get_do_softmax() const { return m_do_softmax; }
const std::vector<int64_t>& get_mask() const { return m_mask; }
const std::vector<float>& get_anchors() const { return m_anchors; }
int get_axis() const { return m_axis; }
int get_end_axis() const { return m_end_axis; }
private:
size_t m_num_coords;
size_t m_num_classes;
size_t m_num_regions;
bool m_do_softmax;
std::vector<int64_t> m_mask;
std::vector<float> m_anchors{};
int m_axis;
int m_end_axis;
};
}
using v0::RegionYolo;
}
}
......@@ -22,26 +22,30 @@ namespace ngraph
{
namespace op
{
class NGRAPH_API ReorgYolo : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"ReorgYolo", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
ReorgYolo() = default;
/// \brief Constructs a ReorgYolo operation
///
/// \param input Input
/// \param strides Stride to reorganize input by
ReorgYolo(const Output<Node>& input, const Strides& strides);
class NGRAPH_API ReorgYolo : public Op
{
public:
static constexpr NodeTypeInfo type_info{"ReorgYolo", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
ReorgYolo() = default;
/// \brief Constructs a ReorgYolo operation
///
/// \param input Input
/// \param strides Stride to reorganize input by
ReorgYolo(const Output<Node>& input, const Strides& strides);
void validate_and_infer_types() override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
const Strides get_strides() const { return m_strides; }
private:
Strides m_strides;
};
const Strides get_strides() const { return m_strides; }
private:
Strides m_strides;
};
}
using v0::ReorgYolo;
}
}
......@@ -22,37 +22,41 @@ namespace ngraph
{
namespace op
{
class NGRAPH_API ROIPooling : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"ROIPooling", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
ROIPooling() = default;
/// \brief Constructs a ROIPooling operation
///
/// \param input Input feature map {N, C, ...}
/// \param coords Coordinates of bounding boxes
/// \param output_size Height/Width of ROI output features
/// \param spatial_scale Ratio of input feature map over input image size
/// \param method Method of pooling - Max or Bilinear
ROIPooling(const Output<Node>& input,
const Output<Node>& coords,
const Shape& output_size,
const float spatial_scale,
const std::string& method);
class NGRAPH_API ROIPooling : public Op
{
public:
static constexpr NodeTypeInfo type_info{"ROIPooling", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
ROIPooling() = default;
/// \brief Constructs a ROIPooling operation
///
/// \param input Input feature map {N, C, ...}
/// \param coords Coordinates of bounding boxes
/// \param output_size Height/Width of ROI output features
/// \param spatial_scale Ratio of input feature map over input image size
/// \param method Method of pooling - Max or Bilinear
ROIPooling(const Output<Node>& input,
const Output<Node>& coords,
const Shape& output_size,
const float spatial_scale,
const std::string& method);
void validate_and_infer_types() override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
const Shape& get_output_size() const { return m_output_size; }
float get_spatial_scale() const { return m_spatial_scale; }
const std::string& get_method() const { return m_method; }
private:
Shape m_output_size;
float m_spatial_scale;
std::string m_method;
};
const Shape& get_output_size() const { return m_output_size; }
float get_spatial_scale() const { return m_spatial_scale; }
const std::string& get_method() const { return m_method; }
private:
Shape m_output_size;
float m_spatial_scale;
std::string m_method;
};
}
using v0::ROIPooling;
}
}
......@@ -23,29 +23,35 @@ namespace ngraph
{
namespace op
{
/// \brief Range operation, analogous to `range()` in Python.
class NGRAPH_API Range : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Range", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an unitialized range operation.
Range() = default;
/// \brief Range operation, analogous to `range()` in Python.
class NGRAPH_API Range : public Op
{
public:
static constexpr NodeTypeInfo type_info{"Range", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an unitialized range operation.
Range() = default;
/// \brief Constructs a range operation.
///
/// \param start The tensor producing the start value. Must be a scalar of integer
/// element type, and same element type as `stop` and `step`.
/// \param stop The tensor producing the stop value. Must be a scalar of integer
/// element type, and same element type as `start` and `step`.
/// \param step The tensor producing the step value. Must be a scalar of integer
/// element type, and same element type as `start` and `stop`.
Range(const Output<Node>& start, const Output<Node>& stop, const Output<Node>& step);
/// \brief Constructs a range operation.
///
/// \param start The tensor producing the start value. Must be a scalar of integer
/// element type, and same element type as `stop` and `step`.
/// \param stop The tensor producing the stop value. Must be a scalar of integer
/// element type, and same element type as `start` and `step`.
/// \param step The tensor producing the step value. Must be a scalar of integer
/// element type, and same element type as `start` and `stop`.
Range(const Output<Node>& start,
const Output<Node>& stop,
const Output<Node>& step);
void validate_and_infer_types() override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::Range;
}
}
......@@ -22,20 +22,24 @@ namespace ngraph
{
namespace op
{
/// \brief Operation that returns the shape of its input argument as a tensor.
class NGRAPH_API ShapeOf : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"ShapeOf", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
ShapeOf() = default;
/// \brief Constructs a shape-of operation.
ShapeOf(const Output<Node>& arg);
/// \brief Operation that returns the shape of its input argument as a tensor.
class NGRAPH_API ShapeOf : public Op
{
public:
static constexpr NodeTypeInfo type_info{"ShapeOf", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
ShapeOf() = default;
/// \brief Constructs a shape-of operation.
ShapeOf(const Output<Node>& arg);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
void validate_and_infer_types() override;
};
void validate_and_infer_types() override;
};
}
using v0::ShapeOf;
}
}
......@@ -22,28 +22,32 @@ namespace ngraph
{
namespace op
{
/// \brief Dynamic Tiling operation which repeats a tensor multiple times
/// along each dimension
class NGRAPH_API Tile : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Tile", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Tile() = default;
/// \brief Perform dynamic padding of a tensor
///
/// \param data The node producing input tensor to be padded.
/// \param repeats The node producing the per-dimension replication factor
Tile(const Output<Node>& data, const Output<Node>& repeats);
/// \brief Dynamic Tiling operation which repeats a tensor multiple times
/// along each dimension
class NGRAPH_API Tile : public Op
{
public:
static constexpr NodeTypeInfo type_info{"Tile", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Tile() = default;
/// \brief Perform dynamic padding of a tensor
///
/// \param data The node producing input tensor to be padded.
/// \param repeats The node producing the per-dimension replication factor
Tile(const Output<Node>& data, const Output<Node>& repeats);
void validate_and_infer_types() override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
}
using v0::Tile;
}
}
......@@ -24,30 +24,34 @@ namespace ngraph
{
namespace op
{
/// \brief Tensor transpose operation.
class NGRAPH_API Transpose : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Transpose", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Transpose() = default;
/// \brief Constructs a transpose operation.
///
/// \param arg Node producing the tensor to be transposed.
/// \param input_order Node producing the permutation to apply to the axes of the
/// input shape. Must be a vector of element type element::i64,
/// with shape [n], where n is the rank of arg. The tensor's
/// value must contain every integer in the range [0,n-1].
Transpose(const Output<Node>& arg, const Output<Node>& input_order);
/// \brief Tensor transpose operation.
class NGRAPH_API Transpose : public Op
{
public:
static constexpr NodeTypeInfo type_info{"Transpose", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Transpose() = default;
/// \brief Constructs a transpose operation.
///
/// \param arg Node producing the tensor to be transposed.
/// \param input_order Node producing the permutation to apply to the axes of the
/// input shape. Must be a vector of element type element::i64,
/// with shape [n], where n is the rank of arg. The tensor's
/// value must contain every integer in the range [0,n-1].
Transpose(const Output<Node>& arg, const Output<Node>& input_order);
void validate_and_infer_types() override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
}
using v0::Transpose;
}
}
......@@ -22,21 +22,25 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise floor operation.
class NGRAPH_API Floor : public util::UnaryElementwiseArithmetic
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Floor", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a floor operation.
Floor() = default;
/// \brief Constructs a floor operation.
///
/// \param arg Node that produces the input tensor.
Floor(const Output<Node>& arg);
/// \brief Elementwise floor operation.
class NGRAPH_API Floor : public util::UnaryElementwiseArithmetic
{
public:
static constexpr NodeTypeInfo type_info{"Floor", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a floor operation.
Floor() = default;
/// \brief Constructs a floor operation.
///
/// \param arg Node that produces the input tensor.
Floor(const Output<Node>& arg);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::Floor;
}
}
......@@ -33,10 +33,9 @@ namespace ngraph
/// For example, for `a` with shape `(batch_size, n, k)`, and `b` with
/// shape `(batch_size, k, m)`, the result of BatchMatMul will have shape
/// `(batch_size, n, m)`, and `BatchMatMulTranspose(a, b)[i] = Dot(a[i], b[i])`.
class BatchMatMulTranspose : public ngraph::op::util::FusedOp
class NGRAPH_API BatchMatMulTranspose : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static constexpr NodeTypeInfo type_info{"BatchMatMulTranspose", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
BatchMatMulTranspose() = default;
......
......@@ -24,36 +24,40 @@ namespace ngraph
{
namespace op
{
/// \brief Performs a clipping operation on all elements of the input node
///
/// All input values that are outside of the <min;max> range are set to 'min' or 'max'
/// depending on which side of the <min;max> range they are. The values that fall into
/// this range remain unchanged.
class NGRAPH_API Clamp : public ngraph::op::util::FusedOp
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Clamp", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Clamp() = default;
/// \brief Constructs a Clamp node.
/// \brief Performs a clipping operation on all elements of the input node
///
/// \param data - Node producing the input tensor
/// \param min - the lower bound of the <min;max> range
/// \param max - the upper bound of the <min;max> range
Clamp(const Output<Node>& data, const double min, const double max);
/// All input values that are outside of the <min;max> range are set to 'min' or 'max'
/// depending on which side of the <min;max> range they are. The values that fall into
/// this range remain unchanged.
class NGRAPH_API Clamp : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"Clamp", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Clamp() = default;
/// \brief Constructs a Clamp node.
///
/// \param data - Node producing the input tensor
/// \param min - the lower bound of the <min;max> range
/// \param max - the upper bound of the <min;max> range
Clamp(const Output<Node>& data, const double min, const double max);
void pre_validate_and_infer_types() override;
void pre_validate_and_infer_types() override;
virtual NodeVector decompose_op() const override;
virtual NodeVector decompose_op() const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
double get_min() const { return m_min; }
double get_max() const { return m_max; }
private:
double m_min;
double m_max;
};
double get_min() const { return m_min; }
double get_max() const { return m_max; }
private:
double m_min;
double m_max;
};
}
using v0::Clamp;
}
}
This diff is collapsed.
......@@ -24,10 +24,9 @@ namespace ngraph
{
namespace op
{
class CrossEntropy : public ngraph::op::util::FusedOp
class NGRAPH_API CrossEntropy : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static constexpr NodeTypeInfo type_info{"CrossEntropy", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
CrossEntropy() = default;
......@@ -57,10 +56,9 @@ namespace ngraph
int64_t m_ignore_index;
};
class CrossEntropyBackprop : public util::FusedOp
class NGRAPH_API CrossEntropyBackprop : public util::FusedOp
{
public:
NGRAPH_API
static constexpr NodeTypeInfo type_info{"CrossEntropyBackprop", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
CrossEntropyBackprop() = default;
......
......@@ -24,52 +24,57 @@ namespace ngraph
{
namespace op
{
/// \brief DepthToSpace permutes data from the depth dimension of the input blob into
/// spatial dimensions.
///
/// \note Values from the depth dimension (assuming NCHW layout) are moved in
/// spatial blocks to the height and width dimensions.
///
/// Output node produces a tensor with shape:
/// [N, C/(blocksize * blocksize), H * blocksize, W * blocksize]
class NGRAPH_API DepthToSpace : public ngraph::op::util::FusedOp
namespace v0
{
public:
enum class DepthToSpaceMode
/// \brief DepthToSpace permutes data from the depth dimension of the input blob into
/// spatial dimensions.
///
/// \note Values from the depth dimension (assuming NCHW layout) are moved in
/// spatial blocks to the height and width dimensions.
///
/// Output node produces a tensor with shape:
/// [N, C/(blocksize * blocksize), H * blocksize, W * blocksize]
class NGRAPH_API DepthToSpace : public ngraph::op::util::FusedOp
{
// The input depth is divided to [block_size, ..., block_size, new_depth]
BLOCKS_FIRST,
// The input depth is divided to [new_depth, block_size, ..., block_size]
DEPTH_FIRST
};
public:
enum class DepthToSpaceMode
{
// The input depth is divided to [block_size, ..., block_size, new_depth]
BLOCKS_FIRST,
// The input depth is divided to [new_depth, block_size, ..., block_size]
DEPTH_FIRST
};
static constexpr NodeTypeInfo type_info{"DepthToSpace", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
DepthToSpace() = default;
/// \brief Constructs a DepthToSpace operation.
///
/// \param data Node producing the input tensor
/// \param mode Specifies how the input depth dimension is split to block coordinates
/// \param block_size The size of the block of values to be moved
DepthToSpace(const Output<Node>& data,
const DepthToSpaceMode& mode,
std::size_t block_size = 1);
static constexpr NodeTypeInfo type_info{"DepthToSpace", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
DepthToSpace() = default;
/// \brief Constructs a DepthToSpace operation.
///
/// \param data Node producing the input tensor
/// \param mode Specifies how the input depth dimension is split to block
/// coordinates
/// \param block_size The size of the block of values to be moved
DepthToSpace(const Output<Node>& data,
const DepthToSpaceMode& mode,
std::size_t block_size = 1);
DepthToSpace(const Output<Node>& data,
const std::string& mode,
std::size_t block_size = 1);
DepthToSpace(const Output<Node>& data,
const std::string& mode,
std::size_t block_size = 1);
std::size_t get_block_size() const { return m_blocksize; }
DepthToSpaceMode get_mode() const { return m_mode; }
virtual NodeVector decompose_op() const override;
std::size_t get_block_size() const { return m_blocksize; }
DepthToSpaceMode get_mode() const { return m_mode; }
virtual NodeVector decompose_op() const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
protected:
std::size_t m_blocksize;
DepthToSpaceMode m_mode;
DepthToSpaceMode mode_from_string(const std::string& mode) const;
};
protected:
std::size_t m_blocksize;
DepthToSpaceMode m_mode;
DepthToSpaceMode mode_from_string(const std::string& mode) const;
};
}
using v0::DepthToSpace;
}
}
......@@ -24,30 +24,34 @@ namespace ngraph
{
namespace op
{
/// \brief Exponential Linear Unit
/// x < 0 => f(x) = alpha * (exp(x) - 1.)
/// x >= 0 => f(x) = x
///
class NGRAPH_API Elu : public ngraph::op::util::FusedOp
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Elu", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Elu() = default;
/// \brief Constructs an Elu operation.
/// \brief Exponential Linear Unit
/// x < 0 => f(x) = alpha * (exp(x) - 1.)
/// x >= 0 => f(x) = x
///
/// \param data Input tensor
/// \param alpha Multiplier for negative values
Elu(const Output<Node>& data, const double alpha);
class NGRAPH_API Elu : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"Elu", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Elu() = default;
/// \brief Constructs an Elu operation.
///
/// \param data Input tensor
/// \param alpha Multiplier for negative values
Elu(const Output<Node>& data, const double alpha);
virtual NodeVector decompose_op() const override;
virtual NodeVector decompose_op() const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
double get_alpha() const { return m_alpha; }
private:
double m_alpha;
};
double get_alpha() const { return m_alpha; }
private:
double m_alpha;
};
}
using v0::Elu;
} // namespace op
} // namespace ngraph
......@@ -25,63 +25,67 @@ namespace ngraph
{
namespace op
{
///
/// \brief Class performing element-wise linear quantization.
///
/// \note Input floating point values are quantized into a discrete
/// set of floating point values.
///
/// \paragraph Implementation This class creates a node which performs the following
/// operation:
///
/// round((data - input_low) / (input_high - input_low) * (levels-1)) /
/// (levels-1) * (output_high - output_low) + output_low
///
///
class NGRAPH_API FakeQuantize : public ngraph::op::util::FusedOp
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"FakeQuantize", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
FakeQuantize() = default;
///
/// \brief Constructs a FakeQuantize operation node.
/// \brief Class performing element-wise linear quantization.
///
/// \param[in] data The input data tensor.
/// \param[in] input_low The minimum limit for input values.
/// \param[in] input_high The maximum limit for input values.
/// \param[in] output_low The minimum quantized value.
/// \param[in] output_high The maximum quantized value.
/// \param[in] levels The number of quantization levels.
/// \param[in] auto_broadcast AutoBroadcast mode to be used for broadcasting
/// limit values
/// \note Input floating point values are quantized into a discrete
/// set of floating point values.
///
FakeQuantize(const Output<Node>& data,
const Output<Node>& input_low,
const Output<Node>& input_high,
const Output<Node>& output_low,
const Output<Node>& output_high,
std::size_t levels,
const AutoBroadcastSpec& auto_broadcast =
AutoBroadcastSpec(AutoBroadcastType::NUMPY));
/// \paragraph Implementation This class creates a node which performs the following
/// operation:
///
/// round((data - input_low) / (input_high - input_low) * (levels-1)) /
/// (levels-1) * (output_high - output_low) + output_low
///
///
class NGRAPH_API FakeQuantize : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"FakeQuantize", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
FakeQuantize() = default;
///
/// \brief Constructs a FakeQuantize operation node.
///
/// \param[in] data The input data tensor.
/// \param[in] input_low The minimum limit for input values.
/// \param[in] input_high The maximum limit for input values.
/// \param[in] output_low The minimum quantized value.
/// \param[in] output_high The maximum quantized value.
/// \param[in] levels The number of quantization levels.
/// \param[in] auto_broadcast AutoBroadcast mode to be used for broadcasting
/// limit values
///
FakeQuantize(const Output<Node>& data,
const Output<Node>& input_low,
const Output<Node>& input_high,
const Output<Node>& output_low,
const Output<Node>& output_high,
std::size_t levels,
const AutoBroadcastSpec& auto_broadcast =
AutoBroadcastSpec(AutoBroadcastType::NUMPY));
virtual NodeVector decompose_op() const override;
virtual void pre_validate_and_infer_types() override;
virtual NodeVector decompose_op() const override;
virtual void pre_validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
std::size_t get_levels() const { return m_levels; }
void set_levels(std::size_t levels) { m_levels = levels; }
const AutoBroadcastSpec& get_auto_broadcast() const { return m_auto_broadcast; }
void set_auto_broadcast(const AutoBroadcastSpec& auto_broadcast)
{
m_auto_broadcast = auto_broadcast;
}
std::size_t get_levels() const { return m_levels; }
void set_levels(std::size_t levels) { m_levels = levels; }
const AutoBroadcastSpec& get_auto_broadcast() const { return m_auto_broadcast; }
void set_auto_broadcast(const AutoBroadcastSpec& auto_broadcast)
{
m_auto_broadcast = auto_broadcast;
}
private:
std::size_t m_levels;
AutoBroadcastSpec m_auto_broadcast;
};
private:
std::size_t m_levels;
AutoBroadcastSpec m_auto_broadcast;
};
}
using v0::FakeQuantize;
}
}
......@@ -24,47 +24,52 @@ namespace ngraph
{
namespace op
{
/// \brief Gaussian Error Linear Unit
/// f(x) = 0.5 * x * (1 + erf( x / sqrt(2) )
class NGRAPH_API Gelu : public ngraph::op::util::FusedOp
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Gelu", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Gelu() = default;
/// \brief Constructs an Gelu operation.
///
/// \param data Input tensor
Gelu(const Output<Node>& data);
/// \brief Gaussian Error Linear Unit
/// f(x) = 0.5 * x * (1 + erf( x / sqrt(2) )
class NGRAPH_API Gelu : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"Gelu", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Gelu() = default;
/// \brief Constructs an Gelu operation.
///
/// \param data Input tensor
Gelu(const Output<Node>& data);
virtual NodeVector decompose_op() const override;
virtual NodeVector decompose_op() const override;
void pre_validate_and_infer_types() override;
void pre_validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
/// \brief Backprop for Gelu(x) is GeluBackprop(x) * delta
class NGRAPH_API GeluBackpropFactor : public util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"GeluBackpropFactor", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
GeluBackpropFactor() = default;
/// \brief Backprop for Gelu(x) is GeluBackprop(x) * delta
class NGRAPH_API GeluBackpropFactor : public util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"GeluBackpropFactor", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
GeluBackpropFactor() = default;
GeluBackpropFactor(const Output<Node>& x);
GeluBackpropFactor(const Output<Node>& x);
virtual NodeVector decompose_op() const override;
virtual NodeVector decompose_op() const override;
void pre_validate_and_infer_types() override;
void pre_validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::Gelu;
using v0::GeluBackpropFactor;
}
}
......@@ -24,53 +24,57 @@ namespace ngraph
{
namespace op
{
/// \brief Operator performing General Matrix multiplication.
///
/// \note More information:
/// https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3
///
/// A' = transpose(A) if transA else A
/// B' = transpose(B) if transB else B
///
/// Compute Y = alpha * A' * B' + beta * C
///
class NGRAPH_API Gemm : public ngraph::op::util::FusedOp
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Gemm", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Gemm() = default;
/// \brief Constructs an Gemm operation.
/// \brief Operator performing General Matrix multiplication.
///
/// \param A Input tensor A
/// \param B Input tensor B
/// \param C Input tensor C
/// \param alpha Scalar multiplier for the product of input tensors A * B
/// \param beta Scalar multiplier for input tensor C
/// \param transA Whether A should be transposed
/// \param transB Whether B should be transposed
Gemm(const Output<Node>& A,
const Output<Node>& B,
const Output<Node>& C,
double alpha = 1.0,
double beta = 1.0,
bool transA = false,
bool transB = false);
/// \note More information:
/// https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3
///
/// A' = transpose(A) if transA else A
/// B' = transpose(B) if transB else B
///
/// Compute Y = alpha * A' * B' + beta * C
///
class NGRAPH_API Gemm : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"Gemm", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Gemm() = default;
/// \brief Constructs an Gemm operation.
///
/// \param A Input tensor A
/// \param B Input tensor B
/// \param C Input tensor C
/// \param alpha Scalar multiplier for the product of input tensors A * B
/// \param beta Scalar multiplier for input tensor C
/// \param transA Whether A should be transposed
/// \param transB Whether B should be transposed
Gemm(const Output<Node>& A,
const Output<Node>& B,
const Output<Node>& C,
double alpha = 1.0,
double beta = 1.0,
bool transA = false,
bool transB = false);
virtual NodeVector decompose_op() const override;
virtual NodeVector decompose_op() const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
double get_alpha() const { return m_alpha; }
double get_beta() const { return m_beta; }
bool get_transA() const { return m_transA; }
bool get_transB() const { return m_transB; }
private:
double m_alpha;
double m_beta;
bool m_transA;
bool m_transB;
};
double get_alpha() const { return m_alpha; }
double get_beta() const { return m_beta; }
bool get_transA() const { return m_transA; }
bool get_transB() const { return m_transB; }
private:
double m_alpha;
double m_beta;
bool m_transA;
bool m_transB;
};
}
using v0::Gemm;
}
}
......@@ -25,30 +25,34 @@ namespace ngraph
{
namespace op
{
/// \brief Global Response Normalization with L2 norm (across channels only).
///
class NGRAPH_API GRN : public ngraph::op::util::FusedOp
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"GRN", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
GRN() = default;
/// \brief Constructs a GRN operation.
/// \brief Global Response Normalization with L2 norm (across channels only).
///
/// \param data - Node producing the input tensor
/// \param bias - The bias added to the variance.
///
GRN(const Output<Node>& data, float bias);
class NGRAPH_API GRN : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"GRN", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
GRN() = default;
/// \brief Constructs a GRN operation.
///
/// \param data - Node producing the input tensor
/// \param bias - The bias added to the variance.
///
GRN(const Output<Node>& data, float bias);
float get_bias() const { return m_bias; }
virtual void pre_validate_and_infer_types() override;
virtual NodeVector decompose_op() const override;
float get_bias() const { return m_bias; }
virtual void pre_validate_and_infer_types() override;
virtual NodeVector decompose_op() const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
protected:
float m_bias = 1.0f;
};
protected:
float m_bias = 1.0f;
};
}
using v0::GRN;
}
}
......@@ -25,64 +25,74 @@ namespace ngraph
{
namespace op
{
/// \brief Group Convolution
class NGRAPH_API GroupConvolution : public ngraph::op::util::FusedOp
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"GroupConvolution", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
GroupConvolution() = default;
GroupConvolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const size_t groups,
const PadType& pad_type = PadType::EXPLICIT);
/// \brief Group Convolution
class NGRAPH_API GroupConvolution : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"GroupConvolution", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
GroupConvolution() = default;
GroupConvolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const size_t groups,
const PadType& pad_type = PadType::EXPLICIT);
// constructor which accept groups included in filters shape.
GroupConvolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const PadType& pad_type = PadType::EXPLICIT);
Shape get_weights_dimensions() const;
const Strides& get_window_movement_strides() const { return m_window_movement_strides; }
const Strides& get_window_dilation_strides() const { return m_window_dilation_strides; }
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
Output<Node> get_filters() { return input_value(1); }
Output<Node> get_data_batch() { return input_value(0); }
size_t get_groups() const;
const PadType& get_pad_type() const { return m_pad_type; }
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
// constructor which accept groups included in filters shape.
GroupConvolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const PadType& pad_type = PadType::EXPLICIT);
Shape get_weights_dimensions() const;
const Strides& get_window_movement_strides() const
{
return m_window_movement_strides;
}
const Strides& get_window_dilation_strides() const
{
return m_window_dilation_strides;
}
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
Output<Node> get_filters() { return input_value(1); }
Output<Node> get_data_batch() { return input_value(0); }
size_t get_groups() const;
const PadType& get_pad_type() const { return m_pad_type; }
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual NodeVector decompose_op() const override;
virtual NodeVector decompose_op() const override;
virtual void pre_validate_and_infer_types() override;
virtual void post_validate_and_infer_types() override;
virtual void pre_validate_and_infer_types() override;
virtual void post_validate_and_infer_types() override;
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
protected:
Strides m_window_movement_strides;
Strides m_window_dilation_strides;
CoordinateDiff m_padding_below;
CoordinateDiff m_padding_above;
Strides m_data_dilation_strides;
Dimension m_groups;
PadType m_pad_type{PadType::NOTSET};
protected:
Strides m_window_movement_strides;
Strides m_window_dilation_strides;
CoordinateDiff m_padding_below;
CoordinateDiff m_padding_above;
Strides m_data_dilation_strides;
Dimension m_groups;
PadType m_pad_type{PadType::NOTSET};
private:
bool has_groups_in_filters_shape() const;
};
private:
bool has_groups_in_filters_shape() const;
};
}
using v0::GroupConvolution;
}
}
This diff is collapsed.
......@@ -24,30 +24,34 @@ namespace ngraph
{
namespace op
{
/// \brief Parameterized, bounded sigmoid-like, piecewise linear
/// function. min(max(alpha*x + beta, 0), 1)
///
class NGRAPH_API HardSigmoid : public ngraph::op::util::FusedOp
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"HardSigmoid", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
HardSigmoid() = default;
/// \brief Constructs a HardSigmoid operation.
///
/// \param data Input tensor.
/// \param[in] alpha A scalar value representing the alpha parameter.
/// \param[in] beta A scalar value representing the beta parameter.
/// \brief Parameterized, bounded sigmoid-like, piecewise linear
/// function. min(max(alpha*x + beta, 0), 1)
///
HardSigmoid(const Output<Node>& data,
const Output<Node>& alpha,
const Output<Node>& beta);
class NGRAPH_API HardSigmoid : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"HardSigmoid", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
HardSigmoid() = default;
/// \brief Constructs a HardSigmoid operation.
///
/// \param data Input tensor.
/// \param[in] alpha A scalar value representing the alpha parameter.
/// \param[in] beta A scalar value representing the beta parameter.
///
HardSigmoid(const Output<Node>& data,
const Output<Node>& alpha,
const Output<Node>& beta);
virtual void pre_validate_and_infer_types() override;
virtual NodeVector decompose_op() const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
virtual void pre_validate_and_infer_types() override;
virtual NodeVector decompose_op() const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
}
using v0::HardSigmoid;
}
}
This diff is collapsed.
......@@ -24,27 +24,31 @@ namespace ngraph
{
namespace op
{
/// \brief LogSoftmax operation
class NGRAPH_API LogSoftmax : public ngraph::op::util::FusedOp
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"LogSoftmax", 0};
LogSoftmax() = default;
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a LogSoftmax node.
///
/// \param data Node that produces the first input tensor
/// \param axis Describes the axis of the inputs when coerced to 2D
LogSoftmax(const Output<Node>& data, int64_t axis);
/// \brief LogSoftmax operation
class NGRAPH_API LogSoftmax : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"LogSoftmax", 0};
LogSoftmax() = default;
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a LogSoftmax node.
///
/// \param data Node that produces the first input tensor
/// \param axis Describes the axis of the inputs when coerced to 2D
LogSoftmax(const Output<Node>& data, int64_t axis);
virtual NodeVector decompose_op() const override;
virtual NodeVector decompose_op() const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
int64_t get_axis() const { return m_axis; }
protected:
int64_t m_axis;
};
int64_t get_axis() const { return m_axis; }
protected:
int64_t m_axis;
};
}
using v0::LogSoftmax;
} // namespace op
} // namespace ngraph
This diff is collapsed.
This diff is collapsed.
......@@ -24,36 +24,40 @@ namespace ngraph
{
namespace op
{
/// \brief Operator performing Matrix Multiplication.
class NGRAPH_API MatMul : public ngraph::op::util::FusedOp
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"MatMul", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
MatMul() = default;
/// \brief Constructs an ScaleShift operation.
///
/// \param A Matrix A
/// \param B Matrix B
/// \param transpose_a If matrix A should be transposed.
/// \param transpose_b If matrix B should be transposed.
MatMul(const Output<Node>& A,
const Output<Node>& B,
const bool& transpose_a = 0,
const bool& transpose_b = 0);
/// \brief Operator performing Matrix Multiplication.
class NGRAPH_API MatMul : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"MatMul", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
MatMul() = default;
/// \brief Constructs an ScaleShift operation.
///
/// \param A Matrix A
/// \param B Matrix B
/// \param transpose_a If matrix A should be transposed.
/// \param transpose_b If matrix B should be transposed.
MatMul(const Output<Node>& A,
const Output<Node>& B,
const bool& transpose_a = 0,
const bool& transpose_b = 0);
virtual void pre_validate_and_infer_types() override;
virtual void pre_validate_and_infer_types() override;
virtual NodeVector decompose_op() const override;
virtual NodeVector decompose_op() const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
bool get_transpose_a() const { return m_transpose_a; }
bool get_transpose_b() const { return m_transpose_b; }
private:
bool m_transpose_a;
bool m_transpose_b;
};
bool get_transpose_a() const { return m_transpose_a; }
bool get_transpose_b() const { return m_transpose_b; }
private:
bool m_transpose_a;
bool m_transpose_b;
};
}
using v0::MatMul;
} // namespace op
} // namespace ngraph
......@@ -25,15 +25,17 @@
using namespace std;
using namespace ngraph;
constexpr NodeTypeInfo op::Mod::type_info;
constexpr NodeTypeInfo op::v1::Mod::type_info;
op::Mod::Mod(const Output<Node>& A, const Output<Node>& B, const AutoBroadcastSpec& auto_broadcast)
op::v1::Mod::Mod(const Output<Node>& A,
const Output<Node>& B,
const AutoBroadcastSpec& auto_broadcast)
: FusedOp({A, B})
, m_auto_broadcast(auto_broadcast)
{
}
NodeVector op::Mod::decompose_op() const
NodeVector op::v1::Mod::decompose_op() const
{
const auto dividend = make_shared<op::Abs>(input_value(0));
const auto dividend_sign = make_shared<op::Sign>(input_value(0));
......@@ -53,7 +55,7 @@ NodeVector op::Mod::decompose_op() const
return {make_shared<op::v1::Multiply>(dividend_sign, mod, m_auto_broadcast)};
}
shared_ptr<Node> op::Mod::copy_with_new_args(const NodeVector& new_args) const
shared_ptr<Node> op::v1::Mod::copy_with_new_args(const NodeVector& new_args) const
{
return make_shared<Mod>(new_args.at(0), new_args.at(1), m_auto_broadcast);
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -26,15 +26,15 @@
using namespace std;
using namespace ngraph;
constexpr NodeTypeInfo op::v1::Selu::type_info;
constexpr NodeTypeInfo op::v0::Selu::type_info;
op::v1::Selu::Selu(const Output<Node>& data, const Output<Node>& alpha, const Output<Node>& lambda)
op::v0::Selu::Selu(const Output<Node>& data, const Output<Node>& alpha, const Output<Node>& lambda)
: FusedOp({data, alpha, lambda})
{
constructor_validate_and_infer_types();
}
NodeVector op::v1::Selu::decompose_op() const
NodeVector op::v0::Selu::decompose_op() const
{
const auto data = input_value(0);
const auto alpha = input_value(1);
......@@ -47,8 +47,8 @@ NodeVector op::v1::Selu::decompose_op() const
alpha)};
}
shared_ptr<Node> op::v1::Selu::copy_with_new_args(const NodeVector& new_args) const
shared_ptr<Node> op::v0::Selu::copy_with_new_args(const NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<v1::Selu>(new_args.at(0), new_args.at(1), new_args.at(2));
return make_shared<v0::Selu>(new_args.at(0), new_args.at(1), new_args.at(2));
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -58,7 +58,6 @@ namespace ngraph
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a less-than-or-equal operation.
LessEq() = default;
/// \brief Constructs a less-than-or-equal operation.
///
/// \param arg0 Node that produces the first input tensor.
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment