Unverified commit 1011a992, authored by Robert Kimball, committed by GitHub

Move ops in ngraph::op namespace to ngraph::op::v0 (#4377)

* Move op to v0 namespace

* BatchMatMul

* BatchNorm*

* CompiledKernel

* Constant

* Fix more

* More

* Fix Quantized*

* fix last v0 ops

* fix compile error

* fix build error

* Fix GPU build

* Fix build error
Co-authored-by: Scott Cyphers <diyessi@users.noreply.github.com>
parent 7018f9ca
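
The diff below applies one mechanical pattern to every affected header: the op class is wrapped in a new namespace v0 block, and a using v0::X; declaration re-exports it under its old name so existing code that spells ngraph::op::X keeps compiling. A minimal sketch of the pattern, using a hypothetical op Foo rather than any real nGraph class:

    // Before: the op lives directly in ngraph::op.
    namespace ngraph { namespace op {
        class Foo {};
    }}

    // After: the op lives in ngraph::op::v0, re-exported under the old name.
    namespace ngraph { namespace op {
        namespace v0 {
            class Foo {};
        }
        using v0::Foo; // ngraph::op::Foo still names ngraph::op::v0::Foo
    }}

Because the using-declaration aliases the same entity, call sites such as std::make_shared<ngraph::op::Foo>() are unaffected.
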
@@ -23,6 +23,7 @@
 #include "ngraph/check.hpp"
 #include "ngraph/descriptor/tensor.hpp"
 #include "ngraph/node.hpp"
+#include "ngraph/op/experimental/compiled_kernel.hpp"

 #include <mlir/IR/Builders.h>
 #include <mlir/IR/Module.h>
@@ -42,10 +43,6 @@ namespace ngraph
     {
         class Type;
    }
-    namespace op
-    {
-        class CompiledKernel;
-    }
     namespace runtime
     {
         namespace ngmlir
...
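
This first hunk also shows why the forward declaration had to become a full include: once CompiledKernel lives in op::v0 and is re-exported by a using-declaration, re-declaring namespace op { class CompiledKernel; } would introduce a second, conflicting declaration of the same name. A hypothetical reduction of the conflict:

    namespace op
    {
        namespace v0 { class CompiledKernel; }
        using v0::CompiledKernel; // op::CompiledKernel now refers to op::v0::CompiledKernel
    }
    namespace op
    {
        class CompiledKernel; // error: conflicts with the using-declaration above
    }

Including "ngraph/op/experimental/compiled_kernel.hpp" sidesteps the problem by pulling in the real declarations.
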
@@ -22,30 +22,34 @@ namespace ngraph
 {
     namespace op
     {
+        namespace v0
+        {
             /// \brief Logical "any" reduction operation.
             class NGRAPH_API Any : public util::LogicalReduction
             {
             public:
                 static constexpr NodeTypeInfo type_info{"Any", 0};
                 const NodeTypeInfo& get_type_info() const override { return type_info; }
                 /// \brief Constructs an "any" reduction operation.
                 Any() = default;
                 /// \brief Constructs an "any" reduction operation.
                 ///
                 /// \param arg The tensor to be reduced.
                 /// \param reduction_axes The axis positions (0-based) to be eliminated.
                 Any(const Output<Node>& arg, const AxisSet& reduction_axes);
                 /// \brief Constructs an "any" reduction operation.
                 ///
                 /// \param arg The tensor to be reduced.
                 /// \param reduction_axes The axis positions (0-based) to be eliminated.
                 Any(const Output<Node>& arg, const Output<Node>& reduction_axes);

                 virtual std::shared_ptr<Node>
                     copy_with_new_args(const NodeVector& new_args) const override;

                 bool visit_attributes(AttributeVisitor& visitor) override { return true; }
                 /// \return The default value for Any.
                 virtual std::shared_ptr<Node> get_default_value() const override;
             };
+        }
+        using v0::Any;
     }
 }
@@ -22,48 +22,55 @@ namespace ngraph
 {
     namespace op
     {
+        namespace v0
+        {
             class NGRAPH_API CropAndResize : public Op
             {
             public:
                 enum class ResizeMethod
                 {
                     unspecified,
                     bilinear,
                     nearest
                 };

                 static constexpr NodeTypeInfo type_info{"CropAndResize", 0};
                 const NodeTypeInfo& get_type_info() const override { return type_info; }
                 /// \brief Constructs a crop and resize operation.
                 CropAndResize() = default;

                 /// \param image [N, H, W, C]
                 /// \param boxes [NUM_BOXES, 4] where boxes[box] is [y1, x1, y2, x2] each in [0, 1]
                 /// \param box_indices [NUM_BOXES] in [0, N)
                 /// \param crop_size [crop_height, crop_width]
                 CropAndResize(const Output<Node>& image,
                               const Output<Node>& boxes,
                               const Output<Node>& box_indices,
                               const Output<Node>& crop_size,
                               ResizeMethod resize_method,
                               float extrapolation_value);

                 void validate_and_infer_types() override;
                 std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;

                 ResizeMethod get_resize_method() const { return m_resize_method; }
-                void set_resize_method(ResizeMethod resize_method) { m_resize_method = resize_method; }
+                void set_resize_method(ResizeMethod resize_method)
+                {
+                    m_resize_method = resize_method;
+                }
                 float get_extrapolation_value() const { return m_extrapolation_value; }
                 void set_extrapolation_value(float extrapolation_value)
                 {
                     m_extrapolation_value = extrapolation_value;
                 }

             private:
                 ResizeMethod m_resize_method{ResizeMethod::unspecified};
                 float m_extrapolation_value{0};
             };
+        }
+        using v0::CropAndResize;
     }

     const std::string& as_string(op::CropAndResize::ResizeMethod);
...
@@ -24,45 +24,50 @@ namespace ngraph
 {
     namespace op
     {
+        namespace v0
+        {
             /// \brief Dequantize operation
             ///        Maps quantized input (q) to real output (r) using scale (s) and zero point
             ///        (z):
             ///        r = (q - o) * s
             class NGRAPH_API Dequantize : public ngraph::op::Op
             {
             public:
                 static constexpr NodeTypeInfo type_info{"Dequantize", 0};
                 const NodeTypeInfo& get_type_info() const override { return type_info; }
                 /// \brief Constructs a Dequantize operation
                 Dequantize() = default;

                 /// \brief Constructs a Dequantize operation
                 /// \param input quantized input
                 /// \param scale scale used for mapping
                 /// \param zero_point zero point used for mapping
                 /// \param type output element type
                 /// \param axes axis positions on which `scale` and `zero_point` are specified
                 Dequantize(const Output<Node>& input,
                            const Output<Node>& scale,
                            const Output<Node>& zero_point,
                            const element::Type& type,
                            const AxisSet& axes);

                 void validate_and_infer_types() override;

                 virtual std::shared_ptr<Node>
                     copy_with_new_args(const NodeVector& new_args) const override;

                 const AxisSet& get_axes() const { return m_axes; }
                 void set_axes(const AxisSet& axes) { m_axes = axes; }
                 const element::Type& get_type() const { return m_type; }
                 void set_type(const element::Type& type) { m_type = type; }
             protected:
                 virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                                const OutputVector& deltas) override;

             private:
                 element::Type m_type;
                 AxisSet m_axes;
             };
+        }
+        using v0::Dequantize;
     }
 }
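
The mapping in the doc comment above (which names the zero point z but writes it as o in the formula) is just r = (q - z) * s, applied elementwise with scale and zero_point broadcast along axes. A scalar sketch of the arithmetic, for illustration only and not the backend kernel:

    #include <cstdint>

    // Dequantize mapping r = (q - z) * s for a single uint8 element.
    float dequantize_scalar(uint8_t q, uint8_t zero_point, float scale)
    {
        return (static_cast<int>(q) - static_cast<int>(zero_point)) * scale;
    }

    // Example: q = 130, zero_point = 128, scale = 0.5f  ->  r = 1.0f
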
@@ -24,19 +24,19 @@ using namespace ngraph;

 constexpr NodeTypeInfo op::BatchMatMul::type_info;

-op::BatchMatMul::BatchMatMul(const Output<Node>& arg0, const Output<Node>& arg1)
+op::v0::BatchMatMul::BatchMatMul(const Output<Node>& arg0, const Output<Node>& arg1)
     : Op({arg0, arg1})
 {
     constructor_validate_and_infer_types();
 }

-shared_ptr<Node> op::BatchMatMul::copy_with_new_args(const NodeVector& new_args) const
+shared_ptr<Node> op::v0::BatchMatMul::copy_with_new_args(const NodeVector& new_args) const
 {
     check_new_args_count(this, new_args);
     return make_shared<BatchMatMul>(new_args.at(0), new_args.at(1));
 }

-void op::BatchMatMul::validate_and_infer_types()
+void op::v0::BatchMatMul::validate_and_infer_types()
 {
     // Check input types
     const auto& arg0_et = get_input_element_type(0);
@@ -77,7 +77,8 @@ void op::BatchMatMul::validate_and_infer_types()
     set_output_type(0, output_et, output_shape);
 }

-void op::BatchMatMul::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
+void op::v0::BatchMatMul::generate_adjoints(autodiff::Adjoints& adjoints,
+                                            const OutputVector& deltas)
 {
     auto delta = deltas.at(0); // NxIxK
...
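
Note that constexpr NodeTypeInfo op::BatchMatMul::type_info; is left untouched while the member-function definitions are requalified to op::v0::BatchMatMul. Both spellings are valid, because the using-declaration lets the old qualified name reach the class even for out-of-class member definitions. A compilable sketch of the same idea, with invented names:

    namespace ns
    {
        namespace v0
        {
            struct A
            {
                static const int id;
                int get() const;
            };
        }
        using v0::A;
    }

    const int ns::A::id = 0;                   // reaches v0::A through the using-declaration
    int ns::v0::A::get() const { return id; }  // fully qualified spelling works too
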
@@ -22,34 +22,38 @@ namespace ngraph
 {
     namespace op
     {
+        namespace v0
+        {
             /// \brief Matrix multiply for a batch of Rank 2 tensors.
             ///        The inputs are expected to be Rank 3, where the first dim is the
             ///        batch size and must be the same for both inputs. The last two dims
             ///        are the shape of matrices, i.e. `(batch_size, :, :)`.
             ///        For example, for `a` with shape `(batch_size, n, k)`, and `b` with
             ///        shape `(batch_size, k, m)`, the result of BatchMatMul will have shape
             ///        `(batch_size, n, m)`, and `BatchMatMul(a, b)[i] = Dot(a[i], b[i])`.
             class NGRAPH_API BatchMatMul : public Op
             {
             public:
                 static constexpr NodeTypeInfo type_info{"BatchMatMul", 0};
                 const NodeTypeInfo& get_type_info() const override { return type_info; }
                 BatchMatMul() = default;
                 /// \brief Constructs a batch of matmul product operation.
                 ///
                 /// \param arg0 The node producing the first argument.
                 /// \param arg1 The node producing the second argument.
                 BatchMatMul(const Output<Node>& arg0, const Output<Node>& arg1);

                 virtual void validate_and_infer_types() override;

                 virtual std::shared_ptr<Node>
                     copy_with_new_args(const NodeVector& new_args) const override;

             protected:
                 virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                                const OutputVector& deltas) override;
             };
+        }
+        using v0::BatchMatMul;

         namespace util
         {
...
@@ -22,45 +22,49 @@ namespace ngraph
 {
     namespace op
     {
+        namespace v0
+        {
             /// \brief CompiledKernel represents a sub-graph that can be compiled and executed
             /// independently.
             ///
             /// This op can be used to delimit sub-graphs that with special compilation requirements
             /// within a function. For example, we currently use it to delimit sub-graphs that will
             /// be independently compiled and executed by MLIR backend.
-            class NGRAPH_API CompiledKernel : public ngraph::op::Op
+            class NGRAPH_API CompiledKernel : public Op
             {
             public:
                 static constexpr NodeTypeInfo type_info{"CompiledKernel", 0};
                 const NodeTypeInfo& get_type_info() const override { return type_info; }
                 CompiledKernel() = default;
                 CompiledKernel(const NodeVector& node_list,
                                const NodeVector& outputs,
                                const NodeVector& args);
                 CompiledKernel(const OutputVector& node_list,
                                const OutputVector& outputs,
                                const OutputVector& args);
                 virtual std::shared_ptr<Node>
                     copy_with_new_args(const NodeVector& new_args) const override;

                 const NodeVector& get_node_list() const { return m_node_list; }
                 const NodeVector& get_kernel_outputs() const { return m_output_nodes; }
                 // For node B inside CompiledKernel ck such that A->B and A is outside of ck:
                 // replace input to B with a dummy Parameter Op and add an entry to ck's
                 // m_input_map.
                 void encapsulate_nodes();
                 const std::unordered_map<std::shared_ptr<Node>, size_t>& get_input_map() const
                 {
                     return m_input_map;
                 }
                 void insert_to_input_map(std::shared_ptr<Node>, size_t);

             private:
                 NodeVector m_node_list;
                 NodeVector m_output_nodes;
                 // Used to store the information of internal nodes that have input coming from
                 // outside of CK
                 std::unordered_map<std::shared_ptr<Node>, size_t> m_input_map;
             };
+        }
+        using v0::CompiledKernel;
     }
 }
@@ -23,35 +23,41 @@ namespace ngraph
 {
     namespace op
     {
+        namespace v0
+        {
             /// \brief Operation which "adds" axes to an input tensor, replicating elements from the
             ///        input as needed along the new axes.
             ///
             /// This is basically the "dynamic shape" version of the static Broadcast op.
             class NGRAPH_API DynBroadcast : public Op
             {
             public:
                 static constexpr NodeTypeInfo type_info{"DynBroadcast", 0};
                 const NodeTypeInfo& get_type_info() const override { return type_info; }
                 DynBroadcast() = default;
                 /// \brief Constructs a dynamic broadcast operation.
                 ///
                 /// \param arg Node that produces the input tensor to be broadcast.
                 /// \param shape Node that produces shape of the output tensor.
                 /// \param broadcast_axes Node that produces the axis positions (0-based) in the
                 ///                       result
                 ///                       that are being broadcast. The remaining axes in shape must
                 ///                       be
                 ///                       the same as the shape of arg.
                 DynBroadcast(const Output<Node>& arg,
                              const Output<Node>& shape,
                              const Output<Node>& broadcast_axes);

                 void validate_and_infer_types() override;

                 virtual std::shared_ptr<Node>
                     copy_with_new_args(const NodeVector& new_args) const override;

             protected:
                 virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                                const OutputVector& deltas) override;
             };
+        }
+        using v0::DynBroadcast;
     }
 }
@@ -22,39 +22,44 @@ namespace ngraph
 {
     namespace op
     {
+        namespace v0
+        {
             /// \brief Generic padding operation which takes padding below and above as dynamic
             ///        shapes.
             ///        This is similar to existing Pad operation except padding values are dynamic.
             class NGRAPH_API DynPad : public Op
             {
             public:
                 static constexpr NodeTypeInfo type_info{"DynPad", 0};
                 const NodeTypeInfo& get_type_info() const override { return type_info; }
                 DynPad() = default;
                 /// \brief Perform dynamic padding of a tensor
                 ///
                 /// \param arg The node producing input tensor to be padded.
                 /// \param padding_below The node producing the padding-below widths.
                 /// \param padding_above The node producing the padding-above widths.
                 /// \param padding_value The value to be used for padding. Must be scalar.
                 /// \param pad_mode The padding mode: CONSTANT(default), EDGE or REFLECT.
                 DynPad(const Output<Node>& arg,
                        const Output<Node>& padding_below,
                        const Output<Node>& padding_above,
                        const Output<Node>& padding_value,
                        PadMode pad_mode = PadMode::CONSTANT);

                 PadMode get_pad_mode() const { return m_pad_mode; }
                 void validate_and_infer_types() override;

                 virtual std::shared_ptr<Node>
                     copy_with_new_args(const NodeVector& new_args) const override;

             protected:
                 virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                                const OutputVector& deltas) override;

             private:
                 PadMode m_pad_mode;
             };
+        }
+        using v0::DynPad;
     }
 }
@@ -23,60 +23,65 @@ namespace ngraph
 {
     namespace op
     {
+        namespace v0
+        {
             /// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a
             ///        bounding box, optionally with stride.
             class NGRAPH_API DynReplaceSlice : public Op
             {
             public:
                 static constexpr NodeTypeInfo type_info{"DynReplaceSlice", 0};
                 const NodeTypeInfo& get_type_info() const override { return type_info; }
                 DynReplaceSlice() = default;
                 /// \brief Constructs a dynamic tensor replace-slice operation.
                 ///
                 /// \param arg The tensor in which to replace the slice.
                 /// \param replacement Data to copy to the slice for replacement.
                 /// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
                 /// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
                 /// \param strides The slicing strides; for example, strides of `{n,m}` means to
                 ///                take
                 ///                every nth row and every mth column of the input matrix.
                 /// \param lower_bounds_mask Ignores lower_bounds for axis with the mask set
                 /// \param upper_bounds_mask Ignores upper_bounds for axis with the mask set
                 /// \param new_axis Add dimension one axis at the set positions
                 /// \param shrink_axis Delete dimensions at the set positions
                 /// \param ellipsis_mask Inserts missing dimensions on the set position
                 DynReplaceSlice(const Output<Node>& arg,
                                 const Output<Node>& replacement,
                                 const Output<Node>& lower_bounds,
                                 const Output<Node>& upper_bounds,
                                 const Output<Node>& strides,
                                 const AxisSet& lower_bounds_mask = AxisSet{},
                                 const AxisSet& upper_bounds_mask = AxisSet{},
                                 const AxisSet& new_axis = AxisSet{},
                                 const AxisSet& shrink_axis = AxisSet{},
                                 const AxisSet& ellipsis_mask = AxisSet{});

                 const AxisSet& get_lower_bounds_mask() const { return m_lower_bounds_mask; }
                 const AxisSet& get_upper_bounds_mask() const { return m_upper_bounds_mask; }
                 const AxisSet& get_new_axis() const { return m_new_axis; }
                 const AxisSet& get_shrink_axis() const { return m_shrink_axis; }
                 const AxisSet& get_ellipsis_mask() const { return m_ellipsis_mask; }

                 virtual std::shared_ptr<Node>
                     copy_with_new_args(const NodeVector& new_args) const override;

                 void validate_and_infer_types() override;

             protected:
                 virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                                const OutputVector& deltas) override;

             private:
                 /// Helper method to compute output shape
                 Shape compute_output_shape() const;

                 AxisSet m_lower_bounds_mask;
                 AxisSet m_upper_bounds_mask;
                 AxisSet m_new_axis;
                 AxisSet m_shrink_axis;
                 AxisSet m_ellipsis_mask;
             };
+        }
+        using v0::DynReplaceSlice;
     }
 }
@@ -23,58 +23,63 @@ namespace ngraph
 {
     namespace op
     {
+        namespace v0
+        {
             /// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a
             ///        bounding box, optionally with stride.
             class NGRAPH_API DynSlice : public Op
             {
             public:
                 static constexpr NodeTypeInfo type_info{"DynSlice", 0};
                 const NodeTypeInfo& get_type_info() const override { return type_info; }
                 DynSlice() = default;
                 /// \brief Constructs a dynamic tensor slice operation.
                 ///
                 /// \param arg The tensor to be sliced.
                 /// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
                 /// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
                 /// \param strides The slicing strides; for example, strides of `{n,m}` means to
                 ///                take
                 ///                every nth row and every mth column of the input matrix.
                 /// \param lower_bounds_mask Ignores lower_bounds for axis with the mask set
                 /// \param upper_bounds_mask Ignores upper_bounds for axis with the mask set
                 /// \param new_axis Add dimension one axis at the set positions
                 /// \param shrink_axis Delete dimensions at the set positions
                 /// \param ellipsis_mask Inserts missing dimensions on the set position
                 DynSlice(const Output<Node>& arg,
                          const Output<Node>& lower_bounds,
                          const Output<Node>& upper_bounds,
                          const Output<Node>& strides,
                          const AxisSet& lower_bounds_mask = AxisSet{},
                          const AxisSet& upper_bounds_mask = AxisSet{},
                          const AxisSet& new_axis = AxisSet{},
                          const AxisSet& shrink_axis = AxisSet{},
                          const AxisSet& ellipsis_mask = AxisSet{});

                 const AxisSet& get_lower_bounds_mask() const { return m_lower_bounds_mask; }
                 const AxisSet& get_upper_bounds_mask() const { return m_upper_bounds_mask; }
                 const AxisSet& get_new_axis() const { return m_new_axis; }
                 const AxisSet& get_shrink_axis() const { return m_shrink_axis; }
                 const AxisSet& get_ellipsis_mask() const { return m_ellipsis_mask; }

                 virtual std::shared_ptr<Node>
                     copy_with_new_args(const NodeVector& new_args) const override;

                 void validate_and_infer_types() override;

             protected:
                 virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                                const OutputVector& deltas) override;

             private:
                 /// Helper method to compute output shape
                 Shape compute_output_shape() const;

                 AxisSet m_lower_bounds_mask;
                 AxisSet m_upper_bounds_mask;
                 AxisSet m_new_axis;
                 AxisSet m_shrink_axis;
                 AxisSet m_ellipsis_mask;
             };
+        }
+        using v0::DynSlice;
     }
 }
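
The stride semantics in the doc comment above ("strides of `{n,m}` means to take every nth row and every mth column") give the usual strided-slice extent per axis, ceil((upper - lower) / stride), when none of the mask/new-axis/shrink-axis attributes are set. A sketch of that arithmetic under those assumptions, not the op's actual compute_output_shape():

    #include <cstddef>
    #include <vector>

    // Per-axis extent of a strided slice: ceil((upper - lower) / stride).
    std::vector<size_t> strided_extents(const std::vector<size_t>& lower,
                                        const std::vector<size_t>& upper,
                                        const std::vector<size_t>& strides)
    {
        std::vector<size_t> shape;
        for (size_t axis = 0; axis < lower.size(); ++axis)
        {
            shape.push_back((upper[axis] - lower[axis] + strides[axis] - 1) / strides[axis]);
        }
        return shape;
    }

    // Example: lower {0, 0}, upper {4, 4}, strides {2, 2} -> extents {2, 2}.
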
@@ -23,39 +23,49 @@ namespace ngraph
 {
     namespace op
     {
+        namespace v0
+        {
             /// \brief Relu(Convolution) forward prop for batched convolution operation.
             class NGRAPH_API QuantizedConvolutionRelu : public Op
             {
             public:
                 static constexpr NodeTypeInfo type_info{"QuantizedConvolutionRelu", 0};
                 const NodeTypeInfo& get_type_info() const override { return type_info; }
                 QuantizedConvolutionRelu() = default;
                 QuantizedConvolutionRelu(const Output<Node>& data_batch,
                                          const Output<Node>& filters,
                                          const Strides& window_movement_strides,
                                          const Strides& window_dilation_strides,
                                          const CoordinateDiff& padding_below,
                                          const CoordinateDiff& padding_above,
                                          const Strides& data_dilation_strides,
                                          const Output<Node>& scale);

-                const Strides& get_window_movement_strides() const { return m_window_movement_strides; }
-                const Strides& get_window_dilation_strides() const { return m_window_dilation_strides; }
+                const Strides& get_window_movement_strides() const
+                {
+                    return m_window_movement_strides;
+                }
+                const Strides& get_window_dilation_strides() const
+                {
+                    return m_window_dilation_strides;
+                }
                 const CoordinateDiff& get_padding_below() const { return m_padding_below; }
                 const CoordinateDiff& get_padding_above() const { return m_padding_above; }
                 const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
                 Output<Node> get_filters() { return input_value(1); }
                 Output<Node> get_data_batch() { return input_value(0); }
                 bool with_relu() const { return true; }
                 virtual std::shared_ptr<Node>
                     copy_with_new_args(const NodeVector& new_args) const override;

             protected:
                 Strides m_window_movement_strides;
                 Strides m_window_dilation_strides;
                 CoordinateDiff m_padding_below;
                 CoordinateDiff m_padding_above;
                 Strides m_data_dilation_strides;
             };
+        }
+        using v0::QuantizedConvolutionRelu;
     }
 }
@@ -24,35 +24,39 @@ namespace ngraph
 {
     namespace op
     {
+        namespace v0
+        {
             class NGRAPH_API QuantizedDotBias : public Op
             {
             public:
                 static constexpr NodeTypeInfo type_info{"QuantizedDotBias", 0};
                 const NodeTypeInfo& get_type_info() const override { return type_info; }
                 QuantizedDotBias() = default;
                 QuantizedDotBias(const Output<Node>& data,
                                  const Output<Node>& weights,
                                  const Output<Node>& bias,
                                  const Output<Node>& scale,
                                  bool requantize = true,
                                  bool with_relu = false);

                 virtual std::shared_ptr<Node>
                     copy_with_new_args(const NodeVector& new_args) const override
                 {
                     check_new_args_count(this, new_args);
                     return std::make_shared<QuantizedDotBias>(new_args.at(0),
                                                               new_args.at(1),
                                                               new_args.at(2),
                                                               new_args.at(3),
                                                               m_requantize,
                                                               m_with_relu);
                 }
                 bool with_relu() const { return m_with_relu; }
                 bool requantize() const { return m_requantize; }

             protected:
                 bool m_requantize;
                 bool m_with_relu;
             };
-    } // namespace op
-} // namespace ngraph
+        }
+        using v0::QuantizedDotBias;
+    }
+}
@@ -24,63 +24,67 @@ namespace ngraph
 {
     namespace op
     {
+        namespace v0
+        {
             /// \brief Generates a tensor populated with random values of a uniform distribution.
             class NGRAPH_API RandomUniform : public op::Op
             {
             public:
                 static constexpr NodeTypeInfo type_info{"RandomUniform", 0};
                 const NodeTypeInfo& get_type_info() const override { return type_info; }
                 /// \brief Constructs an uninitialized RandomUniform node.
                 RandomUniform() = default;

                 /// \brief Constructs a RandomUniform node.
                 /// \param min_value Output producing the minimum value (inclusive) for the random
                 ///                  uniform distribution. Must return a scalar of floating point
                 ///                  type, and the type must match that of `max_value`.
                 /// \param max_value Output producing the maximum value (inclusive) for the random
                 ///                  uniform distribution. Must return a scalar of floating point
                 ///                  type, and the type must match that of `min_value`.
                 /// \param result_shape Output producing the shape of the output tensor. Must return
                 ///                     a vector of type `element::i64`.
                 /// \param use_fixed_seed Output producing a boolean scalar Flag indicating whether
                 ///                       to use the value supplied in `fixed_seed` to re-seed the
                 ///                       random number generator at this iteration. Note that
                 ///                       whenever `use_fixed_seed` is `true`, the same values will
                 ///                       be generated in the output tensor. This flag is primarily
                 ///                       used for debugging. If `use_fixed_seed` is `false`, the
                 ///                       value in `fixed_seed` is ignored.
                 /// \param fixed_seed Fixed seed value to be supplied to the random number generator
                 ///                   if `use_fixed_seed` is `true`. If `use_fixed_seed` is `false`,
                 ///                   this value is ignored.
                 RandomUniform(const Output<Node>& min_value,
                               const Output<Node>& max_value,
                               const Output<Node>& result_shape,
                               const Output<Node>& use_fixed_seed,
                               uint64_t fixed_seed);

                 virtual std::shared_ptr<Node>
                     copy_with_new_args(const NodeVector& new_args) const override;

                 /// \brief Returns the fixed seed value to be supplied to the random number
                 ///        generator if `use_fixed_seed` is `true`. If `use_fixed_seed` is `false`,
                 ///        this value is ignored.
                 uint64_t get_fixed_seed() const { return m_fixed_seed; }
                 /// \brief Sets the fixed seed value to be supplied to the random number generator
                 ///        if `use_fixed_seed` is `true`. If `use_fixed_seed` is `false`, this value
                 ///        is ignored.
                 void set_fixed_seed(uint64_t fixed_seed) { m_fixed_seed = fixed_seed; }
                 // Internally, any implementation of RandomUniform will have state, since it is
                 // backed by a random number generator.
                 bool has_state() const override { return true; }
                 void validate_and_infer_types() override;

             protected:
                 virtual void generate_adjoints(autodiff::Adjoints& /* adjoints */,
                                                const OutputVector& /* deltas */) override
                 {
                 }

                 uint64_t m_fixed_seed;
             };
+        }
+        using v0::RandomUniform;
     }
 }
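
The use_fixed_seed / fixed_seed contract described above amounts to: reseed with the fixed value whenever the flag input is true (producing identical output tensors), otherwise let the generator's state advance. A minimal sketch of that contract, assuming a std::mt19937_64-style generator rather than whatever a given backend actually uses:

    #include <cstdint>
    #include <random>

    // Illustrative only: honors use_fixed_seed as the doc comment describes.
    double next_uniform(std::mt19937_64& gen, bool use_fixed_seed, uint64_t fixed_seed,
                        double min_value, double max_value)
    {
        if (use_fixed_seed)
        {
            gen.seed(fixed_seed); // same seed -> same values in the output tensor
        }
        // Note: std::uniform_real_distribution is half-open [min, max),
        // while the op documents an inclusive max; close enough for a sketch.
        std::uniform_real_distribution<double> dist(min_value, max_value);
        return dist(gen);
    }
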
@@ -24,48 +24,52 @@ namespace ngraph
 {
     namespace op
     {
+        namespace v0
+        {
             /// \brief Matrix multiply for a batch of Rank 2 tensors each with potential
             ///        transpose.
             ///
             ///        The inputs are expected to be Rank 3, where the first dim is the
             ///        batch size and must be the same for both inputs. The last two dims
             ///        are the shape of matrices, i.e. `(batch_size, :, :)`.
             ///        For example, for `a` with shape `(batch_size, n, k)`, and `b` with
             ///        shape `(batch_size, k, m)`, the result of BatchMatMul will have shape
             ///        `(batch_size, n, m)`, and `BatchMatMulTranspose(a, b)[i] = Dot(a[i], b[i])`.
             class NGRAPH_API BatchMatMulTranspose : public ngraph::op::util::FusedOp
             {
             public:
                 static constexpr NodeTypeInfo type_info{"BatchMatMulTranspose", 0};
                 const NodeTypeInfo& get_type_info() const override { return type_info; }
                 BatchMatMulTranspose() = default;
                 /// \brief Constructs a batch of matmul product operation.
                 ///
                 /// \param arg0 The node producing the first argument.
                 /// \param arg1 The node producing the second argument.
                 /// \param transpose_0 Apply transpose to arg0.
                 /// \param transpose_1 Apply transpose to arg1.
                 BatchMatMulTranspose(const Output<Node>& arg0,
                                      const Output<Node>& arg1,
                                      bool transpose_0 = false,
                                      bool transpose_1 = false);

                 bool get_transpose_arg0() const { return m_transpose_arg0; }
                 bool get_transpose_arg1() const { return m_transpose_arg1; }

                 virtual void validate_and_infer_types() override;

                 virtual std::shared_ptr<Node>
                     copy_with_new_args(const NodeVector& new_args) const override;

                 virtual NodeVector decompose_op() const override;

             protected:
                 virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                                const OutputVector& deltas) override;

             private:
                 bool m_transpose_arg0;
                 bool m_transpose_arg1;
             };
+        }
+        using v0::BatchMatMulTranspose;
     }
 }
@@ -24,70 +24,79 @@ namespace ngraph
 {
     namespace op
     {
+        namespace v0
+        {
             class NGRAPH_API CrossEntropy : public ngraph::op::util::FusedOp
             {
             public:
                 static constexpr NodeTypeInfo type_info{"CrossEntropy", 0};
                 const NodeTypeInfo& get_type_info() const override { return type_info; }
                 CrossEntropy() = default;
                 /// \brief CrossEntropy for computing loss
                 /// \param arg1 Node that produces the input tensor
                 /// \param arg2 Node that produces ground truth lables for the input
                 /// \param soft_label flag indicating whether to interpretate the given labels as
                 ///                   soft
                 ///                   labels
                 /// \param ignore_index Specifies a target value that is ignored and does not
                 ///                     contribute
                 ///                     to the input gradient Only valid if soft_label is set to False
                 CrossEntropy(const Output<Node>& arg1,
                              const Output<Node>& arg2,
                              bool soft_label = false,
                              int64_t ignore_index = -100);

                 virtual NodeVector decompose_op() const override;

                 void pre_validate_and_infer_types() override;

                 virtual std::shared_ptr<Node>
                     copy_with_new_args(const NodeVector& new_args) const override;

                 bool get_soft_label() const { return m_soft_label; }
                 int64_t get_ignore_index() const { return m_ignore_index; }

             private:
                 bool m_soft_label;
                 int64_t m_ignore_index;
             };

             class NGRAPH_API CrossEntropyBackprop : public util::FusedOp
             {
             public:
                 static constexpr NodeTypeInfo type_info{"CrossEntropyBackprop", 0};
                 const NodeTypeInfo& get_type_info() const override { return type_info; }
                 CrossEntropyBackprop() = default;
                 /// \brief Backprop for CrossEntropy
                 /// \param input Node that produces tensor from the fprop
                 /// \param labels Node that produces ground truth labels for input
                 /// \param delta Node that produces the delta during bprop
                 /// \param soft_label flag indicating whether to interpretate the given labels as
                 ///                   soft
                 ///                   labels
                 /// \param ignore_index Specifies a target value that is ignored and does not
                 ///                     contribute
                 ///                     to the input gradient Only valid if soft_label is set to False
                 CrossEntropyBackprop(const Output<Node>& input,
                                      const Output<Node>& labels,
                                      const Output<Node>& delta,
                                      bool soft_label = false,
                                      int64_t ignore_index = -100);

                 virtual NodeVector decompose_op() const override;

                 void pre_validate_and_infer_types() override;

                 virtual std::shared_ptr<Node>
                     copy_with_new_args(const NodeVector& new_args) const override;

                 bool get_soft_label() const { return m_soft_label; }
                 int64_t get_ignore_index() const { return m_ignore_index; }

             private:
                 bool m_soft_label;
                 int64_t m_ignore_index;
             };
-    } // namespace op
-} // namespace ngraph
+        }
+        using v0::CrossEntropy;
+        using v0::CrossEntropyBackprop;
+    }
+}
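
For hard labels (soft_label = false), cross entropy reduces to the negative log-likelihood of the true class, and ignore_index masks a sample out of both the loss and the gradient. A per-sample sketch under those assumptions:

    #include <cmath>
    #include <cstdint>

    // Hard-label cross entropy for one sample: -log(p[label]),
    // with ignore_index contributing zero loss (and hence zero gradient).
    double cross_entropy_hard(const double* class_probs, int64_t label, int64_t ignore_index)
    {
        if (label == ignore_index)
        {
            return 0.0;
        }
        return -std::log(class_probs[label]);
    }
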
@@ -33,21 +33,21 @@ NGRAPH_OP(Add, ngraph::op::v1, 1)
 NGRAPH_OP(All, ngraph::op::v0, 0)
 NGRAPH_OP(AllReduce, ngraph::op::v0, 0)
 NGRAPH_OP(And, ngraph::op::v0, 0)
-NGRAPH_OP(Any, ngraph::op, 0)
+NGRAPH_OP(Any, ngraph::op::v0, 0)
 NGRAPH_OP(ArgMax, ngraph::op::v0, 0)
 NGRAPH_OP(ArgMin, ngraph::op::v0, 0)
 NGRAPH_OP(Asin, ngraph::op::v0, 0)
-NGRAPH_OP(Atan, ngraph::op, 0)
-NGRAPH_OP(Atan2, ngraph::op, 0)
+NGRAPH_OP(Atan, ngraph::op::v0, 0)
+NGRAPH_OP(Atan2, ngraph::op::v0, 0)
 NGRAPH_OP(AvgPool, ngraph::op::v0, 0)
 NGRAPH_OP(AvgPool, ngraph::op::v1, 1)
 NGRAPH_OP(AvgPoolBackprop, ngraph::op::v0, 0)
 NGRAPH_OP(AvgPoolBackprop, ngraph::op::v1, 1)
-NGRAPH_OP(BatchMatMul, ngraph::op, 0)
-NGRAPH_OP(BatchMatMulTranspose, ngraph::op, 0)
-NGRAPH_OP(BatchNormInference, ngraph::op, 0)
-NGRAPH_OP(BatchNormTraining, ngraph::op, 0)
-NGRAPH_OP(BatchNormTrainingBackprop, ngraph::op, 0)
+NGRAPH_OP(BatchMatMul, ngraph::op::v0, 0)
+NGRAPH_OP(BatchMatMulTranspose, ngraph::op::v0, 0)
+NGRAPH_OP(BatchNormInference, ngraph::op::v0, 0)
+NGRAPH_OP(BatchNormTraining, ngraph::op::v0, 0)
+NGRAPH_OP(BatchNormTrainingBackprop, ngraph::op::v0, 0)
 NGRAPH_OP(BatchToSpace, ngraph::op::v1, 1)
 NGRAPH_OP(BinaryConvolution, ngraph::op::v1, 1)
 NGRAPH_OP(Broadcast, ngraph::op::v0, 0)
@@ -57,10 +57,10 @@ NGRAPH_OP(BroadcastLike, ngraph::op::v0, 0)
 NGRAPH_OP(CTCGreedyDecoder, ngraph::op::v0, 0)
 NGRAPH_OP(Ceiling, ngraph::op::v0, 0)
 NGRAPH_OP(Clamp, ngraph::op::v0, 0)
-NGRAPH_OP(CompiledKernel, ngraph::op, 0)
+NGRAPH_OP(CompiledKernel, ngraph::op::v0, 0)
 NGRAPH_OP(Concat, ngraph::op::v0, 0)
-NGRAPH_OP(Constant, ngraph::op, 0)
-NGRAPH_OP(Convert, ngraph::op, 0)
+NGRAPH_OP(Constant, ngraph::op::v0, 0)
+NGRAPH_OP(Convert, ngraph::op::v0, 0)
 NGRAPH_OP(ConvertLike, ngraph::op::v1, 1)
 NGRAPH_OP(Convolution, ngraph::op::v0, 0)
 NGRAPH_OP(Convolution, ngraph::op::v1, 1)
@@ -71,25 +71,25 @@ NGRAPH_OP(ConvolutionBackpropFilters, ngraph::op::v1, 1)
 NGRAPH_OP(ConvolutionBias, ngraph::op::v0, 0)
 NGRAPH_OP(ConvolutionBiasAdd, ngraph::op::v0, 0)
 NGRAPH_OP(ConvolutionBiasBackpropFiltersBias, ngraph::op::v0, 0)
-NGRAPH_OP(Cos, ngraph::op, 0)
-NGRAPH_OP(Cosh, ngraph::op, 0)
-NGRAPH_OP(CropAndResize, ngraph::op, 0)
-NGRAPH_OP(CrossEntropy, ngraph::op, 0)
-NGRAPH_OP(CrossEntropyBackprop, ngraph::op, 0)
+NGRAPH_OP(Cos, ngraph::op::v0, 0)
+NGRAPH_OP(Cosh, ngraph::op::v0, 0)
+NGRAPH_OP(CropAndResize, ngraph::op::v0, 0)
+NGRAPH_OP(CrossEntropy, ngraph::op::v0, 0)
+NGRAPH_OP(CrossEntropyBackprop, ngraph::op::v0, 0)
 NGRAPH_OP(CumSum, ngraph::op::v0, 0)
 NGRAPH_OP(DeformableConvolution, ngraph::op::v1, 1)
 NGRAPH_OP(DeformablePSROIPooling, ngraph::op::v1, 1)
 NGRAPH_OP(DepthToSpace, ngraph::op::v0, 0)
-NGRAPH_OP(Dequantize, ngraph::op, 0)
+NGRAPH_OP(Dequantize, ngraph::op::v0, 0)
 NGRAPH_OP(DetectionOutput, ngraph::op::v0, 0)
 NGRAPH_OP(Divide, ngraph::op::v0, 0)
 NGRAPH_OP(Divide, ngraph::op::v1, 1)
 NGRAPH_OP(Dot, ngraph::op::v0, 0)
-NGRAPH_OP(DynBroadcast, ngraph::op, 0)
-NGRAPH_OP(DynPad, ngraph::op, 0)
-NGRAPH_OP(DynReplaceSlice, ngraph::op, 0)
+NGRAPH_OP(DynBroadcast, ngraph::op::v0, 0)
+NGRAPH_OP(DynPad, ngraph::op::v0, 0)
+NGRAPH_OP(DynReplaceSlice, ngraph::op::v0, 0)
 NGRAPH_OP(DynReshape, ngraph::op::v0, 0)
-NGRAPH_OP(DynSlice, ngraph::op, 0)
+NGRAPH_OP(DynSlice, ngraph::op::v0, 0)
 NGRAPH_OP(Elu, ngraph::op::v0, 0)
 NGRAPH_OP(EmbeddingLookup, ngraph::op::v0, 0)
 NGRAPH_OP(Equal, ngraph::op::v0, 0)
@@ -131,7 +131,7 @@ NGRAPH_OP(Less, ngraph::op::v0, 0)
 NGRAPH_OP(Less, ngraph::op::v1, 1)
 NGRAPH_OP(LessEq, ngraph::op::v0, 0)
 NGRAPH_OP(LessEqual, ngraph::op::v1, 1)
-NGRAPH_OP(Log, ngraph::op, 0)
+NGRAPH_OP(Log, ngraph::op::v0, 0)
 NGRAPH_OP(LogicalAnd, ngraph::op::v1, 1)
 NGRAPH_OP(LogicalNot, ngraph::op::v1, 1)
 NGRAPH_OP(LogicalOr, ngraph::op::v1, 1)
@@ -151,7 +151,7 @@ NGRAPH_OP(Minimum, ngraph::op::v1, 1)
 NGRAPH_OP(Mod, ngraph::op::v1, 1)
 NGRAPH_OP(Multiply, ngraph::op::v0, 0)
 NGRAPH_OP(Multiply, ngraph::op::v1, 1)
-NGRAPH_OP(Negative, ngraph::op, 0)
+NGRAPH_OP(Negative, ngraph::op::v0, 0)
 NGRAPH_OP(NonMaxSuppression, ngraph::op::v1, 1)
 NGRAPH_OP(NormalizeL2, ngraph::op::v0, 0)
 NGRAPH_OP(Not, ngraph::op::v0, 0)
@@ -166,25 +166,25 @@ NGRAPH_OP(Pad, ngraph::op::v1, 1)
 NGRAPH_OP(Parameter, ngraph::op::v0, 0)
 NGRAPH_OP(PartialSlice, ngraph::op::v0, 0)
 NGRAPH_OP(PartialSliceBackprop, ngraph::op::v0, 0)
-NGRAPH_OP(Passthrough, ngraph::op, 0)
+NGRAPH_OP(Passthrough, ngraph::op::v0, 0)
 NGRAPH_OP(Power, ngraph::op::v0, 0)
 NGRAPH_OP(Power, ngraph::op::v1, 1)
 NGRAPH_OP(PriorBox, ngraph::op::v0, 0)
 NGRAPH_OP(PriorBoxClustered, ngraph::op::v0, 0)
-NGRAPH_OP(Product, ngraph::op, 0)
+NGRAPH_OP(Product, ngraph::op::v0, 0)
 NGRAPH_OP(Proposal, ngraph::op::v0, 0)
 NGRAPH_OP(Quantize, ngraph::op::v0, 0)
 NGRAPH_OP(QuantizedConvolution, ngraph::op::v0, 0)
-NGRAPH_OP(QuantizedConvolutionBias, ngraph::op, 0)
-NGRAPH_OP(QuantizedConvolutionBiasAdd, ngraph::op, 0)
-NGRAPH_OP(QuantizedConvolutionBiasSignedAdd, ngraph::op, 0)
-NGRAPH_OP(QuantizedConvolutionRelu, ngraph::op, 0)
+NGRAPH_OP(QuantizedConvolutionBias, ngraph::op::v0, 0)
+NGRAPH_OP(QuantizedConvolutionBiasAdd, ngraph::op::v0, 0)
+NGRAPH_OP(QuantizedConvolutionBiasSignedAdd, ngraph::op::v0, 0)
+NGRAPH_OP(QuantizedConvolutionRelu, ngraph::op::v0, 0)
 NGRAPH_OP(QuantizedDot, ngraph::op::v0, 0)
-NGRAPH_OP(QuantizedDotBias, ngraph::op, 0)
+NGRAPH_OP(QuantizedDotBias, ngraph::op::v0, 0)
 NGRAPH_OP(RNNCell, ngraph::op::v0, 0)
 NGRAPH_OP(ROIPooling, ngraph::op::v0, 0)
-NGRAPH_OP(RandomUniform, ngraph::op, 0)
-NGRAPH_OP(Range, ngraph::op, 0)
+NGRAPH_OP(RandomUniform, ngraph::op::v0, 0)
+NGRAPH_OP(Range, ngraph::op::v0, 0)
 NGRAPH_OP(Recv, ngraph::op::v0, 0)
 NGRAPH_OP(ReduceMax, ngraph::op::v1, 1)
 NGRAPH_OP(ReduceLogicalAnd, ngraph::op::v1, 1)
@@ -230,7 +230,7 @@ NGRAPH_OP(SpaceToBatch, ngraph::op::v1, 1)
 NGRAPH_OP(SpaceToDepth, ngraph::op::v0, 0)
 NGRAPH_OP(Split, ngraph::op::v1, 1)
 NGRAPH_OP(Split, ngraph::op::v0, 0)
-NGRAPH_OP(Sqrt, ngraph::op, 0)
+NGRAPH_OP(Sqrt, ngraph::op::v0, 0)
 NGRAPH_OP(SquaredDifference, ngraph::op::v0, 0)
 NGRAPH_OP(Squeeze, ngraph::op::v0, 0)
 NGRAPH_OP(Stack, ngraph::op::v0, 0)
...
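
Rows like these are X-macro entries: each consumer defines NGRAPH_OP before including the table, which is why every row must now spell out the exact namespace (ngraph::op::v0 instead of ngraph::op). A hypothetical consumer, with the macro body and include path invented for illustration:

    #include <utility>

    // Expand each table row into a (name, version) record.
    #define NGRAPH_OP(NAME, NAMESPACE, VERSION) {#NAME, VERSION},
    static const std::pair<const char*, int> op_versions[] = {
    #include "ngraph/op/op_version_tbl.hpp" // path assumed for illustration
    };
    #undef NGRAPH_OP
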
@@ -87,7 +87,6 @@
 #include "ngraph/op/fused/clamp.hpp"
 #include "ngraph/op/fused/conv_fused.hpp"
 #include "ngraph/op/fused/crossentropy.hpp"
-#include "ngraph/op/fused/crossentropy.hpp"
 #include "ngraph/op/fused/depth_to_space.hpp"
 #include "ngraph/op/fused/elu.hpp"
 #include "ngraph/op/fused/fake_quantize.hpp"
@@ -175,7 +174,6 @@
 #include "ngraph/op/sqrt.hpp"
 #include "ngraph/op/stop_gradient.hpp"
 #include "ngraph/op/strided_slice.hpp"
-#include "ngraph/op/strided_slice.hpp"
 #include "ngraph/op/subtract.hpp"
 #include "ngraph/op/sum.hpp"
 #include "ngraph/op/tan.hpp"
@@ -184,5 +182,4 @@
 #include "ngraph/op/topk.hpp"
 #include "ngraph/op/util/attr_types.hpp"
 #include "ngraph/op/variadic_split.hpp"
-#include "ngraph/op/variadic_split.hpp"
 #include "ngraph/op/xor.hpp"