Unverified commit 1011a992, authored by Robert Kimball, committed by GitHub

Move ops in ngraph::op namespace to ngraph::op::v0 (#4377)

* Move op to v0 namespace

* BatchMatMul

* BatchNorm*

* CompiledKernel

* Constant

* Fix more

* More

* Fix Quantized*

* fix last v0 ops

* fix compile error

* fix build error

* Fix GPU build

* Fix build error
Co-authored-by: Scott Cyphers <diyessi@users.noreply.github.com>
parent 7018f9ca
@@ -23,6 +23,7 @@
#include "ngraph/check.hpp"
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/experimental/compiled_kernel.hpp"
#include <mlir/IR/Builders.h>
#include <mlir/IR/Module.h>
@@ -42,10 +43,6 @@ namespace ngraph
{
class Type;
}
namespace op
{
class CompiledKernel;
}
namespace runtime
{
namespace ngmlir
......
@@ -22,30 +22,34 @@ namespace ngraph
{
namespace op
{
/// \brief Logical "any" reduction operation.
class NGRAPH_API Any : public util::LogicalReduction
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Any", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an "any" reduction operation.
Any() = default;
/// \brief Constructs an "any" reduction operation.
///
/// \param arg The tensor to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
Any(const Output<Node>& arg, const AxisSet& reduction_axes);
/// \brief Constructs an "any" reduction operation.
///
/// \param arg The tensor to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
Any(const Output<Node>& arg, const Output<Node>& reduction_axes);
/// \brief Logical "any" reduction operation.
class NGRAPH_API Any : public util::LogicalReduction
{
public:
static constexpr NodeTypeInfo type_info{"Any", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an "any" reduction operation.
Any() = default;
/// \brief Constructs an "any" reduction operation.
///
/// \param arg The tensor to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
Any(const Output<Node>& arg, const AxisSet& reduction_axes);
/// \brief Constructs an "any" reduction operation.
///
/// \param arg The tensor to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
Any(const Output<Node>& arg, const Output<Node>& reduction_axes);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override { return true; }
/// \return The default value for Any.
virtual std::shared_ptr<Node> get_default_value() const override;
};
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override { return true; }
/// \return The default value for Any.
virtual std::shared_ptr<Node> get_default_value() const override;
};
}
using v0::Any;
}
}
@@ -22,48 +22,55 @@ namespace ngraph
{
namespace op
{
class NGRAPH_API CropAndResize : public Op
namespace v0
{
public:
enum class ResizeMethod
class NGRAPH_API CropAndResize : public Op
{
unspecified,
bilinear,
nearest
};
public:
enum class ResizeMethod
{
unspecified,
bilinear,
nearest
};
static constexpr NodeTypeInfo type_info{"CropAndResize", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a crop and resize operation.
CropAndResize() = default;
static constexpr NodeTypeInfo type_info{"CropAndResize", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a crop and resize operation.
CropAndResize() = default;
/// \param image [N, H, W, C]
/// \param boxes [NUM_BOXES, 4] where boxes[box] is [y1, x1, y2, x2] each in [0, 1]
/// \param box_indices [NUM_BOXES] in [0, N)
/// \param crop_size [crop_height, crop_width]
CropAndResize(const Output<Node>& image,
const Output<Node>& boxes,
const Output<Node>& box_indices,
const Output<Node>& crop_size,
ResizeMethod resize_method,
float extrapolation_value);
/// \param image [N, H, W, C]
/// \param boxes [NUM_BOXES, 4] where boxes[box] is [y1, x1, y2, x2] each in [0, 1]
/// \param box_indices [NUM_BOXES] in [0, N)
/// \param crop_size [crop_height, crop_width]
CropAndResize(const Output<Node>& image,
const Output<Node>& boxes,
const Output<Node>& box_indices,
const Output<Node>& crop_size,
ResizeMethod resize_method,
float extrapolation_value);
void validate_and_infer_types() override;
void validate_and_infer_types() override;
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
ResizeMethod get_resize_method() const { return m_resize_method; }
void set_resize_method(ResizeMethod resize_method) { m_resize_method = resize_method; }
float get_extrapolation_value() const { return m_extrapolation_value; }
void set_extrapolation_value(float extrapolation_value)
{
m_extrapolation_value = extrapolation_value;
}
ResizeMethod get_resize_method() const { return m_resize_method; }
void set_resize_method(ResizeMethod resize_method)
{
m_resize_method = resize_method;
}
float get_extrapolation_value() const { return m_extrapolation_value; }
void set_extrapolation_value(float extrapolation_value)
{
m_extrapolation_value = extrapolation_value;
}
private:
ResizeMethod m_resize_method{ResizeMethod::unspecified};
float m_extrapolation_value{0};
};
private:
ResizeMethod m_resize_method{ResizeMethod::unspecified};
float m_extrapolation_value{0};
};
}
using v0::CropAndResize;
}
const std::string& as_string(op::CropAndResize::ResizeMethod);
......
@@ -24,45 +24,50 @@ namespace ngraph
{
namespace op
{
/// \brief Dequantize operation
/// Maps quantized input (q) to real output (r) using scale (s) and zero point (z):
/// r = (q - z) * s
class NGRAPH_API Dequantize : public ngraph::op::Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"Dequantize", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a Dequantize operation
Dequantize() = default;
/// \brief Dequantize operation
/// Maps quantized input (q) to real output (r) using scale (s) and zero point
/// (z):
/// r = (q - z) * s
class NGRAPH_API Dequantize : public ngraph::op::Op
{
public:
static constexpr NodeTypeInfo type_info{"Dequantize", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a Dequantize operation
Dequantize() = default;
/// \brief Constructs a Dequantize operation
/// \param input quantized input
/// \param scale scale used for mapping
/// \param zero_point zero point used for mapping
/// \param type output element type
/// \param axes axis positions on which `scale` and `zero_point` are specified
Dequantize(const Output<Node>& input,
const Output<Node>& scale,
const Output<Node>& zero_point,
const element::Type& type,
const AxisSet& axes);
/// \brief Constructs a Dequantize operation
/// \param input quantized input
/// \param scale scale used for mapping
/// \param zero_point zero point used for mapping
/// \param type output element type
/// \param axes axis positions on which `scale` and `zero_point` are specified
Dequantize(const Output<Node>& input,
const Output<Node>& scale,
const Output<Node>& zero_point,
const element::Type& type,
const AxisSet& axes);
void validate_and_infer_types() override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
const AxisSet& get_axes() const { return m_axes; }
void set_axes(const AxisSet& axes) { m_axes = axes; }
const element::Type& get_type() const { return m_type; }
void set_type(const element::Type& type) { m_type = type; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
const AxisSet& get_axes() const { return m_axes; }
void set_axes(const AxisSet& axes) { m_axes = axes; }
const element::Type& get_type() const { return m_type; }
void set_type(const element::Type& type) { m_type = type; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
private:
element::Type m_type;
AxisSet m_axes;
};
private:
element::Type m_type;
AxisSet m_axes;
};
}
using v0::Dequantize;
}
}
@@ -24,19 +24,19 @@ using namespace ngraph;
constexpr NodeTypeInfo op::BatchMatMul::type_info;
op::BatchMatMul::BatchMatMul(const Output<Node>& arg0, const Output<Node>& arg1)
op::v0::BatchMatMul::BatchMatMul(const Output<Node>& arg0, const Output<Node>& arg1)
: Op({arg0, arg1})
{
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::BatchMatMul::copy_with_new_args(const NodeVector& new_args) const
shared_ptr<Node> op::v0::BatchMatMul::copy_with_new_args(const NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<BatchMatMul>(new_args.at(0), new_args.at(1));
}
void op::BatchMatMul::validate_and_infer_types()
void op::v0::BatchMatMul::validate_and_infer_types()
{
// Check input types
const auto& arg0_et = get_input_element_type(0);
@@ -77,7 +77,8 @@ void op::BatchMatMul::validate_and_infer_types()
set_output_type(0, output_et, output_shape);
}
void op::BatchMatMul::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
void op::v0::BatchMatMul::generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas)
{
auto delta = deltas.at(0); // NxIxK
......
@@ -22,34 +22,38 @@ namespace ngraph
{
namespace op
{
/// \brief Matrix multiply for a batch of Rank 2 tensors.
/// The inputs are expected to be Rank 3, where the first dim is the
/// batch size and must be the same for both inputs. The last two dims
/// are the shape of matrices, i.e. `(batch_size, :, :)`.
/// For example, for `a` with shape `(batch_size, n, k)`, and `b` with
/// shape `(batch_size, k, m)`, the result of BatchMatMul will have shape
/// `(batch_size, n, m)`, and `BatchMatMul(a, b)[i] = Dot(a[i], b[i])`.
class NGRAPH_API BatchMatMul : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"BatchMatMul", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
BatchMatMul() = default;
/// \brief Constructs a batch of matmul product operation.
///
/// \param arg0 The node producing the first argument.
/// \param arg1 The node producing the second argument.
BatchMatMul(const Output<Node>& arg0, const Output<Node>& arg1);
/// \brief Matrix multiply for a batch of Rank 2 tensors.
/// The inputs are expected to be Rank 3, where the first dim is the
/// batch size and must be the same for both inputs. The last two dims
/// are the shape of matrices, i.e. `(batch_size, :, :)`.
/// For example, for `a` with shape `(batch_size, n, k)`, and `b` with
/// shape `(batch_size, k, m)`, the result of BatchMatMul will have shape
/// `(batch_size, n, m)`, and `BatchMatMul(a, b)[i] = Dot(a[i], b[i])`.
class NGRAPH_API BatchMatMul : public Op
{
public:
static constexpr NodeTypeInfo type_info{"BatchMatMul", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
BatchMatMul() = default;
/// \brief Constructs a batch of matmul product operation.
///
/// \param arg0 The node producing the first argument.
/// \param arg1 The node producing the second argument.
BatchMatMul(const Output<Node>& arg0, const Output<Node>& arg1);
virtual void validate_and_infer_types() override;
virtual void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
}
using v0::BatchMatMul;
namespace util
{
......
@@ -22,45 +22,49 @@ namespace ngraph
{
namespace op
{
/// \brief CompiledKernel represents a sub-graph that can be compiled and executed
/// independently.
///
/// This op can be used to delimit sub-graphs with special compilation requirements
/// within a function. For example, we currently use it to delimit sub-graphs that will be
/// independently compiled and executed by the MLIR backend.
class NGRAPH_API CompiledKernel : public ngraph::op::Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"CompiledKernel", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
CompiledKernel() = default;
CompiledKernel(const NodeVector& node_list,
const NodeVector& outputs,
const NodeVector& args);
CompiledKernel(const OutputVector& node_list,
const OutputVector& outputs,
const OutputVector& args);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
const NodeVector& get_node_list() const { return m_node_list; }
const NodeVector& get_kernel_outputs() const { return m_output_nodes; }
// For node B inside CompiledKernel ck such that A->B and A is outside of ck:
// replace input to B with a dummy Parameter Op and add an entry to ck's
// m_input_map.
void encapsulate_nodes();
const std::unordered_map<std::shared_ptr<Node>, size_t>& get_input_map() const
/// \brief CompiledKernel represents a sub-graph that can be compiled and executed
/// independently.
///
/// This op can be used to delimit sub-graphs with special compilation requirements
/// within a function. For example, we currently use it to delimit sub-graphs that will
/// be independently compiled and executed by the MLIR backend.
class NGRAPH_API CompiledKernel : public Op
{
return m_input_map;
}
void insert_to_input_map(std::shared_ptr<Node>, size_t);
public:
static constexpr NodeTypeInfo type_info{"CompiledKernel", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
CompiledKernel() = default;
CompiledKernel(const NodeVector& node_list,
const NodeVector& outputs,
const NodeVector& args);
CompiledKernel(const OutputVector& node_list,
const OutputVector& outputs,
const OutputVector& args);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
const NodeVector& get_node_list() const { return m_node_list; }
const NodeVector& get_kernel_outputs() const { return m_output_nodes; }
// For node B inside CompiledKernel ck such that A->B and A is outside of ck:
// replace input to B with a dummy Parameter Op and add an entry to ck's
// m_input_map.
void encapsulate_nodes();
const std::unordered_map<std::shared_ptr<Node>, size_t>& get_input_map() const
{
return m_input_map;
}
void insert_to_input_map(std::shared_ptr<Node>, size_t);
private:
NodeVector m_node_list;
NodeVector m_output_nodes;
// Used to store information about internal nodes that have inputs coming from outside
// of the CK
std::unordered_map<std::shared_ptr<Node>, size_t> m_input_map;
};
private:
NodeVector m_node_list;
NodeVector m_output_nodes;
// Used to store information about internal nodes that have inputs coming from
// outside of the CK
std::unordered_map<std::shared_ptr<Node>, size_t> m_input_map;
};
}
using v0::CompiledKernel;
}
}
@@ -23,35 +23,41 @@ namespace ngraph
{
namespace op
{
/// \brief Operation which "adds" axes to an input tensor, replicating elements from the
/// input as needed along the new axes.
///
/// This is basically the "dynamic shape" version of the static Broadcast op.
class NGRAPH_API DynBroadcast : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"DynBroadcast", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
DynBroadcast() = default;
/// \brief Constructs a dynamic broadcast operation.
/// \brief Operation which "adds" axes to an input tensor, replicating elements from the
/// input as needed along the new axes.
///
/// \param arg Node that produces the input tensor to be broadcast.
/// \param shape Node that produces shape of the output tensor.
/// \param broadcast_axes Node that produces the axis positions (0-based) in the result
/// that are being broadcast. The remaining axes in shape must be
/// the same as the shape of arg.
DynBroadcast(const Output<Node>& arg,
const Output<Node>& shape,
const Output<Node>& broadcast_axes);
/// This is basically the "dynamic shape" version of the static Broadcast op.
class NGRAPH_API DynBroadcast : public Op
{
public:
static constexpr NodeTypeInfo type_info{"DynBroadcast", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
DynBroadcast() = default;
/// \brief Constructs a dynamic broadcast operation.
///
/// \param arg Node that produces the input tensor to be broadcast.
/// \param shape Node that produces shape of the output tensor.
/// \param broadcast_axes Node that produces the axis positions (0-based) in the
/// result
/// that are being broadcast. The remaining axes in shape must
/// be
/// the same as the shape of arg.
DynBroadcast(const Output<Node>& arg,
const Output<Node>& shape,
const Output<Node>& broadcast_axes);
void validate_and_infer_types() override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
}
using v0::DynBroadcast;
}
}
@@ -22,39 +22,44 @@ namespace ngraph
{
namespace op
{
/// \brief Generic padding operation which takes padding below and above as dynamic shapes.
/// This is similar to the existing Pad operation except padding values are dynamic.
class NGRAPH_API DynPad : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"DynPad", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
DynPad() = default;
/// \brief Perform dynamic padding of a tensor
///
/// \param arg The node producing input tensor to be padded.
/// \param padding_below The node producing the padding-below widths.
/// \param padding_above The node producing the padding-above widths.
/// \param padding_value The value to be used for padding. Must be scalar.
/// \param pad_mode The padding mode: CONSTANT(default), EDGE or REFLECT.
DynPad(const Output<Node>& arg,
const Output<Node>& padding_below,
const Output<Node>& padding_above,
const Output<Node>& padding_value,
PadMode pad_mode = PadMode::CONSTANT);
/// \brief Generic padding operation which takes padding below and above as dynamic
/// shapes.
/// This is similar to the existing Pad operation except padding values are dynamic.
class NGRAPH_API DynPad : public Op
{
public:
static constexpr NodeTypeInfo type_info{"DynPad", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
DynPad() = default;
/// \brief Perform dynamic padding of a tensor
///
/// \param arg The node producing input tensor to be padded.
/// \param padding_below The node producing the padding-below widths.
/// \param padding_above The node producing the padding-above widths.
/// \param padding_value The value to be used for padding. Must be scalar.
/// \param pad_mode The padding mode: CONSTANT(default), EDGE or REFLECT.
DynPad(const Output<Node>& arg,
const Output<Node>& padding_below,
const Output<Node>& padding_above,
const Output<Node>& padding_value,
PadMode pad_mode = PadMode::CONSTANT);
PadMode get_pad_mode() const { return m_pad_mode; }
void validate_and_infer_types() override;
PadMode get_pad_mode() const { return m_pad_mode; }
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
private:
PadMode m_pad_mode;
};
private:
PadMode m_pad_mode;
};
}
using v0::DynPad;
}
}
@@ -23,60 +23,65 @@ namespace ngraph
{
namespace op
{
/// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a
/// bounding box, optionally with stride.
class NGRAPH_API DynReplaceSlice : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"DynReplaceSlice", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
DynReplaceSlice() = default;
/// \brief Constructs a dynamic tensor replace-slice operation.
///
/// \param arg The tensor in which to replace the slice.
/// \param replacement Data to copy to the slice for replacement.
/// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
/// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
/// \param strides The slicing strides; for example, strides of `{n,m}` means to take
/// every nth row and every mth column of the input matrix.
/// \param lower_bounds_mask Ignores lower_bounds for axis with the mask set
/// \param upper_bounds_mask Ignores upper_bounds for axis with the mask set
/// \param new_axis Add dimension one axis at the set positions
/// \param shrink_axis Delete dimensions at the set positions
/// \param ellipsis_mask Inserts missing dimensions on the set position
DynReplaceSlice(const Output<Node>& arg,
const Output<Node>& replacement,
const Output<Node>& lower_bounds,
const Output<Node>& upper_bounds,
const Output<Node>& strides,
const AxisSet& lower_bounds_mask = AxisSet{},
const AxisSet& upper_bounds_mask = AxisSet{},
const AxisSet& new_axis = AxisSet{},
const AxisSet& shrink_axis = AxisSet{},
const AxisSet& ellipsis_mask = AxisSet{});
/// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a
/// bounding box, optionally with stride.
class NGRAPH_API DynReplaceSlice : public Op
{
public:
static constexpr NodeTypeInfo type_info{"DynReplaceSlice", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
DynReplaceSlice() = default;
/// \brief Constructs a dynamic tensor replace-slice operation.
///
/// \param arg The tensor in which to replace the slice.
/// \param replacement Data to copy to the slice for replacement.
/// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
/// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
/// \param strides The slicing strides; for example, strides of `{n,m}` means to
/// take
/// every nth row and every mth column of the input matrix.
/// \param lower_bounds_mask Ignores lower_bounds for axis with the mask set
/// \param upper_bounds_mask Ignores upper_bounds for axis with the mask set
/// \param new_axis Add dimension one axis at the set positions
/// \param shrink_axis Delete dimensions at the set positions
/// \param ellipsis_mask Inserts missing dimensions on the set position
DynReplaceSlice(const Output<Node>& arg,
const Output<Node>& replacement,
const Output<Node>& lower_bounds,
const Output<Node>& upper_bounds,
const Output<Node>& strides,
const AxisSet& lower_bounds_mask = AxisSet{},
const AxisSet& upper_bounds_mask = AxisSet{},
const AxisSet& new_axis = AxisSet{},
const AxisSet& shrink_axis = AxisSet{},
const AxisSet& ellipsis_mask = AxisSet{});
const AxisSet& get_lower_bounds_mask() const { return m_lower_bounds_mask; }
const AxisSet& get_upper_bounds_mask() const { return m_upper_bounds_mask; }
const AxisSet& get_new_axis() const { return m_new_axis; }
const AxisSet& get_shrink_axis() const { return m_shrink_axis; }
const AxisSet& get_ellipsis_mask() const { return m_ellipsis_mask; }
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
void validate_and_infer_types() override;
const AxisSet& get_lower_bounds_mask() const { return m_lower_bounds_mask; }
const AxisSet& get_upper_bounds_mask() const { return m_upper_bounds_mask; }
const AxisSet& get_new_axis() const { return m_new_axis; }
const AxisSet& get_shrink_axis() const { return m_shrink_axis; }
const AxisSet& get_ellipsis_mask() const { return m_ellipsis_mask; }
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
void validate_and_infer_types() override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
private:
/// Helper method to compute output shape
Shape compute_output_shape() const;
private:
/// Helper method to compute output shape
Shape compute_output_shape() const;
AxisSet m_lower_bounds_mask;
AxisSet m_upper_bounds_mask;
AxisSet m_new_axis;
AxisSet m_shrink_axis;
AxisSet m_ellipsis_mask;
};
AxisSet m_lower_bounds_mask;
AxisSet m_upper_bounds_mask;
AxisSet m_new_axis;
AxisSet m_shrink_axis;
AxisSet m_ellipsis_mask;
};
}
using v0::DynReplaceSlice;
}
}
@@ -23,58 +23,63 @@ namespace ngraph
{
namespace op
{
/// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a
/// bounding box, optionally with stride.
class NGRAPH_API DynSlice : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"DynSlice", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
DynSlice() = default;
/// \brief Constructs a dynamic tensor slice operation.
///
/// \param arg The tensor to be sliced.
/// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
/// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
/// \param strides The slicing strides; for example, strides of `{n,m}` means to take
/// every nth row and every mth column of the input matrix.
/// \param lower_bounds_mask Ignores lower_bounds for axis with the mask set
/// \param upper_bounds_mask Ignores upper_bounds for axis with the mask set
/// \param new_axis Add dimension one axis at the set positions
/// \param shrink_axis Delete dimensions at the set positions
/// \param ellipsis_mask Inserts missing dimensions on the set position
DynSlice(const Output<Node>& arg,
const Output<Node>& lower_bounds,
const Output<Node>& upper_bounds,
const Output<Node>& strides,
const AxisSet& lower_bounds_mask = AxisSet{},
const AxisSet& upper_bounds_mask = AxisSet{},
const AxisSet& new_axis = AxisSet{},
const AxisSet& shrink_axis = AxisSet{},
const AxisSet& ellipsis_mask = AxisSet{});
/// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a
/// bounding box, optionally with stride.
class NGRAPH_API DynSlice : public Op
{
public:
static constexpr NodeTypeInfo type_info{"DynSlice", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
DynSlice() = default;
/// \brief Constructs a dynamic tensor slice operation.
///
/// \param arg The tensor to be sliced.
/// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
/// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
/// \param strides The slicing strides; for example, strides of `{n,m}` means to
/// take
/// every nth row and every mth column of the input matrix.
/// \param lower_bounds_mask Ignores lower_bounds for axis with the mask set
/// \param upper_bounds_mask Ignores upper_bounds for axis with the mask set
/// \param new_axis Add dimension one axis at the set positions
/// \param shrink_axis Delete dimensions at the set positions
/// \param ellipsis_mask Inserts missing dimensions on the set position
DynSlice(const Output<Node>& arg,
const Output<Node>& lower_bounds,
const Output<Node>& upper_bounds,
const Output<Node>& strides,
const AxisSet& lower_bounds_mask = AxisSet{},
const AxisSet& upper_bounds_mask = AxisSet{},
const AxisSet& new_axis = AxisSet{},
const AxisSet& shrink_axis = AxisSet{},
const AxisSet& ellipsis_mask = AxisSet{});
const AxisSet& get_lower_bounds_mask() const { return m_lower_bounds_mask; }
const AxisSet& get_upper_bounds_mask() const { return m_upper_bounds_mask; }
const AxisSet& get_new_axis() const { return m_new_axis; }
const AxisSet& get_shrink_axis() const { return m_shrink_axis; }
const AxisSet& get_ellipsis_mask() const { return m_ellipsis_mask; }
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
void validate_and_infer_types() override;
const AxisSet& get_lower_bounds_mask() const { return m_lower_bounds_mask; }
const AxisSet& get_upper_bounds_mask() const { return m_upper_bounds_mask; }
const AxisSet& get_new_axis() const { return m_new_axis; }
const AxisSet& get_shrink_axis() const { return m_shrink_axis; }
const AxisSet& get_ellipsis_mask() const { return m_ellipsis_mask; }
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
void validate_and_infer_types() override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
private:
/// Helper method to compute output shape
Shape compute_output_shape() const;
private:
/// Helper method to compute output shape
Shape compute_output_shape() const;
AxisSet m_lower_bounds_mask;
AxisSet m_upper_bounds_mask;
AxisSet m_new_axis;
AxisSet m_shrink_axis;
AxisSet m_ellipsis_mask;
};
AxisSet m_lower_bounds_mask;
AxisSet m_upper_bounds_mask;
AxisSet m_new_axis;
AxisSet m_shrink_axis;
AxisSet m_ellipsis_mask;
};
}
using v0::DynSlice;
}
}
@@ -23,39 +23,49 @@ namespace ngraph
{
namespace op
{
/// \brief Relu(Convolution) forward prop for batched convolution operation.
class NGRAPH_API QuantizedConvolutionRelu : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"QuantizedConvolutionRelu", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
QuantizedConvolutionRelu() = default;
QuantizedConvolutionRelu(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const Output<Node>& scale);
/// \brief Relu(Convolution) forward prop for batched convolution operation.
class NGRAPH_API QuantizedConvolutionRelu : public Op
{
public:
static constexpr NodeTypeInfo type_info{"QuantizedConvolutionRelu", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
QuantizedConvolutionRelu() = default;
QuantizedConvolutionRelu(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const Output<Node>& scale);
const Strides& get_window_movement_strides() const { return m_window_movement_strides; }
const Strides& get_window_dilation_strides() const { return m_window_dilation_strides; }
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
Output<Node> get_filters() { return input_value(1); }
Output<Node> get_data_batch() { return input_value(0); }
bool with_relu() const { return true; }
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
const Strides& get_window_movement_strides() const
{
return m_window_movement_strides;
}
const Strides& get_window_dilation_strides() const
{
return m_window_dilation_strides;
}
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
Output<Node> get_filters() { return input_value(1); }
Output<Node> get_data_batch() { return input_value(0); }
bool with_relu() const { return true; }
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
protected:
Strides m_window_movement_strides;
Strides m_window_dilation_strides;
CoordinateDiff m_padding_below;
CoordinateDiff m_padding_above;
Strides m_data_dilation_strides;
};
protected:
Strides m_window_movement_strides;
Strides m_window_dilation_strides;
CoordinateDiff m_padding_below;
CoordinateDiff m_padding_above;
Strides m_data_dilation_strides;
};
}
using v0::QuantizedConvolutionRelu;
}
}
@@ -24,35 +24,39 @@ namespace ngraph
{
namespace op
{
class NGRAPH_API QuantizedDotBias : public Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"QuantizedDotBias", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
QuantizedDotBias() = default;
QuantizedDotBias(const Output<Node>& data,
const Output<Node>& weights,
const Output<Node>& bias,
const Output<Node>& scale,
bool requantize = true,
bool with_relu = false);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override
class NGRAPH_API QuantizedDotBias : public Op
{
check_new_args_count(this, new_args);
return std::make_shared<QuantizedDotBias>(new_args.at(0),
new_args.at(1),
new_args.at(2),
new_args.at(3),
m_requantize,
m_with_relu);
}
bool with_relu() const { return m_with_relu; }
bool requantize() const { return m_requantize; }
protected:
bool m_requantize;
bool m_with_relu;
};
} // namespace op
} // namespace ngraph
public:
static constexpr NodeTypeInfo type_info{"QuantizedDotBias", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
QuantizedDotBias() = default;
QuantizedDotBias(const Output<Node>& data,
const Output<Node>& weights,
const Output<Node>& bias,
const Output<Node>& scale,
bool requantize = true,
bool with_relu = false);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override
{
check_new_args_count(this, new_args);
return std::make_shared<QuantizedDotBias>(new_args.at(0),
new_args.at(1),
new_args.at(2),
new_args.at(3),
m_requantize,
m_with_relu);
}
bool with_relu() const { return m_with_relu; }
bool requantize() const { return m_requantize; }
protected:
bool m_requantize;
bool m_with_relu;
};
}
using v0::QuantizedDotBias;
}
}
@@ -24,63 +24,67 @@ namespace ngraph
{
namespace op
{
/// \brief Generates a tensor populated with random values of a uniform distribution.
class NGRAPH_API RandomUniform : public op::Op
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"RandomUniform", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an uninitialized RandomUniform node.
RandomUniform() = default;
/// \brief Generates a tensor populated with random values of a uniform distribution.
class NGRAPH_API RandomUniform : public op::Op
{
public:
static constexpr NodeTypeInfo type_info{"RandomUniform", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an uninitialized RandomUniform node.
RandomUniform() = default;
/// \brief Constructs a RandomUniform node.
/// \param min_value Output producing the minimum value (inclusive) for the random
/// uniform distribution. Must return a scalar of floating point type,
/// and the type must match that of `max_value`.
/// \param max_value Output producing the maximum value (inclusive) for the random
/// uniform distribution. Must return a scalar of floating point type,
/// and the type must match that of `min_value`.
/// \param result_shape Output producing the shape of the output tensor. Must return a
/// vector of type `element::i64`.
/// \param use_fixed_seed Output producing a boolean scalar flag indicating whether to
/// use the value supplied in `fixed_seed` to re-seed the random
/// number generator at this iteration. Note that whenever
/// `use_fixed_seed` is `true`, the same values will be generated
/// in the output tensor. This flag is primarily used for
/// debugging. If `use_fixed_seed` is `false`, the value in
/// `fixed_seed` is ignored.
/// \param fixed_seed Fixed seed value to be supplied to the random number generator if
/// `use_fixed_seed` is `true`. If `use_fixed_seed` is `false`, this
/// value is ignored.
RandomUniform(const Output<Node>& min_value,
const Output<Node>& max_value,
const Output<Node>& result_shape,
const Output<Node>& use_fixed_seed,
uint64_t fixed_seed);
/// \brief Constructs a RandomUniform node.
/// \param min_value Output producing the minimum value (inclusive) for the random
/// uniform distribution. Must return a scalar of floating point
/// type, and the type must match that of `max_value`.
/// \param max_value Output producing the maximum value (inclusive) for the random
/// uniform distribution. Must return a scalar of floating point
/// type, and the type must match that of `min_value`.
/// \param result_shape Output producing the shape of the output tensor. Must return
/// a vector of type `element::i64`.
/// \param use_fixed_seed Output producing a boolean scalar flag indicating whether
/// to use the value supplied in `fixed_seed` to re-seed the
/// random number generator at this iteration. Note that
/// whenever `use_fixed_seed` is `true`, the same values will
/// be generated in the output tensor. This flag is primarily
/// used for debugging. If `use_fixed_seed` is `false`, the
/// value in `fixed_seed` is ignored.
/// \param fixed_seed Fixed seed value to be supplied to the random number generator
/// if `use_fixed_seed` is `true`. If `use_fixed_seed` is `false`,
/// this value is ignored.
RandomUniform(const Output<Node>& min_value,
const Output<Node>& max_value,
const Output<Node>& result_shape,
const Output<Node>& use_fixed_seed,
uint64_t fixed_seed);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
/// \brief Returns the fixed seed value to be supplied to the random number generator
/// if `use_fixed_seed` is `true`. If `use_fixed_seed` is `false`, this value is
/// ignored.
uint64_t get_fixed_seed() const { return m_fixed_seed; }
/// \brief Sets the fixed seed value to be supplied to the random number generator
/// if `use_fixed_seed` is `true`. If `use_fixed_seed` is `false`, this value is
/// ignored.
void set_fixed_seed(uint64_t fixed_seed) { m_fixed_seed = fixed_seed; }
// Internally, any implementation of RandomUniform will have state, since it is backed
// by a random number generator.
bool has_state() const override { return true; }
void validate_and_infer_types() override;
/// \brief Returns the fixed seed value to be supplied to the random number
/// generator if `use_fixed_seed` is `true`. If `use_fixed_seed` is `false`,
/// this value is ignored.
uint64_t get_fixed_seed() const { return m_fixed_seed; }
/// \brief Sets the fixed seed value to be supplied to the random number generator
/// if `use_fixed_seed` is `true`. If `use_fixed_seed` is `false`, this value
/// is ignored.
void set_fixed_seed(uint64_t fixed_seed) { m_fixed_seed = fixed_seed; }
// Internally, any implementation of RandomUniform will have state, since it is
// backed by a random number generator.
bool has_state() const override { return true; }
void validate_and_infer_types() override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& /* adjoints */,
const OutputVector& /* deltas */) override
{
}
protected:
virtual void generate_adjoints(autodiff::Adjoints& /* adjoints */,
const OutputVector& /* deltas */) override
{
}
uint64_t m_fixed_seed;
};
uint64_t m_fixed_seed;
};
}
using v0::RandomUniform;
}
}
@@ -24,48 +24,52 @@ namespace ngraph
{
namespace op
{
/// \brief Matrix multiply for a batch of Rank 2 tensors each with potential
/// transpose.
///
/// The inputs are expected to be Rank 3, where the first dim is the
/// batch size and must be the same for both inputs. The last two dims
/// are the shape of matrices, i.e. `(batch_size, :, :)`.
/// For example, for `a` with shape `(batch_size, n, k)`, and `b` with
/// shape `(batch_size, k, m)`, the result of BatchMatMul will have shape
/// `(batch_size, n, m)`, and `BatchMatMulTranspose(a, b)[i] = Dot(a[i], b[i])`.
class NGRAPH_API BatchMatMulTranspose : public ngraph::op::util::FusedOp
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"BatchMatMulTranspose", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
BatchMatMulTranspose() = default;
/// \brief Constructs a batch of matmul product operation.
/// \brief Matrix multiply for a batch of Rank 2 tensors each with potential
/// transpose.
///
/// \param arg0 The node producing the first argument.
/// \param arg1 The node producing the second argument.
/// \param transpose_0 Apply transpose to arg0.
/// \param transpose_1 Apply transpose to arg1.
BatchMatMulTranspose(const Output<Node>& arg0,
const Output<Node>& arg1,
bool transpose_0 = false,
bool transpose_1 = false);
/// The inputs are expected to be Rank 3, where the first dim is the
/// batch size and must be the same for both inputs. The last two dims
/// are the shape of matrices, i.e. `(batch_size, :, :)`.
/// For example, for `a` with shape `(batch_size, n, k)`, and `b` with
/// shape `(batch_size, k, m)`, the result of BatchMatMul will have shape
/// `(batch_size, n, m)`, and `BatchMatMulTranspose(a, b)[i] = Dot(a[i], b[i])`.
class NGRAPH_API BatchMatMulTranspose : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"BatchMatMulTranspose", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
BatchMatMulTranspose() = default;
/// \brief Constructs a batch of matmul product operation.
///
/// \param arg0 The node producing the first argument.
/// \param arg1 The node producing the second argument.
/// \param transpose_0 Apply transpose to arg0.
/// \param transpose_1 Apply transpose to arg1.
BatchMatMulTranspose(const Output<Node>& arg0,
const Output<Node>& arg1,
bool transpose_0 = false,
bool transpose_1 = false);
bool get_transpose_arg0() const { return m_transpose_arg0; }
bool get_transpose_arg1() const { return m_transpose_arg1; }
virtual void validate_and_infer_types() override;
bool get_transpose_arg0() const { return m_transpose_arg0; }
bool get_transpose_arg1() const { return m_transpose_arg1; }
virtual void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual NodeVector decompose_op() const override;
virtual NodeVector decompose_op() const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
private:
bool m_transpose_arg0;
bool m_transpose_arg1;
};
private:
bool m_transpose_arg0;
bool m_transpose_arg1;
};
}
using v0::BatchMatMulTranspose;
}
}
@@ -24,70 +24,79 @@ namespace ngraph
{
namespace op
{
class NGRAPH_API CrossEntropy : public ngraph::op::util::FusedOp
namespace v0
{
public:
static constexpr NodeTypeInfo type_info{"CrossEntropy", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
CrossEntropy() = default;
/// \brief CrossEntropy for computing loss
/// \param arg1 Node that produces the input tensor
/// \param arg2 Node that produces ground truth labels for the input
/// \param soft_label flag indicating whether to interpret the given labels as soft
/// labels
/// \param ignore_index Specifies a target value that is ignored and does not contribute
/// to the input gradient. Only valid if soft_label is set to false
CrossEntropy(const Output<Node>& arg1,
const Output<Node>& arg2,
bool soft_label = false,
int64_t ignore_index = -100);
class NGRAPH_API CrossEntropy : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"CrossEntropy", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
CrossEntropy() = default;
/// \brief CrossEntropy for computing loss
/// \param arg1 Node that produces the input tensor
/// \param arg2 Node that produces ground truth labels for the input
/// \param soft_label flag indicating whether to interpret the given labels as
/// soft labels
/// \param ignore_index Specifies a target value that is ignored and does not
/// contribute to the input gradient. Only valid if soft_label is set to false
CrossEntropy(const Output<Node>& arg1,
const Output<Node>& arg2,
bool soft_label = false,
int64_t ignore_index = -100);
virtual NodeVector decompose_op() const override;
virtual NodeVector decompose_op() const override;
void pre_validate_and_infer_types() override;
void pre_validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
bool get_soft_label() const { return m_soft_label; }
int64_t get_ignore_index() const { return m_ignore_index; }
private:
bool m_soft_label;
int64_t m_ignore_index;
};
bool get_soft_label() const { return m_soft_label; }
int64_t get_ignore_index() const { return m_ignore_index; }
private:
bool m_soft_label;
int64_t m_ignore_index;
};
class NGRAPH_API CrossEntropyBackprop : public util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"CrossEntropyBackprop", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
CrossEntropyBackprop() = default;
class NGRAPH_API CrossEntropyBackprop : public util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"CrossEntropyBackprop", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
CrossEntropyBackprop() = default;
/// \brief Backprop for CrossEntropy
/// \param input Node that produces tensor from the fprop
/// \param labels Node that produces ground truth labels for input
/// \param delta Node that produces the delta during bprop
/// \param soft_label flag indicating whether to interpret the given labels as soft
/// labels
/// \param ignore_index Specifies a target value that is ignored and does not contribute
/// to the input gradient. Only valid if soft_label is set to false
CrossEntropyBackprop(const Output<Node>& input,
const Output<Node>& labels,
const Output<Node>& delta,
bool soft_label = false,
int64_t ignore_index = -100);
/// \brief Backprop for CrossEntropy
/// \param input Node that produces tensor from the fprop
/// \param labels Node that produces ground truth labels for input
/// \param delta Node that produces the delta during bprop
/// \param soft_label flag indicating whether to interpret the given labels as
/// soft labels
/// \param ignore_index Specifies a target value that is ignored and does not
/// contribute to the input gradient. Only valid if soft_label is set to false
CrossEntropyBackprop(const Output<Node>& input,
const Output<Node>& labels,
const Output<Node>& delta,
bool soft_label = false,
int64_t ignore_index = -100);
virtual NodeVector decompose_op() const override;
virtual NodeVector decompose_op() const override;
void pre_validate_and_infer_types() override;
void pre_validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
bool get_soft_label() const { return m_soft_label; }
int64_t get_ignore_index() const { return m_ignore_index; }
private:
bool m_soft_label;
int64_t m_ignore_index;
};
} // namespace op
} // namespace ngraph
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
bool get_soft_label() const { return m_soft_label; }
int64_t get_ignore_index() const { return m_ignore_index; }
private:
bool m_soft_label;
int64_t m_ignore_index;
};
}
using v0::CrossEntropy;
using v0::CrossEntropyBackprop;
}
}
@@ -33,21 +33,21 @@ NGRAPH_OP(Add, ngraph::op::v1, 1)
NGRAPH_OP(All, ngraph::op::v0, 0)
NGRAPH_OP(AllReduce, ngraph::op::v0, 0)
NGRAPH_OP(And, ngraph::op::v0, 0)
NGRAPH_OP(Any, ngraph::op, 0)
NGRAPH_OP(Any, ngraph::op::v0, 0)
NGRAPH_OP(ArgMax, ngraph::op::v0, 0)
NGRAPH_OP(ArgMin, ngraph::op::v0, 0)
NGRAPH_OP(Asin, ngraph::op::v0, 0)
NGRAPH_OP(Atan, ngraph::op, 0)
NGRAPH_OP(Atan2, ngraph::op, 0)
NGRAPH_OP(Atan, ngraph::op::v0, 0)
NGRAPH_OP(Atan2, ngraph::op::v0, 0)
NGRAPH_OP(AvgPool, ngraph::op::v0, 0)
NGRAPH_OP(AvgPool, ngraph::op::v1, 1)
NGRAPH_OP(AvgPoolBackprop, ngraph::op::v0, 0)
NGRAPH_OP(AvgPoolBackprop, ngraph::op::v1, 1)
NGRAPH_OP(BatchMatMul, ngraph::op, 0)
NGRAPH_OP(BatchMatMulTranspose, ngraph::op, 0)
NGRAPH_OP(BatchNormInference, ngraph::op, 0)
NGRAPH_OP(BatchNormTraining, ngraph::op, 0)
NGRAPH_OP(BatchNormTrainingBackprop, ngraph::op, 0)
NGRAPH_OP(BatchMatMul, ngraph::op::v0, 0)
NGRAPH_OP(BatchMatMulTranspose, ngraph::op::v0, 0)
NGRAPH_OP(BatchNormInference, ngraph::op::v0, 0)
NGRAPH_OP(BatchNormTraining, ngraph::op::v0, 0)
NGRAPH_OP(BatchNormTrainingBackprop, ngraph::op::v0, 0)
NGRAPH_OP(BatchToSpace, ngraph::op::v1, 1)
NGRAPH_OP(BinaryConvolution, ngraph::op::v1, 1)
NGRAPH_OP(Broadcast, ngraph::op::v0, 0)
@@ -57,10 +57,10 @@ NGRAPH_OP(BroadcastLike, ngraph::op::v0, 0)
NGRAPH_OP(CTCGreedyDecoder, ngraph::op::v0, 0)
NGRAPH_OP(Ceiling, ngraph::op::v0, 0)
NGRAPH_OP(Clamp, ngraph::op::v0, 0)
NGRAPH_OP(CompiledKernel, ngraph::op, 0)
NGRAPH_OP(CompiledKernel, ngraph::op::v0, 0)
NGRAPH_OP(Concat, ngraph::op::v0, 0)
NGRAPH_OP(Constant, ngraph::op, 0)
NGRAPH_OP(Convert, ngraph::op, 0)
NGRAPH_OP(Constant, ngraph::op::v0, 0)
NGRAPH_OP(Convert, ngraph::op::v0, 0)
NGRAPH_OP(ConvertLike, ngraph::op::v1, 1)
NGRAPH_OP(Convolution, ngraph::op::v0, 0)
NGRAPH_OP(Convolution, ngraph::op::v1, 1)
@@ -71,25 +71,25 @@ NGRAPH_OP(ConvolutionBackpropFilters, ngraph::op::v1, 1)
NGRAPH_OP(ConvolutionBias, ngraph::op::v0, 0)
NGRAPH_OP(ConvolutionBiasAdd, ngraph::op::v0, 0)
NGRAPH_OP(ConvolutionBiasBackpropFiltersBias, ngraph::op::v0, 0)
NGRAPH_OP(Cos, ngraph::op, 0)
NGRAPH_OP(Cosh, ngraph::op, 0)
NGRAPH_OP(CropAndResize, ngraph::op, 0)
NGRAPH_OP(CrossEntropy, ngraph::op, 0)
NGRAPH_OP(CrossEntropyBackprop, ngraph::op, 0)
NGRAPH_OP(Cos, ngraph::op::v0, 0)
NGRAPH_OP(Cosh, ngraph::op::v0, 0)
NGRAPH_OP(CropAndResize, ngraph::op::v0, 0)
NGRAPH_OP(CrossEntropy, ngraph::op::v0, 0)
NGRAPH_OP(CrossEntropyBackprop, ngraph::op::v0, 0)
NGRAPH_OP(CumSum, ngraph::op::v0, 0)
NGRAPH_OP(DeformableConvolution, ngraph::op::v1, 1)
NGRAPH_OP(DeformablePSROIPooling, ngraph::op::v1, 1)
NGRAPH_OP(DepthToSpace, ngraph::op::v0, 0)
NGRAPH_OP(Dequantize, ngraph::op, 0)
NGRAPH_OP(Dequantize, ngraph::op::v0, 0)
NGRAPH_OP(DetectionOutput, ngraph::op::v0, 0)
NGRAPH_OP(Divide, ngraph::op::v0, 0)
NGRAPH_OP(Divide, ngraph::op::v1, 1)
NGRAPH_OP(Dot, ngraph::op::v0, 0)
NGRAPH_OP(DynBroadcast, ngraph::op, 0)
NGRAPH_OP(DynPad, ngraph::op, 0)
NGRAPH_OP(DynReplaceSlice, ngraph::op, 0)
NGRAPH_OP(DynBroadcast, ngraph::op::v0, 0)
NGRAPH_OP(DynPad, ngraph::op::v0, 0)
NGRAPH_OP(DynReplaceSlice, ngraph::op::v0, 0)
NGRAPH_OP(DynReshape, ngraph::op::v0, 0)
NGRAPH_OP(DynSlice, ngraph::op, 0)
NGRAPH_OP(DynSlice, ngraph::op::v0, 0)
NGRAPH_OP(Elu, ngraph::op::v0, 0)
NGRAPH_OP(EmbeddingLookup, ngraph::op::v0, 0)
NGRAPH_OP(Equal, ngraph::op::v0, 0)
@@ -131,7 +131,7 @@ NGRAPH_OP(Less, ngraph::op::v0, 0)
NGRAPH_OP(Less, ngraph::op::v1, 1)
NGRAPH_OP(LessEq, ngraph::op::v0, 0)
NGRAPH_OP(LessEqual, ngraph::op::v1, 1)
NGRAPH_OP(Log, ngraph::op, 0)
NGRAPH_OP(Log, ngraph::op::v0, 0)
NGRAPH_OP(LogicalAnd, ngraph::op::v1, 1)
NGRAPH_OP(LogicalNot, ngraph::op::v1, 1)
NGRAPH_OP(LogicalOr, ngraph::op::v1, 1)
@@ -151,7 +151,7 @@ NGRAPH_OP(Minimum, ngraph::op::v1, 1)
NGRAPH_OP(Mod, ngraph::op::v1, 1)
NGRAPH_OP(Multiply, ngraph::op::v0, 0)
NGRAPH_OP(Multiply, ngraph::op::v1, 1)
NGRAPH_OP(Negative, ngraph::op, 0)
NGRAPH_OP(Negative, ngraph::op::v0, 0)
NGRAPH_OP(NonMaxSuppression, ngraph::op::v1, 1)
NGRAPH_OP(NormalizeL2, ngraph::op::v0, 0)
NGRAPH_OP(Not, ngraph::op::v0, 0)
@@ -166,25 +166,25 @@ NGRAPH_OP(Pad, ngraph::op::v1, 1)
NGRAPH_OP(Parameter, ngraph::op::v0, 0)
NGRAPH_OP(PartialSlice, ngraph::op::v0, 0)
NGRAPH_OP(PartialSliceBackprop, ngraph::op::v0, 0)
NGRAPH_OP(Passthrough, ngraph::op, 0)
NGRAPH_OP(Passthrough, ngraph::op::v0, 0)
NGRAPH_OP(Power, ngraph::op::v0, 0)
NGRAPH_OP(Power, ngraph::op::v1, 1)
NGRAPH_OP(PriorBox, ngraph::op::v0, 0)
NGRAPH_OP(PriorBoxClustered, ngraph::op::v0, 0)
NGRAPH_OP(Product, ngraph::op, 0)
NGRAPH_OP(Product, ngraph::op::v0, 0)
NGRAPH_OP(Proposal, ngraph::op::v0, 0)
NGRAPH_OP(Quantize, ngraph::op::v0, 0)
NGRAPH_OP(QuantizedConvolution, ngraph::op::v0, 0)
NGRAPH_OP(QuantizedConvolutionBias, ngraph::op, 0)
NGRAPH_OP(QuantizedConvolutionBiasAdd, ngraph::op, 0)
NGRAPH_OP(QuantizedConvolutionBiasSignedAdd, ngraph::op, 0)
NGRAPH_OP(QuantizedConvolutionRelu, ngraph::op, 0)
NGRAPH_OP(QuantizedConvolutionBias, ngraph::op::v0, 0)
NGRAPH_OP(QuantizedConvolutionBiasAdd, ngraph::op::v0, 0)
NGRAPH_OP(QuantizedConvolutionBiasSignedAdd, ngraph::op::v0, 0)
NGRAPH_OP(QuantizedConvolutionRelu, ngraph::op::v0, 0)
NGRAPH_OP(QuantizedDot, ngraph::op::v0, 0)
NGRAPH_OP(QuantizedDotBias, ngraph::op, 0)
NGRAPH_OP(QuantizedDotBias, ngraph::op::v0, 0)
NGRAPH_OP(RNNCell, ngraph::op::v0, 0)
NGRAPH_OP(ROIPooling, ngraph::op::v0, 0)
NGRAPH_OP(RandomUniform, ngraph::op, 0)
NGRAPH_OP(Range, ngraph::op, 0)
NGRAPH_OP(RandomUniform, ngraph::op::v0, 0)
NGRAPH_OP(Range, ngraph::op::v0, 0)
NGRAPH_OP(Recv, ngraph::op::v0, 0)
NGRAPH_OP(ReduceMax, ngraph::op::v1, 1)
NGRAPH_OP(ReduceLogicalAnd, ngraph::op::v1, 1)
@@ -230,7 +230,7 @@ NGRAPH_OP(SpaceToBatch, ngraph::op::v1, 1)
NGRAPH_OP(SpaceToDepth, ngraph::op::v0, 0)
NGRAPH_OP(Split, ngraph::op::v1, 1)
NGRAPH_OP(Split, ngraph::op::v0, 0)
NGRAPH_OP(Sqrt, ngraph::op, 0)
NGRAPH_OP(Sqrt, ngraph::op::v0, 0)
NGRAPH_OP(SquaredDifference, ngraph::op::v0, 0)
NGRAPH_OP(Squeeze, ngraph::op::v0, 0)
NGRAPH_OP(Stack, ngraph::op::v0, 0)
......
@@ -87,7 +87,6 @@
#include "ngraph/op/fused/clamp.hpp"
#include "ngraph/op/fused/conv_fused.hpp"
#include "ngraph/op/fused/crossentropy.hpp"
#include "ngraph/op/fused/crossentropy.hpp"
#include "ngraph/op/fused/depth_to_space.hpp"
#include "ngraph/op/fused/elu.hpp"
#include "ngraph/op/fused/fake_quantize.hpp"
@@ -175,7 +174,6 @@
#include "ngraph/op/sqrt.hpp"
#include "ngraph/op/stop_gradient.hpp"
#include "ngraph/op/strided_slice.hpp"
#include "ngraph/op/strided_slice.hpp"
#include "ngraph/op/subtract.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/op/tan.hpp"
@@ -184,5 +182,4 @@
#include "ngraph/op/topk.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/op/variadic_split.hpp"
#include "ngraph/op/variadic_split.hpp"
#include "ngraph/op/xor.hpp"