Unverified commit 1011a992 authored by Robert Kimball, committed by GitHub

Move ops in ngraph::op namespace to ngraph::op::v0 (#4377)

* Move op to v0 namespace

* BatchMatMul

* BatchNorm*

* CompiledKernel

* Constant

* Fix more

* More

* Fix Quantized*

* fix last v0 ops

* fix compile error

* fix build error

* Fix GPU build

* Fix build error
Co-authored-by: Scott Cyphers <diyessi@users.noreply.github.com>
parent 7018f9ca
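The diff below applies one mechanical pattern per op header: the class definition moves into a nested v0 namespace, and a using-declaration in ngraph::op keeps the old unqualified name compiling. A minimal sketch of the pattern, with MyOp as a placeholder rather than an op touched by this PR:

namespace ngraph
{
    namespace op
    {
        namespace v0
        {
            // The op class itself now lives in ngraph::op::v0.
            class NGRAPH_API MyOp : public Op
            {
            };
        }
        // Backward-compatibility alias: existing code that refers to
        // ngraph::op::MyOp keeps compiling unchanged.
        using v0::MyOp;
    }
}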
......@@ -23,6 +23,7 @@
#include "ngraph/check.hpp"
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/experimental/compiled_kernel.hpp"
#include <mlir/IR/Builders.h>
#include <mlir/IR/Module.h>
......@@ -42,10 +43,6 @@ namespace ngraph
{
class Type;
}
namespace op
{
class CompiledKernel;
}
namespace runtime
{
namespace ngmlir
......
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Logical "any" reduction operation.
class NGRAPH_API Any : public util::LogicalReduction
......@@ -48,4 +50,6 @@ namespace ngraph
virtual std::shared_ptr<Node> get_default_value() const override;
};
}
using v0::Any;
}
}
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
class NGRAPH_API CropAndResize : public Op
{
......@@ -53,7 +55,10 @@ namespace ngraph
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
ResizeMethod get_resize_method() const { return m_resize_method; }
void set_resize_method(ResizeMethod resize_method) { m_resize_method = resize_method; }
void set_resize_method(ResizeMethod resize_method)
{
m_resize_method = resize_method;
}
float get_extrapolation_value() const { return m_extrapolation_value; }
void set_extrapolation_value(float extrapolation_value)
{
......@@ -65,6 +70,8 @@ namespace ngraph
float m_extrapolation_value{0};
};
}
using v0::CropAndResize;
}
const std::string& as_string(op::CropAndResize::ResizeMethod);
template <typename T>
......
......@@ -23,9 +23,12 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Dequantize operation
/// Maps quantized input (q) to real output (r) using scale (s) and zero point (z):
/// Maps quantized input (q) to real output (r) using scale (s) and zero point
/// (z):
/// r = (q - z) * s
class NGRAPH_API Dequantize : public ngraph::op::Op
{
......@@ -65,4 +68,6 @@ namespace ngraph
AxisSet m_axes;
};
}
using v0::Dequantize;
}
}
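As a worked instance of the mapping above: with scale s = 0.5 and zero point z = 128, a quantized value q = 130 dequantizes to r = (130 - 128) * 0.5 = 1.0.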
......@@ -24,19 +24,19 @@ using namespace ngraph;
constexpr NodeTypeInfo op::BatchMatMul::type_info;
op::BatchMatMul::BatchMatMul(const Output<Node>& arg0, const Output<Node>& arg1)
op::v0::BatchMatMul::BatchMatMul(const Output<Node>& arg0, const Output<Node>& arg1)
: Op({arg0, arg1})
{
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::BatchMatMul::copy_with_new_args(const NodeVector& new_args) const
shared_ptr<Node> op::v0::BatchMatMul::copy_with_new_args(const NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<BatchMatMul>(new_args.at(0), new_args.at(1));
}
void op::BatchMatMul::validate_and_infer_types()
void op::v0::BatchMatMul::validate_and_infer_types()
{
// Check input types
const auto& arg0_et = get_input_element_type(0);
......@@ -77,7 +77,8 @@ void op::BatchMatMul::validate_and_infer_types()
set_output_type(0, output_et, output_shape);
}
void op::BatchMatMul::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
void op::v0::BatchMatMul::generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas)
{
auto delta = deltas.at(0); // NxIxK
......
......@@ -21,6 +21,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Matrix multiply for a batch of Rank 2 tensors.
/// The inputs are expected to be Rank 3, where the first dim is the
......@@ -50,6 +52,8 @@ namespace ngraph
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};
}
using v0::BatchMatMul;
namespace util
{
......
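BatchMatMul multiplies corresponding Rank 2 slices along the leading batch dimension, so inputs of shapes NxIxJ and NxJxK yield an NxIxK output. A hedged usage sketch against the nGraph builder API (shapes are illustrative):

// Two Rank 3 parameters sharing batch size N = 8.
auto a = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32,
                                                 ngraph::Shape{8, 2, 3}); // NxIxJ
auto b = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32,
                                                 ngraph::Shape{8, 3, 4}); // NxJxK
// After this PR, op::BatchMatMul and op::v0::BatchMatMul name the same type.
auto bmm = std::make_shared<ngraph::op::v0::BatchMatMul>(a, b); // output 8x2x4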
......@@ -21,14 +21,16 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief CompiledKernel represents a sub-graph that can be compiled and executed
/// independently.
///
/// This op can be used to delimit sub-graphs with special compilation requirements
/// within a function. For example, we currently use it to delimit sub-graphs that will be
/// independently compiled and executed by the MLIR backend.
class NGRAPH_API CompiledKernel : public ngraph::op::Op
/// within a function. For example, we currently use it to delimit sub-graphs that will
/// be independently compiled and executed by the MLIR backend.
class NGRAPH_API CompiledKernel : public Op
{
public:
static constexpr NodeTypeInfo type_info{"CompiledKernel", 0};
......@@ -58,9 +60,11 @@ namespace ngraph
private:
NodeVector m_node_list;
NodeVector m_output_nodes;
// Used to store the information of internal nodes that have input coming from outside
// of CK
// Used to store the information of internal nodes that have input coming from
// outside of CK
std::unordered_map<std::shared_ptr<Node>, size_t> m_input_map;
};
}
using v0::CompiledKernel;
}
}
......@@ -22,6 +22,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Operation which "adds" axes to an input tensor, replicating elements from the
/// input as needed along the new axes.
......@@ -37,8 +39,10 @@ namespace ngraph
///
/// \param arg Node that produces the input tensor to be broadcast.
/// \param shape Node that produces shape of the output tensor.
/// \param broadcast_axes Node that produces the axis positions (0-based) in the result
/// that are being broadcast. The remaining axes in shape must be
/// \param broadcast_axes Node that produces the axis positions (0-based) in the
/// result
/// that are being broadcast. The remaining axes in shape must
/// be
/// the same as the shape of arg.
DynBroadcast(const Output<Node>& arg,
const Output<Node>& shape,
......@@ -54,4 +58,6 @@ namespace ngraph
const OutputVector& deltas) override;
};
}
using v0::DynBroadcast;
}
}
......@@ -22,7 +22,10 @@ namespace ngraph
{
namespace op
{
/// \brief Generic padding operation which takes padding below and above as dynamic shapes.
namespace v0
{
/// \brief Generic padding operation which takes padding below and above as dynamic
/// shapes.
/// This is similar to existing Pad operation except padding values are dynamic.
class NGRAPH_API DynPad : public Op
{
......@@ -57,4 +60,6 @@ namespace ngraph
PadMode m_pad_mode;
};
}
using v0::DynPad;
}
}
......@@ -22,6 +22,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a
/// bounding box, optionally with stride.
......@@ -37,7 +39,8 @@ namespace ngraph
/// \param replacement Data to copy to the slice for replacement.
/// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
/// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
/// \param strides The slicing strides; for example, strides of `{n,m}` means to take
/// \param strides The slicing strides; for example, strides of `{n,m}` means to
/// take
/// every nth row and every mth column of the input matrix.
/// \param lower_bounds_mask Ignores lower_bounds for axis with the mask set
/// \param upper_bounds_mask Ignores upper_bounds for axis with the mask set
......@@ -79,4 +82,6 @@ namespace ngraph
AxisSet m_ellipsis_mask;
};
}
using v0::DynReplaceSlice;
}
}
......@@ -22,6 +22,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a
/// bounding box, optionally with stride.
......@@ -36,7 +38,8 @@ namespace ngraph
/// \param arg The tensor to be sliced.
/// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
/// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
/// \param strides The slicing strides; for example, strides of `{n,m}` means to take
/// \param strides The slicing strides; for example, strides of `{n,m}` means to
/// take
/// every nth row and every mth column of the input matrix.
/// \param lower_bounds_mask Ignores lower_bounds for axis with the mask set
/// \param upper_bounds_mask Ignores upper_bounds for axis with the mask set
......@@ -77,4 +80,6 @@ namespace ngraph
AxisSet m_ellipsis_mask;
};
}
using v0::DynSlice;
}
}
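As a worked example of the stride semantics shared by DynReplaceSlice and DynSlice: on a 2-D input with lower_bounds {0,0}, upper_bounds {4,6}, and strides {2,3}, rows 0 and 2 and columns 0 and 3 are selected, so the resulting slice has shape {2,2}.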
......@@ -22,6 +22,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Convolution + bias forward prop for batched convolution operation.
class NGRAPH_API QuantizedConvolutionBias : public Op
......@@ -41,8 +43,14 @@ namespace ngraph
const Output<Node>& scale,
const bool with_relu = false);
const Strides& get_window_movement_strides() const { return m_window_movement_strides; }
const Strides& get_window_dilation_strides() const { return m_window_dilation_strides; }
const Strides& get_window_movement_strides() const
{
return m_window_movement_strides;
}
const Strides& get_window_dilation_strides() const
{
return m_window_dilation_strides;
}
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
......@@ -81,8 +89,14 @@ namespace ngraph
const Output<Node>& sum_scale,
const bool with_relu = false);
const Strides& get_window_movement_strides() const { return m_window_movement_strides; }
const Strides& get_window_dilation_strides() const { return m_window_dilation_strides; }
const Strides& get_window_movement_strides() const
{
return m_window_movement_strides;
}
const Strides& get_window_dilation_strides() const
{
return m_window_dilation_strides;
}
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
......@@ -121,8 +135,14 @@ namespace ngraph
const Output<Node>& sum_scale,
const bool with_relu = false);
const Strides& get_window_movement_strides() const { return m_window_movement_strides; }
const Strides& get_window_dilation_strides() const { return m_window_dilation_strides; }
const Strides& get_window_movement_strides() const
{
return m_window_movement_strides;
}
const Strides& get_window_dilation_strides() const
{
return m_window_dilation_strides;
}
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
......@@ -142,4 +162,8 @@ namespace ngraph
bool m_with_relu;
};
}
using v0::QuantizedConvolutionBias;
using v0::QuantizedConvolutionBiasAdd;
using v0::QuantizedConvolutionBiasSignedAdd;
}
}
......@@ -22,6 +22,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Relu(Convolution) forward prop for batched convolution operation.
class NGRAPH_API QuantizedConvolutionRelu : public Op
......@@ -39,8 +41,14 @@ namespace ngraph
const Strides& data_dilation_strides,
const Output<Node>& scale);
const Strides& get_window_movement_strides() const { return m_window_movement_strides; }
const Strides& get_window_dilation_strides() const { return m_window_dilation_strides; }
const Strides& get_window_movement_strides() const
{
return m_window_movement_strides;
}
const Strides& get_window_dilation_strides() const
{
return m_window_dilation_strides;
}
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
......@@ -58,4 +66,6 @@ namespace ngraph
Strides m_data_dilation_strides;
};
}
using v0::QuantizedConvolutionRelu;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
class NGRAPH_API QuantizedDotBias : public Op
{
......@@ -54,5 +56,7 @@ namespace ngraph
bool m_requantize;
bool m_with_relu;
};
} // namespace op
} // namespace ngraph
}
using v0::QuantizedDotBias;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Generates a tensor populated with random values of a uniform distribution.
class NGRAPH_API RandomUniform : public op::Op
......@@ -35,23 +37,23 @@ namespace ngraph
/// \brief Constructs a RandomUniform node.
/// \param min_value Output producing the minimum value (inclusive) for the random
/// uniform distribution. Must return a scalar of floating point type,
/// and the type must match that of `max_value`.
/// uniform distribution. Must return a scalar of floating point
/// type, and the type must match that of `max_value`.
/// \param max_value Output producing the maximum value (inclusive) for the random
/// uniform distribution. Must return a scalar of floating point type,
/// and the type must match that of `min_value`.
/// \param result_shape Output producing the shape of the output tensor. Must return a
/// vector of type `element::i64`.
/// \param use_fixed_seed Output producing a boolean scalar flag indicating whether to
/// use the value supplied in `fixed_seed` to re-seed the random
/// number generator at this iteration. Note that whenever
/// `use_fixed_seed` is `true`, the same values will be generated
/// in the output tensor. This flag is primarily used for
/// debugging. If `use_fixed_seed` is `false`, the value in
/// `fixed_seed` is ignored.
/// \param fixed_seed Fixed seed value to be supplied to the random number generator if
/// `use_fixed_seed` is `true`. If `use_fixed_seed` is `false`, this
/// value is ignored.
/// uniform distribution. Must return a scalar of floating point
/// type, and the type must match that of `min_value`.
/// \param result_shape Output producing the shape of the output tensor. Must return
/// a vector of type `element::i64`.
/// \param use_fixed_seed Output producing a boolean scalar flag indicating whether
/// to use the value supplied in `fixed_seed` to re-seed the
/// random number generator at this iteration. Note that
/// whenever `use_fixed_seed` is `true`, the same values will
/// be generated in the output tensor. This flag is primarily
/// used for debugging. If `use_fixed_seed` is `false`, the
/// value in `fixed_seed` is ignored.
/// \param fixed_seed Fixed seed value to be supplied to the random number generator
/// if `use_fixed_seed` is `true`. If `use_fixed_seed` is `false`,
/// this value is ignored.
RandomUniform(const Output<Node>& min_value,
const Output<Node>& max_value,
const Output<Node>& result_shape,
......@@ -61,16 +63,16 @@ namespace ngraph
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
/// \brief Returns the fixed seed value to be supplied to the random number generator
/// if `use_fixed_seed` is `true`. If `use_fixed_seed` is `false`, this value is
/// ignored.
/// \brief Returns the fixed seed value to be supplied to the random number
/// generator if `use_fixed_seed` is `true`. If `use_fixed_seed` is `false`,
/// this value is ignored.
uint64_t get_fixed_seed() const { return m_fixed_seed; }
/// \brief Sets the fixed seed value to be supplied to the random number generator
/// if `use_fixed_seed` is `true`. If `use_fixed_seed` is `false`, this value is
/// ignored.
/// if `use_fixed_seed` is `true`. If `use_fixed_seed` is `false`, this value
/// is ignored.
void set_fixed_seed(uint64_t fixed_seed) { m_fixed_seed = fixed_seed; }
// Internally, any implementation of RandomUniform will have state, since it is backed
// by a random number generator.
// Internally, any implementation of RandomUniform will have state, since it is
// backed by a random number generator.
bool has_state() const override { return true; }
void validate_and_infer_types() override;
......@@ -83,4 +85,6 @@ namespace ngraph
uint64_t m_fixed_seed;
};
}
using v0::RandomUniform;
}
}
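A hedged construction sketch for RandomUniform, assuming the constructor takes the five documented inputs in order (the tail of the signature is elided in this hunk); Constant::create is the stock nGraph helper:

using namespace ngraph;
// Scalar bounds of matching floating-point type.
auto low = op::Constant::create(element::f32, Shape{}, {0.0f});
auto high = op::Constant::create(element::f32, Shape{}, {1.0f});
// Output shape as an i64 vector, per the doc comment above.
auto result_shape = op::Constant::create(element::i64, Shape{2}, {4, 4});
// Boolean scalar: false means the fixed_seed argument below is ignored.
auto use_fixed_seed = op::Constant::create(element::boolean, Shape{}, {false});
auto ru = std::make_shared<op::v0::RandomUniform>(
    low, high, result_shape, use_fixed_seed, /*fixed_seed*/ 0);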
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Matrix multiply for a batch of Rank 2 tensors each with potential
/// transpose.
......@@ -68,4 +70,6 @@ namespace ngraph
bool m_transpose_arg1;
};
}
using v0::BatchMatMulTranspose;
}
}
......@@ -23,6 +23,8 @@
namespace ngraph
{
namespace op
{
namespace v0
{
class NGRAPH_API CrossEntropy : public ngraph::op::util::FusedOp
{
......@@ -33,9 +35,11 @@ namespace ngraph
/// \brief CrossEntropy for computing loss
/// \param arg1 Node that produces the input tensor
/// \param arg2 Node that produces ground truth labels for the input
/// \param soft_label flag indicating whether to interpret the given labels as soft
/// \param soft_label flag indicating whether to interpret the given labels as
/// soft
/// labels
/// \param ignore_index Specifies a target value that is ignored and does not contribute
/// \param ignore_index Specifies a target value that is ignored and does not
/// contribute
/// to the input gradient. Only valid if soft_label is set to False
CrossEntropy(const Output<Node>& arg1,
const Output<Node>& arg2,
......@@ -67,9 +71,11 @@ namespace ngraph
/// \param input Node that produces tensor from the fprop
/// \param labels Node that produces ground truth labels for input
/// \param delta Node that produces the delta during bprop
/// \param soft_label flag indicating whether to interpret the given labels as soft
/// \param soft_label flag indicating whether to interpret the given labels as
/// soft
/// labels
/// \param ignore_index Specifies a target value that is ignored and does not contribute
/// \param ignore_index Specifies a target value that is ignored and does not
/// contribute
/// to the input gradient. Only valid if soft_label is set to False
CrossEntropyBackprop(const Output<Node>& input,
const Output<Node>& labels,
......@@ -89,5 +95,8 @@ namespace ngraph
bool m_soft_label;
int64_t m_ignore_index;
};
} // namespace op
} // namespace ngraph
}
using v0::CrossEntropy;
using v0::CrossEntropyBackprop;
}
}
......@@ -33,21 +33,21 @@ NGRAPH_OP(Add, ngraph::op::v1, 1)
NGRAPH_OP(All, ngraph::op::v0, 0)
NGRAPH_OP(AllReduce, ngraph::op::v0, 0)
NGRAPH_OP(And, ngraph::op::v0, 0)
NGRAPH_OP(Any, ngraph::op, 0)
NGRAPH_OP(Any, ngraph::op::v0, 0)
NGRAPH_OP(ArgMax, ngraph::op::v0, 0)
NGRAPH_OP(ArgMin, ngraph::op::v0, 0)
NGRAPH_OP(Asin, ngraph::op::v0, 0)
NGRAPH_OP(Atan, ngraph::op, 0)
NGRAPH_OP(Atan2, ngraph::op, 0)
NGRAPH_OP(Atan, ngraph::op::v0, 0)
NGRAPH_OP(Atan2, ngraph::op::v0, 0)
NGRAPH_OP(AvgPool, ngraph::op::v0, 0)
NGRAPH_OP(AvgPool, ngraph::op::v1, 1)
NGRAPH_OP(AvgPoolBackprop, ngraph::op::v0, 0)
NGRAPH_OP(AvgPoolBackprop, ngraph::op::v1, 1)
NGRAPH_OP(BatchMatMul, ngraph::op, 0)
NGRAPH_OP(BatchMatMulTranspose, ngraph::op, 0)
NGRAPH_OP(BatchNormInference, ngraph::op, 0)
NGRAPH_OP(BatchNormTraining, ngraph::op, 0)
NGRAPH_OP(BatchNormTrainingBackprop, ngraph::op, 0)
NGRAPH_OP(BatchMatMul, ngraph::op::v0, 0)
NGRAPH_OP(BatchMatMulTranspose, ngraph::op::v0, 0)
NGRAPH_OP(BatchNormInference, ngraph::op::v0, 0)
NGRAPH_OP(BatchNormTraining, ngraph::op::v0, 0)
NGRAPH_OP(BatchNormTrainingBackprop, ngraph::op::v0, 0)
NGRAPH_OP(BatchToSpace, ngraph::op::v1, 1)
NGRAPH_OP(BinaryConvolution, ngraph::op::v1, 1)
NGRAPH_OP(Broadcast, ngraph::op::v0, 0)
......@@ -57,10 +57,10 @@ NGRAPH_OP(BroadcastLike, ngraph::op::v0, 0)
NGRAPH_OP(CTCGreedyDecoder, ngraph::op::v0, 0)
NGRAPH_OP(Ceiling, ngraph::op::v0, 0)
NGRAPH_OP(Clamp, ngraph::op::v0, 0)
NGRAPH_OP(CompiledKernel, ngraph::op, 0)
NGRAPH_OP(CompiledKernel, ngraph::op::v0, 0)
NGRAPH_OP(Concat, ngraph::op::v0, 0)
NGRAPH_OP(Constant, ngraph::op, 0)
NGRAPH_OP(Convert, ngraph::op, 0)
NGRAPH_OP(Constant, ngraph::op::v0, 0)
NGRAPH_OP(Convert, ngraph::op::v0, 0)
NGRAPH_OP(ConvertLike, ngraph::op::v1, 1)
NGRAPH_OP(Convolution, ngraph::op::v0, 0)
NGRAPH_OP(Convolution, ngraph::op::v1, 1)
......@@ -71,25 +71,25 @@ NGRAPH_OP(ConvolutionBackpropFilters, ngraph::op::v1, 1)
NGRAPH_OP(ConvolutionBias, ngraph::op::v0, 0)
NGRAPH_OP(ConvolutionBiasAdd, ngraph::op::v0, 0)
NGRAPH_OP(ConvolutionBiasBackpropFiltersBias, ngraph::op::v0, 0)
NGRAPH_OP(Cos, ngraph::op, 0)
NGRAPH_OP(Cosh, ngraph::op, 0)
NGRAPH_OP(CropAndResize, ngraph::op, 0)
NGRAPH_OP(CrossEntropy, ngraph::op, 0)
NGRAPH_OP(CrossEntropyBackprop, ngraph::op, 0)
NGRAPH_OP(Cos, ngraph::op::v0, 0)
NGRAPH_OP(Cosh, ngraph::op::v0, 0)
NGRAPH_OP(CropAndResize, ngraph::op::v0, 0)
NGRAPH_OP(CrossEntropy, ngraph::op::v0, 0)
NGRAPH_OP(CrossEntropyBackprop, ngraph::op::v0, 0)
NGRAPH_OP(CumSum, ngraph::op::v0, 0)
NGRAPH_OP(DeformableConvolution, ngraph::op::v1, 1)
NGRAPH_OP(DeformablePSROIPooling, ngraph::op::v1, 1)
NGRAPH_OP(DepthToSpace, ngraph::op::v0, 0)
NGRAPH_OP(Dequantize, ngraph::op, 0)
NGRAPH_OP(Dequantize, ngraph::op::v0, 0)
NGRAPH_OP(DetectionOutput, ngraph::op::v0, 0)
NGRAPH_OP(Divide, ngraph::op::v0, 0)
NGRAPH_OP(Divide, ngraph::op::v1, 1)
NGRAPH_OP(Dot, ngraph::op::v0, 0)
NGRAPH_OP(DynBroadcast, ngraph::op, 0)
NGRAPH_OP(DynPad, ngraph::op, 0)
NGRAPH_OP(DynReplaceSlice, ngraph::op, 0)
NGRAPH_OP(DynBroadcast, ngraph::op::v0, 0)
NGRAPH_OP(DynPad, ngraph::op::v0, 0)
NGRAPH_OP(DynReplaceSlice, ngraph::op::v0, 0)
NGRAPH_OP(DynReshape, ngraph::op::v0, 0)
NGRAPH_OP(DynSlice, ngraph::op, 0)
NGRAPH_OP(DynSlice, ngraph::op::v0, 0)
NGRAPH_OP(Elu, ngraph::op::v0, 0)
NGRAPH_OP(EmbeddingLookup, ngraph::op::v0, 0)
NGRAPH_OP(Equal, ngraph::op::v0, 0)
......@@ -131,7 +131,7 @@ NGRAPH_OP(Less, ngraph::op::v0, 0)
NGRAPH_OP(Less, ngraph::op::v1, 1)
NGRAPH_OP(LessEq, ngraph::op::v0, 0)
NGRAPH_OP(LessEqual, ngraph::op::v1, 1)
NGRAPH_OP(Log, ngraph::op, 0)
NGRAPH_OP(Log, ngraph::op::v0, 0)
NGRAPH_OP(LogicalAnd, ngraph::op::v1, 1)
NGRAPH_OP(LogicalNot, ngraph::op::v1, 1)
NGRAPH_OP(LogicalOr, ngraph::op::v1, 1)
......@@ -151,7 +151,7 @@ NGRAPH_OP(Minimum, ngraph::op::v1, 1)
NGRAPH_OP(Mod, ngraph::op::v1, 1)
NGRAPH_OP(Multiply, ngraph::op::v0, 0)
NGRAPH_OP(Multiply, ngraph::op::v1, 1)
NGRAPH_OP(Negative, ngraph::op, 0)
NGRAPH_OP(Negative, ngraph::op::v0, 0)
NGRAPH_OP(NonMaxSuppression, ngraph::op::v1, 1)
NGRAPH_OP(NormalizeL2, ngraph::op::v0, 0)
NGRAPH_OP(Not, ngraph::op::v0, 0)
......@@ -166,25 +166,25 @@ NGRAPH_OP(Pad, ngraph::op::v1, 1)
NGRAPH_OP(Parameter, ngraph::op::v0, 0)
NGRAPH_OP(PartialSlice, ngraph::op::v0, 0)
NGRAPH_OP(PartialSliceBackprop, ngraph::op::v0, 0)
NGRAPH_OP(Passthrough, ngraph::op, 0)
NGRAPH_OP(Passthrough, ngraph::op::v0, 0)
NGRAPH_OP(Power, ngraph::op::v0, 0)
NGRAPH_OP(Power, ngraph::op::v1, 1)
NGRAPH_OP(PriorBox, ngraph::op::v0, 0)
NGRAPH_OP(PriorBoxClustered, ngraph::op::v0, 0)
NGRAPH_OP(Product, ngraph::op, 0)
NGRAPH_OP(Product, ngraph::op::v0, 0)
NGRAPH_OP(Proposal, ngraph::op::v0, 0)
NGRAPH_OP(Quantize, ngraph::op::v0, 0)
NGRAPH_OP(QuantizedConvolution, ngraph::op::v0, 0)
NGRAPH_OP(QuantizedConvolutionBias, ngraph::op, 0)
NGRAPH_OP(QuantizedConvolutionBiasAdd, ngraph::op, 0)
NGRAPH_OP(QuantizedConvolutionBiasSignedAdd, ngraph::op, 0)
NGRAPH_OP(QuantizedConvolutionRelu, ngraph::op, 0)
NGRAPH_OP(QuantizedConvolutionBias, ngraph::op::v0, 0)
NGRAPH_OP(QuantizedConvolutionBiasAdd, ngraph::op::v0, 0)
NGRAPH_OP(QuantizedConvolutionBiasSignedAdd, ngraph::op::v0, 0)
NGRAPH_OP(QuantizedConvolutionRelu, ngraph::op::v0, 0)
NGRAPH_OP(QuantizedDot, ngraph::op::v0, 0)
NGRAPH_OP(QuantizedDotBias, ngraph::op, 0)
NGRAPH_OP(QuantizedDotBias, ngraph::op::v0, 0)
NGRAPH_OP(RNNCell, ngraph::op::v0, 0)
NGRAPH_OP(ROIPooling, ngraph::op::v0, 0)
NGRAPH_OP(RandomUniform, ngraph::op, 0)
NGRAPH_OP(Range, ngraph::op, 0)
NGRAPH_OP(RandomUniform, ngraph::op::v0, 0)
NGRAPH_OP(Range, ngraph::op::v0, 0)
NGRAPH_OP(Recv, ngraph::op::v0, 0)
NGRAPH_OP(ReduceMax, ngraph::op::v1, 1)
NGRAPH_OP(ReduceLogicalAnd, ngraph::op::v1, 1)
......@@ -230,7 +230,7 @@ NGRAPH_OP(SpaceToBatch, ngraph::op::v1, 1)
NGRAPH_OP(SpaceToDepth, ngraph::op::v0, 0)
NGRAPH_OP(Split, ngraph::op::v1, 1)
NGRAPH_OP(Split, ngraph::op::v0, 0)
NGRAPH_OP(Sqrt, ngraph::op, 0)
NGRAPH_OP(Sqrt, ngraph::op::v0, 0)
NGRAPH_OP(SquaredDifference, ngraph::op::v0, 0)
NGRAPH_OP(Squeeze, ngraph::op::v0, 0)
NGRAPH_OP(Stack, ngraph::op::v0, 0)
......
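The table in this hunk is an X-macro list: each NGRAPH_OP(name, namespace, version) row expands through whatever definition of NGRAPH_OP the including file provides. A hedged sketch of the usual consumption pattern, assuming the table lives in ngraph/op/op_version_tbl.hpp as in the nGraph tree:

#include <iostream>
// Hypothetical helper that prints every op in the table: define the
// macro, include the table once inside a function body, then undefine it.
void dump_op_versions()
{
#define NGRAPH_OP(NAME, NAMESPACE, VERSION)                                    \
    std::cout << #NAMESPACE "::" #NAME " is opset " #VERSION "\n";
#include "ngraph/op/op_version_tbl.hpp"
#undef NGRAPH_OP
}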
......@@ -87,7 +87,6 @@
#include "ngraph/op/fused/clamp.hpp"
#include "ngraph/op/fused/conv_fused.hpp"
#include "ngraph/op/fused/crossentropy.hpp"
#include "ngraph/op/fused/crossentropy.hpp"
#include "ngraph/op/fused/depth_to_space.hpp"
#include "ngraph/op/fused/elu.hpp"
#include "ngraph/op/fused/fake_quantize.hpp"
......@@ -175,7 +174,6 @@
#include "ngraph/op/sqrt.hpp"
#include "ngraph/op/stop_gradient.hpp"
#include "ngraph/op/strided_slice.hpp"
#include "ngraph/op/strided_slice.hpp"
#include "ngraph/op/subtract.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/op/tan.hpp"
......@@ -184,5 +182,4 @@
#include "ngraph/op/topk.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/op/variadic_split.hpp"
#include "ngraph/op/variadic_split.hpp"
#include "ngraph/op/xor.hpp"