Unverified Commit 7a4cc10c authored by Scott Cyphers, committed by GitHub

Merge branch 'master' into pruthvi/cpu_static_backend

parents 3711118e 410d6e61
......@@ -30,6 +30,11 @@
using namespace std;
using namespace ngraph;
const string op::ConvolutionBias::type_name{"ConvolutionBias"};
const string op::ConvolutionBiasBackpropFiltersBias::type_name{
"ConvolutionBiasBackpropFiltersBias"};
const string op::ConvolutionBiasAdd::type_name{"ConvolutionBiasAdd"};
static void validate_convbias_shapes(const Node* node,
element::Type et_filters,
element::Type et_bias,
......@@ -75,7 +80,7 @@ op::ConvolutionBias::ConvolutionBias(const shared_ptr<Node>& data_batch,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const bool with_relu)
: FusedOp("ConvolutionBias", check_single_output_args({data_batch, filters, bias}))
: FusedOp(check_single_output_args({data_batch, filters, bias}))
, m_window_movement_strides(window_movement_strides)
, m_window_dilation_strides(window_dilation_strides)
, m_padding_below(padding_below)
......@@ -280,8 +285,7 @@ op::ConvolutionBiasBackpropFiltersBias::ConvolutionBiasBackpropFiltersBias(
const CoordinateDiff& padding_below_forward,
const CoordinateDiff& padding_above_forward,
const Strides& data_dilation_strides_forward)
: FusedOp("ConvolutionBiasBackpropFiltersBias",
check_single_output_args({data_batch, output_delta}))
: FusedOp(check_single_output_args({data_batch, output_delta}))
, m_filters_shape(filters_shape)
, m_bias_shape(bias_shape)
, m_window_movement_strides_forward(window_movement_strides_forward)
......@@ -368,8 +372,7 @@ op::ConvolutionBiasAdd::ConvolutionBiasAdd(const std::shared_ptr<Node>& data_bat
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
bool with_relu)
: FusedOp("ConvolutionBiasAdd",
check_single_output_args({data_batch, filters, bias, add_input}))
: FusedOp(check_single_output_args({data_batch, filters, bias, add_input}))
, m_window_movement_strides(window_movement_strides)
, m_window_dilation_strides(window_dilation_strides)
, m_padding_below(padding_below)
......
......@@ -28,6 +28,9 @@ namespace ngraph
class ConvolutionBias : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
ConvolutionBias(const std::shared_ptr<op::Convolution>& conv,
const std::shared_ptr<Node>& bias,
const bool with_relu = false);
......@@ -79,6 +82,9 @@ namespace ngraph
class ConvolutionBiasBackpropFiltersBias : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
ConvolutionBiasBackpropFiltersBias(const std::shared_ptr<Node>& data_batch,
const Shape& filters_shape,
const Shape& bias_shape,
......@@ -169,6 +175,9 @@ namespace ngraph
class ConvolutionBiasAdd : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
ConvolutionBiasAdd(const std::shared_ptr<op::ConvolutionBias>& conv,
const std::shared_ptr<Node>& sum_input,
bool with_relu = false);
......
......@@ -25,8 +25,10 @@
using namespace std;
using namespace ngraph;
const string op::DepthToSpace::type_name{"DepthToSpace"};
op::DepthToSpace::DepthToSpace(const shared_ptr<Node>& data, const size_t block_size)
: FusedOp("DepthToSpace", {data})
: FusedOp(check_single_output_args({data}))
, m_blocksize(block_size)
{
constructor_validate_and_infer_types();
......
......@@ -34,6 +34,9 @@ namespace ngraph
class DepthToSpace : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a DepthToSpace operation.
///
/// \param data - Node producing the input tensor
......
......@@ -28,8 +28,10 @@
using namespace std;
using namespace ngraph;
const string op::Elu::type_name{"Elu"};
op::Elu::Elu(const shared_ptr<Node>& data, const shared_ptr<Node>& alpha)
: FusedOp("Elu", {data, alpha})
: FusedOp(check_single_output_args({data, alpha}))
{
constructor_validate_and_infer_types();
}
......
......@@ -31,6 +31,9 @@ namespace ngraph
class Elu : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs an Elu operation.
///
/// \param data Input tensor
......
......@@ -36,13 +36,15 @@
using namespace std;
using namespace ngraph;
const string op::FakeQuantize::type_name{"FakeQuantize"};
op::FakeQuantize::FakeQuantize(const shared_ptr<Node>& data,
const shared_ptr<Node>& input_low,
const shared_ptr<Node>& input_high,
const shared_ptr<Node>& output_low,
const shared_ptr<Node>& output_high,
size_t levels)
: FusedOp("FakeQuantize", {data, input_low, input_high, output_low, output_high})
: FusedOp(check_single_output_args({data, input_low, input_high, output_low, output_high}))
, m_levels(levels)
{
constructor_validate_and_infer_types();
......
......@@ -38,6 +38,9 @@ namespace ngraph
class FakeQuantize : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
///
/// \brief Constructs a FakeQuantize operation node.
///
......
......@@ -27,8 +27,10 @@
using namespace std;
using namespace ngraph;
const string op::Gelu::type_name{"Gelu"};
op::Gelu::Gelu(const shared_ptr<Node>& data)
: FusedOp("Gelu", {data})
: FusedOp(check_single_output_args({data}))
{
constructor_validate_and_infer_types();
}
......
......@@ -32,6 +32,9 @@ namespace ngraph
class Gelu : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a Gelu operation.
///
/// \param data Input tensor
......
......@@ -25,6 +25,8 @@
using namespace std;
using namespace ngraph;
const string op::Gemm::type_name{"Gemm"};
op::Gemm::Gemm(const std::shared_ptr<ngraph::Node>& A,
const std::shared_ptr<ngraph::Node>& B,
const std::shared_ptr<ngraph::Node>& C,
......@@ -32,7 +34,7 @@ op::Gemm::Gemm(const std::shared_ptr<ngraph::Node>& A,
double beta,
bool transA,
bool transB)
: FusedOp("Gemm", {A, B, C})
: FusedOp(check_single_output_args({A, B, C}))
, m_alpha{alpha}
, m_beta{beta}
, m_transA{transA}
......
......@@ -36,6 +36,9 @@ namespace ngraph
class Gemm : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a Gemm operation.
///
/// \param A Input tensor A
......
......@@ -27,8 +27,10 @@
using namespace std;
using namespace ngraph;
const string op::GRN::type_name{"GRN"};
op::GRN::GRN(const shared_ptr<Node>& data, float bias)
: FusedOp("GRN", {data})
: FusedOp(check_single_output_args({data}))
, m_bias(bias)
{
constructor_validate_and_infer_types();
......
......@@ -30,6 +30,9 @@ namespace ngraph
class GRN : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a GRN operation.
///
/// \param data - Node producing the input tensor
......
......@@ -27,6 +27,8 @@
using namespace std;
using namespace ngraph;
const string op::GroupConvolutionTranspose::type_name{"GroupConvolutionTranspose"};
op::GroupConvolutionTranspose::GroupConvolutionTranspose(const shared_ptr<Node>& data,
const shared_ptr<Node>& filters,
const Strides& strides,
......@@ -37,7 +39,7 @@ op::GroupConvolutionTranspose::GroupConvolutionTranspose(const shared_ptr<Node>&
const size_t groups,
const PadType& pad_type,
const Shape& output_shape)
: FusedOp("GroupConvolutionTranspose", check_single_output_args({data, filters}))
: FusedOp(check_single_output_args({data, filters}))
, m_strides(strides)
, m_dilations(dilations)
, m_padding_begin(padding_begin)
......
......@@ -35,6 +35,9 @@ namespace ngraph
class GroupConvolutionTranspose : public util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
///
/// \brief Constructs GroupConvolutionTranspose operation.
///
......
......@@ -31,6 +31,8 @@
using namespace std;
using namespace ngraph;
const string op::GRUCell::type_name{"GRUCell"};
op::GRUCell::GRUCell(const shared_ptr<Node>& X,
const shared_ptr<Node>& W,
const shared_ptr<Node>& R,
......@@ -59,7 +61,7 @@ op::GRUCell::GRUCell(const shared_ptr<Node>& X,
const vector<float>& activation_beta,
float clip,
bool linear_before_reset)
: FusedOp("GRUCell", {X, W, R, H_t})
: FusedOp(check_single_output_args({X, W, R, H_t}))
, RNNCellBase(hidden_size, clip, activations, activation_alpha, activation_beta)
, m_activation_f{get_activation_function(0)}
, m_activation_g{get_activation_function(1)}
......@@ -80,7 +82,7 @@ op::GRUCell::GRUCell(const shared_ptr<Node>& X,
const vector<float>& activation_beta,
float clip,
bool linear_before_reset)
: FusedOp("GRUCell", {X, W, R, H_t, B})
: FusedOp(check_single_output_args({X, W, R, H_t, B}))
, RNNCellBase(hidden_size, clip, activations, activation_alpha, activation_beta)
, m_activation_f{get_activation_function(0)}
, m_activation_g{get_activation_function(1)}
......
......@@ -41,6 +41,9 @@ namespace ngraph
class GRUCell : public util::FusedOp, public util::RNNCellBase
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
///
/// \brief Constructs GRUCell node.
///
......
......@@ -27,8 +27,10 @@
using namespace std;
using namespace ngraph;
const string op::HardSigmoid::type_name{"HardSigmoid"};
op::HardSigmoid::HardSigmoid(const shared_ptr<Node>& data, float alpha, float beta)
: FusedOp("HardSigmoid", {data})
: FusedOp(check_single_output_args({data}))
, m_alpha(alpha)
, m_beta(beta)
{
......
......@@ -30,6 +30,9 @@ namespace ngraph
class HardSigmoid : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a HardSigmoid operation.
///
/// \param data Input tensor.
......
......@@ -23,8 +23,10 @@
using namespace std;
using namespace ngraph;
const string op::LeakyRelu::type_name{"LeakyRelu"};
op::LeakyRelu::LeakyRelu(const shared_ptr<Node>& data, const shared_ptr<Node>& alpha)
: FusedOp("LeakyRelu", {data, alpha})
: FusedOp(check_single_output_args({data, alpha}))
{
constructor_validate_and_infer_types();
}
......
......@@ -30,6 +30,9 @@ namespace ngraph
class LeakyRelu : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
LeakyRelu(const std::shared_ptr<ngraph::Node>& data,
const std::shared_ptr<ngraph::Node>& alpha);
......
......@@ -31,6 +31,8 @@
using namespace std;
using namespace ngraph;
const string op::LSTMCell::type_name{"LSTMCell"};
op::LSTMCell::LSTMCell(const shared_ptr<Node>& X,
const shared_ptr<Node>& W,
const shared_ptr<Node>& R,
......@@ -62,7 +64,7 @@ op::LSTMCell::LSTMCell(const shared_ptr<Node>& X,
const vector<float>& activation_beta,
float clip,
bool input_forget)
: FusedOp("LSTMCell", {X, W, R, H_t, C_t})
: FusedOp(check_single_output_args({X, W, R, H_t, C_t}))
, RNNCellBase(hidden_size, clip, activations, activation_alpha, activation_beta)
, m_activation_f{get_activation_function(0)}
, m_activation_g{get_activation_function(1)}
......@@ -87,7 +89,7 @@ op::LSTMCell::LSTMCell(const shared_ptr<Node>& X,
const vector<float>& activation_beta,
float clip,
bool input_forget)
: FusedOp("LSTMCell", {X, W, R, H_t, C_t, B, P})
: FusedOp(check_single_output_args({X, W, R, H_t, C_t, B, P}))
, RNNCellBase(hidden_size, clip, activations, activation_alpha, activation_beta)
, m_activation_f{get_activation_function(0)}
, m_activation_g{get_activation_function(1)}
......
......@@ -42,6 +42,9 @@ namespace ngraph
class LSTMCell : public util::FusedOp, public util::RNNCellBase
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
///
/// \brief Constructs LSTMCell node.
///
......
......@@ -27,11 +27,13 @@
using namespace std;
using namespace ngraph;
const string op::MVN::type_name{"MVN"};
op::MVN::MVN(const std::shared_ptr<Node>& data,
bool across_channels,
bool normalize_variance,
double eps)
: FusedOp("MVN", {data})
: FusedOp(check_single_output_args({data}))
, m_eps{eps}
, m_across_channels{across_channels}
, m_normalize_variance{normalize_variance}
......@@ -52,7 +54,7 @@ op::MVN::MVN(const std::shared_ptr<Node>& data,
AxisSet reduction_axes,
bool normalize_variance,
double eps)
: FusedOp("MVN", {data})
: FusedOp(check_single_output_args({data}))
, m_eps{eps}
, m_across_channels{false}
, m_normalize_variance{normalize_variance}
......
......@@ -29,6 +29,9 @@ namespace ngraph
class MVN : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs an MVN operation.
///
/// \param data Input tensor with data
......
......@@ -26,12 +26,14 @@
using namespace std;
using namespace ngraph;
const string op::Normalize::type_name{"Normalize"};
op::Normalize::Normalize(const shared_ptr<ngraph::Node>& data,
const shared_ptr<ngraph::Node>& scale,
bool across_spatial,
bool channel_shared,
float eps)
: FusedOp("Normalize", {data, scale})
: FusedOp(check_single_output_args({data, scale}))
, m_across_spatial{across_spatial}
, m_channel_shared{channel_shared}
, m_eps{eps}
......
......@@ -30,6 +30,9 @@ namespace ngraph
class Normalize : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
///
/// \brief Constructs a Normalize operation.
///
......
......@@ -29,8 +29,10 @@
using namespace std;
using namespace ngraph;
const string op::PRelu::type_name{"PRelu"};
op::PRelu::PRelu(const shared_ptr<Node>& data, const shared_ptr<Node>& slope)
: FusedOp("PRelu", {data, slope})
: FusedOp(check_single_output_args({data, slope}))
{
constructor_validate_and_infer_types();
}
......
......@@ -31,6 +31,9 @@ namespace ngraph
class PRelu : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a PRelu operation.
///
/// \param data Input tensor
......
......@@ -31,6 +31,8 @@
using namespace std;
using namespace ngraph;
const string op::RNNCell::type_name{"RNNCell"};
op::RNNCell::RNNCell(const shared_ptr<Node>& X,
const shared_ptr<Node>& W,
const shared_ptr<Node>& R,
......@@ -50,7 +52,7 @@ op::RNNCell::RNNCell(const shared_ptr<Node>& X,
const vector<float>& activation_alpha,
const vector<float>& activation_beta,
float clip)
: FusedOp("RNNCell", {X, W, R, H_t})
: FusedOp(check_single_output_args({X, W, R, H_t}))
, RNNCellBase(hidden_size, clip, activations, activation_alpha, activation_beta)
, m_activation_f{get_activation_function(0)}
{
......@@ -68,7 +70,7 @@ op::RNNCell::RNNCell(const shared_ptr<Node>& X,
const vector<float>& activation_alpha,
const vector<float>& activation_beta,
float clip)
: FusedOp("RNNCell", {X, W, R, H_t, B})
: FusedOp(check_single_output_args({X, W, R, H_t, B}))
, RNNCellBase(hidden_size, clip, activations, activation_alpha, activation_beta)
, m_activation_f{get_activation_function(0)}
{
......
......@@ -41,6 +41,9 @@ namespace ngraph
class RNNCell : public util::FusedOp, public util::RNNCellBase
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
///
/// \brief Constructs RNNCell node.
///
......
......@@ -21,10 +21,12 @@
using namespace std;
using namespace ngraph;
const string op::ScaleShift::type_name{"ScaleShift"};
op::ScaleShift::ScaleShift(const std::shared_ptr<ngraph::Node>& data,
const std::shared_ptr<ngraph::Node>& scale,
const std::shared_ptr<ngraph::Node>& shift)
: FusedOp("ScaleShift", {data, scale, shift})
: FusedOp(check_single_output_args({data, scale, shift}))
{
constructor_validate_and_infer_types();
}
......
......@@ -31,6 +31,9 @@ namespace ngraph
class ScaleShift : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a ScaleShift operation.
///
/// \param data Input tensor
......
......@@ -21,10 +21,12 @@
using namespace std;
using namespace ngraph;
const string op::ShuffleChannels::type_name{"ShuffleChannels"};
op::ShuffleChannels::ShuffleChannels(const shared_ptr<Node>& data,
const int axis,
const size_t groups)
: FusedOp("ShuffleChannels", {data})
: FusedOp(check_single_output_args({data}))
, m_axis(axis)
, m_groups{groups}
{
......
......@@ -29,6 +29,9 @@ namespace ngraph
class ShuffleChannels : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a ShuffleChannels node.
///
/// \param data - Node producing the input tensor
......
......@@ -24,8 +24,10 @@
using namespace std;
using namespace ngraph;
const string op::SpaceToDepth::type_name{"SpaceToDepth"};
op::SpaceToDepth::SpaceToDepth(const shared_ptr<Node>& data, const size_t block_size)
: FusedOp("SpaceToDepth", {data})
: FusedOp(check_single_output_args({data}))
, m_blocksize(block_size)
{
constructor_validate_and_infer_types();
......
......@@ -32,6 +32,9 @@ namespace ngraph
class SpaceToDepth : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a SpaceToDepth operation.
///
/// \param data - Node producing the input tensor
......
......@@ -21,8 +21,10 @@
using namespace std;
using namespace ngraph;
const string op::Split::type_name{"Split"};
op::Split::Split(const shared_ptr<Node>& data, const int axis, const size_t num_split)
: FusedOp("Split", {data})
: FusedOp(check_single_output_args({data}))
, m_split_evenly{true}
, m_axis{axis}
, m_num_split{num_split}
......@@ -33,7 +35,7 @@ op::Split::Split(const shared_ptr<Node>& data, const int axis, const size_t num_
op::Split::Split(const std::shared_ptr<ngraph::Node>& data,
const int axis,
const std::vector<size_t>& splits)
: FusedOp("Split", {data})
: FusedOp(check_single_output_args({data}))
, m_split_evenly{false}
, m_axis{axis}
, m_splits{splits}
......
......@@ -30,6 +30,9 @@ namespace ngraph
class Split : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a Split op that evenly divides the input tensor.
///
/// \param data - Node producing the input tensor
......
......@@ -24,8 +24,10 @@
using namespace std;
using namespace ngraph;
const string op::SquaredDifference::type_name{"SquaredDifference"};
op::SquaredDifference::SquaredDifference(const shared_ptr<Node>& x1, const shared_ptr<Node>& x2)
: FusedOp("SquaredDifference", {x1, x2})
: FusedOp(check_single_output_args({x1, x2}))
{
constructor_validate_and_infer_types();
}
......
......@@ -30,6 +30,9 @@ namespace ngraph
class SquaredDifference : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs the squared difference operation.
///
/// \param x1 First input tensor
......
......@@ -26,8 +26,10 @@
using namespace std;
using namespace ngraph;
const string op::Squeeze::type_name{"Squeeze"};
op::Squeeze::Squeeze(const shared_ptr<Node>& data, const shared_ptr<Node>& axes)
: FusedOp("Squeeze", {data, axes})
: FusedOp(check_single_output_args({data, axes}))
{
constructor_validate_and_infer_types();
}
......
......@@ -30,6 +30,9 @@ namespace ngraph
class Squeeze : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
Squeeze(const std::shared_ptr<ngraph::Node>& data,
const std::shared_ptr<ngraph::Node>& axes);
......
......@@ -26,8 +26,10 @@
using namespace std;
using namespace ngraph;
const string op::Unsqueeze::type_name{"Unsqueeze"};
op::Unsqueeze::Unsqueeze(const shared_ptr<Node>& data, const shared_ptr<Node>& axes)
: FusedOp("Unsqueeze", {data, axes})
: FusedOp(check_single_output_args({data, axes}))
{
constructor_validate_and_infer_types();
}
......
......@@ -30,6 +30,9 @@ namespace ngraph
class Unsqueeze : public ngraph::op::util::FusedOp
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
Unsqueeze(const std::shared_ptr<ngraph::Node>& data,
const std::shared_ptr<ngraph::Node>& axes);
......
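Taken together, the hunks above apply one pattern to every fused op: the op-name string moves out of the FusedOp base-constructor call into a static type_name member exposed through description(), and the constructor inputs are wrapped in check_single_output_args. The sketch below shows the resulting shape of a fused op after this change; the op name MyFusedOp and the file names are hypothetical placeholders, while NGRAPH_API, FusedOp, and check_single_output_args are the existing nGraph facilities that appear in the diff.

// my_fused_op.hpp (sketch)
namespace ngraph
{
    namespace op
    {
        class MyFusedOp : public ngraph::op::util::FusedOp
        {
        public:
            NGRAPH_API
            static const std::string type_name;
            const std::string& description() const override { return type_name; }
            /// \brief Constructs a MyFusedOp operation.
            MyFusedOp(const std::shared_ptr<Node>& data);
        };
    }
}

// my_fused_op.cpp (sketch)
using namespace std;
using namespace ngraph;

const string op::MyFusedOp::type_name{"MyFusedOp"};

op::MyFusedOp::MyFusedOp(const shared_ptr<Node>& data)
    // Inputs are validated via check_single_output_args; the name string is
    // no longer passed to the FusedOp base constructor and is instead
    // returned by description() through type_name.
    : FusedOp(check_single_output_args({data}))
{
    constructor_validate_and_infer_types();
}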