Unverified Commit a882e387 authored by aslepko, committed by GitHub

Merge branch 'master' into aslepko/ci

parents e465f3b7 9db3eb75
#
# Overrides to the base LLVM style: comments wrap at the column limit.
#
BasedOnStyle: LLVM
IndentWidth: 4
UseTab: Never
Language: Cpp
Standard: Cpp11
AccessModifierOffset: -4
AlignConsecutiveDeclarations: false
AlignConsecutiveAssignments: false
AlignTrailingComments: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: true
AllowShortFunctionsOnASingleLine: Inline
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BreakBeforeBraces: Allman
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 100
#CommentPragmas: '.*'
IndentCaseLabels: false
IndentWrappedFunctionNames: true
KeepEmptyLinesAtTheStartOfBlocks: false
NamespaceIndentation: All
PointerAlignment: Left
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesInAngles: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
ReflowComments: true
IncludeCategories:
- Regex: '^".*'
Priority: 3
- Regex: '^<.*'
Priority: 2
SortIncludes: true
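The excerpts that follow show this configuration in action: with ReflowComments: true and ColumnLimit: 100, clang-format rewraps comments that overrun the limit. A before/after sketch on a hypothetical doxygen line (not taken from the changes below):

// Before formatting: a single over-long doxygen line.
/// \param data - channel dimension index in the data tensor; negative values count from the back of the input shape

// After clang-format with the settings above: reflowed to fit the 100-column limit.
/// \param data - channel dimension index in the data tensor; negative values count from the
/// back of the input shape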
......@@ -297,7 +297,10 @@ op::ConvolutionBiasBackpropFiltersBias::ConvolutionBiasBackpropFiltersBias(
// Window movement strides    q                   p_f
// Window dilation strides    p_f                 q
// Padding below              a_x                 a_x
// Padding above              b_x                 b_x -
//                                                  (a_x + (S_x - 1)p_x + b_x -
//                                                    (S_f - 1)p_f)
//                                                  % q
// Data dilation strides      p_x                 p_x
for (size_t i = 0; i < filters_shape.size() - 2; i++)
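A minimal sketch of the "padding above" row from the table as a standalone function; the function and its parameter names are hypothetical stand-ins for the symbols in the comment, not the library's code.

#include <cstddef>

// S_x: data size on this axis, S_f: filter size, a_x/b_x: forward padding below/above,
// p_x: data dilation stride, p_f: window dilation stride, q: window movement stride.
std::ptrdiff_t backprop_filters_padding_above(std::ptrdiff_t S_x,
                                              std::ptrdiff_t S_f,
                                              std::ptrdiff_t a_x,
                                              std::ptrdiff_t b_x,
                                              std::ptrdiff_t p_x,
                                              std::ptrdiff_t p_f,
                                              std::ptrdiff_t q)
{
    // b_x - (a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f) % q
    return b_x - (a_x + (S_x - 1) * p_x + b_x - (S_f - 1) * p_f) % q;
}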
......
......@@ -24,7 +24,8 @@ namespace ngraph
{
namespace op
{
/// \brief DepthToSpace permutes data from the depth dimension of the input blob into
/// spatial dimensions.
///
/// \note Values from the depth dimension (assuming NCHW layout) are moved in
/// spatial blocks to the height and width dimensions.
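A shape-only sketch of that permutation, assuming NCHW layout as the note says; the block size b is hypothetical here, since the attribute itself is outside this excerpt.

#include <array>
#include <cstddef>

// [N, C, H, W] -> [N, C / (b * b), H * b, W * b]: depth shrinks by b*b while
// height and width each grow by b.
std::array<std::size_t, 4> depth_to_space_shape(const std::array<std::size_t, 4>& in,
                                                std::size_t b)
{
    return {in[0], in[1] / (b * b), in[2] * b, in[3] * b};
}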
......
......@@ -30,7 +30,9 @@ namespace ngraph
/// \note Input floating point values are quantized into a discrete
/// set of floating point values.
///
/// \paragraph Implementation This class creates a node which performs the following
/// operation:
///
/// round((data - input_low) / (input_high - input_low) * (levels-1)) /
/// (levels-1) * (output_high - output_low) + output_low
///
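The same formula restated element-wise as runnable code; a sketch that mirrors the expression above, not the node's implementation (the real op consumes whole tensors).

#include <cmath>

float fake_quantize(float data, float input_low, float input_high,
                    float output_low, float output_high, int levels)
{
    float in_scale = (input_high - input_low) / (levels - 1);
    float out_scale = (output_high - output_low) / (levels - 1);
    // round((data - input_low) / (input_high - input_low) * (levels-1))
    //     / (levels-1) * (output_high - output_low) + output_low
    return std::round((data - input_low) / in_scale) * out_scale + output_low;
}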
......
......@@ -26,7 +26,8 @@ namespace ngraph
{
/// \brief Operator performing General Matrix multiplication.
///
/// \note More information:
/// https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3
///
/// A' = transpose(A) if transA else A
/// B' = transpose(B) if transB else B
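A naive scalar sketch under the usual Gemm definition Y = alpha * A' * B' + beta * C; the alpha/beta/C part of the doc comment is truncated above, so that completion is an assumption, and transposition is left to the caller.

#include <cstddef>
#include <vector>

// A': m x k, B': k x n, C: m x n, all row-major.
std::vector<float> gemm(const std::vector<float>& A,
                        const std::vector<float>& B,
                        const std::vector<float>& C,
                        std::size_t m, std::size_t k, std::size_t n,
                        float alpha, float beta)
{
    std::vector<float> Y(m * n);
    for (std::size_t i = 0; i < m; ++i)
        for (std::size_t j = 0; j < n; ++j)
        {
            float acc = 0.0f;
            for (std::size_t p = 0; p < k; ++p)
                acc += A[i * k + p] * B[p * n + j];
            Y[i * n + j] = alpha * acc + beta * C[i * n + j];
        }
    return Y;
}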
......
......@@ -133,7 +133,8 @@ NodeVector op::GroupConvolution::decompose_op() const
auto filters = input_value(1);
// Split one convolution op to N ops where N is the number of groups
// and concat results after computation.
// reference:
// https://github.com/NervanaSystems/ngraph-mxnet/blob/fdd692/src/ngraph/ngraph_emitter.cc#L822-L856
std::size_t n_data_channels{data.get_shape().at(1)};
std::size_t n_filters_channels{filters.get_shape().at(0)};
std::size_t data_group_size{n_data_channels / m_groups};
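The slicing arithmetic behind that split, pulled out into a hypothetical helper: each group g takes a contiguous block of data_group_size channels (filters are sliced the same way), and the per-group convolution outputs are concatenated along the channel axis.

#include <cstddef>
#include <utility>
#include <vector>

// Returns, for each group, the [begin, end) channel range to slice out.
std::vector<std::pair<std::size_t, std::size_t>> group_channel_ranges(std::size_t n_channels,
                                                                      std::size_t groups)
{
    std::vector<std::pair<std::size_t, std::size_t>> ranges;
    const std::size_t group_size = n_channels / groups; // e.g. data_group_size above
    for (std::size_t g = 0; g < groups; ++g)
        ranges.emplace_back(g * group_size, (g + 1) * group_size);
    return ranges;
}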
......
......@@ -48,9 +48,10 @@ namespace ngraph
/// \param[in] dilations The dilations along each feature axis.
/// \param[in] padding_begin The padding added at the beginning of each feature axis.
/// \param[in] padding_end The padding added at the end of each feature axis.
/// \param[in] output_padding The zero-padding (adjustment) added to one side of the
/// output.
/// \param[in] groups The number of groups the input channels and output
/// channels are divided into.
/// \param[in] pad_type The provided padding type.
/// \param[in] output_shape The output shape. When provided padding values are
/// automatically inferred.
......@@ -85,11 +86,12 @@ namespace ngraph
/// \param[in] filters The node producing filters data.
/// \param[in] strides The strides along each feature axis.
/// \param[in] dilations The dilations along each feature axis.
/// \param[in] output_padding The zero-padding (adjustment) added to one side of the
/// output.
/// \param[in] output_shape The output shape. When provided padding values are
/// automatically inferred.
/// \param[in] groups The number of groups the input channels and output
/// channels are divided into.
///
GroupConvolutionTranspose(const Output<Node>& data,
const Output<Node>& filters,
......@@ -106,8 +108,8 @@ namespace ngraph
/// \param[in] filters The node producing filters data.
/// \param[in] output_shape The output shape. When provided padding values are
/// automatically inferred.
/// \param[in] groups The number of groups the input channels and output
/// channels are divided into.
///
GroupConvolutionTranspose(const Output<Node>& data,
const Output<Node>& filters,
......
......@@ -181,7 +181,8 @@ NodeVector op::GRUCell::decompose_op() const
// f, g - are activation functions
// zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz)
// rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr)
// ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # when linear_before_reset := false
//                                                      # (default)
// ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset := true
// Ht = (1 - zt) (.) ht + zt (.) Ht-1
// -------------------
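A per-element sketch of the recurrence above, with the matrix products folded into precomputed scalars (the xW_*/hR_* names are hypothetical stand-ins for Xt*(W*^T) and Ht-1*(R*^T)), f taken as sigmoid and g as tanh; it shows where Rbh moves when linear_before_reset flips. In this scalar case (rt (.) Ht-1)*(Rh^T) reduces to rt * hR_h.

#include <cmath>

float sigmoid(float v) { return 1.0f / (1.0f + std::exp(-v)); } // f above

float gru_state(float xW_z, float hR_z, float b_z, // update gate terms, b_z = Wbz + Rbz
                float xW_r, float hR_r, float b_r, // reset gate terms, b_r = Wbr + Rbr
                float xW_h, float hR_h,            // hidden gate terms
                float wb_h, float rb_h,            // Wbh and Rbh kept separate
                float h_prev, bool linear_before_reset)
{
    float zt = sigmoid(xW_z + hR_z + b_z);
    float rt = sigmoid(xW_r + hR_r + b_r);
    float ht = linear_before_reset
                   ? std::tanh(xW_h + rt * (hR_h + rb_h) + wb_h) // Rbh inside the reset product
                   : std::tanh(xW_h + rt * hR_h + rb_h + wb_h);  // both biases added afterwards
    return (1.0f - zt) * ht + zt * h_prev; // Ht = (1 - zt) (.) ht + zt (.) Ht-1
}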
......
......@@ -152,8 +152,8 @@ namespace ngraph
///
/// \brief Control whether or not apply the linear transformation.
///
/// \note The linear transformation may be applied when computing the output of the
/// hidden gate. It's done before multiplying by the output of the reset gate.
///
bool m_linear_before_reset;
};
......
......@@ -206,8 +206,8 @@ NodeVector op::LSTMCell::decompose_op() const
// Shape: [4*hidden_size, hidden_size].
// H_t - The hidden state tensor at current time step. Shape: [batch_size, hidden_size].
// C_t - The cell state tensor at current time step. Shape: [batch_size, hidden_size].
// bias - The sum of biases (weight and recurrence) for input, output, forget, and cell
//        gates. Shape: [4 * hidden_size]
// p_[iof] - The peephole weight vector for respectively: input, output, and forget gates.
// Each peephole has shape [hidden_size].
//
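A sketch of how one stacked [4 * hidden_size] bias splits into per-gate slices. The i/o/f/c slot order is an assumption taken from the listing order in the comment, and the helper itself is hypothetical.

#include <cstddef>
#include <vector>

struct GateBiases
{
    std::vector<float> i, o, f, c; // input, output, forget, cell
};

GateBiases split_gate_biases(const std::vector<float>& bias, std::size_t hidden_size)
{
    auto slice = [&](std::size_t g) {
        return std::vector<float>(bias.begin() + g * hidden_size,
                                  bias.begin() + (g + 1) * hidden_size);
    };
    return {slice(0), slice(1), slice(2), slice(3)};
}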
......
......@@ -70,7 +70,8 @@ namespace ngraph
/// \brief Constructs LSTMCell node.
///
/// \param[in] X The input tensor with shape: [batch_size, input_size].
/// \param[in] W The weight tensor with shape: [4*hidden_size,
/// input_size].
/// \param[in] R The recurrence weight tensor with shape:
/// [4*hidden_size, hidden_size].
/// \param[in] H_t The hidden state tensor at current time step with
......@@ -104,7 +105,8 @@ namespace ngraph
/// \brief Constructs LSTMCell node.
///
/// \param[in] X The input tensor with shape: [batch_size, input_size].
/// \param[in] W The weight tensor with shape: [4*hidden_size,
/// input_size].
/// \param[in] R The recurrence weight tensor with shape:
/// [4*hidden_size, hidden_size].
/// \param[in] H_t The hidden state tensor at current time step with
......@@ -112,7 +114,8 @@ namespace ngraph
/// \param[in] C_t The cell state tensor at current time step with
/// shape: [batch_size, hidden_size].
/// \param[in] hidden_size The number of hidden units for recurrent cell.
/// \param[in] B The bias tensor for input gate with shape:
/// [8*hidden_size].
/// \param[in] P The weight tensor for peepholes with shape:
/// [3*hidden_size] - 3 corresponds to the i, o, and f gates only.
/// \param[in] activations The vector of activation functions used inside
......
......@@ -36,9 +36,11 @@ namespace ngraph
/// \brief Constructs an MVN operation.
///
/// \param data Input tensor with data
/// \param normalize_variance flag that denotes whether to perform variance
/// normalization.
/// \param across_channels flag that denotes if mean values are shared across channels.
/// \param eps the number to be added to the variance to avoid division by zero when
/// normalizing the value
///
MVN(const Output<Node>& data,
bool across_channels = true,
......@@ -49,8 +51,10 @@ namespace ngraph
///
/// \param data Input tensor with data
/// \param reduction_axes A list of axes, along which to reduce.
/// \param normalize_variance flag that denotes whether to perform variance
/// normalization.
/// \param eps the number to be added to the variance to avoid division by zero when
/// normalizing the value
///
MVN(const Output<Node>& data,
AxisSet reduction_axes,
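A one-dimensional sketch of the normalization these parameters control, assuming the usual (x - mean) / sqrt(variance + eps) form implied by the eps description; not the node's actual decomposition.

#include <cmath>
#include <vector>

void mvn_inplace(std::vector<float>& x, bool normalize_variance, float eps)
{
    float mean = 0.0f;
    for (float v : x)
        mean += v;
    mean /= x.size();
    for (float& v : x)
        v -= mean; // mean normalization always happens
    if (!normalize_variance)
        return;
    float var = 0.0f;
    for (float v : x)
        var += v * v;
    var /= x.size();
    for (float& v : x)
        v /= std::sqrt(var + eps); // eps keeps the divisor away from zero
}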
......
......@@ -39,9 +39,11 @@ namespace ngraph
/// \brief Constructs a Normalize operation.
///
/// \param data - Node producing the input tensor
/// \param axes - Node indicating axes along which reduction is
/// calculated
/// \param eps - The epsilon added to L2 norm.
/// \param eps_mode - Specifies how eps is combined with L2 value calculated
/// before division
///
NormalizeL2(const Output<Node>& data,
const Output<Node>& axes,
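One plausible reading of eps_mode, sketched: eps is either added to the squared norm or used as a floor for the norm. The enum and the exact placement relative to the square root are not shown in this excerpt, so both variants here are assumptions.

#include <algorithm>
#include <cmath>

enum class EpsMode { ADD, MAX }; // hypothetical mirror of the real attribute

float l2_divisor(float sum_of_squares, float eps, EpsMode mode)
{
    return mode == EpsMode::ADD ? std::sqrt(sum_of_squares + eps)
                                : std::max(std::sqrt(sum_of_squares), eps);
}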
......
......@@ -152,7 +152,8 @@ NodeVector op::RNNCell::decompose_op() const
// W - The weight tensor for input gate. Shape: [hidden_size, input_size].
// R - The recurrence weight tensor for input gate. Shape: [hidden_size, hidden_size].
// H_t - The hidden state tensor at current time step. Shape: [batch_size, hidden_size].
// B - The bias tensor for the input gate. Shape: [2 * hidden_size].
//     Concatenation of `[Wb, Rb]`.
// Wb - W bias vectors for input gate.
// Rb - R bias vectors for input gate.
// ------ VARIABLE NAMES ------
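A per-element sketch of the single-gate cell these variables describe, assuming the standard recurrence Ht = f(Xt*(W^T) + Ht-1*(R^T) + Wb + Rb) with f = tanh; the folded scalar inputs are hypothetical stand-ins for the matrix products, and B = [Wb, Rb] as stated above.

#include <cmath>

float rnn_state(float xW, float hR, float wb, float rb)
{
    return std::tanh(xW + hR + wb + rb);
}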
......
......@@ -65,7 +65,8 @@ namespace ngraph
/// \brief Constructs RNNCell node.
///
/// \param[in] X The input tensor with shape: [batch_size, input_size].
/// \param[in] W The weight tensor with shape: [hidden_size,
/// input_size].
/// \param[in] R The recurrence weight tensor with shape:
/// [hidden_size, hidden_size].
/// \param[in] H_t The hidden state tensor at current time step with
......@@ -94,13 +95,15 @@ namespace ngraph
/// \brief Constructs RNNCell node.
///
/// \param[in] X The input tensor with shape: [batch_size, input_size].
/// \param[in] W The weight tensor with shape: [hidden_size,
/// input_size].
/// \param[in] R The recurrence weight tensor with shape:
/// [hidden_size, hidden_size].
/// \param[in] H_t The hidden state tensor at current time step with
/// shape: [batch_size, hidden_size].
/// \param[in] hidden_size The number of hidden units for recurrent cell.
/// \param[in] B The bias tensor for input gate with shape:
/// [2*hidden_size].
/// \param[in] activations The vector of activation functions used inside
/// recurrent cell.
/// \param[in] activation_alpha The vector of alpha parameters for activation
......
......@@ -102,7 +102,8 @@ Shape op::ShuffleChannels::get_pre_shuffle_shape(const Shape& data_shape) const
// [0]: ds[0] * ds[1] * ... * ds[m_axis-1] (or 1 if m_axis == 0)
// [1]: m_groups
// [2]: ds[axis] / m_groups
// [3]: ds[axis+1] * ds[axis+2] * ... * ds[ds.size()-1] (or 1 if m_axis points to the last elem
//      of ds)
Shape res(4, 1);
size_t axis_zb = get_zero_based_axis();
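The same four-element shape restated as a runnable sketch; it mirrors the comment (ds is the data shape, axis already zero-based like axis_zb) rather than the member function itself.

#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

std::vector<std::size_t> pre_shuffle_shape(const std::vector<std::size_t>& ds,
                                           std::size_t axis,
                                           std::size_t groups)
{
    std::vector<std::size_t> res(4, 1);
    res[0] = std::accumulate(ds.begin(), ds.begin() + axis, std::size_t{1},
                             std::multiplies<std::size_t>()); // ds[0] * ... * ds[axis-1], or 1
    res[1] = groups;
    res[2] = ds[axis] / groups;
    res[3] = std::accumulate(ds.begin() + axis + 1, ds.end(), std::size_t{1},
                             std::multiplies<std::size_t>()); // trailing dims product, or 1
    return res;
}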
......
......@@ -36,8 +36,11 @@ namespace ngraph
/// \brief Constructs a ShuffleChannels node.
///
/// \param data - Node producing the input tensor
/// \param axis - channel dimension index in the data tensor. A negative value means
/// that the index should be calculated from the back of the input data
/// shape.
/// \param groups - number of groups the channel dimension specified by axis should be
/// split into
ShuffleChannels(const Output<Node>& data,
const int axis = 1,
const size_t groups = 1UL);
......
......@@ -37,15 +37,21 @@ namespace ngraph
/// \brief Constructs a Split op that evenly divides the input tensor.
///
/// \param data - Node producing the input tensor
/// \param axis - indicates an axis along which the input tensor should be split.
/// Negative values mean counting from the back of the input tensor's
/// shape.
/// \param num_split - the number of "pieces" the input tensor will be split into
Split(const Output<Node>& data, const int axis, const size_t num_split);
/// \brief Constructs a Split op that splits the input tensor into variable length
/// "pieces"
///
/// \param data - Node producing the input tensor
/// \param axis - indicates an axis along which the input tensor should be split.
/// Negative values mean counting from the back of the input tensor's
/// shape.
/// \param splits - a list of lengths that the input tensor should be split into. Use this
/// constructor to split the input tensor into variable-length chunks.
Split(const Output<Node>& data, const int axis, const std::vector<size_t>& splits);
void pre_validate_and_infer_types() override;
......@@ -65,7 +71,8 @@ namespace ngraph
///
/// \param axis - original axis value; negative values are accepted
/// \param input_tensor_rank - rank of the input data tensor
/// \return The sum of the two parameters for a negative axis value, or the axis itself
/// otherwise
size_t adjust_axis_value(const int axis, const size_t input_tensor_rank) const;
/// used internally for validation purposes, indicates which constructor was used
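The documented behaviour as a standalone sketch: a negative axis counts from the back, so the rank is added to it; a non-negative axis passes through unchanged (valid inputs assumed).

#include <cstddef>

std::size_t adjust_axis_value(const int axis, const std::size_t input_tensor_rank)
{
    return axis < 0 ? static_cast<std::size_t>(axis + static_cast<int>(input_tensor_rank))
                    : static_cast<std::size_t>(axis);
}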
......