Commit cdfd67cd authored by Adam Procter, committed by omarkanawi

clang-format comments: /src/ngraph/op (#3509)

* New clang-format rules for src/ngraph/op/util

* Restore root .clang-format

* Remove redundant .clang-formats

* Revert "Remove redundant .clang-formats"

This reverts commit e158df1e507570b55348ce326fd7e8b4ae819f36.

* Remove redundant .clang-formats

* New clang-format rules for src/ngraph/op/fused

* New clang-format rules for src/ngraph/op/experimental

* New clang-format rules for src/ngraph/op/.
parent afa062d5
#
# OVERRIDE TO STYLE: Comments do *not* wrap.
#
BasedOnStyle: LLVM
IndentWidth: 4
UseTab: Never
Language: Cpp
Standard: Cpp11
AccessModifierOffset: -4
AlignConsecutiveDeclarations: false
AlignConsecutiveAssignments: false
AlignTrailingComments: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: true
AllowShortFunctionsOnASingleLine: Inline
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BreakBeforeBraces: Allman
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 100
CommentPragmas: '.*'
IndentCaseLabels: false
IndentWrappedFunctionNames: true
KeepEmptyLinesAtTheStartOfBlocks: false
NamespaceIndentation: All
PointerAlignment: Left
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesInAngles: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
SortIncludes: false
ReflowComments: true
IncludeCategories:
- Regex: '^".*'
Priority: 3
- Regex: '^<.*'
Priority: 2
SortIncludes: true
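
The practical difference between the "Comments do *not* wrap" style above and the "Comments wrap" overrides shown further down is the CommentPragmas line: with CommentPragmas: '.*' every comment matches the pragma pattern, so clang-format leaves comment line breaks alone even though ReflowComments is true; in the override files that line is commented out, so comments are rewrapped at ColumnLimit 100. A minimal illustration on a hypothetical declaration (not part of the commit):

// Hypothetical header fragment, for illustration only.

// Under the "do not wrap" style (CommentPragmas: '.*'), this long line is left as-is:
/// \param index_element_type produce indices. Currently, only int64 or int32 are supported
void example_unwrapped(int index_element_type);

// Under the "wrap" override (CommentPragmas disabled, ReflowComments: true, ColumnLimit: 100),
// the same comment is rewrapped, which matches the rewrapping shown in the hunks below:
/// \param index_element_type produce indices. Currently, only int64 or int32 are
/// supported
void example_wrapped(int index_element_type);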
@@ -37,7 +37,8 @@ namespace ngraph
 ///
 /// \param arg The input tensor
 /// \param axis The axis along which to compute an index for maximum
-/// \param index_element_type produce indices. Currently, only int64 or int32 are supported
+/// \param index_element_type produce indices. Currently, only int64 or int32 are
+/// supported
 ArgMax(const Output<Node>& arg, size_t axis, const element::Type& index_element_type);
 virtual std::shared_ptr<Node>
...
@@ -38,7 +38,8 @@ namespace ngraph
 ///
 /// \param arg The input tensor
 /// \param axis The axis along which to compute an index for minimum
-/// \param index_element_type produce indices. Currently, only int64 or int32 are supported
+/// \param index_element_type produce indices. Currently, only int64 or int32 are
+/// supported
 ArgMin(const Output<Node>& arg, size_t axis, const element::Type& index_element_type);
 virtual std::shared_ptr<Node>
...
@@ -107,7 +107,8 @@ namespace ngraph
 const Shape& padding_above,
 bool include_padding_in_avg_computation = false);
-/// \brief Constructs a batched, unpadded average pooling operation (i.e., all padding shapes are set to 0).
+/// \brief Constructs a batched, unpadded average pooling operation (i.e., all padding
+/// shapes are set to 0).
 ///
 /// \param arg The output producing the input data batch tensor.<br>
 /// `[d1, ..., dn]`
@@ -119,7 +120,8 @@ namespace ngraph
 const Shape& window_shape,
 const Strides& window_movement_strides);
-/// \brief Constructs an unstrided batched convolution operation (i.e., all window movement strides are 1 and all padding shapes are set to 0).
+/// \brief Constructs an unstrided batched convolution operation (i.e., all window
+/// movement strides are 1 and all padding shapes are set to 0).
 ///
 /// \param arg The output producing the input data batch tensor.<br>
 /// `[d1, ..., dn]`
...
@@ -122,8 +122,8 @@ namespace ngraph
 /// SHAPE DETAILS:
 /// gamma: must have rank 1, with the same span as input's channel axis.
 /// beta: must have rank 1, with the same span as input's channel axis.
-/// input: must have rank >= 2. The second dimension represents the channel axis and
-/// must have a span of at least 1.
+/// input: must have rank >= 2. The second dimension represents the channel axis
+/// and must have a span of at least 1.
 /// mean: must have rank 1, with the same span as input's channel axis.
 /// variance: must have rank 1, with the same span as input's channel axis.
 /// output: shall have the same shape as 'input'.
...
@@ -23,7 +23,8 @@ namespace ngraph
 {
 namespace op
 {
-/// \brief Operation which "adds" axes to an input tensor, replicating elements from the input as needed along the new axes.
+/// \brief Operation which "adds" axes to an input tensor, replicating elements from the
+/// input as needed along the new axes.
 class Broadcast : public Op
 {
 public:
@@ -36,8 +37,9 @@ namespace ngraph
 ///
 /// \param arg Node that produces the input tensor to be broadcast.
 /// \param shape The shape of the output tensor.
-/// \param broadcast_axes The axis positions (0-based) in the result that are being broadcast. The
-/// remaining axes in shape must be the same as the shape of arg.
+/// \param broadcast_axes The axis positions (0-based) in the result that are being
+/// broadcast. The remaining axes in shape must be the same as
+/// the shape of arg.
 Broadcast(const Output<Node>& arg, const Shape& shape, const AxisSet& broadcast_axes);
 void validate_and_infer_types() override;
...
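
As a concrete reading of the broadcast_axes documentation above: if arg has shape (3), shape is (2, 3), and broadcast_axes is {0}, then axis 0 of the result is the newly added axis and the remaining axis must match arg. A standalone sketch of that replication (plain C++, not the nGraph API; values are made up):

#include <array>
#include <cstddef>
#include <iostream>

int main()
{
    // arg has shape (3); the result has shape (2, 3) with broadcast_axes = {0},
    // i.e. axis 0 is new and every row is a copy of arg.
    std::array<int, 3> arg{10, 20, 30};
    int result[2][3];
    for (std::size_t i = 0; i < 2; ++i)     // new (broadcast) axis
        for (std::size_t j = 0; j < 3; ++j) // axis carried over from arg
            result[i][j] = arg[j];

    for (std::size_t i = 0; i < 2; ++i)
    {
        for (std::size_t j = 0; j < 3; ++j)
            std::cout << result[i][j] << ' ';
        std::cout << '\n';
    }
}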
@@ -251,7 +251,10 @@ void op::ConvolutionBackpropData::validate_and_infer_types()
 // Window movement strides   q_x   p_x
 // Window dilation strides   p_f   p_f
 // Padding below             a_x   (S_f - 1)p_f - a_x
-// Padding above             b_x   (S_f - 1)p_f + ((a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f) % q_x) - b_x
+// Padding above             b_x   (S_f - 1)p_f +
+//                                   + ((a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f)
+//                                        % q_x)
+//                                   - b_x
 // Data dilation strides     p_x   q_x
 // Output shape              S_o   S_x
 //
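
Reading the table above with S_f as the filter size, p_f the window dilation, a_x/b_x the forward padding below/above, S_x the data size, p_x the data dilation, and q_x the forward window movement stride, the backprop padding can be evaluated directly. A standalone numeric sketch with made-up values (not nGraph code):

#include <iostream>

int main()
{
    // Illustrative values only.
    long S_f = 3, p_f = 1; // filter size, window dilation stride
    long a_x = 1, b_x = 1; // forward padding below / above
    long S_x = 5, p_x = 1; // data size, data dilation stride
    long q_x = 2;          // forward window movement stride

    long backward_padding_below = (S_f - 1) * p_f - a_x;
    long backward_padding_above =
        (S_f - 1) * p_f + ((a_x + (S_x - 1) * p_x + b_x - (S_f - 1) * p_f) % q_x) - b_x;

    std::cout << backward_padding_below << ' ' << backward_padding_above << '\n'; // 1 1
}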
@@ -737,7 +740,8 @@ Shape op::util::infer_convolution_output_shape(const Node* node,
 ").");
 //
-// Extract input item shape Di and make sure all dimensions are larger than 0 after padding and dilation.
+// Extract input item shape Di and make sure all dimensions are larger than 0 after padding and
+// dilation.
 //
 std::vector<ptrdiff_t> input_item_virtual_shape_signed;
@@ -785,8 +789,9 @@ Shape op::util::infer_convolution_output_shape(const Node* node,
 }
 //
-// Extract the physical shape Wp of the convolution window, *not* including dilation, from the filter dimensions.
-// At the same time, make sure window shape dimensions are all larger than 0.
+// Extract the physical shape Wp of the convolution window, *not* including dilation, from the
+// filter dimensions. At the same time, make sure window shape dimensions are all larger than
+// 0.
 //
 Shape window_physical_shape;
@@ -804,8 +809,9 @@ Shape op::util::infer_convolution_output_shape(const Node* node,
 }
 //
-// Compute virtual shape Wp of the convolution window, *including* dilation. At the same time, make sure all
-// window dilation strides are larger than 0, and that the dilated filter fits within the spatial dimensions.
+// Compute virtual shape Wp of the convolution window, *including* dilation. At the same time,
+// make sure all window dilation strides are larger than 0, and that the dilated filter fits
+// within the spatial dimensions.
 //
 Shape window_virtual_shape;
...
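
The three comments above correspond to the usual convolution shape bookkeeping: a "virtual" (dilated and padded) input extent, a physical window extent, and a "virtual" (dilated) window extent, from which the output extent follows. A hedged sketch of that standard arithmetic for a single spatial dimension (illustrative values, not a copy of infer_convolution_output_shape):

#include <iostream>

int main()
{
    // One spatial dimension, illustrative values.
    long data_size = 7, data_dilation = 1, pad_below = 1, pad_above = 1;
    long window_size = 3, window_dilation = 2, stride = 2;

    // "Virtual" input extent after data dilation and padding.
    long input_virtual = (data_size - 1) * data_dilation + 1 + pad_below + pad_above; // 9
    // "Virtual" window extent after window dilation.
    long window_virtual = (window_size - 1) * window_dilation + 1;                    // 5
    // Output extent: number of window positions at the given stride.
    long output = (input_virtual - window_virtual) / stride + 1;                      // 3

    std::cout << input_virtual << ' ' << window_virtual << ' ' << output << '\n';
}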
@@ -65,7 +65,8 @@ namespace ngraph
 const Strides& data_dilation_strides,
 const PadType& pad_type = PadType::EXPLICIT);
-/// \brief Constructs a batched convolution operation with no data dilation (i.e., all data dilation strides are 1).
+/// \brief Constructs a batched convolution operation with no data dilation (i.e., all
+/// data dilation strides are 1).
 ///
 /// \param data_batch The node producing the input data batch tensor.<br>
 /// `[N, C_IN, D1, ... Df]`
@@ -89,7 +90,9 @@ namespace ngraph
 const CoordinateDiff& padding_below,
 const CoordinateDiff& padding_above);
-/// \brief Constructs a batched convolution operation with no padding or data dilation (i.e., padding above and below are 0 everywhere, and all data dilation strides are 1).
+/// \brief Constructs a batched convolution operation with no padding or data dilation
+/// (i.e., padding above and below are 0 everywhere, and all data dilation
+/// strides are 1).
 ///
 /// \param data_batch The node producing the input data batch tensor.<br>
 /// `[N, C_IN, D1, ... Df]`
@@ -107,7 +110,9 @@ namespace ngraph
 const Strides& window_movement_strides,
 const Strides& window_dilation_strides);
-/// \brief Constructs a batched convolution operation with no window dilation, padding, or data dilation (i.e., padding above and below are 0 everywhere, and all window/data dilation strides are 1).
+/// \brief Constructs a batched convolution operation with no window dilation, padding,
+/// or data dilation (i.e., padding above and below are 0 everywhere, and all
+/// window/data dilation strides are 1).
 ///
 /// \param data_batch The node producing the input data batch tensor.<br>
 /// `[N, C_IN, D1, ... Df]`
@@ -122,7 +127,9 @@ namespace ngraph
 const Output<Node>& filters,
 const Strides& window_movement_strides);
-/// \brief Constructs a batched convolution operation with no window dilation or movement stride (i.e., padding above and below are 0 everywhere, and all window/data dilation strides and window movement strides are 1).
+/// \brief Constructs a batched convolution operation with no window dilation or
+/// movement stride (i.e., padding above and below are 0 everywhere, and all
+/// window/data dilation strides and window movement strides are 1).
 ///
 /// \param data_batch The node producing the input data batch tensor.<br>
 /// `[N, C_IN, D1, ... Df]`
@@ -201,8 +208,10 @@ namespace ngraph
 /// \param data_batch_shape The shape of the data batch from forward-prop.
 /// \param filters The node producing the filters from forward-prop.
 /// \param output_delta The node producing output delta.
-/// \param window_movement_strides_forward The window movement strides from forward-prop.
-/// \param window_dilation_strides_forward The window dilation strides from forward-prop.
+/// \param window_movement_strides_forward The window movement strides from
+/// forward-prop.
+/// \param window_dilation_strides_forward The window dilation strides from
+/// forward-prop.
 /// \param padding_below_forward The padding-below sizes from forward-prop.
 /// \param padding_above_forward The padding-above sizes from forward-prop.
 /// \param data_dilation_strides_forward The data dilation strides from forward-prop.
@@ -300,8 +309,10 @@ namespace ngraph
 /// \param data_batch The tensor producing the data batch from forward-prop.
 /// \param filters_shape The shape of the filters from forward-prop.
 /// \param output_delta The node producing output delta.
-/// \param window_movement_strides_forward The window movement strides from forward-prop.
-/// \param window_dilation_strides_forward The window dilation strides from forward-prop.
+/// \param window_movement_strides_forward The window movement strides from
+/// forward-prop.
+/// \param window_dilation_strides_forward The window dilation strides from
+/// forward-prop.
 /// \param padding_below_forward The padding-below sizes from forward-prop.
 /// \param padding_above_forward The padding-above sizes from forward-prop.
 /// \param data_dilation_strides_forward The data dilation strides from forward-prop.
...
@@ -24,7 +24,8 @@ namespace ngraph
 {
 namespace op
 {
-/// \brief Generalized dot product operation, including scalar-tensor product, matrix-vector product, and matrix multiplication.
+/// \brief Generalized dot product operation, including scalar-tensor product, matrix-vector
+/// product, and matrix multiplication.
 class Dot : public Op
 {
 public:
@@ -43,13 +44,15 @@ namespace ngraph
 size_t reduction_axes_count,
 bool has_reduction_axes_count = true);
-/// \brief Constructs a dot product operation with default dot-axis selection depending on the inputs.
+/// \brief Constructs a dot product operation with default dot-axis selection depending
+/// on the inputs.
 ///
 /// If `arg0` or `arg1` is a scalar, there are no dot-axes. Else, there is one dot-axis.
 ///
-/// (Note that in particular, this results in scalar-tensor products where one or the other argument is
-/// a scalar, a matrix-vector products where `arg0` is a matrix and `arg1` is a vector, and a
-/// matrix multiplication where `arg0` and `arg1` are both matrices.)
+/// (Note that in particular, this results in scalar-tensor products where one or the
+/// other argument is a scalar, a matrix-vector products where `arg0` is a matrix and
+/// `arg1` is a vector, and a matrix multiplication where `arg0` and `arg1` are both
+/// matrices.)
 ///
 /// \param arg0 The node producing the first argument.
 /// \param arg1 The node producing the second argument.
...
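
The default-axis rule documented above (no dot-axes if either argument is a scalar, otherwise exactly one) yields the familiar matrix-vector case: a 2x3 matrix dotted with a length-3 vector contracts the single shared axis and produces a length-2 vector. A standalone sketch (plain C++, not the nGraph API):

#include <array>
#include <cstddef>
#include <iostream>

int main()
{
    // One dot-axis (the default when neither argument is a scalar):
    // arg0 is a 2x3 matrix, arg1 is a vector of size 3, the result has size 2.
    std::array<std::array<double, 3>, 2> arg0{{{1, 2, 3}, {4, 5, 6}}};
    std::array<double, 3> arg1{1, 0, -1};
    std::array<double, 2> result{};

    for (std::size_t i = 0; i < 2; ++i)
        for (std::size_t k = 0; k < 3; ++k) // the single contracted (dot) axis
            result[i] += arg0[i][k] * arg1[k];

    std::cout << result[0] << ' ' << result[1] << '\n'; // -2 -2
}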
@@ -35,8 +35,8 @@ namespace ngraph
 EmbeddingLookup() = default;
 /// \brief Constructs a EmbeddingLookup operation.
 ///
-/// EmbeddingLookup constructs an output tensor by replacing every index in a given input tensor
-/// with a row (from the weights matrix) at that index
+/// EmbeddingLookup constructs an output tensor by replacing every index in a given
+/// input tensor with a row (from the weights matrix) at that index
 ///
 /// \param data The input indices for tokens to be translated into embeddings
 /// \param weights is a dense matrix [N,M] where each row 0..N
...
@@ -22,6 +22,7 @@ namespace ngraph
 {
 namespace op
 {
+// clang-format off
 /// \brief Elementwise is-equal operation.
 ///
 /// ## Inputs
@@ -37,6 +38,7 @@ namespace ngraph
 /// | Type | Description |
 /// | ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
 /// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] = \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
+// clang-format on
 class Equal : public util::BinaryElementwiseComparison
 {
 public:
...
#
# OVERRIDE TO STYLE: Comments wrap.
#
BasedOnStyle: LLVM
IndentWidth: 4
UseTab: Never
Language: Cpp
Standard: Cpp11
AccessModifierOffset: -4
AlignConsecutiveDeclarations: false
AlignConsecutiveAssignments: false
AlignTrailingComments: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: true
AllowShortFunctionsOnASingleLine: Inline
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BreakBeforeBraces: Allman
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 100
#CommentPragmas: '.*'
IndentCaseLabels: false
IndentWrappedFunctionNames: true
KeepEmptyLinesAtTheStartOfBlocks: false
NamespaceIndentation: All
PointerAlignment: Left
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesInAngles: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
SortIncludes: false
ReflowComments: true
IncludeCategories:
- Regex: '^".*'
Priority: 3
- Regex: '^<.*'
Priority: 2
SortIncludes: true
#
# OVERRIDE TO STYLE: Comments wrap.
#
BasedOnStyle: LLVM
IndentWidth: 4
UseTab: Never
Language: Cpp
Standard: Cpp11
AccessModifierOffset: -4
AlignConsecutiveDeclarations: false
AlignConsecutiveAssignments: false
AlignTrailingComments: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: true
AllowShortFunctionsOnASingleLine: Inline
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BreakBeforeBraces: Allman
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 100
#CommentPragmas: '.*'
IndentCaseLabels: false
IndentWrappedFunctionNames: true
KeepEmptyLinesAtTheStartOfBlocks: false
NamespaceIndentation: All
PointerAlignment: Left
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesInAngles: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
SortIncludes: false
ReflowComments: true
IncludeCategories:
- Regex: '^".*'
Priority: 3
- Regex: '^<.*'
Priority: 2
SortIncludes: true
@@ -22,19 +22,21 @@ namespace ngraph
 {
 namespace op
 {
+// clang-format off
 /// \brief Elementwise Local Response Normalization (LRN) operation.
 ///
 /// ## Inputs
 ///
 /// |       | Type | Description |
-/// | ----- | --------------------------------- | ----------------------------------------------- |
+/// | ----- | --------------------------------------- | ----------------------------------------------- |
 /// | `arg` | \f$N[n, c, d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
 ///
 /// ## Output
 ///
 /// | Type | Description |
-/// | ---------------------- | ------------------------------------------------------------------------------------ |
+/// | ---------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
 /// | \f$N[n, c, d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[n, c, d_1,\dots,d_n] = \frac{N[n,i,d_1,\dots,d_n]}{ (bias + alpha * (\sum_{i=max(0,(nsize-1)/2)}^{min(C, (nsize-1)/2)+1} N[n,i,d_1,\dots,d_n]^{2}) ^ {2})}\f$ |
+// clang-format on
 class LRN : public Op
 {
 public:
...
@@ -81,7 +81,8 @@ namespace ngraph
 void validate_and_infer_types() override;
-/// \brief Constructs a batched, unpadded max pooling operation (i.e., all padding shapes are set to 0).
+/// \brief Constructs a batched, unpadded max pooling operation (i.e., all padding
+/// shapes are set to 0).
 ///
 /// \param arg The node producing the input data batch tensor.
 /// \param window_shape The window shape.
@@ -90,7 +91,8 @@ namespace ngraph
 const Shape& window_shape,
 const Strides& window_movement_strides);
-/// \brief Constructs an unstrided batched max pooling operation (i.e., all window movement strides are 1 and all padding shapes are set to 0).
+/// \brief Constructs an unstrided batched max pooling operation (i.e., all window
+/// movement strides are 1 and all padding shapes are set to 0).
 ///
 /// \param arg The node producing the input data batch tensor.
 /// \param window_shape The window shape.
...
@@ -22,6 +22,7 @@ namespace ngraph
 {
 namespace op
 {
+// clang-format off
 /// \brief One-hot operator.
 ///
 /// ## Parameters
@@ -42,6 +43,7 @@ namespace ngraph
 /// | Type | Description |
 /// | ---------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
 /// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T'\f$, where \f$T'[i_1,\dots,i_{m-1},i_m,i_{m+1},\dots,i_n] = 1\f$ if \f$T[i_1,\dots,i_{m-1},i_{m+1},\dots,i_n] = i_m\f$, else \f$0\f$. However, \f$T'\f$ is undefined if any non-integral value or any out-of-bounds value is detected in the input tensor. |
+// clang-format on
 class OneHot : public Op
 {
 public:
...
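
A worked reading of the one-hot formula above: for an input vector of class indices [1, 0, 2] and a new axis of size 3 inserted as the second axis, T'[i, j] = 1 exactly when T[i] = j. A standalone sketch (plain C++, hypothetical values):

#include <array>
#include <iostream>

int main()
{
    // Input vector of class indices; the output one-hot matrix has the new
    // axis (size 3 = number of classes) as its second axis.
    std::array<int, 3> input{1, 0, 2};
    int one_hot[3][3] = {};

    for (int i = 0; i < 3; ++i)
        for (int j = 0; j < 3; ++j)
            one_hot[i][j] = (input[i] == j) ? 1 : 0;

    for (int i = 0; i < 3; ++i)
        std::cout << one_hot[i][0] << ' ' << one_hot[i][1] << ' ' << one_hot[i][2] << '\n';
    // 0 1 0
    // 1 0 0
    // 0 0 1
}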
@@ -25,9 +25,9 @@ namespace ngraph
 {
 /// \brief A function parameter.
 ///
-/// Parameters are nodes that represent the arguments that will be passed to user-defined functions.
-/// Function creation requires a sequence of parameters.
-/// Basic graph operations do not need parameters attached to a function.
+/// Parameters are nodes that represent the arguments that will be passed to user-defined
+/// functions. Function creation requires a sequence of parameters. Basic graph operations
+/// do not need parameters attached to a function.
 class Parameter : public op::Op
 {
 protected:
...
@@ -22,6 +22,7 @@ namespace ngraph
 {
 namespace op
 {
+// clang-format off
 /// \brief Elementwise exponentiation operation.
 ///
 /// ## Inputs
@@ -36,6 +37,7 @@ namespace ngraph
 /// | Type | Description |
 /// | ---------------------- | -------------------------------------------------------------------------------------------------------------- |
 /// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n]^{\texttt{arg1}[i_1,\dots,i_n]}\f$ |
+// clang-format on
 class Power : public util::BinaryElementwiseArithmetic
 {
 public:
...
@@ -25,8 +25,8 @@ namespace ngraph
 namespace op
 {
 /// \brief Quantize operation
-/// Maps real input (r) to quantized output (q) using scale (s), zero point (z) and round mode:
-/// q = ROUND(r / s) + o
+/// Maps real input (r) to quantized output (q) using scale (s), zero point (z) and
+/// round mode: q = ROUND(r / s) + o
 class Quantize : public ngraph::op::Op
 {
 public:
...
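
A quick numeric reading of the formula above, q = ROUND(r / s) + o, where o is the zero point and ROUND is whatever round mode the op is constructed with; the sketch below simply uses std::round on made-up values (plain C++, not the nGraph API):

#include <cmath>
#include <cstdint>
#include <iostream>

int main()
{
    // Illustrative values only: scale s, zero point o, real input r.
    float s = 2.0f;
    std::int8_t o = 1;
    float r = 6.3f;

    // q = ROUND(r / s) + o; std::round stands in for the configured round mode.
    std::int8_t q = static_cast<std::int8_t>(std::round(r / s)) + o;

    std::cout << static_cast<int>(q) << '\n'; // 4
}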
@@ -24,8 +24,10 @@ namespace ngraph
 {
 namespace op
 {
-/// \brief Takes two input tensors of identical rank, with the second tensor no larger than the first in any dimension, and returns a copy of
-/// the first input tensor with the specified slice overwritten by the second input tensor.
+// clang-format off
+/// \brief Takes two input tensors of identical rank, with the second tensor no larger than
+/// the first in any dimension, and returns a copy of the first input tensor with
+/// the specified slice overwritten by the second input tensor.
 ///
 /// ## Parameters
 ///
@@ -47,6 +49,7 @@ namespace ngraph
 /// | Type | Description |
 /// | ---------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
 /// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T\f$ where \f$T[i_1,\dots,i_n] = \texttt{arg1}[j_1,\dots,j_n]\f$ if \f$j_1,\dots,j_n\f$ is in bounds for `arg1` and for all \f$m\f$, \f$i_m = l_m + j_m s_m\f$, otherwise \f$\texttt{arg0}[i_1,\dots,i_n]\f$. |
+// clang-format on
 class ReplaceSlice : public Op
 {
 public:
@@ -69,7 +72,8 @@ namespace ngraph
 const Coordinate& upper_bounds,
 const Strides& strides);
-/// \brief Constructs a tensor slice replacement operation with unit strides; i.e., every element inside the bounding box will be overwritten.
+/// \brief Constructs a tensor slice replacement operation with unit strides; i.e.,
+/// every element inside the bounding box will be overwritten.
 ///
 /// \param arg0 The tensor to overwrite into.
 /// \param arg1 The tensor to write into `arg0`.
...
@@ -24,18 +24,22 @@ namespace ngraph
 {
 namespace op
 {
+// clang-format off
 /// \brief Tensor reshape operation.
 ///
 /// "Converts" an input tensor into a new shape with the same number of elements.
 ///
-/// Given that the input tensor has shape \f$[d_1,\dots,d_n]\f$, the output may have any shape \f$[d'_1,\dots,d'_m]\f$ such that
-/// \f$\Pi_{0 \leq i \lt n}(d_i) = \Pi_{0 \leq i \lt m}(d'_i)\f$. For example, a \f$3\times{}4\f$ matrix can be reshaped into a
-/// 3-tensor of shape \f$3\times{}2\times{}2\f$, a matrix of shape \f$6\times{}2\f$, or a vector of size \f$12\f$, but not, for
-/// example, a matrix of size \f$4\times{}4\f$.
+/// Given that the input tensor has shape \f$[d_1,\dots,d_n]\f$, the output may have any
+/// shape \f$[d'_1,\dots,d'_m]\f$ such that
+/// \f$\Pi_{0 \leq i \lt n}(d_i) = \Pi_{0 \leq i \lt m}(d'_i)\f$. For example, a
+/// \f$3\times{}4\f$ matrix can be reshaped into a 3-tensor of shape
+/// \f$3\times{}2\times{}2\f$, a matrix of shape \f$6\times{}2\f$, or a vector of size
+/// \f$12\f$, but not, for example, a matrix of size \f$4\times{}4\f$.
 ///
-/// The parameter `input_order` indicates the order in which to "walk" over the input axes. Given a tensor of shape \f$(d_1,\dots,d_n)\f$,
-/// an input order of \f$(a_0, a_1, \dots, a_{n-1})\f$ results in the coordinate for axis \f$a_{n-1}\f$ being varied most frequently,
-/// followed by axis \f$a-2\f$, and so on down to \f$a_0\f$.
+/// The parameter `input_order` indicates the order in which to "walk" over the input axes.
+/// Given a tensor of shape \f$(d_1,\dots,d_n)\f$, an input order of
+/// \f$(a_0, a_1, \dots, a_{n-1})\f$ results in the coordinate for axis \f$a_{n-1}\f$ being
+/// varied most frequently, followed by axis \f$a-2\f$, and so on down to \f$a_0\f$.
 ///
 /// (TODO: example.)
 ///
@@ -57,6 +61,7 @@ namespace ngraph
 /// | Type | Description |
 /// | ------------------------ | ------------------------------------------------------------------------------------------------------ |
 /// | \f$E[d'_1,\dots,d'_m]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with its elements rearranged as described above. |
+// clang-format on
 class Reshape : public Op
 {
 public:
@@ -68,10 +73,13 @@ namespace ngraph
 /// \brief Constructs a reshape operation.
 ///
 /// \param arg The tensor to be reshaped.
-/// \param input_order The order in which to iterate over input axes. This must be a permutation of the
-/// sequence \f$(0,\dots,n-1)\f$ where \f$n\f$ is the rank of the input tensor.
-/// \param output_shape The output shape. If the input shape is \f$(a_0,\dots,a_{k-1})\f$ then the output shape must
-/// be of the form \f$(b_0,\dots,b_{j-1})\f$ where \f$\Pi(a_i) = \Pi(b_i)\f$.
+/// \param input_order The order in which to iterate over input axes. This must be a
+/// permutation of the sequence \f$(0,\dots,n-1)\f$ where \f$n\f$ is
+/// the rank of the input tensor.
+/// \param output_shape The output shape. If the input shape is
+/// \f$(a_0,\dots,a_{k-1})\f$ then the output shape must
+/// be of the form \f$(b_0,\dots,b_{j-1})\f$ where
+/// \f$\Pi(a_i) = \Pi(b_i)\f$.
 Reshape(const Output<Node>& arg,
 const AxisVector& input_order,
 const Shape& output_shape);
...
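
One consequence of the input_order semantics documented above (the last axis listed varies fastest, and the gathered elements fill the output in row-major order) is that reshaping a 2x3 tensor with input_order (1, 0) and output_shape (3, 2) is a transpose. A standalone sketch of that walk (plain C++, not the nGraph API):

#include <array>
#include <iostream>
#include <vector>

int main()
{
    // Input tensor of shape (2, 3), row-major.
    std::array<std::array<int, 3>, 2> input{{{1, 2, 3}, {4, 5, 6}}};

    // Walk the input with input_order = (1, 0): the last axis listed (axis 0)
    // varies fastest, then axis 1.
    std::vector<int> walked;
    for (int i1 = 0; i1 < 3; ++i1)     // axis 1 (listed first, varies slowest)
        for (int i0 = 0; i0 < 2; ++i0) // axis 0 (listed last, varies fastest)
            walked.push_back(input[i0][i1]);

    // Fill the output of shape (3, 2) in row-major order: this is the transpose.
    for (int r = 0; r < 3; ++r)
    {
        for (int c = 0; c < 2; ++c)
            std::cout << walked[r * 2 + c] << ' ';
        std::cout << '\n';
    }
    // 1 4
    // 2 5
    // 3 6
}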
@@ -22,9 +22,11 @@ namespace ngraph
 {
 namespace op
 {
+// clang-format off
 /// \brief Axis-reverse operation.
 ///
-/// Reverses the direction of zero or more axes in a tensor, where "reversing" an axis means that at the output tensor.
+/// Reverses the direction of zero or more axes in a tensor, where "reversing" an axis means
+/// that at the output tensor.
 ///
 /// ## Parameters
 ///
@@ -43,6 +45,7 @@ namespace ngraph
 /// | Type | Description |
 /// | ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
 /// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg}[j_1,\dots,j_n]\f$ and \f$j_k = d_k - i_k - 1\f$ if axis \f$k\f$ is in the reverse set; else \f$j_k = i_k\f$. |
+// clang-format on
 class Reverse : public Op
 {
 public:
...
@@ -22,6 +22,7 @@ namespace ngraph
 {
 namespace op
 {
+// clang-format off
 /// \brief Elementwise selection operation.
 ///
 /// ## Inputs
@@ -37,6 +38,7 @@ namespace ngraph
 /// | Type | Description |
 /// | ---------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
 /// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg1}[i_1,\dots,i_n]\text{ if }\texttt{arg0}[i_1,\dots,i_n] \neq 0\text{, else }\texttt{arg2}[i_1,\dots,i_n]\f$ |
+// clang-format on
 class Select : public Op
 {
 public:
...
@@ -22,6 +22,7 @@ namespace ngraph
 {
 namespace op
 {
+// clang-format off
 /// \brief Elementwise sine operation.
 ///
 /// ## Inputs
@@ -35,6 +36,7 @@ namespace ngraph
 /// | Type | Description |
 /// | ---------------------- | ------------------------------------------------------------------------------------ |
 /// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \sin(\texttt{arg}[i_1,\dots,i_n])\f$ |
+// clang-format on
 class Sin : public util::UnaryElementwiseArithmetic
 {
 public:
...
@@ -24,7 +24,8 @@ namespace ngraph
 {
 namespace op
 {
-/// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a bounding box, optionally with stride.
+/// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a
+/// bounding box, optionally with stride.
 class Slice : public Op
 {
 public:
@@ -44,7 +45,8 @@ namespace ngraph
 const Coordinate& lower_bounds,
 const Coordinate& upper_bounds,
 const Strides& strides);
-/// \brief Constructs a tensor slice operation with unit strides; i.e., every element inside the bounding box will be copied to the output slice.
+/// \brief Constructs a tensor slice operation with unit strides; i.e., every element
+/// inside the bounding box will be copied to the output slice.
 ///
 /// \param arg The tensor to be sliced.
 /// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
...
@@ -22,6 +22,7 @@ namespace ngraph
 {
 namespace op
 {
+// clang-format off
 /// \brief Elementwise square root operation.
 ///
 /// ## Inputs
@@ -35,6 +36,7 @@ namespace ngraph
 /// | Type | Description |
 /// | ---------------------- | ------------------------------------------------------------------------------------- |
 /// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \sqrt{\texttt{arg}[i_1,\dots,i_n]}\f$ |
+// clang-format on
 class Sqrt : public util::UnaryElementwiseArithmetic
 {
 public:
...
@@ -25,6 +25,7 @@ namespace ngraph
 {
 namespace op
 {
+// clang-format off
 /// \brief Tensor sum operation.
 ///
 /// Element-wise sums the input tensor, eliminating the specified reduction axes.
@@ -71,6 +72,7 @@ namespace ngraph
 /// | Type | Description |
 /// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
 /// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by summation. |
+// clang-format on
 class Sum : public util::ArithmeticReduction
 {
 public:
...
@@ -22,6 +22,7 @@ namespace ngraph
 {
 namespace op
 {
+// clang-format off
 /// \brief Elementwise tangent operation.
 ///
 /// ## Inputs
@@ -35,6 +36,7 @@ namespace ngraph
 /// | Type | Description |
 /// | ---------------------- | ------------------------------------------------------------------------------------ |
 /// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \tan(\texttt{arg}[i_1,\dots,i_n])\f$ |
+// clang-format on
 class Tan : public util::UnaryElementwiseArithmetic
 {
 public:
...
@@ -26,7 +26,8 @@ namespace ngraph
 {
 namespace op
 {
-//brief Computes indices of top k maximum/minimum index along a specified axis for a given tensor
+// \brief Computes indices of top k maximum/minimum index along a specified axis for a
+// given tensor
 class TopK : public Op
 {
 public:
@@ -49,7 +50,8 @@ namespace ngraph
 ///
 /// \param arg The input tensor
 /// \param top_k_axis The axis along which to compute top k indices
-/// \param index_element_type produce indices. Currently, only int64 or int32 are supported
+/// \param index_element_type produce indices. Currently, only int64 or int32 are
+/// supported
 /// \param k Number of top indices to compute. Compute all indices if k = 0
 /// \param compute_max Compute top k max or top k min?
 /// \param sort SortType for sorting results, default - NONE
@@ -64,7 +66,8 @@ namespace ngraph
 /// \param arg The input tensor
 /// \param k Number of top indices to compute. Compute all indices if k = 0
 /// \param top_k_axis The axis along which to compute top k indices
-/// \param index_element_type produce indices. Currently, only int64 or int32 are supported
+/// \param index_element_type produce indices. Currently, only int64 or int32 are
+/// supported
 /// \param compute_max Compute top k max or top k min?
 /// \param sort SortType for sorting results, default - NONE
 TopK(const Output<Node>& arg,
...
#
# OVERRIDE TO STYLE: Comments wrap.
#
BasedOnStyle: LLVM
IndentWidth: 4
UseTab: Never
Language: Cpp
Standard: Cpp11
AccessModifierOffset: -4
AlignConsecutiveDeclarations: false
AlignConsecutiveAssignments: false
AlignTrailingComments: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: true
AllowShortFunctionsOnASingleLine: Inline
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BreakBeforeBraces: Allman
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 100
#CommentPragmas: '.*'
IndentCaseLabels: false
IndentWrappedFunctionNames: true
KeepEmptyLinesAtTheStartOfBlocks: false
NamespaceIndentation: All
PointerAlignment: Left
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesInAngles: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
SortIncludes: false
ReflowComments: true
IncludeCategories:
- Regex: '^".*'
Priority: 3
- Regex: '^<.*'
Priority: 2
SortIncludes: true