Commit 85c1b19e authored by Adam Procter, committed by Scott Cyphers

clang-format comments: /src/ngraph/op/util (#3498)

* New clang-format rules for src/ngraph/op/util

* Restore root .clang-format

* Remove redundant .clang-formats

* Revert "Remove redundant .clang-formats"

This reverts commit e158df1e507570b55348ce326fd7e8b4ae819f36.

* Remove redundant .clang-formats
parent 9ece1688
@@ -24,7 +24,6 @@ BreakBeforeBraces: Allman
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 100
CommentPragmas: '.*'
IndentCaseLabels: false
IndentWrappedFunctionNames: true
...
#
# OVERRIDE TO STYLE: Comments wrap.
#
BasedOnStyle: LLVM
IndentWidth: 4
UseTab: Never
Language: Cpp
Standard: Cpp11
AccessModifierOffset: -4
AlignConsecutiveDeclarations: false
AlignConsecutiveAssignments: false
AlignTrailingComments: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: true
AllowShortFunctionsOnASingleLine: Inline
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BreakBeforeBraces: Allman
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 100
#CommentPragmas: '.*'
IndentCaseLabels: false
IndentWrappedFunctionNames: true
KeepEmptyLinesAtTheStartOfBlocks: false
NamespaceIndentation: All
PointerAlignment: Left
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesInAngles: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
SortIncludes: false
ReflowComments: true
IncludeCategories:
- Regex: '^".*'
Priority: 3
- Regex: '^<.*'
Priority: 2
SortIncludes: true
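This override file re-enables comment rewrapping for src/ngraph/op/util: ReflowComments is true, ColumnLimit is 100, and the root file's CommentPragmas escape hatch is commented out, so clang-format reflows any comment wider than 100 columns. Deliberately wide comment blocks therefore need the guard pattern used throughout the hunks below. A minimal sketch of that pattern (the class name is hypothetical, not from this commit):

    // clang-format off
    /// | Type                   | Description                                                       |
    /// | ---------------------- | ----------------------------------------------------------------- |
    /// | \f$N[d_1,\dots,d_n]\f$ | A deliberately wide table row that clang-format must not rewrap.  |
    // clang-format on
    class ExampleOp // hypothetical stand-in for the op classes in this commit
    {
    };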
@@ -24,8 +24,9 @@ namespace ngraph
{
namespace util
{
/// \brief Abstract base class for arithmetic reduction operations, i.e., operations where chosen axes of the input tensors
/// are eliminated (reduced out) by repeated application of a particular binary arithmetic operation.
/// \brief Abstract base class for arithmetic reduction operations, i.e., operations
/// where chosen axes of the input tensors are eliminated (reduced out) by
/// repeated application of a particular binary arithmetic operation.
class ArithmeticReduction : public Op
{
protected:
...
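For intuition about "axes eliminated by repeated application of a binary arithmetic operation": reducing a {2, 3} tensor over axis 0 with + yields a {3} tensor. A plain C++ sketch of the semantics, not the nGraph API:

    #include <array>
    #include <cstddef>

    int main()
    {
        // Input of shape {2, 3}; summing out axis 0 leaves shape {3}.
        std::array<std::array<int, 3>, 2> input = {{{1, 2, 3}, {4, 5, 6}}};
        std::array<int, 3> reduced = {0, 0, 0};
        for (std::size_t i = 0; i < 2; ++i)
            for (std::size_t j = 0; j < 3; ++j)
                reduced[j] += input[i][j]; // repeated application of "+"
        // reduced == {5, 7, 9}
    }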
@@ -25,12 +25,17 @@ namespace ngraph
{
namespace util
{
/// \brief Abstract base class for elementwise binary arithmetic operations, i.e., operations where the same
/// scalar binary arithmetic operation is applied to each corresponding pair of elements in the two
/// input tensors. Implicit broadcast of input tensors is supported through one of the AutoBroadcast modes
// clang-format off
/// \brief Abstract base class for elementwise binary arithmetic operations, i.e.,
/// operations where the same scalar binary arithmetic operation is applied to
/// each corresponding pair of elements in the two input tensors. Implicit
/// broadcast of input tensors is supported through one of the AutoBroadcast
/// modes.
///
/// For example, if the underlying arithmetic operation (determined by the subclass) is \f$\mathit{op}(x,y)\f$, the input tensors
/// \f$[[x_0,y_0],[z_0,w_0]]\f$ and \f$[[x_1,y_1],[z_1,w_1]]\f$ will be mapped to \f$[[\mathit{op}(x_0,x_1),\mathit{op}(y_0,y_1)],[\mathit{op}(z_0,z_1),\mathit{op}(w_0,w_1)]]\f$.
/// For example, if the underlying arithmetic operation (determined by the subclass) is
/// \f$\mathit{op}(x,y)\f$, the input tensors
/// \f$[[x_0,y_0],[z_0,w_0]]\f$ and \f$[[x_1,y_1],[z_1,w_1]]\f$ will be mapped to
/// \f$[[\mathit{op}(x_0,x_1),\mathit{op}(y_0,y_1)],[\mathit{op}(z_0,z_1),\mathit{op}(w_0,w_1)]]\f$.
///
/// ## Inputs
///
@@ -42,9 +47,10 @@ namespace ngraph
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
/// | Type | Description |
/// | ---------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \mathit{op}(\texttt{arg0}[i_1,\dots,i_n],\texttt{arg1}[i_1,\dots,i_n])\f$. This will always have the same shape and element type as the input tensors (after auto broadcasting). |
// clang-format on
class BinaryElementwiseArithmetic : public Op
{
protected:
...
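The \f$\mathit{op}(x,y)\f$ mapping documented above is a pairwise element loop. A self-contained sketch with + as the underlying operation (illustrative C++; the function name is made up and none of this is nGraph's implementation):

    #include <cstddef>

    // Apply a scalar binary operation to each corresponding pair of elements.
    template <typename T, typename Op>
    void elementwise_binary(const T* arg0, const T* arg1, T* out, std::size_t count, Op op)
    {
        for (std::size_t i = 0; i < count; ++i)
            out[i] = op(arg0[i], arg1[i]);
    }

    int main()
    {
        int arg0[] = {1, 2, 3, 4};     // [[x_0,y_0],[z_0,w_0]] flattened
        int arg1[] = {10, 20, 30, 40}; // [[x_1,y_1],[z_1,w_1]] flattened
        int out[4];
        elementwise_binary(arg0, arg1, out, 4, [](int x, int y) { return x + y; });
        // out == {11, 22, 33, 44}
    }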
@@ -25,12 +25,17 @@ namespace ngraph
{
namespace util
{
/// \brief Abstract base class for elementwise binary comparison operations, i.e., operations where the same
/// scalar binary comparison operation is applied to each corresponding pair of elements in two
/// input tensors. Implicit broadcast of input tensors is supported through one of the AutoBroadcast modes
// clang-format off
/// \brief Abstract base class for elementwise binary comparison operations, i.e.,
/// operations where the same scalar binary comparison operation is applied to
/// each corresponding pair of elements in two input tensors. Implicit
/// broadcast of input tensors is supported through one of the AutoBroadcast
/// modes.
///
/// For example, if the underlying comparison operation (determined by the subclass) is \f$\mathit{op}(x,y)\f$, the input tensors
/// \f$[[x_0,y_0],[z_0,w_0]]\f$ and \f$[[x_1,y_1],[z_1,w_1]]\f$ will be mapped to \f$[[\mathit{op}(x_0,x_1),\mathit{op}(y_0,y_1)],[\mathit{op}(z_0,z_1),\mathit{op}(w_0,w_1)]]\f$.
/// For example, if the underlying comparison operation (determined by the subclass) is
/// \f$\mathit{op}(x,y)\f$, the input tensors \f$[[x_0,y_0],[z_0,w_0]]\f$ and
/// \f$[[x_1,y_1],[z_1,w_1]]\f$ will be mapped to
/// \f$[[\mathit{op}(x_0,x_1),\mathit{op}(y_0,y_1)],[\mathit{op}(z_0,z_1),\mathit{op}(w_0,w_1)]]\f$.
///
/// ## Inputs
///
@@ -38,13 +43,14 @@ namespace ngraph
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
/// | `autob`| AutoBroadcastSpec | Auto broadcast specification. |
/// | `autob`| AutoBroadcastSpec | Auto broadcast specification. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \mathit{op}(\texttt{arg0}[i_1,\dots,i_n],\texttt{arg1}[i_1,\dots,i_n])\f$. This will always have the same shape as the input tensors, and the element type `bool`. |
// clang-format on
class BinaryElementwiseComparison : public Op
{
protected:
...
@@ -24,13 +24,17 @@ namespace ngraph
{
namespace util
{
/// \brief Abstract base class for elementwise binary logical operations, i.e., operations where the same
/// scalar binary logical operation is applied to each corresponding pair of elements in two
/// boolean input tensors. Implicit broadcast of input tensors is supported through one of the AutoBroadcast modes
/// boolean input tensors.
// clang-format off
/// \brief Abstract base class for elementwise binary logical operations, i.e.,
/// operations where the same scalar binary logical operation is applied to
/// each corresponding pair of elements in two boolean input tensors. Implicit
/// broadcast of input tensors is supported through one of the AutoBroadcast
/// modes.
///
/// For example, if the underlying operation (determined by the subclass) is \f$\mathit{op}(x,y)\f$, the input tensors
/// \f$[[x_0,y_0],[z_0,w_0]]\f$ and \f$[[x_1,y_1],[z_1,w_1]]\f$ will be mapped to \f$[[\mathit{op}(x_0,x_1),\mathit{op}(y_0,y_1)],[\mathit{op}(z_0,z_1),\mathit{op}(w_0,w_1)]]\f$.
/// For example, if the underlying operation (determined by the subclass) is
/// \f$\mathit{op}(x,y)\f$, the input tensors \f$[[x_0,y_0],[z_0,w_0]]\f$ and
/// \f$[[x_1,y_1],[z_1,w_1]]\f$ will be mapped to
/// \f$[[\mathit{op}(x_0,x_1),\mathit{op}(y_0,y_1)],[\mathit{op}(z_0,z_1),\mathit{op}(w_0,w_1)]]\f$.
///
/// ## Inputs
///
@@ -45,6 +49,7 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \mathit{op}(\texttt{arg0}[i_1,\dots,i_n],\texttt{arg1}[i_1,\dots,i_n])\f$. This will always have the same shape as the input tensors, and the element type `bool`. |
// clang-format on
class BinaryElementwiseLogical : public Op
{
protected:
...
@@ -60,9 +60,9 @@ static ngraph::Shape calculate_broadcast_shape(ngraph::Shape left_shape, ngraph:
/// \brief Calculate the output shape of numpy-style broadcast operation for all input shapes.
///
/// This function finds the maximum tensor shape that will be the result of element-wise operation
/// that will be applied to the input shapes vector. The function also prepares the shape of each input
/// for the element-wise operation by left-padding those shapes so that their rank is equal to
/// the left_shape's rank.
/// that will be applied to the input shapes vector. The function also prepares the shape of each
/// input for the element-wise operation by left-padding those shapes so that their rank is equal
/// to the left_shape's rank.
///
/// \param input_shapes A vector of input shapes for which a common shape should be found
/// \return A pair that contains the target shape as its first object and a vector of padded
@@ -421,7 +421,8 @@ namespace ngraph
{
std::vector<std::size_t> result(output_shape.size() - input_shape.size());
// Populate the result vector with monotonic increasing series from 0 until
// output_shape_size, excluding values in range [start_match_axis, start_match_axis + input_shape.size()
// output_shape_size, excluding values in range:
// [start_match_axis, start_match_axis + input_shape.size()]
std::iota(std::begin(result), std::begin(result) + start_match_axis, 0);
std::iota(std::begin(result) + start_match_axis,
std::end(result),
...
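A worked example of the snippet above, under the assumption that result holds the output axes along which broadcasting happens: with an input of rank 2, an output of rank 4, and start_match_axis = 1, the matched axes are [1, 2], so the two std::iota calls fill result with {0, 3}:

    #include <cstddef>
    #include <iostream>
    #include <numeric>
    #include <vector>

    int main()
    {
        std::size_t input_rank = 2, output_rank = 4, start_match_axis = 1;
        std::vector<std::size_t> result(output_rank - input_rank);
        // Axes before the matched block: 0 .. start_match_axis - 1
        std::iota(std::begin(result), std::begin(result) + start_match_axis, 0);
        // Axes after the matched block: start_match_axis + input_rank onward
        std::iota(std::begin(result) + start_match_axis,
                  std::end(result),
                  start_match_axis + input_rank);
        for (auto axis : result)
            std::cout << axis << ' '; // prints: 0 3
    }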
@@ -27,7 +27,8 @@ namespace ngraph
{
namespace op
{
/// \brief Cast shape of all input nodes for an element-wise operation that requires shape-compatibility
/// \brief Cast shape of all input nodes for an element-wise operation that requires
/// shape-compatibility
///
/// \param inputs Original list of inputs
///
@@ -35,14 +36,16 @@ namespace ngraph
NodeVector numpy_style_broadcast(const NodeVector& inputs)
NGRAPH_DEPRECATED("Replace with numpy_style_broadcast_values");
/// \brief Cast shape of all input nodes for an element-wise operation that requires shape-compatibility
/// \brief Cast shape of all input nodes for an element-wise operation that requires
/// shape-compatibility
///
/// \param values Original list of inputs
///
/// \return Numpy-style broadcasted list of nodes.
OutputVector numpy_style_broadcast_values(const OutputVector& values);
/// \brief Cast shape of an output to the requested output shape using NumPy's broadcasting rules
/// \brief Cast shape of an output to the requested output shape using NumPy's broadcasting
/// rules
///
/// \param value original value
/// \param shape requested output shape
@@ -50,7 +53,8 @@ namespace ngraph
/// \return Broadcast output.
std::shared_ptr<Node> numpy_style_broadcast(const Output<Node>& value, const Shape& shape);
/// \brief Cast shape of two outputs to make them compatible for an element-wise binary operation.
/// \brief Cast shape of two outputs to make them compatible for an element-wise binary
/// operation.
///
/// If necessary the right-hand-side argument will be broadcast to match the shape
/// of the left-hand-side argument. The start of the mutually equal shape is
@@ -70,7 +74,8 @@ namespace ngraph
size_t start_match_axis)
NGRAPH_DEPRECATED("Replace with legacy_style_broadcast_values_for_binary_operation");
/// \brief Cast shape of two outputs to make them compatible for an element-wise binary operation.
/// \brief Cast shape of two outputs to make them compatible for an element-wise binary
/// operation.
///
/// If necessary the right-hand-side argument will be broadcast to match the shape
/// of left-hand-side argument. The starting of the mutually equal shape is
@@ -89,15 +94,19 @@ namespace ngraph
const Output<Node>& right,
size_t start_match_axis);
/// \brief Broadcast shape of two nodes to make them compatible for a matrix multiplication.
/// \brief Broadcast shape of two nodes to make them compatible for a matrix
/// multiplication.
///
/// \note This function is reflecting broadcasting behaviour of NumPy's `matmul` operation
/// (https://docs.scipy.org/doc/numpy/reference/generated/numpy.matmul.html)
/// This mean that only \"stack of matrices\" axes are bidirectionally broadcasted.
/// The last two dimension are left untouched.
/// \note This function reflects the broadcasting behaviour of NumPy's `matmul`
/// operation
/// (https://docs.scipy.org/doc/numpy/reference/generated/numpy.matmul.html).
/// This means that only \"stack of matrices\" axes are bidirectionally
/// broadcasted. The last two dimensions are left untouched.
///
/// \param[in] left The Node providing data for the left-hand side of matrix multiplication.
/// \param[in] right The Node providing data for the right-hand side of matrix multiplication.
/// \param[in] left The Node providing data for the left-hand side of matrix
/// multiplication.
/// \param[in] right The Node providing data for the right-hand side of matrix
/// multiplication.
///
/// \return The vector containing both nodes broadcasted.
///
@@ -105,15 +114,19 @@ namespace ngraph
const std::shared_ptr<Node>& right)
NGRAPH_DEPRECATED("Replace with numpy_style_broadcast_values_for_matmul_operation.");
/// \brief Broadcast shape of two nodes to make them compatible for a matrix multiplication.
/// \brief Broadcast shape of two nodes to make them compatible for a matrix
/// multiplication.
///
/// \note This function is reflecting broadcasting behaviour of NumPy's `matmul` operation
/// \note This function reflects the broadcasting behaviour of NumPy's `matmul`
/// operation
/// (https://docs.scipy.org/doc/numpy/reference/generated/numpy.matmul.html)
/// This mean that only \"stack of matrices\" axes are bidirectionally broadcasted.
/// The last two dimension are left untouched.
/// This means that only \"stack of matrices\" axes are bidirectionally
/// broadcasted. The last two dimensions are left untouched.
///
/// \param[in] left The Node providing data for the left-hand side of matrix multiplication.
/// \param[in] right The Node providing data for the right-hand side of matrix multiplication.
/// \param[in] left The Node providing data for the left-hand side of matrix
/// multiplication.
/// \param[in] right The Node providing data for the right-hand side of matrix
/// multiplication.
///
/// \return The vector containing both outputs broadcasted.
///
...
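A usage sketch for the numpy_style_broadcast_values declaration above, assuming the usual nGraph includes and the ngraph::op namespace this header lives in (the header path and the shapes are assumptions for illustration, not taken from this diff):

    #include "ngraph/op/parameter.hpp"
    #include "ngraph/op/util/broadcasting.hpp" // assumed location of these declarations

    using namespace ngraph;

    OutputVector broadcast_pair_example()
    {
        // Shapes {3, 1} and {1, 4} broadcast to a common shape {3, 4} under NumPy rules.
        auto a = std::make_shared<op::Parameter>(element::f32, Shape{3, 1});
        auto b = std::make_shared<op::Parameter>(element::f32, Shape{1, 4});
        return op::numpy_style_broadcast_values(OutputVector{a, b});
    }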
@@ -24,7 +24,8 @@ namespace ngraph
{
namespace util
{
/// \brief Abstract base class for fused ops, i.e ops that can be broken down into core ngraph ops
/// \brief Abstract base class for fused ops, i.e., ops that can be broken down into core
/// ngraph ops
///
class FusedOp : public Op
{
...
@@ -24,8 +24,9 @@ namespace ngraph
{
namespace util
{
/// \brief Abstract base class for logical reduction operations, i.e., operations where chosen axes of the input tensors
/// are eliminated (reduced out) by repeated application of a particular binary logical operation.
/// \brief Abstract base class for logical reduction operations, i.e., operations where
/// chosen axes of the input tensors are eliminated (reduced out) by repeated
/// application of a particular binary logical operation.
class LogicalReduction : public Op
{
protected:
...
@@ -74,7 +74,8 @@ namespace ngraph
///
ActivationFunction get_activation_function(std::size_t idx) const;
///
/// \brief Creates node with element-wise add operation with numpy broadcasting.
/// \brief Creates node with element-wise add operation with numpy
/// broadcasting.
///
/// \param[in] lhs The left hand side argument node.
/// \param[in] rhs The right hand side argument node.
@@ -83,7 +84,8 @@ namespace ngraph
///
static std::shared_ptr<Node> add(const Output<Node>& lhs, const Output<Node>& rhs);
///
/// \brief Creates node with element-wise subtract operation with numpy broadcasting.
/// \brief Creates node with element-wise subtract operation with numpy
/// broadcasting.
///
/// \param[in] lhs The left hand side argument node.
/// \param[in] rhs The right hand side argument node.
@@ -92,7 +94,8 @@ namespace ngraph
///
static std::shared_ptr<Node> sub(const Output<Node>& lhs, const Output<Node>& rhs);
///
/// \brief Creates node with element-wise multiply operation with numpy broadcasting.
/// \brief Creates node with element-wise multiply operation with numpy
/// broadcasting.
///
/// \param[in] lhs The left hand side argument node.
/// \param[in] rhs The right hand side argument node.
@@ -101,7 +104,8 @@ namespace ngraph
///
static std::shared_ptr<Node> mul(const Output<Node>& lhs, const Output<Node>& rhs);
///
/// \brief Creates node with element-wise clip operation with numpy broadcasting.
/// \brief Creates node with element-wise clip operation with numpy
/// broadcasting.
///
/// \param[in] data The input tensor for clipping.
///
...
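One plausible way such an add() helper could be implemented, assuming nGraph's Add op and its AutoBroadcastSpec constructor; this is a hedged sketch, not the code from this commit:

    #include <memory>

    #include "ngraph/op/add.hpp" // assumed header for op::Add

    using namespace ngraph;

    // Hypothetical free-function version of the add() helper declared above.
    std::shared_ptr<Node> add_with_numpy_broadcast(const Output<Node>& lhs,
                                                   const Output<Node>& rhs)
    {
        return std::make_shared<op::Add>(
            lhs, rhs, op::AutoBroadcastSpec(op::AutoBroadcastType::NUMPY));
    }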
@@ -24,11 +24,14 @@ namespace ngraph
{
namespace util
{
/// \brief Abstract base class for elementwise unary arithmetic operations, i.e., operations where the same
/// scalar arithmetic operation is applied to each element.
// clang-format off
/// \brief Abstract base class for elementwise unary arithmetic operations, i.e.,
/// operations where the same scalar arithmetic operation is applied to each
/// element.
///
/// For example, if the underlying operation (determined by the subclass) is \f$\mathit{op}(x)\f$, the input tensor
/// \f$[[x,y],[z,w]]\f$ will be mapped to \f$[[\mathit{op}(x),\mathit{op}(y)],[\mathit{op}(z),\mathit{op}(w)]]\f$.
/// For example, if the underlying operation (determined by the subclass) is
/// \f$\mathit{op}(x)\f$, the input tensor \f$[[x,y],[z,w]]\f$ will be mapped to
/// \f$[[\mathit{op}(x),\mathit{op}(y)],[\mathit{op}(z),\mathit{op}(w)]]\f$.
///
/// ## Inputs
///
@@ -41,6 +44,7 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \mathit{op}(\texttt{arg}[i_1,\dots,i_n])\f$. This will always have the same shape and element type as the input tensor. |
// clang-format on
class UnaryElementwiseArithmetic : public Op
{
protected:
...
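The unary mapping above is the single-argument analogue of the binary loop sketched earlier; with negation as the underlying operation (illustrative C++, not the nGraph API):

    #include <cstddef>

    // Apply a scalar unary operation to every element in place.
    template <typename T, typename Op>
    void elementwise_unary(T* arg, std::size_t count, Op op)
    {
        for (std::size_t i = 0; i < count; ++i)
            arg[i] = op(arg[i]);
    }

    int main()
    {
        int data[] = {1, -2, 3, -4}; // [[x,y],[z,w]] flattened
        elementwise_unary(data, 4, [](int x) { return -x; });
        // data == {-1, 2, -3, 4}
    }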
#
# OVERRIDE TO STYLE: Comments wrap.
#
BasedOnStyle: LLVM
IndentWidth: 4
UseTab: Never
Language: Cpp
Standard: Cpp11
AccessModifierOffset: -4
AlignConsecutiveDeclarations: false
AlignConsecutiveAssignments: false
AlignTrailingComments: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: true
AllowShortFunctionsOnASingleLine: Inline
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BreakBeforeBraces: Allman
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 100
#CommentPragmas: '.*'
IndentCaseLabels: false
IndentWrappedFunctionNames: true
KeepEmptyLinesAtTheStartOfBlocks: false
NamespaceIndentation: All
PointerAlignment: Left
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesInAngles: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
SortIncludes: false
ReflowComments: true
IncludeCategories:
- Regex: '^".*'
Priority: 3
- Regex: '^<.*'
Priority: 2
SortIncludes: true