Commit 8cde4078 authored by Adam Procter, committed by Scott Cyphers

New clang-format rules for src/ngraph/op/experimental (#3500)

parent 9db3eb75
#
# OVERRIDE TO STYLE: Comments wrap.
#
BasedOnStyle: LLVM
IndentWidth: 4
UseTab: Never
Language: Cpp
Standard: Cpp11
AccessModifierOffset: -4
AlignConsecutiveDeclarations: false
AlignConsecutiveAssignments: false
AlignTrailingComments: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: true
AllowShortFunctionsOnASingleLine: Inline
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BreakBeforeBraces: Allman
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 100
#CommentPragmas: '.*'
IndentCaseLabels: false
IndentWrappedFunctionNames: true
KeepEmptyLinesAtTheStartOfBlocks: false
NamespaceIndentation: All
PointerAlignment: Left
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesInAngles: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
SortIncludes: false
ReflowComments: true
IncludeCategories:
    - Regex: '^".*'
      Priority: 3
    - Regex: '^<.*'
      Priority: 2
SortIncludes: true
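
For context, here is a short illustrative snippet (invented for this description, not taken from the repository) showing roughly how code comes out under these options: Allman braces, 4-space indentation, fully indented namespaces, left-aligned pointers, constructor initializers broken before the comma, and short accessors kept inline.

namespace example
{
    // Invented class, formatted the way the options above would lay it out.
    class Accumulator
    {
    public:
        Accumulator(int* destination, int initial_value)
            : m_destination{destination}
            , m_value{initial_value}
        {
        }

        // AllowShortFunctionsOnASingleLine: Inline keeps this on one line.
        int value() const { return m_value; }

    private:
        int* m_destination; // PointerAlignment: Left
        int m_value;
    };
}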
...
@@ -33,14 +33,14 @@ op::DynBroadcast::DynBroadcast(const Output<Node>& arg,
 void op::DynBroadcast::validate_and_infer_types()
 {
     // shape node should have integer data type. For now we only allow i64
-    //TODO: potenially make the type more flexible to include other integer types
+    // TODO: potenially make the type more flexible to include other integer types
     auto shape_et = get_input_element_type(1);
     NODE_VALIDATION_CHECK(this,
                           shape_et.compatible(element::Type_t::i64),
                           "DynBroadcast shape must have element type i64, but has ",
                           shape_et);

-    //shape node should produce a one dimensional shape.
+    // shape node should produce a one dimensional shape.
     auto broadcast_shape_rank = get_input_partial_shape(1).rank();
     NODE_VALIDATION_CHECK(this,
                           broadcast_shape_rank.compatible(1),
@@ -48,14 +48,14 @@ void op::DynBroadcast::validate_and_infer_types()
                           broadcast_shape_rank);

     // axes node should have integer data type. For now we only allow i64
-    //TODO: potenially make the type more flexible to include other integer types
+    // TODO: potenially make the type more flexible to include other integer types
     auto axes_et = get_input_element_type(2);
     NODE_VALIDATION_CHECK(this,
                           axes_et.compatible(element::Type_t::i64),
                           "DynBroadcast axes must have element type i64, but has ",
                           axes_et);

-    //axes node should produce a one dimensional shape.
+    // axes node should produce a one dimensional shape.
     auto axes_shape_rank = get_input_partial_shape(2).rank();
     NODE_VALIDATION_CHECK(this,
                           axes_shape_rank.compatible(1),
...
...
@@ -23,7 +23,9 @@ namespace ngraph
 {
     namespace op
     {
-        /// \brief Operation which "adds" axes to an input tensor, replicating elements from the input as needed along the new axes.
+        /// \brief Operation which "adds" axes to an input tensor, replicating elements from the
+        /// input as needed along the new axes.
+        ///
         /// This is basically the "dynamic shape" version of the static Broadcast op.
         class DynBroadcast : public Op
         {
@@ -36,8 +38,9 @@ namespace ngraph
             ///
             /// \param arg Node that produces the input tensor to be broadcast.
             /// \param shape Node that produces shape of the output tensor.
-            /// \param broadcast_axes Node that produces the axis positions (0-based) in the result that are being broadcast. The
-            /// remaining axes in shape must be the same as the shape of arg.
+            /// \param broadcast_axes Node that produces the axis positions (0-based) in the result
+            /// that are being broadcast. The remaining axes in shape must be
+            /// the same as the shape of arg.
             DynBroadcast(const Output<Node>& arg,
                          const Output<Node>& shape,
                          const Output<Node>& broadcast_axes);
...
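
As a rough usage sketch for the constructor documented above (illustrative only, not part of this change; the graph setup uses the usual nGraph op::Parameter / op::Constant builders and is an assumption here), the shape and axes inputs are the i64, rank-1 nodes that validate_and_infer_types() checks for:

#include <cstdint>
#include <memory>
#include <vector>

#include "ngraph/op/constant.hpp"
#include "ngraph/op/experimental/dyn_broadcast.hpp"
#include "ngraph/op/parameter.hpp"

using namespace ngraph;

// Broadcast a length-3 vector to a 4x3 result by adding axis 0.
std::shared_ptr<Node> make_dyn_broadcast_example()
{
    auto arg = std::make_shared<op::Parameter>(element::f32, Shape{3});
    auto shape = op::Constant::create(element::i64, Shape{2}, std::vector<int64_t>{4, 3});
    auto axes = op::Constant::create(element::i64, Shape{1}, std::vector<int64_t>{0});
    return std::make_shared<op::DynBroadcast>(arg, shape, axes);
}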
...
@@ -40,7 +40,7 @@ void op::DynPad::validate_and_infer_types()
         this, arg_t.compatible(padding_value_t), "Padding value and arg type mismatch");

     // shape node should have integer data type. For now we only allow i64
-    //TODO: potenially make the type more flexible to include other integer types
+    // TODO: potenially make the type more flexible to include other integer types
     auto padding_below_et = get_input_element_type(1);
     NODE_VALIDATION_CHECK(this,
                           padding_below_et.compatible(element::Type_t::i64),
...
...
@@ -23,7 +23,8 @@ namespace ngraph
 {
     namespace op
     {
-        /// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a bounding box, optionally with stride.
+        /// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a
+        /// bounding box, optionally with stride.
         class DynReplaceSlice : public Op
         {
         public:
...
...
@@ -35,16 +35,17 @@ namespace ngraph
             static const std::string type_name;
             const std::string& description() const override { return type_name; }
             DynReshape() = default;
-            /// \brief Constructs a dynamic reshape operation. This operation does not perform transpose.
+            /// \brief Constructs a dynamic reshape operation. This operation does not perform
+            /// transpose.
             ///
             /// \param arg The tensor to be reshaped.
             /// \param pattern The node that defines output shape pattern.
             /// If the input shape is \f$(a_0,\dots,a_{k-1})\f$ then the output shape must
             /// be of the form \f$(b_0,\dots,b_{j-1})\f$ where \f$\Pi(a_i) = \Pi(b_i)\f$.
-            /// A value of -1 is allowed for at most one dimension, in which case the dimension
-            /// size is inferred based on element count of input tensor.
-            /// \param zero_flag Treats zeros in `pattern` as wildcard flags indicating a copy from input
-            /// shape at the same index.
+            /// A value of -1 is allowed for at most one dimension, in which case the
+            /// dimension size is inferred based on element count of input tensor.
+            /// \param zero_flag Treats zeros in `pattern` as wildcard flags indicating a copy from
+            /// input shape at the same index.
             DynReshape(const Output<Node>& arg,
                        const Output<Node>& pattern,
                        bool zero_flag = false);
...
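
A similar sketch for the pattern semantics described above (again illustrative and not part of the commit): with zero_flag = true, a 0 in `pattern` copies the input dimension at that index, and the single -1 is inferred from the element count.

#include <cstdint>
#include <memory>
#include <vector>

#include "ngraph/op/constant.hpp"
#include "ngraph/op/experimental/dyn_reshape.hpp"
#include "ngraph/op/parameter.hpp"

using namespace ngraph;

// Reshape a 2x3x4 tensor with pattern {0, 4, -1} and zero_flag = true:
// dim 0 is copied from the input (2), dim 1 becomes 4, and -1 is inferred
// as 3 so that 2 * 4 * 3 == 2 * 3 * 4.
std::shared_ptr<Node> make_dyn_reshape_example()
{
    auto arg = std::make_shared<op::Parameter>(element::f32, Shape{2, 3, 4});
    auto pattern = op::Constant::create(element::i64, Shape{3}, std::vector<int64_t>{0, 4, -1});
    return std::make_shared<op::DynReshape>(arg, pattern, /*zero_flag=*/true);
}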
...
@@ -23,7 +23,8 @@ namespace ngraph
 {
     namespace op
     {
-        /// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a bounding box, optionally with stride.
+        /// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a
+        /// bounding box, optionally with stride.
         class DynSlice : public Op
         {
         public:
...