Unverified commit 79378c77 authored by Robert Kimball, committed by GitHub

Normalize doxygen comment syntax (#1503)

* normalize doxygen keywords

* more normalization

* normalize more onnx files

* yet more normalization

* fix doxygen error
parent 12a0dca5
@@ -40,36 +40,36 @@ namespace ngraph
        class Adjoints
        {
        public:
            /// \brief (dy/dx)(c) for all x used to compute y
            ///
            /// \param y The dependent value
            /// \param c An expression for where to evaluate the derivatives
            Adjoints(const NodeVector& y, const NodeVector& c);
            Adjoints(const Adjoints& adjoints) = default;
            Adjoints& operator=(const Adjoints& adjoints) = default;
            Adjoints() = default;
            /// \brief (dy/dx)(c)
            ///
            /// \param x The node whose adjoint is desired.
            const NodeVector& get(const std::shared_ptr<Node>& x);
            /// \brief Add a backprop contribution to x's adjoint
            ///
            /// \param x The adjoint node
            /// \param delta A backprop contribution
            void add_delta(const std::shared_ptr<Node>& x,
                           const std::shared_ptr<Node>& delta,
                           size_t output_index = 0);
            /// \brief Add a backprop contribution to a slice of x's adjoint
            ///
            /// \param x The adjoint node
            /// \param delta A backprop contribution
            /// \param lower_bounds Lower bounds of slice to add to
            /// \param upper_bounds Upper bounds of slice to add to
            /// \param strides Strides of slice to add to
            void add_delta_to_slice(const std::shared_ptr<Node>& x,
                                    const std::shared_ptr<Node>& delta,
                                    const Coordinate& lower_bounds,
...
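For context, a minimal usage sketch of this class. The header path, the `autodiff` namespace, and `op::Multiply` are assumptions drawn from the wider nGraph tree, not from this diff:

#include "ngraph/autodiff/adjoints.hpp" // header path assumed
#include "ngraph/ngraph.hpp"

using namespace ngraph;

std::shared_ptr<Node> backprop_wrt_a()
{
    auto a = std::make_shared<op::Parameter>(element::f32, Shape{2, 3});
    auto b = std::make_shared<op::Parameter>(element::f32, Shape{2, 3});
    auto y = std::make_shared<op::Multiply>(a, b);

    // c is where the derivative is evaluated; a parameter of y's shape
    // gives the usual vector-Jacobian product.
    auto c = std::make_shared<op::Parameter>(element::f32, Shape{2, 3});

    autodiff::Adjoints adjoints(NodeVector{y}, NodeVector{c});
    return adjoints.get(a).at(0); // (dy/da)(c); equals b .* c here
}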
@@ -70,13 +70,13 @@ namespace ngraph
            ngraph::Shape m_final_shape;
        };
        /// \brief Compute the details regarding what reshape and/or broadcast operations must be applied to
        /// arg1 and/or arg2, as well as what the final resulting shape shall be.
        ///
        /// If this algorithm cannot handle the particular combination of shapes supplied as inputs, throw
        /// an ngraph::builder::autobroadcast_incompatible_shapes exception.
        ///
        /// \exception ngraph::builder::autobroadcast_incompatible_shapes
        static Autobroadcast_plan
            compute_shapes_and_broadcast_axes(const ngraph::Shape& arg1_in_shape,
                                              const ngraph::Shape& arg2_in_shape)
...
@@ -42,7 +42,7 @@ namespace ngraph
            static std::string error_str(const ngraph::Shape& shape1, const ngraph::Shape& shape2);
        };
        /// \brief Wrap two graph nodes, if necessary, to obtain values with identical shapes,
        /// using NumPy's auto-broadcast rules.
        ///
        /// The elements in the std::pair returned by this function correspond to those supplied
@@ -58,33 +58,33 @@ namespace ngraph
        /// There are some shape combinations which the autobroadcast algorithm cannot handle.
        /// An exception is thrown when such combinations are provided to this function.
        ///
        /// \pre
        ///   - \p args.first is not null
        ///   - \p args.second is not null
        ///
        /// \post
        ///   - The ngraph::Node objects pointed to by \p args.first and \p args.second have not been
        ///     altered by this function, except by possibly having added consumers of their values.
        ///
        ///   - If an exception was not thrown, then the return value's \p first and \p second
        ///     elements point to ngraph::Node objects whose output values have the same shape.
        ///
        /// \exception ngraph::builder::autobroadcast_incompatible_shapes
        std::pair<std::shared_ptr<Node>, std::shared_ptr<Node>>
            numpy_broadcast(const std::pair<std::shared_ptr<Node>, std::shared_ptr<Node>>& args);
        /// Create a new \p NodeType node, and any additional nodes required to simulate NumPy-style autobroadcast
        /// semantics. Intended for binary operations such as "Add".
        ///
        /// \param [in] operand1_reshapeable The first operand to supply to the \p NodeType constructor. Subject to
        ///   being wrapped with additional nodes required for autobroadcasting. Must not be null.
        ///
        /// \param [in] operand2_reshapeable The second operand to supply to the \p NodeType constructor. Subject to
        ///   being wrapped with additional nodes required for autobroadcasting. Must not be null.
        ///
        /// \return The sink node of any/all nodes created by this function. Will never be null.
        ///
        /// \exception ngraph::builder::autobroadcast_incompatible_shapes
        template <typename NodeType>
        std::shared_ptr<NodeType>
            make_with_numpy_broadcast(const std::shared_ptr<Node>& operand1_reshapeable,
@@ -99,18 +99,18 @@ namespace ngraph
        /// semantics. Intended for non-binary operations such as "Select", where precisely the second and third
        /// operands are subject to autobroadcast semantics.
        ///
        /// \param [in] operand1 This operand is not subject to autobroadcast logic, and will be passed as-is as
        ///   the first argument to the \p NodeType constructor.
        ///
        /// \param [in] operand2_reshapeable The second operand to supply to the \p NodeType constructor. Subject to
        ///   being wrapped with additional nodes required for autobroadcasting. Must not be null.
        ///
        /// \param [in] operand3_reshapeable The third operand to supply to the \p NodeType constructor. Subject to
        ///   being wrapped with additional nodes required for autobroadcasting. Must not be null.
        ///
        /// \return The sink node of any/all nodes created by this function. Will never be null.
        ///
        /// \exception ngraph::builder::autobroadcast_incompatible_shapes
        template <typename NodeType>
        std::shared_ptr<NodeType>
            make_with_numpy_broadcast(const std::shared_ptr<Node>& operand1,
...
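A hedged usage sketch of the documented builder interface (the `builder` namespace and op classes are assumed from the surrounding tree; the header is the one this commit later includes from ngraph.hpp): broadcasting a {3,4} operand against a {2,3,4} operand before an Add.

#include "ngraph/builder/autobroadcast.hpp"
#include "ngraph/ngraph.hpp"

using namespace ngraph;

std::shared_ptr<Node> broadcast_add()
{
    auto a = std::make_shared<op::Parameter>(element::f32, Shape{2, 3, 4});
    auto b = std::make_shared<op::Parameter>(element::f32, Shape{3, 4});

    // Wraps b with the nodes needed so both values have shape {2, 3, 4},
    // then constructs the Add with the reshaped operands.
    return builder::make_with_numpy_broadcast<op::Add>(a, b);
}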
@@ -35,44 +35,44 @@ namespace ngraph
            friend class Node;
        public:
            /// \param node The node that owns this input
            /// \param index The position of this tensor in all input tensors
            /// \param output The output that supplies a value for this input
            Input(Node* node, size_t index, Output& output);
            /// \return the node that this is an input of
            std::shared_ptr<Node> get_node() const;
            /// \return the position within all supplied tensors of this input
            size_t get_index() const { return m_index; }
            /// \return the connected output
            const Output& get_output() const { return *m_output; }
            /// \return the connected output
            Output& get_output() { return *m_output; }
            /// \return the tensor of the connected output
            const Tensor& get_tensor() const;
            /// \return the tensor of the connected output
            Tensor& get_tensor();
            void replace_output(std::shared_ptr<Node> node, size_t i);
            void replace_output(Output& output);
        protected:
            /// \return the tensor view for the connected output
            std::shared_ptr<const TensorView> get_tensor_view() const;
            /// \return the tensor view for the connected output
            std::shared_ptr<TensorView> get_tensor_view();
            /// \return the tensor view type for the connected output
            std::shared_ptr<const TensorViewType> get_tensor_view_type() const;
        public:
            /// \return the shape of the connected output
            const Shape& get_shape() const;
            /// \return the element type of the connected output
            const element::Type& get_element_type() const;
        protected:
...
@@ -29,7 +29,7 @@ namespace ngraph
        namespace layout
        {
            /// \brief The standard strided layout, used for row-major and column-major, their permutations and slices.
            ///
            /// The linearized offset of an index I is dot(I, strides) + offset.
            class DenseTensorViewLayout : public TensorViewLayout
...
@@ -34,7 +34,7 @@ namespace ngraph
        namespace layout
        {
            /// \brief Interface for describing implementations of tensor views.
            ///
            /// Kernel selection will need to pay attention to the layout.
            class TensorViewLayout
@@ -59,7 +59,7 @@ namespace ngraph
                const element::Type& get_element_type() const;
                const Shape& get_shape() const;
                virtual const Strides& get_strides() const = 0;
                /// \brief Return true if this and other have the same element interpretation
                virtual bool operator==(const TensorViewLayout& other) const = 0;
                bool operator!=(const TensorViewLayout& other) const { return !(*this == other); }
                void set_tensor_view_type(const element::Type& element_type, const Shape& shape);
...
@@ -37,9 +37,9 @@ namespace ngraph
        class Output
        {
        public:
            /// \param node Node that owns this output.
            /// \param index Position of the output tensor in all output tensors
            /// \param tensor_view The view of this tensor; where the value will be written
            Output(Node* node, size_t index, const std::shared_ptr<TensorView>& tensor_view);
            std::shared_ptr<Node> get_node() const;
@@ -55,13 +55,13 @@ namespace ngraph
            Tensor& get_tensor() const;
        protected:
            /// \return the tensor view type for the output
            std::shared_ptr<const TensorViewType> get_tensor_view_type() const;
        public:
            /// \return the shape of the output
            const Shape& get_shape() const;
            /// \return the element type of the output
            const element::Type& get_element_type() const;
        protected:
...
@@ -27,13 +27,13 @@ namespace ngraph
    namespace descriptor
    {
        /// \brief A PrimaryTensorView owns the tensor. All other views are the result
        /// of some index operation on the primary view.
        class PrimaryTensorView : public TensorView
        {
        public:
            /// \param tensor_view_type The type for this view.
            /// \param name Description of the tensor, for debugging.
            PrimaryTensorView(const std::shared_ptr<const TensorViewType>& tensor_view_type,
                              const std::string& name);
...
@@ -38,7 +38,7 @@ namespace ngraph
        class Tensor;
        class TensorView;
        /// \brief Compile-time descriptor of a first-class value that is a view of a tensor.
        class TensorView
        {
            TensorView(const TensorView&) = delete;
...
@@ -24,21 +24,21 @@ namespace ngraph
{
    namespace file_util
    {
        /// \brief Returns the name with extension for a given path
        /// \param path The path to the output file
        std::string get_file_name(const std::string& path);
        /// \brief Returns the file extension
        /// \param path The path to the output file
        std::string get_file_ext(const std::string& path);
        /// \brief Returns the directory portion of the given path
        /// \param path The path to the output file
        std::string get_directory(const std::string& path);
        /// \brief Joins two path segments into a single path
        /// \param s1 Left side of path
        /// \param s2 Right side of path
        std::string path_join(const std::string& s1, const std::string& s2);
        std::string path_join(const std::string& s1, const std::string& s2, const std::string& s3);
        std::string path_join(const std::string& s1,
@@ -46,55 +46,54 @@ namespace ngraph
                              const std::string& s3,
                              const std::string& s4);
        /// \brief Returns the size in bytes of filename
        /// \param filename The name of the file
        size_t get_file_size(const std::string& filename);
        /// \brief Removes all files and directories starting at dir
        /// \param dir The path of the directory to remove
        void remove_directory(const std::string& dir);
        /// \brief Create a directory
        /// \param dir Path of the directory to create
        /// \return true if the directory was created, false otherwise
        bool make_directory(const std::string& dir);
        /// \brief Gets the path of the system temporary directory
        /// \return the path to the system temporary directory
        std::string get_temp_directory_path();
        /// \brief Removes a file from the filesystem
        /// \param file The path to the file to be removed
        void remove_file(const std::string& file);
        /// \brief Reads the contents of a file
        /// \param path The path of the file to read
        /// \return vector<char> of the file's contents
        std::vector<char> read_file_contents(const std::string& path);
        /// \brief Reads the contents of a file
        /// \param path The path of the file to read
        /// \return string of the file's contents
        std::string read_file_to_string(const std::string& path);
        /// \brief Iterate through files and optionally directories. Symbolic links are skipped.
        /// \param path The path to iterate over
        /// \param func A callback function called with each file or directory encountered
        /// \param recurse Optional parameter to enable recursing through path
        void iterate_files(const std::string& path,
                           std::function<void(const std::string& file, bool is_dir)> func,
                           bool recurse = false,
                           bool include_links = false);
        /// \brief Create a temporary file
        /// \param extension Optional extension for the temporary file
        /// \return Name of the temporary file
        std::string tmp_filename(const std::string& extension = "");
        /// \brief Test for the existence of a path or file
        /// \param path The path to test
        /// \return true if the path exists, false otherwise
        bool exists(const std::string& path);
    }
}
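A brief sketch of how these helpers compose (the header path is assumed; file names are illustrative):

#include "ngraph/file_util.hpp" // header path assumed

#include <iostream>

using namespace ngraph;

void file_util_demo()
{
    std::string dir = file_util::path_join(file_util::get_temp_directory_path(), "demo");
    if (file_util::make_directory(dir)) // false if it already existed
    {
        std::cout << "created " << dir << "\n";
    }
    std::string path = file_util::path_join(dir, "example.txt");
    if (file_util::exists(path))
    {
        std::cout << file_util::get_file_name(path) << " has "
                  << file_util::get_file_size(path) << " bytes\n";
        std::string contents = file_util::read_file_to_string(path);
    }
    // Walk the directory; symbolic links are skipped by default.
    file_util::iterate_files(dir,
                             [](const std::string& file, bool is_dir) {
                                 std::cout << (is_dir ? "dir:  " : "file: ") << file << "\n";
                             },
                             /*recurse=*/true);
}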
@@ -26,14 +26,12 @@ namespace ngraph
{
    namespace op
    {
        /// \brief Performs ONNX Conv operation.
        ///
        /// \param node The ONNX node object representing this operation.
        ///
        /// \return The vector of nGraph nodes producing the output of the ONNX convolution
        ///         operation.
        NodeVector conv(const Node& node);
    } // namespace op
...
@@ -25,42 +25,38 @@ namespace ngraph
{
    namespace onnx_import
    {
        /// \brief Generate a list of broadcast axes.
        ///
        /// \details Informally, a broadcast "adds" axes to the input tensor, replicating
        ///          elements from the input tensor as needed to fill the new dimensions.
        ///          The function calculates which of the output axes are added in this way.
        ///
        /// \param output_shape The new shape for the output tensor.
        /// \param input_shape The shape of the input tensor.
        /// \param start_match_axis The axis along which we want to replicate elements:
        ///        the starting axis position (0-based) in the output shape from which
        ///        the current shape of the tensor matches the desired new shape.
        ///
        /// \return The indices of the added axes.
        AxisSet calculate_broadcast_axes(const Shape& output_shape,
                                         const Shape& input_shape,
                                         std::size_t start_match_axis);
        /// \brief Generate a list of broadcast axes, matching shapes from the right.
        ///
        /// \details Broadcast "adds" elements along axes to the input tensor, replicating
        ///          elements from the input tensor as needed to fill the new dimensions.
        ///          The function calculates which of the output axes are added in this way.
        ///
        ///          This function will attempt to match shapes, assuming the current shape
        ///          matches the rightmost positions of the desired new shape. This behaviour
        ///          is similar to NumPy's broadcasting.
        ///
        /// \param output_shape The new shape for the output tensor.
        /// \param input_shape The shape of the input tensor.
        ///
        /// \return The indices of the added axes.
        inline AxisSet calculate_broadcast_axes(const Shape& output_shape, const Shape& input_shape)
        {
            return calculate_broadcast_axes(
...
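A worked example of the NumPy-style overload; the expected result is inferred from the rightmost-matching rule described above, not stated in this diff:

#include "ngraph/shape.hpp" // header path assumed

using namespace ngraph;

void broadcast_axes_demo()
{
    // Input {3, 4} is matched against the rightmost positions of {2, 3, 4},
    // so only axis 0 is "added" by the broadcast.
    AxisSet axes = onnx_import::calculate_broadcast_axes(Shape{2, 3, 4}, Shape{3, 4});
    // axes == AxisSet{0}
}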
@@ -28,68 +28,57 @@ namespace ngraph
{
    namespace convpool
    {
        /// \brief Get the shape of the kernel (filter) in pixels.
        ///
        /// \param node The Node ptr representing a Conv or Pool operation.
        /// \return The kernel Shape object representing its dimensions (height, width, depth).
        Shape get_kernel_shape(const Node& node);
        /// \brief Get the number of pixels to stride the operation by in each direction.
        ///
        /// \param node The Node ptr representing a Conv or Pool operation.
        /// \param kernel_shape The shape of the kernel which we retrieve strides for.
        /// \return The Strides object containing the stride in each dimension (height, width, depth).
        Strides get_strides(const Node& node, const Shape& kernel_shape);
        /// \brief Get the number of pixels to stride the operation by in each direction.
        ///
        /// \param node The Node ptr representing a Conv or Pool operation.
        /// \return The Strides object containing the stride in each dimension (height, width, depth).
        Strides get_strides(const Node& node);
        /// \brief Get the number of pixels for filter dilation in each direction.
        ///
        /// \param node The Node ptr representing an ONNX operation.
        /// \return The Strides object containing the number of pixels for filter dilation
        ///         (height, width, depth).
        Strides get_dilations(const Node& node);
        /// \brief Get padding values for the operation described by an ONNX node.
        /// \details If the `auto_pad` attribute is specified as SAME_UPPER, SAME_LOWER, or VALID,
        ///          the values are calculated. Otherwise the values are taken from the `pads` attribute.
        ///
        ///          The `pads` value should follow [x1_begin, x2_begin..., x1_end, x2_end,...].
        ///
        /// \param node The Node ptr representing an ONNX operation.
        /// \param kernel_shape The shape of the kernel which we retrieve pads for.
        ///
        /// \return A pair of (padding_above, padding_below), whose elements contain the number of
        ///         pixels to pad in the respective dimensions (height, width, depth).
        std::pair<CoordinateDiff, CoordinateDiff> get_pads(const Node& node,
                                                           const Shape& kernel_shape);
        /// \brief Get padding values for the operation described by an ONNX node.
        /// \details If the `auto_pad` attribute is specified as SAME_UPPER, SAME_LOWER, or VALID,
        ///          the values are calculated. Otherwise the values are taken from the `pads` attribute.
        ///
        ///          The `pads` value should follow [x1_begin, x2_begin..., x1_end, x2_end,...].
        ///
        /// \param node The Node ptr representing an ONNX operation.
        ///
        /// \return A pair of (padding_above, padding_below), whose elements contain the number of
        ///         pixels to pad in the respective dimensions (height, width, depth).
        inline std::pair<CoordinateDiff, CoordinateDiff> get_pads(const Node& node)
        {
            return get_pads(node, get_kernel_shape(node));
...
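To make the `pads` layout concrete, a short worked example inside an importer op function, where `node` is the ONNX node (attribute values are illustrative):

// An ONNX 2-D operation with pads = {1, 2, 3, 4} means, per the layout
// [x1_begin, x2_begin, ..., x1_end, x2_end, ...]:
//   axis 1 (height): 1 pixel before, 3 pixels after
//   axis 2 (width):  2 pixels before, 4 pixels after
// get_pads() folds this (or the auto_pad calculation) into the documented pair:
std::pair<CoordinateDiff, CoordinateDiff> pads = convpool::get_pads(node);
CoordinateDiff padding_above = pads.first;  // first element of the pair
CoordinateDiff padding_below = pads.second; // second element of the pair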
@@ -22,24 +22,20 @@ namespace ngraph
{
    namespace onnx_import
    {
        /// \brief Permute axes according to the specified axes_order parameter.
        ///
        /// \param node The node whose axes we want to permute.
        /// \param axes_order The permutation of the node's tensor axes.
        ///
        /// \return New node with permuted axes.
        std::shared_ptr<ngraph::Node> reorder_axes(const std::shared_ptr<ngraph::Node>& node,
                                                   std::vector<int> axes_order);
        /// \brief Return the transposed tensor (with axes in reversed order).
        ///
        /// \param node Input tensor we want to transpose.
        ///
        /// \return New node with reversed dimensions.
        std::shared_ptr<ngraph::Node> transpose(const std::shared_ptr<ngraph::Node>& node);
    } // namespace onnx_import
...
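An illustrative call, assuming `node` produces a rank-3 tensor: transpose is equivalent to reorder_axes with the axis order fully reversed.

using namespace ngraph;

void reshape_demo(const std::shared_ptr<Node>& node) // e.g. a {2, 3, 4} tensor
{
    // Explicit permutation: result shape is {4, 2, 3}.
    auto permuted = onnx_import::reorder_axes(node, {2, 0, 1});

    // Full reversal, equivalent to reorder_axes(node, {2, 1, 0}): shape {4, 3, 2}.
    auto transposed = onnx_import::transpose(node);
}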
@@ -24,23 +24,23 @@
#error("ngraph.hpp is for external use only")
#endif
/// \namespace ngraph
/// \brief The Intel Nervana Graph C++ API.
/// \namespace ngraph::descriptor
/// \brief Descriptors are compile-time representations of objects that will appear at run-time.
/// \namespace ngraph::descriptor::layout
/// \brief Layout descriptors describe how tensor views are implemented.
/// \namespace ngraph::op
/// \brief Ops used in graph-building.
/// \namespace ngraph::runtime
/// \brief The objects used for executing the graph.
/// \namespace ngraph::builder
/// \brief Convenience functions that create additional graph nodes to implement commonly-used
/// recipes, for example auto-broadcast.
#include "ngraph/builder/autobroadcast.hpp"
...
@@ -32,15 +32,15 @@ namespace ngraph
class ngraph::pass::CommonFunctionCollection : public ModulePass
{
public:
    /// \brief Create the CommonFunctionCollection pass
    /// \param function_emitter - This is a function that takes a reference to a Node and a string.
    ///     The string is the name of the emitted function and the body of the function is
    ///     the code for the op.
    /// \param result_map - This is a mapping of source node -> emitted static function node, where
    ///     the key is the source node and the value is the emitted static function node. The
    ///     name of the function to call is create_function_name(<emitted static function node>)
    /// \param emitted_functions - string to contain the emitted code for all of the static
    ///     functions.
    CommonFunctionCollection(std::function<std::string(Node&, std::string)> function_emitter,
                             std::unordered_map<Node*, Node*>& result_map,
                             std::string& emitted_functions);
@@ -49,10 +49,10 @@ public:
    bool run_on_module(std::vector<std::shared_ptr<ngraph::Function>>&) override;
    /// \brief Construct the name of the function to call for this op
    /// \param node - Node used to construct the function name. This node is the `value` of the
    ///     result_map passed to the pass's constructor.
    /// \return string containing the name of the function to be called
    static std::string create_function_name(const Node& node);
private:
...
@@ -26,7 +26,7 @@ namespace ngraph
    }
}
/// \brief Allocates a block of memory on the specified alignment. The actual size of the
/// allocated memory is larger than the requested size by the alignment, so allocating 1 byte
/// on 64 byte alignment will allocate 65 bytes.
class ngraph::runtime::AlignedBuffer
...
@@ -33,67 +33,67 @@ namespace ngraph
    }
}
/// \brief Interface to a generic backend.
///
/// Backends are responsible for function execution and value allocation.
class ngraph::runtime::Backend
{
public:
    virtual ~Backend();
    /// \brief Create a new Backend object
    /// \param type The name of a registered backend, such as "CPU" or "GPU".
    ///     To select a subdevice use "GPU:N" where `N` is the subdevice number.
    /// \returns shared_ptr to a new Backend or nullptr if the named backend
    ///     does not exist.
    static std::shared_ptr<Backend> create(const std::string& type);
    /// \brief Query the list of registered devices
    /// \returns A vector of all registered devices.
    static std::vector<std::string> get_registered_devices();
    /// \brief Create a tensor specific to this backend
    /// \param element_type The type of the tensor element
    /// \param shape The shape of the tensor
    /// \returns shared_ptr to a new backend-specific tensor
    virtual std::shared_ptr<ngraph::runtime::TensorView>
        create_tensor(const ngraph::element::Type& element_type, const Shape& shape) = 0;
    /// \brief Create a tensor specific to this backend
    /// \param element_type The type of the tensor element
    /// \param shape The shape of the tensor
    /// \param memory_pointer A pointer to a buffer used for this tensor. The size of the buffer
    ///     must be sufficient to contain the tensor. The lifetime of the buffer is the
    ///     responsibility of the caller.
    /// \returns shared_ptr to a new backend-specific tensor
    virtual std::shared_ptr<ngraph::runtime::TensorView> create_tensor(
        const ngraph::element::Type& element_type, const Shape& shape, void* memory_pointer) = 0;
    /// \brief Create a tensor of C type T specific to this backend
    /// \param shape The shape of the tensor
    /// \returns shared_ptr to a new backend-specific tensor
    template <typename T>
    std::shared_ptr<ngraph::runtime::TensorView> create_tensor(const Shape& shape)
    {
        return create_tensor(element::from<T>(), shape);
    }
    /// \brief Compiles a Function.
    /// \param func The function to compile
    /// \returns true if compile is successful, false otherwise
    virtual bool compile(std::shared_ptr<Function> func) = 0;
    /// \brief Executes a single iteration of a Function. If func is not compiled the call will
    ///     compile it.
    /// \param func The function to execute
    /// \returns true if iteration is successful, false otherwise
    virtual bool call(std::shared_ptr<Function> func,
                      const std::vector<std::shared_ptr<runtime::TensorView>>& outputs,
                      const std::vector<std::shared_ptr<runtime::TensorView>>& inputs) = 0;
    /// \brief Executes a single iteration of a Function. If func is not compiled the call will
    ///     compile it. Optionally validates the inputs and outputs against the function graph.
    /// \param func The function to execute
    /// \returns true if iteration is successful, false otherwise
    bool call_with_validate(std::shared_ptr<Function> func,
                            const std::vector<std::shared_ptr<runtime::TensorView>>& outputs,
                            const std::vector<std::shared_ptr<runtime::TensorView>>& inputs)
@@ -102,19 +102,19 @@ public:
        return call(func, outputs, inputs);
    }
    /// \brief Compiled functions may be cached. This function removes a compiled function
    ///     from the cache.
    /// \param func The function whose compiled form is removed from the cache
    virtual void remove_compiled_function(std::shared_ptr<Function> func);
    /// \brief Enable the collection of per-op performance information on a specified Function.
    ///     Data is collected via the `get_performance_data` method.
    /// \param func The function to collect performance data on.
    /// \param enable Set to true to enable or false to disable data collection
    virtual void enable_performance_data(std::shared_ptr<Function> func, bool enable) {}
    /// \brief Collect performance information gathered on a Function.
    /// \param func The function to get collected data for.
    /// \returns Vector of PerformanceCounter information.
    virtual std::vector<PerformanceCounter>
        get_performance_data(std::shared_ptr<Function> func) const;
...
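End-to-end, the documented interface composes like this. A minimal sketch: the op classes and the Function/ParameterVector construction are assumptions from the core API, not this diff.

#include "ngraph/ngraph.hpp"

using namespace ngraph;

void run_once()
{
    auto a = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});
    auto b = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});
    auto f = std::make_shared<Function>(std::make_shared<op::Add>(a, b),
                                        op::ParameterVector{a, b});

    auto backend = runtime::Backend::create("CPU"); // nullptr if "CPU" is not registered
    auto t_a = backend->create_tensor<float>(Shape{2, 2});
    auto t_b = backend->create_tensor<float>(Shape{2, 2});
    auto t_r = backend->create_tensor<float>(Shape{2, 2});

    float data[4] = {1, 2, 3, 4};
    t_a->write(data, 0, sizeof(data)); // raw byte write, per the TensorView API
    t_b->write(data, 0, sizeof(data));

    backend->compile(f);                 // optional: call() compiles on demand
    backend->call(f, {t_r}, {t_a, t_b}); // outputs first, then inputs
}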
@@ -46,15 +46,15 @@ class ngraph::runtime::BackendManager
    friend class Backend;
public:
    /// \brief Used by built-in backends to register their name and constructor.
    ///     This function is not used if the backend is built as a shared library.
    /// \param name The name of the registering backend in UPPER CASE.
    /// \param backend_constructor A function of type new_backend_t which will be called to
    ///     construct an instance of the registered backend.
    static void register_backend(const std::string& name, new_backend_t backend_constructor);
    /// \brief Query the list of registered devices
    /// \returns A vector of all registered devices.
    static std::vector<std::string> get_registered_backends();
private:
...
@@ -46,7 +46,7 @@ namespace ngraph
                          EntryPoint compiled_function);
            ~CPU_CallFrame();
            /// \brief Invoke the function with values matching the signature of the function.
            ///
            /// Tuples will be expanded into their tensor views to build the call frame.
            void call(const std::vector<std::shared_ptr<runtime::TensorView>>& outputs,
...
@@ -54,7 +54,7 @@ namespace ngraph
        namespace fmt
        {
            /// \brief vector format for Eigen wrappers.
            class V
            {
            public:
...
@@ -48,16 +48,16 @@ namespace ngraph
            size_t get_size() const;
            const element::Type& get_element_type() const;
            /// \brief Write bytes directly into the tensor
            /// \param p Pointer to source of data
            /// \param tensor_offset Offset into tensor storage to begin writing. Must be element-aligned.
            /// \param n Number of bytes to write, must be integral number of elements.
            void write(const void* p, size_t tensor_offset, size_t n) override;
            /// \brief Read bytes directly from the tensor
            /// \param p Pointer to destination for data
            /// \param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
            /// \param n Number of bytes to read, must be integral number of elements.
            void read(void* p, size_t tensor_offset, size_t n) const override;
            static constexpr int BufferAlignment = NGRAPH_CPU_ALIGNMENT;
...
@@ -49,7 +49,7 @@ namespace ngraph
            ~GPU_CallFrame();
            /// \brief Invoke the function with values matching the signature of the function.
            ///
            /// Tuples will be expanded into their tensor views to build the call frame.
            void call(const std::vector<std::shared_ptr<runtime::TensorView>>& outputs,
...
@@ -42,16 +42,16 @@ public:
                   void* memory_pointer);
    virtual ~GPU_TensorView();
    /// \brief Write bytes directly into the tensor
    /// \param p Pointer to source of data
    /// \param tensor_offset Offset into tensor storage to begin writing. Must be element-aligned.
    /// \param n Number of bytes to write, must be integral number of elements.
    void write(const void* p, size_t tensor_offset, size_t n) override;
    /// \brief Read bytes directly from the tensor
    /// \param p Pointer to destination for data
    /// \param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
    /// \param n Number of bytes to read, must be integral number of elements.
    void read(void* p, size_t tensor_offset, size_t n) const override;
    void* m_allocated_buffer_pool = nullptr;
...
@@ -61,16 +61,16 @@ public:
    size_t get_size() const;
    const element::Type& get_element_type() const;
    /// \brief Write bytes directly into the tensor
    /// \param p Pointer to source of data
    /// \param tensor_offset Offset into tensor storage to begin writing. Must be element-aligned.
    /// \param n Number of bytes to write, must be integral number of elements.
    void write(const void* p, size_t tensor_offset, size_t n) override;
    /// \brief Read bytes directly from the tensor
    /// \param p Pointer to destination for data
    /// \param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
    /// \param n Number of bytes to read, must be integral number of elements.
    void read(void* p, size_t tensor_offset, size_t n) const override;
private:
...
@@ -40,16 +40,16 @@ public:
                   const cldnn::engine& backend_engine,
                   void* memory_pointer = nullptr);
    /// \brief Write bytes directly into the tensor
    /// \param p Pointer to source of data
    /// \param tensor_offset Offset into tensor storage to begin writing. Must be element-aligned.
    /// \param n Number of bytes to write, must be integral number of elements.
    void write(const void* p, size_t tensor_offset, size_t n) override;
    /// \brief Read bytes directly from the tensor
    /// \param p Pointer to destination for data
    /// \param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
    /// \param n Number of bytes to read, must be integral number of elements.
    void read(void* p, size_t tensor_offset, size_t n) const override;
    cldnn::memory* get_data_ptr() { return ocl_memory.get(); }
...
@@ -61,16 +61,16 @@ namespace ngraph
            bool get_stale() { return m_stale; }
            void set_stale(bool val) { m_stale = val; }
            /// \brief Write bytes directly into the tensor
            /// \param p Pointer to source of data
            /// \param tensor_offset Offset into tensor storage to begin writing. Must be element-aligned.
            /// \param n Number of bytes to write, must be integral number of elements.
            virtual void write(const void* p, size_t tensor_offset, size_t n) = 0;
            /// \brief Read bytes directly from the tensor
            /// \param p Pointer to destination for data
            /// \param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
            /// \param n Number of bytes to read, must be integral number of elements.
            virtual void read(void* p, size_t tensor_offset, size_t n) const = 0;
        protected:
...
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * You may obtain a copy of the License at
* *
* http://www.apache.org/licenses/LICENSE-2.0 * http://www.apache.org/licenses/LICENSE-2.0
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
...@@ -23,36 +23,36 @@ ...@@ -23,36 +23,36 @@
namespace ngraph namespace ngraph
{ {
// @brief Serialize a Function to a json string /// \brief Serialize a Function to a json string
// @param func The Function to serialize /// \param func The Function to serialize
// @param indent If 0 then there is no formatting applied and the resulting string is the /// \param indent If 0 then there is no formatting applied and the resulting string is the
// most compact representation. If non-zero then the json string is formatted with the /// most compact representation. If non-zero then the json string is formatted with the
// indent level specified. /// indent level specified.
std::string serialize(std::shared_ptr<ngraph::Function> func, size_t indent = 0); std::string serialize(std::shared_ptr<ngraph::Function> func, size_t indent = 0);
// @brief Serialize a Function to a json file /// \brief Serialize a Function to a json file
// @param path The path to the output file /// \param path The path to the output file
// @param func The Function to serialize /// \param func The Function to serialize
// @param indent If 0 then there is no formatting applied and the resulting string is the /// \param indent If 0 then there is no formatting applied and the resulting string is the
// most compact representation. If non-zero then the json string is formatted with the /// most compact representation. If non-zero then the json string is formatted with the
// indent level specified. /// indent level specified.
void serialize(const std::string& path, void serialize(const std::string& path,
std::shared_ptr<ngraph::Function> func, std::shared_ptr<ngraph::Function> func,
size_t indent = 0); size_t indent = 0);
// @brief Serialize a Function to a CPIO file with all constant data stored as binary /// \brief Serialize a Function to a CPIO file with all constant data stored as binary
// @param out The output stream to which the data is serialized. /// \param out The output stream to which the data is serialized.
// @param func The Function to serialize /// \param func The Function to serialize
// @param indent If 0 then there is no formatting applied and the json is the /// \param indent If 0 then there is no formatting applied and the json is the
// most compact representation. If non-zero then the json is formatted with the /// most compact representation. If non-zero then the json is formatted with the
// indent level specified. /// indent level specified.
void serialize(std::ostream& out, std::shared_ptr<ngraph::Function> func, size_t indent = 0); void serialize(std::ostream& out, std::shared_ptr<ngraph::Function> func, size_t indent = 0);
// @brief Deserialize a Function /// \brief Deserialize a Function
// @param in An istream containing the input data /// \param in An istream containing the input data
std::shared_ptr<ngraph::Function> deserialize(std::istream& in); std::shared_ptr<ngraph::Function> deserialize(std::istream& in);
// @brief Deserialize a Function /// \brief Deserialize a Function
// @param str The json formatted string to deserialize. /// \param str The json formatted string to deserialize.
std::shared_ptr<ngraph::Function> deserialize(const std::string& str); std::shared_ptr<ngraph::Function> deserialize(const std::string& str);
} }
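Taken together the overloads above give string, file, and stream round-trips. A short sketch using only the declared signatures; `func` stands for any previously built std::shared_ptr<ngraph::Function>, and the file name is illustrative:

#include <sstream>

std::string js = ngraph::serialize(func, 4);  // indent = 4: pretty-printed json
ngraph::serialize("model.json", func);        // indent defaults to 0: compact file
auto f2 = ngraph::deserialize(js);            // rebuild from the string...
std::stringstream ss(js);
auto f3 = ngraph::deserialize(ss);            // ...or from any istream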
...@@ -27,12 +27,12 @@ namespace ngraph ...@@ -27,12 +27,12 @@ namespace ngraph
{ {
namespace test namespace test
{ {
/// @brief Same as numpy.allclose /// \brief Same as numpy.allclose
/// @param a First tensor to compare /// \param a First tensor to compare
/// @param b Second tensor to compare /// \param b Second tensor to compare
/// @param rtol Relative tolerance /// \param rtol Relative tolerance
/// @param atol Absolute tolerance /// \param atol Absolute tolerance
/// @returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|. /// \returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|.
template <typename T> template <typename T>
bool all_close(const std::vector<T>& a, bool all_close(const std::vector<T>& a,
const std::vector<T>& b, const std::vector<T>& b,
...@@ -52,11 +52,11 @@ namespace ngraph ...@@ -52,11 +52,11 @@ namespace ngraph
return rc; return rc;
} }
/// @brief Same as numpy.allclose /// \brief Same as numpy.allclose
/// @param a First tensor to compare /// \param a First tensor to compare
/// @param b Second tensor to compare /// \param b Second tensor to compare
/// @param rtol Relative tolerance /// \param rtol Relative tolerance
/// @param atol Absolute tolerance /// \param atol Absolute tolerance
/// Returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|. /// Returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|.
template <typename T> template <typename T>
bool all_close(const std::shared_ptr<ngraph::runtime::TensorView>& a, bool all_close(const std::shared_ptr<ngraph::runtime::TensorView>& a,
...@@ -78,11 +78,11 @@ namespace ngraph ...@@ -78,11 +78,11 @@ namespace ngraph
return all_close(read_vector<T>(a), read_vector<T>(b), rtol, atol); return all_close(read_vector<T>(a), read_vector<T>(b), rtol, atol);
} }
/// @brief Same as numpy.allclose /// \brief Same as numpy.allclose
/// @param as First tensors to compare /// \param as First tensors to compare
/// @param bs Second tensors to compare /// \param bs Second tensors to compare
/// @param rtol Relative tolerance /// \param rtol Relative tolerance
/// @param atol Absolute tolerance /// \param atol Absolute tolerance
/// Returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|. /// Returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|.
template <typename T> template <typename T>
bool all_close(const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& as, bool all_close(const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& as,
......
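The predicate above is exactly numpy.allclose: shapes must match and every element pair must satisfy |a_i-b_i| <= atol + rtol*|b_i|. A small worked instance with illustrative tolerances:

#include <cassert>
#include <vector>

void all_close_demo()
{
    std::vector<float> a{1.000f, 2.000f};
    std::vector<float> b{1.0005f, 2.001f};
    // Worst pair: |2.000 - 2.001| = 1e-3 <= 1e-5 + 1e-3 * |2.001| ~= 2.01e-3.
    assert(ngraph::test::all_close(a, b, 1e-3f, 1e-5f));
    // Tighten rtol to 1e-4 and the same pair fails: 1e-3 > 1e-5 + 2.001e-4.
    assert(!ngraph::test::all_close(a, b, 1e-4f, 1e-5f));
}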
...@@ -25,12 +25,12 @@ namespace ngraph ...@@ -25,12 +25,12 @@ namespace ngraph
{ {
namespace test namespace test
{ {
/// @brief Check if the two f32 numbers are close /// \brief Check if the two f32 numbers are close
/// @param a First number to compare /// \param a First number to compare
/// @param b Second number to compare /// \param b Second number to compare
/// @param mantissa_bits The mantissa width of the underlying number before casting to float /// \param mantissa_bits The mantissa width of the underlying number before casting to float
/// @param tolerance_bits Bit tolerance error /// \param tolerance_bits Bit tolerance error
/// @returns True iff the distance between a and b is within 2 ^ tolerance_bits ULP /// \returns True iff the distance between a and b is within 2 ^ tolerance_bits ULP
/// ///
/// References: /// References:
/// - https://en.wikipedia.org/wiki/Unit_in_the_last_place /// - https://en.wikipedia.org/wiki/Unit_in_the_last_place
...@@ -48,33 +48,33 @@ namespace ngraph ...@@ -48,33 +48,33 @@ namespace ngraph
/// bfloat and f32. /// bfloat and f32.
bool close_f(float a, float b, int mantissa_bits = 8, int tolerance_bits = 2); bool close_f(float a, float b, int mantissa_bits = 8, int tolerance_bits = 2);
/// @brief Check if the two floating point vectors are all close /// \brief Check if the two floating point vectors are all close
/// @param a First number to compare /// \param a First number to compare
/// @param b Second number to compare /// \param b Second number to compare
/// @param mantissa_bits The mantissa width of the underlying number before casting to float /// \param mantissa_bits The mantissa width of the underlying number before casting to float
/// @param tolerance_bits Bit tolerance error /// \param tolerance_bits Bit tolerance error
/// @returns true iff the two floating point vectors are close /// \returns true iff the two floating point vectors are close
bool all_close_f(const std::vector<float>& a, bool all_close_f(const std::vector<float>& a,
const std::vector<float>& b, const std::vector<float>& b,
int mantissa_bits = 8, int mantissa_bits = 8,
int tolerance_bits = 2); int tolerance_bits = 2);
/// @brief Check if the two TensorViews are all close in float /// \brief Check if the two TensorViews are all close in float
/// @param a First TensorView to compare /// \param a First TensorView to compare
/// @param b Second TensorView to compare /// \param b Second TensorView to compare
/// @param mantissa_bits The mantissa width of the underlying number before casting to float /// \param mantissa_bits The mantissa width of the underlying number before casting to float
/// @param tolerance_bits Bit tolerance error /// \param tolerance_bits Bit tolerance error
/// Returns true iff the two TensorViews are all close in float /// Returns true iff the two TensorViews are all close in float
bool all_close_f(const std::shared_ptr<runtime::TensorView>& a, bool all_close_f(const std::shared_ptr<runtime::TensorView>& a,
const std::shared_ptr<runtime::TensorView>& b, const std::shared_ptr<runtime::TensorView>& b,
int mantissa_bits = 8, int mantissa_bits = 8,
int tolerance_bits = 2); int tolerance_bits = 2);
/// @brief Check if the two vectors of TensorViews are all close in float /// \brief Check if the two vectors of TensorViews are all close in float
/// @param as First vector of TensorView to compare /// \param as First vector of TensorView to compare
/// @param bs Second vector of TensorView to compare /// \param bs Second vector of TensorView to compare
/// @param mantissa_bits The mantissa width of the underlying number before casting to float /// \param mantissa_bits The mantissa width of the underlying number before casting to float
/// @param tolerance_bits Bit tolerance error /// \param tolerance_bits Bit tolerance error
/// Returns true iff the two TensorViews are all close in float /// Returns true iff the two TensorViews are all close in float
bool all_close_f(const std::vector<std::shared_ptr<runtime::TensorView>>& as, bool all_close_f(const std::vector<std::shared_ptr<runtime::TensorView>>& as,
const std::vector<std::shared_ptr<runtime::TensorView>>& bs, const std::vector<std::shared_ptr<runtime::TensorView>>& bs,
......
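With the defaults (mantissa_bits = 8, tolerance_bits = 2), close_f behaves like a bfloat16 comparison: values are treated as having an 8-bit mantissa and may differ by up to 2^2 = 4 ULP at that precision. A hedged sketch of what that implies; exact boundary behaviour depends on the implementation:

#include <cassert>

void close_f_demo()
{
    assert(ngraph::test::close_f(1.0f, 1.0f)); // identical values always pass
    // 1e-5 is far below bfloat16 resolution at 1.0 (one ULP there is ~7.8e-3)...
    assert(ngraph::test::close_f(1.0f, 1.0f + 1e-5f));
    // ...but at full f32 width (mantissa_bits = 24) the same gap is ~84 ULP,
    // well outside the default 4-ULP tolerance.
    assert(!ngraph::test::close_f(1.0f, 1.0f + 1e-5f, 24));
}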
...@@ -25,9 +25,9 @@ namespace ngraph ...@@ -25,9 +25,9 @@ namespace ngraph
namespace autodiff namespace autodiff
{ {
/// @brief Returns a FunctionSpec for the backprop derivative of its argument. /// \brief Returns a FunctionSpec for the backprop derivative of its argument.
/// @param f is f(X_i...) /// \param f is f(X_i...)
/// @returns f'(X_i..., c) where f'(x_i, ..., c)_j is backprop for X_j /// \returns f'(X_i..., c) where f'(x_i, ..., c)_j is backprop for X_j
std::shared_ptr<Function> backprop_function(const std::shared_ptr<Function>& f); std::shared_ptr<Function> backprop_function(const std::shared_ptr<Function>& f);
} }
} }
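A hedged construction sketch: for f with parameters X_i and outputs y_j, the returned function takes the same X_i plus one seed c per output and yields one adjoint per parameter. The graph-building calls below (node operator overloads, op::ParameterVector) follow the API of this era and are assumptions, not part of this diff:

// f : x -> x * x, with one scalar parameter.
auto X = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{});
auto f = std::make_shared<ngraph::Function>(X * X, ngraph::op::ParameterVector{X});
// f' : (x, c) -> 2 * x * c; its j-th result is the adjoint of f's j-th parameter.
auto df = ngraph::autodiff::backprop_function(f);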
...@@ -26,12 +26,12 @@ namespace ngraph ...@@ -26,12 +26,12 @@ namespace ngraph
{ {
namespace autodiff namespace autodiff
{ {
/// @brief numeric approximation of the derivative /// \brief numeric approximation of the derivative
/// @param f A function /// \param f A function
/// @param args Values for the arguments (the independent variables) /// \param args Values for the arguments (the independent variables)
/// @param delta increment for the variables /// \param delta increment for the variables
/// @param indep_params parameters with respect to which to compute derivatives /// \param indep_params parameters with respect to which to compute derivatives
/// @returns vector of dy/dvar, where each dy/dvar's shape is concat(y.shape(), var.shape()) /// \returns vector of dy/dvar, where each dy/dvar's shape is concat(y.shape(), var.shape())
template <typename T> template <typename T>
std::vector<std::shared_ptr<runtime::TensorView>> std::vector<std::shared_ptr<runtime::TensorView>>
numeric_derivative(const std::shared_ptr<runtime::Backend>& backend, numeric_derivative(const std::shared_ptr<runtime::Backend>& backend,
......
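Conceptually the helper is a one-sided finite difference per independent scalar; a standalone analogue of the scheme (not the library code) looks like this:

#include <cstddef>
#include <functional>
#include <vector>

// Forward-difference gradient of a scalar function of n variables:
// grad_i ~= (f(x + delta * e_i) - f(x)) / delta.
std::vector<double> numeric_gradient(
    const std::function<double(const std::vector<double>&)>& f,
    std::vector<double> args,
    double delta)
{
    std::vector<double> grad(args.size());
    const double y0 = f(args);
    for (std::size_t i = 0; i < args.size(); ++i)
    {
        args[i] += delta;                 // perturb one variable
        grad[i] = (f(args) - y0) / delta; // slope estimate for dy/dx_i
        args[i] -= delta;                 // restore before the next variable
    }
    return grad;
}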
...@@ -26,7 +26,7 @@ namespace ngraph ...@@ -26,7 +26,7 @@ namespace ngraph
{ {
namespace test namespace test
{ {
/// @brief A predictable pseudo-random number generator /// \brief A predictable pseudo-random number generator
/// The seed is initialized so that we get repeatable pseudo-random numbers for tests /// The seed is initialized so that we get repeatable pseudo-random numbers for tests
template <typename T> template <typename T>
class Uniform class Uniform
...@@ -39,8 +39,8 @@ namespace ngraph ...@@ -39,8 +39,8 @@ namespace ngraph
{ {
} }
/// @brief Randomly initialize a tensor /// \brief Randomly initialize a tensor
/// @param ptv The tensor to initialize /// \param ptv The tensor to initialize
const std::shared_ptr<runtime::TensorView> const std::shared_ptr<runtime::TensorView>
initialize(const std::shared_ptr<runtime::TensorView>& ptv) initialize(const std::shared_ptr<runtime::TensorView>& ptv)
{ {
...@@ -49,8 +49,8 @@ namespace ngraph ...@@ -49,8 +49,8 @@ namespace ngraph
write_vector(ptv, vec); write_vector(ptv, vec);
return ptv; return ptv;
} }
/// @brief Randomly initialize a vector /// \brief Randomly initialize a vector
/// @param vec The vector to initialize /// \param vec The vector to initialize
void initialize(std::vector<T>& vec) void initialize(std::vector<T>& vec)
{ {
for (T& elt : vec) for (T& elt : vec)
......
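A usage sketch; the constructor is truncated out of this hunk, so the (min, max, seed) argument order is an assumption:

// A fixed seed gives identical "random" test data on every run.
ngraph::test::Uniform<float> rng(-1.0f, 1.0f, /*seed=*/0);
std::vector<float> v(16);
rng.initialize(v);    // fill a plain vector in place
// rng.initialize(tv); // or fill an existing runtime::TensorView 'tv'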