Unverified Commit 79378c77 authored by Robert Kimball's avatar Robert Kimball Committed by GitHub

Normalize doxygen comment syntax (#1503)

* normalize doxygen keywords

* more normalization

* normalize more onnx files

* yet more normalization

* fix doxygen error
parent 12a0dca5
......@@ -40,36 +40,36 @@ namespace ngraph
class Adjoints
{
public:
/// @brief (dy/dx)(c) for all x used to compute y
/// \brief (dy/dx)(c) for all x used to compute y
///
/// @param y The dependent value
/// @param c An expression for where to evaluate the derivatives
/// \param y The dependent value
/// \param c An expression for where to evaluate the derivatives
Adjoints(const NodeVector& y, const NodeVector& c);
Adjoints(const Adjoints& adjoints) = default;
Adjoints& operator=(const Adjoints& adjoints) = default;
Adjoints() = default;
/// @brief (dy/dx)(c)
/// \brief (dy/dx)(c)
///
/// @param x The node whose adjoint is desired.
/// \param x The node whose adjoint is desired.
const NodeVector& get(const std::shared_ptr<Node>& x);
/// @brief Add a backprop contribution to x's adjoint
/// \brief Add a backprop contribution to x's adjoint
///
/// @param x The adjoint node
/// @param delta A backprop contribution
/// \param x The adjoint node
/// \param delta A backprop contribution
void add_delta(const std::shared_ptr<Node>& x,
const std::shared_ptr<Node>& delta,
size_t output_index = 0);
/// @brief Add a backprop contribution to a slice of x's adjoint
/// \brief Add a backprop contribution to a slice of x's adjoint
///
/// @param x The adjoint node
/// @param delta A backprop contribution
/// @param lower_bounds Lower bounds of slice to add to
/// @param upper_bounds Upper bounds of slice to add to
/// @param strides Strides of slice to add to
/// \param x The adjoint node
/// \param delta A backprop contribution
/// \param lower_bounds Lower bounds of slice to add to
/// \param upper_bounds Upper bounds of slice to add to
/// \param strides Strides of slice to add to
void add_delta_to_slice(const std::shared_ptr<Node>& x,
const std::shared_ptr<Node>& delta,
const Coordinate& lower_bounds,
......
......@@ -70,13 +70,13 @@ namespace ngraph
ngraph::Shape m_final_shape;
};
/// @brief Compute the details regarding what reshape and/or broadcast operations must be applied to
/// \brief Compute the details regarding what reshape and/or broadcast operations must be applied to
/// arg1 and/or arg2, as well as what the final resulting shape shall be.
///
/// If this algorithm cannot handle the particular combination of shapes supplied as inputs, throw
/// an ngraph::builder::autobroadcast_incompatible_shapes exception.
///
/// @exception ngraph::builder::autobroadcast_incompatible_shapes
/// \exception ngraph::builder::autobroadcast_incompatible_shapes
static Autobroadcast_plan
compute_shapes_and_broadcast_axes(const ngraph::Shape& arg1_in_shape,
const ngraph::Shape& arg2_in_shape)
......
......@@ -42,7 +42,7 @@ namespace ngraph
static std::string error_str(const ngraph::Shape& shape1, const ngraph::Shape& shape2);
};
/// @brief Wrap two graph nodes, if necessary, to obtain values with identical shapes,
/// \brief Wrap two graph nodes, if necessary, to obtain values with identical shapes,
/// using NumPy's auto-broadcast rules.
///
/// The elements in the std::pair returned by this function correspond to those supplied
......@@ -58,33 +58,33 @@ namespace ngraph
/// There are some shape combinations which the autobroadcast algorithm cannot handle.
/// An exception is thrown when such combinations are provided to this function.
///
/// @pre
/// \pre
/// - \p args.first is not null
/// - \p args.second is not null
///
/// @post
/// \post
/// - The ngraph::Node objects pointed to by \p args.first and \p args.second have not been
/// altered by this function, except by possibly having added consumers of their values.
///
/// - If an exception was not thrown, then the return value's \p first and \p second
/// elements point to ngraph::Node objects whose output values have the same shape.
///
/// @exception ngraph::builder::autobroadcast_incompatible_shapes
/// \exception ngraph::builder::autobroadcast_incompatible_shapes
std::pair<std::shared_ptr<Node>, std::shared_ptr<Node>>
numpy_broadcast(const std::pair<std::shared_ptr<Node>, std::shared_ptr<Node>>& args);
/// Create a new \p NodeType node, and any additional nodes required to simulate NumPy-style autobroadcast
/// semantics. Intended for binary operations such as "Add".
///
/// @param [in] operand1_reshapeable The first operand to supply to the \p NodeType constructor. Subject to
/// \param [in] operand1_reshapeable The first operand to supply to the \p NodeType constructor. Subject to
/// being wrapped with additional nodes required for autobroadcasting. Must not be null.
///
/// @param [in] operand2_reshapeable The second operand to supply to the \p NodeType constructor. Subject to
/// \param [in] operand2_reshapeable The second operand to supply to the \p NodeType constructor. Subject to
/// being wrapped with additional nodes required for autobroadcasting. Must not be null.
///
/// @return The sink node of any/all nodes created by this function. Will never be null.
/// \return The sink node of any/all nodes created by this function. Will never be null.
///
/// @exception ngraph::builder::autobroadcast_incompatible_shapes
/// \exception ngraph::builder::autobroadcast_incompatible_shapes
template <typename NodeType>
std::shared_ptr<NodeType>
make_with_numpy_broadcast(const std::shared_ptr<Node>& operand1_reshapeable,
......@@ -99,18 +99,18 @@ namespace ngraph
/// semantics. Intended for non-binary operations such as "Select", where precisely the second and third
/// operands are subject to autobroadcast semantics.
///
/// @param [in] operand1 This operand is not subject to autobraodcast logic, and will be passed as-is as
/// \param [in] operand1 This operand is not subject to autobroadcast logic, and will be passed as-is as
/// the first argument to the \p NodeType constructor.
///
/// @param [in] operand2_reshapeable The second operand to supply to the \p NodeType constructor. Subject to
/// \param [in] operand2_reshapeable The second operand to supply to the \p NodeType constructor. Subject to
/// being wrapped with additional nodes required for autobroadcasting. Must not be null.
///
/// @param [in] operand3_reshapeable The third operand to supply to the \p NodeType constructor. Subject to
/// \param [in] operand3_reshapeable The third operand to supply to the \p NodeType constructor. Subject to
/// being wrapped with additional nodes required for autobroadcasting. Must not be null.
///
/// @return The sink node of any/all nodes created by this function. Will never be null.
/// \return The sink node of any/all nodes created by this function. Will never be null.
///
/// @exception ngraph::builder::autobroadcast_incompatible_shapes
/// \exception ngraph::builder::autobroadcast_incompatible_shapes
template <typename NodeType>
std::shared_ptr<NodeType>
make_with_numpy_broadcast(const std::shared_ptr<Node>& operand1,
......
......@@ -35,44 +35,44 @@ namespace ngraph
friend class Node;
public:
/// @param node The node that owns this input
/// @param index The position of this this tensor in all input tensors
/// @param output The output that supplies a value for this input
/// \param node The node that owns this input
/// \param index The position of this tensor in all input tensors
/// \param output The output that supplies a value for this input
Input(Node* node, size_t index, Output& output);
/// @return the node that this is an input of
/// \return the node that this is an input of
std::shared_ptr<Node> get_node() const;
/// @return the position within all supplied tensors of this input
/// \return the position within all supplied tensors of this input
size_t get_index() const { return m_index; }
// @return the connected output
/// \return the connected output
const Output& get_output() const { return *m_output; }
// @return the connected output
/// \return the connected output
Output& get_output() { return *m_output; }
// @return the tensor of the connected output
/// \return the tensor of the connected output
const Tensor& get_tensor() const;
// @return the tensor of the connected output
/// \return the tensor of the connected output
Tensor& get_tensor();
void replace_output(std::shared_ptr<Node> node, size_t i);
void replace_output(Output& output);
protected:
/// @return the tensor view for the connected output
/// \return the tensor view for the connected output
std::shared_ptr<const TensorView> get_tensor_view() const;
/// @return the tensor view for the connected output
/// \return the tensor view for the connected output
std::shared_ptr<TensorView> get_tensor_view();
/// @return the tensor view type for the connected output
/// \return the tensor view type for the connected output
std::shared_ptr<const TensorViewType> get_tensor_view_type() const;
public:
/// @return the shape of the connected output
/// \return the shape of the connected output
const Shape& get_shape() const;
/// @return the element type of the connected output
/// \return the element type of the connected output
const element::Type& get_element_type() const;
protected:
......
......@@ -29,7 +29,7 @@ namespace ngraph
namespace layout
{
/// @brief The standard strided layout, used for row-major and column-major, their permutations and slices.
/// \brief The standard strided layout, used for row-major and column-major, their permutations and slices.
///
/// The linearized offset of an index I is dot(I, strides) + offset.
class DenseTensorViewLayout : public TensorViewLayout
......
......@@ -34,7 +34,7 @@ namespace ngraph
namespace layout
{
/// @brief Interface for describing implementations of tensor views.
/// \brief Interface for describing implementations of tensor views.
///
/// Kernel selection will need to pay attention to the layout.
class TensorViewLayout
......@@ -59,7 +59,7 @@ namespace ngraph
const element::Type& get_element_type() const;
const Shape& get_shape() const;
virtual const Strides& get_strides() const = 0;
/// @brief Return true if this and other have the same element interpretation
/// \brief Return true if this and other have the same element interpretation
virtual bool operator==(const TensorViewLayout& other) const = 0;
bool operator!=(const TensorViewLayout& other) const { return !(*this == other); }
void set_tensor_view_type(const element::Type& element_type, const Shape& shape);
......
......@@ -37,9 +37,9 @@ namespace ngraph
class Output
{
public:
/// @param node Node that owns this output.
/// @param index Position of the output tensor in all output tensors
/// @param tensor_view The view of this tensor; where the value will be written
/// \param node Node that owns this output.
/// \param index Position of the output tensor in all output tensors
/// \param tensor_view The view of this tensor; where the value will be written
Output(Node* node, size_t index, const std::shared_ptr<TensorView>& tensor_view);
std::shared_ptr<Node> get_node() const;
......@@ -55,13 +55,13 @@ namespace ngraph
Tensor& get_tensor() const;
protected:
/// @return the tensor view type for the output
/// \return the tensor view type for the output
std::shared_ptr<const TensorViewType> get_tensor_view_type() const;
public:
/// @return the shape of the output
/// \return the shape of the output
const Shape& get_shape() const;
/// @return the element type of the output
/// \return the element type of the output
const element::Type& get_element_type() const;
protected:
......
......@@ -27,13 +27,13 @@ namespace ngraph
namespace descriptor
{
/// @brief A PrimaryTensorView owns the tensor. All other views are the result
/// \brief A PrimaryTensorView owns the tensor. All other views are the result
/// of some index operation on the primary view.
class PrimaryTensorView : public TensorView
{
public:
/// @param tensor_view_type The type for this view.
/// @param name Description of the tensor, for debugging.
/// \param tensor_view_type The type for this view.
/// \param name Description of the tensor, for debugging.
PrimaryTensorView(const std::shared_ptr<const TensorViewType>& tensor_view_type,
const std::string& name);
......
......@@ -38,7 +38,7 @@ namespace ngraph
class Tensor;
class TensorView;
/// @brief Compile-time descriptor of a first-class value that is a view of a tensor.
/// \brief Compile-time descriptor of a first-class value that is a view of a tensor.
class TensorView
{
TensorView(const TensorView&) = delete;
......
......@@ -24,21 +24,21 @@ namespace ngraph
{
namespace file_util
{
// @brief Returns the name with extension for a given path
// @param path The path to the output file
/// \brief Returns the name with extension for a given path
/// \param path The path to the output file
std::string get_file_name(const std::string& path);
// @brief Returns the file extension
// @param path The path to the output file
/// \brief Returns the file extension
/// \param path The path to the output file
std::string get_file_ext(const std::string& path);
// @brief Returns the directory portion of the given path
// @param path The path to the output file
/// \brief Returns the directory portion of the given path
/// \param path The path to the output file
std::string get_directory(const std::string& path);
// @brief Serialize a Function to as a json file
// @param s1 Left side of path
// @param s2 Right side of path
/// \brief Join two strings into a single file path
/// \param s1 Left side of path
/// \param s2 Right side of path
std::string path_join(const std::string& s1, const std::string& s2);
std::string path_join(const std::string& s1, const std::string& s2, const std::string& s3);
std::string path_join(const std::string& s1,
......@@ -46,55 +46,54 @@ namespace ngraph
const std::string& s3,
const std::string& s4);
// @brief Returns the size in bytes of filename
// @param filename The name of the file
/// \brief Returns the size in bytes of filename
/// \param filename The name of the file
size_t get_file_size(const std::string& filename);
// @brief Removes all files and directories starting at dir
// @param dir The path of the directory to remove
/// \brief Removes all files and directories starting at dir
/// \param dir The path of the directory to remove
void remove_directory(const std::string& dir);
// @brief Create a directory
// @param dir Path of the directory to create
// @param func The Function to serialize
// @return true if the directory was created, false otherwise
/// \brief Create a directory
/// \param dir Path of the directory to create
/// \return true if the directory was created, false otherwise
bool make_directory(const std::string& dir);
// @brief Gets the path of the system temporary directory
// @return the path to the system temporary directory
/// \brief Gets the path of the system temporary directory
/// \return the path to the system temporary directory
std::string get_temp_directory_path();
// @brief Removes a file from the filesystem
// @param file The path to the file to be removed
/// \brief Removes a file from the filesystem
/// \param file The path to the file to be removed
void remove_file(const std::string& file);
// @brief Reads the contents of a file
// @param path The path of the file to read
// @return vector<char> of the file's contents
/// \brief Reads the contents of a file
/// \param path The path of the file to read
/// \return vector<char> of the file's contents
std::vector<char> read_file_contents(const std::string& path);
// @brief Reads the contents of a file
// @param path The path of the file to read
// @return string of the file's contents
/// \brief Reads the contents of a file
/// \param path The path of the file to read
/// \return string of the file's contents
std::string read_file_to_string(const std::string& path);
// @brief Iterate through files and optionally directories. Symbolic links are skipped.
// @param path The path to iterate over
// @param func A callback function called with each file or directory encountered
// @param recurse Optional parameter to enable recursing through path
/// \brief Iterate through files and optionally directories. Symbolic links are skipped.
/// \param path The path to iterate over
/// \param func A callback function called with each file or directory encountered
/// \param recurse Optional parameter to enable recursing through path
void iterate_files(const std::string& path,
std::function<void(const std::string& file, bool is_dir)> func,
bool recurse = false,
bool include_links = false);
// @brief Create a temporary file
// @param extension Optional extension for the temporary file
// @return Name of the temporary file
/// \brief Create a temporary file
/// \param extension Optional extension for the temporary file
/// \return Name of the temporary file
std::string tmp_filename(const std::string& extension = "");
// @brief Test for the existence of a path or file
// @param path The path to test
// @param true if the path exists, false otherwise
/// \brief Test for the existence of a path or file
/// \param path The path to test
/// \return true if the path exists, false otherwise
bool exists(const std::string& path);
}
}
......@@ -26,14 +26,12 @@ namespace ngraph
{
namespace op
{
/**
* @brief Performs ONNX Conv operation.
*
* @param node The ONNX node object representing this operation.
*
* @return The vector containing Ngraph nodes producing output of ONNX convolution
* operation.
*/
/// \brief Performs ONNX Conv operation.
///
/// \param node The ONNX node object representing this operation.
///
/// \return The vector containing Ngraph nodes producing output of ONNX convolution
/// operation.
NodeVector conv(const Node& node);
} // namespace op
......
......@@ -25,42 +25,38 @@ namespace ngraph
{
namespace onnx_import
{
/**
* @brief Generate a list of broadcast axes.
*
* @details Informally, a broadcast "adds" axes to the input tensor, replicating
* elements from the input tensor as needed to fill the new dimensions.
* Function calculate which of the output axes are added in this way.
*
* @param output_shape The new shape for the output tensor.
* @param input_shape The shape of input tensor.
* @param start_match_axis The axis along which we want to replicate elements.
* The starting axis position (0-based) int the output
* shape from which the current shape of the tensor
* matches the desired new shape.
*
* @return The indices of added axes.
*/
/// \brief Generate a list of broadcast axes.
///
/// \details Informally, a broadcast "adds" axes to the input tensor, replicating
/// elements from the input tensor as needed to fill the new dimensions.
/// Function calculates which of the output axes are added in this way.
///
/// \param output_shape The new shape for the output tensor.
/// \param input_shape The shape of input tensor.
/// \param start_match_axis The axis along which we want to replicate elements.
/// The starting axis position (0-based) in the output
/// shape from which the current shape of the tensor
/// matches the desired new shape.
///
/// \return The indices of added axes.
AxisSet calculate_broadcast_axes(const Shape& output_shape,
const Shape& input_shape,
std::size_t start_match_axis);
/**
* @brief Generate a list of broadcast along axes.
*
* @details Broadcast "adds" elements along axes to the input tensor, replicating
* elements from the input tensor as needed to fill the new dimensions.
* Function calculate which of the output axes are added in this way.
*
* This function will attempt to match shapes, assuming the current shape
* matches the rightmost positions of the desired new shape. This behaviour
* is similar to NumPy's broadcasting.
*
* @param output_shape The new shape for the output tensor.
* @param input_shape The shape of input tensor.
*
* @return The indices of added axes.
*/
/// \brief Generate a list of broadcast along axes.
///
/// \details Broadcast "adds" elements along axes to the input tensor, replicating
/// elements from the input tensor as needed to fill the new dimensions.
/// Function calculates which of the output axes are added in this way.
///
/// This function will attempt to match shapes, assuming the current shape
/// matches the rightmost positions of the desired new shape. This behaviour
/// is similar to NumPy's broadcasting.
///
/// \param output_shape The new shape for the output tensor.
/// \param input_shape The shape of input tensor.
///
/// \return The indices of added axes.
inline AxisSet calculate_broadcast_axes(const Shape& output_shape, const Shape& input_shape)
{
return calculate_broadcast_axes(
......
......@@ -28,68 +28,57 @@ namespace ngraph
{
namespace convpool
{
/**
* @brief Get shape of kernel (filter) in pixels.
*
* @param node The Node ptr representing Conv or Pool operation.
* @return The kernel Shape object representing its dimensions (height, width, depth).
*/
/// \brief Get shape of kernel (filter) in pixels.
///
/// \param node The Node ptr representing Conv or Pool operation.
/// \return The kernel Shape object representing its dimensions (height, width, depth).
Shape get_kernel_shape(const Node& node);
/**
* @brief Get number of pixels to stride operation by in each direction.
*
* @param node The Node ptr representing Conv or Pool operation.
* @param kernel_shape The shape of the kernel which we retrieve strides for.
* @return The kernel Shape object representing its dimensions (height, width, depth).
*/
/// \brief Get number of pixels to stride operation by in each direction.
///
/// \param node The Node ptr representing Conv or Pool operation.
/// \param kernel_shape The shape of the kernel which we retrieve strides for.
/// \return The Strides object containing the stride in each dimension (height, width, depth).
Strides get_strides(const Node& node, const Shape& kernel_shape);
/**
* @brief Get number of pixels to stride operation by in each direction.
*
* @param node The Node ptr representing Conv or Pool operation.
* @return The kernel Shape object representing its dimensions (height, width, depth).
*/
/// \brief Get number of pixels to stride operation by in each direction.
///
/// \param node The Node ptr representing Conv or Pool operation.
/// \return The Strides object containing the stride in each dimension (height, width, depth).
Strides get_strides(const Node& node);
/**
* @brief Get number of pixels for filter dilation in each direction.
*
* @param node The Node ptr representing ONNX operation.
* @return The Strides object containing number of pixels for filter dilation
* (height, width, depth).
*/
/// \brief Get number of pixels for filter dilation in each direction.
///
/// \param node The Node ptr representing ONNX operation.
/// \return The Strides object containing number of pixels for filter dilation
/// (height, width, depth).
Strides get_dilations(const Node& node);
/**
* @brief Get padding values for the operation described by an ONNX node.
* @details If `auto_pad` attribute is specified as SAME_UPPER or SAME_LOWER, or VALID
* values are calculated. Otherwise values are taken from the `pads` attribute.
*
* `pads` value should follow [x1_begin, x2_begin..., x1_end, x2_end,...].
*
* @param node The Node ptr representing ONNX operation.
* @param kernel_shape The shape of the kernel which we retrieve pads for.
*
* @return A pair of (padding_above, padding_below), which elements contains number of
* pixels to pad in respective dimensions (height, width, depth).
*/
/// \brief Get padding values for the operation described by an ONNX node.
/// \details If the `auto_pad` attribute is specified as SAME_UPPER, SAME_LOWER, or VALID,
/// the values are calculated. Otherwise values are taken from the `pads` attribute.
///
/// `pads` value should follow [x1_begin, x2_begin..., x1_end, x2_end,...].
///
/// \param node The Node ptr representing ONNX operation.
/// \param kernel_shape The shape of the kernel which we retrieve pads for.
///
/// \return A pair of (padding_above, padding_below), whose elements contain the number of
/// pixels to pad in the respective dimensions (height, width, depth).
std::pair<CoordinateDiff, CoordinateDiff> get_pads(const Node& node,
const Shape& kernel_shape);
/**
* @brief Get padding values for the operation described by an ONNX node.
* @details If `auto_pad` attribute is specified as SAME_UPPER or SAME_LOWER, or VALID
* values are calculated. Otherwise values are taken from the `pads` attribute.
*
* `pads` value should follow [x1_begin, x2_begin..., x1_end, x2_end,...].
*
* @param node The Node ptr representing ONNX operation.
*
* @return A pair of (padding_above, padding_below), which elements contains number of
* pixels to pad in respective dimensions (height, width, depth).
*/
/// \brief Get padding values for the operation described by an ONNX node.
/// \details If the `auto_pad` attribute is specified as SAME_UPPER, SAME_LOWER, or VALID,
/// the values are calculated. Otherwise values are taken from the `pads` attribute.
///
/// `pads` value should follow [x1_begin, x2_begin..., x1_end, x2_end,...].
///
/// \param node The Node ptr representing ONNX operation.
///
/// \return A pair of (padding_above, padding_below), whose elements contain the number of
/// pixels to pad in the respective dimensions (height, width, depth).
inline std::pair<CoordinateDiff, CoordinateDiff> get_pads(const Node& node)
{
return get_pads(node, get_kernel_shape(node));
......
......@@ -22,24 +22,20 @@ namespace ngraph
{
namespace onnx_import
{
/**
* @brief Permute axes according to specified axes_order parameter.
*
* @param node The node which axes we want to permute.
* @param axes_order The permutation of node tensor axes.
*
* @return: New node with permuted axes.
*/
/// \brief Permute axes according to specified axes_order parameter.
///
/// \param node The node which axes we want to permute.
/// \param axes_order The permutation of node tensor axes.
///
/// \return New node with permuted axes.
std::shared_ptr<ngraph::Node> reorder_axes(const std::shared_ptr<ngraph::Node>& node,
std::vector<int> axes_order);
/**
* @brief Return transposed tensor (with axes in reversed order).
*
* @param node Input tensor we want to transpose
*
* @return: New node with reversed dimensions.
*/
/// \brief Return transposed tensor (with axes in reversed order).
///
/// \param node Input tensor we want to transpose
///
/// \return New node with reversed dimensions.
std::shared_ptr<ngraph::Node> transpose(const std::shared_ptr<ngraph::Node>& node);
} // namespace onnx_import
......
......@@ -24,23 +24,23 @@
#error("ngraph.hpp is for external use only")
#endif
/// @namespace ngraph
/// @brief The Intel Nervana Graph C++ API.
/// \namespace ngraph
/// \brief The Intel Nervana Graph C++ API.
/// @namespace ngraph::descriptor
/// @brief Descriptors are compile-time representations of objects that will appear at run-time.
/// \namespace ngraph::descriptor
/// \brief Descriptors are compile-time representations of objects that will appear at run-time.
/// @namespace ngraph::descriptor::layout
/// @brief Layout descriptors describe how tensor views are implemented.
/// \namespace ngraph::descriptor::layout
/// \brief Layout descriptors describe how tensor views are implemented.
/// @namespace ngraph::op
/// @brief Ops used in graph-building.
/// \namespace ngraph::op
/// \brief Ops used in graph-building.
/// @namespace ngraph::runtime
/// @brief The objects used for executing the graph.
/// \namespace ngraph::runtime
/// \brief The objects used for executing the graph.
/// @namespace ngraph::builder
/// @brief Convenience functions that create addional graph nodes to implement commonly-used
/// \namespace ngraph::builder
/// \brief Convenience functions that create additional graph nodes to implement commonly-used
/// recipes, for example auto-broadcast.
#include "ngraph/builder/autobroadcast.hpp"
......
......@@ -32,15 +32,15 @@ namespace ngraph
class ngraph::pass::CommonFunctionCollection : public ModulePass
{
public:
// @brief Create the CommonFunctionCollection pass
// @param function_emitter - This is a function that takes a reference to a Node and as string.
// The string is the name of the emitted function and the body of the function is
// the code for the op.
// @param result_map - This is a mapping of source node -> emitted static function node, where
/// \brief Create the CommonFunctionCollection pass
/// \param function_emitter - This is a function that takes a reference to a Node and a string.
/// The string is the name of the emitted function and the body of the function is
/// the code for the op.
/// \param result_map - This is a mapping of source node -> emitted static function node, where
/// the key is the source node and the value is the emitted static function node. The
// name of the function to call is create_function_name(<emitted static function node>)
// @param emitted_functions - string to contain the emitted code for all of the static
// functions.
/// name of the function to call is create_function_name(<emitted static function node>)
/// \param emitted_functions - string to contain the emitted code for all of the static
/// functions.
CommonFunctionCollection(std::function<std::string(Node&, std::string)> function_emitter,
std::unordered_map<Node*, Node*>& result_map,
std::string& emitted_functions);
......@@ -49,10 +49,10 @@ public:
bool run_on_module(std::vector<std::shared_ptr<ngraph::Function>>&) override;
// @brief Construct the name of the function to call for this op
// @param node - Node used to construct the function name. This node is the `value` of the
// result_map passed to the pass's constructor.
// @return string containing the name of the function to be called
/// \brief Construct the name of the function to call for this op
/// \param node - Node used to construct the function name. This node is the `value` of the
/// result_map passed to the pass's constructor.
/// \return string containing the name of the function to be called
static std::string create_function_name(const Node& node);
private:
......
......@@ -26,7 +26,7 @@ namespace ngraph
}
}
/// @brief Allocates a block of memory on the specified alignment. The actual size of the
/// \brief Allocates a block of memory on the specified alignment. The actual size of the
/// allocated memory is larger than the requested size by the alignment, so allocating 1 byte
/// on 64 byte alignment will allocate 65 bytes.
class ngraph::runtime::AlignedBuffer
......
......@@ -33,67 +33,67 @@ namespace ngraph
}
}
/// @brief Interface to a generic backend.
/// \brief Interface to a generic backend.
///
/// Backends are responsible for function execution and value allocation.
class ngraph::runtime::Backend
{
public:
virtual ~Backend();
/// @brief Create a new Backend object
/// @param type The name of a registered backend, such as "CPU" or "GPU".
/// \brief Create a new Backend object
/// \param type The name of a registered backend, such as "CPU" or "GPU".
/// To select a subdevice use "GPU:N" where `N` is the subdevice number.
/// @returns shared_ptr to a new Backend or nullptr if the named backend
/// \returns shared_ptr to a new Backend or nullptr if the named backend
/// does not exist.
static std::shared_ptr<Backend> create(const std::string& type);
/// @brief Query the list of registered devices
/// @returns A vector of all registered devices.
/// \brief Query the list of registered devices
/// \returns A vector of all registered devices.
static std::vector<std::string> get_registered_devices();
/// @brief Create a tensor specific to this backend
/// @param element_type The type of the tensor element
/// @param shape The shape of the tensor
/// @returns shared_ptr to a new backend specific tensor
/// \brief Create a tensor specific to this backend
/// \param element_type The type of the tensor element
/// \param shape The shape of the tensor
/// \returns shared_ptr to a new backend specific tensor
virtual std::shared_ptr<ngraph::runtime::TensorView>
create_tensor(const ngraph::element::Type& element_type, const Shape& shape) = 0;
/// @brief Create a tensor specific to this backend
/// @param element_type The type of the tensor element
/// @param shape The shape of the tensor
/// @param memory_pointer A pointer to a buffer used for this tensor. The size of the buffer
/// \brief Create a tensor specific to this backend
/// \param element_type The type of the tensor element
/// \param shape The shape of the tensor
/// \param memory_pointer A pointer to a buffer used for this tensor. The size of the buffer
/// must be sufficient to contain the tensor. The lifetime of the buffer is the
/// responsibility of the caller.
/// @returns shared_ptr to a new backend specific tensor
/// \returns shared_ptr to a new backend specific tensor
virtual std::shared_ptr<ngraph::runtime::TensorView> create_tensor(
const ngraph::element::Type& element_type, const Shape& shape, void* memory_pointer) = 0;
/// @brief Create a tensor of C type T specific to this backend
/// @param shape The shape of the tensor
/// @returns shared_ptr to a new backend specific tensor
/// \brief Create a tensor of C type T specific to this backend
/// \param shape The shape of the tensor
/// \returns shared_ptr to a new backend specific tensor
template <typename T>
std::shared_ptr<ngraph::runtime::TensorView> create_tensor(const Shape& shape)
{
return create_tensor(element::from<T>(), shape);
}
/// @brief Compiles a Function.
/// @param func The function to compile
/// @returns true if compile is successful, false otherwise
/// \brief Compiles a Function.
/// \param func The function to compile
/// \returns true if compile is successful, false otherwise
virtual bool compile(std::shared_ptr<Function> func) = 0;
/// @brief Executes a single iteration of a Function. If func is not compiled the call will
/// \brief Executes a single iteration of a Function. If func is not compiled the call will
/// compile it.
/// @param func The function to execute
/// @returns true if iteration is successful, false otherwise
/// \param func The function to execute
/// \returns true if iteration is successful, false otherwise
virtual bool call(std::shared_ptr<Function> func,
const std::vector<std::shared_ptr<runtime::TensorView>>& outputs,
const std::vector<std::shared_ptr<runtime::TensorView>>& inputs) = 0;
/// @brief Executes a single iteration of a Function. If func is not compiled the call will
/// \brief Executes a single iteration of a Function. If func is not compiled the call will
/// compile it. Optionally validates the inputs and outputs against the function graph.
/// @param func The function to execute
/// @returns true if iteration is successful, false otherwise
/// \param func The function to execute
/// \returns true if iteration is successful, false otherwise
bool call_with_validate(std::shared_ptr<Function> func,
const std::vector<std::shared_ptr<runtime::TensorView>>& outputs,
const std::vector<std::shared_ptr<runtime::TensorView>>& inputs)
......@@ -102,19 +102,19 @@ public:
return call(func, outputs, inputs);
}
/// @brief Compiled functions may be cached. This function removes a compiled function
/// \brief Compiled functions may be cached. This function removes a compiled function
/// from the cache.
/// @param func The function to execute
/// \param func The function to execute
virtual void remove_compiled_function(std::shared_ptr<Function> func);
/// @brief Enable the collection of per op performance information on a specified Function.
/// \brief Enable the collection of per op performance information on a specified Function.
/// Data is collected via the `get_performance_data` method.
/// @param func The function to collect perfomance data on.
/// @param enable Set to true to enable or false to disable data collection
/// \param func The function to collect performance data on.
/// \param enable Set to true to enable or false to disable data collection
virtual void enable_performance_data(std::shared_ptr<Function> func, bool enable) {}
/// @brief Collect performance information gathered on a Function.
/// @param func The function to get collected data.
/// @returns Vector of PerformanceCounter information.
/// \brief Collect performance information gathered on a Function.
/// \param func The function to get collected data.
/// \returns Vector of PerformanceCounter information.
virtual std::vector<PerformanceCounter>
get_performance_data(std::shared_ptr<Function> func) const;
......
......@@ -46,15 +46,15 @@ class ngraph::runtime::BackendManager
friend class Backend;
public:
/// @brief Used by build-in backends to register their name and constructor.
/// \brief Used by built-in backends to register their name and constructor.
/// This function is not used if the backend is built as a shared library.
/// @param name The name of the registering backend in UPPER CASE.
/// @param backend_constructor A function of type new_backend_t which will be called to
/// \param name The name of the registering backend in UPPER CASE.
/// \param backend_constructor A function of type new_backend_t which will be called to
/// construct an instance of the registered backend.
static void register_backend(const std::string& name, new_backend_t backend_constructor);
/// @brief Query the list of registered devices
/// @returns A vector of all registered devices.
/// \brief Query the list of registered devices
/// \returns A vector of all registered devices.
static std::vector<std::string> get_registered_backends();
private:
......
......@@ -46,7 +46,7 @@ namespace ngraph
EntryPoint compiled_function);
~CPU_CallFrame();
/// @brief Invoke the function with values matching the signature of the function.
/// \brief Invoke the function with values matching the signature of the function.
///
/// Tuples will be expanded into their tensor views to build the call frame.
void call(const std::vector<std::shared_ptr<runtime::TensorView>>& outputs,
......
......@@ -54,7 +54,7 @@ namespace ngraph
namespace fmt
{
/// @brief vector format for Eigen wrappers.
/// \brief vector format for Eigen wrappers.
class V
{
public:
......
......@@ -48,16 +48,16 @@ namespace ngraph
size_t get_size() const;
const element::Type& get_element_type() const;
/// @brief Write bytes directly into the tensor
/// @param p Pointer to source of data
/// @param tensor_offset Offset into tensor storage to begin writing. Must be element-aligned.
/// @param n Number of bytes to write, must be integral number of elements.
/// \brief Write bytes directly into the tensor
/// \param p Pointer to source of data
/// \param tensor_offset Offset into tensor storage to begin writing. Must be element-aligned.
/// \param n Number of bytes to write, must be integral number of elements.
void write(const void* p, size_t tensor_offset, size_t n) override;
/// @brief Read bytes directly from the tensor
/// @param p Pointer to destination for data
/// @param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
/// @param n Number of bytes to read, must be integral number of elements.
/// \brief Read bytes directly from the tensor
/// \param p Pointer to destination for data
/// \param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
/// \param n Number of bytes to read, must be integral number of elements.
void read(void* p, size_t tensor_offset, size_t n) const override;
static constexpr int BufferAlignment = NGRAPH_CPU_ALIGNMENT;
......
......@@ -49,7 +49,7 @@ namespace ngraph
~GPU_CallFrame();
/// @brief Invoke the function with values matching the signature of the function.
/// \brief Invoke the function with values matching the signature of the function.
///
/// Tuples will be expanded into their tensor views to build the call frame.
void call(const std::vector<std::shared_ptr<runtime::TensorView>>& outputs,
......
......@@ -42,16 +42,16 @@ public:
void* memory_pointer);
virtual ~GPU_TensorView();
/// @brief Write bytes directly into the tensor
/// @param p Pointer to source of data
/// @param tensor_offset Offset into tensor storage to begin writing. Must be element-aligned.
/// @param n Number of bytes to write, must be integral number of elements.
/// \brief Write bytes directly into the tensor
/// \param p Pointer to source of data
/// \param tensor_offset Offset into tensor storage to begin writing. Must be element-aligned.
/// \param n Number of bytes to write, must be integral number of elements.
void write(const void* p, size_t tensor_offset, size_t n) override;
/// @brief Read bytes directly from the tensor
/// @param p Pointer to destination for data
/// @param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
/// @param n Number of bytes to read, must be integral number of elements.
/// \brief Read bytes directly from the tensor
/// \param p Pointer to destination for data
/// \param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
/// \param n Number of bytes to read, must be integral number of elements.
void read(void* p, size_t tensor_offset, size_t n) const override;
void* m_allocated_buffer_pool = nullptr;
......
......@@ -61,16 +61,16 @@ public:
size_t get_size() const;
const element::Type& get_element_type() const;
/// @brief Write bytes directly into the tensor
/// @param p Pointer to source of data
/// @param tensor_offset Offset into tensor storage to begin writing. Must be element-aligned.
/// @param n Number of bytes to write, must be integral number of elements.
/// \brief Write bytes directly into the tensor
/// \param p Pointer to source of data
/// \param tensor_offset Offset into tensor storage to begin writing. Must be element-aligned.
/// \param n Number of bytes to write, must be integral number of elements.
void write(const void* p, size_t tensor_offset, size_t n) override;
/// @brief Read bytes directly from the tensor
/// @param p Pointer to destination for data
/// @param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
/// @param n Number of bytes to read, must be integral number of elements.
/// \brief Read bytes directly from the tensor
/// \param p Pointer to destination for data
/// \param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
/// \param n Number of bytes to read, must be integral number of elements.
void read(void* p, size_t tensor_offset, size_t n) const override;
private:
......
......@@ -40,16 +40,16 @@ public:
const cldnn::engine& backend_engine,
void* memory_pointer = nullptr);
/// @brief Write bytes directly into the tensor
/// @param p Pointer to source of data
/// @param tensor_offset Offset into tensor storage to begin writing. Must be element-aligned.
/// @param n Number of bytes to write, must be integral number of elements.
/// \brief Write bytes directly into the tensor
/// \param p Pointer to source of data
/// \param tensor_offset Offset into tensor storage to begin writing. Must be element-aligned.
/// \param n Number of bytes to write, must be integral number of elements.
void write(const void* p, size_t tensor_offset, size_t n) override;
/// @brief Read bytes directly from the tensor
/// @param p Pointer to destination for data
/// @param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
/// @param n Number of bytes to read, must be integral number of elements.
/// \brief Read bytes directly from the tensor
/// \param p Pointer to destination for data
/// \param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
/// \param n Number of bytes to read, must be integral number of elements.
void read(void* p, size_t tensor_offset, size_t n) const override;
cldnn::memory* get_data_ptr() { return ocl_memory.get(); }
......
......@@ -61,16 +61,16 @@ namespace ngraph
bool get_stale() { return m_stale; }
void set_stale(bool val) { m_stale = val; }
/// @brief Write bytes directly into the tensor
/// @param p Pointer to source of data
/// @param tensor_offset Offset into tensor storage to begin writing. Must be element-aligned.
/// @param n Number of bytes to write, must be integral number of elements.
/// \brief Write bytes directly into the tensor
/// \param p Pointer to source of data
/// \param tensor_offset Offset into tensor storage to begin writing. Must be element-aligned.
/// \param n Number of bytes to write, must be integral number of elements.
virtual void write(const void* p, size_t tensor_offset, size_t n) = 0;
/// @brief Read bytes directly from the tensor
/// @param p Pointer to destination for data
/// @param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
/// @param n Number of bytes to read, must be integral number of elements.
/// \brief Read bytes directly from the tensor
/// \param p Pointer to destination for data
/// \param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
/// \param n Number of bytes to read, must be integral number of elements.
virtual void read(void* p, size_t tensor_offset, size_t n) const = 0;
protected:
......
......@@ -5,7 +5,7 @@
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
 * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
......@@ -23,36 +23,36 @@
namespace ngraph
{
// @brief Serialize a Function to a json string
// @param func The Function to serialize
// @param indent If 0 then there is no formatting applied and the resulting string is the
// most compact representation. If non-zero then the json string is formatted with the
// indent level specified.
/// \brief Serialize a Function to a json string
/// \param func The Function to serialize
/// \param indent If 0 then there is no formatting applied and the resulting string is the
/// most compact representation. If non-zero then the json string is formatted with the
/// indent level specified.
std::string serialize(std::shared_ptr<ngraph::Function> func, size_t indent = 0);
// @brief Serialize a Function to as a json file
// @param path The path to the output file
// @param func The Function to serialize
// @param indent If 0 then there is no formatting applied and the resulting string is the
// most compact representation. If non-zero then the json string is formatted with the
// indent level specified.
/// \brief Serialize a Function as a json file
/// \param path The path to the output file
/// \param func The Function to serialize
/// \param indent If 0 then there is no formatting applied and the resulting string is the
/// most compact representation. If non-zero then the json string is formatted with the
/// indent level specified.
void serialize(const std::string& path,
std::shared_ptr<ngraph::Function> func,
size_t indent = 0);
// @brief Serialize a Function to a CPIO file with all constant data stored as binary
// @param out The output stream to which the data is serialized.
// @param func The Function to serialize
// @param indent If 0 then there is no formatting applied and the json is the
// most compact representation. If non-zero then the json is formatted with the
// indent level specified.
/// \brief Serialize a Function to a CPIO file with all constant data stored as binary
/// \param out The output stream to which the data is serialized.
/// \param func The Function to serialize
/// \param indent If 0 then there is no formatting applied and the json is the
/// most compact representation. If non-zero then the json is formatted with the
/// indent level specified.
void serialize(std::ostream& out, std::shared_ptr<ngraph::Function> func, size_t indent = 0);
// @brief Deserialize a Function
// @param in An isteam to the input data
/// \brief Deserialize a Function
/// \param in An istream containing the input data
std::shared_ptr<ngraph::Function> deserialize(std::istream& in);
// @brief Deserialize a Function
// @param str The json formatted string to deseriailze.
/// \brief Deserialize a Function
/// \param str The json formatted string to deserialize.
std::shared_ptr<ngraph::Function> deserialize(const std::string& str);
}
......@@ -27,12 +27,12 @@ namespace ngraph
{
namespace test
{
/// @brief Same as numpy.allclose
/// @param a First tensor to compare
/// @param b Second tensor to compare
/// @param rtol Relative tolerance
/// @param atol Absolute tolerance
/// @returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|.
/// \brief Same as numpy.allclose
/// \param a First tensor to compare
/// \param b Second tensor to compare
/// \param rtol Relative tolerance
/// \param atol Absolute tolerance
/// \returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|.
template <typename T>
bool all_close(const std::vector<T>& a,
const std::vector<T>& b,
......@@ -52,11 +52,11 @@ namespace ngraph
return rc;
}
/// @brief Same as numpy.allclose
/// @param a First tensor to compare
/// @param b Second tensor to compare
/// @param rtol Relative tolerance
/// @param atol Absolute tolerance
/// \brief Same as numpy.allclose
/// \param a First tensor to compare
/// \param b Second tensor to compare
/// \param rtol Relative tolerance
/// \param atol Absolute tolerance
/// \returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|.
template <typename T>
bool all_close(const std::shared_ptr<ngraph::runtime::TensorView>& a,
......@@ -78,11 +78,11 @@ namespace ngraph
return all_close(read_vector<T>(a), read_vector<T>(b), rtol, atol);
}
/// @brief Same as numpy.allclose
/// @param as First tensors to compare
/// @param bs Second tensors to compare
/// @param rtol Relative tolerance
/// @param atol Absolute tolerance
/// \brief Same as numpy.allclose
/// \param as First tensors to compare
/// \param bs Second tensors to compare
/// \param rtol Relative tolerance
/// \param atol Absolute tolerance
/// \returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|.
template <typename T>
bool all_close(const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& as,
......
......@@ -25,12 +25,12 @@ namespace ngraph
{
namespace test
{
/// @brief Check if the two f32 numbers are close
/// @param a First number to compare
/// @param b Second number to compare
/// @param mantissa_bits The mantissa width of the underlying number before casting to float
/// @param tolerance_bits Bit tolerance error
/// @returns True iff the distance between a and b is within 2 ^ tolerance_bits ULP
/// \brief Check if the two f32 numbers are close
/// \param a First number to compare
/// \param b Second number to compare
/// \param mantissa_bits The mantissa width of the underlying number before casting to float
/// \param tolerance_bits Bit tolerance error
/// \returns True iff the distance between a and b is within 2 ^ tolerance_bits ULP
///
/// References:
/// - https://en.wikipedia.org/wiki/Unit_in_the_last_place
......@@ -48,33 +48,33 @@ namespace ngraph
/// bfloat and f32.
bool close_f(float a, float b, int mantissa_bits = 8, int tolerance_bits = 2);
/// @brief Check if the two floating point vectors are all close
/// @param a First number to compare
/// @param b Second number to compare
/// @param mantissa_bits The mantissa width of the underlying number before casting to float
/// @param tolerance_bits Bit tolerance error
/// @returns true iff the two floating point vectors are close
/// \brief Check if the two floating point vectors are all close
/// \param a First number to compare
/// \param b Second number to compare
/// \param mantissa_bits The mantissa width of the underlying number before casting to float
/// \param tolerance_bits Bit tolerance error
/// \returns true iff the two floating point vectors are close
bool all_close_f(const std::vector<float>& a,
const std::vector<float>& b,
int mantissa_bits = 8,
int tolerance_bits = 2);
/// @brief Check if the two TensorViews are all close in float
/// @param a First TensorView to compare
/// @param b Second TensorView to compare
/// @param mantissa_bits The mantissa width of the underlying number before casting to float
/// @param tolerance_bits Bit tolerance error
/// \brief Check if the two TensorViews are all close in float
/// \param a First TensorView to compare
/// \param b Second TensorView to compare
/// \param mantissa_bits The mantissa width of the underlying number before casting to float
/// \param tolerance_bits Bit tolerance error
/// \returns true iff the two TensorViews are all close in float
bool all_close_f(const std::shared_ptr<runtime::TensorView>& a,
const std::shared_ptr<runtime::TensorView>& b,
int mantissa_bits = 8,
int tolerance_bits = 2);
/// @brief Check if the two vectors of TensorViews are all close in float
/// @param as First vector of TensorView to compare
/// @param bs Second vector of TensorView to compare
/// @param mantissa_bits The mantissa width of the underlying number before casting to float
/// @param tolerance_bits Bit tolerance error
/// \brief Check if the two vectors of TensorViews are all close in float
/// \param as First vector of TensorView to compare
/// \param bs Second vector of TensorView to compare
/// \param mantissa_bits The mantissa width of the underlying number before casting to float
/// \param tolerance_bits Bit tolerance error
/// \returns true iff the two TensorViews are all close in float
bool all_close_f(const std::vector<std::shared_ptr<runtime::TensorView>>& as,
const std::vector<std::shared_ptr<runtime::TensorView>>& bs,
......
......@@ -25,9 +25,9 @@ namespace ngraph
namespace autodiff
{
/// @brief Returns a FunctionSpec for the backprop derivative of its argument.
/// @param f is f(X_i...)
/// @returns f'(X_i..., c) where f'(x_i, ..., c)_j is backprop for X_j
/// \brief Returns a FunctionSpec for the backprop derivative of its argument.
/// \param f is f(X_i...)
/// \returns f'(X_i..., c) where f'(x_i, ..., c)_j is backprop for X_j
std::shared_ptr<Function> backprop_function(const std::shared_ptr<Function>& f);
}
}
......@@ -26,12 +26,12 @@ namespace ngraph
{
namespace autodiff
{
/// @brief numeric approximation of the derivative
/// @param f A function
/// @param args Values for the arguments (the independent variables)
/// @param delta increment for the variables
/// @param indep_params parameters with respect to which to compute derivatives
/// @returns vector of dy/dvar, where each dy/dvar's shape is concat(y.shape(), var.shape())
/// \brief numeric approximation of the derivative
/// \param f A function
/// \param args Values for the arguments (the independent variables)
/// \param delta increment for the variables
/// \param indep_params parameters with respect to which to compute derivatives
/// \returns vector of dy/dvar, where each dy/dvar's shape is concat(y.shape(), var.shape())
template <typename T>
std::vector<std::shared_ptr<runtime::TensorView>>
numeric_derivative(const std::shared_ptr<runtime::Backend>& backend,
......
......@@ -26,7 +26,7 @@ namespace ngraph
{
namespace test
{
/// @brief A predictable pseudo-random number generator
/// \brief A predictable pseudo-random number generator
/// The seed is initialized so that we get repeatable pseudo-random numbers for tests
template <typename T>
class Uniform
......@@ -39,8 +39,8 @@ namespace ngraph
{
}
/// @brief Randomly initialize a tensor
/// @param ptv The tensor to initialize
/// \brief Randomly initialize a tensor
/// \param ptv The tensor to initialize
const std::shared_ptr<runtime::TensorView>
initialize(const std::shared_ptr<runtime::TensorView>& ptv)
{
......@@ -49,8 +49,8 @@ namespace ngraph
write_vector(ptv, vec);
return ptv;
}
/// @brief Randomly initialize a vector
/// @param vec The tensor to initialize
/// \brief Randomly initialize a vector
/// \param vec The tensor to initialize
void initialize(std::vector<T>& vec)
{
for (T& elt : vec)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment