Commit 8c16125d authored by Scott Cyphers

Merge branch 'master' into cyphers/view

parents 0064cfd0 8d57ce68
@@ -44,3 +44,10 @@ SpacesInSquareBrackets: false
ReflowComments: true
IncludeCategories:
  - Regex: '^".*'
    Priority: 3
  - Regex: '^<.*'
    Priority: 2
SortIncludes: true
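With SortIncludes enabled, these categories order headers matching '^<.*' (priority 2) ahead of headers matching '^".*' (priority 3); that is the pattern visible in the re-sorted includes throughout the hunks below. A sketch of the resulting layout:

#include <map>               // matches '^<.*', priority 2, sorted first
#include <string>
#include "element_type.hpp"  // matches '^".*', priority 3, follows system headers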
@@ -27,7 +27,10 @@ const ngraph::ElementType element_type_uint64_t = ngraph::ElementType(64, false,
std::map<std::string, ngraph::ElementType> ngraph::ElementType::m_element_list;
ngraph::ElementType::ElementType(size_t bitwidth,
bool is_float,
bool is_signed,
const std::string& cname)
: m_bitwidth{bitwidth}
, m_is_float{is_float}
, m_is_signed{is_signed}
......
@@ -18,8 +18,8 @@
#pragma once
#include <map>
#include <string>
namespace ngraph
{
......
@@ -14,12 +14,12 @@
*/
#include <chrono>
#include <condition_variable>
#include <ctime>
#include <iomanip>
#include <iostream>
#include <mutex>
#include <thread>
#include "log.hpp"
......
@@ -15,9 +15,9 @@
#pragma once
#include <deque>
#include <sstream>
#include <stdexcept>
namespace nervana
{
......
@@ -14,40 +14,39 @@
#pragma once
#include <map>
#include <string>
namespace ngraph
{
//================================================================================================
// NameableValue
//================================================================================================
class NameableValue
{
public:
//!-----------------------------------------------------------------------------------
//! NameableValue
//! An object that can be named.
@@ -103,7 +102,6 @@ public:
std::string m_graph_label;
std::string m_short_name;
std::string m_doc_string;
};
} // end namespace ngraph
#include <algorithm>
#include <iostream>
#include "strides.hpp"
#include "util.hpp"
......
#pragma once
#include <cstdio>
#include <initializer_list>
#include <vector>
#include "element_type.hpp"
#include "tree.hpp"
@@ -27,7 +27,6 @@ public:
ElementType et = element_type_float);
const ElementType& get_type() const { return m_element_type; }
tensor_stride full_strides() const;
tensor_stride strides() const;
tensor_size sizes() const;
@@ -53,7 +52,6 @@ class ngraph::tensor_stride
public:
tensor_stride();
const ElementType& get_type() const { return m_element_type; }
tensor_stride full_strides() const;
tensor_stride strides() const;
......
@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <cassert>
#include <cmath>
#include <iostream>
#include <sstream>
#include "axes.hpp"
#include "util.hpp"
......
@@ -14,130 +14,130 @@
#pragma once
#include <initializer_list>
#include <limits>
#include <memory>
#include <set>
#include <string>
#include <vector>
#include "element_type.hpp"
#include "names.hpp"
#include "strides.hpp"
#include "util.hpp"
#include "uuid.hpp"
namespace ngraph
{
class Axes;
class Axis;
class FlattenedAxis;
class TensorDescription;
class Op;
using op_ptr = std::shared_ptr<Op>;
using tensor_description_ptr = std::shared_ptr<TensorDescription>;
using axes_key_t = size_t;
class slice
{
public:
slice(int64_t start = -1, int64_t stop = -1, int64_t step = 1);
size_t sliced_length(size_t length) const;
private:
size_t m_start;
size_t m_stop;
int64_t m_step;
};
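//-----------------------------------------------------------------------------------------------
// Usage sketch, assuming the class above keeps Python slice semantics where the defaults stand
// in for "unspecified":
//-----------------------------------------------------------------------------------------------
// slice s(0, 10, 1);               // elements [0, 10)
// size_t n = s.sliced_length(50);  // 10: the slice covers 10 of the 50 elements
// slice r(10, 0, -1);              // reversed traversal, if step -1 is accepted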
//-----------------------------------------------------------------------------------------------
// default_dtype
//-----------------------------------------------------------------------------------------------
// def default_dtype(dtype=None):
// if dtype is None:
// dtype = np.dtype(np.float32)
// elif not isinstance(dtype, Flex) and not isinstance(dtype, np.dtype):
// try:
// dtype = np.dtype(dtype)
// except TypeError:
// raise TypeError("Could not cast {} to np.dtype".format(dtype))
// return dtype
//-----------------------------------------------------------------------------------------------
// default_int_dtype
//-----------------------------------------------------------------------------------------------
// def default_int_dtype(dtype=None):
// if dtype is None:
// dtype = np.dtype(np.int32)
// elif not isinstance(dtype, Flex) and not isinstance(dtype, np.dtype):
// try:
// dtype = np.dtype(dtype)
// except TypeError:
// raise TypeError("Could not cast {} to np.dtype".format(dtype))
// return dtype
//================================================================================================
// make_axis
// Returns a new Axis.
//
// Args:
// length (int, optional): Length of the axis.
// name (String, optional): Name of the axis.
// batch (bool, optional): This is a batch axis. Defaults to False.
// recurrent (bool, optional): This is a recurrent axis. Defaults to False.
// docstring (String, optional): A docstring for the axis.
//
// Returns:
// Axis: A new Axis.
//================================================================================================
Axis make_axis(size_t length,
const std::string& name = "",
bool batch = false,
bool recurrent = false);
//================================================================================================
// make_axes
// Makes an Axes object.
//
// Args:
// axes: A list of Axis.
//
// Returns:
// Axes: An Axes.
//================================================================================================
Axes make_axes(const std::vector<Axis>&);
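//-----------------------------------------------------------------------------------------------
// Usage sketch for the two factories above; the axis names and lengths are invented for
// illustration:
//-----------------------------------------------------------------------------------------------
// Axis N = make_axis(128, "N", true);  // batch axis of length 128
// Axis C = make_axis(3, "C");          // ordinary feature axis
// Axes ax = make_axes({N, C});         // label the two dimensions of a rank-2 tensor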
//================================================================================================
// Axis
// An Axis labels a dimension of a tensor. The op-graph uses
// the identity of Axis objects to pair and specify dimensions in
// symbolic expressions. This system has several advantages over
// using the length and position of the axis as in other frameworks:
//
// 1) Convenience. The dimensions of tensors, which may be nested
// deep in a computation graph, can be specified without having to
// calculate their lengths.
//
// 2) Safety. Axis labels are analogous to types in general-purpose
// programming languages, allowing objects to interact only when
// they are permitted to do so in advance. In symbolic computation,
// this prevents interference between axes that happen to have the
// same lengths but are logically distinct, e.g. if the number of
// training examples and the number of input features are both 50.
//
// TODO: Please add to the list...
//
// Arguments:
// length: The length of the axis.
// batch: Whether the axis is a batch axis.
// recurrent: Whether the axis is a recurrent axis.
//================================================================================================
class Axis
{
public:
Axis& operator+(const Axis& rhs);
Axis& operator-(const Axis& rhs);
@@ -145,7 +145,6 @@ public:
Axis(size_t length, const std::string& new_name);
virtual ~Axis() {}
void named(const std::string& new_name);
//!-----------------------------------------------------------------------------------
@@ -232,99 +231,99 @@ public:
uuid_type uuid;
size_t __length;
static size_t __name_counter;
};
//-----------------------------------------------------------------------------------------------
// _sliced_length
//-----------------------------------------------------------------------------------------------
// def _sliced_length(s, incoming_length):
// start, stop, step = s.indices(incoming_length)
// # max with 0 so we dont ever return a negative length. This
// # matches how python handles it internally. Raising an exception
// # might also be reasonable.
// if step == 1:
// return max(stop - start, 0)
// elif step == -1:
// return max(start - stop, 0)
// else:
// _validate_slice(s)
//-----------------------------------------------------------------------------------------------
// _validate_slice
//-----------------------------------------------------------------------------------------------
// def _validate_slice(s):
// if s.step not in (-1, 1, None):
// raise ValueError((
// 'SlicedAxis cant currently handle a step size other '
// 'than -1, 1 or None. Was given {step} in slice {slice}'
// ).format(
// step=s.step,
// slice=s,
// ))
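//-----------------------------------------------------------------------------------------------
// A C++ sketch of _sliced_length above, assuming start and stop have already been clamped to
// [0, incoming_length] the way Python's slice.indices() does:
//-----------------------------------------------------------------------------------------------
// size_t sliced_length(int64_t start, int64_t stop, int64_t step)
// {
//     if (step == 1)
//     {
//         return static_cast<size_t>(std::max<int64_t>(stop - start, 0));
//     }
//     if (step == -1)
//     {
//         return static_cast<size_t>(std::max<int64_t>(start - stop, 0));
//     }
//     throw std::invalid_argument("only step sizes of -1 and 1 are handled");
// }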
//-----------------------------------------------------------------------------------------------
// slice_axis
// Slice an axis, return complete new axis
// TODO: deprecate this after the axis refactoring
//
// Arguments:
// axis: the axis to be sliced
// s: slice
//
// Returns:
// Axis instance, the new sliced axis
//-----------------------------------------------------------------------------------------------
// def slice_axis(axis, s):
Axis slice_axis(const Axis& axis, const slice& s);
//-----------------------------------------------------------------------------------------------
// duplicates
// Returns a list of Axis objects which have duplicate names in arr
//
// Arguments:
// arr: The iterable of Axis objects to check for duplicates in.
//
// Returns:
// list of Axis: duplicate Axis found in arr
//-----------------------------------------------------------------------------------------------
std::vector<std::string> duplicates(const std::vector<Axis>& ax);
//-----------------------------------------------------------------------------------------------
// with_args_as_axes
// A decorator to cast arguments to axes.
//
// Arguments:
// f: The function to be decorated.
//
// Returns:
// The decorated function.
//-----------------------------------------------------------------------------------------------
// def with_args_as_axes(f):
// @wraps(f)
// def wrapper(*args):
// """
// The decorated function. Performs the conversion
// to Axes.
// Arguments:
// *args: Arguments intended for the original function.
// Returns:
// Return value of the original function.
// """
// args = [Axes(arg) for arg in args]
// return f(*args)
// return wrapper
//================================================================================================
// Axes
// An Axes is a tuple of Axis objects used as a label for a tensor's
// dimensions.
//================================================================================================
class Axes
{
public:
std::vector<Axis> axes;
uuid_type uuid;
@@ -706,47 +705,47 @@ public:
std::vector<Axis> convert(const Axes& ax);
std::vector<Axis> convert(const std::vector<Axes>& ax);
private:
void check_duplicates();
};
//================================================================================================
// DuplicateAxisNames
//================================================================================================
// class DuplicateAxisNames(ValueError):
// def __init__(self, message, duplicate_axis_names):
// super(DuplicateAxisNames, self).__init__(message)
// self.duplicate_axis_names = duplicate_axis_names
//================================================================================================
// IncompatibleAxesError
//================================================================================================
// class IncompatibleAxesError(ValueError):
// pass
//================================================================================================
// UnmatchedAxesError
//================================================================================================
// class UnmatchedAxesError(IncompatibleAxesError):
// pass
//================================================================================================
// AxesMap
// AxesMap provides a way to define an axis name mapping: {Axis.name: Axis.name} and
// then apply this mapping to an Axes and get new Axes out.
//
// Right now AxesMap is implemented as immutable because I didn't want to deal with
// enforcing _assert_valid_axes_map on every method which mutates a dict and I didn't
// need a mutable data structure anyway. Feel free to make it mutable and add in
// invariant enforcement.
//================================================================================================
class AxesMap : public std::map<std::string, std::string>
{
public:
AxesMap(const std::pair<std::string, std::string>&);
AxesMap(std::initializer_list<std::pair<std::string, std::string>>);
@@ -762,74 +761,70 @@ public:
//--------------------------------------------------------------------------------------------
Axis map_axis(const Axis& old_axis) const;
private:
std::map<std::string, std::set<std::string>> duplicate_axis_names();
void assert_valid_axes_map();
public:
// def invert(self):
// return {v: k for k, v in self.items()}
};
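//-----------------------------------------------------------------------------------------------
// Usage sketch for the mapping described above; the axis name and length are invented:
//-----------------------------------------------------------------------------------------------
// Axis n = make_axis(128, "N");  // an axis named "N"
// AxesMap m({{"N", "batch"}});   // map the name "N" to "batch"
// Axis renamed = m.map_axis(n);  // same length as n, now named "batch"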
//-----------------------------------------------------------------------------------------------
// _reduce_nested
// Reduces a nested sequence by applying a function to each
// of its elements and returns an aggregation.
//
// Arguments:
// elem: The object to be reduced, either a sequence
// or a singleton.
// agg: A variable holding information collected
// as the sequence is collapsed.
// func: A function to augment the aggregate by processing
// a singleton. Should have the form func(agg, elem) -> agg
//
// Returns:
// agg: The final aggregate returned by the function.
//-----------------------------------------------------------------------------------------------
// def _reduce_nested(elem, agg, func):
// if isinstance(elem, collections.Iterable):
// for sub in elem:
// agg = _reduce_nested(sub, agg, func)
// return agg
// else:
// return func(agg, elem)
//================================================================================================
// FlattenedAxis
// A FlattenedAxis has length which is the product of the lengths of all
// Axis in the axes. The original Axes object is stored so that we can later
// unflatten this Axis back to its original component Axis.
//
// Notes: since we allow Axis to have duplicated names globally, NameableValue
// is not used here.
//================================================================================================
class FlattenedAxis : public Axis
{
public:
FlattenedAxis(const std::vector<Axis>& list, const std::string& new_name = "");
virtual ~FlattenedAxis() {}
//--------------------------------------------------------------------------------------------
// Returns:
// True if this is a FlattenedAxis.
//--------------------------------------------------------------------------------------------
bool is_flattened() const { return true; }
//--------------------------------------------------------------------------------------------
// Returns:
// Whether this axes contains no collapsed axes.
//--------------------------------------------------------------------------------------------
bool empty() const { return axes.size() == 0; }
//--------------------------------------------------------------------------------------------
// Returns:
// Whether this axes contains exactly one collapsed axes.
//--------------------------------------------------------------------------------------------
bool single() const { return axes.size() == 1; }
bool operator==(const Axis& other) const;
// def __hash__(self):
@@ -841,96 +836,96 @@ public:
// return 'FlattenedAxis(%s)' % ', '.join(repr(axis) for axis in self.axes)
std::vector<Axis> axes;
};
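//-----------------------------------------------------------------------------------------------
// Usage sketch: flattening two axes into one whose length is the product of the component
// lengths, per the description above. Names and lengths are invented for illustration.
//-----------------------------------------------------------------------------------------------
// Axis H = make_axis(4, "H");
// Axis W = make_axis(8, "W");
// FlattenedAxis HW({H, W});  // a single axis of length 32 that remembers H and W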
//-----------------------------------------------------------------------------------------------
// reduce_strides
// Reduces a nested tuple describing the strides of a tensor
// into a tuple giving the stride of each of its dimensions.
//
// Arguments:
// strides: The nested tuple.
//
// Returns:
// strides: The tuple of strides.
//-----------------------------------------------------------------------------------------------
// def reduce_strides(strides):
// return tuple(int(_reduce_nested(elem, float('inf'), min))
// for elem in strides)
//-----------------------------------------------------------------------------------------------
// _make_stride
// Generates a nested tuple that provides the striding information
// for an occurrence of axis. If the axis is a FlattenedAxis, the
// stride will be a tuple containing the strides of each collapsed
// axis. Otherwise, the stride will be an integer.
//
// Arguments:
// inner_size: The total size of all dimensions smaller than this
// axis, i.e. all axes to the right of this one when they are
// laid out in c-contiguous order.
// axis: The axis for which we are generating a stride.
// fsz: A nested tuple supplying the sizes of each dimension collapsed
// into the axis. The size may be larger than the length of the axis.
//
// Returns:
// inner_size: The total size of this axis and all smaller dimensions.
// stride: The stride given to the axis.
//-----------------------------------------------------------------------------------------------
// def _make_stride(inner_size, axis, fsz):
// if axis.is_flattened:
// return _make_strides(inner_size, axis.axes, fsz)
// else:
// stride = inner_size
// inner_size *= fsz
// return inner_size, stride
//-----------------------------------------------------------------------------------------------
// _make_strides
// Generates a tuple of strides for a set of axes. See _make_stride
// for a description of the stride given to each axis.
//
// Arguments:
// inner_size: The total size of all dimensions smaller than
// the axes.
// axes: The axes for which we are generating strides.
// full_sizes: The size of each axis.
//
// Returns:
// inner_size: The total size of these axes and all smaller dimensions.
// strides: The strides generated for the axes.
//-----------------------------------------------------------------------------------------------
// def _make_strides(inner_size, axes, full_sizes):
// full_strides = []
// for axis, fsz in reversed(list(zip(axes, full_sizes))):
// inner_size, stride = _make_stride(inner_size, axis, fsz)
// full_strides.append(stride)
// return inner_size, tuple(reversed(full_strides))
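//-----------------------------------------------------------------------------------------------
// A C++ sketch of the flat (non-nested) case of _make_strides above, i.e. plain C-contiguous
// strides; full_sizes is ordered from outermost to innermost axis:
//-----------------------------------------------------------------------------------------------
// std::vector<size_t> make_strides(const std::vector<size_t>& full_sizes)
// {
//     std::vector<size_t> strides(full_sizes.size());
//     size_t inner_size = 1;
//     for (size_t i = full_sizes.size(); i-- > 0;)
//     {
//         strides[i] = inner_size;  // stride of axis i is the size of everything inside it
//         inner_size *= full_sizes[i];
//     }
//     return strides;
// }
// make_strides({2, 3, 4}) returns {12, 4, 1}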
//================================================================================================
// TensorDescription
// Description of a tensor that will be allocated in hardware.
//
// Names the tensor's dimensions with axes and holds pointers to the
// buffer allocated by the analysis and the backend tensor value
// (e.g. a cpu or gpu tensor).
//
// Arguments:
// axes: Axes of the tensor.
// base: If a view, the viewed tensor's description.
// dtype: The type of the tensor.
// full_strides: The strides of each axis.
// full_sizes: The allocated size of each axis (may be larger than the axis).
// offset: An offset into the viewed tensor.
// next_tensor_description: In a reshape, tensor description of reshaped tensor.
// is_persistent: The tensor should be persistent, i.e. survive from computation to
// computation.
// is_input: The device tensor can be written from the host.
// **kwargs: Additional args for related classes.
//================================================================================================
class TensorDescription : public NameableValue
{
public:
//!-----------------------------------------------------------------------------------
//! constructor
//!-----------------------------------------------------------------------------------
@@ -1487,7 +1482,7 @@ public:
ngraph::tensor_size full_sizes;
ngraph::tensor_stride full_strides;
tensor_description_ptr next_tensor_description;
};
} // end of namespace ngraph
......
@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <cmath>
#include <exception>
#include <memory>
#include <sstream>
#include "exop.hpp"
#include "op_graph.hpp"
......
@@ -15,67 +15,66 @@
#pragma once
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <set>
#include <sstream>
#include <string>
#include <vector>
#include "axes.hpp"
#include "mock.hpp"
#include "op_graph.hpp"
namespace ngraph
{
// forward declaration. This will hopefully go away
class ExecutionGraph;
class TensorDescription;
class InputDecl;
class OutputDecl;
class TensorDecl;
class TensorViewDecl;
class ExOp;
class Op;
class ComputationDecl;
class ExOpBlock;
class ExecutionState;
using output_decl_ptr = std::shared_ptr<OutputDecl>;
using input_decl_ptr = std::shared_ptr<InputDecl>;
using tensor_decl_ptr = std::shared_ptr<TensorDecl>;
using tensor_view_decl_ptr = std::shared_ptr<TensorViewDecl>;
using exop_ptr = std::shared_ptr<ExOp>;
using computation_decl_ptr = std::shared_ptr<ComputationDecl>;
using execution_graph_ptr = std::shared_ptr<ExecutionGraph>;
using exop_block_ptr = std::shared_ptr<ExOpBlock>;
using tensor_ptr = std::shared_ptr<TensorInterface>;
using transformer_ptr = std::shared_ptr<Transformer>;
using execution_state_ptr = std::shared_ptr<ExecutionState>;
//================================================================================================
// OutputDecl
// One value computed by an exop
//
// Arguments:
// exop: The exop.
// pos: The position of the value, defaults to 0.
// tensor_description: Tensor description of the value.
// write_view: The tensor view where the value is written.
//
// Attributes:
// exop: The exop.
// pos: The position of the value.
// tensor_description: Tensor description of the value.
// write_view: The tensor view where the value is written.
// value_users: Arguments using this value.
//================================================================================================
class OutputDecl
{
public:
OutputDecl(const ExOp& _exop, size_t _pos, tensor_decl_ptr, tensor_description_ptr);
tensor_decl_ptr tensor_decl();
void tensor_decl(tensor_decl_ptr tensor_decl);
@@ -95,29 +94,29 @@ public:
tensor_decl_ptr __tensor;
tensor_view_decl_ptr __write_view;
std::set<InputDecl*> value_users;
};
//================================================================================================
// InputDecl
// An argument for an exop.
//
// Arguments:
// exop: The exop.
// pos: The position of the value, defaults to 0.
// tensor_description: Tensor description of the value.
// read_view: The tensor view where the value is read from.
//
// Attributes:
// exop: The exop.
// pos: The position of the value.
// tensor_description: Tensor description of the value.
// read_view: The tensor view where the value is read from.
// value: Arguments supplying this value.
//================================================================================================
class InputDecl
{
public:
InputDecl(const ExOp& _exop,
size_t _pos,
tensor_description_ptr _tensor_description,
@@ -134,37 +133,37 @@ public:
tensor_description_ptr tensor_description;
tensor_view_decl_ptr read_view;
OutputDecl* m_value;
};
//================================================================================================
// ExecutionGraphElt
// An element of an execution graph.
//
// Arguments:
// execution_graph: The execution graph that indexes this exop.
//
// Attributes:
// execution_graph: The execution graph that indexes this exop.
//================================================================================================
class ExecutionGraphElt
{
public:
ExecutionGraphElt(ExecutionGraph& eg)
: execution_graph{eg}
{
}
ExecutionGraph& execution_graph;
};
//================================================================================================
// ExOp
//================================================================================================
class ExOp : public ExecutionGraphElt
{
public:
// An exop that indicates an op to be executed.
// The op might be different from what was originally found in the computation graph.
@@ -220,17 +219,18 @@ public:
std::vector<tensor_decl_ptr> liveness_free_list;
std::vector<tensor_decl_ptr> liveness_new_list;
std::vector<InputDecl> args;
std::vector<InputDecl*>
write_args; // TODO: Kludge until we have values with writers/readers
std::vector<OutputDecl> values;
};
//================================================================================================
// TensorDecl
//================================================================================================
class TensorDecl : public ExecutionGraphElt
{
public:
// Allocate for a tensor.
// Arguments:
@@ -294,15 +294,15 @@ public:
bool is_compile_only;
tensor_ptr initial_value;
tensor_decl_ptr source_tensor;
};
//================================================================================================
// ExOpBlock
//================================================================================================
class ExOpBlock : public ExecutionGraphElt
{
public:
// Sequentially execute a list of exops.
// Attributes:
@@ -312,7 +312,8 @@ public:
// root_set: Set of exops whose values are needed.
ExOpBlock(ComputationDecl& cgraph);
bool is_exop_end_of_list();
void add_ops(std::initializer_list<computation_op_ptr> roots,
exop_ptr after_exop = nullptr);
exop_ptr add_op(op_ptr op, exop_ptr after_exop);
exop_ptr add_exop(exop_ptr exop, exop_ptr after_exop = nullptr);
void move_exop_to_after_exop(exop_ptr exop, exop_ptr after_exop);
@@ -336,17 +337,16 @@ public:
// replacement for next_exop, prev_exop
std::list<exop_ptr>::iterator begin() { return op_list.begin(); }
std::list<exop_ptr>::iterator end() { return op_list.end(); }
std::list<exop_ptr> op_list;
};
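//-----------------------------------------------------------------------------------------------
// Usage sketch: since ExOpBlock exposes begin()/end() over its op_list, the exops of a block
// can be walked with a range-based for loop:
//-----------------------------------------------------------------------------------------------
// for (exop_ptr& exop : block)  // block is an ExOpBlock
// {
//     // inspect or reorder *exop here
// }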
//================================================================================================
// TensorViewDecl
//================================================================================================
class TensorViewDecl : public ExecutionGraphElt
{
public:
// Declare a view of a tensor.
// Arguments:
@@ -373,17 +373,17 @@ public:
std::set<InputDecl*> readers;
std::set<OutputDecl*> writers;
OutputDecl* value;
};
// static exop_ptr _default_default;
//================================================================================================
// ComputationDecl
//================================================================================================
class ComputationDecl : public ExecutionGraphElt
{
public:
// One computation to be run.
// Every computation has its own execution graph. Persistent tensors are shared
@@ -406,15 +406,15 @@ public:
exop_block_ptr exop_block;
exop_ptr returns;
std::set<ExOp*> values;
};
//================================================================================================
// ExecutionState
//================================================================================================
class ExecutionState
{
public:
// Proxy for the state of a device.
// Arguments:
@@ -429,15 +429,15 @@ public:
// persistent tensors
std::map<tensor_description_ptr, tensor_decl_ptr> __tensors_decls;
};
//================================================================================================
// ExecutionGraph
//================================================================================================
class ExecutionGraph
{
public:
// Information for compiling a computation_op.
// Arguments:
@@ -452,6 +452,6 @@ public:
// temporary tensors
std::map<tensor_description_ptr, tensor_decl_ptr> tensor_decls;
computation_decl_ptr computation_decl;
};
} // end namespace ngraph
@@ -14,49 +14,47 @@
#pragma once
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <type_traits>
#include <vector>
#include "element_type.hpp"
namespace ngraph
{
class ExecutionState;
class Op;
// class TensorDescription;
class ComputationOp;
using computation_op_ptr = std::shared_ptr<ComputationOp>;
using op_ptr = std::shared_ptr<Op>;
using scalar_t = float;
//================================================================================================
// TensorInterface
//================================================================================================
class TensorInterface
{
public:
virtual ~TensorInterface() {}
virtual const ElementType& element_type() const = 0;
virtual std::string value_string() const = 0;
};
//================================================================================================
// Tensor
//================================================================================================
template <typename T>
class Tensor : public TensorInterface
{
public:
Tensor(const T& val)
: m_value{val}
, m_element_type{element_type_float}
@@ -64,9 +62,7 @@ public:
}
virtual ~Tensor() {}
const ElementType& element_type() const override { return m_element_type; }
std::string value_string() const override
{
std::string rc = "WTF";
@@ -79,104 +75,104 @@ public:
return rc;
}
private:
T m_value;
ElementType m_element_type;
};
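//-----------------------------------------------------------------------------------------------
// Usage sketch: wrapping a scalar in the Tensor<T> template above and reading it back through
// the TensorInterface base:
//-----------------------------------------------------------------------------------------------
// Tensor<float> t{3.14f};
// const ElementType& et = t.element_type();  // element_type_float
// std::string s = t.value_string();          // string form of the stored value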
//================================================================================================
// Transformer
//================================================================================================
class Transformer
{
public:
virtual ~Transformer() {}
virtual ExecutionState& execution_state() = 0;
};
//================================================================================================
// TensorDescription
//================================================================================================
// class TensorDescription
// {
// public:
// virtual ~TensorDescription();
// virtual axes_key_t axes_key() const = 0;
// virtual std::string name() const = 0;
// virtual std::vector<size_t> shape() const = 0;
// virtual std::shared_ptr<TensorDescription> base() = 0;
// virtual ElementType element_type() const = 0;
// virtual size_t tensor_size() = 0;
// virtual bool is_persistent() = 0;
// virtual bool is_input() = 0;
// };
//================================================================================================
// Op
//================================================================================================
// class Op
// {
// // Any operation that can be in an AST.
// // Arguments:
// // args: Values used by this node.
// // const: The value of a constant Op, or None,
// // constant (bool): The Op is constant. Default False.
// // forward: If not None, the node to use instead of this node.
// // metadata: String key value dictionary for frontend metadata.
// // kwargs: Args defined in related classes.
// // Attributes:
// // const: The value of a constant.
// // constant (bool): The value is constant.
// // control_deps (OrderedSet): Ops in addition to args that must run before this op.
// // persistent (bool): The value will be retained from computation to computation and
// // not shared. Always True if reference is set.
// // metadata: Dictionary of string keys and values used for attaching
// // arbitrary metadata to nodes.
// // trainable: The value is trainable.
// public:
// virtual ~Op() {}
// virtual std::string name() const = 0;
// virtual tensor_description_ptr tensor_description() = 0;
// virtual op_ptr tensor() = 0;
// virtual bool is_tensor_op() = 0;
// virtual bool is_state_op() const = 0;
// virtual bool is_sequencing_op() const = 0;
// virtual op_ptr effective_tensor_op() = 0;
// virtual const std::vector<op_ptr>& all_deps() const = 0;
// // ops
// // TODO support multiple types
// static op_ptr constant(float value)
// {
// return std::make_shared<LiteralScalarOp>(value);
// }
// };
//================================================================================================
// TensorOp
//================================================================================================
// class TensorOp : public Op
// {
// public:
// std::string name() const override { return "TensorOp"; }
// tensor_description_ptr tensor_description() override { return nullptr; }
// op_ptr tensor() override { return nullptr; }
// bool is_tensor_op() override { return true; }
// bool is_state_op() const override { return false; }
// op_ptr effective_tensor_op() override { return nullptr; }
// const std::vector<op_ptr>& all_deps() const override { return m_all_deps; }
// private:
// std::vector<op_ptr> m_all_deps;
// };
} // end of namespace ngraph
......@@ -14,24 +14,21 @@
#pragma once
#include "mock.hpp"
#include "exop.hpp"
#include "mock.hpp"
namespace ngraph
{
//================================================================================================
// CpuTransformer
//================================================================================================
class CpuTransformer : public Transformer
{
public:
//================================================================================================
// CpuTransformer
//================================================================================================
class CpuTransformer : public Transformer
{
public:
virtual ~CpuTransformer() {}
ExecutionState& execution_state() override { return m_execution_state; }
private:
private:
ExecutionState m_execution_state;
};
};
} // end namespace ngraph
......@@ -14,8 +14,8 @@
#pragma once
#include <vector>
#include <memory>
#include <vector>
#include "element_type.hpp"
#include "strides.hpp"
......
......@@ -14,8 +14,8 @@
#include <sstream>
#include "op_graph.hpp"
#include "axes.hpp"
#include "op_graph.hpp"
#include "util.hpp"
using namespace ngraph;
......@@ -2794,7 +2794,9 @@ ElementWiseOp::ElementWiseOp()
{
}
void ElementWiseOp::ElementWiseOp_init(std::vector<op_ptr>, Axes) {}
void ElementWiseOp::ElementWiseOp_init(std::vector<op_ptr>, Axes)
{
}
//================================================================================================
// UnaryElementWiseOp
......
This source diff could not be displayed because it is too large.
#pragma once
#include <algorithm>
#include <functional>
#include <vector>
#include <initializer_list>
#include <iostream>
#include <algorithm>
#include <vector>
#include "util.hpp"
......@@ -51,7 +51,6 @@ public:
bool is_list() const { return m_is_list; }
T get_value() const { return m_value; }
const std::vector<tree>& get_list() const { return m_list; }
static void traverse_tree(tree& s, std::function<void(T*)> func)
{
if (s.is_list())
......
......@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <map>
#include <iomanip>
#include <map>
#include "util.hpp"
......
......@@ -14,23 +14,22 @@
#pragma once
#include <string>
#include <sstream>
#include <vector>
#include <chrono>
#include <algorithm>
#include <map>
#include <chrono>
#include <iostream>
#include <map>
#include <sstream>
#include <string>
#include <vector>
namespace ngraph
{
class stopwatch;
extern std::map<std::string, stopwatch*> stopwatch_statistics;
class stopwatch;
extern std::map<std::string, stopwatch*> stopwatch_statistics;
template <typename T>
std::string join(const T& v, const std::string& sep)
{
template <typename T>
std::string join(const T& v, const std::string& sep)
{
std::ostringstream ss;
for (const auto& x : v)
{
......@@ -41,11 +40,11 @@ std::string join(const T& v, const std::string& sep)
ss << x;
}
return ss.str();
}
}
template <typename U, typename T>
bool contains(const U& container, const T& obj)
{
template <typename U, typename T>
bool contains(const U& container, const T& obj)
{
bool rc = false;
for (auto o : container)
{
......@@ -56,11 +55,11 @@ bool contains(const U& container, const T& obj)
}
}
return rc;
}
}
template <typename U, typename T>
bool contains_key(const U& container, const T& obj)
{
template <typename U, typename T>
bool contains_key(const U& container, const T& obj)
{
bool rc = false;
for (auto o : container)
{
......@@ -71,28 +70,28 @@ bool contains_key(const U& container, const T& obj)
}
}
return rc;
}
}
template <typename U, typename T>
void remove_from(U& container, const T& obj)
{
template <typename U, typename T>
void remove_from(U& container, const T& obj)
{
auto it = container.find(obj);
if (it != container.end())
{
container.erase(it);
}
}
}
size_t hash_combine(const std::vector<size_t>& list);
void dump(std::ostream& out, const void*, size_t);
size_t hash_combine(const std::vector<size_t>& list);
void dump(std::ostream& out, const void*, size_t);
std::string to_lower(const std::string& s);
std::string trim(const std::string& s);
std::vector<std::string> split(const std::string& s, char delimiter, bool trim = false);
std::string to_lower(const std::string& s);
std::string trim(const std::string& s);
std::vector<std::string> split(const std::string& s, char delimiter, bool trim = false);
class stopwatch
{
public:
class stopwatch
{
public:
stopwatch() {}
stopwatch(const std::string& name)
: m_name{name}
......@@ -149,21 +148,21 @@ public:
size_t get_total_milliseconds() const { return get_total_nanoseconds() / 1e6; }
size_t get_total_microseconds() const { return get_total_nanoseconds() / 1e3; }
size_t get_total_nanoseconds() const { return m_total_time.count(); }
private:
private:
std::chrono::high_resolution_clock m_clock;
std::chrono::time_point<std::chrono::high_resolution_clock> m_start_time;
bool m_active = false;
std::chrono::nanoseconds m_total_time = std::chrono::high_resolution_clock::duration::zero();
std::chrono::nanoseconds m_total_time =
std::chrono::high_resolution_clock::duration::zero();
std::chrono::nanoseconds m_last_time;
size_t m_total_count = 0;
std::string m_name;
};
};
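// A possible usage sketch (not part of this commit). The elided portion of stopwatch above
// presumably includes start() and stop(), given m_start_time and m_active; that is an
// assumption, as is do_work(), a placeholder for the code being timed.
// stopwatch sw{"graph_build"};
// sw.start();
// do_work();
// sw.stop();
// std::cout << sw.get_total_milliseconds() << " ms\n"; // accumulated time across runs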
template <class InputIt, class BinaryOp>
typename std::iterator_traits<InputIt>::value_type
template <class InputIt, class BinaryOp>
typename std::iterator_traits<InputIt>::value_type
reduce(InputIt first, InputIt last, BinaryOp op)
{
{
typename std::iterator_traits<InputIt>::value_type result;
if (first == last)
......@@ -180,18 +179,18 @@ typename std::iterator_traits<InputIt>::value_type
}
}
return result;
}
}
template <typename T>
T plus(const T& a, const T& b)
{
template <typename T>
T plus(const T& a, const T& b)
{
return a + b;
}
}
template <typename T>
T mul(const T& a, const T& b)
{
template <typename T>
T mul(const T& a, const T& b)
{
return a * b;
}
}
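// A usage sketch (illustrative only) of how reduce composes with the plus/mul helpers above;
// it assumes a non-empty range, since the empty-range branch of reduce is elided in this diff.
// std::vector<int> v{1, 2, 3, 4};
// int sum     = ngraph::reduce(v.begin(), v.end(), ngraph::plus<int>); // 1 + 2 + 3 + 4 = 10
// int product = ngraph::reduce(v.begin(), v.end(), ngraph::mul<int>);  // 1 * 2 * 3 * 4 = 24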
} // end namespace ngraph
......@@ -15,10 +15,10 @@
#pragma once
#include <array>
#include <random>
#include <cstring>
#include <iomanip>
#include <iostream>
#include <cstring>
#include <random>
static std::mt19937_64 random_generator;
......@@ -74,7 +74,6 @@ public:
}
bool operator!=(const uuid_type& other) const { return !(*this == other); }
friend std::ostream& operator<<(std::ostream& out, const uuid_type& id)
{
out << id.to_string();
......
......@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <vector>
#include <string>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>
#include "gtest/gtest.h"
......@@ -310,7 +310,7 @@ TEST(axes, index)
EXPECT_EQ(7, b[1].length());
}
TEST(axes, as_nested_list)
TEST(axes, DISABLED_as_nested_list)
{
Axis C = make_axis(5);
Axis H = make_axis(3);
......@@ -325,7 +325,7 @@ TEST(axes, as_nested_list)
FAIL();
}
TEST(axes, flatten)
TEST(axes, DISABLED_flatten)
{
Axis C = make_axis(5);
Axis H = make_axis(3);
......@@ -336,7 +336,7 @@ TEST(axes, flatten)
EXPECT_TRUE(c.is_flattened());
}
TEST(axes, as_flattened_list)
TEST(axes, DISABLED_as_flattened_list)
{
FAIL();
}
......@@ -364,7 +364,7 @@ TEST(axes, hash_axes)
m2[axes] = 1;
}
TEST(axes, reaxe_0d_to_1d)
TEST(axes, DISABLED_reaxe_0d_to_1d)
{
TensorDescription td{};
ngraph::ndarray x = random(td);
......@@ -382,7 +382,7 @@ TEST(axes, reaxe_0d_to_1d)
FAIL();
}
TEST(axes, reaxe_0d_to_2d)
TEST(axes, DISABLED_reaxe_0d_to_2d)
{
// td = TensorDescription(axes=())
// x = random(td)
......@@ -407,7 +407,7 @@ TEST(axes, reaxe_0d_to_2d)
// I started refactoring into smaller pieces as seen in tests above, but
// stopped ...
//-----------------------------------------------------------------------------------------------
TEST(axes, simple_tensors)
TEST(axes, DISABLED_simple_tensors)
{
// # A simple vector
// td1 = TensorDescription(axes=[ax_A])
......@@ -582,7 +582,7 @@ TEST(axes, axes_map)
// assert axes_after == axes_map.map_axes(axes_before)
}
TEST(axes, axes_map_immutable)
TEST(axes, DISABLED_axes_map_immutable)
{
FAIL();
// axes_map = AxesMap({})
......@@ -591,7 +591,7 @@ TEST(axes, axes_map_immutable)
// axes_map["x"] = "y"
}
TEST(axes, axes_map_init_from_axes)
TEST(axes, DISABLED_axes_map_init_from_axes)
{
FAIL();
// axes_map = AxesMap({ng.make_axis(1, name="aaa"): ng.make_axis(1, name="zzz")})
......
......@@ -12,9 +12,9 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <vector>
#include <string>
#include <sstream>
#include <string>
#include <vector>
#include "gtest/gtest.h"
......
......@@ -12,9 +12,9 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <vector>
#include <string>
#include <sstream>
#include <string>
#include <vector>
#include "gtest/gtest.h"
......
......@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <iostream>
#include <chrono>
#include <iostream>
#include "gtest/gtest.h"
......
......@@ -12,9 +12,9 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <vector>
#include <string>
#include <sstream>
#include <string>
#include <vector>
#include "gtest/gtest.h"
......@@ -22,4 +22,6 @@
using namespace ngraph;
TEST(names, name) {}
TEST(names, name)
{
}
......@@ -12,9 +12,9 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <vector>
#include <string>
#include <sstream>
#include <string>
#include <vector>
#include "gtest/gtest.h"
......
......@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <vector>
#include <string>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>
#include "gtest/gtest.h"
......
......@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <vector>
#include <string>
#include <sstream>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
#include "gtest/gtest.h"
......
......@@ -12,9 +12,9 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <vector>
#include <string>
#include <sstream>
#include <string>
#include <vector>
#include "gtest/gtest.h"
......@@ -134,7 +134,9 @@ TEST(util, contains)
EXPECT_FALSE(contains(v1, 8));
}
TEST(util, remove_from) {}
TEST(util, remove_from)
{
}
TEST(util, reduce)
{
......
......@@ -12,9 +12,9 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <vector>
#include <string>
#include <sstream>
#include <string>
#include <vector>
#include "gtest/gtest.h"
......