Commit a138509b authored by Bob Kimball

clang-format-3.9 style
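This commit is a pure formatting pass: clang-format-3.9 sorts the #include lists alphabetically, re-wraps long declarations near the column limit, and expands empty bodies such as TEST(names, name) {} onto separate brace lines while keeping short inline accessors on one line. A .clang-format file along the lines of the sketch below would reproduce roughly this layout; the option values are inferred from the reformatted lines rather than taken from the repository, so treat every one of them as an assumption.

# Hypothetical .clang-format reconstructing the style applied in this commit.
# All values are inferences from the diff, not the project's actual configuration.
BasedOnStyle: LLVM
IndentWidth: 4
ColumnLimit: 100
BreakBeforeBraces: Allman
SortIncludes: true
AllowShortFunctionsOnASingleLine: Inline
AllowShortBlocksOnASingleLine: false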

parent 22e2307b
@@ -43,10 +43,10 @@ public:
private:
static std::map<std::string, ElementType> m_element_list;
size_t m_bitwidth;
bool m_is_float;
bool m_is_signed;
const std::string m_cname;
size_t m_bitwidth;
bool m_is_float;
bool m_is_signed;
const std::string m_cname;
};
extern const ngraph::ElementType element_type_float;
@@ -36,8 +36,7 @@ namespace nervana
return i < _size ? _string[i] : throw std::out_of_range("");
}
constexpr const char* get_ptr(size_t offset) const { return &_string[offset]; }
constexpr size_t size() const { return _size; }
constexpr size_t size() const { return _size; }
private:
const char* _string;
size_t _size;
@@ -45,9 +44,8 @@ namespace nervana
constexpr const char* find_last(conststring s, size_t offset, char ch)
{
return offset == 0
? s.get_ptr(0)
: (s[offset] == ch ? s.get_ptr(offset + 1) : find_last(s, offset - 1, ch));
return offset == 0 ? s.get_ptr(0) : (s[offset] == ch ? s.get_ptr(offset + 1)
: find_last(s, offset - 1, ch));
}
constexpr const char* find_last(conststring s, char ch)
@@ -69,7 +67,6 @@ namespace nervana
~log_helper();
std::ostream& stream() { return _stream; }
private:
std::stringstream _stream;
};
@@ -84,9 +81,9 @@ namespace nervana
static void stop();
private:
static void log_item(const std::string& s);
static void process_event(const std::string& s);
static void thread_entry(void* param);
static void log_item(const std::string& s);
static void process_event(const std::string& s);
static void thread_entry(void* param);
static std::string log_path;
static std::deque<std::string> queue;
};
@@ -18,7 +18,7 @@
using namespace ngraph;
size_t NameableValue::__counter = 0;
size_t NameableValue::__counter = 0;
std::map<std::string, NameableValue> NameableValue::__all_names;
NameableValue::NameableValue(const std::string& name,
@@ -95,7 +95,7 @@ namespace ngraph
//!-----------------------------------------------------------------------------------
NameableValue& named(const std::string& name);
static size_t __counter;
static size_t __counter;
static std::map<std::string, NameableValue> __all_names;
std::string m_name;
@@ -27,10 +27,9 @@ public:
ElementType et = element_type_float);
const ElementType& get_type() const { return m_element_type; }
tensor_stride full_strides() const;
tensor_stride strides() const;
tensor_size sizes() const;
tensor_stride full_strides() const;
tensor_stride strides() const;
tensor_size sizes() const;
tensor_size operator[](size_t index) const;
@@ -53,9 +52,8 @@ class ngraph::tensor_stride
public:
tensor_stride();
const ElementType& get_type() const { return m_element_type; }
tensor_stride full_strides() const;
tensor_stride strides() const;
tensor_stride full_strides() const;
tensor_stride strides() const;
tensor_stride reduce_strides() const;
@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <cassert>
#include <cmath>
#include <iostream>
#include <sstream>
#include <cmath>
#include <cassert>
#include "axes.hpp"
#include "util.hpp"
@@ -268,7 +268,7 @@ Axis ngraph::slice_axis(const Axis& axis, const slice& s)
std::vector<std::string> ngraph::duplicates(const std::vector<Axis>& ax)
{
std::map<std::string, size_t> counts;
std::vector<std::string> rc;
std::vector<std::string> rc;
for (const Axis& axis : ax)
{
auto it = counts.find(axis.name);
This source diff could not be displayed because it is too large.
@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <cmath>
#include <exception>
#include <memory>
#include <sstream>
#include <exception>
#include <cmath>
#include "exop.hpp"
#include "op_graph.hpp"
@@ -404,10 +404,10 @@ void ExOpBlock::add_ops(std::initializer_list<computation_op_ptr> roots, exop_pt
}
}
std::vector<op_ptr> available;
std::vector<op_ptr> available;
std::map<op_ptr, size_t> counts;
std::map<op_ptr, std::vector<op_ptr>> parents;
std::vector<op_ptr> ready;
std::vector<op_ptr> ready;
available.insert(available.end(), roots.begin(), roots.end());
while (available.size() > 0)
@@ -1012,7 +1012,7 @@ tensor_decl_ptr ExecutionState::ensure_tensor_decl(ExecutionGraph& execut
bool is_constant = false;
bool is_compile_only = false;
tensor_decl = std::make_shared<TensorDecl>(execution_graph,
tensor_decl = std::make_shared<TensorDecl>(execution_graph,
tensor_description_base->element_type(),
tensor_description_base->tensor_size(),
tensor_description_base->is_persistent(),
@@ -1057,7 +1057,7 @@ tensor_decl_ptr ExecutionGraph::get_tensor_decl(op_ptr op,
bool is_constant = false;
bool is_compile_only = false;
tensor_decl = std::make_shared<TensorDecl>(*this,
tensor_decl = std::make_shared<TensorDecl>(*this,
tensor_description_base->element_type(),
tensor_description_base->tensor_size(),
tensor_description_base->is_persistent(),
@@ -15,443 +15,443 @@
#pragma once
#include <iostream>
#include <string>
#include <list>
#include <map>
#include <memory>
#include <vector>
#include <sstream>
#include <set>
#include <list>
#include <sstream>
#include <string>
#include <vector>
#include "axes.hpp"
#include "mock.hpp"
#include "op_graph.hpp"
#include "axes.hpp"
namespace ngraph
{
// forward declaration. This will hopefully go away
class ExecutionGraph;
class TensorDescription;
class InputDecl;
class OutputDecl;
class TensorDecl;
class TensorViewDecl;
class ExOp;
class Op;
class ComputationDecl;
class ExOpBlock;
class ExecutionState;
using output_decl_ptr = std::shared_ptr<OutputDecl>;
using input_decl_ptr = std::shared_ptr<InputDecl>;
using tensor_decl_ptr = std::shared_ptr<TensorDecl>;
using tensor_view_decl_ptr = std::shared_ptr<TensorViewDecl>;
using exop_ptr = std::shared_ptr<ExOp>;
using computation_decl_ptr = std::shared_ptr<ComputationDecl>;
using execution_graph_ptr = std::shared_ptr<ExecutionGraph>;
using exop_block_ptr = std::shared_ptr<ExOpBlock>;
using tensor_ptr = std::shared_ptr<TensorInterface>;
using transformer_ptr = std::shared_ptr<Transformer>;
using execution_state_ptr = std::shared_ptr<ExecutionState>;
//================================================================================================
// OutputDecl
// One value computed by an exop
//
// Arguments:
// exop: The exop.
// pos: The position of the value, defaults to 0.
// tensor_description: Tensor description of the value.
// write_view: The tensor view where the value is written.
//
// Attributes:
// exop: The exop.
// pos: The position of the value.
// tensor_description: Tensor description of the value.
// write_view: The tensor view where the value is written.
// value_users: Arguments using this value.
//================================================================================================
class OutputDecl
{
public:
OutputDecl(const ExOp& _exop, size_t _pos, tensor_decl_ptr, tensor_description_ptr);
tensor_decl_ptr tensor_decl();
void tensor_decl(tensor_decl_ptr tensor_decl);
tensor_view_decl_ptr write_view();
void write_view(tensor_view_decl_ptr view);
friend std::ostream& operator<<(std::ostream& out, const OutputDecl& obj);
// def __repr__()
// {
// return "Val({exop}:{pos})".format(exop=self.exop.name, pos=self.pos)
// }
bool is_tensor_op() const;
const ExOp& exop;
size_t pos;
tensor_description_ptr tensor_description;
tensor_decl_ptr __tensor;
tensor_view_decl_ptr __write_view;
std::set<InputDecl*> value_users;
};
//================================================================================================
// InputDecl
// An argument for an exop.
//
// Arguments:
// exop: The exop.
// pos: The position of the value, defaults to 0.
// tensor_description: Tensor description of the value.
// read_view: The tensor view where the value is read from.
//
// Attributes:
// exop: The exop.
// pos: The position of the value.
// tensor_description: Tensor description of the value.
// read_view: The tensor view where the value is read from.
// value: Arguments supplying this value.
//================================================================================================
class InputDecl
{
public:
InputDecl(const ExOp& _exop,
size_t _pos,
tensor_description_ptr _tensor_description,
OutputDecl* _value);
TensorDecl& tensor_decl();
OutputDecl* value();
const OutputDecl* value() const;
void value(OutputDecl* value);
friend std::ostream& operator<<(std::ostream& out, const InputDecl& obj);
const ExOp& exop;
size_t pos;
tensor_description_ptr tensor_description;
tensor_view_decl_ptr read_view;
OutputDecl* m_value;
};
//================================================================================================
// ExecutionGraphElt
// An element of an execution graph.
//
// Arguments:
// execution_graph: The execution graph that indexes this exop.
//
// Attributes:
// execution_graph: The execution graph that indexes this exop.
//================================================================================================
class ExecutionGraphElt
{
public:
ExecutionGraphElt(ExecutionGraph& eg)
: execution_graph{eg}
// forward declaration. This will hopefully go away
class ExecutionGraph;
class TensorDescription;
class InputDecl;
class OutputDecl;
class TensorDecl;
class TensorViewDecl;
class ExOp;
class Op;
class ComputationDecl;
class ExOpBlock;
class ExecutionState;
using output_decl_ptr = std::shared_ptr<OutputDecl>;
using input_decl_ptr = std::shared_ptr<InputDecl>;
using tensor_decl_ptr = std::shared_ptr<TensorDecl>;
using tensor_view_decl_ptr = std::shared_ptr<TensorViewDecl>;
using exop_ptr = std::shared_ptr<ExOp>;
using computation_decl_ptr = std::shared_ptr<ComputationDecl>;
using execution_graph_ptr = std::shared_ptr<ExecutionGraph>;
using exop_block_ptr = std::shared_ptr<ExOpBlock>;
using tensor_ptr = std::shared_ptr<TensorInterface>;
using transformer_ptr = std::shared_ptr<Transformer>;
using execution_state_ptr = std::shared_ptr<ExecutionState>;
//================================================================================================
// OutputDecl
// One value computed by an exop
//
// Arguments:
// exop: The exop.
// pos: The position of the value, defaults to 0.
// tensor_description: Tensor description of the value.
// write_view: The tensor view where the value is written.
//
// Attributes:
// exop: The exop.
// pos: The position of the value.
// tensor_description: Tensor description of the value.
// write_view: The tensor view where the value is written.
// value_users: Arguments using this value.
//================================================================================================
class OutputDecl
{
public:
OutputDecl(const ExOp& _exop, size_t _pos, tensor_decl_ptr, tensor_description_ptr);
tensor_decl_ptr tensor_decl();
void tensor_decl(tensor_decl_ptr tensor_decl);
tensor_view_decl_ptr write_view();
void write_view(tensor_view_decl_ptr view);
friend std::ostream& operator<<(std::ostream& out, const OutputDecl& obj);
// def __repr__()
// {
// return "Val({exop}:{pos})".format(exop=self.exop.name, pos=self.pos)
// }
bool is_tensor_op() const;
const ExOp& exop;
size_t pos;
tensor_description_ptr tensor_description;
tensor_decl_ptr __tensor;
tensor_view_decl_ptr __write_view;
std::set<InputDecl*> value_users;
};
//================================================================================================
// InputDecl
// An argument for an exop.
//
// Arguments:
// exop: The exop.
// pos: The position of the value, defaults to 0.
// tensor_description: Tensor description of the value.
// read_view: The tensor view where the value is read from.
//
// Attributes:
// exop: The exop.
// pos: The position of the value.
// tensor_description: Tensor description of the value.
// read_view: The tensor view where the value is read from.
// value: Arguments supplying this value.
//================================================================================================
class InputDecl
{
}
public:
InputDecl(const ExOp& _exop,
size_t _pos,
tensor_description_ptr _tensor_description,
OutputDecl* _value);
TensorDecl& tensor_decl();
OutputDecl* value();
const OutputDecl* value() const;
void value(OutputDecl* value);
friend std::ostream& operator<<(std::ostream& out, const InputDecl& obj);
const ExOp& exop;
size_t pos;
tensor_description_ptr tensor_description;
tensor_view_decl_ptr read_view;
OutputDecl* m_value;
};
//================================================================================================
// ExecutionGraphElt
// An element of an execution graph.
//
// Arguments:
// execution_graph: The execution graph that indexes this exop.
//
// Attributes:
// execution_graph: The execution graph that indexes this exop.
//================================================================================================
class ExecutionGraphElt
{
public:
ExecutionGraphElt(ExecutionGraph& eg)
: execution_graph{eg}
{
}
ExecutionGraph& execution_graph;
};
ExecutionGraph& execution_graph;
};
//================================================================================================
// ExOp
//================================================================================================
//================================================================================================
// ExOp
//================================================================================================
class ExOp : public ExecutionGraphElt
{
public:
// An exop that indicates an op to be executed.
// The op might be different from what was originally found in the computation graph.
// The args are exops that reflect the current version of the graph, and may differ
// from the exops of the op's args.
// The views_in are the current tensor views for the args.
// The views_out are the current tensor views for any results.
// Arguments:
// op: The op to execute.
// Parameters:
// op: The computation graph op.
// views_in: Tensor views of the args.
// views_out: Tensor views of the result.
// Attributes:
// op: The computation graph op to execute.
// args: exops for the arguments.
// views_in: Views for the arguments.
// views_out: Views for the results.
// tensor: Tensor of the primary output.
// tensor_view: View of the primary output.
// ref_ops: All computation graph ops covered by this op
// op_map: A map from ops to ref ops, sha
ExOp(ComputationDecl& cgraph, op_ptr _op, bool create_value = true);
friend std::ostream& operator<<(std::ostream& out, const ExOp& obj);
// factory methods to make exops
static exop_ptr literal_scalar_exop(scalar_t scalar, ComputationDecl& computation_graph);
// A node in the graph, with inputs and outputs.
InputDecl& add_arg(OutputDecl& value, tensor_description_ptr tensor_description = nullptr);
InputDecl& add_write_arg(OutputDecl& value,
class ExOp : public ExecutionGraphElt
{
public:
// An exop that indicates an op to be executed.
// The op might be different from what was originally found in the computation graph.
// The args are exops that reflect the current version of the graph, and may differ
// from the exops of the op's args.
// The views_in are the current tensor views for the args.
// The views_out are the current tensor views for any results.
// Arguments:
// op: The op to execute.
// Parameters:
// op: The computation graph op.
// views_in: Tensor views of the args.
// views_out: Tensor views of the result.
// Attributes:
// op: The computation graph op to execute.
// args: exops for the arguments.
// views_in: Views for the arguments.
// views_out: Views for the results.
// tensor: Tensor of the primary output.
// tensor_view: View of the primary output.
// ref_ops: All computation graph ops covered by this op
// op_map: A map from ops to ref ops, sha
ExOp(ComputationDecl& cgraph, op_ptr _op, bool create_value = true);
friend std::ostream& operator<<(std::ostream& out, const ExOp& obj);
// factory methods to make exops
static exop_ptr literal_scalar_exop(scalar_t scalar, ComputationDecl& computation_graph);
// A node in the graph, with inputs and outputs.
InputDecl& add_arg(OutputDecl& value, tensor_description_ptr tensor_description = nullptr);
InputDecl& add_write_arg(OutputDecl& value,
tensor_description_ptr tensor_description = nullptr);
OutputDecl& add_value(tensor_decl_ptr tensor_decl,
tensor_description_ptr tensor_description = nullptr);
OutputDecl& add_value(tensor_decl_ptr tensor_decl,
tensor_description_ptr tensor_description = nullptr);
op_ptr get_op();
void set_op(op_ptr _op);
void add_ref_op(op_ptr _op);
size_t memory_usage();
size_t memory_footprint();
size_t memory_efficiency();
bool is_exop_end_of_list();
std::string name() const;
ComputationDecl& computation_graph;
tensor_decl_ptr tensor_decl;
tensor_view_decl_ptr tensor_view;
std::vector<op_ptr> ref_ops;
op_ptr op;
std::vector<tensor_decl_ptr> liveness_live_list;
std::vector<tensor_decl_ptr> liveness_free_list;
std::vector<tensor_decl_ptr> liveness_new_list;
std::vector<InputDecl> args;
std::vector<InputDecl*> write_args; // TODO: Kludge until we have values with writers/readers
std::vector<OutputDecl> values;
};
//================================================================================================
// TensorDecl
//================================================================================================
class TensorDecl : public ExecutionGraphElt
{
public:
// Allocate for a tensor.
// Arguments:
// op: The AllocateTensorOp
// element_type: The type of the elements.
// size: The number of elements.
// is_persistent: True if the tensor is persistent.
// is_input: True if the tensor can be used as an argument.
// tensor_description_base: The base tensor description for the tensor.
// source_tensor: For a clone, the tensor that started the chain of clones
// this tensor is cloned from.
// Parameters:
// op: The AllocateTensorOp
// element_type: The type of the elements.
// size: The number of elements.
// is_persistent: True if the tensor is persistent.
// is_input: True if the tensor can be used as an argument.
// is_output: True if the tensor needs to be available for output. Defaults to is_persistent.
// tensor_descriptions: The set of tensor descriptions for the tensor.
// tensor_description_base: The tensor description base for this tensor.
// is_compile_only: If True, this tensor is only needed during compilation, and should not be
// allocated.
TensorDecl(ExecutionGraph&,
ElementType,
size_t,
bool _is_persistent,
bool _is_input,
tensor_description_ptr,
bool _is_output,
bool _is_constant,
tensor_description_ptr tensor_description,
bool _is_compile_only);
tensor_view_decl_ptr get_tensor_view(tensor_description_ptr tensor_description = nullptr,
InputDecl* reader = nullptr,
OutputDecl* writer = nullptr);
tensor_view_decl_ptr get_tensor_view(tensor_description_ptr tensor_description = nullptr,
InputDecl* reader = nullptr);
tensor_view_decl_ptr get_tensor_view(tensor_description_ptr tensor_description = nullptr,
OutputDecl* writer = nullptr);
void merge_flags(const TensorDecl& tensor);
tensor_description_ptr buffer_key();
std::string prefix();
std::string variable_name();
std::string tensor_name();
std::string buffer_name();
// std::string name();
friend std::ostream& operator<<(std::ostream& out, const TensorDecl& obj);
// op_ptr op;
ElementType element_type;
size_t size;
bool is_persistent;
bool is_input;
bool is_output;
size_t buffer_pool_offset;
std::map<axes_key_t, tensor_view_decl_ptr> tensor_view_decls;
tensor_description_ptr tensor_description_base;
size_t lifespan;
bool is_constant;
bool is_compile_only;
tensor_ptr initial_value;
tensor_decl_ptr source_tensor;
};
//================================================================================================
// ExOpBlock
//================================================================================================
class ExOpBlock : public ExecutionGraphElt
{
public:
// Sequentially execute a list of exops.
// Attributes:
// computation_graph: The associated computation graph.
// prev_exop: The last exop.
// next_exop: The first exop.
// root_set: Set of exops whose values are needed.
ExOpBlock(ComputationDecl& cgraph);
bool is_exop_end_of_list();
void add_ops(std::initializer_list<computation_op_ptr> roots, exop_ptr after_exop = nullptr);
exop_ptr add_op(op_ptr op, exop_ptr after_exop);
exop_ptr add_exop(exop_ptr exop, exop_ptr after_exop = nullptr);
void move_exop_to_after_exop(exop_ptr exop, exop_ptr after_exop);
void remove_exop(exop_ptr exop);
void replace_op(op_ptr old_op, op_ptr new_op);
void replace_users(exop_ptr old_exop, exop_ptr new_exop);
void replace_value(OutputDecl* old_value, OutputDecl* new_value);
void replace_exop(exop_ptr old_exop, exop_ptr new_exop);
void merge_exop(exop_ptr old_exop, exop_ptr new_exop);
size_t memory_footprint();
size_t worst_case_footprint();
size_t memory_efficiency();
size_t persistent_size();
std::set<OutputDecl*> get_vars();
std::set<OutputDecl*> get_temp_vars();
std::set<OutputDecl*> get_persistent_vars();
ComputationDecl& computation_graph;
std::set<ExOp*> root_set;
// replacement for next_exop, prev_exop
std::list<exop_ptr>::iterator begin() { return op_list.begin(); }
std::list<exop_ptr>::iterator end() { return op_list.end(); }
std::list<exop_ptr> op_list;
};
//================================================================================================
// TensorViewDecl
//================================================================================================
class TensorViewDecl : public ExecutionGraphElt
{
public:
// Declare a view of a tensor.
// Arguments:
// tensor: The tensor.
// tensor_description: The description of the view.
TensorViewDecl(TensorDecl&, tensor_description_ptr, ExecutionGraph&);
std::string name() const;
// op_ptr op();
tensor_view_decl_ptr get_tensor_view(tensor_description_ptr, InputDecl*, OutputDecl*);
tensor_view_decl_ptr get_tensor_view(tensor_description_ptr, InputDecl*);
tensor_view_decl_ptr get_tensor_view(tensor_description_ptr, OutputDecl*);
// def key()
// {
// """
// // Returns: A tuple unique to this view of the tensor.
// """
// return tensor_description->parameter_key
// }
TensorDecl& tensor_decl;
tensor_description_ptr tensor_description;
// initializers;
std::set<InputDecl*> readers;
std::set<OutputDecl*> writers;
OutputDecl* value;
};
// static exop_ptr _default_default;
//================================================================================================
// ComputationDecl
//================================================================================================
class ComputationDecl : public ExecutionGraphElt
{
public:
// One computation to be run.
// Every computation has its own execution graph. Persistent tensors are shared
// between computations; other tensors are not.
// Attributes:
// computation: The computation op.
// ops: A map from ops to the exop that handles the op in this computation.
// exop: The SSA block of exops for this computation.
// values: The ops whose values are returned from the computation.
// tensors: Map from base tensor descriptions to tensors.
ComputationDecl(ExecutionGraph& eg, computation_op_ptr op);
tensor_decl_ptr get_tensor_decl(op_ptr _op = nullptr);
ExOp* get_exop(op_ptr _op);
computation_op_ptr computation_op;
std::map<op_ptr, ExOp*> ops;
std::vector<tensor_decl_ptr> tensors;
std::map<Op*, InputDecl*> op_returns; // op_returns_anchor?
exop_block_ptr exop_block;
exop_ptr returns;
std::set<ExOp*> values;
};
//================================================================================================
// ExecutionState
//================================================================================================
class ExecutionState
{
public:
// Proxy for the state of a device.
op_ptr get_op();
void set_op(op_ptr _op);
void add_ref_op(op_ptr _op);
size_t memory_usage();
size_t memory_footprint();
size_t memory_efficiency();
bool is_exop_end_of_list();
std::string name() const;
ComputationDecl& computation_graph;
tensor_decl_ptr tensor_decl;
tensor_view_decl_ptr tensor_view;
std::vector<op_ptr> ref_ops;
op_ptr op;
std::vector<tensor_decl_ptr> liveness_live_list;
std::vector<tensor_decl_ptr> liveness_free_list;
std::vector<tensor_decl_ptr> liveness_new_list;
std::vector<InputDecl> args;
std::vector<InputDecl*>
write_args; // TODO: Kludge until we have values with writers/readers
std::vector<OutputDecl> values;
};
//================================================================================================
// TensorDecl
//================================================================================================
class TensorDecl : public ExecutionGraphElt
{
public:
// Allocate for a tensor.
// Arguments:
// op: The AllocateTensorOp
// element_type: The type of the elements.
// size: The number of elements.
// is_persistent: True if the tensor is persistent.
// is_input: True if the tensor can be used as an argument.
// tensor_description_base: The base tensor description for the tensor.
// source_tensor: For a clone, the tensor that started the chain of clones
// this tensor is cloned from.
// Parameters:
// op: The AllocateTensorOp
// element_type: The type of the elements.
// size: The number of elements.
// is_persistent: True if the tensor is persistent.
// is_input: True if the tensor can be used as an argument.
// is_output: True if the tensor needs to be available for output. Defaults to is_persistent.
// tensor_descriptions: The set of tensor descriptions for the tensor.
// tensor_description_base: The tensor description base for this tensor.
// is_compile_only: If True, this tensor is only needed during compilation, and should not be
// allocated.
TensorDecl(ExecutionGraph&,
ElementType,
size_t,
bool _is_persistent,
bool _is_input,
tensor_description_ptr,
bool _is_output,
bool _is_constant,
tensor_description_ptr tensor_description,
bool _is_compile_only);
tensor_view_decl_ptr get_tensor_view(tensor_description_ptr tensor_description = nullptr,
InputDecl* reader = nullptr,
OutputDecl* writer = nullptr);
tensor_view_decl_ptr get_tensor_view(tensor_description_ptr tensor_description = nullptr,
InputDecl* reader = nullptr);
tensor_view_decl_ptr get_tensor_view(tensor_description_ptr tensor_description = nullptr,
OutputDecl* writer = nullptr);
void merge_flags(const TensorDecl& tensor);
tensor_description_ptr buffer_key();
std::string prefix();
std::string variable_name();
std::string tensor_name();
std::string buffer_name();
// std::string name();
friend std::ostream& operator<<(std::ostream& out, const TensorDecl& obj);
// op_ptr op;
ElementType element_type;
size_t size;
bool is_persistent;
bool is_input;
bool is_output;
size_t buffer_pool_offset;
std::map<axes_key_t, tensor_view_decl_ptr> tensor_view_decls;
tensor_description_ptr tensor_description_base;
size_t lifespan;
bool is_constant;
bool is_compile_only;
tensor_ptr initial_value;
tensor_decl_ptr source_tensor;
};
//================================================================================================
// ExOpBlock
//================================================================================================
class ExOpBlock : public ExecutionGraphElt
{
public:
// Sequentially execute a list of exops.
// Attributes:
// computation_graph: The associated computation graph.
// prev_exop: The last exop.
// next_exop: The first exop.
// root_set: Set of exops whose values are needed.
ExOpBlock(ComputationDecl& cgraph);
bool is_exop_end_of_list();
void add_ops(std::initializer_list<computation_op_ptr> roots,
exop_ptr after_exop = nullptr);
exop_ptr add_op(op_ptr op, exop_ptr after_exop);
exop_ptr add_exop(exop_ptr exop, exop_ptr after_exop = nullptr);
void move_exop_to_after_exop(exop_ptr exop, exop_ptr after_exop);
void remove_exop(exop_ptr exop);
void replace_op(op_ptr old_op, op_ptr new_op);
void replace_users(exop_ptr old_exop, exop_ptr new_exop);
void replace_value(OutputDecl* old_value, OutputDecl* new_value);
void replace_exop(exop_ptr old_exop, exop_ptr new_exop);
void merge_exop(exop_ptr old_exop, exop_ptr new_exop);
size_t memory_footprint();
size_t worst_case_footprint();
size_t memory_efficiency();
size_t persistent_size();
std::set<OutputDecl*> get_vars();
std::set<OutputDecl*> get_temp_vars();
std::set<OutputDecl*> get_persistent_vars();
ComputationDecl& computation_graph;
std::set<ExOp*> root_set;
// replacement for next_exop, prev_exop
std::list<exop_ptr>::iterator begin() { return op_list.begin(); }
std::list<exop_ptr>::iterator end() { return op_list.end(); }
std::list<exop_ptr> op_list;
};
//================================================================================================
// TensorViewDecl
//================================================================================================
class TensorViewDecl : public ExecutionGraphElt
{
public:
// Declare a view of a tensor.
// Arguments:
// tensor: The tensor.
// tensor_description: The description of the view.
TensorViewDecl(TensorDecl&, tensor_description_ptr, ExecutionGraph&);
std::string name() const;
// op_ptr op();
tensor_view_decl_ptr get_tensor_view(tensor_description_ptr, InputDecl*, OutputDecl*);
tensor_view_decl_ptr get_tensor_view(tensor_description_ptr, InputDecl*);
tensor_view_decl_ptr get_tensor_view(tensor_description_ptr, OutputDecl*);
// def key()
// {
// """
// // Returns: A tuple unique to this view of the tensor.
// """
// return tensor_description->parameter_key
// }
TensorDecl& tensor_decl;
tensor_description_ptr tensor_description;
// initializers;
std::set<InputDecl*> readers;
std::set<OutputDecl*> writers;
OutputDecl* value;
};
// static exop_ptr _default_default;
//================================================================================================
// ComputationDecl
//================================================================================================
class ComputationDecl : public ExecutionGraphElt
{
public:
// One computation to be run.
// Every computation has its own execution graph. Persistent tensors are shared
// between computations; other tensors are not.
// Attributes:
// computation: The computation op.
// ops: A map from ops to the exop that handles the op in this computation.
// exop: The SSA block of exops for this computation.
// values: The ops whose values are returned from the computation.
// tensors: Map from base tensor descriptions to tensors.
ComputationDecl(ExecutionGraph& eg, computation_op_ptr op);
tensor_decl_ptr get_tensor_decl(op_ptr _op = nullptr);
ExOp* get_exop(op_ptr _op);
computation_op_ptr computation_op;
std::map<op_ptr, ExOp*> ops;
std::vector<tensor_decl_ptr> tensors;
std::map<Op*, InputDecl*> op_returns; // op_returns_anchor?
exop_block_ptr exop_block;
exop_ptr returns;
std::set<ExOp*> values;
};
//================================================================================================
// ExecutionState
//================================================================================================
class ExecutionState
{
public:
// Proxy for the state of a device.
// Arguments:
// transformer: The associated transformer.
ExecutionState(transformer_ptr transformer = nullptr);
transformer_ptr transformer();
execution_graph_ptr make_execution_graph(computation_op_ptr);
tensor_decl_ptr get_op_tensor(op_ptr op);
tensor_decl_ptr ensure_tensor_decl(ExecutionGraph&, tensor_description_ptr, op_ptr);
// Arguments:
// transformer: The associated transformer.
ExecutionState(transformer_ptr transformer = nullptr);
transformer_ptr transformer();
execution_graph_ptr make_execution_graph(computation_op_ptr);
tensor_decl_ptr get_op_tensor(op_ptr op);
tensor_decl_ptr ensure_tensor_decl(ExecutionGraph&, tensor_description_ptr, op_ptr);
transformer_ptr __transformer;
transformer_ptr __transformer;
// persistent tensors
std::map<tensor_description_ptr, tensor_decl_ptr> __tensors_decls;
};
// persistent tensors
std::map<tensor_description_ptr, tensor_decl_ptr> __tensors_decls;
};
//================================================================================================
// ExecutionGraph
//================================================================================================
//================================================================================================
// ExecutionGraph
//================================================================================================
class ExecutionGraph
{
public:
// Information for compiling a computation_op.
// Arguments:
// execution_state: The execution state the graph will be applied to. The definitions in
// the execution state can be used in the execution graph.
// computation_op: A computation to be processed
ExecutionGraph(ExecutionState& execution_state, computation_op_ptr computation_op);
tensor_decl_ptr get_tensor_decl(op_ptr, tensor_description_ptr = nullptr);
ExecutionState& execution_state;
// temporary tensors
std::map<tensor_description_ptr, tensor_decl_ptr> tensor_decls;
computation_decl_ptr computation_decl;
};
class ExecutionGraph
{
public:
// Information for compiling a computation_op.
// Arguments:
// execution_state: The execution state the graph will be applied to. The definitions in
// the execution state can be used in the execution graph.
// computation_op: A computation to be processed
ExecutionGraph(ExecutionState& execution_state, computation_op_ptr computation_op);
tensor_decl_ptr get_tensor_decl(op_ptr, tensor_description_ptr = nullptr);
ExecutionState& execution_state;
// temporary tensors
std::map<tensor_description_ptr, tensor_decl_ptr> tensor_decls;
computation_decl_ptr computation_decl;
};
} // end namespace ngraph
@@ -14,169 +14,165 @@
#pragma once
#include <string>
#include <memory>
#include <map>
#include <vector>
#include <type_traits>
#include <memory>
#include <sstream>
#include <string>
#include <type_traits>
#include <vector>
#include "element_type.hpp"
namespace ngraph
{
class ExecutionState;
class ExecutionState;
class Op;
// class TensorDescription;
class ComputationOp;
using computation_op_ptr = std::shared_ptr<ComputationOp>;
using op_ptr = std::shared_ptr<Op>;
using scalar_t = float;
//================================================================================================
// TensorInterface
//================================================================================================
class Op;
// class TensorDescription;
class ComputationOp;
class TensorInterface
{
public:
virtual ~TensorInterface() {}
virtual const ElementType& element_type() const = 0;
virtual std::string value_string() const = 0;
};
using computation_op_ptr = std::shared_ptr<ComputationOp>;
using op_ptr = std::shared_ptr<Op>;
using scalar_t = float;
//================================================================================================
// Tensor
//================================================================================================
//================================================================================================
// TensorInterface
//================================================================================================
template <typename T>
class Tensor : public TensorInterface
{
public:
Tensor(const T& val)
: m_value{val}
, m_element_type{element_type_float}
class TensorInterface
{
}
virtual ~Tensor() {}
const ElementType& element_type() const override { return m_element_type; }
std::string value_string() const override
public:
virtual ~TensorInterface() {}
virtual const ElementType& element_type() const = 0;
virtual std::string value_string() const = 0;
};
//================================================================================================
// Tensor
//================================================================================================
template <typename T>
class Tensor : public TensorInterface
{
std::string rc = "WTF";
if (std::is_floating_point<T>::value)
public:
Tensor(const T& val)
: m_value{val}
, m_element_type{element_type_float}
{
std::stringstream ss;
ss << m_value;
rc = ss.str();
}
return rc;
}
private:
T m_value;
ElementType m_element_type;
};
virtual ~Tensor() {}
const ElementType& element_type() const override { return m_element_type; }
std::string value_string() const override
{
std::string rc = "WTF";
if (std::is_floating_point<T>::value)
{
std::stringstream ss;
ss << m_value;
rc = ss.str();
}
return rc;
}
//================================================================================================
// Transformer
//================================================================================================
private:
T m_value;
ElementType m_element_type;
};
class Transformer
{
public:
virtual ~Transformer() {}
virtual ExecutionState& execution_state() = 0;
};
//================================================================================================
// TensorDescription
//================================================================================================
// class TensorDescription
// {
// public:
// virtual ~TensorDescription();
// virtual axes_key_t axes_key() const = 0;
// virtual std::string name() const = 0;
// virtual std::vector<size_t> shape() const = 0;
// virtual std::shared_ptr<TensorDescription> base() = 0;
// virtual ElementType element_type() const = 0;
// virtual size_t tensor_size() = 0;
// virtual bool is_persistent() = 0;
// virtual bool is_input() = 0;
// };
//================================================================================================
// Op
//================================================================================================
// class Op
// {
// // Any operation that can be in an AST.
// // Arguments:
// // args: Values used by this node.
// // const: The value of a constant Op, or None,
// // constant (bool): The Op is constant. Default False.
// // forward: If not None, the node to use instead of this node.
// // metadata: String key value dictionary for frontend metadata.
// // kwargs: Args defined in related classes.
// // Attributes:
// // const: The value of a constant.
// // constant (bool): The value is constant.
// // control_deps (OrderedSet): Ops in addition to args that must run before this op.
// // persistent (bool): The value will be retained from computation to computation and
// // not shared. Always True if reference is set.
// // metadata: Dictionary with string keys and values used for attaching
// // arbitrary metadata to nodes.
// // trainable: The value is trainable.
// public:
// virtual ~Op() {}
// virtual std::string name() const = 0;
// virtual tensor_description_ptr tensor_description() = 0;
// virtual op_ptr tensor() = 0;
// virtual bool is_tensor_op() = 0;
// virtual bool is_state_op() const = 0;
// virtual bool is_sequencing_op() const = 0;
// virtual op_ptr effective_tensor_op() = 0;
// virtual const std::vector<op_ptr>& all_deps() const = 0;
// // ops
// // TODO support multiple types
// static op_ptr constant(float value)
// {
// op_ptr = make_shared<LiteralScalarOp>(value);
// }
// };
//================================================================================================
// TensorOp
//================================================================================================
// class TensorOp : public Op
// {
// public:
// std::string name() const override { return "TensorOp"; }
// tensor_description_ptr tensor_description() override { return nullptr; }
// op_ptr tensor() override { return nullptr; }
// bool is_tensor_op() override { return true; }
// bool is_state_op() const override { return false; }
// op_ptr effective_tensor_op() override { return nullptr; }
// const std::vector<op_ptr>& all_deps() const override { return m_all_deps; }
// private:
// std::vector<op_ptr> m_all_deps;
// };
//================================================================================================
// Transformer
//================================================================================================
class Transformer
{
public:
virtual ~Transformer() {}
virtual ExecutionState& execution_state() = 0;
};
//================================================================================================
// TensorDescription
//================================================================================================
// class TensorDescription
// {
// public:
// virtual ~TensorDescription();
// virtual axes_key_t axes_key() const = 0;
// virtual std::string name() const = 0;
// virtual std::vector<size_t> shape() const = 0;
// virtual std::shared_ptr<TensorDescription> base() = 0;
// virtual ElementType element_type() const = 0;
// virtual size_t tensor_size() = 0;
// virtual bool is_persistent() = 0;
// virtual bool is_input() = 0;
// };
//================================================================================================
// Op
//================================================================================================
// class Op
// {
// // Any operation that can be in an AST.
// // Arguments:
// // args: Values used by this node.
// // const: The value of a constant Op, or None,
// // constant (bool): The Op is constant. Default False.
// // forward: If not None, the node to use instead of this node.
// // metadata: String key value dictionary for frontend metadata.
// // kwargs: Args defined in related classes.
// // Attributes:
// // const: The value of a constant.
// // constant (bool): The value is constant.
// // control_deps (OrderedSet): Ops in addition to args that must run before this op.
// // persistent (bool): The value will be retained from computation to computation and
// // not shared. Always True if reference is set.
// // metadata: Dictionary with string keys and values used for attaching
// // arbitrary metadata to nodes.
// // trainable: The value is trainable.
// public:
// virtual ~Op() {}
// virtual std::string name() const = 0;
// virtual tensor_description_ptr tensor_description() = 0;
// virtual op_ptr tensor() = 0;
// virtual bool is_tensor_op() = 0;
// virtual bool is_state_op() const = 0;
// virtual bool is_sequencing_op() const = 0;
// virtual op_ptr effective_tensor_op() = 0;
// virtual const std::vector<op_ptr>& all_deps() const = 0;
// // ops
// // TODO support multiple types
// static op_ptr constant(float value)
// {
// op_ptr = make_shared<LiteralScalarOp>(value);
// }
// };
//================================================================================================
// TensorOp
//================================================================================================
// class TensorOp : public Op
// {
// public:
// std::string name() const override { return "TensorOp"; }
// tensor_description_ptr tensor_description() override { return nullptr; }
// op_ptr tensor() override { return nullptr; }
// bool is_tensor_op() override { return true; }
// bool is_state_op() const override { return false; }
// op_ptr effective_tensor_op() override { return nullptr; }
// const std::vector<op_ptr>& all_deps() const override { return m_all_deps; }
// private:
// std::vector<op_ptr> m_all_deps;
// };
} // end of namespace ngraph
@@ -14,24 +14,21 @@
#pragma once
#include "mock.hpp"
#include "exop.hpp"
#include "mock.hpp"
namespace ngraph
{
//================================================================================================
// CpuTransformer
//================================================================================================
class CpuTransformer : public Transformer
{
public:
virtual ~CpuTransformer() {}
ExecutionState& execution_state() override { return m_execution_state; }
private:
ExecutionState m_execution_state;
};
//================================================================================================
// CpuTransformer
//================================================================================================
class CpuTransformer : public Transformer
{
public:
virtual ~CpuTransformer() {}
ExecutionState& execution_state() override { return m_execution_state; }
private:
ExecutionState m_execution_state;
};
} // end namespace ngraph
@@ -14,8 +14,8 @@
#pragma once
#include <vector>
#include <memory>
#include <vector>
#include "element_type.hpp"
#include "strides.hpp"
@@ -14,8 +14,8 @@
#include <sstream>
#include "op_graph.hpp"
#include "axes.hpp"
#include "op_graph.hpp"
#include "util.hpp"
using namespace ngraph;
@@ -2794,7 +2794,9 @@ ElementWiseOp::ElementWiseOp()
{
}
void ElementWiseOp::ElementWiseOp_init(std::vector<op_ptr>, Axes) {}
void ElementWiseOp::ElementWiseOp_init(std::vector<op_ptr>, Axes)
{
}
//================================================================================================
// UnaryElementWiseOp
This source diff could not be displayed because it is too large.
@@ -51,7 +51,6 @@ public:
bool is_list() const { return m_is_list; }
T get_value() const { return m_value; }
const std::vector<tree>& get_list() const { return m_list; }
static void traverse_tree(tree& s, std::function<void(T*)> func)
{
if (s.is_list())
@@ -83,10 +83,10 @@ namespace ngraph
}
size_t hash_combine(const std::vector<size_t>& list);
void dump(std::ostream& out, const void*, size_t);
void dump(std::ostream& out, const void*, size_t);
std::string to_lower(const std::string& s);
std::string trim(const std::string& s);
std::string to_lower(const std::string& s);
std::string trim(const std::string& s);
std::vector<std::string> split(const std::string& s, char delimiter, bool trim = false);
class stopwatch
@@ -148,7 +148,6 @@ namespace ngraph
size_t get_total_milliseconds() const { return get_total_nanoseconds() / 1e6; }
size_t get_total_microseconds() const { return get_total_nanoseconds() / 1e3; }
size_t get_total_nanoseconds() const { return m_total_time.count(); }
private:
std::chrono::high_resolution_clock m_clock;
std::chrono::time_point<std::chrono::high_resolution_clock> m_start_time;
@@ -74,7 +74,6 @@ public:
}
bool operator!=(const uuid_type& other) const { return !(*this == other); }
friend std::ostream& operator<<(std::ostream& out, const uuid_type& id)
{
out << id.to_string();
@@ -22,4 +22,6 @@
using namespace ngraph;
TEST(names, name) {}
TEST(names, name)
{
}
@@ -134,7 +134,9 @@ TEST(util, contains)
EXPECT_FALSE(contains(v1, 8));
}
TEST(util, remove_from) {}
TEST(util, remove_from)
{
}
TEST(util, reduce)
{