Commit d9acd066 authored by Scott Cyphers, committed by GitHub

Basic layouts, tuples (#148)

* Basic layouts, tuples

* Review comments.

* Add include for size_t.

* Bop use-after-free in GetTupleElement

* Inline call to get_element_types()
parent cdb079b8
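For orientation, a minimal sketch of how the tuple support added here is meant to be used (an editorial illustration only, assuming the constructors visible elsewhere in this diff: op::GetTupleElement(arg, n), TupleType built from ValueTypes, and op::Parameter taking a value type):

```cpp
#include <memory>

#include "ngraph/ngraph.hpp"

using namespace ngraph;

void tuple_sketch()
{
    // A parameter whose value type is a tuple of two tensor views.
    auto tv0 = std::make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4});
    auto tv1 = std::make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4, 6});
    auto param = std::make_shared<op::Parameter>(std::make_shared<TupleType>(ValueTypes{tv0, tv1}));
    param->propagate_types();

    // Select element 1 of the tuple; propagate_types() checks the index against
    // the tuple arity and sets the op's value type to that element's type.
    auto get1 = std::make_shared<op::GetTupleElement>(param, 1);
    get1->propagate_types();
}
```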
......@@ -13,9 +13,11 @@
set (SRC
descriptor/input.cpp
descriptor/layout/dense_tensor_view_layout.cpp
descriptor/output.cpp
descriptor/tensor_view.cpp
descriptor/primary_tensor_view.cpp
descriptor/tensor.cpp
descriptor/tuple.cpp
function.cpp
log.cpp
node.cpp
......@@ -27,6 +29,7 @@ set (SRC
ops/constant.cpp
ops/convert.cpp
ops/dot.cpp
ops/get_tuple_element.cpp
ops/op.cpp
ops/parameter.cpp
ops/select.cpp
......@@ -47,6 +50,8 @@ set (SRC
pass/visualize_tree.cpp
runtime/call_frame.cpp
runtime/external_function.cpp
runtime/tuple.cpp
runtime/utils.cpp
shape.cpp
types/element_type.cpp
types/type.cpp
......
......@@ -31,21 +31,21 @@ namespace ngraph
}
class ValueType;
/// Zero or more value types
using ValueTypes = std::vector<std::shared_ptr<ValueType>>;
/// @brief Zero or more value types
using ValueTypes = std::vector<std::shared_ptr<const ValueType>>;
/// Zero or more nodes
/// @brief Zero or more nodes
using Nodes = std::vector<std::shared_ptr<Node>>;
/// A sequence of axes
/// @brief A sequence of axes
using AxisVector = std::vector<size_t>;
/// A set of axes, for example, reduction axes
/// @brief A set of axes, for example, reduction axes
using AxisSet = std::set<size_t>;
/// Shape for a tensor
/// @brief Shape for a tensor
using Shape = std::vector<size_t>;
/// Strides of a tensor
/// @brief Strides of a tensor
using Strides = std::vector<size_t>;
}
......@@ -14,6 +14,8 @@
#pragma once
#include <memory>
namespace ngraph
{
namespace descriptor
......@@ -23,8 +25,11 @@ namespace ngraph
// during execution.
class Buffer
{
public:
size_t size() const { return m_size; }
protected:
size_t size;
size_t m_size;
};
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <cassert>
#include "ngraph/descriptor/buffer.hpp"
namespace ngraph
{
namespace descriptor
{
/// @brief Specifies a contiguous portion of a buffer.
///
/// Currently only implemented for linear buffers.
class BufferPos
{
public:
BufferPos() {}
BufferPos(std::shared_ptr<Buffer> buffer, size_t offset, size_t size)
: m_buffer(buffer)
, m_offset(offset)
, m_size(size)
{
assert(buffer->size() >= offset + size);
}
BufferPos(const BufferPos& buffer_pos) = default;
BufferPos& operator=(const BufferPos& buffer_pos) = default;
protected:
std::shared_ptr<Buffer> m_buffer;
size_t m_offset;
size_t m_size;
};
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/except.hpp"
#include "ngraph/shape.hpp"
using namespace ngraph::descriptor::layout;
using ngraph::Shape;
using ngraph::descriptor::TensorView;
using ngraph::TensorViewType;
DenseTensorViewLayout::DenseTensorViewLayout(const TensorView& tensor_view)
: TensorViewLayout(tensor_view)
{
auto tensor_view_type = tensor_view.get_tensor_view_type();
Shape shape = tensor_view_type->get_shape();
m_size = ngraph::shape_size(shape);
m_strides = ngraph::row_major_strides(shape);
}
size_t DenseTensorViewLayout::get_index_offset(const std::vector<size_t>& indices)
{
if (indices.size() != m_strides.size())
{
throw ngraph_error("Indices have the incorrect rank.");
}
size_t result = 0;
for (size_t i = 0; i < indices.size(); i++)
{
result += m_strides[i] * indices[i];
}
return result;
}
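As a worked example of the formula documented in the header below (dot(I, strides) + offset): for shape {2, 3, 4}, row_major_strides yields {12, 4, 1}, so index {1, 2, 3} maps to 1*12 + 2*4 + 3*1 = 23. A standalone sketch of the same computation (illustration only, not part of the commit):

```cpp
#include <cstddef>
#include <vector>

// dot(indices, strides): the dense-layout offset computed above.
size_t dense_offset(const std::vector<size_t>& indices, const std::vector<size_t>& strides)
{
    size_t result = 0;
    for (size_t i = 0; i < indices.size(); i++)
    {
        result += strides[i] * indices[i];
    }
    return result; // dense_offset({1, 2, 3}, {12, 4, 1}) == 23
}
```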
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <vector>
#include "ngraph/descriptor/buffer.hpp"
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
#include "ngraph/descriptor/tensor_view.hpp"
namespace ngraph
{
namespace descriptor
{
namespace layout
{
/// @brief The standard strided layout, used for row-major and column-major, their permutations and slices.
///
/// The linearized offset of an index I is dot(I, strides) + offset.
class DenseTensorViewLayout : public TensorViewLayout
{
public:
~DenseTensorViewLayout() {}
DenseTensorViewLayout(const TensorView& tensor_view);
virtual size_t get_size() override { return m_size; }
size_t get_offset() const { return m_offset; }
virtual size_t get_index_offset(const std::vector<size_t>& indices) override;
protected:
Strides m_strides;
size_t m_offset;
size_t m_size;
};
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <tuple>
#include <vector>
#include "ngraph/descriptor/buffer_pos.hpp"
namespace ngraph
{
namespace descriptor
{
class TensorView;
namespace layout
{
/// @brief Interface for describing implementations of tensor views.
///
/// Kernel selection will need to pay attention to the layout.
class TensorViewLayout
{
protected:
TensorViewLayout(const ngraph::descriptor::TensorView& tensor_view)
: m_tensor_view(tensor_view)
{
}
public:
virtual ~TensorViewLayout() {}
/// Extent of this view in buffer.
///
/// When we support non-linear buffers, this will need to be something other than size_t.
virtual size_t get_size() = 0;
/// Offset of an index; useful for slice implementation.
///
/// With non-linear buffers, this will need to be something other than size_t.
virtual size_t get_index_offset(const std::vector<size_t>& indices) = 0;
/// Where this view is located in the buffer.
const BufferPos& get_buffer_pos() const { return m_buffer_pos; }
BufferPos& get_buffer_pos() { return m_buffer_pos; }
protected:
const ngraph::descriptor::TensorView& m_tensor_view;
BufferPos m_buffer_pos;
};
}
}
}
......@@ -12,15 +12,17 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include "ngraph/descriptor/tensor_view.hpp"
#include "ngraph/descriptor/primary_tensor_view.hpp"
using namespace ngraph;
using namespace descriptor;
PrimaryTensorView::PrimaryTensorView(const std::shared_ptr<const TensorViewType>& tensor_view_type,
const Node* parent, size_t value_index)
const std::string& name,
bool is_output,
bool is_input)
: TensorView(tensor_view_type)
, m_tensor(tensor_view_type->get_element_type(), this, parent, value_index)
, m_tensor(tensor_view_type->get_element_type(), this, name, is_output, is_input)
{
// Set the name in the parent TensorView.
// This can't be done until after the m_tensor is constructed.
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <string>
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/descriptor/tensor_view.hpp"
#include "ngraph/log.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/types/type.hpp"
namespace ngraph
{
class Node;
namespace descriptor
{
class Tensor;
class TensorViewLayout;
/// @brief A PrimaryTensorView owns the tensor. All other views are the result
/// of some index operation on the primary view.
class PrimaryTensorView : public TensorView
{
public:
/// @param tensor_view_type The type for this view.
/// @param name Description of the tensor, for debugging.
/// @param is_output The view can be read from the host at the end of a computation.
/// @param is_input The view can be written from the host at the beginning of a computation.
PrimaryTensorView(const std::shared_ptr<const TensorViewType>& tensor_view_type,
const std::string& name,
bool is_output,
bool is_input);
virtual const Tensor& get_tensor() const override;
virtual Tensor& get_tensor() override;
protected:
Tensor m_tensor;
};
}
}
......@@ -13,7 +13,7 @@
// ----------------------------------------------------------------------------
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/descriptor/tensor_view.hpp"
#include "ngraph/descriptor/primary_tensor_view.hpp"
#include "ngraph/node.hpp"
using namespace ngraph;
......@@ -21,14 +21,15 @@ using namespace ngraph::descriptor;
Tensor::Tensor(const element::Type& element_type,
PrimaryTensorView* primary_tensor_view,
const Node* parent,
size_t value_index)
const std::string& name,
bool is_output,
bool is_input)
: m_element_type(element_type)
, m_primary_tensor_view(primary_tensor_view)
, m_is_output{parent->is_output()}
, m_is_input{parent->is_parameter()}
, m_is_output{is_output}
, m_is_input{is_input}
, m_is_persistent{false}
, m_name{parent->get_node_id() + "_" + std::to_string(value_index)}
, m_name{name}
, m_next_view_id{0}
{
size_t size = 1;
......@@ -39,6 +40,11 @@ Tensor::Tensor(const element::Type& element_type,
m_size = size * m_element_type.size();
}
std::string Tensor::make_tensor_name(const Node* node, size_t value_index)
{
return node->get_node_id() + "_" + std::to_string(value_index);
}
std::string Tensor::get_next_view_name()
{
return m_name + "_TV" + std::to_string(m_next_view_id++);
......
......@@ -45,8 +45,9 @@ private:
Tensor(const element::Type& element_type,
PrimaryTensorView* tensor_view,
const Node* parent,
size_t value_index);
const std::string& name,
bool is_output,
bool is_input);
std::string get_next_view_name();
......@@ -59,6 +60,8 @@ public:
void set_pool_offset(size_t);
size_t get_pool_offset() const;
static std::string make_tensor_name(const Node* node, size_t value_index);
protected:
const element::Type& m_element_type;
PrimaryTensorView* m_primary_tensor_view;
......
......@@ -14,10 +14,13 @@
#pragma once
#include <memory>
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/descriptor/value.hpp"
#include "ngraph/log.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/types/type.hpp"
#include "ngraph/log.hpp"
namespace ngraph
{
......@@ -25,11 +28,14 @@ namespace ngraph
namespace descriptor
{
class Tensor;
class TensorViewLayout;
namespace layout
{
class Tensor;
class TensorViewLayout;
}
// Describes a view of an instantiated tensor
class TensorView
/// @brief Compile-time descriptor of a first-class value that is a view of a tensor.
class TensorView : public Value
{
TensorView(const TensorView&) = delete;
TensorView& operator=(const TensorView&) = delete;
......@@ -44,6 +50,12 @@ namespace ngraph
virtual ~TensorView() {}
virtual const Tensor& get_tensor() const = 0;
virtual Tensor& get_tensor() = 0;
virtual std::shared_ptr<const ValueType> get_value_type() const override
{
return m_tensor_view_type;
}
const std::string& get_name() const { return m_name; }
std::shared_ptr<const TensorViewType> get_tensor_view_type() const
......@@ -51,35 +63,27 @@ namespace ngraph
return m_tensor_view_type;
}
const std::shared_ptr<TensorViewLayout>& get_tensor_view_layout() const
const std::shared_ptr<layout::TensorViewLayout>& get_tensor_view_layout() const
{
return m_tensor_view_layout;
}
void set_tensor_view_layout(const std::shared_ptr<TensorViewLayout>& tensor_view_layout)
void set_tensor_view_layout(
const std::shared_ptr<layout::TensorViewLayout>& tensor_view_layout)
{
m_tensor_view_layout = tensor_view_layout;
}
protected:
std::shared_ptr<const TensorViewType> m_tensor_view_type;
std::shared_ptr<TensorViewLayout> m_tensor_view_layout;
std::string m_name;
};
// A PrimaryTensorView owns the tensor. All other views are the result
// of some index operation on the primary view.
class PrimaryTensorView : public TensorView
{
public:
PrimaryTensorView(const std::shared_ptr<const TensorViewType>& tensor_view_type,
const Node* parent, size_t value_index);
virtual const Tensor& get_tensor() const override;
virtual Tensor& get_tensor() override;
virtual void collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
const std::shared_ptr<Value>& value) const override
{
views.push_back(std::static_pointer_cast<TensorView>(value));
}
protected:
Tensor m_tensor;
std::shared_ptr<const TensorViewType> m_tensor_view_type;
std::shared_ptr<layout::TensorViewLayout> m_tensor_view_layout;
std::string m_name;
};
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <memory>
#include <vector>
#include "ngraph/descriptor/tensor_view.hpp"
#include "ngraph/descriptor/tuple.hpp"
#include "ngraph/types/type.hpp"
using namespace ngraph::descriptor;
Tuple::Tuple(const std::vector<std::shared_ptr<ngraph::descriptor::Value>>& elements)
: m_elements(elements)
{
std::vector<std::shared_ptr<const ngraph::ValueType>> types;
for (auto element : m_elements)
{
types.push_back(element->get_value_type());
}
m_tuple_type = std::make_shared<ngraph::TupleType>(types);
}
void Tuple::collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
const std::shared_ptr<Value>& value) const
{
for (auto element : m_elements)
{
element->collect_tensor_views(views, element);
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <memory>
#include <vector>
#include "ngraph/descriptor/value.hpp"
#include "ngraph/types/type.hpp"
namespace ngraph
{
namespace descriptor
{
/// @brief Compile-time descriptor of a first-class value that is a tuple of zero or more first-class values.
class Tuple : public Value
{
public:
Tuple(const std::vector<std::shared_ptr<ngraph::descriptor::Value>>& elements);
const std::shared_ptr<ngraph::TupleType> get_tuple_type() const;
std::shared_ptr<ngraph::TupleType> get_tuple_type();
virtual std::shared_ptr<const ValueType> get_value_type() const override
{
return m_tuple_type;
}
virtual void collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
const std::shared_ptr<Value>& value) const override;
protected:
std::shared_ptr<ngraph::TupleType> m_tuple_type;
std::vector<std::shared_ptr<ngraph::descriptor::Value>> m_elements;
};
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <memory>
#include "ngraph/types/type.hpp"
namespace ngraph
{
namespace descriptor
{
class TensorView;
/// @brief Compile-time descriptor of a first-class value.
class Value
{
public:
virtual ~Value() {}
virtual std::shared_ptr<const ngraph::ValueType> get_value_type() const = 0;
/// @brief helper for collecting all the tensor views in a sequence of values
///
/// @param views The vector of tensor views being collected.
/// @param value A shared pointer for this.
///
/// Appends each tensor view in this value to views. Because this value may itself be a
/// tensor view, the caller passes a shared pointer to it; one cannot be obtained from this.
virtual void collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
const std::shared_ptr<Value>& value) const = 0;
};
}
}
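A hedged sketch of the intended call pattern (the helper name flatten is illustrative, not part of the commit): the caller supplies the shared pointer alongside the value, and a Tuple recursively appends the views of its elements.

```cpp
#include <memory>
#include <vector>

#include "ngraph/descriptor/tensor_view.hpp"
#include "ngraph/descriptor/value.hpp"

// Flatten a descriptor::Value (a single tensor view or a nested tuple) into
// the tensor views it contains.
std::vector<std::shared_ptr<ngraph::descriptor::TensorView>>
    flatten(const std::shared_ptr<ngraph::descriptor::Value>& value)
{
    std::vector<std::shared_ptr<ngraph::descriptor::TensorView>> views;
    value->collect_tensor_views(views, value);
    return views;
}
```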
......@@ -18,14 +18,33 @@
#pragma once
/// @namespace ngraph
/// @brief The Intel Nervana Graph C++ API.
/// @namespace ngraph::descriptor
/// @brief Descriptors are compile-time representations of objects that will appear at run-time.
/// @namespace ngraph::descriptor::layout
/// @brief Layout descriptors describe how tensor views are implemented.
/// @namespace ngraph::op
/// @brief Ops used in graph-building.
/// @namespace ngraph::runtime
/// @brief The objects used for executing the graph.
#include "ngraph/common.hpp"
#include "ngraph/descriptor/buffer.hpp"
#include "ngraph/descriptor/call_frame.hpp"
#include "ngraph/descriptor/input.hpp"
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
#include "ngraph/descriptor/output.hpp"
#include "ngraph/descriptor/primary_tensor_view.hpp"
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/descriptor/tensor_view.hpp"
#include "ngraph/descriptor/tensor_view_layout.hpp"
#include "ngraph/descriptor/tuple.hpp"
#include "ngraph/descriptor/value.hpp"
#include "ngraph/except.hpp"
#include "ngraph/function.hpp"
#include "ngraph/node.hpp"
......@@ -41,6 +60,7 @@
#include "ngraph/ops/equal.hpp"
#include "ngraph/ops/exp.hpp"
#include "ngraph/ops/floor.hpp"
#include "ngraph/ops/get_tuple_element.hpp"
#include "ngraph/ops/greater.hpp"
#include "ngraph/ops/less.hpp"
#include "ngraph/ops/log.hpp"
......@@ -56,10 +76,13 @@
#include "ngraph/ops/select.hpp"
#include "ngraph/ops/subtract.hpp"
#include "ngraph/ops/tuple.hpp"
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/instruction.hpp"
#include "ngraph/runtime/parameterized_tensor_view.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/runtime/tuple.hpp"
#include "ngraph/runtime/value.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/types/element_type.hpp"
#include "ngraph/types/type.hpp"
......@@ -36,7 +36,7 @@ Node::~Node()
{
}
void Node::set_value_type_checked(const shared_ptr<ValueType>& value_type)
void Node::set_value_type_checked(const shared_ptr<const ValueType>& value_type)
{
if (nullptr == m_value_type)
{
......@@ -58,7 +58,7 @@ void Node::assign_tensors()
size_t i = 0;
for (auto tvt : tensor_view_types)
{
auto tensor_view_descriptor = make_shared<descriptor::PrimaryTensorView>(tvt, this, i);
auto tensor_view_descriptor = make_shared<descriptor::PrimaryTensorView>(tvt, ngraph::descriptor::Tensor::make_tensor_name(this, i), is_output(), is_parameter());
m_outputs.emplace_back(this, i, tensor_view_descriptor);
i++;
}
......
......@@ -16,8 +16,8 @@
#include <set>
#include <string>
#include <vector>
#include <unordered_set>
#include <vector>
#include <iostream>
......@@ -64,7 +64,7 @@ namespace ngraph
void assign_tensors();
const Nodes& get_arguments() const { return m_arguments; }
void clear_arguments() { m_arguments.clear(); }
void clear_arguments() { m_arguments.clear(); }
const std::multiset<Node*>& users() const { return m_users; }
......@@ -78,15 +78,15 @@ namespace ngraph
return typeid(*this) == typeid(*node.get());
}
std::shared_ptr<ValueType> get_value_type() { return m_value_type; }
const std::shared_ptr<ValueType> get_value_type() const { return m_value_type; }
std::shared_ptr<const ValueType> get_value_type() { return m_value_type; }
const std::shared_ptr<const ValueType> get_value_type() const { return m_value_type; }
void set_value_type(const element::Type& element_type, const Shape& shape)
{
m_value_type = std::make_shared<TensorViewType>(element_type, shape);
}
void set_value_type(const std::shared_ptr<ValueType>& value_type)
void set_value_type(const std::shared_ptr<const ValueType>& value_type)
{
m_value_type = value_type;
}
......@@ -95,7 +95,7 @@ namespace ngraph
// value_type agrees with the value type that was set.
// This is used when the framework specifies a value type for the value, and we
// independently compute what we think the value type should be from the arguments.
void set_value_type_checked(const std::shared_ptr<ValueType>& value_type);
void set_value_type_checked(const std::shared_ptr<const ValueType>& value_type);
bool is_parameter() const;
bool is_output() const;
......@@ -104,9 +104,9 @@ namespace ngraph
size_t get_instance_id() const { return m_instance_id; }
friend std::ostream& operator<<(std::ostream&, const Node&);
std::vector<descriptor::Input>& get_inputs() { return m_inputs; }
std::vector<descriptor::Input>& get_inputs() { return m_inputs; }
const std::vector<descriptor::Input>& get_inputs() const { return m_inputs; }
std::vector<descriptor::Output>& get_outputs() { return m_outputs; }
std::vector<descriptor::Output>& get_outputs() { return m_outputs; }
const std::vector<descriptor::Output>& get_outputs() const { return m_outputs; }
std::unordered_set<descriptor::Tensor*> liveness_live_list;
......@@ -114,14 +114,14 @@ namespace ngraph
std::unordered_set<descriptor::Tensor*> liveness_free_list;
protected:
Nodes m_arguments;
std::shared_ptr<ValueType> m_value_type;
std::multiset<Node*> m_users;
std::string m_name;
size_t m_instance_id;
static size_t m_next_instance_id;
std::vector<descriptor::Input> m_inputs;
std::vector<descriptor::Output> m_outputs;
bool m_is_output;
Nodes m_arguments;
std::shared_ptr<const ValueType> m_value_type;
std::multiset<Node*> m_users;
std::string m_name;
size_t m_instance_id;
static size_t m_next_instance_id;
std::vector<descriptor::Input> m_inputs;
std::vector<descriptor::Output> m_outputs;
bool m_is_output;
};
}
......@@ -29,9 +29,9 @@ void BinaryElementwiseBuiltin::propagate_types()
}
auto arg0_tensor_type =
dynamic_pointer_cast<TensorViewType>(m_arguments.at(0)->get_value_type());
dynamic_pointer_cast<const TensorViewType>(m_arguments.at(0)->get_value_type());
auto arg1_tensor_type =
dynamic_pointer_cast<TensorViewType>(m_arguments.at(1)->get_value_type());
dynamic_pointer_cast<const TensorViewType>(m_arguments.at(1)->get_value_type());
if (nullptr == arg0_tensor_type || nullptr == arg1_tensor_type)
{
throw ngraph_error("Arguments must be tensor views");
......
......@@ -28,7 +28,7 @@ void Broadcast::propagate_types()
{
throw ngraph_error("Argument to broadcast is missing type.");
}
auto arg_tensor_view_type = dynamic_pointer_cast<TensorViewType>(arg_type);
auto arg_tensor_view_type = dynamic_pointer_cast<const TensorViewType>(arg_type);
if (nullptr == arg_tensor_view_type)
{
throw ngraph_error("Argument to broadcast is not a tensor view");
......
......@@ -32,7 +32,7 @@ void Concat::propagate_types()
throw ngraph_error("Argument to concat is missing type.");
}
auto arg0_tensor_view_type = dynamic_pointer_cast<TensorViewType>(arg0_type);
auto arg0_tensor_view_type = dynamic_pointer_cast<const TensorViewType>(arg0_type);
if (nullptr == arg0_tensor_view_type)
{
throw ngraph_error("Argument to concat is not a tensor view");
......@@ -55,7 +55,7 @@ void Concat::propagate_types()
throw ngraph_error("Argument to concat is missing type.");
}
auto argi_tensor_view_type = dynamic_pointer_cast<TensorViewType>(argi_type);
auto argi_tensor_view_type = dynamic_pointer_cast<const TensorViewType>(argi_type);
if (nullptr == argi_tensor_view_type)
{
throw ngraph_error("Argument to concat is not a tensor view");
......
......@@ -22,9 +22,9 @@ using namespace ngraph::op;
void Dot::propagate_types()
{
auto arg0_tensor_type =
dynamic_pointer_cast<TensorViewType>(m_arguments.at(0)->get_value_type());
dynamic_pointer_cast<const TensorViewType>(m_arguments.at(0)->get_value_type());
auto arg1_tensor_type =
dynamic_pointer_cast<TensorViewType>(m_arguments.at(1)->get_value_type());
dynamic_pointer_cast<const TensorViewType>(m_arguments.at(1)->get_value_type());
if (nullptr == arg0_tensor_type || nullptr == arg1_tensor_type)
{
throw ngraph_error("Arguments to dot must be tensor views");
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <sstream>
#include "ngraph/ops/get_tuple_element.hpp"
using namespace std;
using namespace ngraph::op;
void GetTupleElement::propagate_types()
{
if (m_arguments.size() != 1)
{
throw ngraph_error("Wrong number of arguments.");
}
auto arg0_tuple_type =
dynamic_pointer_cast<const TupleType>(m_arguments.at(0)->get_value_type());
if (nullptr == arg0_tuple_type)
{
throw ngraph_error("Argument must be a tuple view");
}
if (m_n >= arg0_tuple_type->get_element_types().size())
{
throw ngraph_error("Indexing tuple beyond its size");
}
set_value_type_checked(arg0_tuple_type->get_element_types().at(m_n));
}
......@@ -14,27 +14,30 @@
#pragma once
#include <vector>
#include "ngraph/ops/op.hpp"
namespace ngraph
{
namespace descriptor
namespace op
{
// An interface for describing implementations of tensor views
// Kernel selection will need to pay attention to the layout
class TensorViewLayout
class Node;
class GetTupleElement : public Builtin
{
public:
virtual ~TensorViewLayout() {}
};
GetTupleElement(const std::shared_ptr<Node>& arg, size_t n)
: Builtin({arg})
, m_n{n}
{
}
virtual void propagate_types() override;
virtual std::string description() const override { return "GetTupleElement"; }
size_t get_n() const { return m_n; }
// The standard strided layout
class DenseTensorViewLayout : public TensorViewLayout
{
protected:
std::shared_ptr<Buffer> m_buffer;
Strides m_strides;
size_t m_offset;
size_t m_n;
};
}
}
......@@ -29,11 +29,11 @@ void Select::propagate_types()
}
auto arg0_tensor_type =
dynamic_pointer_cast<TensorViewType>(m_arguments.at(0)->get_value_type());
dynamic_pointer_cast<const TensorViewType>(m_arguments.at(0)->get_value_type());
auto arg1_tensor_type =
dynamic_pointer_cast<TensorViewType>(m_arguments.at(1)->get_value_type());
dynamic_pointer_cast<const TensorViewType>(m_arguments.at(1)->get_value_type());
auto arg2_tensor_type =
dynamic_pointer_cast<TensorViewType>(m_arguments.at(2)->get_value_type());
dynamic_pointer_cast<const TensorViewType>(m_arguments.at(2)->get_value_type());
if (nullptr == arg0_tensor_type || nullptr == arg1_tensor_type || nullptr == arg2_tensor_type)
{
throw ngraph_error("Arguments must be tensor views");
......
......@@ -13,6 +13,7 @@
// ----------------------------------------------------------------------------
#include <memory>
#include <vector>
#include "ngraph/ngraph.hpp"
......@@ -21,5 +22,10 @@ using namespace ngraph::op;
void Tuple::propagate_types()
{
throw ngraph_error("NIY");
vector<shared_ptr<const ValueType>> element_types;
for (auto argument : m_arguments)
{
element_types.push_back(argument->get_value_type());
}
set_value_type_checked(make_shared<TupleType>(element_types));
}
......@@ -28,7 +28,7 @@ void UnaryElementwiseBuiltin::propagate_types()
}
auto arg_tensor_type =
dynamic_pointer_cast<TensorViewType>(m_arguments.at(0)->get_value_type());
dynamic_pointer_cast<const TensorViewType>(m_arguments.at(0)->get_value_type());
if (nullptr == arg_tensor_type)
{
throw ngraph_error("Argument must be tensor view");
......
......@@ -35,8 +35,9 @@ CallFrame::CallFrame(size_t
copy(temps.begin(), temps.end(), m_tensors.begin() + m_n_inputs + m_n_outputs);
}
void CallFrame::operator()(const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& inputs,
const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& outputs)
void CallFrame::tensor_call(
const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& inputs,
const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& outputs)
{
copy(inputs.begin(), inputs.end(), m_tensors.begin());
copy(outputs.begin(), outputs.end(), m_tensors.begin() + m_n_inputs);
......@@ -51,3 +52,20 @@ void CallFrame::operator()(const std::vector<std::shared_ptr<ngraph::runtime::Te
// Don't hold onto inputs/outputs
fill_n(m_tensors.begin(), m_n_inputs + m_n_outputs, nullptr);
}
void CallFrame::operator()(const std::vector<std::shared_ptr<ngraph::runtime::Value>>& arguments,
const std::vector<std::shared_ptr<ngraph::runtime::Value>>& results)
{
// TODO: Check types of args and result
std::vector<std::shared_ptr<ngraph::runtime::TensorView>> inputs;
for (auto argument : arguments)
{
argument->collect_tensor_views(inputs, argument);
}
std::vector<std::shared_ptr<ngraph::runtime::TensorView>> outputs;
for (auto result : results)
{
result->collect_tensor_views(outputs, result);
}
tensor_call(inputs, outputs);
}
......@@ -38,9 +38,19 @@ namespace ngraph
size_t initial_pc,
const std::shared_ptr<std::vector<std::shared_ptr<Instruction>>>& instructions);
void operator()(const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& inputs,
const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& outputs);
void set_return() { m_return = true; }
/// @brief Invoke the function with values matching the signature of the function.
///
/// Tuples will be expanded into their tensor views to build the call frame.
void operator()(const std::vector<std::shared_ptr<ngraph::runtime::Value>>& inputs,
const std::vector<std::shared_ptr<ngraph::runtime::Value>>& outputs);
/// @brief Invoke the function with tuples pre-expanded to their underlying tensor views.
void tensor_call(
const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& inputs,
const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& outputs);
void set_return() { m_return = true; }
std::shared_ptr<TensorView> get_tensor(size_t i) { return m_tensors[i]; }
template <typename ET>
......
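A hedged usage sketch of the new Value-based call interface (obtaining the CallFrame, e.g. from an ExternalFunction, is outside this diff; make_tensor and make_tuple are the helpers added in runtime/utils.hpp further down):

```cpp
#include <memory>

#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/utils.hpp"

using namespace ngraph;

void call_with_tuple(const std::shared_ptr<runtime::CallFrame>& cf)
{
    auto a = runtime::make_tensor<element::Float32>(Shape{2, 4});
    auto b = runtime::make_tensor<element::Float32>(Shape{2, 4});
    auto result = runtime::make_tensor<element::Float32>(Shape{2, 4});

    // The tuple argument is expanded into its two tensor views by operator(),
    // which then forwards the flat lists to tensor_call.
    (*cf)({runtime::make_tuple({a, b})}, {result});
}
```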
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <cassert>
#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/eigen/utils.hpp"
#include "ngraph/runtime/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace eigen
{
/// @brief Copies a tensor from in to out.
template <typename ET>
class CopyInstruction : public Instruction
{
public:
/// @param in Index of input tensor in call frame.
/// @param out Index of output tensor in call frame.
CopyInstruction(size_t in, size_t out)
: m_in(in)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
call_frame.get_parameterized_tensor<ET>(m_out)->get_vector() =
call_frame.get_parameterized_tensor<ET>(m_in)->get_vector();
}
protected:
size_t m_in;
size_t m_out;
};
}
}
}
This diff is collapsed.
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <memory>
#include <vector>
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/descriptor/primary_tensor_view.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/types/element_type.hpp"
namespace ngraph
{
namespace runtime
{
template <typename ET>
class ParameterizedTensorView : public TensorView
{
public:
/// Create a tensor
ParameterizedTensorView(const ngraph::Shape& shape)
: TensorView(std::make_shared<ngraph::descriptor::PrimaryTensorView>(
std::make_shared<ngraph::TensorViewType>(ET::element_type(), shape),
"external",
true,
true))
{
m_descriptor->set_tensor_view_layout(
std::make_shared<ngraph::descriptor::layout::DenseTensorViewLayout>(
*m_descriptor));
m_vector.resize(m_descriptor->get_tensor_view_layout()->get_size());
}
ParameterizedTensorView(
const std::shared_ptr<ngraph::descriptor::TensorView>& descriptor);
using element_type = ET;
using value_type = typename ET::type;
using storage_type = std::vector<value_type>;
template <typename T>
ParameterizedTensorView<ET>& operator=(const std::vector<T>& value)
{
get_vector() = value;
return *this;
}
// For getting the data out
storage_type& get_vector() { return m_vector; }
protected:
storage_type m_vector;
};
}
}
......@@ -17,6 +17,8 @@
#include <memory>
#include <vector>
#include "ngraph/descriptor/tensor_view.hpp"
#include "ngraph/runtime/value.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/types/element_type.hpp"
......@@ -27,58 +29,45 @@ namespace ngraph
template <typename ET>
class ParameterizedTensorView;
class TensorView
class TensorView : public Value
{
protected:
TensorView(const std::shared_ptr<ngraph::descriptor::TensorView>& descriptor)
: m_descriptor(descriptor)
{
}
public:
TensorView() {}
virtual ~TensorView() {}
template <typename ET>
ParameterizedTensorView<ET>* get_parameterized_tensor()
{
return dynamic_cast<ParameterizedTensorView<ET>*>(this);
}
};
template <typename ET>
class ParameterizedTensorView : public ngraph::runtime::TensorView
{
public:
ParameterizedTensorView(const ngraph::Shape& shape)
: m_vector(ngraph::shape_size(shape), 0)
, m_shape(shape)
std::shared_ptr<const ngraph::descriptor::TensorView> get_tensor_view_descriptor() const
{
return m_descriptor;
}
virtual ~ParameterizedTensorView() {}
// Standard definitions from vector
using element_type = ET;
using value_type = typename ET::type;
using storage_type = std::vector<value_type>;
using size_type = typename storage_type::size_type;
using difference_type = typename storage_type::difference_type;
using reference = typename storage_type::reference;
using const_reference = typename storage_type::const_reference;
using pointer = typename storage_type::pointer;
using const_pointer = typename storage_type::const_pointer;
using iterator = typename storage_type::iterator;
using const_iterator = typename storage_type::const_iterator;
using reverse_iterator = typename storage_type::reverse_iterator;
using const_reverse_iterator = typename storage_type::const_reverse_iterator;
virtual std::shared_ptr<ngraph::descriptor::Value> get_descriptor() const override
{
return m_descriptor;
}
template <typename T>
ParameterizedTensorView<ET>& operator=(const std::vector<T>& value)
virtual void collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
const std::shared_ptr<Value>& value) const override
{
get_vector() = value;
return *this;
views.push_back(std::static_pointer_cast<TensorView>(value));
}
// For getting the data out
storage_type& get_vector() { return m_vector; }
const ngraph::Shape& get_shape() const { return m_shape; }
const Shape& get_shape() { return m_descriptor->get_tensor_view_type()->get_shape(); }
protected:
storage_type m_vector;
ngraph::Shape m_shape;
std::shared_ptr<ngraph::descriptor::TensorView> m_descriptor;
};
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <memory>
#include <vector>
#include "ngraph/descriptor/tuple.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/runtime/tuple.hpp"
using namespace ngraph::runtime;
Tuple::Tuple(const std::vector<std::shared_ptr<ngraph::runtime::Value>>& elements)
: m_elements(elements)
{
std::vector<std::shared_ptr<ngraph::descriptor::Value>> descriptors;
for (auto element : m_elements)
{
descriptors.push_back(element->get_descriptor());
}
m_descriptor = std::make_shared<ngraph::descriptor::Tuple>(descriptors);
}
void Tuple::collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
const std::shared_ptr<Value>& value) const
{
for (auto element : m_elements)
{
element->collect_tensor_views(views, element);
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <memory>
#include <vector>
#include "ngraph/descriptor/tuple.hpp"
#include "ngraph/runtime/value.hpp"
namespace ngraph
{
namespace runtime
{
/// @brief A first-class value holding zero or more first-class values.
class Tuple : public Value
{
public:
Tuple(const std::vector<std::shared_ptr<ngraph::runtime::Value>>& elements);
virtual std::shared_ptr<ngraph::descriptor::Value> get_descriptor() const override
{
return m_descriptor;
}
std::shared_ptr<const ngraph::descriptor::Value> get_tuple_descriptor() const
{
return m_descriptor;
}
virtual void
collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
const std::shared_ptr<Value>& value) const override;
protected:
std::vector<std::shared_ptr<Value>> m_elements;
std::shared_ptr<ngraph::descriptor::Tuple> m_descriptor;
};
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include "ngraph/runtime/utils.hpp"
std::shared_ptr<ngraph::runtime::Tuple> ngraph::runtime::make_tuple(
const std::vector<std::shared_ptr<ngraph::runtime::Value>>& elements)
{
return std::make_shared<ngraph::runtime::Tuple>(elements);
}
......@@ -15,20 +15,28 @@
#pragma once
#include <memory>
#include <vector>
#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/runtime/parameterized_tensor_view.hpp"
#include "ngraph/runtime/tuple.hpp"
#include "ngraph/runtime/value.hpp"
#include "ngraph/types/element_type.hpp"
namespace ngraph
{
namespace runtime
{
/// @brief Framework constructor of a tensor of a specific element type and shape.
template <typename ET>
std::shared_ptr<ngraph::runtime::ParameterizedTensorView<ET>>
make_tensor(const Shape& shape)
{
return std::make_shared<runtime::ParameterizedTensorView<ET>>(shape);
}
/// @brief Framework constructor of a tuple from a sequence of values.
std::shared_ptr<ngraph::runtime::Tuple>
make_tuple(const std::vector<std::shared_ptr<ngraph::runtime::Value>>& elements);
}
}
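A small hedged example of the helpers above, filling a tensor through the vector assignment ParameterizedTensorView provides (assuming element::Float32::type is float, as the tests suggest):

```cpp
#include <memory>
#include <vector>

#include "ngraph/runtime/utils.hpp"

void build_inputs()
{
    // Allocate a 2x2 Float32 tensor view and fill its storage.
    auto t = ngraph::runtime::make_tensor<ngraph::element::Float32>(ngraph::Shape{2, 2});
    *t = std::vector<float>{1, 2, 3, 4}; // ParameterizedTensorView::operator=

    // Group values into a runtime tuple for the Value-based call interface.
    auto tup = ngraph::runtime::make_tuple({t});
}
```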
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <memory>
#include <vector>
#include "ngraph/descriptor/value.hpp"
namespace ngraph
{
namespace runtime
{
class TensorView;
/// @brief A first-class runtime value.
class Value
{
public:
virtual ~Value() {}
/// @brief The compile-time descriptor for this value.
virtual std::shared_ptr<ngraph::descriptor::Value> get_descriptor() const = 0;
/// @brief helper for collecting all the tensor views in a sequence of values
///
/// @param views The vector of tensor views being collected.
/// @param value A shared pointer for this.
///
/// Appends each tensor view in this value to views. Because this value may itself be a
/// tensor view, the caller passes a shared pointer to it; one cannot be obtained from this.
virtual void collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
const std::shared_ptr<Value>& value) const = 0;
};
}
}
......@@ -82,16 +82,16 @@ namespace ngraph
TupleType() {}
/// @param element_types A vector of types for the tuple elements
TupleType(const std::vector<std::shared_ptr<ValueType>>& element_types)
TupleType(const std::vector<std::shared_ptr<const ValueType>>& element_types)
: m_element_types(element_types)
{
}
const std::vector<std::shared_ptr<ValueType>> get_element_types() const
const std::vector<std::shared_ptr<const ValueType>> get_element_types() const
{
return m_element_types;
}
std::vector<std::shared_ptr<ValueType>> set_element_types() { return m_element_types; }
std::vector<std::shared_ptr<const ValueType>> set_element_types() { return m_element_types; }
virtual bool operator==(const ValueType& that) const override;
virtual void collect_tensor_views(
......@@ -99,6 +99,6 @@ namespace ngraph
friend std::ostream& operator<<(std::ostream&, const TupleType&);
protected:
std::vector<std::shared_ptr<ValueType>> m_element_types;
std::vector<std::shared_ptr<const ValueType>> m_element_types;
};
}
This diff is collapsed.
......@@ -44,7 +44,7 @@ TEST(input_output, param_tuple)
// Same as param_tensor, but for a tuple
auto tv_tp_0 = make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4});
auto tv_tp_1 = make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4, 6});
auto tp_tp = make_shared<TupleType>(std::vector<std::shared_ptr<ValueType>>{tv_tp_0, tv_tp_1});
auto tp_tp = make_shared<TupleType>(ValueTypes{tv_tp_0, tv_tp_1});
auto param = make_shared<op::Parameter>(tp_tp);
param->propagate_types();
param->assign_tensors();
......