Unverified commit 4a25881e authored by Scott Cyphers, committed by GitHub

TensorIterator (#3038)

* TensorIterator

* ssize_t is not available on Windows

* RNN building test

* simplify

* Simplify output

* typo

* typos

* remove arg

* Sequence version

* style

* Serialization for all but TensorIterator

* Add ops for igpu

* style

* typo, ngpu

* missing headers, output vector

* Fix const json issues

* TensorIterator serialization

* Serialization for TensorIterator
Switch Output<T> to use shared_ptr so nodes don't vanish
Switch Result to new node style
Add serialization/deserialization to test

* Switch Output to use a shared_ptr to prevent nodes from disappearing early.
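A minimal sketch of what this change buys (illustration only, not part of the diff; `op::Constant::create` is the usual ngraph factory, the function name is hypothetical):

    // With Output<Node> holding a shared_ptr, a value keeps its producer alive.
    Output<Node> make_value()
    {
        auto c = op::Constant::create(element::f32, Shape{2}, {1.0f, 2.0f});
        return Output<Node>(c, 0); // c survives this scope; a raw pointer would dangle
    }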

* Eliminate wrapped enum
Switch allreduce to new op form

* Convert to new op form

* Disambiguate concat

* Add autobroadcast for SequencePush
Add validation for SequencePush

* compute shapes for SequenceRepeat

* Add explicit conversion from PartialShape to dimension vector
validate and infer types for SliceInput

* validate and infer types for SequenceOutput

* Add sequence attributes

* Move test to serializer so it doesn't fail when there is no serializer?

* const arg

* Beginning of TensorIterator validation

* Validation up to parameters

* Fix shape in test

* Remove mis-typed AxisSet

* Simplify, add doc

* Review comments

* Tweaks

* free/bound

* Try fused op

* Discussion

* more

* comments

* Start of LSTMCell test

* Add LSTMCell example

* Reorg

* Reorg

* Fused ops don't need handlers

* Serialization

* Use `as_type` and `is_type` for up-conversions of descriptions
Allocate output space for each output
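A sketch of the intended usage, mirroring how the hunks below use these helpers in node.cpp and lambda.cpp (the wrapper function here is hypothetical):

    // is_type tests a node's DiscreteTypeInfo; as_type_ptr is the matching
    // checked downcast, yielding nullptr on mismatch.
    std::shared_ptr<Node> unwrap_result(const std::shared_ptr<Node>& node)
    {
        if (is_type<op::Result>(node))
        {
            auto result = as_type_ptr<op::Result>(node);
            return result->input_value(0).get_node_shared_ptr(); // the wrapped value
        }
        return node;
    }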

* Clean up type checking

* Fix ser/deser issues

* Refactor, cleanup type info to make it safer to use for non-ops

* Implement validate_and_infer_types and modify unit tests.

* For ops in the loop body: revalidate and infer types.

Nested loops are not supported.

* Put body ops in a set and call revalidate and infer types on the set.

* Set slice[axis] to part_size.

Call set_partial_shape to set shape for body parameters.

Add more unit tests.
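A sketch of the shape computation those two sentences describe (variable names hypothetical; `set_partial_shape` and `Dimension` indexing are the APIs this change relies on):

    // The body parameter sees the full input shape except along the iteration
    // axis, where each step gets part_size elements.
    PartialShape body_shape = sequence_shape; // e.g. {32, 10, 100} = {batch, time, feature}
    body_shape[axis] = part_size;             // e.g. {32, 1, 100} when part_size == 1
    body_parameter->set_partial_shape(body_shape);
    // per the earlier bullet, revalidate and infer types then runs over the body ops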

* Give tensor iterator body a lambda

* Update validate_and_infer_types and unit tests.

* Serialization of body

* Change static function to TensorIterator function.

* review comments
parent ccbba5e4
@@ -83,6 +83,8 @@ set (SRC
function.cpp
function.hpp
graph_util.cpp
lambda.cpp
lambda.hpp
log.cpp
log.hpp
ngraph.cpp
@@ -315,6 +317,8 @@ set (SRC
op/tan.hpp
op/tanh.cpp
op/tanh.hpp
op/tensor_iterator.cpp
op/tensor_iterator.hpp
op/topk.cpp
op/topk.hpp
op/xor.cpp
@@ -26,13 +26,14 @@
using namespace std;
using namespace ngraph;
constexpr DiscreteTypeInfo Function::type_info;
atomic<size_t> Function::m_next_instance_id(0);
Function::Function(const ResultVector& results,
const ParameterVector& parameters,
const std::string& name)
: m_results(results)
, m_parameters(parameters)
: Lambda(results, parameters)
, m_temporary_pool_size(0)
, m_instance_id(m_next_instance_id.fetch_add(1))
, m_name(name)
@@ -44,48 +45,24 @@ Function::Function(const ResultVector& results,
Function::Function(const OutputVector& results,
const ParameterVector& parameters,
const std::string& name)
: m_results(results.size())
, m_parameters(parameters)
: Lambda(results, parameters)
, m_temporary_pool_size(0)
, m_instance_id(m_next_instance_id.fetch_add(1))
, m_name(name)
, m_unique_name("Function_" + to_string(m_instance_id))
{
if (std::any_of(results.cbegin(), results.cend(), [](Output<Node> n) {
return as_type_ptr<op::Result>(n.get_node_shared_ptr());
}))
{
throw ngraph_error(
" Results already contain op::Results. Use a c-tor that takes a ResultVector");
}
std::transform(results.begin(), results.end(), m_results.begin(), [](Output<Node> n) {
return std::make_shared<op::Result>(n);
});
init();
}
Function::Function(const NodeVector& results,
const ParameterVector& parameters,
const std::string& name)
: m_results(results.size())
, m_parameters(parameters)
: Lambda(as_output_vector(results), parameters)
, m_temporary_pool_size(0)
, m_instance_id(m_next_instance_id.fetch_add(1))
, m_name(name)
, m_unique_name("Function_" + to_string(m_instance_id))
{
if (std::any_of(results.cbegin(), results.cend(), [](std::shared_ptr<Node> n) {
return as_type_ptr<op::Result>(n);
}))
{
throw ngraph_error(
" Results already contain op::Results. Use a c-tor that takes a ResultVector");
}
std::transform(results.begin(), results.end(), m_results.begin(), [](std::shared_ptr<Node> n) {
return std::make_shared<op::Result>(n);
});
init();
}
@@ -23,6 +23,7 @@
#include <string>
#include <vector>
#include "ngraph/lambda.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/parameter.hpp"
#include "ngraph/op/result.hpp"
@@ -30,9 +31,11 @@
namespace ngraph
{
/// A user-defined function.
class Function
class Function : public Lambda
{
public:
static constexpr DiscreteTypeInfo type_info{"Function", 0};
const DiscreteTypeInfo& get_type_info() const { return type_info; }
Function(const NodeVector& results,
const ParameterVector& parameters,
const std::string& name = "");
@@ -70,10 +73,6 @@ namespace ngraph
/// Return the partial shape of element i
const PartialShape& get_output_partial_shape(size_t i) const;
/// Return the function parameters
const ParameterVector& get_parameters() const { return m_parameters; }
/// Return a list of function's outputs
const ResultVector& get_results() const { return m_results; }
/// Check that there is a single result and return it.
std::shared_ptr<Node> get_result() const;
@@ -128,8 +127,6 @@ namespace ngraph
const std::shared_ptr<op::Parameter>& parameter);
protected:
ResultVector m_results;
ParameterVector m_parameters;
size_t m_temporary_pool_size;
private:
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/lambda.hpp"
using namespace std;
using namespace ngraph;
constexpr DiscreteTypeInfo Lambda::type_info;
Lambda::Lambda(const OutputVector& results, const ParameterVector& parameters)
: Lambda(as_result_vector(results), parameters)
{
}
Lambda::Lambda(const ResultVector& results, const ParameterVector& parameters)
: m_results(results)
, m_parameters(parameters)
{
}
int64_t Lambda::get_parameter_index(const std::shared_ptr<op::Parameter>& parameter) const
{
int64_t pos = 0;
for (auto p : get_parameters())
{
if (p == parameter)
{
return pos;
}
pos++;
}
return -1;
}
int64_t Lambda::get_result_index(const Output<Node>& value) const
{
int64_t pos = 0;
if (is_type<op::Result>(value.get_node_shared_ptr()))
{
auto result = value.get_node_shared_ptr();
for (auto r : get_results())
{
if (r == result)
{
return pos;
}
pos++;
}
}
else
{
for (auto r : get_results())
{
if (r->input_value(0) == value)
{
return pos;
}
pos++;
}
}
return -1;
}
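A short usage sketch for the two index helpers (Function derives from Lambda, as the function.hpp hunk above shows; the values are arbitrary):

    auto arg = make_shared<op::Parameter>(element::f32, Shape{2, 2});
    auto sum = make_shared<op::Add>(arg, arg);
    auto f = make_shared<Function>(NodeVector{sum}, ParameterVector{arg});

    int64_t p = f->get_parameter_index(arg);               // 0
    int64_t r = f->get_result_index(Output<Node>(sum, 0)); // 0: the Result wrapping sum
    // both helpers return -1 when the query is not found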
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/node.hpp"
#include "ngraph/op/parameter.hpp"
#include "ngraph/op/result.hpp"
namespace ngraph
{
class Lambda
{
public:
static constexpr DiscreteTypeInfo type_info{"Lamdba", 0};
const DiscreteTypeInfo& get_type_info() const { return type_info; }
/// Return the function parameters
const ParameterVector& get_parameters() const { return m_parameters; };
/// Index for parameter, or -1
int64_t get_parameter_index(const std::shared_ptr<op::Parameter>& parameter) const;
/// Return a list of function's outputs
const ResultVector& get_results() const { return m_results; };
/// Index for value or result referencing it, or -1
int64_t get_result_index(const Output<Node>& value) const;
protected:
Lambda(const ResultVector& results, const ParameterVector& parameters);
Lambda(const OutputVector& results, const ParameterVector& parameters);
ResultVector m_results;
ParameterVector m_parameters;
};
}
@@ -81,6 +81,7 @@ namespace ngraph
#include "ngraph/dimension.hpp"
#include "ngraph/except.hpp"
#include "ngraph/function.hpp"
#include "ngraph/lambda.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/abs.hpp"
#include "ngraph/op/acos.hpp"
@@ -209,6 +210,7 @@ namespace ngraph
#include "ngraph/op/sum.hpp"
#include "ngraph/op/tan.hpp"
#include "ngraph/op/tanh.hpp"
#include "ngraph/op/tensor_iterator.hpp"
#include "ngraph/op/topk.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/op/xor.hpp"
@@ -817,6 +817,18 @@ NodeVector ngraph::as_node_vector(const OutputVector& values)
return node_vector;
}
ResultVector ngraph::as_result_vector(const OutputVector& values)
{
ResultVector result;
for (auto value : values)
{
shared_ptr<Node> node = value.get_node_shared_ptr();
result.push_back(is_type<op::Result>(node) ? as_type_ptr<op::Result>(node)
: make_shared<op::Result>(value));
}
return result;
}
std::tuple<element::Type, PartialShape>
Node::validate_and_infer_elementwise_args(const op::AutoBroadcastSpec& autob)
{
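The helper reuses nodes that are already Results and wraps everything else, so callers never double-wrap. A small sketch of that behavior (values arbitrary):

    auto a = make_shared<op::Parameter>(element::f32, Shape{2});
    auto wrapped = make_shared<op::Result>(Output<Node>(a, 0));

    ResultVector rv =
        as_result_vector(OutputVector{Output<Node>(wrapped, 0), Output<Node>(a, 0)});
    // rv[0] == wrapped (reused as-is); rv[1] is a new op::Result over a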
@@ -61,8 +61,11 @@ namespace ngraph
{
struct AutoBroadcastSpec;
class Constant;
class Result;
} // namespace op
using ResultVector = std::vector<std::shared_ptr<op::Result>>;
namespace autodiff
{
class Adjoints;
@@ -80,6 +83,8 @@ namespace ngraph
OutputVector as_output_vector(const NodeVector& args);
NodeVector as_node_vector(const OutputVector& values);
/// Returns a ResultVector referencing values.
ResultVector as_result_vector(const OutputVector& values);
/// Alias useful for cloning
using NodeMap = std::unordered_map<ngraph::Node*, std::shared_ptr<ngraph::Node>>;
@@ -141,7 +141,7 @@ void op::LSTMCell::pre_validate_and_infer_types()
", ",
get_hidden_size(),
"). Actual shape is:",
w_shape,
r_shape,
".");
NODE_VALIDATION_CHECK(this,
(ht_shape == Shape{batch_size, get_hidden_size()}),
@@ -150,7 +150,7 @@ void op::LSTMCell::pre_validate_and_infer_types()
", ",
get_hidden_size(),
"). Actual shape is:",
w_shape,
ht_shape,
".");
NODE_VALIDATION_CHECK(this,
(ct_shape == Shape{batch_size, get_hidden_size()}),
@@ -159,7 +159,7 @@ void op::LSTMCell::pre_validate_and_infer_types()
", ",
get_hidden_size(),
"). Actual shape is:",
w_shape,
ct_shape,
".");
const auto& b_pshape = get_input_partial_shape(5);
@@ -57,4 +57,5 @@ NGRAPH_OP(SquaredDifference, ngraph::op)
NGRAPH_OP(SoftmaxCrossEntropy, ngraph::op)
NGRAPH_OP(SoftmaxCrossEntropyBackprop, ngraph::op)
NGRAPH_OP(Squeeze, ngraph::op)
NGRAPH_OP(TensorIterator, ngraph::op)
NGRAPH_OP(Unsqueeze, ngraph::op)
This diff is collapsed.
This diff is collapsed.
@@ -181,6 +181,8 @@
/// \param i The index of the dimension being selected.
/// \return A reference to the `i`th Dimension of this shape.
Dimension& operator[](size_t i) { return m_dimensions[i]; }
/// \brief Returns a vector of the dimensions. This has no meaning if dynamic.
explicit operator std::vector<Dimension>() const { return m_dimensions; }
friend std::ostream& operator<<(std::ostream& str, const PartialShape& shape);
friend PartialShape operator+(const PartialShape& s1, const PartialShape& s2);
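Because the conversion is explicit and only meaningful for static rank, a cautious caller guards it (sketch, following the doc comment above):

    PartialShape ps{Dimension::dynamic(), 3, 4};
    if (ps.rank().is_static())
    {
        auto dims = static_cast<std::vector<Dimension>>(ps);
        // dims[0] is dynamic; dims[1] == 3, dims[2] == 4
    }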
This diff is collapsed.
This diff is collapsed.