Commit 06f9efd9 authored by Robert Kimball, committed by Scott Cyphers

Remove descriptor::Value and runtime::Value (#355)

* general cleanup

* remove runtime::Value

* more cleanup

* more cleanup
parent f4bb3e46
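
The practical effect of this commit is that the runtime API no longer has a Value abstraction sitting above TensorView: CallFrame::call and its CPU/interpreter implementations now take vectors of runtime::TensorView directly. A minimal before/after sketch of a call site (cf, a, b, and result are hypothetical placeholders for a compiled call frame and its input/output tensor views, not code from this commit):

    // Before: arguments were passed as the abstract runtime::Value base type,
    // which could be either a tensor view or a tuple of values.
    std::vector<std::shared_ptr<ngraph::runtime::Value>> old_inputs{a, b};
    std::vector<std::shared_ptr<ngraph::runtime::Value>> old_outputs{result};
    cf->call(old_inputs, old_outputs);

    // After: tensor views are passed directly; the Value indirection is gone.
    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> inputs{a, b};
    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> outputs{result};
    cf->call(inputs, outputs);
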
@@ -16,7 +16,6 @@
 #include <memory>
-#include "ngraph/descriptor/value.hpp"
 #include "ngraph/shape.hpp"
 namespace ngraph
@@ -36,7 +35,7 @@ namespace ngraph
         class TensorView;
         /// @brief Compile-time descriptor of a first-class value that is a view of a tensor.
-        class TensorView : public Value
+        class TensorView
         {
             TensorView(const TensorView&) = delete;
             TensorView& operator=(const TensorView&) = delete;
@@ -52,7 +51,7 @@ namespace ngraph
             virtual const Tensor& get_tensor() const = 0;
             virtual Tensor& get_tensor() = 0;
-            virtual std::shared_ptr<const ValueType> get_value_type() const override;
+            virtual std::shared_ptr<const ValueType> get_value_type() const;
             const std::string& get_name() const { return m_name; }
             std::shared_ptr<const TensorViewType> get_tensor_view_type() const
@@ -72,9 +71,9 @@ namespace ngraph
             }
             virtual void collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
-                                              const std::shared_ptr<Value>& value) const override
+                                              const std::shared_ptr<TensorView>& value) const
             {
-                views.push_back(std::static_pointer_cast<TensorView>(value));
+                views.push_back(value);
             }
         protected:
...
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#include <memory>
#include <vector>

#include "ngraph/descriptor/tensor_view.hpp"
#include "ngraph/descriptor/tuple.hpp"
#include "ngraph/types/type.hpp"

using namespace ngraph::descriptor;

Tuple::Tuple(const std::vector<std::shared_ptr<ngraph::descriptor::Value>>& elements)
    : m_elements(elements)
{
    std::vector<std::shared_ptr<const ngraph::ValueType>> types;
    for (auto element : m_elements)
    {
        types.push_back(element->get_value_type());
    }
    m_tuple_type = std::make_shared<ngraph::TupleType>(types);
}

void Tuple::collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
                                 const std::shared_ptr<Value>& value) const
{
    for (auto element : m_elements)
    {
        element->collect_tensor_views(views, element);
    }
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once

#include <memory>
#include <vector>

#include "ngraph/descriptor/value.hpp"
#include "ngraph/types/type.hpp"

namespace ngraph
{
    namespace descriptor
    {
        /// @brief Compile-time descriptor of a first-class value that is a tuple of zero or more first-class values.
        class Tuple : public Value
        {
        public:
            Tuple(const std::vector<std::shared_ptr<ngraph::descriptor::Value>>& elements);

            const std::shared_ptr<ngraph::TupleType> get_tuple_type() const;
            std::shared_ptr<ngraph::TupleType> get_tuple_type();

            virtual std::shared_ptr<const ValueType> get_value_type() const override
            {
                return m_tuple_type;
            }

            virtual void collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
                                              const std::shared_ptr<Value>& value) const override;

        protected:
            std::shared_ptr<ngraph::TupleType> m_tuple_type;
            std::vector<std::shared_ptr<ngraph::descriptor::Value>> m_elements;
        };
    }
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once

#include <memory>
#include <vector>

namespace ngraph
{
    class ValueType;

    namespace descriptor
    {
        class TensorView;

        /// @brief Compile-time descriptor of a first-class value.
        class Value
        {
        public:
            virtual ~Value() {}
            virtual std::shared_ptr<const ngraph::ValueType> get_value_type() const = 0;

            /// @brief Helper for collecting all the tensor views in a sequence of values.
            ///
            /// @param views The vector of tensor views being collected.
            /// @param value A shared pointer for this.
            ///
            /// Append each tensor view in this value to views. Since this may be a tensor view,
            /// we need to pass a shared pointer to this, since we can't get one from this.
            virtual void collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
                                              const std::shared_ptr<Value>& value) const = 0;
        };
    }
}
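
For context on what is being deleted: the descriptor::Value / Tuple / TensorView hierarchy above existed so that nested values could be flattened into a flat list of tensor views via collect_tensor_views (tuples recurse into their elements, tensor views append themselves). A self-contained sketch of that pattern, written as a standalone mock rather than ngraph code, so the removed design is easier to follow:

    #include <iostream>
    #include <memory>
    #include <vector>

    struct TensorView; // forward declaration so Value can name it in the collector

    // Mock of the removed Value base class: every value knows how to append the
    // tensor views it contains to a flat list.
    struct Value
    {
        virtual ~Value() {}
        // `self` stands in for `this`; the original passed a shared_ptr explicitly
        // because the callee cannot recover one from a raw `this`.
        virtual void collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
                                          const std::shared_ptr<Value>& self) const = 0;
    };

    // Mock of the leaf case: a tensor view collects just itself.
    struct TensorView : Value
    {
        void collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
                                  const std::shared_ptr<Value>& self) const override
        {
            views.push_back(std::static_pointer_cast<TensorView>(self));
        }
    };

    // Mock of the removed Tuple case: recurse into each element.
    struct Tuple : Value
    {
        std::vector<std::shared_ptr<Value>> m_elements;
        explicit Tuple(std::vector<std::shared_ptr<Value>> elements)
            : m_elements(std::move(elements))
        {
        }
        void collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
                                  const std::shared_ptr<Value>&) const override
        {
            for (const auto& element : m_elements)
            {
                element->collect_tensor_views(views, element);
            }
        }
    };

    int main()
    {
        auto a = std::make_shared<TensorView>();
        auto b = std::make_shared<TensorView>();
        auto t = std::make_shared<Tuple>(std::vector<std::shared_ptr<Value>>{a, b});

        std::vector<std::shared_ptr<TensorView>> flat;
        t->collect_tensor_views(flat, t); // flattens the tuple into {a, b}
        std::cout << flat.size() << "\n"; // prints 2
    }

After this commit only the TensorView leaf case remains, which is why the call() implementations in the hunks below can collect views with a plain loop over TensorView pointers.
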
@@ -54,8 +54,6 @@
 #include "ngraph/descriptor/primary_tensor_view.hpp"
 #include "ngraph/descriptor/tensor.hpp"
 #include "ngraph/descriptor/tensor_view.hpp"
-#include "ngraph/descriptor/tuple.hpp"
-#include "ngraph/descriptor/value.hpp"
 #include "ngraph/except.hpp"
 #include "ngraph/function.hpp"
 #include "ngraph/node.hpp"
@@ -113,7 +111,6 @@
 #include "ngraph/runtime/external_function.hpp"
 #include "ngraph/runtime/manager.hpp"
 #include "ngraph/runtime/tensor_view.hpp"
-#include "ngraph/runtime/value.hpp"
 #include "ngraph/shape.hpp"
 #include "ngraph/types/element_type.hpp"
 #include "ngraph/types/type.hpp"
@@ -32,8 +32,6 @@ namespace ngraph
         class ExternalFunction;
         class CallFrame;
         class TensorView;
-        class Tuple;
-        class Value;
         /// @brief Interface to a generic backend.
         ///
...
@@ -24,9 +24,6 @@ namespace ngraph
 {
     namespace runtime
     {
-        class PrimaryTensorView;
-        class Value;
         // A VM for executing lightly-compiled graph functions.
         class CallFrame
         {
@@ -35,9 +32,8 @@ namespace ngraph
             /// @brief Invoke the function with values matching the signature of the function.
             ///
             /// Tuples will be expanded into their tensor views to build the call frame.
-            virtual void
-                call(const std::vector<std::shared_ptr<ngraph::runtime::Value>>& inputs,
-                     const std::vector<std::shared_ptr<ngraph::runtime::Value>>& outputs) = 0;
+            virtual void call(const std::vector<std::shared_ptr<runtime::TensorView>>& inputs,
+                              const std::vector<std::shared_ptr<runtime::TensorView>>& outputs) = 0;
             /// @brief Invoke the function with tuples pre-expanded to their underlying tensor views.
             virtual void tensor_call(const TensorViewPtrs& inputs,
...
@@ -52,18 +52,18 @@ void runtime::cpu::CPU_CallFrame::tensor_call(
 }
 void runtime::cpu::CPU_CallFrame::call(
-    const std::vector<std::shared_ptr<ngraph::runtime::Value>>& arguments,
-    const std::vector<std::shared_ptr<ngraph::runtime::Value>>& results)
+    const std::vector<std::shared_ptr<runtime::TensorView>>& arguments,
+    const std::vector<std::shared_ptr<runtime::TensorView>>& results)
 {
     // TODO: Check types of args and result
-    vector<shared_ptr<ngraph::runtime::TensorView>> inputs;
-    for (shared_ptr<ngraph::runtime::Value> argument : arguments)
+    vector<shared_ptr<runtime::TensorView>> inputs;
+    for (shared_ptr<runtime::TensorView> argument : arguments)
     {
         argument->collect_tensor_views(inputs, argument);
     }
-    vector<shared_ptr<ngraph::runtime::TensorView>> outputs;
-    for (shared_ptr<ngraph::runtime::Value> result : results)
+    vector<shared_ptr<runtime::TensorView>> outputs;
+    for (shared_ptr<runtime::TensorView> result : results)
     {
         result->collect_tensor_views(outputs, result);
     }
...
@@ -66,8 +66,8 @@ namespace ngraph
             /// @brief Invoke the function with values matching the signature of the function.
             ///
             /// Tuples will be expanded into their tensor views to build the call frame.
-            void call(const std::vector<std::shared_ptr<ngraph::runtime::Value>>& inputs,
-                      const std::vector<std::shared_ptr<ngraph::runtime::Value>>& outputs);
+            void call(const std::vector<std::shared_ptr<runtime::TensorView>>& inputs,
+                      const std::vector<std::shared_ptr<runtime::TensorView>>& outputs);
             /// @brief Invoke the function with tuples pre-expanded to their underlying
             /// tensor views.
...
@@ -263,17 +263,18 @@ void runtime::interpreter::INT_CallFrame::tensor_call(
     tensor_call(args, out);
 }
-void runtime::interpreter::INT_CallFrame::call(const vector<shared_ptr<runtime::Value>>& arguments,
-                                               const vector<shared_ptr<runtime::Value>>& results)
+void runtime::interpreter::INT_CallFrame::call(
+    const vector<shared_ptr<runtime::TensorView>>& arguments,
+    const vector<shared_ptr<runtime::TensorView>>& results)
 {
     vector<shared_ptr<runtime::TensorView>> inputs;
-    for (shared_ptr<runtime::Value> argument : arguments)
+    for (shared_ptr<runtime::TensorView> argument : arguments)
     {
         argument->collect_tensor_views(inputs, argument);
     }
     vector<shared_ptr<runtime::TensorView>> outputs;
-    for (shared_ptr<runtime::Value> result : results)
+    for (shared_ptr<runtime::TensorView> result : results)
     {
         result->collect_tensor_views(outputs, result);
     }
...
@@ -109,8 +109,8 @@ public:
     /// @brief Invoke the function with values matching the signature of the function.
     ///
    /// Tuples will be expanded into their tensor views to build the call frame.
-    void call(const std::vector<std::shared_ptr<runtime::Value>>& inputs,
-              const std::vector<std::shared_ptr<runtime::Value>>& outputs);
+    void call(const std::vector<std::shared_ptr<runtime::TensorView>>& inputs,
+              const std::vector<std::shared_ptr<runtime::TensorView>>& outputs);
 private:
     /// @brief Invoke the function with tuples pre-expanded to their underlying
...
@@ -25,15 +25,15 @@ std::shared_ptr<const ngraph::descriptor::TensorView> TensorView::get_tensor_view_descriptor() const
     return m_descriptor;
 }
-std::shared_ptr<ngraph::descriptor::Value> TensorView::get_descriptor() const
+std::shared_ptr<ngraph::descriptor::TensorView> TensorView::get_descriptor() const
 {
     return m_descriptor;
 }
 void TensorView::collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
-                                      const std::shared_ptr<Value>& value) const
+                                      const std::shared_ptr<TensorView>& value) const
 {
-    views.push_back(std::static_pointer_cast<TensorView>(value));
+    views.push_back(value);
 }
 const ngraph::Shape& TensorView::get_shape() const
...
@@ -18,7 +18,6 @@
 #include <vector>
 #include "ngraph/descriptor/tensor_view.hpp"
-#include "ngraph/runtime/value.hpp"
 #include "ngraph/shape.hpp"
 #include "ngraph/util.hpp"
@@ -31,7 +30,7 @@ namespace ngraph
     namespace runtime
     {
-        class TensorView : public Value
+        class TensorView
        {
         protected:
             TensorView(const std::shared_ptr<ngraph::descriptor::TensorView>& descriptor)
@@ -46,10 +45,10 @@ namespace ngraph
             std::shared_ptr<const ngraph::descriptor::TensorView>
                 get_tensor_view_descriptor() const;
-            virtual std::shared_ptr<ngraph::descriptor::Value> get_descriptor() const override;
+            virtual std::shared_ptr<descriptor::TensorView> get_descriptor() const;
             virtual void collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
-                                              const std::shared_ptr<Value>& value) const override;
+                                              const std::shared_ptr<TensorView>& value) const;
             const ngraph::Shape& get_shape() const;
             const ngraph::Strides& get_strides() const;
...
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once

#include <memory>
#include <vector>

#include "ngraph/descriptor/value.hpp"

namespace ngraph
{
    namespace descriptor
    {
        class Value;
    }

    namespace runtime
    {
        class TensorView;

        /// @brief A first-class runtime value.
        class Value
        {
        public:
            virtual ~Value() {}
            Value& operator=(const Value&) = default;

            /// @brief The compile-time descriptor for this value.
            virtual std::shared_ptr<ngraph::descriptor::Value> get_descriptor() const = 0;

            /// @brief Helper for collecting all the tensor views in a sequence of values.
            ///
            /// @param views The vector of tensor views being collected.
            /// @param value A shared pointer for this.
            ///
            /// Append each tensor view in this value to views. Since this may be a tensor view,
            /// we need to pass a shared pointer to this, since we can't get one from this.
            virtual void collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
                                              const std::shared_ptr<Value>& value) const = 0;
        };
    }
}
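
With runtime::Value removed (the file above), runtime::TensorView is the only runtime value kind left, and its collect_tensor_views override simply appends the view itself. In effect, the flattening loops in CPU_CallFrame::call and INT_CallFrame::call shown earlier reduce to copying the argument vector; a hedged sketch of that equivalence (variable names are illustrative, not from this commit):

    // Each TensorView collects only itself, so ...
    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> inputs;
    for (const auto& argument : arguments)
    {
        argument->collect_tensor_views(inputs, argument); // pushes `argument`
    }
    // ... is now equivalent to a plain copy:
    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> inputs_copy = arguments;
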
@@ -205,7 +205,7 @@ TEST(benchmark, concat_32x1x200_axis1_6)
     auto backend = manager->allocate_backend();
     auto cf = backend->make_call_frame(external);
-    vector<shared_ptr<runtime::Value>> input_vals;
+    vector<shared_ptr<runtime::TensorView>> input_vals;
     for (size_t i = 0; i < n_arrays; i++)
     {
...
@@ -19,7 +19,6 @@
 #include "ngraph/runtime/backend.hpp"
 #include "ngraph/runtime/manager.hpp"
-#include "ngraph/runtime/value.hpp"
 #include "ngraph/types/element_type.hpp"
 namespace ngraph
...