Unverified Commit 5fc7cf65 authored by Robert Kimball, committed by GitHub

Rename runtime::TensorView to runtime::Tensor (#1699)

* rename files

* rename runtime TensorView to Tensor

* rename HostTensorView to HostTensor
parent 7b9bf2a8
@@ -65,5 +65,5 @@ make_shared<Function>(results, result_type, parameters);
 make_shared<Function>(results, parameters);
 ```
-The runtime::TensorView methods to get_tensor<> and write<T>(std::vector&) have been removed
+The runtime::Tensor methods to get_tensor<> and write<T>(std::vector&) have been removed
 to the unit test directory under utils/test_tool.hpp read_vector and write_vector.
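For reference, the relocated helpers look roughly like the sketch below. This is a minimal illustration, not the verbatim test-utility code: it assumes the helpers are thin wrappers over the renamed `runtime::Tensor` `read`/`write` calls that appear later in this diff.

```cpp
// Hypothetical sketch of read_vector/write_vector over the renamed API.
// The real versions live in the unit test utilities.
#include <memory>
#include <vector>
#include "ngraph/runtime/tensor.hpp"

template <typename T>
std::vector<T> read_vector(const std::shared_ptr<ngraph::runtime::Tensor>& tv)
{
    // Copy the whole tensor out into a host-side vector.
    std::vector<T> result(tv->get_element_count());
    tv->read(result.data(), 0, result.size() * sizeof(T));
    return result;
}

template <typename T>
void write_vector(const std::shared_ptr<ngraph::runtime::Tensor>& tv,
                  const std::vector<T>& values)
{
    // Copy a host-side vector into the tensor, starting at offset 0.
    tv->write(values.data(), 0, values.size() * sizeof(T));
}
```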
@@ -35,9 +35,8 @@
 using namespace ngraph;
-size_t
-accuracy_count(const std::shared_ptr<runtime::TensorView>& t_softmax,
-               const std::shared_ptr<runtime::TensorView>& t_Y)
+size_t accuracy_count(const std::shared_ptr<runtime::Tensor>& t_softmax,
+                      const std::shared_ptr<runtime::Tensor>& t_Y)
 {
     const Shape& softmax_shape = t_softmax->get_shape();
     size_t batch_size = softmax_shape.at(0);
@@ -76,13 +75,13 @@ size_t
 float test_accuracy(MNistDataLoader& loader,
                     std::shared_ptr<runtime::Backend> backend,
                     std::shared_ptr<Function> function,
-                    const std::shared_ptr<runtime::TensorView>& t_X,
-                    const std::shared_ptr<runtime::TensorView>& t_Y,
-                    const std::shared_ptr<runtime::TensorView>& t_softmax,
-                    const std::shared_ptr<runtime::TensorView>& t_W0,
-                    const std::shared_ptr<runtime::TensorView>& t_b0,
-                    const std::shared_ptr<runtime::TensorView>& t_W1,
-                    const std::shared_ptr<runtime::TensorView>& t_b1)
+                    const std::shared_ptr<runtime::Tensor>& t_X,
+                    const std::shared_ptr<runtime::Tensor>& t_Y,
+                    const std::shared_ptr<runtime::Tensor>& t_softmax,
+                    const std::shared_ptr<runtime::Tensor>& t_W0,
+                    const std::shared_ptr<runtime::Tensor>& t_b0,
+                    const std::shared_ptr<runtime::Tensor>& t_W1,
+                    const std::shared_ptr<runtime::Tensor>& t_b1)
 {
     loader.reset();
     size_t batch_size = loader.get_batch_size();
...
@@ -34,9 +34,8 @@
 using namespace ngraph;
-size_t
-accuracy_count(const std::shared_ptr<runtime::TensorView>& t_softmax,
-               const std::shared_ptr<runtime::TensorView>& t_Y)
+size_t accuracy_count(const std::shared_ptr<runtime::Tensor>& t_softmax,
+                      const std::shared_ptr<runtime::Tensor>& t_Y)
 {
     const Shape& softmax_shape = t_softmax->get_shape();
     size_t batch_size = softmax_shape.at(0);
@@ -75,13 +74,13 @@ size_t
 float test_accuracy(MNistDataLoader& loader,
                     std::shared_ptr<runtime::Backend> backend,
                     std::shared_ptr<Function> function,
-                    const std::shared_ptr<runtime::TensorView>& t_X,
-                    const std::shared_ptr<runtime::TensorView>& t_Y,
-                    const std::shared_ptr<runtime::TensorView>& t_softmax,
-                    const std::shared_ptr<runtime::TensorView>& t_W0,
-                    const std::shared_ptr<runtime::TensorView>& t_b0,
-                    const std::shared_ptr<runtime::TensorView>& t_W1,
-                    const std::shared_ptr<runtime::TensorView>& t_b1)
+                    const std::shared_ptr<runtime::Tensor>& t_X,
+                    const std::shared_ptr<runtime::Tensor>& t_Y,
+                    const std::shared_ptr<runtime::Tensor>& t_softmax,
+                    const std::shared_ptr<runtime::Tensor>& t_W0,
+                    const std::shared_ptr<runtime::Tensor>& t_b0,
+                    const std::shared_ptr<runtime::Tensor>& t_W1,
+                    const std::shared_ptr<runtime::Tensor>& t_b1)
 {
     loader.reset();
     size_t batch_size = loader.get_batch_size();
...
@@ -22,7 +22,7 @@
 #include <ngraph/ngraph.hpp>
 // Make a runtime tensor for a node output
-std::shared_ptr<ngraph::runtime::TensorView> make_output_tensor(
+std::shared_ptr<ngraph::runtime::Tensor> make_output_tensor(
     const std::shared_ptr<ngraph::runtime::Backend>& backend,
     const std::shared_ptr<ngraph::Node>& node,
     size_t output_pos)
@@ -35,7 +35,7 @@ std::shared_ptr<ngraph::runtime::TensorView> make_output_tensor(
 // Initialize a tensor from a random generator
 template <typename T>
 void randomize(std::function<T()> rand,
-               const std::shared_ptr<ngraph::runtime::TensorView>& t)
+               const std::shared_ptr<ngraph::runtime::Tensor>& t)
 {
     if (t->get_element_type().bitwidth() != 8 * sizeof(T))
     {
@@ -54,7 +54,7 @@ void randomize(std::function<T()> rand,
 // Get a scalar value from a tensor, optionally at an element offset
 template <typename T>
-T get_scalar(const std::shared_ptr<ngraph::runtime::TensorView>& t,
+T get_scalar(const std::shared_ptr<ngraph::runtime::Tensor>& t,
              size_t element_offset = 0)
 {
     T result;
@@ -64,7 +64,7 @@ T get_scalar(const std::shared_ptr<ngraph::runtime::TensorView>& t,
 // Set a scalar value in a tensor, optionally at an element offset
 template <typename T>
-void set_scalar(const std::shared_ptr<ngraph::runtime::TensorView>& t,
+void set_scalar(const std::shared_ptr<ngraph::runtime::Tensor>& t,
                 T value,
                 size_t element_offset = 0)
 {
@@ -91,9 +91,8 @@ std::ostream& operator<<(std::ostream& s, const ngraph::Shape& shape)
 class TensorDumper
 {
 protected:
-    TensorDumper(
-        const std::string& name,
-        const std::shared_ptr<ngraph::runtime::TensorView>& tensor)
+    TensorDumper(const std::string& name,
+                 const std::shared_ptr<ngraph::runtime::Tensor>& tensor)
         : m_name(name)
         , m_tensor(tensor)
     {
@@ -102,7 +101,7 @@ protected:
 public:
     virtual ~TensorDumper() {}
     const std::string& get_name() const { return m_name; }
-    std::shared_ptr<ngraph::runtime::TensorView> get_tensor() const
+    std::shared_ptr<ngraph::runtime::Tensor> get_tensor() const
     {
         return m_tensor;
     }
@@ -110,7 +109,7 @@ public:
 protected:
     std::string m_name;
-    std::shared_ptr<ngraph::runtime::TensorView> m_tensor;
+    std::shared_ptr<ngraph::runtime::Tensor> m_tensor;
 };
 std::ostream& operator<<(std::ostream& s, const TensorDumper& td)
@@ -123,7 +122,7 @@ class MinMax : public TensorDumper
 {
 public:
     MinMax(const std::string& name,
-           const std::shared_ptr<ngraph::runtime::TensorView>& tensor)
+           const std::shared_ptr<ngraph::runtime::Tensor>& tensor)
         : TensorDumper(name, tensor)
     {
         size_t n = m_tensor->get_element_count();
@@ -153,14 +152,14 @@ class DumpTensor : public TensorDumper
 {
 public:
     DumpTensor(const std::string& name,
-               const std::shared_ptr<ngraph::runtime::TensorView>& tensor)
+               const std::shared_ptr<ngraph::runtime::Tensor>& tensor)
         : TensorDumper(name, tensor)
     {
     }
     std::ostream& dump(std::ostream& s) const override
     {
-        std::shared_ptr<ngraph::runtime::TensorView> t{get_tensor()};
+        std::shared_ptr<ngraph::runtime::Tensor> t{get_tensor()};
         const ngraph::Shape& shape = t->get_shape();
         s << "Tensor<" << get_name() << ": ";
         for (size_t i = 0; i < shape.size(); ++i)
...
@@ -175,7 +175,7 @@ the three parameters and the return value.
 Each tensor is a shared pointer to a :term:`Tensorview`, which is the interface
 backends implement for tensor use. When there are no more references to the
 tensor view, it will be freed when convenient for the backend. See the
-:doc:`../programmable/index` documentation for details on ``TensorView``.
+:doc:`../programmable/index` documentation for details on ``Tensor``.
 .. _initialize_inputs:
@@ -189,7 +189,7 @@ Next we need to copy some data into the tensors.
    :language: cpp
    :lines: 48-55
-The ``runtime::TensorView`` interface has ``write`` and ``read`` methods for
+The ``runtime::Tensor`` interface has ``write`` and ``read`` methods for
 copying data to/from the tensor.
 .. _invoke_cmp:
...
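A minimal sketch of the write/read round trip that documentation describes, using only calls visible in this commit (`Backend::create`, `create_tensor`, `Tensor::write`, `Tensor::read`); the backend name, shape, and values are illustrative:

```cpp
#include <memory>
#include <vector>
#include "ngraph/ngraph.hpp"

int main()
{
    // Create a backend and a 2x2 float32 tensor on it.
    auto backend = ngraph::runtime::Backend::create("CPU");
    auto tensor = backend->create_tensor(ngraph::element::f32, ngraph::Shape{2, 2});

    // write(source, byte_offset, byte_count): copy host data into the tensor.
    std::vector<float> input{1, 2, 3, 4};
    tensor->write(input.data(), 0, input.size() * sizeof(float));

    // read(target, byte_offset, byte_count): copy tensor data back out.
    std::vector<float> output(input.size());
    tensor->read(output.data(), 0, output.size() * sizeof(float));
    return 0;
}
```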
@@ -26,10 +26,10 @@ framework developer to develop a custom UI or API.
-TensorView
-===========
+Tensor
+=======
-.. doxygenclass:: ngraph::runtime::TensorView
+.. doxygenclass:: ngraph::runtime::Tensor
    :project: ngraph
    :members:
...
@@ -28,4 +28,4 @@ else:
     sys.setdlopenflags(flags)
 from _pyngraph.runtime import Backend
-from _pyngraph.runtime import TensorView
+from _pyngraph.runtime import Tensor
@@ -20,7 +20,7 @@ from typing import List, Union
 import numpy as np
 from ngraph.impl import Function, Node, Shape, serialize, util
-from ngraph.impl.runtime import Backend, TensorView
+from ngraph.impl.runtime import Backend, Tensor
 from ngraph.utils.types import get_dtype, NumericData
 from ngraph.exceptions import UserInputError
@@ -68,7 +68,7 @@ class Computation(object):
         self.function = ng_function
         self.parameters = ng_function.get_parameters()
-        self.tensor_views = []  # type: List[TensorView]
+        self.tensor_views = []  # type: List[Tensor]
         for parameter in self.parameters:
             shape = parameter.get_shape()
             element_type = parameter.get_element_type()
@@ -107,12 +107,12 @@ class Computation(object):
         return serialize(self.function, indent)
     @staticmethod
-    def _get_buffer_size(element_type, element_count):  # type: (TensorView, int) -> int
+    def _get_buffer_size(element_type, element_count):  # type: (Tensor, int) -> int
         return int((element_type.bitwidth / 8.0) * element_count)
     @staticmethod
     def _write_ndarray_to_tensor_view(value, tensor_view):
-        # type: (np.ndarray, TensorView) -> None
+        # type: (np.ndarray, Tensor) -> None
         tensor_view_dtype = get_dtype(tensor_view.element_type)
         if list(tensor_view.shape) != list(value.shape) and len(value.shape) > 0:
             raise UserInputError('Provided tensor\'s shape: %s does not match the expected: %s.',
@@ -132,7 +132,7 @@ class Computation(object):
     @staticmethod
     def _read_tensor_view_to_ndarray(tensor_view, output):
-        # type: (TensorView, np.ndarray) -> None
+        # type: (Tensor, np.ndarray) -> None
         buffer_size = Computation._get_buffer_size(
             tensor_view.element_type, tensor_view.element_count)
         tensor_view.read(util.numpy_to_c(output), 0, buffer_size)
@@ -18,7 +18,7 @@
 #include <pybind11/stl.h>
 #include "ngraph/runtime/backend.hpp"
-#include "ngraph/runtime/tensor_view.hpp"
+#include "ngraph/runtime/tensor.hpp"
 #include "pyngraph/runtime/backend.hpp"
 namespace py = pybind11;
@@ -31,7 +31,7 @@ void regclass_pyngraph_runtime_Backend(py::module m)
     backend.def_static("create", &ngraph::runtime::Backend::create);
     backend.def_static("get_registered_devices", &ngraph::runtime::Backend::get_registered_devices);
     backend.def("create_tensor",
-                (std::shared_ptr<ngraph::runtime::TensorView>(ngraph::runtime::Backend::*)(
+                (std::shared_ptr<ngraph::runtime::Tensor>(ngraph::runtime::Backend::*)(
                     const ngraph::element::Type&, const ngraph::Shape&)) &
                     ngraph::runtime::Backend::create_tensor);
     backend.def("compile",
@@ -40,8 +40,8 @@ void regclass_pyngraph_runtime_Backend(py::module m)
     backend.def("call",
                 (void (ngraph::runtime::Backend::*)(
                     std::shared_ptr<ngraph::Function>,
-                    const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>&,
-                    const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>&)) &
+                    const std::vector<std::shared_ptr<ngraph::runtime::Tensor>>&,
+                    const std::vector<std::shared_ptr<ngraph::runtime::Tensor>>&)) &
                     ngraph::runtime::Backend::call);
     backend.def("remove_compiled_function",
                 (void (ngraph::runtime::Backend::*)(std::shared_ptr<ngraph::Function>)) &
...
@@ -23,6 +23,6 @@ void regmodule_pyngraph_runtime(py::module m)
 {
     py::module m_runtime =
         m.def_submodule("runtime", "Package ngraph.impl.runtime wraps ngraph::runtime");
-    regclass_pyngraph_runtime_TensorView(m_runtime);
+    regclass_pyngraph_runtime_Tensor(m_runtime);
     regclass_pyngraph_runtime_Backend(m_runtime);
 }
@@ -18,7 +18,7 @@
 #include <pybind11/pybind11.h>
 #include "pyngraph/runtime/backend.hpp"
-#include "pyngraph/runtime/tensor_view.hpp"
+#include "pyngraph/runtime/tensor.hpp"
 namespace py = pybind11;
...
@@ -18,25 +18,24 @@
 #include <pybind11/stl.h>
 #include "ngraph/descriptor/tensor.hpp"
-#include "ngraph/runtime/tensor_view.hpp"
-#include "pyngraph/runtime/tensor_view.hpp"
+#include "ngraph/runtime/tensor.hpp"
+#include "pyngraph/runtime/tensor.hpp"
 namespace py = pybind11;
-void regclass_pyngraph_runtime_TensorView(py::module m)
+void regclass_pyngraph_runtime_Tensor(py::module m)
 {
-    py::class_<ngraph::runtime::TensorView, std::shared_ptr<ngraph::runtime::TensorView>>
-        tensorView(m, "TensorView");
-    tensorView.doc() = "ngraph.impl.runtime.TensorView wraps ngraph::runtime::TensorView";
-    tensorView.def("write",
-                   (void (ngraph::runtime::TensorView::*)(const void*, size_t, size_t)) &
-                       ngraph::runtime::TensorView::write);
-    tensorView.def("read", &ngraph::runtime::TensorView::read);
-    tensorView.def_property_readonly("shape", &ngraph::runtime::TensorView::get_shape);
-    tensorView.def_property_readonly("element_count",
-                                     &ngraph::runtime::TensorView::get_element_count);
-    tensorView.def_property_readonly("element_type", [](const ngraph::runtime::TensorView& self) {
+    py::class_<ngraph::runtime::Tensor, std::shared_ptr<ngraph::runtime::Tensor>> tensor(m,
+                                                                                         "Tensor");
+    tensor.doc() = "ngraph.impl.runtime.Tensor wraps ngraph::runtime::Tensor";
+    tensor.def("write",
+               (void (ngraph::runtime::Tensor::*)(const void*, size_t, size_t)) &
+                   ngraph::runtime::Tensor::write);
+    tensor.def("read", &ngraph::runtime::Tensor::read);
+    tensor.def_property_readonly("shape", &ngraph::runtime::Tensor::get_shape);
+    tensor.def_property_readonly("element_count", &ngraph::runtime::Tensor::get_element_count);
+    tensor.def_property_readonly("element_type", [](const ngraph::runtime::Tensor& self) {
         return self.get_element_type();
     });
 }
@@ -20,4 +20,4 @@
 namespace py = pybind11;
-void regclass_pyngraph_runtime_TensorView(py::module m);
+void regclass_pyngraph_runtime_Tensor(py::module m);
@@ -212,7 +212,7 @@ sources = ['pyngraph/function.cpp',
            'pyngraph/ops/softmax.cpp',
            'pyngraph/runtime/backend.cpp',
            'pyngraph/runtime/regmodule_pyngraph_runtime.cpp',
-           'pyngraph/runtime/tensor_view.cpp',
+           'pyngraph/runtime/tensor.cpp',
            'pyngraph/passes/manager.cpp',
            'pyngraph/passes/regmodule_pyngraph_passes.cpp',
            'pyngraph/types/element_type.cpp',
...
@@ -140,8 +140,8 @@ set (SRC
     runtime/aligned_buffer.cpp
     runtime/backend.cpp
     runtime/backend_manager.cpp
-    runtime/host_tensor_view.cpp
-    runtime/tensor_view.cpp
+    runtime/host_tensor.cpp
+    runtime/tensor.cpp
     serializer.cpp
     shape.cpp
     strides.cpp
...
@@ -68,9 +68,6 @@ namespace ngraph
             size_t m_pool_offset{0};
         };
-        using TensorView = Tensor;
-        using TensorViewPtrs = std::vector<std::shared_ptr<Tensor>>;
         std::ostream& operator<<(std::ostream&, const ngraph::descriptor::Tensor&);
     }
 }
@@ -23,7 +23,7 @@
 #include "ngraph/function.hpp"
 #include "ngraph/runtime/backend.hpp"
-#include "ngraph/runtime/tensor_view.hpp"
+#include "ngraph/runtime/tensor.hpp"
 namespace ngraph
 {
@@ -53,16 +53,16 @@ namespace ngraph
         }
         bool call(const std::shared_ptr<Function>& function,
-                  const std::vector<std::shared_ptr<runtime::TensorView>>& outputs,
-                  const std::vector<std::shared_ptr<runtime::TensorView>>& inputs) const
+                  const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
+                  const std::vector<std::shared_ptr<runtime::Tensor>>& inputs) const
         {
             return get().call(function, outputs, inputs);
         }
         bool call_with_validate(
             const std::shared_ptr<Function>& function,
-            const std::vector<std::shared_ptr<runtime::TensorView>>& outputs,
-            const std::vector<std::shared_ptr<runtime::TensorView>>& inputs) const
+            const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
+            const std::vector<std::shared_ptr<runtime::Tensor>>& inputs) const
         {
             return get().call_with_validate(function, outputs, inputs);
         }
...
@@ -129,6 +129,6 @@
 #include "ngraph/op/tanh.hpp"
 #include "ngraph/op/topk.hpp"
 #include "ngraph/runtime/backend.hpp"
-#include "ngraph/runtime/tensor_view.hpp"
+#include "ngraph/runtime/tensor.hpp"
 #include "ngraph/shape.hpp"
 #include "ngraph/type/element_type.hpp"
@@ -50,8 +50,8 @@ vector<ngraph::runtime::PerformanceCounter>
 }
 void runtime::Backend::validate_call(shared_ptr<const Function> function,
-                                     const vector<shared_ptr<runtime::TensorView>>& outputs,
-                                     const vector<shared_ptr<runtime::TensorView>>& inputs)
+                                     const vector<shared_ptr<runtime::Tensor>>& outputs,
+                                     const vector<shared_ptr<runtime::Tensor>>& inputs)
 {
     const op::ParameterVector& input_parameters = function->get_parameters();
     if (input_parameters.size() != inputs.size())
...
@@ -28,7 +28,7 @@ namespace ngraph
     namespace runtime
     {
         class ExternalFunction;
-        class TensorView;
+        class Tensor;
         class Backend;
     }
 }
@@ -55,7 +55,7 @@ public:
     /// \param element_type The type of the tensor element
     /// \param shape The shape of the tensor
     /// \returns shared_ptr to a new backend specific tensor
-    virtual std::shared_ptr<ngraph::runtime::TensorView>
+    virtual std::shared_ptr<ngraph::runtime::Tensor>
         create_tensor(const ngraph::element::Type& element_type, const Shape& shape) = 0;
     /// \brief Create a tensor specific to this backend
@@ -65,14 +65,14 @@ public:
     ///        must be sufficient to contain the tensor. The lifetime of the buffer is the
     ///        responsibility of the caller.
     /// \returns shared_ptr to a new backend specific tensor
-    virtual std::shared_ptr<ngraph::runtime::TensorView> create_tensor(
+    virtual std::shared_ptr<ngraph::runtime::Tensor> create_tensor(
         const ngraph::element::Type& element_type, const Shape& shape, void* memory_pointer) = 0;
     /// \brief Create a tensor of C type T specific to this backend
     /// \param shape The shape of the tensor
     /// \returns shared_ptr to a new backend specific tensor
     template <typename T>
-    std::shared_ptr<ngraph::runtime::TensorView> create_tensor(const Shape& shape)
+    std::shared_ptr<ngraph::runtime::Tensor> create_tensor(const Shape& shape)
     {
         return create_tensor(element::from<T>(), shape);
     }
@@ -87,16 +87,16 @@ public:
     /// \param func The function to execute
     /// \returns true if iteration is successful, false otherwise
     virtual bool call(std::shared_ptr<Function> func,
-                      const std::vector<std::shared_ptr<runtime::TensorView>>& outputs,
-                      const std::vector<std::shared_ptr<runtime::TensorView>>& inputs) = 0;
+                      const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
+                      const std::vector<std::shared_ptr<runtime::Tensor>>& inputs) = 0;
     /// \brief Executes a single iteration of a Function. If func is not compiled the call will
     ///        compile it. Optionally validates the inputs and outputs against the function graph.
     /// \param func The function to execute
     /// \returns true if iteration is successful, false otherwise
     bool call_with_validate(std::shared_ptr<Function> func,
-                            const std::vector<std::shared_ptr<runtime::TensorView>>& outputs,
-                            const std::vector<std::shared_ptr<runtime::TensorView>>& inputs)
+                            const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
+                            const std::vector<std::shared_ptr<runtime::Tensor>>& inputs)
     {
         validate_call(func, outputs, inputs);
         return call(func, outputs, inputs);
@@ -120,6 +120,6 @@ public:
 protected:
     void validate_call(std::shared_ptr<const Function> func,
-                       const std::vector<std::shared_ptr<runtime::TensorView>>& outputs,
-                       const std::vector<std::shared_ptr<runtime::TensorView>>& inputs);
+                       const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
+                       const std::vector<std::shared_ptr<runtime::Tensor>>& inputs);
 };
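Taken together, the renamed Backend interface is used like the sketch below: build a Function, create runtime::Tensor objects on a backend, and execute with call_with_validate. The signatures match the backend.hpp hunk above; the Add graph, backend name, shapes, and values are illustrative assumptions.

```cpp
#include <memory>
#include <vector>
#include "ngraph/ngraph.hpp"

int main()
{
    using namespace ngraph;

    // A trivial graph: r = a + b, with two 2x2 float32 parameters.
    auto a = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});
    auto b = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});
    auto f = std::make_shared<Function>(std::make_shared<op::Add>(a, b),
                                        op::ParameterVector{a, b});

    auto backend = runtime::Backend::create("CPU");

    // Tensors are now runtime::Tensor, not runtime::TensorView.
    std::shared_ptr<runtime::Tensor> t_a = backend->create_tensor(element::f32, Shape{2, 2});
    std::shared_ptr<runtime::Tensor> t_b = backend->create_tensor(element::f32, Shape{2, 2});
    std::shared_ptr<runtime::Tensor> t_r = backend->create_tensor(element::f32, Shape{2, 2});

    std::vector<float> va{1, 2, 3, 4};
    std::vector<float> vb{5, 6, 7, 8};
    t_a->write(va.data(), 0, va.size() * sizeof(float));
    t_b->write(vb.data(), 0, vb.size() * sizeof(float));

    // Validates shapes/types against the function graph, then runs it.
    backend->call_with_validate(f, {t_r}, {t_a, t_b});
    return 0;
}
```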
@@ -17,7 +17,7 @@
 #include "ngraph/op/function_call.hpp"
 #include "ngraph/runtime/backend.hpp"
 #include "ngraph/runtime/cpu/cpu_builder.hpp"
-#include "ngraph/runtime/tensor_view.hpp"
+#include "ngraph/runtime/tensor.hpp"
 using namespace std;
 using namespace ngraph;
...
@@ -19,7 +19,7 @@
 #include "ngraph/runtime/backend.hpp"
 #include "ngraph/runtime/cpu/cpu_builder.hpp"
 #include "ngraph/runtime/cpu/cpu_external_function.hpp"
-#include "ngraph/runtime/tensor_view.hpp"
+#include "ngraph/runtime/tensor.hpp"
 using namespace std;
 using namespace ngraph;
...
@@ -18,7 +18,7 @@
 #include "ngraph/op/reduce_window.hpp"
 #include "ngraph/runtime/cpu/cpu_builder.hpp"
 #include "ngraph/runtime/cpu/cpu_external_function.hpp"
-#include "ngraph/runtime/tensor_view.hpp"
+#include "ngraph/runtime/tensor.hpp"
 using namespace std;
 using namespace ngraph;
...
@@ -18,7 +18,7 @@
 #include "ngraph/runtime/backend.hpp"
 #include "ngraph/runtime/cpu/cpu_builder.hpp"
 #include "ngraph/runtime/reference/select_and_scatter.hpp"
-#include "ngraph/runtime/tensor_view.hpp"
+#include "ngraph/runtime/tensor.hpp"
 using namespace std;
 using namespace ngraph;
...
@@ -60,13 +60,13 @@ shared_ptr<runtime::cpu::CPU_CallFrame> runtime::cpu::CPU_Backend::make_call_fra
     return external_function->make_call_frame();
 }
-shared_ptr<runtime::TensorView>
+shared_ptr<runtime::Tensor>
     runtime::cpu::CPU_Backend::create_tensor(const element::Type& element_type, const Shape& shape)
 {
     return make_shared<runtime::cpu::CPUTensorView>(element_type, shape);
 }
-shared_ptr<runtime::TensorView> runtime::cpu::CPU_Backend::create_tensor(
+shared_ptr<runtime::Tensor> runtime::cpu::CPU_Backend::create_tensor(
     const element::Type& element_type, const Shape& shape, void* memory_pointer)
 {
     return make_shared<runtime::cpu::CPUTensorView>(element_type, shape, memory_pointer);
@@ -88,8 +88,8 @@ bool runtime::cpu::CPU_Backend::compile(shared_ptr<Function> func)
 }
 bool runtime::cpu::CPU_Backend::call(shared_ptr<Function> func,
-                                     const vector<shared_ptr<runtime::TensorView>>& outputs,
-                                     const vector<shared_ptr<runtime::TensorView>>& inputs)
+                                     const vector<shared_ptr<runtime::Tensor>>& outputs,
+                                     const vector<shared_ptr<runtime::Tensor>>& inputs)
 {
     bool rc = true;
...
@@ -36,20 +36,20 @@ namespace ngraph
                 std::shared_ptr<CPU_CallFrame>
                     make_call_frame(const std::shared_ptr<CPU_ExternalFunction>& external_function);
-                std::shared_ptr<ngraph::runtime::TensorView>
+                std::shared_ptr<ngraph::runtime::Tensor>
                     create_tensor(const ngraph::element::Type& element_type,
                                   const Shape& shape,
                                   void* memory_pointer) override;
-                std::shared_ptr<ngraph::runtime::TensorView>
+                std::shared_ptr<ngraph::runtime::Tensor>
                     create_tensor(const ngraph::element::Type& element_type,
                                   const Shape& shape) override;
                 bool compile(std::shared_ptr<Function> func) override;
                 bool call(std::shared_ptr<Function> func,
-                          const std::vector<std::shared_ptr<runtime::TensorView>>& outputs,
-                          const std::vector<std::shared_ptr<runtime::TensorView>>& inputs) override;
+                          const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
+                          const std::vector<std::shared_ptr<runtime::Tensor>>& inputs) override;
                 void remove_compiled_function(std::shared_ptr<Function> func) override;
...
@@ -39,8 +39,8 @@ runtime::cpu::CPU_CallFrame::~CPU_CallFrame()
 }
 void runtime::cpu::CPU_CallFrame::call(
-    const std::vector<std::shared_ptr<runtime::TensorView>>& output_tvs,
-    const std::vector<std::shared_ptr<runtime::TensorView>>& input_tvs)
+    const std::vector<std::shared_ptr<runtime::Tensor>>& output_tvs,
+    const std::vector<std::shared_ptr<runtime::Tensor>>& input_tvs)
 {
     vector<void*> inputs;
     vector<void*> outputs;
@@ -80,7 +80,7 @@ void runtime::cpu::CPU_CallFrame::call(
 }
 void runtime::cpu::CPU_CallFrame::propagate_layouts(
-    const std::vector<std::shared_ptr<runtime::TensorView>>& tvs,
+    const std::vector<std::shared_ptr<runtime::Tensor>>& tvs,
     const LayoutDescriptorPtrs& layouts) const
 {
     if (layouts.size() != tvs.size())
...
@@ -23,7 +23,7 @@
 #include "ngraph/function.hpp"
 #include "ngraph/runtime/cpu/cpu_layout_descriptor.hpp"
 #include "ngraph/runtime/cpu/cpu_runtime_context.hpp"
-#include "ngraph/runtime/tensor_view.hpp"
+#include "ngraph/runtime/tensor.hpp"
 namespace ngraph
 {
@@ -49,10 +49,10 @@ namespace ngraph
                 /// \brief Invoke the function with values matching the signature of the function.
                 ///
                 /// Tuples will be expanded into their tensor views to build the call frame.
-                void call(const std::vector<std::shared_ptr<runtime::TensorView>>& outputs,
-                          const std::vector<std::shared_ptr<runtime::TensorView>>& inputs);
+                void call(const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
+                          const std::vector<std::shared_ptr<runtime::Tensor>>& inputs);
-                void propagate_layouts(const std::vector<std::shared_ptr<runtime::TensorView>>& tvs,
+                void propagate_layouts(const std::vector<std::shared_ptr<runtime::Tensor>>& tvs,
                                        const LayoutDescriptorPtrs& layouts) const;
                 void setup_runtime_context();
...
@@ -539,7 +539,7 @@ using namespace ngraph::runtime;
             if (c)
             {
                 m_active_constants.push_back(node);
-                shared_ptr<descriptor::TensorView> tv = node->get_outputs()[0].get_tensor_ptr();
+                shared_ptr<descriptor::Tensor> tv = node->get_outputs()[0].get_tensor_ptr();
                 string type = tv->get_element_type().c_type_string();
                 writer << "static " << type << "* " << tv->get_name() << " = ((" << type << "*)("
                        << c->get_data_ptr() << "));\n";
@@ -565,15 +565,15 @@ using namespace ngraph::runtime;
         set<string> output_names;
         for (shared_ptr<Node> op : current_function->get_results())
         {
-            shared_ptr<descriptor::TensorView> tv = op->get_output_tensor_ptr();
+            shared_ptr<descriptor::Tensor> tv = op->get_output_tensor_ptr();
             output_names.insert(tv->get_name());
         }
-        set<descriptor::TensorView*> constants;
+        set<descriptor::Tensor*> constants;
         for (shared_ptr<Node> node : ordered_ops)
         {
             if (dynamic_cast<ngraph::op::Constant*>(node.get()))
             {
-                shared_ptr<descriptor::TensorView> tv = node->get_outputs()[0].get_tensor_ptr();
+                shared_ptr<descriptor::Tensor> tv = node->get_outputs()[0].get_tensor_ptr();
                 constants.insert(tv.get());
             }
         }
@@ -607,7 +607,7 @@ using namespace ngraph::runtime;
             for (const descriptor::Input& input : node->get_inputs())
             {
                 const descriptor::Output& output = input.get_output();
-                shared_ptr<descriptor::TensorView> tv = output.get_tensor_ptr();
+                shared_ptr<descriptor::Tensor> tv = output.get_tensor_ptr();
                 tensor_index_map.insert({tv->get_name(), tensor_index++});
             }
         }
@@ -667,7 +667,7 @@ using namespace ngraph::runtime;
        {
             for (size_t i = 0; i < param->get_output_size(); ++i)
             {
-                shared_ptr<descriptor::TensorView> tv = param->get_output_tensor_ptr(i);
+                shared_ptr<descriptor::Tensor> tv = param->get_output_tensor_ptr(i);
                 const element::Type& et = tv->get_element_type();
                 string type = et.c_type_string();
                 stringstream ss;
@@ -684,7 +684,7 @@ using namespace ngraph::runtime;
         for (size_t i = 0; i < current_function->get_output_size(); ++i)
         {
             shared_ptr<Node> op = current_function->get_output_op(i);
-            shared_ptr<descriptor::TensorView> tv = op->get_output_tensor_ptr();
+            shared_ptr<descriptor::Tensor> tv = op->get_output_tensor_ptr();
             string type = tv->get_element_type().c_type_string();
             stringstream ss;
             ss << "((" << type << "*)(outputs[" << i << "]))";
@@ -698,7 +698,7 @@ using namespace ngraph::runtime;
             auto input_node = res->get_inputs().at(0).get_output().get_node();
             if (!input_node->is_constant() && !input_node->is_parameter())
             {
-                shared_ptr<descriptor::TensorView> itv =
+                shared_ptr<descriptor::Tensor> itv =
                     res->get_inputs().at(0).get_output().get_tensor_ptr();
                 auto output_name = ss.str();
                 m_variable_name_map[itv->get_name()] = ss.str();
@@ -723,14 +723,14 @@ using namespace ngraph::runtime;
             for (const descriptor::Input& input : node->get_inputs())
             {
                 const descriptor::Output& output = input.get_output();
-                shared_ptr<descriptor::TensorView> tv = output.get_tensor_ptr();
+                shared_ptr<descriptor::Tensor> tv = output.get_tensor_ptr();
                 in.push_back(TensorViewWrapper(tv, m_variable_name_map[tv->get_name()]));
                 node_input_names.emplace_back(tv->get_name());
             }
             vector<TensorViewWrapper> out;
             for (const descriptor::Output& output : node->get_outputs())
             {
-                shared_ptr<descriptor::TensorView> tv = output.get_tensor_ptr();
+                shared_ptr<descriptor::Tensor> tv = output.get_tensor_ptr();
                 out.push_back(TensorViewWrapper(tv, m_variable_name_map[tv->get_name()]));
                 node_output_names.emplace_back(tv->get_name());
             }
@@ -784,7 +784,7 @@ using namespace ngraph::runtime;
             for (const descriptor::Input& input : node->get_inputs())
             {
                 const descriptor::Output& output = input.get_output();
-                shared_ptr<descriptor::TensorView> tv = output.get_tensor_ptr();
+                shared_ptr<descriptor::Tensor> tv = output.get_tensor_ptr();
                 auto input_name = tv->get_name();
                 if (output.get_node()->is_parameter())
@@ -1218,7 +1218,7 @@ void runtime::cpu::CPU_ExternalFunction::build()
     {
         for (size_t i = 0; i < param->get_output_size(); ++i)
         {
-            shared_ptr<descriptor::TensorView> tv = param->get_output_tensor_ptr(i);
+            shared_ptr<descriptor::Tensor> tv = param->get_output_tensor_ptr(i);
             function_input_index.emplace_back(
                 tensor_data[tv->get_name()], arg_index, tensor_stale[tv->get_name()]);
             m_tensor_roles[tv->get_name()] = CPUTensorRole::INPUT;
@@ -1231,7 +1231,7 @@ void runtime::cpu::CPU_ExternalFunction::build()
     for (size_t i = 0; i < m_function->get_output_size(); ++i)
     {
         shared_ptr<Node> op = m_function->get_output_op(i);
-        shared_ptr<descriptor::TensorView> tv = op->get_output_tensor_ptr();
+        shared_ptr<descriptor::Tensor> tv = op->get_output_tensor_ptr();
         function_output_index.emplace_back(tensor_data[tv->get_name()], i);
         m_tensor_roles[tv->get_name()] = CPUTensorRole::OUTPUT;
@@ -1242,7 +1242,7 @@ void runtime::cpu::CPU_ExternalFunction::build()
         auto input_node = res->get_inputs().at(0).get_output().get_node();
         if (!input_node->is_constant() && !input_node->is_parameter())
         {
-            shared_ptr<descriptor::TensorView> itv =
+            shared_ptr<descriptor::Tensor> itv =
                 res->get_inputs().at(0).get_output().get_tensor_ptr();
             function_output_index.emplace_back(tensor_data[itv->get_name()], i);
             m_tensor_roles[itv->get_name()] = CPUTensorRole::OUTPUT;
@@ -1270,7 +1270,7 @@ void runtime::cpu::CPU_ExternalFunction::build()
         for (const descriptor::Input& input : node->get_inputs())
         {
             const descriptor::Output& output = input.get_output();
-            shared_ptr<descriptor::TensorView> tv = output.get_tensor_ptr();
+            shared_ptr<descriptor::Tensor> tv = output.get_tensor_ptr();
             in.push_back(TensorViewWrapper(tv, tv->get_name()));
             in_names.push_back(tv->get_name());
         }
@@ -1279,7 +1279,7 @@ void runtime::cpu::CPU_ExternalFunction::build()
         for (const descriptor::Output& output : node->get_outputs())
         {
-            shared_ptr<descriptor::TensorView> tv = output.get_tensor_ptr();
+            shared_ptr<descriptor::Tensor> tv = output.get_tensor_ptr();
             out.push_back(TensorViewWrapper(tv, tv->get_name()));
             out_names.push_back(tv->get_name());
         }
@@ -1379,7 +1379,7 @@ void runtime::cpu::CPU_ExternalFunction::build()
             for (const descriptor::Input& input : node->get_inputs())
            {
                 const descriptor::Output& output = input.get_output();
-                shared_ptr<descriptor::TensorView> tv = output.get_tensor_ptr();
+                shared_ptr<descriptor::Tensor> tv = output.get_tensor_ptr();
                 temp << &tensor_data[tv->get_name()];
                 node_inputs.push_back(tv->get_name() + "(" + temp.str() + ")");
                 temp.str("");
@@ -1387,7 +1387,7 @@ void runtime::cpu::CPU_ExternalFunction::build()
             for (const descriptor::Output& output : node->get_outputs())
             {
-                shared_ptr<descriptor::TensorView> tv = output.get_tensor_ptr();
+                shared_ptr<descriptor::Tensor> tv = output.get_tensor_ptr();
                 temp << &tensor_data[tv->get_name()];
                 node_outputs.push_back(tv->get_name() + "(" + temp.str() + ")");
                 temp.str("");
@@ -1671,7 +1671,7 @@ string runtime::cpu::CPU_ExternalFunction::emit_op_as_function(const Node& node,
     for (const descriptor::Input& input : node.get_inputs())
     {
         const descriptor::Output& output = input.get_output();
-        shared_ptr<descriptor::TensorView> tv = output.get_tensor_ptr();
+        shared_ptr<descriptor::Tensor> tv = output.get_tensor_ptr();
         TensorViewWrapper tvw{tv, "_arg" + to_string(arg_index)};
         if (!contains(arg_names, tvw.get_name()))
         {
@@ -1688,7 +1688,7 @@ string runtime::cpu::CPU_ExternalFunction::emit_op_as_function(const Node& node,
     vector<TensorViewWrapper> out;
     for (const descriptor::Output& output : node.get_outputs())
     {
-        shared_ptr<descriptor::TensorView> tv = output.get_tensor_ptr();
+        shared_ptr<descriptor::Tensor> tv = output.get_tensor_ptr();
         TensorViewWrapper tvw{tv, "_out" + to_string(arg_index)};
         if (arg_index++ > 0)
         {
...
@@ -171,7 +171,7 @@ namespace ngraph
                 void handle_output_alias(
                     codegen::CodeWriter& writer,
                     const Node&,
-                    const std::unordered_map<descriptor::TensorView*, std::vector<size_t>>&);
+                    const std::unordered_map<descriptor::Tensor*, std::vector<size_t>>&);
                 bool is_functionally_identical(
                     const Node&,
...
@@ -35,7 +35,7 @@ namespace ngraph
             class LayoutDescriptor : public ngraph::descriptor::layout::TensorLayout
             {
             public:
-                LayoutDescriptor(const ngraph::descriptor::TensorView& tv);
+                LayoutDescriptor(const ngraph::descriptor::Tensor& tv);
                 ~LayoutDescriptor() override {}
                 virtual size_t get_allocated_size() override { return m_mkldnn_memory_size; }
                 size_t get_offset() const { return m_offset; }
...
@@ -35,8 +35,7 @@ runtime::cpu::CPUTensorView::CPUTensorView(const ngraph::element::Type& element_
                                            const Shape& shape,
                                            void* memory_pointer,
                                            const string& name)
-    : runtime::TensorView(
-          std::make_shared<ngraph::descriptor::TensorView>(element_type, shape, name))
+    : runtime::Tensor(std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, name))
     , buffer(nullptr)
     , aligned_buffer(nullptr)
 {
...
@@ -18,7 +18,7 @@
 #include <string>
-#include "ngraph/runtime/tensor_view.hpp"
+#include "ngraph/runtime/tensor.hpp"
 #include "ngraph/type/element_type.hpp"
 // This define is a workaround for gcc on centos and is required for aligned()
@@ -30,7 +30,7 @@ namespace ngraph
     {
         namespace cpu
         {
-            class CPUTensorView : public ngraph::runtime::TensorView
+            class CPUTensorView : public ngraph::runtime::Tensor
             {
             public:
                 CPUTensorView(const ngraph::element::Type& element_type,
...
@@ -98,13 +98,13 @@ runtime::gpu::GPU_Backend::BackendContext::~BackendContext()
     delete m_runtime_context->compiled_kernel_pool;
 }
-shared_ptr<runtime::TensorView>
+shared_ptr<runtime::Tensor>
     runtime::gpu::GPU_Backend::create_tensor(const element::Type& element_type, const Shape& shape)
 {
     return make_shared<runtime::gpu::GPU_TensorView>(element_type, shape);
 }
-shared_ptr<runtime::TensorView> runtime::gpu::GPU_Backend::create_tensor(
+shared_ptr<runtime::Tensor> runtime::gpu::GPU_Backend::create_tensor(
     const element::Type& element_type, const Shape& shape, void* memory_pointer)
 {
     return make_shared<runtime::gpu::GPU_TensorView>(element_type, shape, memory_pointer);
@@ -126,7 +126,7 @@ bool runtime::gpu::GPU_Backend::compile(shared_ptr<Function> func)
 }
 void runtime::gpu::GPU_Backend::initialize_io(void** target,
-                                              const vector<shared_ptr<runtime::TensorView>>& source)
+                                              const vector<shared_ptr<runtime::Tensor>>& source)
 {
     for (size_t i = 0; i < source.size(); i++)
     {
@@ -144,8 +144,8 @@ void runtime::gpu::GPU_Backend::initialize_io(void** target,
 }
 bool runtime::gpu::GPU_Backend::call(shared_ptr<Function> func,
-                                     const vector<shared_ptr<runtime::TensorView>>& outputs,
-                                     const vector<shared_ptr<runtime::TensorView>>& inputs)
+                                     const vector<shared_ptr<runtime::Tensor>>& outputs,
+                                     const vector<shared_ptr<runtime::Tensor>>& inputs)
 {
     bool rc = true;
...
@@ -42,20 +42,20 @@ namespace ngraph
             public:
                 GPU_Backend();
-                std::shared_ptr<ngraph::runtime::TensorView>
+                std::shared_ptr<ngraph::runtime::Tensor>
                     create_tensor(const ngraph::element::Type& element_type,
                                   const Shape& shape,
                                   void* memory_pointer) override;
-                std::shared_ptr<ngraph::runtime::TensorView>
+                std::shared_ptr<ngraph::runtime::Tensor>
                     create_tensor(const ngraph::element::Type& element_type,
                                   const Shape& shape) override;
                 bool compile(std::shared_ptr<Function> func) override;
                 bool call(std::shared_ptr<Function> func,
-                          const std::vector<std::shared_ptr<runtime::TensorView>>& outputs,
-                          const std::vector<std::shared_ptr<runtime::TensorView>>& inputs) override;
+                          const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
+                          const std::vector<std::shared_ptr<runtime::Tensor>>& inputs) override;
                 void remove_compiled_function(std::shared_ptr<Function> func) override;
                 void enable_performance_data(std::shared_ptr<Function> func, bool enable) override;
@@ -87,14 +87,14 @@ namespace ngraph
                     std::vector<void*> m_outputs;
                 };
-                /// \brief Convert a vector of TensorView into a vector of void* where each void*
-                ///        points to a TensorView's data buffer.
+                /// \brief Convert a vector of Tensor into a vector of void* where each void*
+                ///        points to a Tensor's data buffer.
                 /// \param target Pointer to a pre-allocated array of void* with
                 ///               size >= source.size()
-                /// \param source Source vector of TensorViews
+                /// \param source Source vector of Tensors
                 static void
                     initialize_io(void** target,
-                                  const std::vector<std::shared_ptr<runtime::TensorView>>& source);
+                                  const std::vector<std::shared_ptr<runtime::Tensor>>& source);
                 std::map<std::shared_ptr<Function>, FunctionInstance> m_function_map;
                 std::shared_ptr<BackendContext> m_context;
...
@@ -413,7 +413,7 @@ void runtime::gpu::GPU_ExternalFunction::emit_functions()
         shared_ptr<descriptor::Tensor> tv = op->get_output_tensor_ptr();
         output_names.insert(tv->get_name());
     }
-    set<descriptor::TensorView*> constants;
+    set<descriptor::Tensor*> constants;
     for (shared_ptr<Node> node : m_function_ordered_ops.at(current_function))
     {
         if (dynamic_cast<ngraph::op::Constant*>(node.get()))
...
@@ -29,8 +29,7 @@ using namespace std;
 runtime::gpu::GPU_TensorView::GPU_TensorView(const ngraph::element::Type& element_type,
                                              const Shape& shape,
                                              void* memory_pointer)
-    : runtime::TensorView(
-          std::make_shared<ngraph::descriptor::TensorView>(element_type, shape, "external"))
+    : runtime::Tensor(std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, "external"))
     , m_custom_memory(false)
 {
     m_descriptor->set_tensor_layout(
...
@@ -19,7 +19,7 @@
 #include <cuda.h>
 #include <memory>
-#include "ngraph/runtime/tensor_view.hpp"
+#include "ngraph/runtime/tensor.hpp"
 #include "ngraph/type/element_type.hpp"
 namespace ngraph
@@ -33,7 +33,7 @@ namespace ngraph
     }
 }
-class ngraph::runtime::gpu::GPU_TensorView : public ngraph::runtime::TensorView
+class ngraph::runtime::gpu::GPU_TensorView : public ngraph::runtime::Tensor
 {
 public:
     GPU_TensorView(const ngraph::element::Type& element_type, const Shape& shape);
...
...@@ -21,8 +21,8 @@ ...@@ -21,8 +21,8 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
runtime::gpu::GPU_TensorViewWrapper::GPU_TensorViewWrapper( runtime::gpu::GPU_TensorViewWrapper::GPU_TensorViewWrapper(const shared_ptr<descriptor::Tensor>& tv,
const shared_ptr<descriptor::TensorView>& tv, const string& alias) const string& alias)
: m_tensor(tv) : m_tensor(tv)
, m_alias(alias) , m_alias(alias)
{ {
......
...@@ -18,17 +18,16 @@ ...@@ -18,17 +18,16 @@
#include <memory> #include <memory>
#include "ngraph/descriptor/layout/dense_tensor_layout.hpp" #include "ngraph/descriptor/layout/dense_tensor_layout.hpp"
#include "ngraph/runtime/host_tensor_view.hpp" #include "ngraph/runtime/host_tensor.hpp"
using namespace ngraph; using namespace ngraph;
using namespace std; using namespace std;
runtime::HostTensorView::HostTensorView(const ngraph::element::Type& element_type, runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
const Shape& shape, const Shape& shape,
void* memory_pointer, void* memory_pointer,
const string& name) const string& name)
: runtime::TensorView( : runtime::Tensor(std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, name))
std::make_shared<ngraph::descriptor::TensorView>(element_type, shape, name))
, m_allocated_buffer_pool(nullptr) , m_allocated_buffer_pool(nullptr)
, m_aligned_buffer_pool(nullptr) , m_aligned_buffer_pool(nullptr)
...@@ -55,14 +54,14 @@ runtime::HostTensorView::HostTensorView(const ngraph::element::Type& element_typ ...@@ -55,14 +54,14 @@ runtime::HostTensorView::HostTensorView(const ngraph::element::Type& element_typ
} }
} }
runtime::HostTensorView::HostTensorView(const ngraph::element::Type& element_type, runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
const Shape& shape, const Shape& shape,
const string& name) const string& name)
: HostTensorView(element_type, shape, nullptr, name) : HostTensor(element_type, shape, nullptr, name)
{ {
} }
runtime::HostTensorView::~HostTensorView() runtime::HostTensor::~HostTensor()
{ {
if (m_allocated_buffer_pool != nullptr) if (m_allocated_buffer_pool != nullptr)
{ {
...@@ -70,17 +69,17 @@ runtime::HostTensorView::~HostTensorView() ...@@ -70,17 +69,17 @@ runtime::HostTensorView::~HostTensorView()
} }
} }
char* runtime::HostTensorView::get_data_ptr() char* runtime::HostTensor::get_data_ptr()
{ {
return m_aligned_buffer_pool; return m_aligned_buffer_pool;
} }
const char* runtime::HostTensorView::get_data_ptr() const const char* runtime::HostTensor::get_data_ptr() const
{ {
return m_aligned_buffer_pool; return m_aligned_buffer_pool;
} }
void runtime::HostTensorView::write(const void* source, size_t tensor_offset, size_t n) void runtime::HostTensor::write(const void* source, size_t tensor_offset, size_t n)
{ {
if (tensor_offset + n > m_buffer_size) if (tensor_offset + n > m_buffer_size)
{ {
...@@ -90,7 +89,7 @@ void runtime::HostTensorView::write(const void* source, size_t tensor_offset, si ...@@ -90,7 +89,7 @@ void runtime::HostTensorView::write(const void* source, size_t tensor_offset, si
memcpy(&target[tensor_offset], source, n); memcpy(&target[tensor_offset], source, n);
} }
void runtime::HostTensorView::read(void* target, size_t tensor_offset, size_t n) const void runtime::HostTensor::read(void* target, size_t tensor_offset, size_t n) const
{ {
if (tensor_offset + n > m_buffer_size) if (tensor_offset + n > m_buffer_size)
{ {
......
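`HostTensor::write` and `HostTensor::read` are bounds-checked `memcpy`s against the aligned buffer, so a write followed by a read of the same byte range round-trips exactly. A minimal sketch, assuming the headers from this change are on the include path:

```
#include <cassert>
#include <vector>
#include "ngraph/runtime/host_tensor.hpp"

int main()
{
    using namespace ngraph;
    std::vector<float> in{1.0f, 2.0f, 3.0f, 4.0f};
    runtime::HostTensor t(element::f32, Shape{2, 2}, "example");

    t.write(in.data(), 0, in.size() * sizeof(float)); // byte offset 0, n bytes
    std::vector<float> out(4);
    t.read(out.data(), 0, out.size() * sizeof(float));
    assert(in == out); // both calls are bounds-checked memcpys
    return 0;
}
```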
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
#include <memory> #include <memory>
#include "ngraph/runtime/tensor_view.hpp" #include "ngraph/runtime/tensor.hpp"
#include "ngraph/type/element_type.hpp" #include "ngraph/type/element_type.hpp"
namespace ngraph namespace ngraph
...@@ -27,21 +27,21 @@ namespace ngraph ...@@ -27,21 +27,21 @@ namespace ngraph
{ {
static size_t alignment = 64; static size_t alignment = 64;
class HostTensorView; class HostTensor;
} }
} }
class ngraph::runtime::HostTensorView : public ngraph::runtime::TensorView class ngraph::runtime::HostTensor : public ngraph::runtime::Tensor
{ {
public: public:
HostTensorView(const ngraph::element::Type& element_type, HostTensor(const ngraph::element::Type& element_type,
const Shape& shape, const Shape& shape,
const std::string& name = "external"); const std::string& name = "external");
HostTensorView(const ngraph::element::Type& element_type, HostTensor(const ngraph::element::Type& element_type,
const Shape& shape, const Shape& shape,
void* memory_pointer, void* memory_pointer,
const std::string& name = "external"); const std::string& name = "external");
virtual ~HostTensorView() override; virtual ~HostTensor() override;
char* get_data_ptr(); char* get_data_ptr();
const char* get_data_ptr() const; const char* get_data_ptr() const;
...@@ -71,9 +71,9 @@ public: ...@@ -71,9 +71,9 @@ public:
void read(void* p, size_t tensor_offset, size_t n) const override; void read(void* p, size_t tensor_offset, size_t n) const override;
private: private:
HostTensorView(const HostTensorView&) = delete; HostTensor(const HostTensor&) = delete;
HostTensorView(HostTensorView&&) = delete; HostTensor(HostTensor&&) = delete;
HostTensorView& operator=(const HostTensorView&) = delete; HostTensor& operator=(const HostTensor&) = delete;
char* m_allocated_buffer_pool; char* m_allocated_buffer_pool;
char* m_aligned_buffer_pool; char* m_aligned_buffer_pool;
......
...@@ -278,14 +278,14 @@ runtime::intelgpu::IntelGPUBackend::IntelGPUBackend() ...@@ -278,14 +278,14 @@ runtime::intelgpu::IntelGPUBackend::IntelGPUBackend()
ocl_engine = make_shared<cldnn::engine>(); ocl_engine = make_shared<cldnn::engine>();
} }
shared_ptr<runtime::TensorView> shared_ptr<runtime::Tensor>
runtime::intelgpu::IntelGPUBackend::create_tensor(const element::Type& element_type, runtime::intelgpu::IntelGPUBackend::create_tensor(const element::Type& element_type,
const Shape& shape) const Shape& shape)
{ {
return make_shared<runtime::intelgpu::IntelGPUTensorView>(element_type, shape, *ocl_engine); return make_shared<runtime::intelgpu::IntelGPUTensorView>(element_type, shape, *ocl_engine);
} }
shared_ptr<runtime::TensorView> runtime::intelgpu::IntelGPUBackend::create_tensor( shared_ptr<runtime::Tensor> runtime::intelgpu::IntelGPUBackend::create_tensor(
const element::Type& element_type, const Shape& shape, void* memory_pointer) const element::Type& element_type, const Shape& shape, void* memory_pointer)
{ {
return make_shared<runtime::intelgpu::IntelGPUTensorView>( return make_shared<runtime::intelgpu::IntelGPUTensorView>(
...@@ -1373,10 +1373,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -1373,10 +1373,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
return true; return true;
} }
bool runtime::intelgpu::IntelGPUBackend::call( bool runtime::intelgpu::IntelGPUBackend::call(shared_ptr<Function> func,
shared_ptr<Function> func, const vector<shared_ptr<runtime::Tensor>>& outputs,
const vector<shared_ptr<runtime::TensorView>>& outputs, const vector<shared_ptr<runtime::Tensor>>& inputs)
const vector<shared_ptr<runtime::TensorView>>& inputs)
{ {
validate_call(func, outputs, inputs); validate_call(func, outputs, inputs);
......
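`create_tensor` plus `call(func, outputs, inputs)` is the backend-facing surface this rename touches in every file. A short end-to-end sketch against the INTERPRETER backend named elsewhere in this change; the `Function` is assumed to be built by the caller.

```
#include <memory>
#include "ngraph/runtime/backend.hpp"

// Sketch: push one 2x2 float input through a compiled function.
void run_once(const std::shared_ptr<ngraph::Function>& f)
{
    auto backend = ngraph::runtime::Backend::create("INTERPRETER");
    auto a = backend->create_tensor(ngraph::element::f32, ngraph::Shape{2, 2});
    auto r = backend->create_tensor(ngraph::element::f32, ngraph::Shape{2, 2});

    float in[4] = {1, 2, 3, 4};
    a->write(in, 0, sizeof(in)); // raw bytes into the input tensor

    backend->compile(f);
    backend->call(f, {r}, {a}); // argument order: func, outputs, inputs

    float out[4];
    r->read(out, 0, sizeof(out)); // copy the result bytes back out
}
```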
...@@ -39,19 +39,19 @@ class ngraph::runtime::intelgpu::IntelGPUBackend : public runtime::Backend ...@@ -39,19 +39,19 @@ class ngraph::runtime::intelgpu::IntelGPUBackend : public runtime::Backend
{ {
public: public:
IntelGPUBackend(); IntelGPUBackend();
std::shared_ptr<ngraph::runtime::TensorView> std::shared_ptr<ngraph::runtime::Tensor>
create_tensor(const ngraph::element::Type& element_type, create_tensor(const ngraph::element::Type& element_type,
const Shape& shape, const Shape& shape,
void* memory_pointer) override; void* memory_pointer) override;
std::shared_ptr<ngraph::runtime::TensorView> std::shared_ptr<ngraph::runtime::Tensor>
create_tensor(const ngraph::element::Type& element_type, const Shape& shape) override; create_tensor(const ngraph::element::Type& element_type, const Shape& shape) override;
bool compile(std::shared_ptr<Function> func) override; bool compile(std::shared_ptr<Function> func) override;
bool call(std::shared_ptr<Function> func, bool call(std::shared_ptr<Function> func,
const std::vector<std::shared_ptr<runtime::TensorView>>& outputs, const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
const std::vector<std::shared_ptr<runtime::TensorView>>& inputs) override; const std::vector<std::shared_ptr<runtime::Tensor>>& inputs) override;
private: private:
class FunctionInstance class FunctionInstance
......
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
runtime::intelgpu::IntelGPULayout::IntelGPULayout(const descriptor::TensorView& tv, runtime::intelgpu::IntelGPULayout::IntelGPULayout(const descriptor::Tensor& tv,
const cldnn::layout& layout) const cldnn::layout& layout)
: TensorLayout(tv) : TensorLayout(tv)
, cldnn_layout(layout) , cldnn_layout(layout)
......
...@@ -36,7 +36,7 @@ namespace ngraph ...@@ -36,7 +36,7 @@ namespace ngraph
class ngraph::runtime::intelgpu::IntelGPULayout : public ngraph::descriptor::layout::TensorLayout class ngraph::runtime::intelgpu::IntelGPULayout : public ngraph::descriptor::layout::TensorLayout
{ {
public: public:
IntelGPULayout(const ngraph::descriptor::TensorView& tv, const cldnn::layout& layout); IntelGPULayout(const ngraph::descriptor::Tensor& tv, const cldnn::layout& layout);
~IntelGPULayout() override {} ~IntelGPULayout() override {}
size_t get_index_offset(const std::vector<size_t>& indices) override; size_t get_index_offset(const std::vector<size_t>& indices) override;
......
...@@ -29,7 +29,7 @@ runtime::intelgpu::IntelGPUTensorView::IntelGPUTensorView(const element::Type& e ...@@ -29,7 +29,7 @@ runtime::intelgpu::IntelGPUTensorView::IntelGPUTensorView(const element::Type& e
const Shape& shape, const Shape& shape,
const cldnn::engine& backend_engine, const cldnn::engine& backend_engine,
void* memory_pointer) void* memory_pointer)
: runtime::TensorView(make_shared<descriptor::TensorView>(element_type, shape, "external")) : runtime::Tensor(make_shared<descriptor::Tensor>(element_type, shape, "external"))
{ {
const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(element_type, shape); const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(element_type, shape);
......
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
#include <CPP/engine.hpp> #include <CPP/engine.hpp>
#include <CPP/memory.hpp> #include <CPP/memory.hpp>
#include "ngraph/runtime/tensor_view.hpp" #include "ngraph/runtime/tensor.hpp"
namespace ngraph namespace ngraph
{ {
...@@ -32,7 +32,7 @@ namespace ngraph ...@@ -32,7 +32,7 @@ namespace ngraph
} }
} }
class ngraph::runtime::intelgpu::IntelGPUTensorView : public ngraph::runtime::TensorView class ngraph::runtime::intelgpu::IntelGPUTensorView : public ngraph::runtime::Tensor
{ {
public: public:
IntelGPUTensorView(const element::Type& element_type, IntelGPUTensorView(const element::Type& element_type,
......
...@@ -46,16 +46,16 @@ extern "C" void delete_backend(runtime::Backend* backend) ...@@ -46,16 +46,16 @@ extern "C" void delete_backend(runtime::Backend* backend)
delete backend; delete backend;
} }
shared_ptr<runtime::TensorView> shared_ptr<runtime::Tensor>
runtime::interpreter::INTBackend::create_tensor(const element::Type& type, const Shape& shape) runtime::interpreter::INTBackend::create_tensor(const element::Type& type, const Shape& shape)
{ {
return make_shared<runtime::HostTensorView>(type, shape, "external"); return make_shared<runtime::HostTensor>(type, shape, "external");
} }
shared_ptr<runtime::TensorView> runtime::interpreter::INTBackend::create_tensor( shared_ptr<runtime::Tensor> runtime::interpreter::INTBackend::create_tensor(
const element::Type& type, const Shape& shape, void* memory_pointer) const element::Type& type, const Shape& shape, void* memory_pointer)
{ {
return make_shared<runtime::HostTensorView>(type, shape, memory_pointer, "external"); return make_shared<runtime::HostTensor>(type, shape, memory_pointer, "external");
} }
bool runtime::interpreter::INTBackend::compile(shared_ptr<Function> function) bool runtime::interpreter::INTBackend::compile(shared_ptr<Function> function)
...@@ -80,34 +80,34 @@ bool runtime::interpreter::INTBackend::compile(shared_ptr<Function> function) ...@@ -80,34 +80,34 @@ bool runtime::interpreter::INTBackend::compile(shared_ptr<Function> function)
} }
bool runtime::interpreter::INTBackend::call(shared_ptr<Function> function, bool runtime::interpreter::INTBackend::call(shared_ptr<Function> function,
const vector<shared_ptr<runtime::TensorView>>& outputs, const vector<shared_ptr<runtime::Tensor>>& outputs,
const vector<shared_ptr<runtime::TensorView>>& inputs) const vector<shared_ptr<runtime::Tensor>>& inputs)
{ {
validate_call(function, outputs, inputs); validate_call(function, outputs, inputs);
compile(function); compile(function);
FunctionInstance& instance = m_function_map[function]; FunctionInstance& instance = m_function_map[function];
// convert inputs to HostTensorView // convert inputs to HostTensor
vector<shared_ptr<runtime::HostTensorView>> func_inputs; vector<shared_ptr<runtime::HostTensor>> func_inputs;
for (auto tv : inputs) for (auto tv : inputs)
{ {
func_inputs.push_back(static_pointer_cast<runtime::HostTensorView>(tv)); func_inputs.push_back(static_pointer_cast<runtime::HostTensor>(tv));
} }
if (instance.m_nan_check_enabled) if (instance.m_nan_check_enabled)
{ {
perform_nan_check(func_inputs); perform_nan_check(func_inputs);
} }
// convert outputs to HostTensorView // convert outputs to HostTensor
vector<shared_ptr<runtime::HostTensorView>> func_outputs; vector<shared_ptr<runtime::HostTensor>> func_outputs;
for (auto tv : outputs) for (auto tv : outputs)
{ {
func_outputs.push_back(static_pointer_cast<runtime::HostTensorView>(tv)); func_outputs.push_back(static_pointer_cast<runtime::HostTensor>(tv));
} }
// map function params -> HostTensorView // map function params -> HostTensor
unordered_map<descriptor::TensorView*, shared_ptr<runtime::HostTensorView>> tensor_map; unordered_map<descriptor::Tensor*, shared_ptr<runtime::HostTensor>> tensor_map;
size_t input_count = 0; size_t input_count = 0;
for (auto param : function->get_parameters()) for (auto param : function->get_parameters())
{ {
...@@ -118,7 +118,7 @@ bool runtime::interpreter::INTBackend::call(shared_ptr<Function> function, ...@@ -118,7 +118,7 @@ bool runtime::interpreter::INTBackend::call(shared_ptr<Function> function,
} }
} }
// map function outputs -> HostTensorView // map function outputs -> HostTensor
for (size_t output_count = 0; output_count < function->get_output_size(); ++output_count) for (size_t output_count = 0; output_count < function->get_output_size(); ++output_count)
{ {
auto output = function->get_output_op(output_count); auto output = function->get_output_op(output_count);
...@@ -126,7 +126,7 @@ bool runtime::interpreter::INTBackend::call(shared_ptr<Function> function, ...@@ -126,7 +126,7 @@ bool runtime::interpreter::INTBackend::call(shared_ptr<Function> function,
{ {
throw ngraph_error("One of function's outputs isn't op::Result"); throw ngraph_error("One of function's outputs isn't op::Result");
} }
descriptor::TensorView* tv = output->get_output_tensor_ptr(0).get(); descriptor::Tensor* tv = output->get_output_tensor_ptr(0).get();
tensor_map.insert({tv, func_outputs[output_count]}); tensor_map.insert({tv, func_outputs[output_count]});
} }
...@@ -140,19 +140,19 @@ bool runtime::interpreter::INTBackend::call(shared_ptr<Function> function, ...@@ -140,19 +140,19 @@ bool runtime::interpreter::INTBackend::call(shared_ptr<Function> function,
continue; continue;
} }
// get op inputs from map // get op inputs from map
vector<shared_ptr<runtime::HostTensorView>> op_inputs; vector<shared_ptr<runtime::HostTensor>> op_inputs;
for (const descriptor::Input& input : op->get_inputs()) for (const descriptor::Input& input : op->get_inputs())
{ {
descriptor::TensorView* tv = input.get_output().get_tensor_ptr().get(); descriptor::Tensor* tv = input.get_output().get_tensor_ptr().get();
op_inputs.push_back(tensor_map.at(tv)); op_inputs.push_back(tensor_map.at(tv));
} }
// get op outputs from map or create // get op outputs from map or create
vector<shared_ptr<runtime::HostTensorView>> op_outputs; vector<shared_ptr<runtime::HostTensor>> op_outputs;
for (size_t i = 0; i < op->get_output_size(); ++i) for (size_t i = 0; i < op->get_output_size(); ++i)
{ {
descriptor::TensorView* tv = op->get_output_tensor_ptr(i).get(); descriptor::Tensor* tv = op->get_output_tensor_ptr(i).get();
shared_ptr<runtime::HostTensorView> htv; shared_ptr<runtime::HostTensor> htv;
auto it = tensor_map.find(tv); auto it = tensor_map.find(tv);
if (it == tensor_map.end()) if (it == tensor_map.end())
{ {
...@@ -160,7 +160,7 @@ bool runtime::interpreter::INTBackend::call(shared_ptr<Function> function, ...@@ -160,7 +160,7 @@ bool runtime::interpreter::INTBackend::call(shared_ptr<Function> function,
const Shape& shape = op->get_output_shape(i); const Shape& shape = op->get_output_shape(i);
const element::Type& type = op->get_output_element_type(i); const element::Type& type = op->get_output_element_type(i);
string name = op->get_output_tensor(i).get_name(); string name = op->get_output_tensor(i).get_name();
htv = make_shared<runtime::HostTensorView>(type, shape, name); htv = make_shared<runtime::HostTensor>(type, shape, name);
tensor_map.insert({tv, htv}); tensor_map.insert({tv, htv});
} }
else else
...@@ -222,11 +222,10 @@ bool runtime::interpreter::INTBackend::call(shared_ptr<Function> function, ...@@ -222,11 +222,10 @@ bool runtime::interpreter::INTBackend::call(shared_ptr<Function> function,
return true; return true;
} }
void runtime::interpreter::INTBackend::generate_calls( void runtime::interpreter::INTBackend::generate_calls(const element::Type& type,
const element::Type& type, const NodeWrapper& op,
const NodeWrapper& op, const vector<shared_ptr<HostTensor>>& outputs,
const vector<shared_ptr<HostTensorView>>& outputs, const vector<shared_ptr<HostTensor>>& inputs)
const vector<shared_ptr<HostTensorView>>& inputs)
{ {
if (type == element::boolean) if (type == element::boolean)
{ {
...@@ -307,11 +306,11 @@ vector<runtime::PerformanceCounter> ...@@ -307,11 +306,11 @@ vector<runtime::PerformanceCounter>
return rc; return rc;
} }
void runtime::interpreter::INTBackend::perform_nan_check( void runtime::interpreter::INTBackend::perform_nan_check(const vector<shared_ptr<HostTensor>>& tvs,
const vector<shared_ptr<HostTensorView>>& tvs, const Node* op) const Node* op)
{ {
size_t arg_number = 1; size_t arg_number = 1;
for (shared_ptr<HostTensorView> tv : tvs) for (shared_ptr<HostTensor> tv : tvs)
{ {
const element::Type& type = tv->get_element_type(); const element::Type& type = tv->get_element_type();
if (type == element::f32) if (type == element::f32)
......
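The interpreter's `call` seeds `tensor_map` with function parameters and results keyed by `descriptor::Tensor*`, then creates intermediates on demand as ops execute. The lookup-or-create step, factored into a hedged sketch (`lookup_or_create` is a hypothetical helper, not code in this change):

```
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/runtime/host_tensor.hpp"

using TensorMap = std::unordered_map<ngraph::descriptor::Tensor*,
                                     std::shared_ptr<ngraph::runtime::HostTensor>>;

std::shared_ptr<ngraph::runtime::HostTensor> lookup_or_create(TensorMap& tensor_map,
                                                              ngraph::descriptor::Tensor* tv,
                                                              const ngraph::element::Type& type,
                                                              const ngraph::Shape& shape,
                                                              const std::string& name)
{
    auto it = tensor_map.find(tv);
    if (it == tensor_map.end())
    {
        // Not a function input or output: allocate a fresh intermediate.
        auto htv = std::make_shared<ngraph::runtime::HostTensor>(type, shape, name);
        tensor_map.insert({tv, htv});
        return htv;
    }
    return it->second; // parameter, result, or previously created intermediate
}
```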
...@@ -54,7 +54,7 @@ ...@@ -54,7 +54,7 @@
#include "ngraph/op/sum.hpp" #include "ngraph/op/sum.hpp"
#include "ngraph/op/topk.hpp" #include "ngraph/op/topk.hpp"
#include "ngraph/runtime/backend.hpp" #include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/host_tensor_view.hpp" #include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/interpreter/node_wrapper.hpp" #include "ngraph/runtime/interpreter/node_wrapper.hpp"
#include "ngraph/runtime/reference/abs.hpp" #include "ngraph/runtime/reference/abs.hpp"
#include "ngraph/runtime/reference/acos.hpp" #include "ngraph/runtime/reference/acos.hpp"
...@@ -124,7 +124,7 @@ ...@@ -124,7 +124,7 @@
#include "ngraph/runtime/reference/tan.hpp" #include "ngraph/runtime/reference/tan.hpp"
#include "ngraph/runtime/reference/tanh.hpp" #include "ngraph/runtime/reference/tanh.hpp"
#include "ngraph/runtime/reference/topk.hpp" #include "ngraph/runtime/reference/topk.hpp"
#include "ngraph/runtime/tensor_view.hpp" #include "ngraph/runtime/tensor.hpp"
#ifdef NGRAPH_DISTRIBUTED #ifdef NGRAPH_DISTRIBUTED
#include "ngraph/runtime/reference/allreduce.hpp" #include "ngraph/runtime/reference/allreduce.hpp"
...@@ -144,17 +144,16 @@ namespace ngraph ...@@ -144,17 +144,16 @@ namespace ngraph
class ngraph::runtime::interpreter::INTBackend : public Backend class ngraph::runtime::interpreter::INTBackend : public Backend
{ {
public: public:
std::shared_ptr<TensorView> std::shared_ptr<Tensor>
create_tensor(const element::Type& type, const Shape& shape, void* memory_pointer) override; create_tensor(const element::Type& type, const Shape& shape, void* memory_pointer) override;
std::shared_ptr<TensorView> create_tensor(const element::Type& type, std::shared_ptr<Tensor> create_tensor(const element::Type& type, const Shape& shape) override;
const Shape& shape) override;
bool compile(std::shared_ptr<Function> function) override; bool compile(std::shared_ptr<Function> function) override;
bool call(std::shared_ptr<Function> function, bool call(std::shared_ptr<Function> function,
const std::vector<std::shared_ptr<TensorView>>& outputs, const std::vector<std::shared_ptr<Tensor>>& outputs,
const std::vector<std::shared_ptr<TensorView>>& inputs) override; const std::vector<std::shared_ptr<Tensor>>& inputs) override;
void set_nan_check(std::shared_ptr<Function> func, bool); void set_nan_check(std::shared_ptr<Function> func, bool);
...@@ -174,18 +173,18 @@ private: ...@@ -174,18 +173,18 @@ private:
}; };
std::map<std::shared_ptr<Function>, FunctionInstance> m_function_map; std::map<std::shared_ptr<Function>, FunctionInstance> m_function_map;
static void perform_nan_check(const std::vector<std::shared_ptr<HostTensorView>>&, static void perform_nan_check(const std::vector<std::shared_ptr<HostTensor>>&,
const Node* op = nullptr); const Node* op = nullptr);
void generate_calls(const element::Type& type, void generate_calls(const element::Type& type,
const NodeWrapper& op, const NodeWrapper& op,
const std::vector<std::shared_ptr<HostTensorView>>& outputs, const std::vector<std::shared_ptr<HostTensor>>& outputs,
const std::vector<std::shared_ptr<HostTensorView>>& inputs); const std::vector<std::shared_ptr<HostTensor>>& inputs);
template <typename T> template <typename T>
void op_engine(const NodeWrapper& node_wrapper, void op_engine(const NodeWrapper& node_wrapper,
const std::vector<std::shared_ptr<HostTensorView>>& out, const std::vector<std::shared_ptr<HostTensor>>& out,
const std::vector<std::shared_ptr<HostTensorView>>& args) const std::vector<std::shared_ptr<HostTensor>>& args)
{ {
const Node& node = node_wrapper.get_node(); const Node& node = node_wrapper.get_node();
std::string node_op = node.description(); std::string node_op = node.description();
...@@ -405,7 +404,7 @@ private: ...@@ -405,7 +404,7 @@ private:
const op::Concat* concat = static_cast<const op::Concat*>(&node); const op::Concat* concat = static_cast<const op::Concat*>(&node);
std::vector<const T*> in_args; std::vector<const T*> in_args;
std::vector<Shape> in_shapes; std::vector<Shape> in_shapes;
for (std::shared_ptr<HostTensorView> arg : args) for (std::shared_ptr<HostTensor> arg : args)
{ {
in_args.push_back(arg->get_data_ptr<T>()); in_args.push_back(arg->get_data_ptr<T>());
in_shapes.push_back(arg->get_shape()); in_shapes.push_back(arg->get_shape());
...@@ -665,16 +664,16 @@ private: ...@@ -665,16 +664,16 @@ private:
{ {
std::shared_ptr<Function> function = node.get_functions()[0]; std::shared_ptr<Function> function = node.get_functions()[0];
std::vector<std::shared_ptr<runtime::TensorView>> outputs; std::vector<std::shared_ptr<runtime::Tensor>> outputs;
for (auto tv : out) for (auto tv : out)
{ {
outputs.push_back(std::static_pointer_cast<runtime::TensorView>(tv)); outputs.push_back(std::static_pointer_cast<runtime::Tensor>(tv));
} }
std::vector<std::shared_ptr<runtime::TensorView>> inputs; std::vector<std::shared_ptr<runtime::Tensor>> inputs;
for (auto tv : args) for (auto tv : args)
{ {
inputs.push_back(std::static_pointer_cast<runtime::TensorView>(tv)); inputs.push_back(std::static_pointer_cast<runtime::Tensor>(tv));
} }
call(function, outputs, inputs); call(function, outputs, inputs);
...@@ -915,11 +914,11 @@ private: ...@@ -915,11 +914,11 @@ private:
std::shared_ptr<Function> reduction_function = reduce->get_functions()[0]; std::shared_ptr<Function> reduction_function = reduce->get_functions()[0];
std::function<T(T, T)> f = [this, &node, reduction_function](T x, T y) -> T { std::function<T(T, T)> f = [this, &node, reduction_function](T x, T y) -> T {
auto tx = std::make_shared<HostTensorView>( auto tx = std::make_shared<HostTensor>(
node.get_inputs().at(0).get_element_type(), Shape{}, "reduce_temp_x"); node.get_inputs().at(0).get_element_type(), Shape{}, "reduce_temp_x");
auto ty = std::make_shared<HostTensorView>( auto ty = std::make_shared<HostTensor>(
node.get_inputs().at(1).get_element_type(), Shape{}, "reduce_temp_y"); node.get_inputs().at(1).get_element_type(), Shape{}, "reduce_temp_y");
auto tr = std::make_shared<HostTensorView>( auto tr = std::make_shared<HostTensor>(
node.get_output_element_type(0), Shape{}, "reduce_temp_r"); node.get_output_element_type(0), Shape{}, "reduce_temp_r");
*(tx->get_data_ptr<T>()) = x; *(tx->get_data_ptr<T>()) = x;
*(ty->get_data_ptr<T>()) = y; *(ty->get_data_ptr<T>()) = y;
...@@ -942,11 +941,11 @@ private: ...@@ -942,11 +941,11 @@ private:
std::shared_ptr<Function> reduction_function = reduce_window->get_functions()[0]; std::shared_ptr<Function> reduction_function = reduce_window->get_functions()[0];
std::function<T(T, T)> f = [this, &node, reduction_function](T x, T y) -> T { std::function<T(T, T)> f = [this, &node, reduction_function](T x, T y) -> T {
auto tx = std::make_shared<HostTensorView>( auto tx = std::make_shared<HostTensor>(
node.get_inputs().at(0).get_element_type(), Shape{}, "reduce_window_temp_x"); node.get_inputs().at(0).get_element_type(), Shape{}, "reduce_window_temp_x");
auto ty = std::make_shared<HostTensorView>( auto ty = std::make_shared<HostTensor>(
node.get_inputs().at(1).get_element_type(), Shape{}, "reduce_window_temp_y"); node.get_inputs().at(1).get_element_type(), Shape{}, "reduce_window_temp_y");
auto tr = std::make_shared<HostTensorView>( auto tr = std::make_shared<HostTensor>(
node.get_output_element_type(0), Shape{}, "reduce_window_temp_r"); node.get_output_element_type(0), Shape{}, "reduce_window_temp_r");
*(tx->get_data_ptr<T>()) = x; *(tx->get_data_ptr<T>()) = x;
*(ty->get_data_ptr<T>()) = y; *(ty->get_data_ptr<T>()) = y;
...@@ -1056,11 +1055,11 @@ private: ...@@ -1056,11 +1055,11 @@ private:
select_and_scatter->get_functions()[0]; select_and_scatter->get_functions()[0];
std::function<bool(T, T)> f_selection = [this, &node, selection_function](T x, std::function<bool(T, T)> f_selection = [this, &node, selection_function](T x,
T y) -> bool { T y) -> bool {
auto tx = std::make_shared<runtime::HostTensorView>( auto tx = std::make_shared<runtime::HostTensor>(
node.get_inputs().at(0).get_element_type(), Shape{}, "selection_temp_x"); node.get_inputs().at(0).get_element_type(), Shape{}, "selection_temp_x");
auto ty = std::make_shared<runtime::HostTensorView>( auto ty = std::make_shared<runtime::HostTensor>(
node.get_inputs().at(1).get_element_type(), Shape{}, "selection_temp_y"); node.get_inputs().at(1).get_element_type(), Shape{}, "selection_temp_y");
auto tr = std::make_shared<runtime::HostTensorView>( auto tr = std::make_shared<runtime::HostTensor>(
element::boolean, Shape{}, "selection_temp_r"); element::boolean, Shape{}, "selection_temp_r");
*(tx->get_data_ptr<T>()) = x; *(tx->get_data_ptr<T>()) = x;
*(ty->get_data_ptr<T>()) = y; *(ty->get_data_ptr<T>()) = y;
...@@ -1071,11 +1070,11 @@ private: ...@@ -1071,11 +1070,11 @@ private:
std::shared_ptr<ngraph::Function> scatter_function = std::shared_ptr<ngraph::Function> scatter_function =
select_and_scatter->get_functions()[1]; select_and_scatter->get_functions()[1];
std::function<T(T, T)> f_scatter = [this, &node, scatter_function](T x, T y) -> T { std::function<T(T, T)> f_scatter = [this, &node, scatter_function](T x, T y) -> T {
auto tx = std::make_shared<runtime::HostTensorView>( auto tx = std::make_shared<runtime::HostTensor>(
node.get_inputs().at(0).get_element_type(), Shape{}, "scatter_temp_x"); node.get_inputs().at(0).get_element_type(), Shape{}, "scatter_temp_x");
auto ty = std::make_shared<runtime::HostTensorView>( auto ty = std::make_shared<runtime::HostTensor>(
node.get_inputs().at(1).get_element_type(), Shape{}, "scatter_temp_y"); node.get_inputs().at(1).get_element_type(), Shape{}, "scatter_temp_y");
auto tr = std::make_shared<runtime::HostTensorView>( auto tr = std::make_shared<runtime::HostTensor>(
node.get_output_element_type(0), Shape{}, "scatter_temp_r"); node.get_output_element_type(0), Shape{}, "scatter_temp_r");
*(tx->get_data_ptr<T>()) = x; *(tx->get_data_ptr<T>()) = x;
*(ty->get_data_ptr<T>()) = y; *(ty->get_data_ptr<T>()) = y;
......
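Every reduction above wraps its scalar combiner in a nested `call`: two `Shape{}` `HostTensor`s carry `x` and `y` in, a third carries the scalar result out. The recurring pattern as a hedged sketch; `combine` is hypothetical and `backend_call` stands in for `INTBackend::call`.

```
#include <memory>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/host_tensor.hpp"

// Box two scalars into rank-0 tensors, run the reduction Function, unbox.
template <typename T, typename Call>
T combine(Call&& backend_call,
          const std::shared_ptr<ngraph::Function>& reduction_function,
          const ngraph::element::Type& type, T x, T y)
{
    using ngraph::runtime::HostTensor;
    using ngraph::runtime::Tensor;
    auto tx = std::make_shared<HostTensor>(type, ngraph::Shape{}, "temp_x");
    auto ty = std::make_shared<HostTensor>(type, ngraph::Shape{}, "temp_y");
    auto tr = std::make_shared<HostTensor>(type, ngraph::Shape{}, "temp_r");
    *(tx->get_data_ptr<T>()) = x; // scalars in
    *(ty->get_data_ptr<T>()) = y;
    std::vector<std::shared_ptr<Tensor>> outs{tr};
    std::vector<std::shared_ptr<Tensor>> ins{tx, ty};
    backend_call(reduction_function, outs, ins);
    return *(tr->get_data_ptr<T>()); // scalar out
}
```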
...@@ -14,60 +14,59 @@ ...@@ -14,60 +14,59 @@
// limitations under the License. // limitations under the License.
//***************************************************************************** //*****************************************************************************
#include "ngraph/runtime/tensor_view.hpp" #include "ngraph/runtime/tensor.hpp"
#include "ngraph/descriptor/layout/tensor_layout.hpp" #include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/type/element_type.hpp" #include "ngraph/type/element_type.hpp"
using namespace ngraph; using namespace ngraph;
using namespace std; using namespace std;
const Shape& runtime::TensorView::get_shape() const const Shape& runtime::Tensor::get_shape() const
{ {
return m_descriptor->get_shape(); return m_descriptor->get_shape();
} }
Strides runtime::TensorView::get_strides() const Strides runtime::Tensor::get_strides() const
{ {
return m_descriptor->get_tensor_layout()->get_strides(); return m_descriptor->get_tensor_layout()->get_strides();
} }
const element::Type& runtime::TensorView::get_element_type() const const element::Type& runtime::Tensor::get_element_type() const
{ {
return m_descriptor->get_element_type(); return m_descriptor->get_element_type();
} }
shared_ptr<descriptor::layout::TensorLayout> runtime::TensorView::get_tensor_layout() const shared_ptr<descriptor::layout::TensorLayout> runtime::Tensor::get_tensor_layout() const
{ {
return m_descriptor->get_tensor_layout(); return m_descriptor->get_tensor_layout();
} }
void runtime::TensorView::set_tensor_layout( void runtime::Tensor::set_tensor_layout(const shared_ptr<descriptor::layout::TensorLayout>& layout)
const shared_ptr<descriptor::layout::TensorLayout>& layout)
{ {
m_descriptor->set_tensor_layout(layout); m_descriptor->set_tensor_layout(layout);
} }
size_t runtime::TensorView::get_element_count() const size_t runtime::Tensor::get_element_count() const
{ {
return get_tensor_layout()->get_size(); return get_tensor_layout()->get_size();
} }
size_t runtime::TensorView::get_size_in_bytes() const size_t runtime::Tensor::get_size_in_bytes() const
{ {
return get_tensor_layout()->get_size() * get_element_type().size(); return get_tensor_layout()->get_size() * get_element_type().size();
} }
const std::string& runtime::TensorView::get_name() const const std::string& runtime::Tensor::get_name() const
{ {
return m_descriptor->get_name(); return m_descriptor->get_name();
} }
bool runtime::TensorView::get_stale() const bool runtime::Tensor::get_stale() const
{ {
return m_stale; return m_stale;
} }
void runtime::TensorView::set_stale(bool val) void runtime::Tensor::set_stale(bool val)
{ {
m_stale = val; m_stale = val;
} }
...@@ -33,18 +33,18 @@ namespace ngraph ...@@ -33,18 +33,18 @@ namespace ngraph
namespace runtime namespace runtime
{ {
class TensorView class Tensor
{ {
protected: protected:
TensorView(const std::shared_ptr<ngraph::descriptor::Tensor>& descriptor) Tensor(const std::shared_ptr<ngraph::descriptor::Tensor>& descriptor)
: m_descriptor(descriptor) : m_descriptor(descriptor)
, m_stale(true) , m_stale(true)
{ {
} }
public: public:
virtual ~TensorView() {} virtual ~Tensor() {}
TensorView& operator=(const TensorView&) = default; Tensor& operator=(const Tensor&) = default;
/// \brief Get tensor shape /// \brief Get tensor shape
/// \return const reference to a Shape /// \return const reference to a Shape
...@@ -104,6 +104,6 @@ namespace ngraph ...@@ -104,6 +104,6 @@ namespace ngraph
bool m_stale; bool m_stale;
}; };
using TensorViewPtrs = std::vector<std::shared_ptr<TensorView>>; using TensorViewPtrs = std::vector<std::shared_ptr<Tensor>>;
} }
} }
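After the rename, `runtime::Tensor` remains a thin wrapper over a `descriptor::Tensor` plus a staleness flag, and its size accessors are related by construction: `get_size_in_bytes()` equals `get_element_count()` times the element size. A small sketch exercising that invariant:

```
#include <cassert>
#include <memory>
#include "ngraph/runtime/tensor.hpp"

void check_sizes(const std::shared_ptr<ngraph::runtime::Tensor>& t)
{
    size_t n = t->get_element_count();     // from the tensor layout
    size_t bytes = t->get_size_in_bytes(); // n * element size, per tensor.cpp above
    assert(bytes == n * t->get_element_type().size());
    // m_stale starts true on construction; callers clear it after writing.
}
```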
...@@ -19,9 +19,8 @@ ...@@ -19,9 +19,8 @@
#include "benchmark.hpp" #include "benchmark.hpp"
#include "ngraph/file_util.hpp" #include "ngraph/file_util.hpp"
#include "ngraph/runtime/backend.hpp" #include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/host_tensor_view.hpp" #include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/tensor_view.hpp" #include "ngraph/runtime/tensor.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/serializer.hpp" #include "ngraph/serializer.hpp"
#include "ngraph/util.hpp" #include "ngraph/util.hpp"
...@@ -31,7 +30,7 @@ using namespace ngraph; ...@@ -31,7 +30,7 @@ using namespace ngraph;
static default_random_engine s_random_engine; static default_random_engine s_random_engine;
template <typename T> template <typename T>
void init_int_tv(shared_ptr<runtime::TensorView> tv, T min, T max) void init_int_tv(shared_ptr<runtime::Tensor> tv, T min, T max)
{ {
size_t size = tv->get_element_count(); size_t size = tv->get_element_count();
uniform_int_distribution<T> dist(min, max); uniform_int_distribution<T> dist(min, max);
...@@ -44,7 +43,7 @@ void init_int_tv(shared_ptr<runtime::TensorView> tv, T min, T max) ...@@ -44,7 +43,7 @@ void init_int_tv(shared_ptr<runtime::TensorView> tv, T min, T max)
} }
template <> template <>
void init_int_tv<char>(shared_ptr<runtime::TensorView> tv, char min, char max) void init_int_tv<char>(shared_ptr<runtime::Tensor> tv, char min, char max)
{ {
size_t size = tv->get_element_count(); size_t size = tv->get_element_count();
uniform_int_distribution<int16_t> dist(static_cast<short>(min), static_cast<short>(max)); uniform_int_distribution<int16_t> dist(static_cast<short>(min), static_cast<short>(max));
...@@ -57,7 +56,7 @@ void init_int_tv<char>(shared_ptr<runtime::TensorView> tv, char min, char max) ...@@ -57,7 +56,7 @@ void init_int_tv<char>(shared_ptr<runtime::TensorView> tv, char min, char max)
} }
template <> template <>
void init_int_tv<int8_t>(shared_ptr<runtime::TensorView> tv, int8_t min, int8_t max) void init_int_tv<int8_t>(shared_ptr<runtime::Tensor> tv, int8_t min, int8_t max)
{ {
size_t size = tv->get_element_count(); size_t size = tv->get_element_count();
uniform_int_distribution<int16_t> dist(static_cast<short>(min), static_cast<short>(max)); uniform_int_distribution<int16_t> dist(static_cast<short>(min), static_cast<short>(max));
...@@ -70,7 +69,7 @@ void init_int_tv<int8_t>(shared_ptr<runtime::TensorView> tv, int8_t min, int8_t ...@@ -70,7 +69,7 @@ void init_int_tv<int8_t>(shared_ptr<runtime::TensorView> tv, int8_t min, int8_t
} }
template <> template <>
void init_int_tv<uint8_t>(shared_ptr<runtime::TensorView> tv, uint8_t min, uint8_t max) void init_int_tv<uint8_t>(shared_ptr<runtime::Tensor> tv, uint8_t min, uint8_t max)
{ {
size_t size = tv->get_element_count(); size_t size = tv->get_element_count();
uniform_int_distribution<int16_t> dist(static_cast<short>(min), static_cast<short>(max)); uniform_int_distribution<int16_t> dist(static_cast<short>(min), static_cast<short>(max));
...@@ -83,7 +82,7 @@ void init_int_tv<uint8_t>(shared_ptr<runtime::TensorView> tv, uint8_t min, uint8 ...@@ -83,7 +82,7 @@ void init_int_tv<uint8_t>(shared_ptr<runtime::TensorView> tv, uint8_t min, uint8
} }
template <typename T> template <typename T>
void init_real_tv(shared_ptr<runtime::TensorView> tv, T min, T max) void init_real_tv(shared_ptr<runtime::Tensor> tv, T min, T max)
{ {
size_t size = tv->get_element_count(); size_t size = tv->get_element_count();
uniform_real_distribution<T> dist(min, max); uniform_real_distribution<T> dist(min, max);
...@@ -95,7 +94,7 @@ void init_real_tv(shared_ptr<runtime::TensorView> tv, T min, T max) ...@@ -95,7 +94,7 @@ void init_real_tv(shared_ptr<runtime::TensorView> tv, T min, T max)
tv->write(vec.data(), 0, vec.size() * sizeof(T)); tv->write(vec.data(), 0, vec.size() * sizeof(T));
} }
static void random_init(shared_ptr<runtime::TensorView> tv) static void random_init(shared_ptr<runtime::Tensor> tv)
{ {
element::Type et = tv->get_element_type(); element::Type et = tv->get_element_type();
if (et == element::boolean) if (et == element::boolean)
...@@ -164,26 +163,26 @@ vector<runtime::PerformanceCounter> run_benchmark(shared_ptr<Function> f, ...@@ -164,26 +163,26 @@ vector<runtime::PerformanceCounter> run_benchmark(shared_ptr<Function> f,
cout.imbue(locale("")); cout.imbue(locale(""));
cout << "compile time: " << timer.get_milliseconds() << "ms" << endl; cout << "compile time: " << timer.get_milliseconds() << "ms" << endl;
vector<shared_ptr<runtime::HostTensorView>> arg_data; vector<shared_ptr<runtime::HostTensor>> arg_data;
vector<shared_ptr<runtime::TensorView>> args; vector<shared_ptr<runtime::Tensor>> args;
vector<bool> args_cacheable; vector<bool> args_cacheable;
for (shared_ptr<op::Parameter> param : f->get_parameters()) for (shared_ptr<op::Parameter> param : f->get_parameters())
{ {
auto tensor = backend->create_tensor(param->get_element_type(), param->get_shape()); auto tensor = backend->create_tensor(param->get_element_type(), param->get_shape());
auto tensor_data = auto tensor_data =
make_shared<runtime::HostTensorView>(param->get_element_type(), param->get_shape()); make_shared<runtime::HostTensor>(param->get_element_type(), param->get_shape());
random_init(tensor); random_init(tensor);
args.push_back(tensor); args.push_back(tensor);
arg_data.push_back(tensor_data); arg_data.push_back(tensor_data);
args_cacheable.push_back(param->get_cacheable()); args_cacheable.push_back(param->get_cacheable());
} }
vector<shared_ptr<runtime::HostTensorView>> result_data; vector<shared_ptr<runtime::HostTensor>> result_data;
vector<shared_ptr<runtime::TensorView>> results; vector<shared_ptr<runtime::Tensor>> results;
for (shared_ptr<Node> out : f->get_results()) for (shared_ptr<Node> out : f->get_results())
{ {
auto result = backend->create_tensor(out->get_element_type(), out->get_shape()); auto result = backend->create_tensor(out->get_element_type(), out->get_shape());
auto tensor_data = auto tensor_data =
make_shared<runtime::HostTensorView>(out->get_element_type(), out->get_shape()); make_shared<runtime::HostTensor>(out->get_element_type(), out->get_shape());
results.push_back(result); results.push_back(result);
result_data.push_back(tensor_data); result_data.push_back(tensor_data);
} }
...@@ -212,10 +211,10 @@ vector<runtime::PerformanceCounter> run_benchmark(shared_ptr<Function> f, ...@@ -212,10 +211,10 @@ vector<runtime::PerformanceCounter> run_benchmark(shared_ptr<Function> f,
{ {
for (size_t arg_index = 0; arg_index < args.size(); arg_index++) for (size_t arg_index = 0; arg_index < args.size(); arg_index++)
{ {
const shared_ptr<runtime::TensorView>& arg = args[arg_index]; const shared_ptr<runtime::Tensor>& arg = args[arg_index];
if (arg->get_stale()) if (arg->get_stale())
{ {
const shared_ptr<runtime::HostTensorView>& data = arg_data[arg_index]; const shared_ptr<runtime::HostTensor>& data = arg_data[arg_index];
arg->write(data->get_data_ptr(), arg->write(data->get_data_ptr(),
0, 0,
data->get_element_count() * data->get_element_type().size()); data->get_element_count() * data->get_element_type().size());
...@@ -227,8 +226,8 @@ vector<runtime::PerformanceCounter> run_benchmark(shared_ptr<Function> f, ...@@ -227,8 +226,8 @@ vector<runtime::PerformanceCounter> run_benchmark(shared_ptr<Function> f,
{ {
for (size_t result_index = 0; result_index < results.size(); result_index++) for (size_t result_index = 0; result_index < results.size(); result_index++)
{ {
const shared_ptr<runtime::HostTensorView>& data = result_data[result_index]; const shared_ptr<runtime::HostTensor>& data = result_data[result_index];
const shared_ptr<runtime::TensorView>& result = results[result_index]; const shared_ptr<runtime::Tensor>& result = results[result_index];
result->read(data->get_data_ptr(), result->read(data->get_data_ptr(),
0, 0,
data->get_element_count() * data->get_element_type().size()); data->get_element_count() * data->get_element_type().size());
......
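The benchmark's `init_*_tv` helpers all follow one pattern: stage random values in a `std::vector`, then push them into the tensor with one bulk `write`. A condensed sketch; `fill_uniform` is a hypothetical consolidation of `init_real_tv`.

```
#include <memory>
#include <random>
#include <vector>
#include "ngraph/runtime/tensor.hpp"

void fill_uniform(const std::shared_ptr<ngraph::runtime::Tensor>& tv, float min, float max)
{
    static std::default_random_engine engine;
    std::uniform_real_distribution<float> dist(min, max);
    std::vector<float> vec(tv->get_element_count());
    for (float& v : vec)
    {
        v = dist(engine);
    }
    tv->write(vec.data(), 0, vec.size() * sizeof(float)); // one bulk copy
}
```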
...@@ -68,7 +68,7 @@ TEST(benchmark, concat_32x1x200_axis1_6) ...@@ -68,7 +68,7 @@ TEST(benchmark, concat_32x1x200_axis1_6)
vector<std::string> backend_names{"INTERPRETER", "CPU"}; vector<std::string> backend_names{"INTERPRETER", "CPU"};
vector<int> n_runs{200, 200, using_ref_kernels ? 200 : 200000}; // one for each backend vector<int> n_runs{200, 200, using_ref_kernels ? 200 : 200000}; // one for each backend
vector<std::function<void()>> test_callbacks; // one for each backend vector<std::function<void()>> test_callbacks; // one for each backend
vector<std::shared_ptr<runtime::TensorView>> result_tvs; // one for each backend vector<std::shared_ptr<runtime::Tensor>> result_tvs; // one for each backend
for (std::string backend_name : backend_names) for (std::string backend_name : backend_names)
{ {
...@@ -86,7 +86,7 @@ TEST(benchmark, concat_32x1x200_axis1_6) ...@@ -86,7 +86,7 @@ TEST(benchmark, concat_32x1x200_axis1_6)
auto backend = runtime::Backend::create(backend_name); auto backend = runtime::Backend::create(backend_name);
vector<shared_ptr<runtime::TensorView>> input_vals; vector<shared_ptr<runtime::Tensor>> input_vals;
for (size_t i = 0; i < n_arrays; i++) for (size_t i = 0; i < n_arrays; i++)
{ {
......
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
using namespace ngraph; using namespace ngraph;
using namespace std; using namespace std;
shared_ptr<runtime::TensorView> shared_ptr<runtime::Tensor>
make_reduce_result(function<shared_ptr<Node>(const shared_ptr<Node>&, const AxisSet&)> func) make_reduce_result(function<shared_ptr<Node>(const shared_ptr<Node>&, const AxisSet&)> func)
{ {
Shape shape_a{3, 2}; Shape shape_a{3, 2};
...@@ -39,7 +39,7 @@ shared_ptr<runtime::TensorView> ...@@ -39,7 +39,7 @@ shared_ptr<runtime::TensorView>
return result; return result;
} }
shared_ptr<runtime::TensorView> make_reduce_result_true( shared_ptr<runtime::Tensor> make_reduce_result_true(
function<shared_ptr<Node>(const shared_ptr<Node>&, const AxisSet&, bool)> func) function<shared_ptr<Node>(const shared_ptr<Node>&, const AxisSet&, bool)> func)
{ {
Shape shape_a{3, 2}; Shape shape_a{3, 2};
...@@ -56,7 +56,7 @@ shared_ptr<runtime::TensorView> make_reduce_result_true( ...@@ -56,7 +56,7 @@ shared_ptr<runtime::TensorView> make_reduce_result_true(
return result; return result;
} }
shared_ptr<runtime::TensorView> make_reduce_result_false( shared_ptr<runtime::Tensor> make_reduce_result_false(
function<shared_ptr<Node>(const shared_ptr<Node>&, const AxisSet&, bool)> func) function<shared_ptr<Node>(const shared_ptr<Node>&, const AxisSet&, bool)> func)
{ {
Shape shape_a{3, 2}; Shape shape_a{3, 2};
......
...@@ -110,10 +110,10 @@ TEST(cpu_test, abc_tbb) ...@@ -110,10 +110,10 @@ TEST(cpu_test, abc_tbb)
auto backend = runtime::Backend::create("CPU"); auto backend = runtime::Backend::create("CPU");
// Create some tensors for input/output // Create some tensors for input/output
shared_ptr<runtime::TensorView> a = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::TensorView> b = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> b = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::TensorView> c = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> c = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::TensorView> result = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> result = backend->create_tensor(element::f32, shape);
copy_data(a, test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector()); copy_data(a, test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector());
copy_data(b, test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector()); copy_data(b, test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector());
......
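The fixture pattern above recurs in every test: create tensors, `copy_data` the `NDArray` inputs, call, compare. For orientation, `copy_data` plausibly reduces to one bulk `write`; this is a hedged sketch, not the actual test utility.

```
#include <memory>
#include <vector>
#include "ngraph/runtime/tensor.hpp"

// Hypothetical equivalent of the copy_data test helper.
template <typename T>
void copy_data_sketch(const std::shared_ptr<ngraph::runtime::Tensor>& tv,
                      const std::vector<T>& data)
{
    tv->write(data.data(), 0, data.size() * sizeof(T));
}
```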
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
#include "ngraph/ngraph.hpp" #include "ngraph/ngraph.hpp"
#include "ngraph/pass/assign_placement.hpp" #include "ngraph/pass/assign_placement.hpp"
#include "ngraph/pass/manager.hpp" #include "ngraph/pass/manager.hpp"
#include "ngraph/runtime/host_tensor_view.hpp" #include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/util.hpp" #include "ngraph/util.hpp"
#include "util/ndarray.hpp" #include "util/ndarray.hpp"
#include "util/test_tools.hpp" #include "util/test_tools.hpp"
...@@ -65,8 +65,7 @@ public: ...@@ -65,8 +65,7 @@ public:
} }
~HybridBackend() {} ~HybridBackend() {}
shared_ptr<runtime::TensorView> create_tensor(const element::Type& element_type, shared_ptr<runtime::Tensor> create_tensor(const element::Type& element_type, const Shape& shape)
const Shape& shape)
{ {
return get_cached_backend(Placement::INTERPRETER)->create_tensor(element_type, shape); return get_cached_backend(Placement::INTERPRETER)->create_tensor(element_type, shape);
} }
...@@ -101,8 +100,8 @@ public: ...@@ -101,8 +100,8 @@ public:
} }
bool call_with_validate(const shared_ptr<Function>& func, bool call_with_validate(const shared_ptr<Function>& func,
const vector<shared_ptr<runtime::TensorView>>& outputs, const vector<shared_ptr<runtime::Tensor>>& outputs,
const vector<shared_ptr<runtime::TensorView>>& inputs) const vector<shared_ptr<runtime::Tensor>>& inputs)
{ {
// Get FunctionInstance // Get FunctionInstance
bool rc = true; bool rc = true;
...@@ -118,8 +117,8 @@ public: ...@@ -118,8 +117,8 @@ public:
} }
FunctionInstance& instance = it->second; FunctionInstance& instance = it->second;
// Parameter and result nodes in sub_function map to one TensorView // Parameter and result nodes in sub_function map to one Tensor
unordered_map<shared_ptr<Node>, shared_ptr<runtime::TensorView>> map_node_to_tensor_view; unordered_map<shared_ptr<Node>, shared_ptr<runtime::Tensor>> map_node_to_tensor_view;
for (size_t i = 0; i < inputs.size(); ++i) for (size_t i = 0; i < inputs.size(); ++i)
{ {
map_node_to_tensor_view[instance.m_function->get_parameters()[i]] = inputs[i]; map_node_to_tensor_view[instance.m_function->get_parameters()[i]] = inputs[i];
...@@ -137,7 +136,7 @@ public: ...@@ -137,7 +136,7 @@ public:
auto backend = get_cached_backend(placement); auto backend = get_cached_backend(placement);
// Prepare parameter TensorViews // Prepare parameter TensorViews
vector<shared_ptr<runtime::TensorView>> parameter_tvs; vector<shared_ptr<runtime::Tensor>> parameter_tvs;
for (auto parameter_node : sub_function->get_parameters()) for (auto parameter_node : sub_function->get_parameters())
{ {
if (map_node_to_tensor_view.find(parameter_node) != map_node_to_tensor_view.end()) if (map_node_to_tensor_view.find(parameter_node) != map_node_to_tensor_view.end())
...@@ -157,7 +156,7 @@ public: ...@@ -157,7 +156,7 @@ public:
} }
// Prepare result TensorViews // Prepare result TensorViews
vector<shared_ptr<runtime::TensorView>> result_tvs; vector<shared_ptr<runtime::Tensor>> result_tvs;
for (auto result_node : sub_function->get_results()) for (auto result_node : sub_function->get_results())
{ {
if (map_node_to_tensor_view.find(result_node) != map_node_to_tensor_view.end()) if (map_node_to_tensor_view.find(result_node) != map_node_to_tensor_view.end())
...@@ -377,10 +376,10 @@ TEST(graph_partition, hybrid_abc) ...@@ -377,10 +376,10 @@ TEST(graph_partition, hybrid_abc)
auto f = make_shared<Function>(ResultVector{R}, op::ParameterVector{A, B, C}); auto f = make_shared<Function>(ResultVector{R}, op::ParameterVector{A, B, C});
auto backend = make_shared<HybridBackend>(int_with_cpu_mul_policy); auto backend = make_shared<HybridBackend>(int_with_cpu_mul_policy);
shared_ptr<runtime::TensorView> a = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::TensorView> b = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> b = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::TensorView> c = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> c = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::TensorView> r = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> r = backend->create_tensor(element::f32, shape);
copy_data(a, test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector()); copy_data(a, test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector());
copy_data(b, test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector()); copy_data(b, test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector());
...@@ -414,11 +413,11 @@ TEST(graph_partition, hybrid_abcd) ...@@ -414,11 +413,11 @@ TEST(graph_partition, hybrid_abcd)
auto backend = make_shared<HybridBackend>(int_with_cpu_mul_policy); auto backend = make_shared<HybridBackend>(int_with_cpu_mul_policy);
backend->compile(f); backend->compile(f);
shared_ptr<runtime::TensorView> a = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::TensorView> b = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> b = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::TensorView> c = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> c = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::TensorView> d = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> d = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::TensorView> r = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> r = backend->create_tensor(element::f32, shape);
copy_data(a, test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector()); copy_data(a, test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector());
copy_data(b, test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector()); copy_data(b, test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector());
...@@ -450,10 +449,10 @@ TEST(graph_partition, hybrid_back_and_forth) ...@@ -450,10 +449,10 @@ TEST(graph_partition, hybrid_back_and_forth)
auto backend = make_shared<HybridBackend>(int_with_cpu_mul_policy); auto backend = make_shared<HybridBackend>(int_with_cpu_mul_policy);
backend->compile(f); backend->compile(f);
shared_ptr<runtime::TensorView> a = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::TensorView> b = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> b = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::TensorView> c = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> c = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::TensorView> r = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> r = backend->create_tensor(element::f32, shape);
copy_data(a, test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector()); copy_data(a, test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector());
copy_data(b, test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector()); copy_data(b, test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector());
...@@ -487,10 +486,10 @@ TEST(graph_partition, hybrid_multi_middle_nodes) ...@@ -487,10 +486,10 @@ TEST(graph_partition, hybrid_multi_middle_nodes)
auto backend = make_shared<HybridBackend>(int_with_cpu_mul_policy); auto backend = make_shared<HybridBackend>(int_with_cpu_mul_policy);
backend->compile(f); backend->compile(f);
shared_ptr<runtime::TensorView> a = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::TensorView> b = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> b = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::TensorView> c = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> c = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::TensorView> r = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> r = backend->create_tensor(element::f32, shape);
copy_data(a, test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector()); copy_data(a, test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector());
copy_data(b, test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector()); copy_data(b, test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector());
...@@ -515,9 +514,9 @@ TEST(graph_partition, hybrid_no_split) ...@@ -515,9 +514,9 @@ TEST(graph_partition, hybrid_no_split)
auto backend = make_shared<HybridBackend>(int_with_cpu_mul_policy); auto backend = make_shared<HybridBackend>(int_with_cpu_mul_policy);
backend->compile(f); backend->compile(f);
shared_ptr<runtime::TensorView> a = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::TensorView> b = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> b = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::TensorView> c = backend->create_tensor(element::f32, shape); shared_ptr<runtime::Tensor> c = backend->create_tensor(element::f32, shape);
copy_data(a, test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector()); copy_data(a, test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector());
copy_data(b, test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector()); copy_data(b, test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector());
......
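`HybridBackend::call_with_validate` reuses one `Tensor` per boundary node, so data written by one sub-function's backend is read by the next. The reuse rule, factored into a hedged sketch (`tensor_for` is a hypothetical helper):

```
#include <memory>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/backend.hpp"

using NodeTensorMap = std::unordered_map<std::shared_ptr<ngraph::Node>,
                                         std::shared_ptr<ngraph::runtime::Tensor>>;

std::shared_ptr<ngraph::runtime::Tensor>
    tensor_for(NodeTensorMap& map,
               const std::shared_ptr<ngraph::Node>& node,
               const std::shared_ptr<ngraph::runtime::Backend>& backend)
{
    auto it = map.find(node);
    if (it != map.end())
    {
        return it->second; // boundary tensor shared with an adjacent sub-function
    }
    auto tv = backend->create_tensor(node->get_element_type(), node->get_shape());
    map.emplace(node, tv);
    return tv;
}
```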
...@@ -184,7 +184,7 @@ TEST(DISABLED_include, complete) ...@@ -184,7 +184,7 @@ TEST(DISABLED_include, complete)
// "ngraph/runtime/gpu/gpu_tensor_view.hpp", // "ngraph/runtime/gpu/gpu_tensor_view.hpp",
// "ngraph/runtime/gpu/gpu_tensor_view_wrapper.hpp", // "ngraph/runtime/gpu/gpu_tensor_view_wrapper.hpp",
// "ngraph/runtime/gpu/gpu_util.hpp", // "ngraph/runtime/gpu/gpu_util.hpp",
"ngraph/runtime/host_tensor_view.hpp", "ngraph/runtime/host_tensor.hpp",
"ngraph/runtime/interpreter/int_backend.hpp", "ngraph/runtime/interpreter/int_backend.hpp",
"ngraph/runtime/interpreter/int_call_frame.hpp", "ngraph/runtime/interpreter/int_call_frame.hpp",
"ngraph/runtime/interpreter/int_external_function.hpp", "ngraph/runtime/interpreter/int_external_function.hpp",
...@@ -246,7 +246,7 @@ TEST(DISABLED_include, complete) ...@@ -246,7 +246,7 @@ TEST(DISABLED_include, complete)
"ngraph/runtime/reference/tan.hpp", "ngraph/runtime/reference/tan.hpp",
"ngraph/runtime/reference/tanh.hpp", "ngraph/runtime/reference/tanh.hpp",
"ngraph/runtime/manager.hpp", "ngraph/runtime/manager.hpp",
"ngraph/runtime/tensor_view.hpp", "ngraph/runtime/tensor.hpp",
"ngraph/serializer.hpp", "ngraph/serializer.hpp",
"ngraph/shape.hpp", "ngraph/shape.hpp",
"ngraph/strides.hpp", "ngraph/strides.hpp",
......
...@@ -59,8 +59,8 @@ namespace ngraph ...@@ -59,8 +59,8 @@ namespace ngraph
/// \param atol Absolute tolerance /// \param atol Absolute tolerance
/// Returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|. /// Returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|.
template <typename T> template <typename T>
bool all_close(const std::shared_ptr<ngraph::runtime::TensorView>& a, bool all_close(const std::shared_ptr<ngraph::runtime::Tensor>& a,
const std::shared_ptr<ngraph::runtime::TensorView>& b, const std::shared_ptr<ngraph::runtime::Tensor>& b,
T rtol = 1e-5f, T rtol = 1e-5f,
T atol = 1e-8f) T atol = 1e-8f)
{ {
...@@ -85,8 +85,8 @@ namespace ngraph ...@@ -85,8 +85,8 @@ namespace ngraph
/// \param atol Absolute tolerance /// \param atol Absolute tolerance
/// Returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|. /// Returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|.
template <typename T> template <typename T>
bool all_close(const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& as, bool all_close(const std::vector<std::shared_ptr<ngraph::runtime::Tensor>>& as,
const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& bs, const std::vector<std::shared_ptr<ngraph::runtime::Tensor>>& bs,
T rtol, T rtol,
T atol) T atol)
{ {
......
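The doc comment above gives the elementwise rule: shapes must match and, for every element, |a_i - b_i| <= atol + rtol * |b_i|. The scalar predicate spelled out, as a minimal sketch:

```
#include <cmath>

// Elementwise closeness test behind all_close, per its doc comment.
inline bool close_enough(float a, float b, float rtol = 1e-5f, float atol = 1e-8f)
{
    return std::fabs(a - b) <= atol + rtol * std::fabs(b);
}
```

At the defaults, a difference of 5e-6 against b = 1.0f passes (5e-6 <= 1e-8 + 1e-5), while a difference of 1e-4 fails.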
@@ -79,8 +79,8 @@ bool test::all_close_f(const vector<float>& a,
     return rc;
 }
-bool test::all_close_f(const std::shared_ptr<runtime::TensorView>& a,
-                       const std::shared_ptr<runtime::TensorView>& b,
+bool test::all_close_f(const std::shared_ptr<runtime::Tensor>& a,
+                       const std::shared_ptr<runtime::Tensor>& b,
                        int mantissa_bits,
                        int tolerance_bits)
 {
@@ -98,8 +98,8 @@ bool test::all_close_f(const std::shared_ptr<runtime::TensorView>& a,
         read_float_vector(a), read_float_vector(b), mantissa_bits, tolerance_bits);
 }
-bool test::all_close_f(const std::vector<std::shared_ptr<runtime::TensorView>>& as,
-                       const std::vector<std::shared_ptr<runtime::TensorView>>& bs,
+bool test::all_close_f(const std::vector<std::shared_ptr<runtime::Tensor>>& as,
+                       const std::vector<std::shared_ptr<runtime::Tensor>>& bs,
                        int mantissa_bits,
                        int tolerance_bits)
 {
...
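Unlike all_close, all_close_f compares floats bit-wise rather than by magnitude: two values pass when they agree in the leading mantissa_bits of the mantissa, give or take tolerance_bits. A rough standalone illustration of that ULP-style check (a simplified sketch of the idea, not the exact ngraph implementation):

```
#include <cstdint>
#include <cstring>
#include <limits>

// Map a float's bit pattern onto a monotonic integer line so that the
// integer distance between two floats counts units in the last place (ULPs).
int32_t to_ordered(float f)
{
    int32_t i;
    std::memcpy(&i, &f, sizeof(i));
    return (i >= 0) ? i : std::numeric_limits<int32_t>::min() - i;
}

// Simplified bit-tolerance check (an assumption for illustration): with
// float's 24-bit effective mantissa, requiring agreement in the leading
// (mantissa_bits - tolerance_bits) bits allows a ULP distance of at most
// 2^(24 - mantissa_bits + tolerance_bits).
bool close_in_mantissa(float a, float b, int mantissa_bits, int tolerance_bits)
{
    int64_t d = static_cast<int64_t>(to_ordered(a)) - static_cast<int64_t>(to_ordered(b));
    if (d < 0)
    {
        d = -d;
    }
    return d <= (int64_t{1} << (24 - mantissa_bits + tolerance_bits));
}
```

Under this reading, the defaults (mantissa_bits = 8, tolerance_bits = 2) allow on the order of a few percent of relative error, which suits values that originated in a low-precision format.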
@@ -60,24 +60,24 @@ namespace ngraph
                          int tolerance_bits = 2);
         /// \brief Check if the two TensorViews are all close in float
-        /// \param a First TensorView to compare
-        /// \param b Second TensorView to compare
+        /// \param a First Tensor to compare
+        /// \param b Second Tensor to compare
         /// \param mantissa_bits The mantissa width of the underlying number before casting to float
         /// \param tolerance_bits Bit tolerance error
         /// Returns true iff the two TensorViews are all close in float
-        bool all_close_f(const std::shared_ptr<runtime::TensorView>& a,
-                         const std::shared_ptr<runtime::TensorView>& b,
+        bool all_close_f(const std::shared_ptr<runtime::Tensor>& a,
+                         const std::shared_ptr<runtime::Tensor>& b,
                          int mantissa_bits = 8,
                          int tolerance_bits = 2);
         /// \brief Check if the two vectors of TensorViews are all close in float
-        /// \param as First vector of TensorView to compare
-        /// \param bs Second vector of TensorView to compare
+        /// \param as First vector of Tensor to compare
+        /// \param bs Second vector of Tensor to compare
         /// \param mantissa_bits The mantissa width of the underlying number before casting to float
         /// \param tolerance_bits Bit tolerance error
         /// Returns true iff the two TensorViews are all close in float
-        bool all_close_f(const std::vector<std::shared_ptr<runtime::TensorView>>& as,
-                         const std::vector<std::shared_ptr<runtime::TensorView>>& bs,
+        bool all_close_f(const std::vector<std::shared_ptr<runtime::Tensor>>& as,
+                         const std::vector<std::shared_ptr<runtime::Tensor>>& bs,
                          int mantissa_bits = 8,
                          int tolerance_bits = 2);
     }
...
@@ -45,24 +45,24 @@ namespace ngraph
     namespace autodiff
     {
         template <typename T>
-        std::vector<std::shared_ptr<runtime::TensorView>>
+        std::vector<std::shared_ptr<runtime::Tensor>>
             get_autodiff(const std::shared_ptr<runtime::Backend>& backend,
                          std::shared_ptr<Function>& df,
-                         const std::vector<std::shared_ptr<runtime::TensorView>>& df_input_args,
+                         const std::vector<std::shared_ptr<runtime::Tensor>>& df_input_args,
                          const std::vector<std::shared_ptr<op::Parameter>>& indep_params)
         {
             // df/dX* = f'(c, ...)
            // using X* to denote all x "of interest" (represented by indep_params)
             // return value for this function
-            std::vector<std::shared_ptr<runtime::TensorView>> results;
+            std::vector<std::shared_ptr<runtime::Tensor>> results;
             // adjoint
             auto c_arg = df_input_args[0];
             auto y_shape = c_arg->get_shape();
             // df/dX* arguments
-            std::vector<std::shared_ptr<runtime::TensorView>> df_output_args;
+            std::vector<std::shared_ptr<runtime::Tensor>> df_output_args;
             // for each x "of interest"
             for (auto x : indep_params)
@@ -125,11 +125,11 @@ namespace ngraph
         }
         template <typename T>
-        std::vector<std::shared_ptr<runtime::TensorView>> backprop_derivative(
-            const std::shared_ptr<runtime::Backend>& backend,
-            const std::shared_ptr<Function>& f,
-            const std::vector<std::shared_ptr<runtime::TensorView>>& f_input_args,
-            const std::vector<std::shared_ptr<op::Parameter>>& indep_params)
+        std::vector<std::shared_ptr<runtime::Tensor>>
+            backprop_derivative(const std::shared_ptr<runtime::Backend>& backend,
+                                const std::shared_ptr<Function>& f,
+                                const std::vector<std::shared_ptr<runtime::Tensor>>& f_input_args,
+                                const std::vector<std::shared_ptr<op::Parameter>>& indep_params)
         {
             // y = f(X)
             // using X (upper case) to denote all paramenters of f (represented by f_input_args)
@@ -165,7 +165,7 @@ namespace ngraph
             auto df = s_df_map[f];
             // (c, X) arguments
-            std::vector<std::shared_ptr<runtime::TensorView>> df_input_args = f_input_args;
+            std::vector<std::shared_ptr<runtime::Tensor>> df_input_args = f_input_args;
             df_input_args.insert(df_input_args.begin(), c_arg);
             // call f'(c,X) to get df/dX*
@@ -177,11 +177,11 @@ namespace ngraph
             auto fprop_cache = cache_fprop(f, df);
             // (y, cached) arguments
-            std::vector<std::shared_ptr<runtime::TensorView>> mod_f_output_args;
+            std::vector<std::shared_ptr<runtime::Tensor>> mod_f_output_args;
             mod_f_output_args.push_back(backend->create_tensor<T>(y_shape));
             // (c, cached) arguments
-            std::vector<std::shared_ptr<runtime::TensorView>> mod_df_input_args = df_input_args;
+            std::vector<std::shared_ptr<runtime::Tensor>> mod_df_input_args = df_input_args;
             // add cached nodes to both modified f output and modified f' input arguments
             for (auto node : fprop_cache.fprop_output_nodes)
...
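For context, a hedged sketch of how a test might drive backprop_derivative; the x*x graph and the INTERPRETER backend are assumptions for illustration, not part of this commit:

```
// Assumed graph: y = X * X, differentiated with respect to X.
auto X = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});
auto f = std::make_shared<Function>(std::make_shared<op::Multiply>(X, X),
                                    op::ParameterVector{X});

auto backend = runtime::Backend::create("INTERPRETER");
auto x = backend->create_tensor(element::f32, Shape{2, 2});
copy_data(x, std::vector<float>{1, 2, 3, 4});

// Each returned dy/dX has shape concat(y.shape(), X.shape()) = {2, 2, 2, 2}.
std::vector<std::shared_ptr<runtime::Tensor>> dydx =
    autodiff::backprop_derivative<float>(backend, f, {x}, f->get_parameters());
```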
@@ -28,7 +28,7 @@ template <typename T>
 bool autodiff_numeric_compare(const std::shared_ptr<ngraph::runtime::Backend>& backend,
                               std::shared_ptr<ngraph::Function> f,
                               std::shared_ptr<ngraph::Function> g,
-                              const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& args,
+                              const std::vector<std::shared_ptr<ngraph::runtime::Tensor>>& args,
                               T rtol,
                               T atol)
 {
@@ -37,7 +37,7 @@ bool autodiff_numeric_compare(const std::shared_ptr<ngraph::runtime::Backend>& b
     // Use INTERPRETER to compute numerical derivatives
     auto interpreter_backend = ngraph::runtime::Backend::create("INTERPRETER");
-    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> interpreter_args;
+    std::vector<std::shared_ptr<ngraph::runtime::Tensor>> interpreter_args;
     for (auto arg : args)
     {
         auto interpreter_arg =
@@ -61,8 +61,8 @@ bool autodiff_numeric_compare(const std::shared_ptr<ngraph::runtime::Backend>& b
     auto results_sym =
         ngraph::autodiff::backprop_derivative<T>(backend, g, args, g->get_parameters());
-    // Cast to HostTensorView for comparision
-    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> interpreter_results_sym;
+    // Cast to HostTensor for comparision
+    std::vector<std::shared_ptr<ngraph::runtime::Tensor>> interpreter_results_sym;
     for (auto result : results_sym)
     {
         auto interpreter_result =
@@ -77,7 +77,7 @@ bool autodiff_numeric_compare(const std::shared_ptr<ngraph::runtime::Backend>& b
 template <typename T>
 bool autodiff_numeric_compare(const std::shared_ptr<ngraph::runtime::Backend>& backend,
                               std::function<std::shared_ptr<ngraph::Function>()> make_graph,
-                              const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& args,
+                              const std::vector<std::shared_ptr<ngraph::runtime::Tensor>>& args,
                               T rtol,
                               T atol)
 {
@@ -89,7 +89,7 @@ bool autodiff_numeric_compare_selective(
     const std::shared_ptr<ngraph::runtime::Backend>& backend,
     std::shared_ptr<ngraph::Function> f,
     std::shared_ptr<ngraph::Function> g,
-    const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& args,
+    const std::vector<std::shared_ptr<ngraph::runtime::Tensor>>& args,
     T rtol,
     T atol,
     const std::vector<bool>& indep_param_mask)
@@ -110,7 +110,7 @@ bool autodiff_numeric_compare_selective(
     auto interpreter_backend = ngraph::runtime::Backend::create("INTERPRETER");
-    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> interpreter_args;
+    std::vector<std::shared_ptr<ngraph::runtime::Tensor>> interpreter_args;
     for (auto arg : args)
     {
         auto interpreter_arg =
@@ -146,8 +146,8 @@ bool autodiff_numeric_compare_selective(
     auto results_sym = ngraph::autodiff::backprop_derivative<T>(backend, g, args, g_indep_params);
-    // Cast to HostTensorView for comparision
-    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> interpreter_results_sym;
+    // Cast to HostTensor for comparision
+    std::vector<std::shared_ptr<ngraph::runtime::Tensor>> interpreter_results_sym;
     for (auto result : results_sym)
     {
         auto interpreter_result =
@@ -163,7 +163,7 @@ template <typename T>
 bool autodiff_numeric_compare_selective(
     const std::shared_ptr<ngraph::runtime::Backend>& backend,
     std::function<std::shared_ptr<ngraph::Function>()> make_graph,
-    const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& args,
+    const std::vector<std::shared_ptr<ngraph::runtime::Tensor>>& args,
     T rtol,
     T atol,
     const std::vector<bool>& indep_param_mask)
...
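A hedged sketch of the make_graph overload in a test; the tanh graph, input values, and tolerances are illustrative assumptions:

```
// make_graph can be invoked more than once, presumably so that numeric and
// symbolic differentiation each get a fresh copy of the function.
auto make_graph = []() {
    auto X = std::make_shared<op::Parameter>(element::f32, Shape{2, 3});
    return std::make_shared<Function>(std::make_shared<op::Tanh>(X),
                                      op::ParameterVector{X});
};

auto backend = runtime::Backend::create("INTERPRETER");
auto x = backend->create_tensor(element::f32, Shape{2, 3});
copy_data(x, std::vector<float>{-2, -1, 0, 1, 2, 3});

// Symbolic (backprop) and numeric (finite-difference) derivatives must agree
// within the given relative and absolute tolerances.
EXPECT_TRUE(autodiff_numeric_compare<float>(backend, make_graph, {x}, .01f, .01f));
```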
@@ -33,10 +33,10 @@ namespace ngraph
         /// \param indep_params parameters with respect to which to compute derivatives
         /// \returns vector of dy/dvar, where each dy/dvar's shape is concat(y.shape(), var.shape())
         template <typename T>
-        std::vector<std::shared_ptr<runtime::TensorView>>
+        std::vector<std::shared_ptr<runtime::Tensor>>
             numeric_derivative(const std::shared_ptr<runtime::Backend>& backend,
                                const std::shared_ptr<Function>& f,
-                               const std::vector<std::shared_ptr<runtime::TensorView>>& args,
+                               const std::vector<std::shared_ptr<runtime::Tensor>>& args,
                                T delta,
                                const std::vector<std::shared_ptr<op::Parameter>>& indep_params)
         {
@@ -45,7 +45,7 @@ namespace ngraph
             auto params = f->get_parameters();
             // Results for each derivative, shape Y|X_i
-            std::vector<std::shared_ptr<runtime::TensorView>> results;
+            std::vector<std::shared_ptr<runtime::Tensor>> results;
             for (auto param : indep_params)
             {
@@ -59,7 +59,7 @@ namespace ngraph
                 auto ref_y = backend->create_tensor<T>(y_shape);
                 backend->call_with_validate(
-                    f, std::vector<std::shared_ptr<ngraph::runtime::TensorView>>{ref_y}, args);
+                    f, std::vector<std::shared_ptr<ngraph::runtime::Tensor>>{ref_y}, args);
                 auto ref_vec = read_vector<T>(ref_y);
                 // inc_y will hold f(x+dx) values
...
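numeric_derivative approximates each dy/dx entry with a forward difference, perturbing one input element at a time and re-running f. The core idea reduced to a scalar function (a sketch of the technique, not ngraph code):

```
#include <functional>

// Forward difference: df/dx ~= (f(x + delta) - f(x)) / delta.
// numeric_derivative applies this per element of each independent parameter,
// which is why it needs one extra function evaluation per perturbed element.
double forward_difference(const std::function<double(double)>& f, double x, double delta)
{
    return (f(x + delta) - f(x)) / delta;
}

// Example: forward_difference([](double v) { return v * v; }, 3.0, 1e-3)
// returns about 6.001, close to the exact derivative 6.
```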
@@ -41,8 +41,8 @@ namespace ngraph
         /// \brief Randomly initialize a tensor
         /// \param ptv The tensor to initialize
-        const std::shared_ptr<runtime::TensorView>
-            initialize(const std::shared_ptr<runtime::TensorView>& ptv)
+        const std::shared_ptr<runtime::Tensor>
+            initialize(const std::shared_ptr<runtime::Tensor>& ptv)
         {
             std::vector<T> vec = read_vector<T>(ptv);
             initialize(vec);
...
@@ -23,7 +23,7 @@
 using namespace std;
 using namespace ngraph;
-vector<float> read_float_vector(shared_ptr<runtime::TensorView> tv)
+vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
 {
     vector<float> float_vec;
     element::Type element_type = tv->get_tensor_layout()->get_element_type();
...
@@ -24,7 +24,7 @@
 #include "ngraph/file_util.hpp"
 #include "ngraph/log.hpp"
 #include "ngraph/runtime/backend.hpp"
-#include "ngraph/runtime/tensor_view.hpp"
+#include "ngraph/runtime/tensor.hpp"
 #include "ngraph/serializer.hpp"
 namespace ngraph
@@ -37,18 +37,18 @@ bool validate_list(const std::list<std::shared_ptr<ngraph::Node>>& nodes);
 std::shared_ptr<ngraph::Function> make_test_graph();
 template <typename T>
-void copy_data(std::shared_ptr<ngraph::runtime::TensorView> tv, const std::vector<T>& data)
+void copy_data(std::shared_ptr<ngraph::runtime::Tensor> tv, const std::vector<T>& data)
 {
     size_t data_size = data.size() * sizeof(T);
     tv->write(data.data(), 0, data_size);
 }
 template <typename T>
-std::vector<T> read_vector(std::shared_ptr<ngraph::runtime::TensorView> tv)
+std::vector<T> read_vector(std::shared_ptr<ngraph::runtime::Tensor> tv)
 {
     if (ngraph::element::from<T>() != tv->get_tensor_layout()->get_element_type())
     {
-        throw std::invalid_argument("read_vector type must match TensorView type");
+        throw std::invalid_argument("read_vector type must match Tensor type");
     }
     size_t element_count = ngraph::shape_size(tv->get_shape());
     size_t size = element_count * sizeof(T);
@@ -57,10 +57,10 @@ std::vector<T> read_vector(std::shared_ptr<ngraph::runtime::TensorView> tv)
     return rc;
 }
-std::vector<float> read_float_vector(std::shared_ptr<ngraph::runtime::TensorView> tv);
+std::vector<float> read_float_vector(std::shared_ptr<ngraph::runtime::Tensor> tv);
 template <typename T>
-void write_vector(std::shared_ptr<ngraph::runtime::TensorView> tv, const std::vector<T>& values)
+void write_vector(std::shared_ptr<ngraph::runtime::Tensor> tv, const std::vector<T>& values)
 {
     tv->write(values.data(), 0, values.size() * sizeof(T));
 }
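copy_data, read_vector, and write_vector are the free-function helpers tests use to move data through a runtime::Tensor; a round-trip sketch (the backend name is an assumption for illustration):

```
auto backend = ngraph::runtime::Backend::create("INTERPRETER");
auto t = backend->create_tensor(ngraph::element::f32, ngraph::Shape{4});

write_vector(t, std::vector<float>{1, 2, 3, 4});
std::vector<float> round_trip = read_vector<float>(t); // {1, 2, 3, 4}

// read_vector<double>(t) would throw std::invalid_argument, because the
// template type must match the tensor's element type.
```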
@@ -109,7 +109,7 @@ std::vector<std::vector<T1>> execute(const std::shared_ptr<ngraph::Function>& fu
         throw ngraph::ngraph_error("number of parameters and arguments don't match");
     }
-    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> arg_tensors(args.size());
+    std::vector<std::shared_ptr<ngraph::runtime::Tensor>> arg_tensors(args.size());
     for (size_t i = 0; i < args.size(); i++)
     {
         auto t = backend->create_tensor(parms.at(i)->get_element_type(), parms.at(i)->get_shape());
@@ -118,7 +118,7 @@ std::vector<std::vector<T1>> execute(const std::shared_ptr<ngraph::Function>& fu
     }
     auto results = function->get_results();
-    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> result_tensors(results.size());
+    std::vector<std::shared_ptr<ngraph::runtime::Tensor>> result_tensors(results.size());
     for (size_t i = 0; i < results.size(); i++)
     {
...
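The execute helper bundles the create-tensor / copy-data / call / read-back cycle shown above into one call; a sketch of typical use, assuming a (function, args, backend_id) parameter order and an Add graph (both assumptions for illustration):

```
// Illustrative 2x2 Add graph run through the execute helper.
Shape shape{2, 2};
auto A = std::make_shared<op::Parameter>(element::f32, shape);
auto B = std::make_shared<op::Parameter>(element::f32, shape);
auto f = std::make_shared<Function>(std::make_shared<op::Add>(A, B),
                                    op::ParameterVector{A, B});

std::vector<std::vector<float>> args{{1, 2, 3, 4}, {5, 6, 7, 8}};
auto results = execute<float>(f, args, "INTERPRETER");
// results[0] == {6, 8, 10, 12}
```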