Commit d3f3a1ff authored by Robert Kimball, committed by Scott Cyphers

Remove Backend parent from runtime::Tensor (#2810)

* Add new HybridTensor class. Remove Tensor parent.

* update more backends

* fix GPU tensor

* update intel gpu backend
parent 1cede467
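The thrust of the change, repeated across every backend below, is that tensor constructors and the runtime::Tensor base class no longer take a const Backend* parent, so backends stop passing "this" when they create tensors. A minimal standalone sketch of the resulting ownership pattern; ToyTensor, ToyBackend, and the plain byte-size argument are illustrative stand-ins, not nGraph types:

// Sketch only: a tensor that keeps just its own buffer, and a backend factory
// that no longer hands itself to the tensor as a parent.
#include <cstddef>
#include <memory>
#include <vector>

struct ToyTensor
{
    explicit ToyTensor(std::size_t byte_size)
        : m_buffer(byte_size) // no back-pointer to the creating backend
    {
    }
    std::vector<char> m_buffer;
};

struct ToyBackend
{
    std::shared_ptr<ToyTensor> create_tensor(std::size_t byte_size)
    {
        // Before this commit the equivalent nGraph call sites passed "this" as a parent.
        return std::make_shared<ToyTensor>(byte_size);
    }
};

int main()
{
    ToyBackend backend;
    auto tensor = backend.create_tensor(64);
    return tensor->m_buffer.size() == 64 ? 0 : 1;
}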
@@ -392,6 +392,7 @@ set(SRC ${SRC}
     runtime/hybrid/hybrid_backend.hpp
     runtime/hybrid/hybrid_executable.cpp
     runtime/hybrid/hybrid_executable.hpp
+    runtime/hybrid/hybrid_tensor.cpp
     runtime/hybrid/hybrid_util.cpp
     runtime/hybrid/hybrid_util.hpp
     runtime/hybrid/op/function_call.cpp
...
@@ -60,13 +60,13 @@ shared_ptr<runtime::cpu::CPU_CallFrame> runtime::cpu::CPU_Backend::make_call_fra
 shared_ptr<runtime::Tensor>
     runtime::cpu::CPU_Backend::create_tensor(const element::Type& element_type, const Shape& shape)
 {
-    return make_shared<runtime::cpu::CPUTensorView>(element_type, shape, this);
+    return make_shared<runtime::cpu::CPUTensorView>(element_type, shape);
 }

 shared_ptr<runtime::Tensor> runtime::cpu::CPU_Backend::create_tensor(
     const element::Type& element_type, const Shape& shape, void* memory_pointer)
 {
-    return make_shared<runtime::cpu::CPUTensorView>(element_type, shape, memory_pointer, this);
+    return make_shared<runtime::cpu::CPUTensorView>(element_type, shape, memory_pointer);
 }

 shared_ptr<runtime::Executable>
...
@@ -35,10 +35,8 @@ using namespace std;

 runtime::cpu::CPUTensorView::CPUTensorView(const ngraph::element::Type& element_type,
                                            const Shape& shape,
-                                           void* memory_pointer,
-                                           const runtime::Backend* parent)
-    : runtime::Tensor(std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, "external"),
-                      parent)
+                                           void* memory_pointer)
+    : runtime::Tensor(std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, ""))
     , buffer(nullptr)
     , aligned_buffer(nullptr)
 {
@@ -74,9 +72,8 @@ runtime::cpu::CPUTensorView::CPUTensorView(const ngraph::element::Type& element_
 }

 runtime::cpu::CPUTensorView::CPUTensorView(const ngraph::element::Type& element_type,
-                                           const Shape& shape,
-                                           const runtime::Backend* parent)
-    : CPUTensorView(element_type, shape, nullptr, parent)
+                                           const Shape& shape)
+    : CPUTensorView(element_type, shape, nullptr)
 {
 }
...
@@ -33,13 +33,10 @@ namespace ngraph
             class CPUTensorView : public ngraph::runtime::Tensor
             {
             public:
+                CPUTensorView(const ngraph::element::Type& element_type, const Shape& shape);
                 CPUTensorView(const ngraph::element::Type& element_type,
                               const Shape& shape,
-                              const runtime::Backend* parent);
-                CPUTensorView(const ngraph::element::Type& element_type,
-                              const Shape& shape,
-                              void* memory_pointer,
-                              const runtime::Backend* parent);
+                              void* memory_pointer);

                 virtual ~CPUTensorView() override;
                 char* get_data_ptr();
...
@@ -109,7 +109,7 @@ runtime::gpu::GPU_Backend::BackendContext::~BackendContext()
 shared_ptr<runtime::Tensor>
     runtime::gpu::GPU_Backend::create_tensor(const element::Type& element_type, const Shape& shape)
 {
-    return make_shared<runtime::gpu::GPUTensor>(element_type, shape, this);
+    return make_shared<runtime::gpu::GPUTensor>(element_type, shape);
 }

 shared_ptr<runtime::Tensor> runtime::gpu::GPU_Backend::create_tensor(
@@ -119,7 +119,7 @@ shared_ptr<runtime::Tensor> runtime::gpu::GPU_Backend::create_tensor(
     {
         throw ngraph_error("The pointer passed to create_tensor is not a device pointer.");
     }
-    return make_shared<runtime::gpu::GPUTensor>(element_type, shape, memory_pointer, this);
+    return make_shared<runtime::gpu::GPUTensor>(element_type, shape, memory_pointer);
 }

 shared_ptr<runtime::Executable> runtime::gpu::GPU_Backend::compile(shared_ptr<Function> func,
...
@@ -29,10 +29,8 @@ using namespace std;

 runtime::gpu::GPUTensor::GPUTensor(const ngraph::element::Type& element_type,
                                    const Shape& shape,
-                                   void* memory_pointer,
-                                   const Backend* backend)
-    : runtime::Tensor(std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, "external"),
-                      backend)
+                                   void* memory_pointer)
+    : runtime::Tensor(std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, ""))
     , m_custom_memory(false)
 {
     m_descriptor->set_tensor_layout(
@@ -57,10 +55,8 @@ runtime::gpu::GPUTensor::GPUTensor(const ngraph::element::Type& element_type,
     }
 }

-runtime::gpu::GPUTensor::GPUTensor(const ngraph::element::Type& element_type,
-                                   const Shape& shape,
-                                   const Backend* backend)
-    : GPUTensor(element_type, shape, nullptr, backend)
+runtime::gpu::GPUTensor::GPUTensor(const ngraph::element::Type& element_type, const Shape& shape)
+    : GPUTensor(element_type, shape, nullptr)
 {
 }
...
@@ -36,11 +36,8 @@ namespace ngraph
 class ngraph::runtime::gpu::GPUTensor : public ngraph::runtime::Tensor
 {
 public:
-    GPUTensor(const ngraph::element::Type& element_type, const Shape& shape, const Backend* parent);
-    GPUTensor(const ngraph::element::Type& element_type,
-              const Shape& shape,
-              void* memory_pointer,
-              const Backend* parent);
+    GPUTensor(const ngraph::element::Type& element_type, const Shape& shape);
+    GPUTensor(const ngraph::element::Type& element_type, const Shape& shape, void* memory_pointer);

     virtual ~GPUTensor() override;

     /// \brief Write bytes directly into the tensor
...
@@ -29,10 +29,8 @@ static const size_t alignment = 64;

 runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
                                 const Shape& shape,
                                 void* memory_pointer,
-                                const string& name,
-                                const Backend* parent)
-    : runtime::Tensor(std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, name),
-                      parent)
+                                const string& name)
+    : runtime::Tensor(std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, name))
     , m_allocated_buffer_pool(nullptr)
     , m_aligned_buffer_pool(nullptr)
@@ -61,24 +59,20 @@ runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,

 runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
                                 const Shape& shape,
-                                const string& name,
-                                const Backend* parent)
-    : HostTensor(element_type, shape, nullptr, name, parent)
+                                const string& name)
+    : HostTensor(element_type, shape, nullptr, name)
 {
 }

-runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
-                                const Shape& shape,
-                                const Backend* parent)
-    : HostTensor(element_type, shape, nullptr, "external", parent)
+runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type, const Shape& shape)
+    : HostTensor(element_type, shape, nullptr, "")
 {
 }

 runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
                                 const Shape& shape,
-                                void* memory_pointer,
-                                const Backend* parent)
-    : HostTensor(element_type, shape, memory_pointer, "external", parent)
+                                void* memory_pointer)
+    : HostTensor(element_type, shape, memory_pointer, "")
 {
 }
...
@@ -35,20 +35,13 @@ class ngraph::runtime::HostTensor : public ngraph::runtime::Tensor
 public:
     HostTensor(const ngraph::element::Type& element_type,
               const Shape& shape,
-              const std::string& name = "external",
-              const Backend* parent = nullptr);
+              const std::string& name);
     HostTensor(const ngraph::element::Type& element_type,
               const Shape& shape,
               void* memory_pointer,
-              const std::string& name = "external",
-              const Backend* parent = nullptr);
-    HostTensor(const ngraph::element::Type& element_type,
-              const Shape& shape,
-              const Backend* parent);
-    HostTensor(const ngraph::element::Type& element_type,
-              const Shape& shape,
-              void* memory_pointer,
-              const Backend* parent);
+              const std::string& name);
+    HostTensor(const ngraph::element::Type& element_type, const Shape& shape);
+    HostTensor(const ngraph::element::Type& element_type, const Shape& shape, void* memory_pointer);

     virtual ~HostTensor() override;
     char* get_data_ptr();
...
@@ -15,18 +15,8 @@
 //*****************************************************************************

 #include "ngraph/runtime/hybrid/hybrid_backend.hpp"
-#include "ngraph/graph_util.hpp"
-#include "ngraph/pass/manager.hpp"
-#include "ngraph/pass/visualize_tree.hpp"
-#include "ngraph/runtime/host_tensor.hpp"
 #include "ngraph/runtime/hybrid/hybrid_executable.hpp"
-#include "ngraph/runtime/hybrid/hybrid_util.hpp"
-#include "ngraph/runtime/hybrid/pass/default_placement.hpp"
-#include "ngraph/runtime/hybrid/pass/dump.hpp"
-#include "ngraph/runtime/hybrid/pass/fix_get_output_element.hpp"
-#include "ngraph/runtime/hybrid/pass/liveness.hpp"
-#include "ngraph/runtime/hybrid/pass/memory_layout.hpp"
-#include "ngraph/runtime/tensor.hpp"
+#include "ngraph/runtime/hybrid/hybrid_tensor.hpp"

 using namespace ngraph;
 using namespace std;
@@ -41,17 +31,13 @@ shared_ptr<runtime::Tensor>
     runtime::hybrid::HybridBackend::create_tensor(const element::Type& element_type,
                                                   const Shape& shape)
 {
-    auto it = m_backend_list.begin();
-    NGRAPH_CHECK(it != m_backend_list.end());
-    return (*it)->create_tensor(element_type, shape);
+    return make_shared<HybridTensor>(element_type, shape);
 }

 shared_ptr<runtime::Tensor> runtime::hybrid::HybridBackend::create_tensor(
     const element::Type& element_type, const Shape& shape, void* memory_pointer)
 {
-    auto it = m_backend_list.begin();
-    NGRAPH_CHECK(it != m_backend_list.end());
-    return (*it)->create_tensor(element_type, shape, memory_pointer);
+    return make_shared<HybridTensor>(element_type, shape, memory_pointer);
 }

 shared_ptr<runtime::Executable>
...
@@ -92,28 +92,11 @@ bool runtime::hybrid::HybridExecutable::call(const vector<shared_ptr<runtime::Te
         auto it = map_node_to_tensor.find(parameter_node);
         if (it != map_node_to_tensor.end())
         {
-            if (it->second->get_parent() == backend.get())
-            {
-                parameters.push_back(it->second);
-            }
-            else
-            {
-                auto parameter = backend->create_tensor(parameter_node->get_element_type(),
-                                                        parameter_node->get_shape());
-                parameter->copy_from(*(it->second));
-                parameters.push_back(parameter);
-            }
+            parameters.push_back(it->second);
         }
         else
         {
-            // Handle temporary tensors that go between subgraphs
-            auto result_node = m_map_parameter_to_result.at(parameter_node);
-            auto result = map_node_to_tensor.at(result_node);
-            auto parameter = backend->create_tensor(parameter_node->get_element_type(),
-                                                    parameter_node->get_shape());
-            parameter->copy_from(*result);
-            map_node_to_tensor[parameter_node] = parameter;
-            parameters.push_back(parameter);
+            throw runtime_error("Parameter temp not found in hybrid cache");
         }
     }
@@ -125,25 +108,11 @@ bool runtime::hybrid::HybridExecutable::call(const vector<shared_ptr<runtime::Te
         auto it = map_node_to_tensor.find(result_node);
         if (it != map_node_to_tensor.end())
         {
-            if (it->second->get_parent() == backend.get())
-            {
-                results.push_back(it->second);
-            }
-            else
-            {
-                auto result = backend->create_tensor(result_node->get_element_type(),
-                                                     result_node->get_shape());
-                results.push_back(result);
-                copy_back.insert({result.get(), it->second.get()});
-            }
+            results.push_back(it->second);
         }
         else
         {
-            // Handle temporary tensors that go between subgraphs
-            auto result =
-                backend->create_tensor(result_node->get_element_type(), result_node->get_shape());
-            map_node_to_tensor[result_node] = result;
-            results.push_back(result);
+            throw runtime_error("Result temp not found in hybrid cache");
         }
     }
@@ -157,17 +126,3 @@ bool runtime::hybrid::HybridExecutable::call(const vector<shared_ptr<runtime::Te
     return rc;
 }
-
-size_t runtime::hybrid::HybridExecutable::get_placement(const runtime::Tensor* t)
-{
-    size_t index = 0;
-    for (const shared_ptr<ngraph::runtime::Backend>& be : m_backend_list)
-    {
-        if (t->get_parent() == be.get())
-        {
-            return index;
-        }
-        index++;
-    }
-    return -1;
-}
@@ -59,6 +59,4 @@ private:
     std::vector<std::shared_ptr<runtime::Backend>> m_backend_list;
     bool m_debug_enabled = false;
-
-    size_t get_placement(const runtime::Tensor* t);
 };
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <cstring>
#include <memory>
#include "ngraph/descriptor/layout/dense_tensor_layout.hpp"
#include "ngraph/runtime/hybrid/hybrid_tensor.hpp"
#include "ngraph/util.hpp"
using namespace ngraph;
using namespace std;
static const size_t alignment = 64;
runtime::HybridTensor::HybridTensor(const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer)
: runtime::Tensor(std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, ""))
, m_allocated_buffer_pool(nullptr)
, m_aligned_buffer_pool(nullptr)
{
m_descriptor->set_tensor_layout(
std::make_shared<ngraph::descriptor::layout::DenseTensorLayout>(*m_descriptor));
m_buffer_size = m_descriptor->get_tensor_layout()->get_size() * element_type.size();
if (memory_pointer != nullptr)
{
m_aligned_buffer_pool = static_cast<char*>(memory_pointer);
}
else if (m_buffer_size > 0)
{
size_t allocation_size = m_buffer_size + alignment;
m_allocated_buffer_pool = static_cast<char*>(ngraph_malloc(allocation_size));
m_aligned_buffer_pool = m_allocated_buffer_pool;
size_t mod = size_t(m_aligned_buffer_pool) % alignment;
if (mod != 0)
{
m_aligned_buffer_pool += (alignment - mod);
}
}
}
runtime::HybridTensor::HybridTensor(const ngraph::element::Type& element_type, const Shape& shape)
: HybridTensor(element_type, shape, nullptr)
{
}
runtime::HybridTensor::~HybridTensor()
{
if (m_allocated_buffer_pool != nullptr)
{
ngraph_free(m_allocated_buffer_pool);
}
}
char* runtime::HybridTensor::get_data_ptr()
{
return m_aligned_buffer_pool;
}
const char* runtime::HybridTensor::get_data_ptr() const
{
return m_aligned_buffer_pool;
}
void runtime::HybridTensor::write(const void* source, size_t tensor_offset, size_t n)
{
if (tensor_offset + n > m_buffer_size)
{
throw out_of_range("write access past end of tensor");
}
char* target = get_data_ptr();
memcpy(target, source, n);
}
void runtime::HybridTensor::read(void* target, size_t tensor_offset, size_t n) const
{
if (tensor_offset + n > m_buffer_size)
{
throw out_of_range("read access past end of tensor");
}
const char* source = get_data_ptr();
memcpy(target, &source[tensor_offset], n);
}
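The HybridTensor constructor above over-allocates by alignment (64) bytes and then rounds the raw pointer up to the next 64-byte boundary. A self-contained sketch of that same rounding arithmetic; the names allocated and aligned are illustrative, standing in for m_allocated_buffer_pool and m_aligned_buffer_pool:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

int main()
{
    const std::size_t alignment = 64;
    const std::size_t buffer_size = 100;

    // Over-allocate so an aligned region of buffer_size bytes always fits.
    char* allocated = static_cast<char*>(std::malloc(buffer_size + alignment));
    assert(allocated != nullptr);
    char* aligned = allocated;

    // Round the pointer up to the next multiple of alignment, as done above.
    std::size_t mod = reinterpret_cast<std::uintptr_t>(aligned) % alignment;
    if (mod != 0)
    {
        aligned += (alignment - mod);
    }

    assert(reinterpret_cast<std::uintptr_t>(aligned) % alignment == 0);
    assert(aligned + buffer_size <= allocated + buffer_size + alignment);

    std::free(allocated);
    return 0;
}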
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <memory>
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/type/element_type.hpp"
namespace ngraph
{
namespace runtime
{
class HybridTensor;
}
}
class ngraph::runtime::HybridTensor : public ngraph::runtime::Tensor
{
public:
HybridTensor(const ngraph::element::Type& element_type, const Shape& shape);
HybridTensor(const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer);
virtual ~HybridTensor() override;
char* get_data_ptr();
const char* get_data_ptr() const;
template <typename T>
T* get_data_ptr()
{
return reinterpret_cast<T*>(get_data_ptr());
}
template <typename T>
const T* get_data_ptr() const
{
return reinterpret_cast<const T*>(get_data_ptr());
}
/// \brief Write bytes directly into the tensor
/// \param p Pointer to source of data
/// \param tensor_offset Offset into tensor storage to begin writing. Must be element-aligned.
/// \param n Number of bytes to write, must be integral number of elements.
void write(const void* p, size_t tensor_offset, size_t n) override;
/// \brief Read bytes directly from the tensor
/// \param p Pointer to destination for data
/// \param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
/// \param n Number of bytes to read, must be integral number of elements.
void read(void* p, size_t tensor_offset, size_t n) const override;
private:
HybridTensor(const HybridTensor&) = delete;
HybridTensor(HybridTensor&&) = delete;
HybridTensor& operator=(const HybridTensor&) = delete;
char* m_allocated_buffer_pool;
char* m_aligned_buffer_pool;
size_t m_buffer_size;
};
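Given the declarations above, a HybridTensor can round-trip data through write() and read(). A small usage sketch, assuming the nGraph headers added in this commit are on the include path; it constructs the tensor directly rather than obtaining it from HybridBackend::create_tensor:

// Usage sketch: round-trip four floats through a 2x2 f32 HybridTensor.
#include <cassert>
#include <vector>

#include "ngraph/runtime/hybrid/hybrid_tensor.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/type/element_type.hpp"

int main()
{
    using namespace ngraph;

    std::vector<float> input{1.0f, 2.0f, 3.0f, 4.0f};
    std::vector<float> output(4, 0.0f);

    runtime::HybridTensor tensor(element::f32, Shape{2, 2});
    tensor.write(input.data(), 0, input.size() * sizeof(float));
    tensor.read(output.data(), 0, output.size() * sizeof(float));

    assert(input == output);
    return 0;
}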
@@ -353,14 +353,14 @@ shared_ptr<runtime::Tensor>
                                                              const Shape& shape)
 {
     return make_shared<runtime::intelgpu::IntelGPUTensorView>(
-        element_type, shape, *cldnn_engine, nullptr, this);
+        element_type, shape, *cldnn_engine, nullptr);
 }

 shared_ptr<runtime::Tensor> runtime::intelgpu::IntelGPUBackend::create_tensor(
     const element::Type& element_type, const Shape& shape, void* memory_pointer)
 {
     return make_shared<runtime::intelgpu::IntelGPUTensorView>(
-        element_type, shape, *cldnn_engine, memory_pointer, this);
+        element_type, shape, *cldnn_engine, memory_pointer);
 }

 shared_ptr<runtime::Executable>
...
@@ -28,9 +28,8 @@ using namespace std;

 runtime::intelgpu::IntelGPUTensorView::IntelGPUTensorView(const element::Type& element_type,
                                                           const Shape& shape,
                                                           const cldnn::engine& backend_engine,
-                                                          void* memory_pointer,
-                                                          const runtime::Backend* parent)
-    : runtime::Tensor(make_shared<descriptor::Tensor>(element_type, shape, "external"), parent)
+                                                          void* memory_pointer)
+    : runtime::Tensor(make_shared<descriptor::Tensor>(element_type, shape, ""))
 {
     const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(element_type, shape);
...
@@ -38,8 +38,7 @@ public:
     IntelGPUTensorView(const element::Type& element_type,
                        const Shape& shape,
                        const cldnn::engine& backend_engine,
-                       void* memory_pointer,
-                       const runtime::Backend* parent);
+                       void* memory_pointer);

     /// \brief Write bytes directly into the tensor
     /// \param p Pointer to source of data
...
@@ -46,13 +46,13 @@ runtime::interpreter::INTBackend::INTBackend(const vector<string>& unsupported_o
 shared_ptr<runtime::Tensor>
     runtime::interpreter::INTBackend::create_tensor(const element::Type& type, const Shape& shape)
 {
-    return make_shared<runtime::HostTensor>(type, shape, this);
+    return make_shared<runtime::HostTensor>(type, shape);
 }

 shared_ptr<runtime::Tensor> runtime::interpreter::INTBackend::create_tensor(
     const element::Type& type, const Shape& shape, void* memory_pointer)
 {
-    return make_shared<runtime::HostTensor>(type, shape, memory_pointer, this);
+    return make_shared<runtime::HostTensor>(type, shape, memory_pointer);
 }

 shared_ptr<runtime::Executable>
...
@@ -38,11 +38,9 @@ namespace ngraph
        class Tensor
        {
        protected:
-            Tensor(const std::shared_ptr<ngraph::descriptor::Tensor>& descriptor,
-                   const Backend* parent)
+            Tensor(const std::shared_ptr<ngraph::descriptor::Tensor>& descriptor)
                : m_descriptor(descriptor)
                , m_stale(true)
-                , m_parent(parent)
            {
            }
@@ -107,11 +105,9 @@ namespace ngraph
            /// \param source The source tensor
            virtual void copy_from(const ngraph::runtime::Tensor& source);

-            const Backend* get_parent() const { return m_parent; }
        protected:
            std::shared_ptr<ngraph::descriptor::Tensor> m_descriptor;
            bool m_stale;
-            const Backend* m_parent;
        };

        using TensorViewPtrs = std::vector<std::shared_ptr<Tensor>>;
...