Commit f2a93568 authored by Robert Kimball

revert API

parent 567bc822
@@ -20,20 +20,13 @@
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/backend_manager.hpp"
#include "ngraph/runtime/dynamic/dynamic_backend.hpp"
#include "ngraph/runtime/executable.hpp"
#include "ngraph/util.hpp"
using namespace std;
using namespace ngraph;
runtime::Backend::Backend()
{
async_thread_start();
}
runtime::Backend::~Backend()
{
async_thread_stop();
}
std::shared_ptr<ngraph::Node> runtime::Backend::get_backend_op(const std::string& op_name, ...)
@@ -105,160 +98,6 @@ std::shared_ptr<runtime::Executable> runtime::Backend::load(istream& input_strea
throw runtime_error("load operation unimplemented.");
}
runtime::Backend::AsyncEvent::AsyncEvent(Type type,
const shared_ptr<Tensor>& tensor,
void* p,
size_t size_in_bytes,
size_t buffer_number)
: m_type{type}
, m_buffer_number{buffer_number}
, m_data{p}
, m_size_in_bytes{size_in_bytes}
, m_executable{nullptr}
, m_tensor{tensor}
, m_outputs{nullptr}
, m_inputs{nullptr}
{
}
runtime::Backend::AsyncEvent::AsyncEvent(const shared_ptr<Executable>& executable,
const vector<shared_ptr<runtime::Tensor>>& outputs,
const vector<shared_ptr<runtime::Tensor>>& inputs)
: m_type{Type::EXECUTE}
, m_buffer_number{0}
, m_data{nullptr}
, m_size_in_bytes{0}
, m_executable{executable}
, m_tensor{nullptr}
, m_outputs{outputs}
, m_inputs{inputs}
{
}
future<void> runtime::Backend::post_async_read_event(const shared_ptr<Tensor>& tensor,
void* p,
size_t size_in_bytes,
size_t buffer_number)
{
auto event =
make_shared<AsyncEvent>(AsyncEvent::Type::READ, tensor, p, size_in_bytes, buffer_number);
unique_lock<std::mutex> lock(m_event_queue_mutex);
m_event_queue.push_back(event);
m_event_queue_condition.notify_all();
return event->get_future();
}
future<void> runtime::Backend::post_async_write_event(const shared_ptr<Tensor>& tensor,
const void* p,
size_t size_in_bytes,
size_t buffer_number)
{
auto event = make_shared<AsyncEvent>(
AsyncEvent::Type::WRITE, tensor, const_cast<void*>(p), size_in_bytes, buffer_number);
unique_lock<std::mutex> lock(m_event_queue_mutex);
m_event_queue.push_back(event);
m_event_queue_condition.notify_all();
return event->get_future();
}
future<void> runtime::Backend::post_async_execute_event(
const std::shared_ptr<Executable>& executable,
const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
const std::vector<std::shared_ptr<runtime::Tensor>>& inputs)
{
auto event = make_shared<AsyncEvent>(executable, outputs, inputs);
unique_lock<std::mutex> lock(m_event_queue_mutex);
m_event_queue.push_back(event);
m_event_queue_condition.notify_all();
return event->get_future();
}
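All three post_async_* methods above share one shape: wrap the request in an AsyncEvent that owns a std::promise<void>, append it to m_event_queue while holding m_event_queue_mutex, wake the worker with notify_all, and hand the caller the promise's future. A minimal standard-library-only sketch of that posting side (EventQueue and Event are illustrative names, not part of the ngraph API):

#include <condition_variable>
#include <deque>
#include <functional>
#include <future>
#include <memory>
#include <mutex>

// Illustrative sketch of the posting half of the queue above.
struct Event
{
    std::function<void()> work; // the deferred READ/WRITE/EXECUTE action
    std::promise<void> done;    // fulfilled by the worker once `work` finishes
};

class EventQueue
{
public:
    // Post a request and return a future the caller can block on later.
    std::future<void> post(std::function<void()> work)
    {
        auto event = std::make_shared<Event>();
        event->work = std::move(work);
        std::future<void> result = event->done.get_future();
        {
            std::unique_lock<std::mutex> lock(m_mutex);
            m_queue.push_back(event);
            m_condition.notify_all(); // wake the worker, as the code above does
        }
        return result;
    }

private:
    std::deque<std::shared_ptr<Event>> m_queue;
    std::mutex m_mutex;
    std::condition_variable m_condition;
};

The worker half that drains such a queue is sketched after async_thread_entry below.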
void runtime::Backend::async_thread_start()
{
if (!m_event_queue_active)
{
m_event_queue_active = true;
m_event_queue_thread =
unique_ptr<thread>(new thread(&runtime::Backend::async_thread_entry, this));
}
}
void runtime::Backend::async_thread_stop()
{
if (m_event_queue_active)
{
{
unique_lock<std::mutex> lock(m_event_queue_mutex);
m_event_queue_active = false;
m_event_queue_condition.notify_all();
}
m_event_queue_thread->join();
}
}
static void local_thread_entry(shared_ptr<runtime::Backend::AsyncEvent> event)
{
event->get_executable()->call(event->get_outputs(), event->get_inputs());
event->signal_result();
};
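local_thread_entry runs the compiled call on its own thread and then fulfils the event's promise through signal_result(). The contract behind that promise/future pair is the standard one: get_future may be called only once per promise, and the caller's future becomes ready only after set_value. A small standard-library illustration (not ngraph-specific; note the removed code detaches its worker thread instead of joining it):

#include <future>
#include <iostream>
#include <thread>

int main()
{
    std::promise<void> done;
    std::future<void> f = done.get_future(); // a second get_future() would throw

    // Do the work on another thread, then signal the promise -- the same role
    // signal_result() plays for AsyncEvent::m_promise.
    std::thread worker([&done] { done.set_value(); });

    f.wait(); // the posting side blocks here until the worker has signalled
    std::cout << "event completed\n";
    worker.join();
}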
void runtime::Backend::async_thread_process(const shared_ptr<AsyncEvent>& event)
{
switch (event->get_type())
{
case AsyncEvent::Type::READ:
event->get_tensor()->read(event->get_data(), event->get_size_in_bytes());
event->signal_result();
break;
case AsyncEvent::Type::WRITE:
event->get_tensor()->write(event->get_data(), event->get_size_in_bytes());
event->signal_result();
break;
case AsyncEvent::Type::EXECUTE:
{
std::thread(local_thread_entry, event).detach();
break;
}
}
}
void runtime::Backend::async_thread_entry()
{
unique_lock<std::mutex> lock(m_event_queue_mutex);
while (m_event_queue_active)
{
m_event_queue_condition.wait(lock);
while (!m_event_queue.empty())
{
async_thread_process(m_event_queue.front());
m_event_queue.pop_front();
}
}
}
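async_thread_entry above is the matching consumer: it holds m_event_queue_mutex except while blocked in wait(), drains the whole queue on every wakeup, and exits once async_thread_stop clears m_event_queue_active under the same mutex and notifies before joining. A condensed standard-library sketch of that loop (Worker and its members are illustrative names; unlike the code above it waits on a predicate, which also covers a task posted before the loop first blocks):

#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>
#include <thread>

// Illustrative worker that drains a task queue until asked to stop.
class Worker
{
public:
    Worker() : m_thread(&Worker::run, this) {}

    ~Worker()
    {
        {
            std::unique_lock<std::mutex> lock(m_mutex);
            m_active = false;
            m_condition.notify_all(); // let run() observe m_active == false
        }
        m_thread.join(); // same shape as async_thread_stop above
    }

    void post(std::function<void()> task)
    {
        std::unique_lock<std::mutex> lock(m_mutex);
        m_queue.push_back(std::move(task));
        m_condition.notify_all();
    }

private:
    void run()
    {
        std::unique_lock<std::mutex> lock(m_mutex);
        while (true)
        {
            m_condition.wait(lock, [this] { return !m_active || !m_queue.empty(); });
            while (!m_queue.empty())
            {
                auto task = std::move(m_queue.front());
                m_queue.pop_front();
                task(); // runs with the lock held, as async_thread_process does above
            }
            if (!m_active)
            {
                return;
            }
        }
    }

    bool m_active = true;
    std::deque<std::function<void()>> m_queue;
    std::mutex m_mutex;
    std::condition_variable m_condition;
    std::thread m_thread; // declared last so the members above are built first
};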
namespace ngraph
{
namespace runtime
{
ostream& operator<<(ostream& out, const ngraph::runtime::Backend::AsyncEvent& event)
{
out << "Async{";
switch (event.get_type())
{
case runtime::Backend::AsyncEvent::Type::READ:
out << "READ " << locale_string(event.get_size_in_bytes());
break;
case runtime::Backend::AsyncEvent::Type::WRITE:
out << "WRITE " << locale_string(event.get_size_in_bytes());
break;
case runtime::Backend::AsyncEvent::Type::EXECUTE: out << "EXECUTE"; break;
}
out << "}";
return out;
}
}
}
bool runtime::Backend::set_config(const map<string, string>& config, string& error)
{
error = "set_config not supported";
...
@@ -16,7 +16,6 @@
#pragma once
#include <future>
#include <memory>
#include "ngraph/function.hpp"
@@ -43,7 +42,6 @@ namespace ngraph
class ngraph::runtime::Backend
{
public:
Backend();
virtual ~Backend();
/// \brief Create a new Backend object
/// \param type The name of a registered backend, such as "CPU" or "GPU".
@@ -169,76 +167,4 @@ public:
/// \returns true if the configuration is supported, false otherwise. On false the error
/// parameter value is valid.
virtual bool set_config(const std::map<std::string, std::string>& config, std::string& error);
friend class ngraph::runtime::Tensor;
friend class ngraph::runtime::Executable;
class AsyncEvent
{
public:
enum class Type
{
READ,
WRITE,
EXECUTE
};
AsyncEvent(Type type,
const std::shared_ptr<Tensor>& tensor,
void* p,
size_t size_in_bytes,
size_t buffer_number);
AsyncEvent(const std::shared_ptr<Executable>& m_executable,
const std::vector<std::shared_ptr<runtime::Tensor>>& m_outputs,
const std::vector<std::shared_ptr<runtime::Tensor>>& m_inputs);
void* get_data() const { return m_data; }
size_t get_size_in_bytes() const { return m_size_in_bytes; }
Type get_type() const { return m_type; }
size_t get_buffer_number() const { return m_buffer_number; }
std::shared_ptr<Executable> get_executable() const { return m_executable; }
std::shared_ptr<Tensor> get_tensor() const { return m_tensor; }
const std::vector<std::shared_ptr<runtime::Tensor>>& get_outputs() const
{
return m_outputs;
}
const std::vector<std::shared_ptr<runtime::Tensor>>& get_inputs() const { return m_inputs; }
std::future<void> get_future() { return m_promise.get_future(); }
void signal_result() { m_promise.set_value(); }
friend std::ostream& operator<<(std::ostream& out, const AsyncEvent& event);
private:
const Type m_type;
size_t m_buffer_number;
void* m_data;
const size_t m_size_in_bytes;
std::shared_ptr<Executable> m_executable;
std::shared_ptr<Tensor> m_tensor;
std::vector<std::shared_ptr<runtime::Tensor>> m_outputs;
std::vector<std::shared_ptr<runtime::Tensor>> m_inputs;
std::promise<void> m_promise;
};
protected:
std::future<void> post_async_read_event(const std::shared_ptr<Tensor>& tensor,
void* p,
size_t size_in_bytes,
size_t buffer_number);
std::future<void> post_async_write_event(const std::shared_ptr<Tensor>& tensor,
const void* p,
size_t size_in_bytes,
size_t buffer_number);
std::future<void>
post_async_execute_event(const std::shared_ptr<Executable>& executable,
const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
const std::vector<std::shared_ptr<runtime::Tensor>>& inputs);
void async_thread_start();
void async_thread_stop();
void async_thread_process(const std::shared_ptr<AsyncEvent>& event);
void async_thread_entry();
std::deque<std::shared_ptr<AsyncEvent>> m_event_queue;
std::mutex m_event_queue_mutex;
std::condition_variable m_event_queue_condition;
std::unique_ptr<std::thread> m_event_queue_thread;
bool m_event_queue_active = false;
};
@@ -17,7 +17,6 @@
#include <sstream>
#include "ngraph/file_util.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/executable.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/util.hpp"
@@ -29,11 +28,6 @@ runtime::Executable::Executable()
{
}
runtime::Executable::Executable(const shared_ptr<Backend>& backend)
: m_backend{backend}
{
}
runtime::Executable::~Executable()
{
}
@@ -129,16 +123,3 @@ void runtime::Executable::save(std::ostream& output_stream)
{
throw runtime_error("save operation unimplemented.");
}
future<void> runtime::Executable::begin_execute(const vector<shared_ptr<runtime::Tensor>>& outputs,
const vector<shared_ptr<runtime::Tensor>>& inputs)
{
if (m_backend)
{
return m_backend->post_async_execute_event(shared_from_this(), outputs, inputs);
}
else
{
throw runtime_error("Async operations not supported for this backend");
}
}
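For reference, the calling convention of the begin_execute API removed here: the method returns a std::future<void> immediately, the caller overlaps other host work, then blocks on the future when it needs the iteration to have finished. A hypothetical caller sketch (exec, outputs, and inputs stand in for real objects; this illustrates the pre-revert API, it is not code from the repository):

#include <future>
#include <memory>
#include <vector>

#include "ngraph/runtime/executable.hpp"
#include "ngraph/runtime/tensor.hpp"

// Hypothetical use of the pre-revert API: start an iteration, overlap other
// host-side work, then wait for the backend to finish.
void run_one_iteration(const std::shared_ptr<ngraph::runtime::Executable>& exec,
                       const std::vector<std::shared_ptr<ngraph::runtime::Tensor>>& outputs,
                       const std::vector<std::shared_ptr<ngraph::runtime::Tensor>>& inputs)
{
    std::future<void> done = exec->begin_execute(outputs, inputs);

    // ... other host-side work can overlap with the execution here ...

    done.wait(); // blocks until the worker thread has run call(outputs, inputs)
}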
@@ -16,7 +16,6 @@
#pragma once
#include <future>
#include <memory>
#include "ngraph/function.hpp"
@@ -30,15 +29,13 @@
{
class Tensor;
class Executable;
class Backend;
}
}
class ngraph::runtime::Executable : public std::enable_shared_from_this<Executable>
class ngraph::runtime::Executable
{
public:
Executable();
Executable(const std::shared_ptr<Backend>& backend);
virtual ~Executable();
/// \param outputs vector of runtime::Tensor used as outputs
@@ -54,16 +51,6 @@ public:
bool call_with_validate(const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
const std::vector<std::shared_ptr<runtime::Tensor>>& inputs);
/// \brief Asynchronously executes a single iteration of the Function. The `future` is used
/// to monitor the execution.
/// \param outputs vector of runtime::Tensor used as outputs
/// \param inputs vector of runtime::Tensor used as inputs
/// \returns a valid std::future to monitor the execution. Use future.get() to get the results
/// or future.wait*() to wait for completion.
virtual std::future<void>
begin_execute(const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
const std::vector<std::shared_ptr<runtime::Tensor>>& inputs);
/// \brief Collect performance information gathered on a Function.
/// \returns Vector of PerformanceCounter information.
virtual std::vector<PerformanceCounter> get_performance_data() const;
@@ -95,5 +82,4 @@ protected:
private:
ngraph::ParameterVector m_parameters;
ngraph::ResultVector m_results;
std::shared_ptr<Backend> m_backend;
};
@@ -30,53 +30,10 @@ runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer,
const string& name)
: HostTensor(nullptr, element_type, shape, memory_pointer, name)
: runtime::Tensor(std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, name))
{
}
runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
const Shape& shape,
const string& name)
: HostTensor(nullptr, element_type, shape, nullptr, name)
{
}
runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type, const Shape& shape)
: HostTensor(nullptr, element_type, shape, nullptr, "")
{
}
runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer)
: HostTensor(nullptr, element_type, shape, memory_pointer, "")
{
}
runtime::HostTensor::HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const ngraph::element::Type& element_type,
const Shape& shape)
: HostTensor(backend, element_type, shape, nullptr, "")
{
}
runtime::HostTensor::HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer)
: HostTensor(backend, element_type, shape, memory_pointer, "")
{
}
runtime::HostTensor::HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer,
const std::string& name)
: runtime::Tensor(backend,
std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, name))
, m_allocated_buffer_pool(nullptr)
, m_aligned_buffer_pool(nullptr)
{
m_descriptor->set_tensor_layout(
std::make_shared<ngraph::descriptor::layout::DenseTensorLayout>(*m_descriptor));
@@ -100,6 +57,25 @@ runtime::HostTensor::HostTensor(const std::shared_ptr<ngraph::runtime::Backend>&
}
}
runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
const Shape& shape,
const string& name)
: HostTensor(element_type, shape, nullptr, name)
{
}
runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type, const Shape& shape)
: HostTensor(element_type, shape, nullptr, "")
{
}
runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer)
: HostTensor(element_type, shape, memory_pointer, "")
{
}
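The constructors restored above rely on C++11 constructor delegation: each convenience overload forwards to the four-argument HostTensor(element_type, shape, memory_pointer, name), so allocation and layout setup happen in exactly one place. A generic sketch of the same pattern (Buffer is an illustrative stand-in, not an ngraph class):

#include <cstddef>
#include <string>

// One "full" constructor does the real work; the others just forward to it.
class Buffer
{
public:
    Buffer(size_t size, void* memory, const std::string& name)
        : m_size{size}
        , m_memory{memory}
        , m_name{name}
    {
        // allocation / layout setup would happen here, exactly once
    }

    Buffer(size_t size, const std::string& name) : Buffer(size, nullptr, name) {}
    explicit Buffer(size_t size) : Buffer(size, nullptr, std::string()) {}

private:
    size_t m_size;
    void* m_memory;
    std::string m_name;
};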
runtime::HostTensor::~HostTensor()
{
if (m_allocated_buffer_pool != nullptr)
...
@@ -42,18 +42,6 @@ public:
const std::string& name);
HostTensor(const ngraph::element::Type& element_type, const Shape& shape);
HostTensor(const ngraph::element::Type& element_type, const Shape& shape, void* memory_pointer);
HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const ngraph::element::Type& element_type,
const Shape& shape);
HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer);
HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer,
const std::string& name);
virtual ~HostTensor() override;
char* get_data_ptr();
...
@@ -54,20 +54,20 @@ runtime::interpreter::INTBackend::INTBackend(const vector<string>& unsupported_o
shared_ptr<runtime::Tensor>
runtime::interpreter::INTBackend::create_tensor(const element::Type& type, const Shape& shape)
{
return make_shared<runtime::HostTensor>(shared_from_this(), type, shape);
return make_shared<runtime::HostTensor>(type, shape);
}
shared_ptr<runtime::Tensor> runtime::interpreter::INTBackend::create_tensor(
const element::Type& type, const Shape& shape, void* memory_pointer)
{
return make_shared<runtime::HostTensor>(shared_from_this(), type, shape, memory_pointer);
return make_shared<runtime::HostTensor>(type, shape, memory_pointer);
}
shared_ptr<runtime::Executable>
runtime::interpreter::INTBackend::compile(shared_ptr<Function> function,
bool enable_performance_collection)
{
return make_shared<INTExecutable>(shared_from_this(), function, enable_performance_collection);
return make_shared<INTExecutable>(function, enable_performance_collection);
}
bool runtime::interpreter::INTBackend::is_supported(const Node& node) const
...
@@ -38,8 +38,7 @@ namespace ngraph
}
}
class ngraph::runtime::interpreter::INTBackend : public Backend,
class ngraph::runtime::interpreter::INTBackend : public Backend
public std::enable_shared_from_this<INTBackend>
{
public:
INTBackend();
...
@@ -38,11 +38,9 @@ using namespace ngraph;
using descriptor::layout::DenseTensorLayout;
runtime::interpreter::INTExecutable::INTExecutable(const shared_ptr<runtime::Backend>& backend,
runtime::interpreter::INTExecutable::INTExecutable(const shared_ptr<Function>& function,
const shared_ptr<Function>& function,
bool enable_performance_collection)
: Executable{backend}
: m_is_compiled{true}
, m_is_compiled{true}
, m_performance_counters_enabled{enable_performance_collection}
{
m_function = clone_function(*function);
...
@@ -69,7 +69,6 @@
#include "ngraph/op/topk.hpp"
#include "ngraph/runtime/aligned_buffer.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/executable.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#ifdef INTERPRETER_USE_HYBRID
#include "ngraph/runtime/hybrid/op/function_call.hpp"
@@ -175,8 +174,7 @@ class ngraph::runtime::interpreter::INTExecutable : public Executable
friend class INTBackend;
public:
INTExecutable(const std::shared_ptr<runtime::Backend>& backend,
INTExecutable(const std::shared_ptr<Function>& function,
const std::shared_ptr<Function>& function,
bool enable_performance_collection = false);
bool call(const std::vector<std::shared_ptr<Tensor>>& outputs,
...
@@ -14,12 +14,10 @@
// limitations under the License.
//*****************************************************************************
#include <functional>
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/log.hpp"
#include "ngraph/runtime/aligned_buffer.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/type/element_type.hpp"
using namespace ngraph;
@@ -97,41 +95,3 @@ void runtime::Tensor::copy_from(const ngraph::runtime::Tensor& source)
source.read(buffer.get_ptr(), size);
write(buffer.get_ptr(), size);
}
future<void> runtime::Tensor::begin_write(const void* p, size_t size_in_bytes, size_t buffer_number)
{
if (m_backend)
{
// auto f = m_promise.get_future();
return m_backend->post_async_write_event(
shared_from_this(), p, size_in_bytes, buffer_number);
}
else
{
throw runtime_error("Async operations not supported for this backend");
}
// using namespace std::placeholders;
// auto f = m_promise.get_future();
// auto bound_f = bind(&Tensor::write, this, _1, _2, _3);
// async(bound_f, p, 0, n);
// return f;
}
future<void> runtime::Tensor::begin_read(void* p, size_t size_in_bytes, size_t buffer_number)
{
if (m_backend)
{
// auto f = m_promise.get_future();
return m_backend->post_async_read_event(
shared_from_this(), p, size_in_bytes, buffer_number);
}
else
{
throw runtime_error("Async operations not supported for this backend");
}
// using namespace std::placeholders;
// auto f = m_promise.get_future();
// auto bound_f = bind(&Tensor::read, this, _1, _2, _3);
// async(bound_f, p, 0, n);
// return f;
}
@@ -16,7 +16,6 @@
#pragma once
#include <future>
#include <memory>
#include <vector>
@@ -36,10 +35,8 @@ namespace ngraph
namespace runtime
{
class Tensor : public std::enable_shared_from_this<Tensor>
class Tensor
{
friend class Executable;
protected:
Tensor(const std::shared_ptr<ngraph::descriptor::Tensor>& descriptor)
: m_descriptor(descriptor)
@@ -47,14 +44,6 @@
{
}
Tensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const std::shared_ptr<ngraph::descriptor::Tensor>& descriptor)
: m_descriptor(descriptor)
, m_stale(true)
, m_backend{backend}
{
}
public:
virtual ~Tensor() {}
Tensor& operator=(const Tensor&) = default;
@@ -114,24 +103,6 @@
/// \param n Number of bytes to read, must be integral number of elements.
virtual void read(void* p, size_t n) const = 0;
/// \brief Write bytes into the tensor. The data buffer pointed to by `p` must
/// be kept live until after the future is signaled complete
/// \param p Pointer to source of data
/// \param size_in_bytes Number of bytes to write, must be integral number of elements.
/// \param buffer_number For double-buffering, which buffer to write.
/// \return std::future to track the operation
virtual std::future<void>
begin_write(const void* p, size_t size_in_bytes, size_t buffer_number);
/// \brief Read bytes from the tensor. The data buffer pointed to by `p` must
/// be kept live until after the future is signaled complete
/// \param p Pointer to destination for data
/// \param size_in_bytes Number of bytes to read, must be integral number of elements.
/// \param buffer_number For double-buffering, which buffer to read.
/// \return std::future to track the operation
virtual std::future<void>
begin_read(void* p, size_t size_in_bytes, size_t buffer_number);
/// \brief copy bytes directly from source to this tensor
/// \param source The source tensor
virtual void copy_from(const ngraph::runtime::Tensor& source);
@@ -161,8 +132,8 @@ protected:
protected:
std::shared_ptr<ngraph::descriptor::Tensor> m_descriptor;
bool m_stale;
std::promise<void> m_promise;
std::shared_ptr<ngraph::runtime::Backend> m_backend;
};
using TensorViewPtrs = std::vector<std::shared_ptr<Tensor>>;
}
}