Commit 47a7e128 authored by Robert Kimball

wip

parent af73a4b2
......@@ -90,3 +90,11 @@ std::shared_ptr<runtime::Executable> runtime::Backend::load(istream& input_strea
{
    // Deserializing a saved Executable is not supported by the base Backend;
    // concrete backends must provide their own implementation.
    // Fixed typo in the error message: "opertion" -> "operation".
    throw runtime_error("load operation unimplemented.");
}
// Queue an asynchronous write of size_in_bytes bytes from p, fulfilling
// 'promise' when the transfer completes.
// Previously this was a silent no-op: 'promise' was never fulfilled, so any
// caller waiting on the associated future (see Tensor::begin_write) would
// block forever.  Throw instead, consistent with the other unimplemented
// base-class operations, until backends provide a real implementation.
void runtime::Backend::post_write(const void* p, size_t size_in_bytes, std::promise<void>& promise)
{
    (void)p;
    (void)size_in_bytes;
    (void)promise;
    throw std::runtime_error("post_write operation unimplemented.");
}
// Queue an asynchronous read of size_in_bytes bytes into p, fulfilling
// 'promise' when the transfer completes.
// Previously this was a silent no-op: 'promise' was never fulfilled, so any
// caller waiting on the associated future would block forever.  Throw
// instead, consistent with the other unimplemented base-class operations,
// until backends provide a real implementation.
void runtime::Backend::post_read(void* p, size_t size_in_bytes, std::promise<void>& promise)
{
    (void)p;
    (void)size_in_bytes;
    (void)promise;
    throw std::runtime_error("post_read operation unimplemented.");
}
......@@ -16,6 +16,7 @@
#pragma once
#include <future>
#include <memory>
#include "ngraph/function.hpp"
......@@ -139,4 +140,28 @@ public:
// \param op_name is the name of the backend specific op
// \returns a shared pointer to the op if found, else nullptr
virtual std::shared_ptr<ngraph::Node> get_backend_op(const std::string& op_name, ...);
protected:
friend class ngraph::runtime::Tensor;
void post_write(const void* p, size_t size_in_bytes, std::promise<void>& promise);
void post_read(void* p, size_t size_in_bytes, std::promise<void>& promise);
// Descriptor for a queued asynchronous tensor transfer request: records the
// host buffer, its length in bytes, and the transfer direction.
// NOTE(review): direction semantics (read vs write relative to the device)
// are inferred from naming -- confirm against the backend's queue consumer.
class ReadWriteInfo
{
public:
    ReadWriteInfo(void* p, size_t size, bool is_read)
        : m_data{p}
        , m_size_in_bytes{size}
        , m_is_read{is_read}
    {
    }
    bool is_read() const { return m_is_read; }
    bool is_write() const { return !is_read(); }
    void* get_ptr() const { return m_data; }
    // Fixed: was declared to return bool, which truncated the stored byte
    // count to 0/1 for any caller querying the transfer size.
    size_t get_size_in_bytes() const { return m_size_in_bytes; }
private:
    void* m_data;
    size_t m_size_in_bytes;
    bool m_is_read;
};
};
......@@ -93,25 +93,6 @@ protected:
bool begin_execute_helper(const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
const std::vector<std::shared_ptr<runtime::Tensor>>& inputs);
// Descriptor for a queued asynchronous tensor transfer request: records the
// host buffer, its length in bytes, and the transfer direction.
// NOTE(review): an identical class is also declared in the Backend header in
// this commit -- consider keeping a single shared definition.
class ReadWriteInfo
{
public:
    ReadWriteInfo(void* p, size_t size, bool is_read)
        : m_data{p}
        , m_size_in_bytes{size}
        , m_is_read{is_read}
    {
    }
    bool is_read() const { return m_is_read; }
    bool is_write() const { return !is_read(); }
    void* get_ptr() const { return m_data; }
    // Fixed: was declared to return bool, which truncated the stored byte
    // count to 0/1 for any caller querying the transfer size.
    size_t get_size_in_bytes() const { return m_size_in_bytes; }
private:
    void* m_data;
    size_t m_size_in_bytes;
    bool m_is_read;
};
private:
ngraph::ParameterVector m_parameters;
ngraph::ResultVector m_results;
......
......@@ -30,10 +30,53 @@ runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer,
const string& name)
: runtime::Tensor(std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, name))
: HostTensor(nullptr, element_type, shape, memory_pointer, name)
{
}
// Construct a named HostTensor with no backend association and an
// internally allocated buffer; delegates to the designated constructor
// with a null backend and null memory pointer.
runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
const Shape& shape,
const string& name)
: HostTensor(nullptr, element_type, shape, nullptr, name)
{
}
// Construct an unnamed HostTensor with no backend association and an
// internally allocated buffer.
runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type, const Shape& shape)
: HostTensor(nullptr, element_type, shape, nullptr, "")
{
}
// Construct an unnamed HostTensor with no backend association that wraps
// caller-provided memory (memory_pointer) instead of allocating its own.
runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer)
: HostTensor(nullptr, element_type, shape, memory_pointer, "")
{
}
// Construct an unnamed, backend-associated HostTensor with an internally
// allocated buffer; the backend handle enables async read/write routing.
runtime::HostTensor::HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const ngraph::element::Type& element_type,
const Shape& shape)
: HostTensor(backend, element_type, shape, nullptr, "")
{
}
// Construct an unnamed, backend-associated HostTensor that wraps
// caller-provided memory (memory_pointer) instead of allocating its own.
runtime::HostTensor::HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer)
: HostTensor(backend, element_type, shape, memory_pointer, "")
{
}
runtime::HostTensor::HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer,
const std::string& name)
: runtime::Tensor(backend,
std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, name))
, m_allocated_buffer_pool(nullptr)
, m_aligned_buffer_pool(nullptr)
{
m_descriptor->set_tensor_layout(
std::make_shared<ngraph::descriptor::layout::DenseTensorLayout>(*m_descriptor));
......@@ -57,25 +100,6 @@ runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
}
}
// Construct a named HostTensor delegating to the 4-argument constructor.
// NOTE(review): duplicates the (element_type, shape, name) constructor
// defined earlier in this file (pre-refactor residue) -- one of the two
// definitions must be removed.
runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
const Shape& shape,
const string& name)
: HostTensor(element_type, shape, nullptr, name)
{
}
// Construct an unnamed HostTensor with an internally allocated buffer.
// NOTE(review): duplicates the (element_type, shape) constructor defined
// earlier in this file (pre-refactor residue) -- one of the two
// definitions must be removed.
runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type, const Shape& shape)
: HostTensor(element_type, shape, nullptr, "")
{
}
// Construct an unnamed HostTensor wrapping caller-provided memory.
// NOTE(review): duplicates the (element_type, shape, memory_pointer)
// constructor defined earlier in this file (pre-refactor residue) -- one
// of the two definitions must be removed.
runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer)
: HostTensor(element_type, shape, memory_pointer, "")
{
}
runtime::HostTensor::~HostTensor()
{
if (m_allocated_buffer_pool != nullptr)
......
......@@ -42,6 +42,18 @@ public:
const std::string& name);
HostTensor(const ngraph::element::Type& element_type, const Shape& shape);
HostTensor(const ngraph::element::Type& element_type, const Shape& shape, void* memory_pointer);
HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const ngraph::element::Type& element_type,
const Shape& shape);
HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer);
HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer,
const std::string& name);
virtual ~HostTensor() override;
char* get_data_ptr();
......
......@@ -54,13 +54,13 @@ runtime::interpreter::INTBackend::INTBackend(const vector<string>& unsupported_o
// Create a backend-associated HostTensor; shared_from_this() hands the
// tensor a handle to this backend so async read/write requests can be
// routed back to it.
// Fixed: removed the stale pre-refactor return statement that executed
// first and made the shared_from_this() path unreachable.
shared_ptr<runtime::Tensor>
    runtime::interpreter::INTBackend::create_tensor(const element::Type& type, const Shape& shape)
{
    return make_shared<runtime::HostTensor>(shared_from_this(), type, shape);
}
// Create a backend-associated HostTensor that wraps caller-provided memory;
// shared_from_this() hands the tensor a handle to this backend for async
// read/write routing.
// Fixed: removed the stale pre-refactor return statement that executed
// first and made the shared_from_this() path unreachable.
shared_ptr<runtime::Tensor> runtime::interpreter::INTBackend::create_tensor(
    const element::Type& type, const Shape& shape, void* memory_pointer)
{
    return make_shared<runtime::HostTensor>(shared_from_this(), type, shape, memory_pointer);
}
shared_ptr<runtime::Executable>
......
......@@ -38,7 +38,8 @@ namespace ngraph
}
}
class ngraph::runtime::interpreter::INTBackend : public Backend
class ngraph::runtime::interpreter::INTBackend : public Backend,
public std::enable_shared_from_this<INTBackend>
{
public:
INTBackend();
......
......@@ -100,11 +100,21 @@ void runtime::Tensor::copy_from(const ngraph::runtime::Tensor& source)
// Begin an asynchronous write of n bytes from p into this tensor.  The
// request is handed to the backend, which fulfills m_promise when the
// transfer completes, readying the returned future.  Throws if the tensor
// has no backend capable of servicing async operations.
// Fixed: removed the stale synchronous std::async path that returned before
// the backend branch could run (unreachable code), the now-unused
// std::placeholders using-directive, and the commented-out copy of the old
// implementation.
// NOTE(review): std::promise::get_future may be called only once per
// promise -- a second begin_write on the same tensor will throw
// std::future_error; confirm m_promise is reset between operations.
future<void> runtime::Tensor::begin_write(const void* p, size_t n)
{
    if (m_backend)
    {
        auto f = m_promise.get_future();
        m_backend->post_write(p, n, m_promise);
        return f;
    }
    else
    {
        throw runtime_error("Async operations not supported for this backend");
    }
}
future<void> runtime::Tensor::begin_read(void* p, size_t n)
......
......@@ -47,6 +47,14 @@ namespace ngraph
{
}
// Construct a tensor bound to a backend so asynchronous read/write
// requests (begin_read/begin_write) can be forwarded to it.
// \param backend backend that services this tensor's async operations
//        (may be nullptr, in which case async operations throw)
// \param descriptor element type/shape descriptor for the tensor
Tensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const std::shared_ptr<ngraph::descriptor::Tensor>& descriptor)
: m_descriptor(descriptor)
, m_stale(true)
, m_backend{backend}
{
}
public:
virtual ~Tensor() {}
Tensor& operator=(const Tensor&) = default;
......@@ -130,6 +138,7 @@ namespace ngraph
std::shared_ptr<ngraph::descriptor::Tensor> m_descriptor;
bool m_stale;
std::promise<void> m_promise;
std::shared_ptr<ngraph::runtime::Backend> m_backend;
};
}
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment