Commit 47a7e128 authored by Robert Kimball

wip

parent af73a4b2
...@@ -90,3 +90,11 @@ std::shared_ptr<runtime::Executable> runtime::Backend::load(istream& input_strea ...@@ -90,3 +90,11 @@ std::shared_ptr<runtime::Executable> runtime::Backend::load(istream& input_strea
{ {
throw runtime_error("load opertion unimplemented."); throw runtime_error("load opertion unimplemented.");
} }
// Default backend implementation of an asynchronous host-to-device write.
// \param p source buffer the caller wants written (unused here)
// \param size_in_bytes number of bytes requested (unused here)
// \param promise completion channel; the caller holds the matching future
//
// The base Backend does not support asynchronous transfers, and this method
// is non-virtual, so an empty body would leave the promise unfulfilled and
// any caller blocking on the associated future would wait forever. Fulfill
// the promise with an exception so the future becomes ready and reports the
// failure instead.
void runtime::Backend::post_write(const void* p, size_t size_in_bytes, std::promise<void>& promise)
{
    (void)p;             // no transfer is performed by the base class
    (void)size_in_bytes;
    promise.set_exception(
        std::make_exception_ptr(std::runtime_error("post_write operation unimplemented.")));
}
// Default backend implementation of an asynchronous device-to-host read.
// \param p destination buffer the caller wants filled (unused here)
// \param size_in_bytes number of bytes requested (unused here)
// \param promise completion channel; the caller holds the matching future
//
// The base Backend does not support asynchronous transfers, and this method
// is non-virtual, so an empty body would leave the promise unfulfilled and
// any caller blocking on the associated future would wait forever. Fulfill
// the promise with an exception so the future becomes ready and reports the
// failure instead.
void runtime::Backend::post_read(void* p, size_t size_in_bytes, std::promise<void>& promise)
{
    (void)p;             // no transfer is performed by the base class
    (void)size_in_bytes;
    promise.set_exception(
        std::make_exception_ptr(std::runtime_error("post_read operation unimplemented.")));
}
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#pragma once #pragma once
#include <future>
#include <memory> #include <memory>
#include "ngraph/function.hpp" #include "ngraph/function.hpp"
...@@ -139,4 +140,28 @@ public: ...@@ -139,4 +140,28 @@ public:
// \param op_name is the name of the backend specific op // \param op_name is the name of the backend specific op
// \returns a shared pointer to the op if found, else nullptr // \returns a shared pointer to the op if found, else nullptr
virtual std::shared_ptr<ngraph::Node> get_backend_op(const std::string& op_name, ...); virtual std::shared_ptr<ngraph::Node> get_backend_op(const std::string& op_name, ...);
protected:
friend class ngraph::runtime::Tensor;
void post_write(const void* p, size_t size_in_bytes, std::promise<void>& promise);
void post_read(void* p, size_t size_in_bytes, std::promise<void>& promise);
// Descriptor for a pending asynchronous transfer request: records the host
// buffer, the transfer length in bytes, and the transfer direction.
class ReadWriteInfo
{
public:
    // \param p host buffer involved in the transfer
    // \param size transfer length in bytes
    // \param is_read true for a read request, false for a write request
    ReadWriteInfo(void* p, size_t size, bool is_read)
        : m_data{p}
        , m_size_in_bytes{size}
        , m_is_read{is_read}
    {
    }
    bool is_read() const { return m_is_read; }
    bool is_write() const { return !is_read(); }
    void* get_ptr() const { return m_data; }
    // Bug fix: this accessor previously returned bool, which truncated the
    // stored byte count to 0/1; return the actual size_t value.
    size_t get_size_in_bytes() const { return m_size_in_bytes; }
private:
    void* m_data;
    size_t m_size_in_bytes;
    bool m_is_read;
};
}; };
...@@ -93,25 +93,6 @@ protected: ...@@ -93,25 +93,6 @@ protected:
bool begin_execute_helper(const std::vector<std::shared_ptr<runtime::Tensor>>& outputs, bool begin_execute_helper(const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
const std::vector<std::shared_ptr<runtime::Tensor>>& inputs); const std::vector<std::shared_ptr<runtime::Tensor>>& inputs);
// Descriptor for a pending asynchronous transfer request: records the host
// buffer, the transfer length in bytes, and the transfer direction.
class ReadWriteInfo
{
public:
    // \param p host buffer involved in the transfer
    // \param size transfer length in bytes
    // \param is_read true for a read request, false for a write request
    ReadWriteInfo(void* p, size_t size, bool is_read)
        : m_data{p}
        , m_size_in_bytes{size}
        , m_is_read{is_read}
    {
    }
    bool is_read() const { return m_is_read; }
    bool is_write() const { return !is_read(); }
    void* get_ptr() const { return m_data; }
    // Bug fix: this accessor previously returned bool, which truncated the
    // stored byte count to 0/1; return the actual size_t value.
    size_t get_size_in_bytes() const { return m_size_in_bytes; }
private:
    void* m_data;
    size_t m_size_in_bytes;
    bool m_is_read;
};
private: private:
ngraph::ParameterVector m_parameters; ngraph::ParameterVector m_parameters;
ngraph::ResultVector m_results; ngraph::ResultVector m_results;
......
...@@ -30,10 +30,53 @@ runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type, ...@@ -30,10 +30,53 @@ runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
const Shape& shape, const Shape& shape,
void* memory_pointer, void* memory_pointer,
const string& name) const string& name)
: runtime::Tensor(std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, name)) : HostTensor(nullptr, element_type, shape, memory_pointer, name)
{
}
// Construct a named HostTensor with internally allocated storage.
// Delegates to the backend-aware constructor with no backend (nullptr)
// and no caller-supplied memory pointer.
runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
const Shape& shape,
const string& name)
: HostTensor(nullptr, element_type, shape, nullptr, name)
{
}
// Construct an unnamed HostTensor with internally allocated storage.
// Delegates to the backend-aware constructor with no backend (nullptr),
// no memory pointer, and an empty name.
runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type, const Shape& shape)
: HostTensor(nullptr, element_type, shape, nullptr, "")
{
}
// Construct an unnamed HostTensor over caller-supplied memory.
// Delegates to the backend-aware constructor with no backend (nullptr) and
// an empty name. NOTE(review): presumably the caller retains ownership of
// memory_pointer — confirm against the allocating path in the main ctor.
runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer)
: HostTensor(nullptr, element_type, shape, memory_pointer, "")
{
}
// Construct an unnamed, backend-associated HostTensor with internally
// allocated storage. Delegates to the fully general constructor with no
// memory pointer and an empty name.
runtime::HostTensor::HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const ngraph::element::Type& element_type,
const Shape& shape)
: HostTensor(backend, element_type, shape, nullptr, "")
{
}
// Construct an unnamed, backend-associated HostTensor over caller-supplied
// memory. Delegates to the fully general constructor with an empty name.
runtime::HostTensor::HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer)
: HostTensor(backend, element_type, shape, memory_pointer, "")
{
}
runtime::HostTensor::HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer,
const std::string& name)
: runtime::Tensor(backend,
std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, name))
, m_allocated_buffer_pool(nullptr) , m_allocated_buffer_pool(nullptr)
, m_aligned_buffer_pool(nullptr) , m_aligned_buffer_pool(nullptr)
{ {
m_descriptor->set_tensor_layout( m_descriptor->set_tensor_layout(
std::make_shared<ngraph::descriptor::layout::DenseTensorLayout>(*m_descriptor)); std::make_shared<ngraph::descriptor::layout::DenseTensorLayout>(*m_descriptor));
...@@ -57,25 +100,6 @@ runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type, ...@@ -57,25 +100,6 @@ runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
} }
} }
// Construct a named HostTensor with internally allocated storage.
// Delegates to the (element_type, shape, memory_pointer, name) constructor
// with no caller-supplied memory pointer.
runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
const Shape& shape,
const string& name)
: HostTensor(element_type, shape, nullptr, name)
{
}
// Construct an unnamed HostTensor with internally allocated storage.
// Delegates to the (element_type, shape, memory_pointer, name) constructor
// with no memory pointer and an empty name.
runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type, const Shape& shape)
: HostTensor(element_type, shape, nullptr, "")
{
}
// Construct an unnamed HostTensor over caller-supplied memory.
// Delegates to the (element_type, shape, memory_pointer, name) constructor
// with an empty name.
runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer)
: HostTensor(element_type, shape, memory_pointer, "")
{
}
runtime::HostTensor::~HostTensor() runtime::HostTensor::~HostTensor()
{ {
if (m_allocated_buffer_pool != nullptr) if (m_allocated_buffer_pool != nullptr)
......
...@@ -42,6 +42,18 @@ public: ...@@ -42,6 +42,18 @@ public:
const std::string& name); const std::string& name);
HostTensor(const ngraph::element::Type& element_type, const Shape& shape); HostTensor(const ngraph::element::Type& element_type, const Shape& shape);
HostTensor(const ngraph::element::Type& element_type, const Shape& shape, void* memory_pointer); HostTensor(const ngraph::element::Type& element_type, const Shape& shape, void* memory_pointer);
// Backend-aware constructors: the supplied backend is forwarded to the base
// runtime::Tensor so asynchronous read/write operations can be routed to it.
// Construct with internally allocated storage and an empty name.
HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const ngraph::element::Type& element_type,
const Shape& shape);
// As above, but wrap caller-supplied memory instead of allocating.
// NOTE(review): presumably the caller retains ownership of memory_pointer —
// confirm against the implementation.
HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer);
// Fully general form: backend, element type, shape, optional external
// memory, and tensor name.
HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer,
const std::string& name);
virtual ~HostTensor() override; virtual ~HostTensor() override;
char* get_data_ptr(); char* get_data_ptr();
......
...@@ -54,13 +54,13 @@ runtime::interpreter::INTBackend::INTBackend(const vector<string>& unsupported_o ...@@ -54,13 +54,13 @@ runtime::interpreter::INTBackend::INTBackend(const vector<string>& unsupported_o
// Create an interpreter-backend tensor with internally allocated storage.
// Passes shared_from_this() so the HostTensor can route asynchronous
// reads/writes back to this backend.
// NOTE(review): shared_from_this() requires that this INTBackend instance is
// already owned by a std::shared_ptr, otherwise the behavior is undefined
// (pre-C++17) or throws std::bad_weak_ptr — confirm all creation paths use
// shared_ptr ownership.
shared_ptr<runtime::Tensor>
    runtime::interpreter::INTBackend::create_tensor(const element::Type& type, const Shape& shape)
{
    return make_shared<runtime::HostTensor>(shared_from_this(), type, shape);
}
// Create an interpreter-backend tensor wrapping caller-supplied memory.
// Passes shared_from_this() so the HostTensor can route asynchronous
// reads/writes back to this backend.
// NOTE(review): shared_from_this() requires shared_ptr ownership of this
// backend instance — confirm all creation paths.
shared_ptr<runtime::Tensor> runtime::interpreter::INTBackend::create_tensor(
    const element::Type& type, const Shape& shape, void* memory_pointer)
{
    return make_shared<runtime::HostTensor>(shared_from_this(), type, shape, memory_pointer);
}
shared_ptr<runtime::Executable> shared_ptr<runtime::Executable>
......
...@@ -38,7 +38,8 @@ namespace ngraph ...@@ -38,7 +38,8 @@ namespace ngraph
} }
} }
class ngraph::runtime::interpreter::INTBackend : public Backend class ngraph::runtime::interpreter::INTBackend : public Backend,
public std::enable_shared_from_this<INTBackend>
{ {
public: public:
INTBackend(); INTBackend();
......
...@@ -100,11 +100,21 @@ void runtime::Tensor::copy_from(const ngraph::runtime::Tensor& source) ...@@ -100,11 +100,21 @@ void runtime::Tensor::copy_from(const ngraph::runtime::Tensor& source)
// Begin an asynchronous write of n bytes from p into this tensor.
// The request is delegated to the owning backend via post_write; the
// returned future becomes ready when the backend completes (or fails)
// the transfer.
// \param p source buffer
// \param n number of bytes to write
// \returns a future tied to m_promise
// \throws runtime_error if this tensor has no associated backend
// NOTE(review): m_promise is a member and std::promise::get_future may only
// be called once per promise — a second begin_write on the same tensor will
// throw std::future_error; confirm the intended usage pattern.
future<void> runtime::Tensor::begin_write(const void* p, size_t n)
{
    if (m_backend)
    {
        auto f = m_promise.get_future();
        m_backend->post_write(p, n, m_promise);
        return f;
    }
    else
    {
        throw runtime_error("Async operations not supported for this backend");
    }
}
future<void> runtime::Tensor::begin_read(void* p, size_t n) future<void> runtime::Tensor::begin_read(void* p, size_t n)
......
...@@ -47,6 +47,14 @@ namespace ngraph ...@@ -47,6 +47,14 @@ namespace ngraph
{ {
} }
// Protected constructor for backend-aware tensor implementations.
// \param backend the backend this tensor is associated with; stored in
//        m_backend and used by the asynchronous begin_write/begin_read
//        paths (when null, those paths raise an error)
// \param descriptor shared descriptor carrying element type, shape and name
// The tensor starts out stale (m_stale = true).
Tensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
const std::shared_ptr<ngraph::descriptor::Tensor>& descriptor)
: m_descriptor(descriptor)
, m_stale(true)
, m_backend{backend}
{
}
public: public:
virtual ~Tensor() {} virtual ~Tensor() {}
Tensor& operator=(const Tensor&) = default; Tensor& operator=(const Tensor&) = default;
...@@ -130,6 +138,7 @@ namespace ngraph ...@@ -130,6 +138,7 @@ namespace ngraph
std::shared_ptr<ngraph::descriptor::Tensor> m_descriptor; std::shared_ptr<ngraph::descriptor::Tensor> m_descriptor;
bool m_stale; bool m_stale;
std::promise<void> m_promise; std::promise<void> m_promise;
std::shared_ptr<ngraph::runtime::Backend> m_backend;
}; };
} }
} }
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment