diff --git a/src/ngraph/runtime/backend.cpp b/src/ngraph/runtime/backend.cpp
index 89c795ed4c29b74cf12c7a8f430ab769df50dd54..f1cbcaf1b0588509b2fa425177a84dbfc100d870 100644
--- a/src/ngraph/runtime/backend.cpp
+++ b/src/ngraph/runtime/backend.cpp
@@ -91,8 +91,12 @@ std::shared_ptr<runtime::Executable> runtime::Backend::load(istream& input_stream)
     throw runtime_error("load opertion unimplemented.");
 }
 
-runtime::Backend::AsyncEvent::AsyncEvent(Type type, void* p, size_t size_in_bytes)
+runtime::Backend::AsyncEvent::AsyncEvent(Type type,
+                                         size_t buffer_number,
+                                         void* p,
+                                         size_t size_in_bytes)
     : m_type{type}
+    , m_buffer_number{buffer_number}
     , m_data{p}
     , m_size_in_bytes{size_in_bytes}
     , m_executable{nullptr}
@@ -101,10 +105,12 @@ runtime::Backend::AsyncEvent::AsyncEvent(Type type, void* p, size_t size_in_bytes)
 {
 }
 
-runtime::Backend::AsyncEvent::AsyncEvent(const shared_ptr<Executable>& executable,
+runtime::Backend::AsyncEvent::AsyncEvent(size_t buffer_number,
+                                         const shared_ptr<Executable>& executable,
                                          const vector<shared_ptr<runtime::Tensor>>& outputs,
                                          const vector<shared_ptr<runtime::Tensor>>& inputs)
     : m_type{Type::EXECUTE}
+    , m_buffer_number{buffer_number}
     , m_data{nullptr}
     , m_size_in_bytes{0}
     , m_executable{executable}
@@ -115,12 +121,14 @@ runtime::Backend::AsyncEvent::AsyncEvent(const shared_ptr<Executable>& executable,
 
 void runtime::Backend::post_async_read_event(void* p,
                                              size_t size_in_bytes,
+                                             size_t buffer_number,
                                              std::promise<void>& promise)
 {
 }
 
 void runtime::Backend::post_async_write_event(const void* p,
                                               size_t size_in_bytes,
+                                              size_t buffer_number,
                                               std::promise<void>& promise)
 {
 }
@@ -129,6 +137,7 @@ void runtime::Backend::post_async_execute_event(
     const std::shared_ptr<Executable>& executable,
     const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
     const std::vector<std::shared_ptr<runtime::Tensor>>& inputs,
+    size_t buffer_number,
     std::promise<void>& promise)
 {
 }
diff --git a/src/ngraph/runtime/backend.hpp b/src/ngraph/runtime/backend.hpp
index 493f427e318efd2e022c4b91f26d856fb17f2bba..cf5edc8dfa6bbe6e93a1497b43ce54790fec2f4f 100644
--- a/src/ngraph/runtime/backend.hpp
+++ b/src/ngraph/runtime/backend.hpp
@@ -163,11 +163,13 @@ protected:
         }
         const std::vector<std::shared_ptr<runtime::Tensor>>* get_inputs() const { return m_inputs; }
     private:
-        AsyncEvent(Type, void* p, size_t size_in_bytes);
-        AsyncEvent(const std::shared_ptr<Executable>& m_executable,
+        AsyncEvent(Type type, size_t buffer_number, void* p, size_t size_in_bytes);
+        AsyncEvent(size_t buffer_number,
+                   const std::shared_ptr<Executable>& m_executable,
                    const std::vector<std::shared_ptr<runtime::Tensor>>& m_outputs,
                    const std::vector<std::shared_ptr<runtime::Tensor>>& m_inputs);
         const Type m_type;
+        size_t m_buffer_number;
         void* m_data;
         const size_t m_size_in_bytes;
         std::shared_ptr<Executable> m_executable;
@@ -175,10 +177,17 @@ protected:
         const std::vector<std::shared_ptr<runtime::Tensor>>* m_inputs;
     };
 
-    void post_async_read_event(void* p, size_t size_in_bytes, std::promise<void>& promise);
-    void post_async_write_event(const void* p, size_t size_in_bytes, std::promise<void>& promise);
+    void post_async_read_event(void* p,
+                               size_t size_in_bytes,
+                               size_t buffer_number,
+                               std::promise<void>& promise);
+    void post_async_write_event(const void* p,
+                                size_t size_in_bytes,
+                                size_t buffer_number,
+                                std::promise<void>& promise);
     void post_async_execute_event(const std::shared_ptr<Executable>& executable,
                                   const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
                                   const std::vector<std::shared_ptr<runtime::Tensor>>& inputs,
+                                  size_t buffer_number,
                                   std::promise<void>& promise);
 };
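For context, the `post_async_*` hooks are left empty here and are meant to be filled in by backends that own a worker thread draining a queue of `AsyncEvent`s, fulfilling each promise on completion. Below is a minimal, self-contained sketch of that queue pattern; `EventQueue` and `Event` are illustrative names, not part of the nGraph API, and a real backend would also dispatch on `AsyncEvent::Type` and use `buffer_number` to select the device-side half of a double-buffered tensor.

```cpp
#include <condition_variable>
#include <cstddef>
#include <future>
#include <mutex>
#include <queue>
#include <thread>

struct Event
{
    void* data;
    size_t size_in_bytes;
    size_t buffer_number; // which half of a double-buffered tensor
    std::promise<void>* promise;
};

class EventQueue
{
public:
    EventQueue()
        : m_worker{[this] { run(); }}
    {
    }
    ~EventQueue()
    {
        {
            std::lock_guard<std::mutex> lock{m_mutex};
            m_done = true;
        }
        m_cv.notify_one();
        m_worker.join();
    }
    // Analogous to the post_async_* hooks: enqueue and return immediately;
    // the worker signals the promise when the transfer completes.
    void post(void* p, size_t size_in_bytes, size_t buffer_number, std::promise<void>& promise)
    {
        {
            std::lock_guard<std::mutex> lock{m_mutex};
            m_events.push(Event{p, size_in_bytes, buffer_number, &promise});
        }
        m_cv.notify_one();
    }

private:
    void run()
    {
        for (;;)
        {
            Event event;
            {
                std::unique_lock<std::mutex> lock{m_mutex};
                m_cv.wait(lock, [this] { return m_done || !m_events.empty(); });
                if (m_done && m_events.empty())
                {
                    return;
                }
                event = m_events.front();
                m_events.pop();
            }
            // A real backend would copy to/from device buffer
            // `event.buffer_number` here before signaling completion.
            event.promise->set_value();
        }
    }
    std::mutex m_mutex;
    std::condition_variable m_cv;
    std::queue<Event> m_events;
    bool m_done = false;
    std::thread m_worker;
};
```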
diff --git a/src/ngraph/runtime/tensor.cpp b/src/ngraph/runtime/tensor.cpp
index bedb7edad94a78a414e209efec457b41ecf2be47..143cf8b5f4ad6a6287eed39583bca463b99eb5fc 100644
--- a/src/ngraph/runtime/tensor.cpp
+++ b/src/ngraph/runtime/tensor.cpp
@@ -98,12 +98,12 @@ void runtime::Tensor::copy_from(const ngraph::runtime::Tensor& source)
     write(buffer.get_ptr(), 0, size);
 }
 
-future<void> runtime::Tensor::begin_write(const void* p, size_t n)
+future<void> runtime::Tensor::begin_write(const void* p, size_t size_in_bytes, size_t buffer_number)
 {
     if (m_backend)
     {
         auto f = m_promise.get_future();
-        m_backend->post_async_write_event(p, n, m_promise);
+        m_backend->post_async_write_event(p, size_in_bytes, buffer_number, m_promise);
         return f;
     }
     else
@@ -117,11 +117,21 @@ future<void> runtime::Tensor::begin_write(const void* p, size_t n)
     // return f;
 }
 
-future<void> runtime::Tensor::begin_read(void* p, size_t n)
+future<void> runtime::Tensor::begin_read(void* p, size_t size_in_bytes, size_t buffer_number)
 {
-    using namespace std::placeholders;
-    auto f = m_promise.get_future();
-    auto bound_f = bind(&Tensor::read, this, _1, _2, _3);
-    async(bound_f, p, 0, n);
-    return f;
+    if (m_backend)
+    {
+        auto f = m_promise.get_future();
+        m_backend->post_async_read_event(p, size_in_bytes, buffer_number, m_promise);
+        return f;
+    }
+    else
+    {
+        throw runtime_error("Async operations not supported for this backend");
+    }
+    // using namespace std::placeholders;
+    // auto f = m_promise.get_future();
+    // auto bound_f = bind(&Tensor::read, this, _1, _2, _3);
+    // async(bound_f, p, 0, n);
+    // return f;
 }
diff --git a/src/ngraph/runtime/tensor.hpp b/src/ngraph/runtime/tensor.hpp
index 103f6265d2890a8dae37daa7edbc33dd232f13f6..0c8af07e0c319b97a831c71ada62c12f093a3af7 100644
--- a/src/ngraph/runtime/tensor.hpp
+++ b/src/ngraph/runtime/tensor.hpp
@@ -119,16 +119,20 @@ namespace ngraph
             /// \brief Write bytes into the tensor. The data buffer pointed to by `p` must
             ///        be kept live until after the future is signaled complete
             /// \param p Pointer to source of data
-            /// \param n Number of bytes to write, must be integral number of elements.
+            /// \param size_in_bytes Number of bytes to write, must be integral number of elements.
+            /// \param buffer_number For double-buffering, which buffer to write.
             /// \return std::future to track the operation
-            virtual std::future<void> begin_write(const void* p, size_t n);
+            virtual std::future<void>
+                begin_write(const void* p, size_t size_in_bytes, size_t buffer_number);
 
             /// \brief Read bytes from the tensor. The data buffer pointed to by `p` must
             ///        be kept live until after the future is signaled complete
             /// \param p Pointer to destination for data
-            /// \param n Number of bytes to read, must be integral number of elements.
+            /// \param size_in_bytes Number of bytes to read, must be integral number of elements.
+            /// \param buffer_number For double-buffering, which buffer to read.
             /// \return std::future to track the operation
-            virtual std::future<void> begin_read(void* p, size_t n);
+            virtual std::future<void>
+                begin_read(void* p, size_t size_in_bytes, size_t buffer_number);
 
             /// \brief copy bytes directly from source to this tensor
             /// \param source The source tensor
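The `buffer_number` argument exists so a caller can fill one half of a device tensor while the device consumes the other. A caller-side sketch of that ping-pong pattern follows. It assumes a backend that implements the `post_async_*` hooks and that each `begin_write` call can hand out a fresh future; note the single `m_promise` member above can only serve one `get_future()`, so this reflects where the patch appears headed, not current behavior. `pipelined_upload` is an illustrative name, not nGraph API; the template parameter stands in for `ngraph::runtime::Tensor`.

```cpp
#include <cstddef>
#include <future>
#include <vector>

// `TensorLike` is any type exposing the begin_write signature from this patch.
template <typename TensorLike>
void pipelined_upload(TensorLike& tensor, const std::vector<std::vector<float>>& batches)
{
    std::future<void> in_flight; // transfer into the buffer filled last
    for (size_t i = 0; i < batches.size(); ++i)
    {
        const auto& batch = batches[i];
        size_t buffer_number = i % 2; // ping-pong between the two halves
        auto f = tensor.begin_write(batch.data(), batch.size() * sizeof(float), buffer_number);
        if (in_flight.valid())
        {
            in_flight.get(); // the other buffer is now free for reuse
        }
        in_flight = std::move(f);
    }
    if (in_flight.valid())
    {
        in_flight.get();
    }
}
```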
diff --git a/test/async.cpp b/test/async.cpp
index 9824425057e90ce556e4906a5124b81dc06ffa2e..8256e8be1939ac4ddca3d6b252ca67b4571bbbb7 100644
--- a/test/async.cpp
+++ b/test/async.cpp
@@ -70,9 +70,9 @@ TEST(async, tensor_read_write)
     shared_ptr<runtime::Tensor> b = backend->create_tensor(element::f32, shape);
     shared_ptr<runtime::Tensor> r = backend->create_tensor(element::f32, shape);
 
-    auto future_a = a->begin_write(data.data(), data.size() * sizeof(float));
-    auto future_b = b->begin_write(data.data(), data.size() * sizeof(float));
-    auto future_r = r->begin_read(data_r.data(), data_r.size() * sizeof(float));
+    auto future_a = a->begin_write(data.data(), data.size() * sizeof(float), 0);
+    auto future_b = b->begin_write(data.data(), data.size() * sizeof(float), 0);
+    auto future_r = r->begin_read(data_r.data(), data_r.size() * sizeof(float), 0);
     ASSERT_TRUE(future_a.valid());
     ASSERT_TRUE(future_b.valid());
     ASSERT_TRUE(future_r.valid());
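The updated test pins every call to buffer index 0, so the second buffer is never exercised. A hypothetical follow-on test could cover the ping-pong case; the sketch below is not part of this patch, the `"INTERPRETER"` backend and fixture sizes are placeholders, and it further assumes the promise-per-call fix noted earlier (with the current single `m_promise`, a second `begin_*` call on the same tensor throws `std::future_error`).

```cpp
// Hypothetical follow-on test, not in this patch: write both halves of a
// double-buffered tensor, then read buffer 1 back and verify the round trip.
TEST(async, tensor_double_buffer)
{
    Shape shape{100};
    auto backend = runtime::Backend::create("INTERPRETER"); // placeholder backend
    vector<float> data(shape_size(shape), 2.0f);
    vector<float> data_r(shape_size(shape), 0.0f);
    shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shape);

    // stage buffer 0, then buffer 1, so a device could drain one half
    // while the host refills the other
    auto future_w0 = a->begin_write(data.data(), data.size() * sizeof(float), 0);
    future_w0.get();
    auto future_w1 = a->begin_write(data.data(), data.size() * sizeof(float), 1);
    future_w1.get();

    auto future_r1 = a->begin_read(data_r.data(), data_r.size() * sizeof(float), 1);
    future_r1.get();
    EXPECT_EQ(data, data_r);
}
```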