Commit 63055ecd authored by Robert Kimball's avatar Robert Kimball

unit test

parent e485f1c0
...@@ -123,3 +123,18 @@ void runtime::Executable::save(std::ostream& output_stream) ...@@ -123,3 +123,18 @@ void runtime::Executable::save(std::ostream& output_stream)
{ {
throw runtime_error("save opertion unimplemented."); throw runtime_error("save opertion unimplemented.");
} }
// Synchronous body run on begin_execute's worker thread.
// \param outputs vector of runtime::Tensor used as outputs
// \param inputs vector of runtime::Tensor used as inputs
// \returns the success flag reported by call()
bool runtime::Executable::begin_execute_helper(const vector<shared_ptr<runtime::Tensor>>& outputs,
                                               const vector<shared_ptr<runtime::Tensor>>& inputs)
{
    // Forward directly to the blocking call(); nothing else to do here.
    return call(outputs, inputs);
}
// Launch an asynchronous execution of the compiled function.
// \param outputs vector of runtime::Tensor used as outputs
// \param inputs vector of runtime::Tensor used as inputs
// \returns a future that yields call()'s success flag on completion
future<bool> runtime::Executable::begin_execute(const vector<shared_ptr<runtime::Tensor>>& outputs,
                                                const vector<shared_ptr<runtime::Tensor>>& inputs)
{
    // launch::async forces the work onto its own thread immediately; the
    // default policy also permits launch::deferred, which would postpone
    // execution until future::get() — defeating the point of "begin".
    // The shared_ptr vectors are captured by value so the tensors stay
    // alive for the duration of the task.
    return async(launch::async,
                 [this, outputs, inputs] { return begin_execute_helper(outputs, inputs); });
}
...@@ -58,7 +58,7 @@ public: ...@@ -58,7 +58,7 @@ public:
/// \param inputs vector of runtime::Tensor used as inputs /// \param inputs vector of runtime::Tensor used as inputs
/// \returns a valid std::future to monitor the execution. Use future.get() to get the results /// \returns a valid std::future to monitor the execution. Use future.get() to get the results
/// or future.wait*() to wait for completion. /// or future.wait*() to wait for completion.
virtual std::future<bool>& virtual std::future<bool>
begin_execute(const std::vector<std::shared_ptr<runtime::Tensor>>& outputs, begin_execute(const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
const std::vector<std::shared_ptr<runtime::Tensor>>& inputs); const std::vector<std::shared_ptr<runtime::Tensor>>& inputs);
...@@ -90,8 +90,10 @@ protected: ...@@ -90,8 +90,10 @@ protected:
/// \param func The function with Results fully resolved. /// \param func The function with Results fully resolved.
void set_parameters_and_results(const Function& func); void set_parameters_and_results(const Function& func);
bool begin_execute_helper(const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
const std::vector<std::shared_ptr<runtime::Tensor>>& inputs);
private: private:
ngraph::ParameterVector m_parameters; ngraph::ParameterVector m_parameters;
ngraph::ResultVector m_results; ngraph::ResultVector m_results;
std::future<bool> m_future;
}; };
...@@ -14,10 +14,12 @@ ...@@ -14,10 +14,12 @@
// limitations under the License. // limitations under the License.
//***************************************************************************** //*****************************************************************************
#include "ngraph/runtime/tensor.hpp" #include <functional>
#include "ngraph/descriptor/layout/tensor_layout.hpp" #include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/log.hpp" #include "ngraph/log.hpp"
#include "ngraph/runtime/aligned_buffer.hpp" #include "ngraph/runtime/aligned_buffer.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/type/element_type.hpp" #include "ngraph/type/element_type.hpp"
using namespace ngraph; using namespace ngraph;
...@@ -95,3 +97,21 @@ void runtime::Tensor::copy_from(const ngraph::runtime::Tensor& source) ...@@ -95,3 +97,21 @@ void runtime::Tensor::copy_from(const ngraph::runtime::Tensor& source)
source.read(buffer.get_ptr(), 0, size); source.read(buffer.get_ptr(), 0, size);
write(buffer.get_ptr(), 0, size); write(buffer.get_ptr(), 0, size);
} }
// Begin an asynchronous write of n bytes from p into this tensor.
// \param p Pointer to the source buffer; must stay live until the future is ready.
// \param n Number of bytes to write.
// \returns a future that becomes ready when the write has completed.
future<void> runtime::Tensor::begin_write(const void* p, size_t n)
{
    // Return std::async's own future so completion of the task and the
    // caller's future are the same object. The previous promise-based
    // version had three defects: the promise was never satisfied (get()
    // hung forever), the discarded async future's destructor blocked —
    // making the call synchronous — and m_promise.get_future() throws
    // std::future_error on any second call. launch::async guarantees the
    // write starts without waiting for future::get().
    return async(launch::async, [this, p, n] { write(p, 0, n); });
}
// Begin an asynchronous read of n bytes from this tensor into p.
// \param p Pointer to the destination buffer; must stay live until the future is ready.
// \param n Number of bytes to read.
// \returns a future that becomes ready when the read has completed.
future<void> runtime::Tensor::begin_read(void* p, size_t n)
{
    // Same fix as begin_write: return the async task's future directly.
    // The old code handed back a future from m_promise that no code ever
    // fulfilled, discarded async()'s blocking future (serializing the
    // call), and would throw on a second get_future() from the shared
    // promise. launch::async ensures the read starts immediately.
    return async(launch::async, [this, p, n] { read(p, 0, n); });
}
...@@ -111,14 +111,14 @@ namespace ngraph ...@@ -111,14 +111,14 @@ namespace ngraph
/// \param p Pointer to source of data /// \param p Pointer to source of data
/// \param n Number of bytes to write, must be integral number of elements. /// \param n Number of bytes to write, must be integral number of elements.
/// \return std::future to track the operation /// \return std::future to track the operation
virtual std::future<bool> begin_write(const void* p, size_t n); virtual std::future<void> begin_write(const void* p, size_t n);
/// \brief Read bytes from the tensor. The data buffer pointed to by `p` must /// \brief Read bytes from the tensor. The data buffer pointed to by `p` must
/// be kept live until after the future is signaled complete /// be kept live until after the future is signaled complete
/// \param p Pointer to destination for data /// \param p Pointer to destination for data
/// \param n Number of bytes to read, must be integral number of elements. /// \param n Number of bytes to read, must be integral number of elements.
/// \return std::future to track the operation /// \return std::future to track the operation
virtual std::future<bool> begin_read(void* p, size_t n) const; virtual std::future<void> begin_read(void* p, size_t n);
/// \brief copy bytes directly from source to this tensor /// \brief copy bytes directly from source to this tensor
/// \param source The source tensor /// \param source The source tensor
...@@ -127,6 +127,7 @@ namespace ngraph ...@@ -127,6 +127,7 @@ namespace ngraph
protected: protected:
std::shared_ptr<ngraph::descriptor::Tensor> m_descriptor; std::shared_ptr<ngraph::descriptor::Tensor> m_descriptor;
bool m_stale; bool m_stale;
std::promise<void> m_promise;
}; };
using TensorViewPtrs = std::vector<std::shared_ptr<Tensor>>; using TensorViewPtrs = std::vector<std::shared_ptr<Tensor>>;
......
...@@ -31,6 +31,7 @@ set(SRC ...@@ -31,6 +31,7 @@ set(SRC
aligned_buffer.cpp aligned_buffer.cpp
all_close_f.cpp all_close_f.cpp
assertion.cpp assertion.cpp
async.cpp
bfloat16.cpp bfloat16.cpp
build_graph.cpp build_graph.cpp
builder_autobroadcast.cpp builder_autobroadcast.cpp
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <gtest/gtest.h>
#include "ngraph/op/add.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/util.hpp"
#include "util/test_tools.hpp"
using namespace ngraph;
using namespace std;
// Exercise Executable::begin_execute: launch an Add of two tensors
// asynchronously and verify both the completion flag and the results.
TEST(async, execute)
{
    Shape shape{100000};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Add>(A, B), ParameterVector{A, B});

    auto backend = runtime::Backend::create("INTERPRETER");

    vector<float> data(shape_size(shape), 2);
    vector<float> result_data(shape_size(shape), 0);

    // Create some tensors for input/output, wrapping the local buffers
    shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shape, data.data());
    shared_ptr<runtime::Tensor> b = backend->create_tensor(element::f32, shape, data.data());
    shared_ptr<runtime::Tensor> r = backend->create_tensor(element::f32, shape, result_data.data());

    auto handle = backend->compile(f);
    auto future = handle->begin_execute({r}, {a, b});
    // Check the completion flag instead of discarding it (the original
    // stored it in an unused local).
    EXPECT_TRUE(future.get());
    // Each element is 2 + 2; the original asserted 2, which is the input
    // value, not the sum.
    for (float x : result_data)
    {
        ASSERT_EQ(x, 4);
    }
}
// TODO: exercise runtime::Tensor::begin_read once the async tensor API
// is functional; currently an empty placeholder.
TEST(async, tensor_read)
{
}
// TODO: exercise runtime::Tensor::begin_write once the async tensor API
// is functional; currently an empty placeholder.
TEST(async, tensor_write)
{
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment