Commit 0dbeb06e authored by Robert Kimball's avatar Robert Kimball

wip

parent f2a93568
......@@ -123,3 +123,49 @@ void runtime::Executable::save(std::ostream& output_stream)
{
throw runtime_error("save opertion unimplemented.");
}
vector<shared_ptr<runtime::Tensor>> runtime::Executable::create_input_tensor(size_t input_number,
                                                                             size_t pipeline_depth)
{
    vector<shared_ptr<runtime::Tensor>> tensors;
    // WIP: pending Executable gaining a backend pointer. Intended behavior:
    // if (m_backend)
    // {
    //     const ParameterVector& parameters = get_parameters();
    //     if (input_number >= parameters.size())
    //     {
    //         throw runtime_error("create_input_tensor input_number out of bounds");
    //     }
    //     shared_ptr<op::Parameter> parameter = parameters[input_number];
    //     // one tensor per pipeline stage
    //     for (size_t i = 0; i < pipeline_depth; i++)
    //     {
    //         shared_ptr<runtime::Tensor> tensor = m_backend->create_tensor(
    //             parameter->get_element_type(), parameter->get_shape());
    //         tensor->m_source_node = parameter;
    //         tensors.push_back(tensor);
    //     }
    // }
    // else
    // {
    //     throw runtime_error("Backend does not support Executable::create_input_tensor");
    // }
    return tensors;
}
vector<shared_ptr<runtime::Tensor>> runtime::Executable::create_output_tensor(size_t output_number,
                                                                              size_t pipeline_depth)
{
    vector<shared_ptr<runtime::Tensor>> tensors;
    // WIP: pending Executable gaining a backend pointer. Intended behavior:
    // if (m_backend)
    // {
    //     const ResultVector& results = get_results();
    //     if (output_number >= results.size())
    //     {
    //         throw runtime_error("create_output_tensor output_number out of bounds");
    //     }
    //     shared_ptr<op::Result> result = results[output_number];
    //     // one tensor per pipeline stage
    //     for (size_t i = 0; i < pipeline_depth; i++)
    //     {
    //         shared_ptr<runtime::Tensor> tensor = m_backend->create_tensor(
    //             result->get_element_type(), result->get_shape());
    //         tensor->m_source_node = result;
    //         tensors.push_back(tensor);
    //     }
    // }
    // else
    // {
    //     throw runtime_error("Backend does not support Executable::create_output_tensor");
    // }
    return tensors;
}
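A hedged usage sketch, not part of this commit: once a backend implements these methods, a caller could request one tensor set per pipeline stage. The Function f (two parameters, one result) is assumed here; call_with_validate is the existing synchronous Executable entry point.
// Sketch: two pipeline stages so one tensor set can be refilled while the
// other is in flight. Indices are positional (Parameter 0 and 1, Result 0).
auto backend = runtime::Backend::create("INTERPRETER");
auto handle = backend->compile(f);
const size_t pipeline_depth = 2;
auto a_set = handle->create_input_tensor(0, pipeline_depth);  // tensors for Parameter 0
auto b_set = handle->create_input_tensor(1, pipeline_depth);  // tensors for Parameter 1
auto r_set = handle->create_output_tensor(0, pipeline_depth); // tensors for Result 0
for (size_t stage = 0; stage < pipeline_depth; stage++)
{
    // fill a_set[stage] / b_set[stage], then run that stage
    handle->call_with_validate({r_set[stage]}, {a_set[stage], b_set[stage]});
}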
......@@ -73,6 +73,11 @@ public:
    /// Saved stream may be read with Backend::load
    virtual void save(std::ostream& output_stream);

    /// \brief Create a vector of pipeline_depth tensors for the Parameter at input_number
    virtual std::vector<std::shared_ptr<runtime::Tensor>> create_input_tensor(size_t input_number,
                                                                              size_t pipeline_depth = 1);

    /// \brief Create a vector of pipeline_depth tensors for the Result at output_number
    virtual std::vector<std::shared_ptr<runtime::Tensor>> create_output_tensor(size_t output_number,
                                                                               size_t pipeline_depth = 1);
protected:
    /// \brief Called at the end of compile to set the values to be returned by get_parameters
    ///        and get_results
......
......@@ -95,7 +95,6 @@ set_source_files_properties(includes.cpp PROPERTIES COMPILE_DEFINITIONS
if (NGRAPH_INTERPRETER_ENABLE)
list(APPEND SRC
async.cpp
backend_debug_api.cpp
builder.cpp
backend_api.cpp)
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <chrono>
#include <future>
#include <gtest/gtest.h>
#include "ngraph/op/add.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/util.hpp"
#include "util/test_tools.hpp"
using namespace ngraph;
using namespace std;
TEST(async, execute)
{
    Shape shape{100000};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Add>(A, B), ParameterVector{A, B});

    auto backend = runtime::Backend::create("INTERPRETER");

    vector<float> data(shape_size(shape), 2);
    vector<float> result_data(shape_size(shape), 0);

    // Create some tensors for input/output
    shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shape, data.data());
    shared_ptr<runtime::Tensor> b = backend->create_tensor(element::f32, shape, data.data());
    shared_ptr<runtime::Tensor> r = backend->create_tensor(element::f32, shape, result_data.data());

    auto handle = backend->compile(f);
    auto future = handle->begin_execute({r}, {a, b});
    ASSERT_TRUE(future.valid());
    future.get();

    for (float x : result_data)
    {
        ASSERT_EQ(x, 4);
    }
}
TEST(async, tensor_read_write)
{
    chrono::milliseconds hundred_ms(100);
    Shape shape{100000};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Add>(A, B), ParameterVector{A, B});

    auto backend = runtime::Backend::create("INTERPRETER");
    auto handle = backend->compile(f);

    vector<float> data(shape_size(shape), 2);
    vector<float> data_r(shape_size(shape), 0);

    // Create some tensors for input/output
    shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shape);
    shared_ptr<runtime::Tensor> b = backend->create_tensor(element::f32, shape);
    shared_ptr<runtime::Tensor> r = backend->create_tensor(element::f32, shape);

    auto future_a = a->begin_write(data.data(), data.size() * sizeof(float), 0);
    auto future_b = b->begin_write(data.data(), data.size() * sizeof(float), 0);
    ASSERT_TRUE(future_a.valid());
    ASSERT_TRUE(future_b.valid());

    auto future = handle->begin_execute({r}, {a, b});
    // get() waits for the result to be ready
    future.get();

    auto future_r = r->begin_read(data_r.data(), data_r.size() * sizeof(float), 0);
    ASSERT_TRUE(future_r.valid());

    // By now all three transfers should have completed
    EXPECT_EQ(future_a.wait_for(hundred_ms), future_status::ready);
    EXPECT_EQ(future_b.wait_for(hundred_ms), future_status::ready);
    EXPECT_EQ(future_r.wait_for(hundred_ms), future_status::ready);

    for (float x : data_r)
    {
        ASSERT_EQ(x, 4);
    }
}
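A sketch, not in this commit, of how the async calls above might combine with pipeline_depth tensor sets once create_input_tensor/create_output_tensor are implemented; the stage rotation is an assumption about the intended double-buffering pattern, and a real pipeline would defer the get() calls so adjacent stages overlap.
TEST(async, DISABLED_pipelined_sketch)
{
    Shape shape{100000};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Add>(A, B), ParameterVector{A, B});
    auto backend = runtime::Backend::create("INTERPRETER");
    auto handle = backend->compile(f);

    const size_t depth = 2;
    auto a_set = handle->create_input_tensor(0, depth);
    auto b_set = handle->create_input_tensor(1, depth);
    auto r_set = handle->create_output_tensor(0, depth);

    vector<float> data(shape_size(shape), 2);
    vector<float> data_r(shape_size(shape), 0);
    size_t bytes = data.size() * sizeof(float);

    for (size_t i = 0; i < 2 * depth; i++)
    {
        size_t stage = i % depth; // rotate between the two tensor sets
        a_set[stage]->begin_write(data.data(), bytes, 0).get();
        b_set[stage]->begin_write(data.data(), bytes, 0).get();
        handle->begin_execute({r_set[stage]}, {a_set[stage], b_set[stage]}).get();
        r_set[stage]->begin_read(data_r.data(), bytes, 0).get();
        EXPECT_EQ(data_r[0], 4);
    }
}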