Commit 8b123ada authored by Robert Kimball

read/write test

parent 99a1e245
@@ -44,7 +44,7 @@ TEST(async, execute)
     auto handle = backend->compile(f);
     auto future = handle->begin_execute({r}, {a, b});
     ASSERT_TRUE(future.valid());
-    bool rc = future.get();
+    future.get();
     for (float x : result_data)
     {
@@ -52,7 +52,7 @@ TEST(async, execute)
     }
 }
-TEST(async, tensor_write)
+TEST(async, tensor_read_write)
 {
     Shape shape{100000};
     auto A = make_shared<op::Parameter>(element::f32, shape);
@@ -63,39 +63,40 @@ TEST(async, tensor_write)
     auto handle = backend->compile(f);
     vector<float> data(shape_size(shape), 2);
-    vector<float> result_data(shape_size(shape), 0);
+    vector<float> data_r(shape_size(shape), 0);
     // Create some tensors for input/output
     shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shape);
     shared_ptr<runtime::Tensor> b = backend->create_tensor(element::f32, shape);
-    shared_ptr<runtime::Tensor> r = backend->create_tensor(element::f32, shape, result_data.data());
+    shared_ptr<runtime::Tensor> r = backend->create_tensor(element::f32, shape);
     auto future_a = a->begin_write(data.data(), data.size() * sizeof(float));
     auto future_b = b->begin_write(data.data(), data.size() * sizeof(float));
+    auto future_r = r->begin_read(data_r.data(), data_r.size() * sizeof(float));
     ASSERT_TRUE(future_a.valid());
     ASSERT_TRUE(future_b.valid());
+    ASSERT_TRUE(future_r.valid());
     chrono::milliseconds ten_ms(10);
     EXPECT_EQ(future_a.wait_for(ten_ms), future_status::timeout);
     EXPECT_EQ(future_b.wait_for(ten_ms), future_status::timeout);
+    EXPECT_EQ(future_r.wait_for(ten_ms), future_status::timeout);
     this_thread::sleep_for(chrono::milliseconds(500));
     EXPECT_EQ(future_a.wait_for(ten_ms), future_status::timeout);
     EXPECT_EQ(future_b.wait_for(ten_ms), future_status::timeout);
+    EXPECT_EQ(future_r.wait_for(ten_ms), future_status::timeout);
     auto future = handle->begin_execute({r}, {a, b});
-    bool rc = future.get();
+    future.get();
     EXPECT_EQ(future_a.wait_for(ten_ms), future_status::ready);
     EXPECT_EQ(future_b.wait_for(ten_ms), future_status::ready);
-    for (float x : result_data)
+    EXPECT_EQ(future_r.wait_for(ten_ms), future_status::ready);
+    for (float x : data_r)
     {
         ASSERT_EQ(x, 4);
     }
 }
-TEST(async, tensor_read)
-{
-}
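For context, a minimal standalone sketch of the asynchronous pattern the renamed test exercises: writes and a read are queued on tensors up front, their futures stay pending, and they only complete once begin_execute runs. The names begin_write, begin_read, and begin_execute come from the diff above; the header paths, the "INTERPRETER" backend name, and the Function construction are illustrative assumptions, not part of this commit.

    // Sketch only; mirrors the pattern in the test above under the stated assumptions.
    #include <ngraph/ngraph.hpp>            // assumed umbrella header
    #include <ngraph/runtime/backend.hpp>   // assumed backend header

    using namespace std;
    using namespace ngraph;

    int main()
    {
        Shape shape{100000};
        auto A = make_shared<op::Parameter>(element::f32, shape);
        auto B = make_shared<op::Parameter>(element::f32, shape);
        auto f = make_shared<Function>(make_shared<op::Add>(A, B), ParameterVector{A, B});

        // Backend choice is an assumption; any backend supporting async I/O works.
        auto backend = runtime::Backend::create("INTERPRETER");
        auto handle = backend->compile(f);

        vector<float> data(shape_size(shape), 2);
        vector<float> data_r(shape_size(shape), 0);
        auto a = backend->create_tensor(element::f32, shape);
        auto b = backend->create_tensor(element::f32, shape);
        auto r = backend->create_tensor(element::f32, shape);

        // Queue the copies; the returned std::future objects stay pending for now.
        auto future_a = a->begin_write(data.data(), data.size() * sizeof(float));
        auto future_b = b->begin_write(data.data(), data.size() * sizeof(float));
        auto future_r = r->begin_read(data_r.data(), data_r.size() * sizeof(float));

        // Execution drains the queued writes, computes, then satisfies the read.
        auto future = handle->begin_execute({r}, {a, b});
        future.get();   // block until execution completes
        future_r.get(); // read finished: data_r now holds 2 + 2 = 4
        return 0;
    }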