Commit bd01bf2c authored by Robert Kimball, committed by Scott Cyphers

Move std::vector read/write from runtime::TensorView to unit test directory (#397)

* wip

* wip

* remove get_vector from runtime::TensorView class as it was for unit test only

* cleanup

* move writing std::vector data to runtime::TensorView into the unit test dir

* merge fix

* PR review change

* update from PR comment

* update changes file
parent c5144d48
@@ -7,7 +7,7 @@ arguments now take type `CoordinateDiff` instead of `Shape`. `CoordinateDiff` is
`std::vector<std::ptrdiff_t>`, which "is like `size_t` but is allowed to be negative". Callers may
need to be adapted.
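For illustration, a hedged sketch of adapting a caller (parameter order taken from the convolution test generator later in this diff; variable names are illustrative):
```C++
// Before: padding passed as Shape (unsigned, so negative padding was impossible)
auto conv = make_shared<op::Convolution>(A, B,
                                         Strides{1, 1},  // move_strides
                                         Strides{1, 1},  // filter_dilation
                                         Shape{0, 0},    // below_pads
                                         Shape{0, 0});   // above_pads

// After: padding passed as CoordinateDiff, which may hold negative values
auto conv = make_shared<op::Convolution>(A, B,
                                         Strides{1, 1},
                                         Strides{1, 1},
                                         CoordinateDiff{0, 0},
                                         CoordinateDiff{0, 0});
```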
## `Parameter` and `Function` no longer take a type argument.
To update, remove the passed argument. For example,
```C++
@@ -22,3 +22,5 @@ make_shared<Function>(results, result_type, parameters);
make_shared<Function>(results, parameters);
```
The `runtime::TensorView` methods `get_vector<T>()` and `write<T>(const std::vector&)` have been moved
to the unit test directory: use `read_vector` and `write_vector` from `util/test_tools.hpp` instead.
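A minimal sketch of the new test-side pattern (assuming a `backend` is already constructed, as in the tests below):
```C++
#include "util/test_tools.hpp"

auto tv = backend->make_primary_tensor_view(element::f32, Shape{2, 2});
write_vector(tv, std::vector<float>{1, 2, 3, 4});  // was: tv->write(values)
std::vector<float> v = read_vector<float>(tv);     // was: tv->get_vector<float>()
```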
@@ -27,9 +27,8 @@ std::shared_ptr<ngraph::runtime::CallFrame> runtime::gpu::GPU_Backend::make_call
}
std::shared_ptr<ngraph::runtime::TensorView>
runtime::gpu::GPU_Backend::make_primary_tensor_view(const ngraph::element::Type& element_type,
const Shape& shape)
runtime::gpu::GPU_Backend::make_device_tensor(const ngraph::element::Type& element_type,
const Shape& shape)
{
auto rc = make_shared<runtime::cpu::CPU_TensorView>(element_type, shape);
return dynamic_pointer_cast<runtime::TensorView>(rc);
return make_shared<runtime::TensorView>(element_type, shape);
}
@@ -64,29 +64,12 @@ namespace ngraph
/// @param n Number of bytes to write, must be integral number of elements.
virtual void write(const void* p, size_t tensor_offset, size_t n) = 0;
template <typename T>
void write(const std::vector<T>& values)
{
write(values.data(), 0, values.size() * sizeof(T));
}
/// @brief Read bytes directly from the tensor
/// @param p Pointer to destination for data
/// @param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
/// @param n Number of bytes to read, must be integral number of elements.
virtual void read(void* p, size_t tensor_offset, size_t n) const = 0;
// This is for unit test only
template <typename T>
std::vector<T> get_vector()
{
size_t element_count = shape_size(get_shape());
size_t size = element_count * sizeof(T);
std::vector<T> rc(element_count);
read(rc.data(), 0, size);
return rc;
}
protected:
std::shared_ptr<ngraph::descriptor::TensorView> m_descriptor;
};
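With the templated helpers removed, the byte-oriented virtuals above are the only I/O left on `runtime::TensorView`. A short sketch of driving them directly (offsets and sizes are in bytes and must be an integral number of elements, per the doc comments):
```C++
std::vector<float> in{1, 2, 3, 4};
tv->write(in.data(), 0, in.size() * sizeof(float));   // copy all four elements in
std::vector<float> out(in.size());
tv->read(out.data(), 0, out.size() * sizeof(float));  // copy them back out
```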
@@ -94,13 +94,6 @@ bool autodiff_numeric_compare_selective(
return test::all_close(results_num, results_sym, rtol, atol);
}
template <typename T>
static void copy_data(shared_ptr<runtime::TensorView> tv, const vector<T>& data)
{
size_t data_size = data.size() * sizeof(T);
tv->write(data.data(), 0, data_size);
}
TEST(${BACKEND_NAME}, backwards_maxpool_n4_c1_hw4_2x2_max)
{
auto manager = runtime::Manager::get("${BACKEND_NAME}");
@@ -144,7 +137,7 @@ TEST(${BACKEND_NAME}, backwards_maxpool_n4_c1_hw4_2x2_max)
auto external = manager->compile(df);
auto cf = backend->make_call_frame(external);
cf->tensor_call({input, ep}, {output});
ASSERT_TRUE(output->get_vector<int>() == expected);
ASSERT_TRUE(read_vector<int>(output) == expected);
}
TEST(${BACKEND_NAME}, backwards_maxpool_n2_c1_hw5_3x3_str2_max)
@@ -188,7 +181,7 @@ TEST(${BACKEND_NAME}, backwards_maxpool_n2_c1_hw5_3x3_str2_max)
auto external = manager->compile(df);
auto cf = backend->make_call_frame(external);
cf->tensor_call({input, ep}, {output});
ASSERT_TRUE(output->get_vector<int>() == expected);
ASSERT_TRUE(read_vector<int>(output) == expected);
}
TEST(${BACKEND_NAME}, backwards_abs)
@@ -880,7 +873,7 @@ TEST(${BACKEND_NAME}, backwards_select)
for (auto i = 0; i < 100; i++)
{
auto x0 = backend->make_primary_tensor_view(element::boolean, shape);
x0->write(vector<char>{0, 1, 0, 1, 0, 1});
write_vector(x0, vector<char>{0, 1, 0, 1, 0, 1});
auto x1 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto x2 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
@@ -913,7 +906,7 @@ TEST(${BACKEND_NAME}, backwards_select_nested)
for (auto i = 0; i < 100; i++)
{
auto x0 = backend->make_primary_tensor_view(element::boolean, shape);
x0->write(vector<char>{0, 1, 0, 1, 0, 1});
write_vector(x0, vector<char>{0, 1, 0, 1, 0, 1});
auto x1 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto x2 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
@@ -248,7 +248,7 @@ TEST(benchmark, concat_32x1x200_axis1_6)
std::cout << "Verifying " << backend_names[i] << " result against " << backend_names[0]
<< "..." << std::flush;
if (result_tvs[i]->get_vector<float>() == result_tvs[0]->get_vector<float>())
if (read_vector<float>(result_tvs[i]) == read_vector<float>(result_tvs[0]))
{
std::cout << " OK" << std::endl;
}
@@ -15,6 +15,7 @@
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/test_tools.hpp"
#include <memory>
using namespace std;
@@ -15,18 +15,12 @@
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/test_tools.hpp"
using namespace ngraph;
using namespace ngraph::test;
using namespace std;
template <typename T>
static void copy_data(shared_ptr<runtime::TensorView> tv, const vector<T>& data)
{
size_t data_size = data.size() * sizeof(T);
tv->write(data.data(), 0, data_size);
}
std::shared_ptr<ngraph::runtime::TensorView> make_reduce_result(
std::function<std::shared_ptr<Node>(const std::shared_ptr<Node>&, const AxisSet&)> func)
{
@@ -91,31 +85,31 @@ TEST(builder, l2_norm)
{
auto result = make_reduce_result(builder::l2_norm);
ASSERT_TRUE(
all_close((vector<float>{5.9160797831f, 7.48331477355f}), result->get_vector<float>()));
all_close((vector<float>{5.9160797831f, 7.48331477355f}), read_vector<float>(result)));
}
TEST(builder, mean)
{
auto result = make_reduce_result(builder::mean);
ASSERT_TRUE(all_close((vector<float>{3, 4}), result->get_vector<float>()));
ASSERT_TRUE(all_close((vector<float>{3, 4}), read_vector<float>(result)));
}
TEST(builder, std_dev)
{
auto result = make_reduce_result_false(builder::std_dev);
ASSERT_TRUE(
all_close((vector<float>{1.63299316186f, 1.63299316186f}), result->get_vector<float>()));
all_close((vector<float>{1.63299316186f, 1.63299316186f}), read_vector<float>(result)));
result = make_reduce_result_true(builder::std_dev);
ASSERT_TRUE(all_close((vector<float>{2, 2}), result->get_vector<float>()));
ASSERT_TRUE(all_close((vector<float>{2, 2}), read_vector<float>(result)));
}
TEST(builder, variance)
{
auto result = make_reduce_result_false(builder::variance);
ASSERT_TRUE(
all_close((vector<float>{2.66666666666f, 2.66666666666f}), result->get_vector<float>()));
all_close((vector<float>{2.66666666666f, 2.66666666666f}), read_vector<float>(result)));
result = make_reduce_result_true(builder::variance);
ASSERT_TRUE(all_close((vector<float>{4, 4}), result->get_vector<float>()));
ASSERT_TRUE(all_close((vector<float>{4, 4}), read_vector<float>(result)));
}
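For reference, the expected constants are consistent with reducing the columns {1, 3, 5} and {2, 4, 6} (an inference from the values; the input tensor itself is elided from this diff), with the `_false`/`_true` variants apparently toggling Bessel's correction:
```latex
\lVert x \rVert_2 : \sqrt{1+9+25} = \sqrt{35} \approx 5.9160798, \qquad \sqrt{4+16+36} = \sqrt{56} \approx 7.4833148
\bar{x} : 9/3 = 3, \qquad 12/3 = 4
\sigma^2 : \text{population } 8/3 \approx 2.6667, \qquad \text{sample } 8/2 = 4
\sigma : \sqrt{8/3} \approx 1.6329932, \qquad \sqrt{4} = 2
```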
TEST(builder, numpy_transpose)
@@ -18,17 +18,11 @@
#include "ngraph/builder/xla_tuple.hpp"
#include "ngraph/ngraph.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
template <typename T>
static void copy_data(shared_ptr<runtime::TensorView> tv, const vector<T>& data)
{
size_t data_size = data.size() * sizeof(T);
tv->write(data.data(), 0, data_size);
}
TEST(builder_xla, simple)
{
auto shape = Shape{2, 2};
@@ -64,13 +58,13 @@ TEST(builder_xla, simple)
auto result_tuple = xla::make_tuple({result});
xla::call(cf, {abc}, {result_tuple});
EXPECT_EQ((vector<float>{54, 80, 110, 144}), result->get_vector<float>());
EXPECT_EQ((vector<float>{54, 80, 110, 144}), read_vector<float>(result));
xla::call(cf, {bac}, {result_tuple});
EXPECT_EQ((vector<float>{54, 80, 110, 144}), result->get_vector<float>());
EXPECT_EQ((vector<float>{54, 80, 110, 144}), read_vector<float>(result));
xla::call(cf, {acb}, {result_tuple});
EXPECT_EQ((vector<float>{50, 72, 98, 128}), result->get_vector<float>());
EXPECT_EQ((vector<float>{50, 72, 98, 128}), read_vector<float>(result));
}
TEST(builder_xla, empty_tuple_interpreter)
@@ -19,17 +19,11 @@
#include "ngraph/ngraph.hpp"
#include "util/ndarray.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
template <typename T>
static void copy_data(shared_ptr<runtime::TensorView> tv, const vector<T>& data)
{
size_t data_size = data.size() * sizeof(T);
tv->write(data.data(), 0, data_size);
}
template <typename OP>
bool check_unary()
{
@@ -275,15 +275,15 @@ TEST(cudnn, abc)
copy_data(c, test::NDArray<float, 2>({{9, 10}, {11, 12}}).get_vector());
cf->call({a, b, c}, {result});
EXPECT_EQ(result->get_vector<float>(),
EXPECT_EQ(result->read_vector<float>(),
(test::NDArray<float, 2>({{54, 80}, {110, 144}})).get_vector());
cf->call({b, a, c}, {result});
EXPECT_EQ(result->get_vector<float>(),
EXPECT_EQ(result->read_vector<float>(),
(test::NDArray<float, 2>({{54, 80}, {110, 144}})).get_vector());
cf->call({a, c, b}, {result});
EXPECT_EQ(result->get_vector<float>(),
EXPECT_EQ(result->read_vector<float>(),
(test::NDArray<float, 2>({{50, 72}, {98, 128}})).get_vector());
}
@@ -308,5 +308,5 @@ TEST(cudnn, dot1d)
auto result = backend->make_primary_tensor_view(element::f32, shape_r);
cf->call({a, b}, {result});
EXPECT_EQ((vector<float>{170}), result->get_vector<float>());
EXPECT_EQ((vector<float>{170}), result->read_vector<float>());
}
@@ -35,7 +35,7 @@ def shaped_linspace(shape):
total_elems = reduce(mul,shape)
flat = np.linspace(1,total_elems,total_elems)
return shaped_from_flat(shape,flat)
# Elementwise addition on tuples.
@@ -204,7 +204,7 @@ TEST (${BACKEND_NAME}, %s)
auto B = make_shared<op::Parameter>(element::f64, shape_b);
auto shape_r = Shape{%s};
auto f = make_shared<Function>(
make_shared<op::Convolution>(A, B,
Strides{%s}, // move_strides
Strides{%s}, // filter_dilation
CoordinateDiff{%s}, // below_pads
@@ -228,7 +228,7 @@ TEST (${BACKEND_NAME}, %s)
cf->call({a, b}, {result});
EXPECT_TRUE(all_close_d(vector<double>{expected_result},
result->get_vector<double>()));
result->read_vector<double>()));
}
'''
f.write (template % (test_name,
@@ -22,17 +22,11 @@
#include "ngraph/ngraph.hpp"
#include "ngraph/serializer.hpp"
#include "ngraph/util.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
template <typename T>
static void copy_data(shared_ptr<runtime::TensorView> tv, const vector<T>& data)
{
size_t data_size = data.size() * sizeof(T);
tv->write(data.data(), 0, data_size);
}
TEST(serialize, main)
{
// First create "f(A,B,C) = (A+B)*C".
@@ -85,13 +79,13 @@
auto result = backend->make_primary_tensor_view(element::f32, shape);
cf->call({x, y, z}, {result});
EXPECT_EQ((vector<float>{54, 80, 110, 144}), result->get_vector<float>());
EXPECT_EQ((vector<float>{54, 80, 110, 144}), read_vector<float>(result));
cf->call({y, x, z}, {result});
EXPECT_EQ((vector<float>{54, 80, 110, 144}), result->get_vector<float>());
EXPECT_EQ((vector<float>{54, 80, 110, 144}), read_vector<float>(result));
cf->call({x, z, y}, {result});
EXPECT_EQ((vector<float>{50, 72, 98, 128}), result->get_vector<float>());
EXPECT_EQ((vector<float>{50, 72, 98, 128}), read_vector<float>(result));
}
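As a sanity check, these expectations follow from f(A,B,C) = (A+B)*C with the inputs this test family conventionally uses (C = {{9,10},{11,12}} is confirmed by the cudnn hunk above; A = {{1,2},{3,4}} and B = {{5,6},{7,8}} are inferred):
```latex
\{x,y,z\}: (1{+}5)\cdot 9 = 54,\; (2{+}6)\cdot 10 = 80,\; (3{+}7)\cdot 11 = 110,\; (4{+}8)\cdot 12 = 144
\{x,z,y\}: (1{+}9)\cdot 5 = 50,\; (2{+}10)\cdot 6 = 72,\; (3{+}11)\cdot 7 = 98,\; (4{+}12)\cdot 8 = 128
```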
TEST(serialize, existing_models)
@@ -27,13 +27,6 @@
using namespace std;
using namespace ngraph;
template <typename T>
static void copy_data(shared_ptr<runtime::TensorView> tv, const vector<T>& data)
{
size_t data_size = data.size() * sizeof(T);
tv->write(data.data(), 0, data_size);
}
TEST(util, split)
{
{
@@ -19,6 +19,7 @@
#include <vector>
#include "ngraph/types/element_type.hpp"
#include "test_tools.hpp"
namespace ngraph
{
@@ -68,7 +69,7 @@ namespace ngraph
if (a->get_shape() != b->get_shape())
return false;
return all_close(a->get_vector<T>(), b->get_vector<T>(), rtol, atol);
return all_close(read_vector<T>(a), read_vector<T>(b), rtol, atol);
}
/// @brief Same as numpy.allclose
@@ -19,6 +19,7 @@
#include "ngraph/log.hpp"
#include "ngraph/types/element_type.hpp"
#include "ngraph/util.hpp"
#include "util/test_tools.hpp"
namespace ngraph
{
@@ -78,7 +79,7 @@ namespace ngraph
std::vector<std::vector<T>> result_vect;
for (auto result : results)
{
result_vect.push_back(result->get_vector<T>()); // storage for results
result_vect.push_back(read_vector<T>(result)); // storage for results
result_pos.push_back(result_vect.back().begin());
}
@@ -89,18 +90,18 @@ namespace ngraph
std::vector<std::shared_ptr<ngraph::runtime::TensorView>> bprops_tv;
bprops_tv.insert(bprops_tv.begin(), bprops.begin(), bprops.end());
auto c_vec = c_arg->template get_vector<T>();
auto c_vec = read_vector<T>(c_arg);
fill(c_vec.begin(), c_vec.end(), 0);
for (size_t i = 0; i < c_vec.size(); i++)
{
c_vec[i] = 1;
c_arg->write(c_vec);
write_vector(c_arg, c_vec);
cf->tensor_call(args_tv, bprops_tv);
c_vec[i] = 0;
c_arg->write(c_vec);
write_vector(c_arg, c_vec);
for (size_t j = 0; j < results.size(); j++)
{
auto bprop_vec = bprops[j]->get_vector<T>();
auto bprop_vec = read_vector<T>(bprops[j]);
result_pos[j] = std::copy(bprop_vec.begin(), bprop_vec.end(), result_pos[j]);
}
}
@@ -108,7 +109,7 @@ namespace ngraph
// Copy results from temp to result vector
for (size_t j = 0; j < results.size(); j++)
{
results[j]->write(result_vect[j]);
write_vector(results[j], result_vect[j]);
}
return results;
}
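What the loop above computes: seeding the adjoint c with the i-th unit vector and making one backprop call yields the i-th row of the Jacobian, so iterating over every element of c assembles the full derivative:
```latex
\left[\frac{\partial y}{\partial x}\right]_{ij} = \mathrm{bprop}(x, e_i)_j,
\qquad e_i = (0,\dots,0,\underset{i}{1},0,\dots,0)
```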
@@ -62,7 +62,7 @@ namespace ngraph
auto ref_y = backend->make_primary_tensor_view<T>(y_shape);
cf->tensor_call(args, std::vector<std::shared_ptr<ngraph::runtime::TensorView>>{ref_y});
auto ref_vec = ref_y->template get_vector<T>();
auto ref_vec = read_vector<T>(ref_y);
// inc_y will hold f(x+dx) values
auto inc_y = backend->make_primary_tensor_view<T>(y_shape);
@@ -79,17 +79,17 @@ namespace ngraph
indep_params.end())
{
auto arg = args[i];
auto res = results[pos]->get_vector<T>();
auto vec = arg->get_vector<T>();
auto res = read_vector<T>(results[pos]);
auto vec = read_vector<T>(arg);
for (size_t j = 0; j < vec.size(); j++)
{
auto old_val = vec[j];
vec[j] += delta;
arg->write(vec);
write_vector(arg, vec);
cf->tensor_call(args, {inc_y});
auto inc_vec = inc_y->template get_vector<T>();
auto inc_vec = read_vector<T>(inc_y);
vec[j] = old_val;
arg->write(vec);
write_vector(arg, vec);
size_t res_k = j;
for (size_t k = 0; k < inc_vec.size(); k++)
{
@@ -99,7 +99,7 @@ namespace ngraph
res_k += vec.size();
}
}
results[pos]->write(res);
write_vector(results[pos], res);
pos++;
}
}
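The companion numeric routine perturbs one input element at a time; the elided lines presumably divide the observed change in the output by delta, i.e. a one-sided forward difference:
```latex
\left[\frac{\partial y}{\partial x}\right]_{kj} \approx \frac{y_k(x + \delta\, e_j) - y_k(x)}{\delta}
```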
@@ -18,6 +18,7 @@
#include <random>
#include "ngraph/types/element_type.hpp"
#include "test_tools.hpp"
namespace ngraph
{
@@ -41,12 +42,12 @@ namespace ngraph
const std::shared_ptr<runtime::TensorView>
initialize(const std::shared_ptr<runtime::TensorView>& ptv)
{
std::vector<T> vec = ptv->get_vector<T>();
std::vector<T> vec = read_vector<T>(ptv);
for (T& elt : vec)
{
elt = m_r();
}
ptv->write(vec);
write_vector(ptv, vec);
return ptv;
}
@@ -14,9 +14,13 @@
#pragma once
#include <exception>
#include <list>
#include <memory>
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
class Node;
@@ -32,3 +36,23 @@ void copy_data(std::shared_ptr<ngraph::runtime::TensorView> tv, const std::vecto
size_t data_size = data.size() * sizeof(T);
tv->write(data.data(), 0, data_size);
}
template <typename T>
std::vector<T> read_vector(std::shared_ptr<ngraph::runtime::TensorView> tv)
{
if (ngraph::element::from<T>() != tv->get_tensor_view_layout()->get_element_type())
{
throw std::invalid_argument("read_vector type must match TensorView type");
}
size_t element_count = ngraph::shape_size(tv->get_shape());
size_t size = element_count * sizeof(T);
std::vector<T> rc(element_count);
tv->read(rc.data(), 0, size);
return rc;
}
template <typename T>
void write_vector(std::shared_ptr<ngraph::runtime::TensorView> tv, const std::vector<T>& values)
{
tv->write(values.data(), 0, values.size() * sizeof(T));
}