Commit de82a3eb authored by Robert Kimball, committed by Scott Cyphers

ParameterizedTensorView and ElementType cleanup (#264)

* ParameterizedTensorView and ElementType cleanup

* change element to_type() to from()
parent f2a23d55
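In short: tensor views parameterized by an element-type class (element::Float32 and friends) give way to the type-erased runtime::TensorView, and the static ET::element_type() lookup is replaced by element::from<T>(), which maps a plain C++ type to its element::Type at runtime. A minimal before/after sketch, assuming a backend obtained from a runtime Manager as in the tests further down:

    // Before: the element type was a template parameter of the view.
    auto old_view = backend->make_parameterized_tensor_view<element::Float32>(Shape{2, 3});

    // After: an untyped TensorView; the element type is passed as a value...
    auto view1 = backend->make_primary_tensor_view(element::Float32::element_type(), Shape{2, 3});
    // ...or deduced from a C++ type via the new helper.
    auto view2 = backend->make_primary_tensor_view<float>(Shape{2, 3});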
@@ -69,6 +69,18 @@ size_t Tensor::get_pool_offset() const
std::ostream& operator<<(std::ostream& out, const Tensor& tensor)
{
-    out << "Tensor(" << tensor.get_name() << ")";
+    out << "Tensor(" << tensor.get_name() << ", ";
+    out << (tensor.is_persistent() ? "P" : "");
+    out << (tensor.is_constant() ? "C" : "");
+    out << (tensor.is_input() ? "I" : "");
+    out << (tensor.is_output() ? "O" : "");
+    if (!tensor.is_persistent() && !tensor.is_constant() && !tensor.is_input() &&
+        !tensor.is_output())
+    {
+        out << "T";
+    }
+    out << ")";
return out;
}
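A sketch of what the new operator<< emits, with hypothetical tensor names (each flag letter prints only when the corresponding predicate is true):

    // Tensor(arg0, I)    <- an input tensor
    // Tensor(const0, C)  <- a constant tensor
    // Tensor(t0, T)      <- none of P/C/I/O set: falls through to the "T" (temporary) marker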
@@ -34,7 +34,6 @@ Function::Function(const std::shared_ptr<Node>& result,
    , m_temporary_pool_size(0)
    , m_instance_id(m_next_instance_id.fetch_add(1))
{
-    m_result->set_is_output();
traverse_nodes(this, [&](shared_ptr<Node> node) { m_ops.push_back(node); });
}
......
@@ -67,6 +67,7 @@ void ngraph::pass::Manager::run_passes(shared_ptr<Function> func)
    vector<shared_ptr<Function>> fs;
    for (shared_ptr<Function> f : get_state().get_functions())
    {
+        f->get_result()->set_is_output();
fs.push_back(f);
}
......
@@ -18,7 +18,6 @@
#include "ngraph/runtime/parameterized_tensor_view.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/runtime/tuple.hpp"
#include "ngraph/types/element_type.hpp"
using namespace ngraph::runtime;
......
@@ -18,7 +18,7 @@
#include "ngraph/common.hpp"
#include "ngraph/log.hpp"
#include "ngraph/runtime/ndarray.hpp"
#include "ngraph/types/element_type.hpp"
namespace ngraph
{
@@ -35,9 +35,6 @@ namespace ngraph
        class Tuple;
        class Value;

-        template <typename ET>
-        class ParameterizedTensorView;
/// @brief Interface to a generic backend.
///
/// Backends are responsible for function execution and value allocation.
@@ -56,12 +53,11 @@
                make_primary_tensor_view(const ngraph::element::Type& element_type,
                                         const Shape& shape);

-            template <typename ET>
-            std::shared_ptr<ngraph::runtime::ParameterizedTensorView<ET>>
-                make_parameterized_tensor_view(const Shape& shape)
+            template <typename T>
+            std::shared_ptr<ngraph::runtime::TensorView>
+                make_primary_tensor_view(const Shape& shape)
            {
-                return std::dynamic_pointer_cast<ngraph::runtime::ParameterizedTensorView<ET>>(
-                    make_primary_tensor_view(ET::element_type(), shape));
+                return make_primary_tensor_view(element::from<T>(), shape);
}
/// @brief Construct a tuple handle from a sequence of values.
......
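The new template simply forwards to the type-erased factory, so the two calls below are equivalent (a sketch, assuming a concrete Backend instance):

    auto tv1 = backend->make_primary_tensor_view(element::f32, Shape{2, 3});
    auto tv2 = backend->make_primary_tensor_view<float>(Shape{2, 3}); // deduced via element::from<float>()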
@@ -170,13 +170,16 @@ void ExternalFunction::compile()
        return;
    }

+    string function_name = m_function->get_name();
+    string dump_filename = file_util::path_join(s_output_dir, function_name + "_ops.txt");
+
    pass::Manager pass_manager;
    pass_manager.register_pass<pass::TopologicalSort>();
    // For now, just make everyone row-major.
    pass_manager.register_pass<pass::AssignLayout<DenseTensorViewLayout>>();
    pass_manager.register_pass<pass::Liveness>();
    pass_manager.register_pass<pass::MemoryLayout>(64);
-    pass_manager.register_pass<pass::DumpSorted>("sorted_ops.txt");
+    pass_manager.register_pass<pass::DumpSorted>(dump_filename);
pass_manager.run_passes(m_function);
// Now we build the TU
@@ -311,7 +314,6 @@ using namespace ngraph::runtime::cpu::eigen;

    // TODO: Cleanup and make this a utility function

-    string function_name = m_function->get_name();
file_util::make_directory(s_output_dir);
string filename = file_util::path_join(s_output_dir, function_name + "_codegen.cpp");
ofstream out(filename);
......
@@ -19,6 +19,7 @@
#include "ngraph/function.hpp"
#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/parameterized_tensor_view.hpp"
+#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
......
@@ -25,9 +25,11 @@ const element::Type element::boolean(8, false, false, "bool");
const element::Type element::f32(32, true, true, "float");
const element::Type element::f64(64, true, true, "double");
const element::Type element::i8(8, false, true, "int8_t");
+const element::Type element::i16(16, false, true, "int16_t");
const element::Type element::i32(32, false, true, "int32_t");
const element::Type element::i64(64, false, true, "int64_t");
const element::Type element::u8(8, false, false, "uint8_t");
+const element::Type element::u16(16, false, false, "uint16_t");
const element::Type element::u32(32, false, false, "uint32_t");
const element::Type element::u64(64, false, false, "uint64_t");
......
@@ -29,14 +29,22 @@
namespace ngraph
{
-    namespace runtime
-    {
-        template <typename ET>
-        class ParameterizedTensorView;
-    }
-
    namespace element
    {
+        class Type;
+
+        extern const Type boolean;
+        extern const Type f32;
+        extern const Type f64;
+        extern const Type i8;
+        extern const Type i16;
+        extern const Type i32;
+        extern const Type i64;
+        extern const Type u8;
+        extern const Type u16;
+        extern const Type u32;
+        extern const Type u64;
+
        class Type
{
Type(const Type&) = delete;
@@ -66,15 +74,58 @@ namespace ngraph
            const std::string m_cname;
        };

-        extern const Type boolean;
-        extern const Type f32;
-        extern const Type f64;
-        extern const Type i8;
-        extern const Type i32;
-        extern const Type i64;
-        extern const Type u8;
-        extern const Type u32;
-        extern const Type u64;
+        template <typename T>
+        const Type& from()
+        {
+            if (typeid(T) == typeid(char) || typeid(T) == typeid(bool))
+            {
+                return boolean;
+            }
+            else if (typeid(T) == typeid(float))
+            {
+                return f32;
+            }
+            else if (typeid(T) == typeid(double))
+            {
+                return f64;
+            }
+            else if (typeid(T) == typeid(int8_t))
+            {
+                return i8;
+            }
+            else if (typeid(T) == typeid(int16_t))
+            {
+                return i16;
+            }
+            else if (typeid(T) == typeid(int32_t))
+            {
+                return i32;
+            }
+            else if (typeid(T) == typeid(int64_t))
+            {
+                return i64;
+            }
+            else if (typeid(T) == typeid(uint8_t))
+            {
+                return u8;
+            }
+            else if (typeid(T) == typeid(uint16_t))
+            {
+                return u16;
+            }
+            else if (typeid(T) == typeid(uint32_t))
+            {
+                return u32;
+            }
+            else if (typeid(T) == typeid(uint64_t))
+            {
+                return u64;
+            }
+            else
+            {
+                throw std::invalid_argument("Unknown type");
+            }
+        }
std::ostream& operator<<(std::ostream& out, const ngraph::element::Type& obj);
......
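Note the design choice: from<T>() dispatches on typeid at runtime rather than through template specialization, so an unsupported T compiles but throws when called. Usage is a plain lookup:

    const element::Type& t = element::from<int32_t>(); // yields element::i32
    // element::from<std::string>();                   // would throw std::invalid_argument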
@@ -23,9 +23,10 @@ include_directories(
set (SRC
    autodiff.cpp
-    copy.cpp
    build_graph.cpp
+    copy.cpp
    eigen.cpp
+    element_type.cpp
file_util.cpp
input_output_assign.cpp
main.cpp
......
......
@@ -39,7 +39,6 @@ bool check_unary()
    auto node = make_shared<OP>(arg0);
    auto new_node = node->copy_with_new_args(new_args);
-    auto node_cast = dynamic_pointer_cast<OP>(new_node);
return (nullptr != new_node) && (new_args == new_node->get_arguments());
}
@@ -56,7 +55,6 @@ bool check_binary()
    auto node = make_shared<OP>(arg0, arg1);
    auto new_node = node->copy_with_new_args(new_args);
-    auto node_cast = dynamic_pointer_cast<OP>(new_node);
return (nullptr != new_node) && (new_args == new_node->get_arguments());
}
@@ -99,6 +97,7 @@ TEST(copy, broadcast)
    auto node = make_shared<op::Broadcast>(arg0, shape, axes);
    auto new_node = node->copy_with_new_args(new_args);
    auto node_cast = dynamic_pointer_cast<op::Broadcast>(new_node);
+    ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments());
@@ -123,6 +122,7 @@ TEST(copy, concat)
    auto node = make_shared<op::Concat>(Nodes{arg0, arg1}, axis);
    auto new_node = node->copy_with_new_args(new_args);
    auto node_cast = dynamic_pointer_cast<op::Concat>(new_node);
+    ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments());
@@ -140,9 +140,11 @@ TEST(copy, parameterized_constant)
    Shape shape{2, 2};
    auto cptv = dynamic_pointer_cast<ngraph::runtime::ParameterizedTensorView<element::Float32>>(c);
+    ASSERT_NE(cptv, nullptr);
    auto node = make_shared<op::ParameterizedConstant<element::Float32>>(shape, cptv);
    auto new_node = node->copy_with_new_args(Nodes{});
    auto node_cast = dynamic_pointer_cast<op::ParameterizedConstant<element::Float32>>(new_node);
+    ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(Nodes{} == new_node->get_arguments());
ASSERT_TRUE(node_cast->get_value() == c);
@@ -157,6 +159,7 @@ TEST(copy, constant)
    auto node = make_shared<op::Constant>(et, shape, c);
    auto new_node = node->copy_with_new_args(Nodes{});
    auto node_cast = dynamic_pointer_cast<op::Constant>(new_node);
+    ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(Nodes{} == new_node->get_arguments());
ASSERT_TRUE(node_cast->get_value_strings() == c);
@@ -175,6 +178,7 @@ TEST(copy, convert)
    auto node = make_shared<op::Convert>(arg0, et);
    auto new_node = node->copy_with_new_args(new_args);
    auto node_cast = dynamic_pointer_cast<op::Convert>(new_node);
+    ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments());
@@ -236,6 +240,7 @@ TEST(copy, FunctionCall)
        make_shared<op::Parameter>(element::Float32::element_type(), shape)};
    auto new_node = node->copy_with_new_args(new_args);
    auto node_cast = dynamic_pointer_cast<op::FunctionCall>(new_node);
+    ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments());
@@ -255,6 +260,7 @@ TEST(copy, GetTupleElement)
    auto node = make_shared<op::GetTupleElement>(arg0, n);
    auto new_node = node->copy_with_new_args(new_args);
    auto node_cast = dynamic_pointer_cast<op::GetTupleElement>(new_node);
+    ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments());
@@ -317,6 +323,7 @@ TEST(copy, parameter)
    auto node = make_shared<op::Parameter>(element::Float32::element_type(), shape);
    auto new_node = node->copy_with_new_args({});
    auto node_cast = dynamic_pointer_cast<op::Parameter>(new_node);
+    ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_node->get_arguments().size() == 0);
@@ -347,6 +354,7 @@ TEST(copy, reduce)
    auto node = make_shared<op::Reduce>(arg0, arg_init, f, axes);
    auto new_node = node->copy_with_new_args(new_args);
    auto node_cast = dynamic_pointer_cast<op::Reduce>(new_node);
+    ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments());
@@ -372,6 +380,7 @@ TEST(copy, reshape)
    auto node = make_shared<op::Reshape>(arg0, axes, shape_out);
    auto new_node = node->copy_with_new_args(new_args);
    auto node_cast = dynamic_pointer_cast<op::Reshape>(new_node);
+    ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments());
@@ -393,6 +402,7 @@ TEST(copy, select)
    auto node = make_shared<op::Select>(arg0, arg1, arg2);
    auto new_node = node->copy_with_new_args(new_args);
    auto node_cast = dynamic_pointer_cast<op::Select>(new_node);
+    ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments());
@@ -427,6 +437,7 @@ TEST(copy, slice)
    auto node = make_shared<op::Slice>(arg0, lower, upper, step);
    auto new_node = node->copy_with_new_args(new_args);
    auto node_cast = dynamic_pointer_cast<op::Slice>(new_node);
+    ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments());
@@ -451,6 +462,7 @@ TEST(copy, sum)
    auto node = make_shared<op::Sum>(arg0, axes);
    auto new_node = node->copy_with_new_args(new_args);
    auto node_cast = dynamic_pointer_cast<op::Sum>(new_node);
+    ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments());
@@ -479,6 +491,7 @@ TEST(copy, tuple)
    auto node = make_shared<op::Tuple>(Nodes{arg0, arg1});
    auto new_node = node->copy_with_new_args(new_args);
    auto node_cast = dynamic_pointer_cast<op::Tuple>(new_node);
+    ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments());
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#include "gtest/gtest.h"
#include "ngraph/types/element_type.hpp"
using namespace ngraph;
TEST(element_type, from)
{
EXPECT_EQ(element::from<char>(), element::boolean);
EXPECT_EQ(element::from<bool>(), element::boolean);
EXPECT_EQ(element::from<float>(), element::f32);
EXPECT_EQ(element::from<double>(), element::f64);
EXPECT_EQ(element::from<int8_t>(), element::i8);
EXPECT_EQ(element::from<int16_t>(), element::i16);
EXPECT_EQ(element::from<int32_t>(), element::i32);
EXPECT_EQ(element::from<int64_t>(), element::i64);
EXPECT_EQ(element::from<uint8_t>(), element::u8);
EXPECT_EQ(element::from<uint16_t>(), element::u16);
EXPECT_EQ(element::from<uint32_t>(), element::u32);
EXPECT_EQ(element::from<uint64_t>(), element::u64);
}
@@ -185,20 +185,20 @@ TEST(util, all_close)
    auto backend = manager->allocate_backend();

    // Create some tensors for input/output
-    auto a = backend->make_parameterized_tensor_view<element::Float32>(Shape{2, 3});
-    auto b = backend->make_parameterized_tensor_view<element::Float32>(Shape{2, 3});
+    auto a = backend->make_primary_tensor_view(element::Float32::element_type(), Shape{2, 3});
+    auto b = backend->make_primary_tensor_view(element::Float32::element_type(), Shape{2, 3});

    copy_data(a, runtime::NDArray<float, 2>({{1, 2, 3}, {3, 4, 5}}).get_vector());
    copy_data(b, runtime::NDArray<float, 2>({{1, 2, 3}, {3, 4, 5}}).get_vector());

-    EXPECT_TRUE(ngraph::test::all_close(a, b));
+    EXPECT_TRUE(ngraph::test::all_close<float>(a, b));

-    auto c = backend->make_parameterized_tensor_view<element::Float32>(Shape{2, 3});
+    auto c = backend->make_primary_tensor_view(element::Float32::element_type(), Shape{2, 3});
    copy_data(c, runtime::NDArray<float, 2>({{1.1f, 2, 3}, {3, 4, 5}}).get_vector());

-    EXPECT_FALSE(ngraph::test::all_close(c, a, 0, .05f));
-    EXPECT_TRUE(ngraph::test::all_close(c, a, 0, .11f));
+    EXPECT_FALSE(ngraph::test::all_close<float>(c, a, 0, .05f));
+    EXPECT_TRUE(ngraph::test::all_close<float>(c, a, 0, .11f));

-    EXPECT_FALSE(ngraph::test::all_close(c, a, .05f, 0));
-    EXPECT_TRUE(ngraph::test::all_close(c, a, .11f, 0));
+    EXPECT_FALSE(ngraph::test::all_close<float>(c, a, .05f, 0));
+    EXPECT_TRUE(ngraph::test::all_close<float>(c, a, .11f, 0));
}
@@ -54,11 +54,11 @@ namespace ngraph
        /// @param rtol Relative tolerance
        /// @param atol Absolute tolerance
        /// Returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|.
-        template <typename ET>
-        bool all_close(const std::shared_ptr<ngraph::runtime::ParameterizedTensorView<ET>>& a,
-                       const std::shared_ptr<ngraph::runtime::ParameterizedTensorView<ET>>& b,
-                       typename ET::type rtol = 1e-5f,
-                       typename ET::type atol = 1e-8f)
+        template <typename T>
+        bool all_close(const std::shared_ptr<ngraph::runtime::TensorView>& a,
+                       const std::shared_ptr<ngraph::runtime::TensorView>& b,
+                       T rtol = 1e-5f,
+                       T atol = 1e-8f)
{
// Check that the layouts are compatible
if (*a->get_tensor_view_layout() != *b->get_tensor_view_layout())
@@ -69,7 +69,7 @@ namespace ngraph
            if (a->get_shape() != b->get_shape())
                return false;

-            return all_close(a->get_vector(), b->get_vector(), rtol, atol);
+            return all_close(a->get_vector<T>(), b->get_vector<T>(), rtol, atol);
}
/// @brief Same as numpy.allclose
@@ -78,12 +78,11 @@ namespace ngraph
        /// @param rtol Relative tolerance
        /// @param atol Absolute tolerance
        /// Returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|.
-        template <typename ET>
-        bool all_close(
-            const std::vector<std::shared_ptr<ngraph::runtime::ParameterizedTensorView<ET>>>& as,
-            const std::vector<std::shared_ptr<ngraph::runtime::ParameterizedTensorView<ET>>>& bs,
-            typename ET::type rtol,
-            typename ET::type atol)
+        template <typename T>
+        bool all_close(const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& as,
+                       const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& bs,
+                       T rtol,
+                       T atol)
{
if (as.size() != bs.size())
{
......
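With ParameterizedTensorView gone, the element type can no longer be deduced from the tensor arguments, so callers now name it explicitly, as the updated test above does. A sketch:

    auto a = backend->make_primary_tensor_view<float>(Shape{2, 3});
    auto b = backend->make_primary_tensor_view<float>(Shape{2, 3});
    bool close = ngraph::test::all_close<float>(a, b, 1e-5f, 1e-8f); // rtol, atol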
@@ -39,24 +39,24 @@ namespace ngraph
        /// @returns f'(X_i..., c) where f'(x_i, ..., c)_j is backprop for X_j
        std::shared_ptr<Function> backprop_function(const std::shared_ptr<Function>& f);

-        template <typename ET>
-        std::vector<std::shared_ptr<runtime::ParameterizedTensorView<ET>>> backprop_derivative(
-            const std::shared_ptr<runtime::Manager>& manager,
-            const std::shared_ptr<runtime::Backend>& backend,
-            const std::shared_ptr<Function>& f,
-            const std::vector<std::shared_ptr<runtime::ParameterizedTensorView<ET>>>& args)
+        template <typename T>
+        std::vector<std::shared_ptr<runtime::TensorView>>
+            backprop_derivative(const std::shared_ptr<runtime::Manager>& manager,
+                                const std::shared_ptr<runtime::Backend>& backend,
+                                const std::shared_ptr<Function>& f,
+                                const std::vector<std::shared_ptr<runtime::TensorView>>& args)
        {
            auto y = f->get_result();
            Shape y_shape =
                std::dynamic_pointer_cast<const TensorViewType>(y->get_value_type())->get_shape();

-            auto c_param = std::make_shared<op::Parameter>(ET::element_type(), y_shape);
-            auto c_arg = backend->make_parameterized_tensor_view<ET>(y_shape);
+            auto c_param = std::make_shared<op::Parameter>(element::from<T>(), y_shape);
+            auto c_arg = backend->make_primary_tensor_view<T>(y_shape);
            auto params = f->get_parameters();

            std::vector<std::shared_ptr<Node>> deriv_nodes;
-            std::vector<std::shared_ptr<runtime::ParameterizedTensorView<ET>>> bprops;
-            std::vector<std::shared_ptr<runtime::ParameterizedTensorView<ET>>> results;
+            std::vector<std::shared_ptr<runtime::TensorView>> bprops;
+            std::vector<std::shared_ptr<runtime::TensorView>> results;
for (auto param : params)
{
Shape s = y_shape;
@@ -64,8 +64,8 @@ namespace ngraph
                std::dynamic_pointer_cast<const TensorViewType>(param->get_value_type())
                    ->get_shape();
            s.insert(s.end(), param_shape.begin(), param_shape.end());
-            results.push_back(backend->make_parameterized_tensor_view<ET>(s));
-            bprops.push_back(backend->make_parameterized_tensor_view<ET>(param_shape));
+            results.push_back(backend->make_primary_tensor_view<T>(s));
+            bprops.push_back(backend->make_primary_tensor_view<T>(param_shape));
deriv_nodes.push_back(y->backprop_node(param, c_param));
}
@@ -78,22 +78,22 @@ namespace ngraph
            auto cf = backend->make_call_frame(external);

            // We compute the derivatives chunk by chunk
-            std::vector<typename std::vector<typename ET::type>::iterator> result_pos;
-            std::vector<std::vector<typename ET::type>> result_vect;
+            std::vector<typename std::vector<T>::iterator> result_pos;
+            std::vector<std::vector<T>> result_vect;
            for (auto result : results)
            {
-                result_vect.push_back(result->get_vector()); // storage for results
+                result_vect.push_back(result->get_vector<T>()); // storage for results
                result_pos.push_back(result_vect.back().begin());
            }

-            ngraph::runtime::TensorViewPtrs args_tv;
+            std::vector<std::shared_ptr<ngraph::runtime::TensorView>> args_tv;
            args_tv.insert(args_tv.begin(), args.begin(), args.end());
            args_tv.push_back(c_arg);

-            runtime::TensorViewPtrs bprops_tv;
+            std::vector<std::shared_ptr<ngraph::runtime::TensorView>> bprops_tv;
            bprops_tv.insert(bprops_tv.begin(), bprops.begin(), bprops.end());

-            auto c_vec = c_arg->get_vector();
+            auto c_vec = c_arg->template get_vector<T>();
for (size_t i = 0; i < c_vec.size(); i++)
{
c_vec[i] = 1;
@@ -103,7 +103,7 @@ namespace ngraph
                c_arg->write(c_vec);
                for (size_t j = 0; j < results.size(); j++)
                {
-                    auto bprop_vec = bprops[j]->get_vector();
+                    auto bprop_vec = bprops[j]->get_vector<T>();
result_pos[j] = std::copy(bprop_vec.begin(), bprop_vec.end(), result_pos[j]);
}
}
......
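A call-site sketch for the updated signature, assuming the "NGVM" manager/backend pairing used elsewhere in the tests and a Function f taking float tensors (names hypothetical, namespace qualifiers omitted):

    auto manager = runtime::Manager::get("NGVM");
    auto backend = manager->allocate_backend();
    std::vector<std::shared_ptr<runtime::TensorView>> args; // one initialized view per parameter of f
    auto derivs = backprop_derivative<float>(manager, backend, f, args);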
@@ -33,13 +33,13 @@ namespace ngraph
        /// @param args Values for the arguments (the independent variables)
        /// @param delta increment for the variables
        /// @returns vector of dy/dvar, where each dy/dvar's shape is concat(y.shape(), var.shape())
-        template <typename ET>
-        std::vector<std::shared_ptr<runtime::ParameterizedTensorView<ET>>> numeric_derivative(
-            const std::shared_ptr<runtime::Manager>& manager,
-            const std::shared_ptr<runtime::Backend>& backend,
-            const std::shared_ptr<Function>& f,
-            const std::vector<std::shared_ptr<runtime::ParameterizedTensorView<ET>>>& args,
-            typename ET::type delta)
+        template <typename T>
+        std::vector<std::shared_ptr<runtime::TensorView>>
+            numeric_derivative(const std::shared_ptr<runtime::Manager>& manager,
+                               const std::shared_ptr<runtime::Backend>& backend,
+                               const std::shared_ptr<Function>& f,
+                               const std::vector<std::shared_ptr<runtime::TensorView>>& args,
+                               T delta)
{
auto y = f->get_result();
@@ -49,7 +49,7 @@ namespace ngraph
            auto params = f->get_parameters();

            // Results for each derivative, shape Y|X_i
-            std::vector<std::shared_ptr<runtime::ParameterizedTensorView<ET>>> results;
+            std::vector<std::shared_ptr<runtime::TensorView>> results;
for (auto param : params)
{
Shape s = y_shape;
@@ -57,39 +57,36 @@ namespace ngraph
                    std::dynamic_pointer_cast<const TensorViewType>(param->get_value_type())
                        ->get_shape();
                s.insert(s.end(), param_shape.begin(), param_shape.end());
-                results.push_back(backend->make_parameterized_tensor_view<ET>(s));
+                results.push_back(backend->make_primary_tensor_view<T>(s));
            }

            auto external = manager->compile(f);
            auto cf = backend->make_call_frame(external);

            // ref_y is the function evaluated at the args
-            auto ref_y = backend->make_parameterized_tensor_view<ET>(y_shape);
-            ngraph::runtime::TensorViewPtrs args_tv;
-            args_tv.insert(args_tv.begin(), args.begin(), args.end());
-            cf->tensor_call(args_tv, runtime::TensorViewPtrs{ref_y});
-            auto& ref_vec = ref_y->get_vector();
+            auto ref_y = backend->make_primary_tensor_view<T>(y_shape);
+            cf->tensor_call(args, std::vector<std::shared_ptr<ngraph::runtime::TensorView>>{ref_y});
+            auto ref_vec = ref_y->template get_vector<T>();

            // inc_y will hold f(x+dx) values
-            auto inc_y = backend->make_parameterized_tensor_view<ET>(y_shape);
-            auto& inc_vec = inc_y->get_vector();
+            auto inc_y = backend->make_primary_tensor_view<T>(y_shape);

            // Assuming vars, y, and results are row-major
-            typename ET::type inv_delta = 1 / delta;
+            T inv_delta = 1 / delta;

            for (size_t i = 0; i < args.size(); ++i)
            {
                auto arg = args[i];
-                auto res = results[i]->get_vector();
-                auto vec = arg->get_vector();
+                auto res = results[i]->get_vector<T>();
+                auto vec = arg->get_vector<T>();
                for (size_t j = 0; j < vec.size(); j++)
                {
                    auto old_val = vec[j];
                    vec[j] += delta;
                    arg->write(vec);
-                    cf->tensor_call(args_tv, {inc_y});
+                    cf->tensor_call(args, {inc_y});
+                    auto inc_vec = inc_y->template get_vector<T>();
vec[j] = old_val;
arg->write(vec);
size_t res_k = j;
......
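numeric_derivative approximates each dy/dx_j with a one-sided difference, (f(x + delta*e_j) - f(x)) / delta, which is why inv_delta = 1 / delta appears above. The typical test pattern compares it against the analytic result (same hypothetical setup as the backprop sketch; namespace qualifiers omitted):

    auto numeric  = numeric_derivative<float>(manager, backend, f, args, 1e-3f);
    auto symbolic = backprop_derivative<float>(manager, backend, f, args);
    bool ok = ngraph::test::all_close(numeric, symbolic, 1e-2f, 1e-3f); // T deduced as float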
@@ -25,13 +25,11 @@ namespace ngraph
    {
        /// @brief A predictable pseudo-random number generator
        /// The seed is initialized so that we get repeatable pseudo-random numbers for tests
-        template <typename ET>
+        template <typename T>
        class Uniform
        {
        public:
-            using type = typename ET::type;
-
-            Uniform(type min, type max, type seed = 0)
+            Uniform(T min, T max, T seed = 0)
: m_engine(seed)
, m_distribution(min, max)
, m_r(std::bind(m_distribution, m_engine))
@@ -40,11 +38,11 @@ namespace ngraph
            /// @brief Randomly initialize a tensor
            /// @param ptv The tensor to initialize
-            const std::shared_ptr<runtime::ParameterizedTensorView<ET>>
-                initialize(const std::shared_ptr<runtime::ParameterizedTensorView<ET>>& ptv)
+            const std::shared_ptr<runtime::TensorView>
+                initialize(const std::shared_ptr<runtime::TensorView>& ptv)
            {
-                auto vec = ptv->get_vector();
-                for (auto& elt : vec)
+                std::vector<T> vec = ptv->get_vector<T>();
+                for (T& elt : vec)
{
elt = m_r();
}
@@ -54,8 +52,8 @@ namespace ngraph
        protected:
            std::default_random_engine m_engine;
-            std::uniform_real_distribution<type> m_distribution;
-            std::function<type()> m_r;
+            std::uniform_real_distribution<T> m_distribution;
+            std::function<T()> m_r;
};
}
}
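A usage sketch for the de-parameterized generator (assuming a backend as in the examples above; initialize() writes the samples back into the view):

    Uniform<float> rng(-1.0f, 1.0f); // min, max, default seed 0
    auto tv = backend->make_primary_tensor_view<float>(Shape{2, 3});
    rng.initialize(tv);              // fills tv with uniform samples from [-1, 1)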