Commit de82a3eb authored by Robert Kimball's avatar Robert Kimball Committed by Scott Cyphers

ParameterizedTensorView and ElementType cleanup (#264)

* ParameterizedTensorView and ElementType cleanup

* change element to_type() to from()
parent f2a23d55
...@@ -69,6 +69,18 @@ size_t Tensor::get_pool_offset() const ...@@ -69,6 +69,18 @@ size_t Tensor::get_pool_offset() const
std::ostream& operator<<(std::ostream& out, const Tensor& tensor) std::ostream& operator<<(std::ostream& out, const Tensor& tensor)
{ {
out << "Tensor(" << tensor.get_name() << ")"; out << "Tensor(" << tensor.get_name() << ", ";
out << (tensor.is_persistent() ? "P" : "");
out << (tensor.is_constant() ? "C" : "");
out << (tensor.is_input() ? "I" : "");
out << (tensor.is_output() ? "O" : "");
if (!tensor.is_persistent() && !tensor.is_constant() && !tensor.is_input() &&
!tensor.is_output())
{
out << "T";
}
out << ")";
return out; return out;
} }
...@@ -34,7 +34,6 @@ Function::Function(const std::shared_ptr<Node>& result, ...@@ -34,7 +34,6 @@ Function::Function(const std::shared_ptr<Node>& result,
, m_temporary_pool_size(0) , m_temporary_pool_size(0)
, m_instance_id(m_next_instance_id.fetch_add(1)) , m_instance_id(m_next_instance_id.fetch_add(1))
{ {
m_result->set_is_output();
traverse_nodes(this, [&](shared_ptr<Node> node) { m_ops.push_back(node); }); traverse_nodes(this, [&](shared_ptr<Node> node) { m_ops.push_back(node); });
} }
......
...@@ -67,6 +67,7 @@ void ngraph::pass::Manager::run_passes(shared_ptr<Function> func) ...@@ -67,6 +67,7 @@ void ngraph::pass::Manager::run_passes(shared_ptr<Function> func)
vector<shared_ptr<Function>> fs; vector<shared_ptr<Function>> fs;
for (shared_ptr<Function> f : get_state().get_functions()) for (shared_ptr<Function> f : get_state().get_functions())
{ {
f->get_result()->set_is_output();
fs.push_back(f); fs.push_back(f);
} }
......
...@@ -18,7 +18,6 @@ ...@@ -18,7 +18,6 @@
#include "ngraph/runtime/parameterized_tensor_view.hpp" #include "ngraph/runtime/parameterized_tensor_view.hpp"
#include "ngraph/runtime/tensor_view.hpp" #include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/runtime/tuple.hpp" #include "ngraph/runtime/tuple.hpp"
#include "ngraph/types/element_type.hpp"
using namespace ngraph::runtime; using namespace ngraph::runtime;
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
#include "ngraph/common.hpp" #include "ngraph/common.hpp"
#include "ngraph/log.hpp" #include "ngraph/log.hpp"
#include "ngraph/runtime/ndarray.hpp" #include "ngraph/types/element_type.hpp"
namespace ngraph namespace ngraph
{ {
...@@ -35,9 +35,6 @@ namespace ngraph ...@@ -35,9 +35,6 @@ namespace ngraph
class Tuple; class Tuple;
class Value; class Value;
template <typename ET>
class ParameterizedTensorView;
/// @brief Interface to a generic backend. /// @brief Interface to a generic backend.
/// ///
/// Backends are responsible for function execution and value allocation. /// Backends are responsible for function execution and value allocation.
...@@ -56,12 +53,11 @@ namespace ngraph ...@@ -56,12 +53,11 @@ namespace ngraph
make_primary_tensor_view(const ngraph::element::Type& element_type, make_primary_tensor_view(const ngraph::element::Type& element_type,
const Shape& shape); const Shape& shape);
template <typename ET> template <typename T>
std::shared_ptr<ngraph::runtime::ParameterizedTensorView<ET>> std::shared_ptr<ngraph::runtime::TensorView>
make_parameterized_tensor_view(const Shape& shape) make_primary_tensor_view(const Shape& shape)
{ {
return std::dynamic_pointer_cast<ngraph::runtime::ParameterizedTensorView<ET>>( return make_primary_tensor_view(element::from<T>(), shape);
make_primary_tensor_view(ET::element_type(), shape));
} }
/// @brief Construct a tuple handle from a sequence of values. /// @brief Construct a tuple handle from a sequence of values.
......
...@@ -170,13 +170,16 @@ void ExternalFunction::compile() ...@@ -170,13 +170,16 @@ void ExternalFunction::compile()
return; return;
} }
string function_name = m_function->get_name();
string dump_filename = file_util::path_join(s_output_dir, function_name + "_ops.txt");
pass::Manager pass_manager; pass::Manager pass_manager;
pass_manager.register_pass<pass::TopologicalSort>(); pass_manager.register_pass<pass::TopologicalSort>();
// For now, just make everyone row-major. // For now, just make everyone row-major.
pass_manager.register_pass<pass::AssignLayout<DenseTensorViewLayout>>(); pass_manager.register_pass<pass::AssignLayout<DenseTensorViewLayout>>();
pass_manager.register_pass<pass::Liveness>(); pass_manager.register_pass<pass::Liveness>();
pass_manager.register_pass<pass::MemoryLayout>(64); pass_manager.register_pass<pass::MemoryLayout>(64);
pass_manager.register_pass<pass::DumpSorted>("sorted_ops.txt"); pass_manager.register_pass<pass::DumpSorted>(dump_filename);
pass_manager.run_passes(m_function); pass_manager.run_passes(m_function);
// Now we build the TU // Now we build the TU
...@@ -311,7 +314,6 @@ using namespace ngraph::runtime::cpu::eigen; ...@@ -311,7 +314,6 @@ using namespace ngraph::runtime::cpu::eigen;
// TODO: Cleanup and make this a utility function // TODO: Cleanup and make this a utility function
string function_name = m_function->get_name();
file_util::make_directory(s_output_dir); file_util::make_directory(s_output_dir);
string filename = file_util::path_join(s_output_dir, function_name + "_codegen.cpp"); string filename = file_util::path_join(s_output_dir, function_name + "_codegen.cpp");
ofstream out(filename); ofstream out(filename);
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include "ngraph/function.hpp" #include "ngraph/function.hpp"
#include "ngraph/runtime/call_frame.hpp" #include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/parameterized_tensor_view.hpp"
#include "ngraph/runtime/tensor_view.hpp" #include "ngraph/runtime/tensor_view.hpp"
namespace ngraph namespace ngraph
......
...@@ -25,9 +25,11 @@ const element::Type element::boolean(8, false, false, "bool"); ...@@ -25,9 +25,11 @@ const element::Type element::boolean(8, false, false, "bool");
const element::Type element::f32(32, true, true, "float"); const element::Type element::f32(32, true, true, "float");
const element::Type element::f64(64, true, true, "double"); const element::Type element::f64(64, true, true, "double");
const element::Type element::i8(8, false, true, "int8_t"); const element::Type element::i8(8, false, true, "int8_t");
const element::Type element::i16(16, false, true, "int16_t");
const element::Type element::i32(32, false, true, "int32_t"); const element::Type element::i32(32, false, true, "int32_t");
const element::Type element::i64(64, false, true, "int64_t"); const element::Type element::i64(64, false, true, "int64_t");
const element::Type element::u8(8, false, false, "uint8_t"); const element::Type element::u8(8, false, false, "uint8_t");
const element::Type element::u16(16, false, false, "uint16_t");
const element::Type element::u32(32, false, false, "uint32_t"); const element::Type element::u32(32, false, false, "uint32_t");
const element::Type element::u64(64, false, false, "uint64_t"); const element::Type element::u64(64, false, false, "uint64_t");
......
...@@ -29,14 +29,22 @@ ...@@ -29,14 +29,22 @@
namespace ngraph namespace ngraph
{ {
namespace runtime
{
template <typename ET>
class ParameterizedTensorView;
}
namespace element namespace element
{ {
class Type;
extern const Type boolean;
extern const Type f32;
extern const Type f64;
extern const Type i8;
extern const Type i16;
extern const Type i32;
extern const Type i64;
extern const Type u8;
extern const Type u16;
extern const Type u32;
extern const Type u64;
class Type class Type
{ {
Type(const Type&) = delete; Type(const Type&) = delete;
...@@ -66,15 +74,58 @@ namespace ngraph ...@@ -66,15 +74,58 @@ namespace ngraph
const std::string m_cname; const std::string m_cname;
}; };
extern const Type boolean; template <typename T>
extern const Type f32; const Type& from()
extern const Type f64; {
extern const Type i8; if (typeid(T) == typeid(char) || typeid(T) == typeid(bool))
extern const Type i32; {
extern const Type i64; return boolean;
extern const Type u8; }
extern const Type u32; else if (typeid(T) == typeid(float))
extern const Type u64; {
return f32;
}
else if (typeid(T) == typeid(double))
{
return f64;
}
else if (typeid(T) == typeid(int8_t))
{
return i8;
}
else if (typeid(T) == typeid(int16_t))
{
return i16;
}
else if (typeid(T) == typeid(int32_t))
{
return i32;
}
else if (typeid(T) == typeid(int64_t))
{
return i64;
}
else if (typeid(T) == typeid(uint8_t))
{
return u8;
}
else if (typeid(T) == typeid(uint16_t))
{
return u16;
}
else if (typeid(T) == typeid(uint32_t))
{
return u32;
}
else if (typeid(T) == typeid(uint64_t))
{
return u64;
}
else
{
throw std::invalid_argument("Unknown type");
}
}
std::ostream& operator<<(std::ostream& out, const ngraph::element::Type& obj); std::ostream& operator<<(std::ostream& out, const ngraph::element::Type& obj);
......
...@@ -23,9 +23,10 @@ include_directories( ...@@ -23,9 +23,10 @@ include_directories(
set (SRC set (SRC
autodiff.cpp autodiff.cpp
copy.cpp
build_graph.cpp build_graph.cpp
copy.cpp
eigen.cpp eigen.cpp
element_type.cpp
file_util.cpp file_util.cpp
input_output_assign.cpp input_output_assign.cpp
main.cpp main.cpp
......
...@@ -29,19 +29,17 @@ ...@@ -29,19 +29,17 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
template <typename ET> template <typename T>
bool autodiff_numeric_compare( bool autodiff_numeric_compare(const std::shared_ptr<runtime::Manager>& manager,
const std::shared_ptr<runtime::Manager>& manager, const std::shared_ptr<runtime::Backend>& backend,
const std::shared_ptr<runtime::Backend>& backend, std::function<std::shared_ptr<Function>()> make_graph,
std::function<std::shared_ptr<Function>()> make_graph, const std::vector<std::shared_ptr<runtime::TensorView>>& args,
const std::vector<std::shared_ptr<runtime::ParameterizedTensorView<ET>>>& args, T rtol,
typename ET::type rtol, T atol)
typename ET::type atol)
{ {
auto results_num = auto results_num = autodiff::numeric_derivative<T>(manager, backend, make_graph(), args, .001f);
autodiff::numeric_derivative<element::Float32>(manager, backend, make_graph(), args, .001f); auto results_sym = autodiff::backprop_derivative<T>(manager, backend, make_graph(), args);
auto results_sym =
autodiff::backprop_derivative<element::Float32>(manager, backend, make_graph(), args);
return test::all_close(results_num, results_sym, .01f, .01f); return test::all_close(results_num, results_sym, .01f, .01f);
} }
...@@ -50,10 +48,10 @@ TEST(backwards, add) ...@@ -50,10 +48,10 @@ TEST(backwards, add)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
auto shape = Shape{2, 3}; auto shape = Shape{2, 3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x1 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto make_graph = [shape]() { auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
...@@ -61,8 +59,8 @@ TEST(backwards, add) ...@@ -61,8 +59,8 @@ TEST(backwards, add)
return make_shared<Function>( return make_shared<Function>(
X0 + X1, nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0, X1}); X0 + X1, nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
}; };
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>( EXPECT_TRUE(
manager, backend, make_graph, {x0, x1}, .01f, .01f)); autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
} }
TEST(backwards, add_nested) TEST(backwards, add_nested)
...@@ -70,10 +68,10 @@ TEST(backwards, add_nested) ...@@ -70,10 +68,10 @@ TEST(backwards, add_nested)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
auto shape = Shape{2, 3}; auto shape = Shape{2, 3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x1 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto make_graph = [shape]() { auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
...@@ -81,8 +79,8 @@ TEST(backwards, add_nested) ...@@ -81,8 +79,8 @@ TEST(backwards, add_nested)
return make_shared<Function>( return make_shared<Function>(
(X0 + X1) + (X1 + X0), nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0, X1}); (X0 + X1) + (X1 + X0), nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
}; };
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>( EXPECT_TRUE(
manager, backend, make_graph, {x0, x1}, .01f, .01f)); autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
} }
TEST(backwards, broadcast0) TEST(backwards, broadcast0)
...@@ -90,9 +88,9 @@ TEST(backwards, broadcast0) ...@@ -90,9 +88,9 @@ TEST(backwards, broadcast0)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
auto shape = Shape{3}; auto shape = Shape{3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto make_graph = [shape]() { auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
...@@ -100,8 +98,7 @@ TEST(backwards, broadcast0) ...@@ -100,8 +98,7 @@ TEST(backwards, broadcast0)
nullptr, nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0}); std::vector<std::shared_ptr<op::Parameter>>{X0});
}; };
EXPECT_TRUE( EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x0}, .01f, .01f));
autodiff_numeric_compare<element::Float32>(manager, backend, make_graph, {x0}, .01f, .01f));
} }
TEST(backwards, broadcast1) TEST(backwards, broadcast1)
...@@ -109,9 +106,9 @@ TEST(backwards, broadcast1) ...@@ -109,9 +106,9 @@ TEST(backwards, broadcast1)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
auto shape = Shape{3}; auto shape = Shape{3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto make_graph = [shape]() { auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
...@@ -119,8 +116,7 @@ TEST(backwards, broadcast1) ...@@ -119,8 +116,7 @@ TEST(backwards, broadcast1)
nullptr, nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0}); std::vector<std::shared_ptr<op::Parameter>>{X0});
}; };
EXPECT_TRUE( EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x0}, .01f, .01f));
autodiff_numeric_compare<element::Float32>(manager, backend, make_graph, {x0}, .01f, .01f));
} }
TEST(backwards, divide) TEST(backwards, divide)
...@@ -128,13 +124,13 @@ TEST(backwards, divide) ...@@ -128,13 +124,13 @@ TEST(backwards, divide)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
test::Uniform<element::Float32> rng1(1.0f, 2.0f); test::Uniform<float> rng1(1.0f, 2.0f);
test::Uniform<element::Float32> rng2(-2.0f, -1.0f); test::Uniform<float> rng2(-2.0f, -1.0f);
auto shape = Shape{2, 3}; auto shape = Shape{2, 3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto x1 = rng1.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x1 = rng1.initialize(backend->make_primary_tensor_view<float>(shape));
auto x2 = rng2.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x2 = rng2.initialize(backend->make_primary_tensor_view<float>(shape));
auto make_graph = [shape]() { auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
...@@ -142,10 +138,10 @@ TEST(backwards, divide) ...@@ -142,10 +138,10 @@ TEST(backwards, divide)
return make_shared<Function>( return make_shared<Function>(
X0 / X1, nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0, X1}); X0 / X1, nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
}; };
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>( EXPECT_TRUE(
manager, backend, make_graph, {x0, x1}, .01f, .01f)); autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>( EXPECT_TRUE(
manager, backend, make_graph, {x0, x2}, .01f, .01f)); autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x2}, .01f, .01f));
} }
TEST(backwards, dot_scalar_scalar) TEST(backwards, dot_scalar_scalar)
...@@ -153,11 +149,11 @@ TEST(backwards, dot_scalar_scalar) ...@@ -153,11 +149,11 @@ TEST(backwards, dot_scalar_scalar)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
auto shape0 = Shape{}; auto shape0 = Shape{};
auto shape1 = Shape{}; auto shape1 = Shape{};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape0)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape0));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape1)); auto x1 = rng.initialize(backend->make_primary_tensor_view<float>(shape1));
auto make_graph = [shape0, shape1]() { auto make_graph = [shape0, shape1]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape0); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape0);
...@@ -166,8 +162,8 @@ TEST(backwards, dot_scalar_scalar) ...@@ -166,8 +162,8 @@ TEST(backwards, dot_scalar_scalar)
nullptr, nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0, X1}); std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
}; };
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>( EXPECT_TRUE(
manager, backend, make_graph, {x0, x1}, .01f, .01f)); autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
} }
TEST(backwards, dot_scalar_tensor) TEST(backwards, dot_scalar_tensor)
...@@ -175,11 +171,11 @@ TEST(backwards, dot_scalar_tensor) ...@@ -175,11 +171,11 @@ TEST(backwards, dot_scalar_tensor)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
auto shape0 = Shape{}; auto shape0 = Shape{};
auto shape1 = Shape{3, 4}; auto shape1 = Shape{3, 4};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape0)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape0));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape1)); auto x1 = rng.initialize(backend->make_primary_tensor_view<float>(shape1));
auto make_graph = [shape0, shape1]() { auto make_graph = [shape0, shape1]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape0); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape0);
...@@ -188,8 +184,8 @@ TEST(backwards, dot_scalar_tensor) ...@@ -188,8 +184,8 @@ TEST(backwards, dot_scalar_tensor)
nullptr, nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0, X1}); std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
}; };
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>( EXPECT_TRUE(
manager, backend, make_graph, {x0, x1}, .01f, .01f)); autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
} }
TEST(backwards, dot_tensor_scalar) TEST(backwards, dot_tensor_scalar)
...@@ -197,11 +193,11 @@ TEST(backwards, dot_tensor_scalar) ...@@ -197,11 +193,11 @@ TEST(backwards, dot_tensor_scalar)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
auto shape0 = Shape{3, 4}; auto shape0 = Shape{3, 4};
auto shape1 = Shape{}; auto shape1 = Shape{};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape0)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape0));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape1)); auto x1 = rng.initialize(backend->make_primary_tensor_view<float>(shape1));
auto make_graph = [shape0, shape1]() { auto make_graph = [shape0, shape1]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape0); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape0);
...@@ -210,8 +206,8 @@ TEST(backwards, dot_tensor_scalar) ...@@ -210,8 +206,8 @@ TEST(backwards, dot_tensor_scalar)
nullptr, nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0, X1}); std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
}; };
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>( EXPECT_TRUE(
manager, backend, make_graph, {x0, x1}, .01f, .01f)); autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
} }
TEST(backwards, dot_vector_vector) TEST(backwards, dot_vector_vector)
...@@ -219,11 +215,11 @@ TEST(backwards, dot_vector_vector) ...@@ -219,11 +215,11 @@ TEST(backwards, dot_vector_vector)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
auto shape0 = Shape{3}; auto shape0 = Shape{3};
auto shape1 = Shape{3}; auto shape1 = Shape{3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape0)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape0));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape1)); auto x1 = rng.initialize(backend->make_primary_tensor_view<float>(shape1));
auto make_graph = [shape0, shape1]() { auto make_graph = [shape0, shape1]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape0); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape0);
...@@ -232,8 +228,8 @@ TEST(backwards, dot_vector_vector) ...@@ -232,8 +228,8 @@ TEST(backwards, dot_vector_vector)
nullptr, nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0, X1}); std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
}; };
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>( EXPECT_TRUE(
manager, backend, make_graph, {x0, x1}, .01f, .01f)); autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
} }
TEST(backwards, dot_tensor_vector) TEST(backwards, dot_tensor_vector)
...@@ -241,11 +237,11 @@ TEST(backwards, dot_tensor_vector) ...@@ -241,11 +237,11 @@ TEST(backwards, dot_tensor_vector)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
auto shape0 = Shape{4, 3}; auto shape0 = Shape{4, 3};
auto shape1 = Shape{3}; auto shape1 = Shape{3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape0)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape0));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape1)); auto x1 = rng.initialize(backend->make_primary_tensor_view<float>(shape1));
auto make_graph = [shape0, shape1]() { auto make_graph = [shape0, shape1]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape0); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape0);
...@@ -254,8 +250,8 @@ TEST(backwards, dot_tensor_vector) ...@@ -254,8 +250,8 @@ TEST(backwards, dot_tensor_vector)
nullptr, nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0, X1}); std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
}; };
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>( EXPECT_TRUE(
manager, backend, make_graph, {x0, x1}, .01f, .01f)); autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
} }
TEST(backwards, dot_tensor2_tensor2) TEST(backwards, dot_tensor2_tensor2)
...@@ -263,11 +259,11 @@ TEST(backwards, dot_tensor2_tensor2) ...@@ -263,11 +259,11 @@ TEST(backwards, dot_tensor2_tensor2)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
auto shape0 = Shape{4, 3}; auto shape0 = Shape{4, 3};
auto shape1 = Shape{3, 5}; auto shape1 = Shape{3, 5};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape0)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape0));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape1)); auto x1 = rng.initialize(backend->make_primary_tensor_view<float>(shape1));
auto make_graph = [shape0, shape1]() { auto make_graph = [shape0, shape1]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape0); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape0);
...@@ -276,8 +272,8 @@ TEST(backwards, dot_tensor2_tensor2) ...@@ -276,8 +272,8 @@ TEST(backwards, dot_tensor2_tensor2)
nullptr, nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0, X1}); std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
}; };
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>( EXPECT_TRUE(
manager, backend, make_graph, {x0, x1}, .01f, .01f)); autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
} }
TEST(backwards, exp) TEST(backwards, exp)
...@@ -285,17 +281,16 @@ TEST(backwards, exp) ...@@ -285,17 +281,16 @@ TEST(backwards, exp)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
auto shape = Shape{2, 3}; auto shape = Shape{2, 3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto make_graph = [shape]() { auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
return make_shared<Function>( return make_shared<Function>(
make_shared<op::Exp>(X0), nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0}); make_shared<op::Exp>(X0), nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0});
}; };
EXPECT_TRUE( EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x0}, .01f, .01f));
autodiff_numeric_compare<element::Float32>(manager, backend, make_graph, {x0}, .01f, .01f));
} }
TEST(backwards, log) TEST(backwards, log)
...@@ -303,17 +298,16 @@ TEST(backwards, log) ...@@ -303,17 +298,16 @@ TEST(backwards, log)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(1.0f, 2.0f); test::Uniform<float> rng(1.0f, 2.0f);
auto shape = Shape{2, 3}; auto shape = Shape{2, 3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto make_graph = [shape]() { auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
return make_shared<Function>( return make_shared<Function>(
make_shared<op::Log>(X0), nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0}); make_shared<op::Log>(X0), nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0});
}; };
EXPECT_TRUE( EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x0}, .01f, .01f));
autodiff_numeric_compare<element::Float32>(manager, backend, make_graph, {x0}, .01f, .01f));
} }
TEST(backwards, maximum) TEST(backwards, maximum)
...@@ -321,10 +315,10 @@ TEST(backwards, maximum) ...@@ -321,10 +315,10 @@ TEST(backwards, maximum)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
auto shape = Shape{2, 3}; auto shape = Shape{2, 3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x1 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto make_graph = [shape]() { auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
...@@ -333,8 +327,8 @@ TEST(backwards, maximum) ...@@ -333,8 +327,8 @@ TEST(backwards, maximum)
nullptr, nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0, X1}); std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
}; };
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>( EXPECT_TRUE(
manager, backend, make_graph, {x0, x1}, .01f, .01f)); autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
} }
TEST(backwards, minimum) TEST(backwards, minimum)
...@@ -342,10 +336,10 @@ TEST(backwards, minimum) ...@@ -342,10 +336,10 @@ TEST(backwards, minimum)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
auto shape = Shape{2, 3}; auto shape = Shape{2, 3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x1 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto make_graph = [shape]() { auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
...@@ -354,8 +348,8 @@ TEST(backwards, minimum) ...@@ -354,8 +348,8 @@ TEST(backwards, minimum)
nullptr, nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0, X1}); std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
}; };
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>( EXPECT_TRUE(
manager, backend, make_graph, {x0, x1}, .01f, .01f)); autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
} }
TEST(backwards, multiply) TEST(backwards, multiply)
...@@ -363,10 +357,10 @@ TEST(backwards, multiply) ...@@ -363,10 +357,10 @@ TEST(backwards, multiply)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
auto shape = Shape{2, 3}; auto shape = Shape{2, 3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x1 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto make_graph = [shape]() { auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
...@@ -374,8 +368,8 @@ TEST(backwards, multiply) ...@@ -374,8 +368,8 @@ TEST(backwards, multiply)
return make_shared<Function>( return make_shared<Function>(
X0 * X1, nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0, X1}); X0 * X1, nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
}; };
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>( EXPECT_TRUE(
manager, backend, make_graph, {x0, x1}, .01f, .01f)); autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
} }
TEST(backwards, negative) TEST(backwards, negative)
...@@ -383,16 +377,15 @@ TEST(backwards, negative) ...@@ -383,16 +377,15 @@ TEST(backwards, negative)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
auto shape = Shape{2, 3}; auto shape = Shape{2, 3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto make_graph = [shape]() { auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
return make_shared<Function>(-X0, nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0}); return make_shared<Function>(-X0, nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0});
}; };
EXPECT_TRUE( EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x0}, .01f, .01f));
autodiff_numeric_compare<element::Float32>(manager, backend, make_graph, {x0}, .01f, .01f));
} }
TEST(backwards, parameter) TEST(backwards, parameter)
...@@ -400,15 +393,14 @@ TEST(backwards, parameter) ...@@ -400,15 +393,14 @@ TEST(backwards, parameter)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
auto shape = Shape{2, 3}; auto shape = Shape{2, 3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto make_graph = [shape]() { auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
return make_shared<Function>(X0, nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0}); return make_shared<Function>(X0, nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0});
}; };
EXPECT_TRUE( EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x0}, .01f, .01f));
autodiff_numeric_compare<element::Float32>(manager, backend, make_graph, {x0}, .01f, .01f));
} }
TEST(backwards, reshape) TEST(backwards, reshape)
...@@ -416,9 +408,9 @@ TEST(backwards, reshape) ...@@ -416,9 +408,9 @@ TEST(backwards, reshape)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
auto shape = Shape{3, 4}; auto shape = Shape{3, 4};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto make_graph = [shape]() { auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
...@@ -426,8 +418,7 @@ TEST(backwards, reshape) ...@@ -426,8 +418,7 @@ TEST(backwards, reshape)
nullptr, nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0}); std::vector<std::shared_ptr<op::Parameter>>{X0});
}; };
EXPECT_TRUE( EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x0}, .01f, .01f));
autodiff_numeric_compare<element::Float32>(manager, backend, make_graph, {x0}, .01f, .01f));
} }
TEST(backwards, subtract) TEST(backwards, subtract)
...@@ -435,10 +426,10 @@ TEST(backwards, subtract) ...@@ -435,10 +426,10 @@ TEST(backwards, subtract)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
auto shape = Shape{2, 3}; auto shape = Shape{2, 3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x1 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto make_graph = [shape]() { auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
...@@ -446,8 +437,8 @@ TEST(backwards, subtract) ...@@ -446,8 +437,8 @@ TEST(backwards, subtract)
return make_shared<Function>( return make_shared<Function>(
X0 - X1, nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0, X1}); X0 - X1, nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
}; };
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>( EXPECT_TRUE(
manager, backend, make_graph, {x0, x1}, .01f, .01f)); autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
} }
TEST(backwards, abc) TEST(backwards, abc)
...@@ -455,11 +446,11 @@ TEST(backwards, abc) ...@@ -455,11 +446,11 @@ TEST(backwards, abc)
auto manager = runtime::Manager::get("NGVM"); auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f); test::Uniform<float> rng(-1.0f, 1.0f);
auto shape = Shape{2, 3}; auto shape = Shape{2, 3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x0 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x1 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto x2 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape)); auto x2 = rng.initialize(backend->make_primary_tensor_view<float>(shape));
auto make_graph = [shape]() { auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape); auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
...@@ -468,6 +459,6 @@ TEST(backwards, abc) ...@@ -468,6 +459,6 @@ TEST(backwards, abc)
return make_shared<Function>( return make_shared<Function>(
(X0 + X1) * X2, nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0, X1, X2}); (X0 + X1) * X2, nullptr, std::vector<std::shared_ptr<op::Parameter>>{X0, X1, X2});
}; };
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>( EXPECT_TRUE(
manager, backend, make_graph, {x0, x1, x2}, .01f, .01f)); autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1, x2}, .01f, .01f));
} }
...@@ -39,7 +39,6 @@ bool check_unary() ...@@ -39,7 +39,6 @@ bool check_unary()
auto node = make_shared<OP>(arg0); auto node = make_shared<OP>(arg0);
auto new_node = node->copy_with_new_args(new_args); auto new_node = node->copy_with_new_args(new_args);
auto node_cast = dynamic_pointer_cast<OP>(new_node);
return (nullptr != new_node) && (new_args == new_node->get_arguments()); return (nullptr != new_node) && (new_args == new_node->get_arguments());
} }
...@@ -56,7 +55,6 @@ bool check_binary() ...@@ -56,7 +55,6 @@ bool check_binary()
auto node = make_shared<OP>(arg0, arg1); auto node = make_shared<OP>(arg0, arg1);
auto new_node = node->copy_with_new_args(new_args); auto new_node = node->copy_with_new_args(new_args);
auto node_cast = dynamic_pointer_cast<OP>(new_node);
return (nullptr != new_node) && (new_args == new_node->get_arguments()); return (nullptr != new_node) && (new_args == new_node->get_arguments());
} }
...@@ -99,6 +97,7 @@ TEST(copy, broadcast) ...@@ -99,6 +97,7 @@ TEST(copy, broadcast)
auto node = make_shared<op::Broadcast>(arg0, shape, axes); auto node = make_shared<op::Broadcast>(arg0, shape, axes);
auto new_node = node->copy_with_new_args(new_args); auto new_node = node->copy_with_new_args(new_args);
auto node_cast = dynamic_pointer_cast<op::Broadcast>(new_node); auto node_cast = dynamic_pointer_cast<op::Broadcast>(new_node);
ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node); ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments()); ASSERT_TRUE(new_args == new_node->get_arguments());
...@@ -123,6 +122,7 @@ TEST(copy, concat) ...@@ -123,6 +122,7 @@ TEST(copy, concat)
auto node = make_shared<op::Concat>(Nodes{arg0, arg1}, axis); auto node = make_shared<op::Concat>(Nodes{arg0, arg1}, axis);
auto new_node = node->copy_with_new_args(new_args); auto new_node = node->copy_with_new_args(new_args);
auto node_cast = dynamic_pointer_cast<op::Concat>(new_node); auto node_cast = dynamic_pointer_cast<op::Concat>(new_node);
ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node); ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments()); ASSERT_TRUE(new_args == new_node->get_arguments());
...@@ -140,9 +140,11 @@ TEST(copy, parameterized_constant) ...@@ -140,9 +140,11 @@ TEST(copy, parameterized_constant)
Shape shape{2, 2}; Shape shape{2, 2};
auto cptv = dynamic_pointer_cast<ngraph::runtime::ParameterizedTensorView<element::Float32>>(c); auto cptv = dynamic_pointer_cast<ngraph::runtime::ParameterizedTensorView<element::Float32>>(c);
ASSERT_NE(cptv, nullptr);
auto node = make_shared<op::ParameterizedConstant<element::Float32>>(shape, cptv); auto node = make_shared<op::ParameterizedConstant<element::Float32>>(shape, cptv);
auto new_node = node->copy_with_new_args(Nodes{}); auto new_node = node->copy_with_new_args(Nodes{});
auto node_cast = dynamic_pointer_cast<op::ParameterizedConstant<element::Float32>>(new_node); auto node_cast = dynamic_pointer_cast<op::ParameterizedConstant<element::Float32>>(new_node);
ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node); ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(Nodes{} == new_node->get_arguments()); ASSERT_TRUE(Nodes{} == new_node->get_arguments());
ASSERT_TRUE(node_cast->get_value() == c); ASSERT_TRUE(node_cast->get_value() == c);
...@@ -157,6 +159,7 @@ TEST(copy, constant) ...@@ -157,6 +159,7 @@ TEST(copy, constant)
auto node = make_shared<op::Constant>(et, shape, c); auto node = make_shared<op::Constant>(et, shape, c);
auto new_node = node->copy_with_new_args(Nodes{}); auto new_node = node->copy_with_new_args(Nodes{});
auto node_cast = dynamic_pointer_cast<op::Constant>(new_node); auto node_cast = dynamic_pointer_cast<op::Constant>(new_node);
ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node); ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(Nodes{} == new_node->get_arguments()); ASSERT_TRUE(Nodes{} == new_node->get_arguments());
ASSERT_TRUE(node_cast->get_value_strings() == c); ASSERT_TRUE(node_cast->get_value_strings() == c);
...@@ -175,6 +178,7 @@ TEST(copy, convert) ...@@ -175,6 +178,7 @@ TEST(copy, convert)
auto node = make_shared<op::Convert>(arg0, et); auto node = make_shared<op::Convert>(arg0, et);
auto new_node = node->copy_with_new_args(new_args); auto new_node = node->copy_with_new_args(new_args);
auto node_cast = dynamic_pointer_cast<op::Convert>(new_node); auto node_cast = dynamic_pointer_cast<op::Convert>(new_node);
ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node); ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments()); ASSERT_TRUE(new_args == new_node->get_arguments());
...@@ -236,6 +240,7 @@ TEST(copy, FunctionCall) ...@@ -236,6 +240,7 @@ TEST(copy, FunctionCall)
make_shared<op::Parameter>(element::Float32::element_type(), shape)}; make_shared<op::Parameter>(element::Float32::element_type(), shape)};
auto new_node = node->copy_with_new_args(new_args); auto new_node = node->copy_with_new_args(new_args);
auto node_cast = dynamic_pointer_cast<op::FunctionCall>(new_node); auto node_cast = dynamic_pointer_cast<op::FunctionCall>(new_node);
ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node); ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments()); ASSERT_TRUE(new_args == new_node->get_arguments());
...@@ -255,6 +260,7 @@ TEST(copy, GetTupleElement) ...@@ -255,6 +260,7 @@ TEST(copy, GetTupleElement)
auto node = make_shared<op::GetTupleElement>(arg0, n); auto node = make_shared<op::GetTupleElement>(arg0, n);
auto new_node = node->copy_with_new_args(new_args); auto new_node = node->copy_with_new_args(new_args);
auto node_cast = dynamic_pointer_cast<op::GetTupleElement>(new_node); auto node_cast = dynamic_pointer_cast<op::GetTupleElement>(new_node);
ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node); ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments()); ASSERT_TRUE(new_args == new_node->get_arguments());
...@@ -317,6 +323,7 @@ TEST(copy, parameter) ...@@ -317,6 +323,7 @@ TEST(copy, parameter)
auto node = make_shared<op::Parameter>(element::Float32::element_type(), shape); auto node = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto new_node = node->copy_with_new_args({}); auto new_node = node->copy_with_new_args({});
auto node_cast = dynamic_pointer_cast<op::Parameter>(new_node); auto node_cast = dynamic_pointer_cast<op::Parameter>(new_node);
ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node); ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_node->get_arguments().size() == 0); ASSERT_TRUE(new_node->get_arguments().size() == 0);
...@@ -347,6 +354,7 @@ TEST(copy, reduce) ...@@ -347,6 +354,7 @@ TEST(copy, reduce)
auto node = make_shared<op::Reduce>(arg0, arg_init, f, axes); auto node = make_shared<op::Reduce>(arg0, arg_init, f, axes);
auto new_node = node->copy_with_new_args(new_args); auto new_node = node->copy_with_new_args(new_args);
auto node_cast = dynamic_pointer_cast<op::Reduce>(new_node); auto node_cast = dynamic_pointer_cast<op::Reduce>(new_node);
ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node); ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments()); ASSERT_TRUE(new_args == new_node->get_arguments());
...@@ -372,6 +380,7 @@ TEST(copy, reshape) ...@@ -372,6 +380,7 @@ TEST(copy, reshape)
auto node = make_shared<op::Reshape>(arg0, axes, shape_out); auto node = make_shared<op::Reshape>(arg0, axes, shape_out);
auto new_node = node->copy_with_new_args(new_args); auto new_node = node->copy_with_new_args(new_args);
auto node_cast = dynamic_pointer_cast<op::Reshape>(new_node); auto node_cast = dynamic_pointer_cast<op::Reshape>(new_node);
ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node); ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments()); ASSERT_TRUE(new_args == new_node->get_arguments());
...@@ -393,6 +402,7 @@ TEST(copy, select) ...@@ -393,6 +402,7 @@ TEST(copy, select)
auto node = make_shared<op::Select>(arg0, arg1, arg2); auto node = make_shared<op::Select>(arg0, arg1, arg2);
auto new_node = node->copy_with_new_args(new_args); auto new_node = node->copy_with_new_args(new_args);
auto node_cast = dynamic_pointer_cast<op::Select>(new_node); auto node_cast = dynamic_pointer_cast<op::Select>(new_node);
ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node); ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments()); ASSERT_TRUE(new_args == new_node->get_arguments());
...@@ -427,6 +437,7 @@ TEST(copy, slice) ...@@ -427,6 +437,7 @@ TEST(copy, slice)
auto node = make_shared<op::Slice>(arg0, lower, upper, step); auto node = make_shared<op::Slice>(arg0, lower, upper, step);
auto new_node = node->copy_with_new_args(new_args); auto new_node = node->copy_with_new_args(new_args);
auto node_cast = dynamic_pointer_cast<op::Slice>(new_node); auto node_cast = dynamic_pointer_cast<op::Slice>(new_node);
ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node); ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments()); ASSERT_TRUE(new_args == new_node->get_arguments());
...@@ -451,6 +462,7 @@ TEST(copy, sum) ...@@ -451,6 +462,7 @@ TEST(copy, sum)
auto node = make_shared<op::Sum>(arg0, axes); auto node = make_shared<op::Sum>(arg0, axes);
auto new_node = node->copy_with_new_args(new_args); auto new_node = node->copy_with_new_args(new_args);
auto node_cast = dynamic_pointer_cast<op::Sum>(new_node); auto node_cast = dynamic_pointer_cast<op::Sum>(new_node);
ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node); ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments()); ASSERT_TRUE(new_args == new_node->get_arguments());
...@@ -479,6 +491,7 @@ TEST(copy, tuple) ...@@ -479,6 +491,7 @@ TEST(copy, tuple)
auto node = make_shared<op::Tuple>(Nodes{arg0, arg1}); auto node = make_shared<op::Tuple>(Nodes{arg0, arg1});
auto new_node = node->copy_with_new_args(new_args); auto new_node = node->copy_with_new_args(new_args);
auto node_cast = dynamic_pointer_cast<op::Tuple>(new_node); auto node_cast = dynamic_pointer_cast<op::Tuple>(new_node);
ASSERT_NE(node_cast, nullptr);
ASSERT_TRUE(nullptr != new_node); ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments()); ASSERT_TRUE(new_args == new_node->get_arguments());
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include "gtest/gtest.h"
#include "ngraph/types/element_type.hpp"
using namespace ngraph;
// Checks that element::from<T>() maps each fundamental C++ type to the
// corresponding ngraph element type constant.
TEST(element_type, from)
{
    // boolean-like types
    EXPECT_EQ(element::boolean, element::from<char>());
    EXPECT_EQ(element::boolean, element::from<bool>());
    // floating-point types
    EXPECT_EQ(element::f32, element::from<float>());
    EXPECT_EQ(element::f64, element::from<double>());
    // signed integer types
    EXPECT_EQ(element::i8, element::from<int8_t>());
    EXPECT_EQ(element::i16, element::from<int16_t>());
    EXPECT_EQ(element::i32, element::from<int32_t>());
    EXPECT_EQ(element::i64, element::from<int64_t>());
    // unsigned integer types
    EXPECT_EQ(element::u8, element::from<uint8_t>());
    EXPECT_EQ(element::u16, element::from<uint16_t>());
    EXPECT_EQ(element::u32, element::from<uint32_t>());
    EXPECT_EQ(element::u64, element::from<uint64_t>());
}
...@@ -185,20 +185,20 @@ TEST(util, all_close) ...@@ -185,20 +185,20 @@ TEST(util, all_close)
auto backend = manager->allocate_backend(); auto backend = manager->allocate_backend();
// Create some tensors for input/output // Create some tensors for input/output
auto a = backend->make_parameterized_tensor_view<element::Float32>(Shape{2, 3}); auto a = backend->make_primary_tensor_view(element::Float32::element_type(), Shape{2, 3});
auto b = backend->make_parameterized_tensor_view<element::Float32>(Shape{2, 3}); auto b = backend->make_primary_tensor_view(element::Float32::element_type(), Shape{2, 3});
copy_data(a, runtime::NDArray<float, 2>({{1, 2, 3}, {3, 4, 5}}).get_vector()); copy_data(a, runtime::NDArray<float, 2>({{1, 2, 3}, {3, 4, 5}}).get_vector());
copy_data(b, runtime::NDArray<float, 2>({{1, 2, 3}, {3, 4, 5}}).get_vector()); copy_data(b, runtime::NDArray<float, 2>({{1, 2, 3}, {3, 4, 5}}).get_vector());
EXPECT_TRUE(ngraph::test::all_close(a, b)); EXPECT_TRUE(ngraph::test::all_close<float>(a, b));
auto c = backend->make_parameterized_tensor_view<element::Float32>(Shape{2, 3}); auto c = backend->make_primary_tensor_view(element::Float32::element_type(), Shape{2, 3});
copy_data(c, runtime::NDArray<float, 2>({{1.1f, 2, 3}, {3, 4, 5}}).get_vector()); copy_data(c, runtime::NDArray<float, 2>({{1.1f, 2, 3}, {3, 4, 5}}).get_vector());
EXPECT_FALSE(ngraph::test::all_close(c, a, 0, .05f)); EXPECT_FALSE(ngraph::test::all_close<float>(c, a, 0, .05f));
EXPECT_TRUE(ngraph::test::all_close(c, a, 0, .11f)); EXPECT_TRUE(ngraph::test::all_close<float>(c, a, 0, .11f));
EXPECT_FALSE(ngraph::test::all_close(c, a, .05f, 0)); EXPECT_FALSE(ngraph::test::all_close<float>(c, a, .05f, 0));
EXPECT_TRUE(ngraph::test::all_close(c, a, .11f, 0)); EXPECT_TRUE(ngraph::test::all_close<float>(c, a, .11f, 0));
} }
...@@ -54,11 +54,11 @@ namespace ngraph ...@@ -54,11 +54,11 @@ namespace ngraph
/// @param rtol Relative tolerance /// @param rtol Relative tolerance
/// @param atol Absolute tolerance /// @param atol Absolute tolerance
/// Returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|. /// Returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|.
template <typename ET> template <typename T>
bool all_close(const std::shared_ptr<ngraph::runtime::ParameterizedTensorView<ET>>& a, bool all_close(const std::shared_ptr<ngraph::runtime::TensorView>& a,
const std::shared_ptr<ngraph::runtime::ParameterizedTensorView<ET>>& b, const std::shared_ptr<ngraph::runtime::TensorView>& b,
typename ET::type rtol = 1e-5f, T rtol = 1e-5f,
typename ET::type atol = 1e-8f) T atol = 1e-8f)
{ {
// Check that the layouts are compatible // Check that the layouts are compatible
if (*a->get_tensor_view_layout() != *b->get_tensor_view_layout()) if (*a->get_tensor_view_layout() != *b->get_tensor_view_layout())
...@@ -69,7 +69,7 @@ namespace ngraph ...@@ -69,7 +69,7 @@ namespace ngraph
if (a->get_shape() != b->get_shape()) if (a->get_shape() != b->get_shape())
return false; return false;
return all_close(a->get_vector(), b->get_vector(), rtol, atol); return all_close(a->get_vector<T>(), b->get_vector<T>(), rtol, atol);
} }
/// @brief Same as numpy.allclose /// @brief Same as numpy.allclose
...@@ -78,12 +78,11 @@ namespace ngraph ...@@ -78,12 +78,11 @@ namespace ngraph
/// @param rtol Relative tolerance /// @param rtol Relative tolerance
/// @param atol Absolute tolerance /// @param atol Absolute tolerance
/// Returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|. /// Returns true if shapes match and for all elements, |a_i-b_i| <= atol + rtol*|b_i|.
template <typename ET> template <typename T>
bool all_close( bool all_close(const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& as,
const std::vector<std::shared_ptr<ngraph::runtime::ParameterizedTensorView<ET>>>& as, const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& bs,
const std::vector<std::shared_ptr<ngraph::runtime::ParameterizedTensorView<ET>>>& bs, T rtol,
typename ET::type rtol, T atol)
typename ET::type atol)
{ {
if (as.size() != bs.size()) if (as.size() != bs.size())
{ {
......
...@@ -39,24 +39,24 @@ namespace ngraph ...@@ -39,24 +39,24 @@ namespace ngraph
/// @returns f'(X_i..., c) where f'(x_i, ..., c)_j is backprop for X_j /// @returns f'(X_i..., c) where f'(x_i, ..., c)_j is backprop for X_j
std::shared_ptr<Function> backprop_function(const std::shared_ptr<Function>& f); std::shared_ptr<Function> backprop_function(const std::shared_ptr<Function>& f);
template <typename ET> template <typename T>
std::vector<std::shared_ptr<runtime::ParameterizedTensorView<ET>>> backprop_derivative( std::vector<std::shared_ptr<runtime::TensorView>>
const std::shared_ptr<runtime::Manager>& manager, backprop_derivative(const std::shared_ptr<runtime::Manager>& manager,
const std::shared_ptr<runtime::Backend>& backend, const std::shared_ptr<runtime::Backend>& backend,
const std::shared_ptr<Function>& f, const std::shared_ptr<Function>& f,
const std::vector<std::shared_ptr<runtime::ParameterizedTensorView<ET>>>& args) const std::vector<std::shared_ptr<runtime::TensorView>>& args)
{ {
auto y = f->get_result(); auto y = f->get_result();
Shape y_shape = Shape y_shape =
std::dynamic_pointer_cast<const TensorViewType>(y->get_value_type())->get_shape(); std::dynamic_pointer_cast<const TensorViewType>(y->get_value_type())->get_shape();
auto c_param = std::make_shared<op::Parameter>(ET::element_type(), y_shape); auto c_param = std::make_shared<op::Parameter>(element::from<T>(), y_shape);
auto c_arg = backend->make_parameterized_tensor_view<ET>(y_shape); auto c_arg = backend->make_primary_tensor_view<T>(y_shape);
auto params = f->get_parameters(); auto params = f->get_parameters();
std::vector<std::shared_ptr<Node>> deriv_nodes; std::vector<std::shared_ptr<Node>> deriv_nodes;
std::vector<std::shared_ptr<runtime::ParameterizedTensorView<ET>>> bprops; std::vector<std::shared_ptr<runtime::TensorView>> bprops;
std::vector<std::shared_ptr<runtime::ParameterizedTensorView<ET>>> results; std::vector<std::shared_ptr<runtime::TensorView>> results;
for (auto param : params) for (auto param : params)
{ {
Shape s = y_shape; Shape s = y_shape;
...@@ -64,8 +64,8 @@ namespace ngraph ...@@ -64,8 +64,8 @@ namespace ngraph
std::dynamic_pointer_cast<const TensorViewType>(param->get_value_type()) std::dynamic_pointer_cast<const TensorViewType>(param->get_value_type())
->get_shape(); ->get_shape();
s.insert(s.end(), param_shape.begin(), param_shape.end()); s.insert(s.end(), param_shape.begin(), param_shape.end());
results.push_back(backend->make_parameterized_tensor_view<ET>(s)); results.push_back(backend->make_primary_tensor_view<T>(s));
bprops.push_back(backend->make_parameterized_tensor_view<ET>(param_shape)); bprops.push_back(backend->make_primary_tensor_view<T>(param_shape));
deriv_nodes.push_back(y->backprop_node(param, c_param)); deriv_nodes.push_back(y->backprop_node(param, c_param));
} }
...@@ -78,22 +78,22 @@ namespace ngraph ...@@ -78,22 +78,22 @@ namespace ngraph
auto cf = backend->make_call_frame(external); auto cf = backend->make_call_frame(external);
// We compute the derivatives chunk by chunk // We compute the derivatives chunk by chunk
std::vector<typename std::vector<typename ET::type>::iterator> result_pos; std::vector<typename std::vector<T>::iterator> result_pos;
std::vector<std::vector<typename ET::type>> result_vect; std::vector<std::vector<T>> result_vect;
for (auto result : results) for (auto result : results)
{ {
result_vect.push_back(result->get_vector()); // storage for results result_vect.push_back(result->get_vector<T>()); // storage for results
result_pos.push_back(result_vect.back().begin()); result_pos.push_back(result_vect.back().begin());
} }
ngraph::runtime::TensorViewPtrs args_tv; std::vector<std::shared_ptr<ngraph::runtime::TensorView>> args_tv;
args_tv.insert(args_tv.begin(), args.begin(), args.end()); args_tv.insert(args_tv.begin(), args.begin(), args.end());
args_tv.push_back(c_arg); args_tv.push_back(c_arg);
runtime::TensorViewPtrs bprops_tv; std::vector<std::shared_ptr<ngraph::runtime::TensorView>> bprops_tv;
bprops_tv.insert(bprops_tv.begin(), bprops.begin(), bprops.end()); bprops_tv.insert(bprops_tv.begin(), bprops.begin(), bprops.end());
auto c_vec = c_arg->get_vector(); auto c_vec = c_arg->template get_vector<T>();
for (size_t i = 0; i < c_vec.size(); i++) for (size_t i = 0; i < c_vec.size(); i++)
{ {
c_vec[i] = 1; c_vec[i] = 1;
...@@ -103,7 +103,7 @@ namespace ngraph ...@@ -103,7 +103,7 @@ namespace ngraph
c_arg->write(c_vec); c_arg->write(c_vec);
for (size_t j = 0; j < results.size(); j++) for (size_t j = 0; j < results.size(); j++)
{ {
auto bprop_vec = bprops[j]->get_vector(); auto bprop_vec = bprops[j]->get_vector<T>();
result_pos[j] = std::copy(bprop_vec.begin(), bprop_vec.end(), result_pos[j]); result_pos[j] = std::copy(bprop_vec.begin(), bprop_vec.end(), result_pos[j]);
} }
} }
......
...@@ -33,13 +33,13 @@ namespace ngraph ...@@ -33,13 +33,13 @@ namespace ngraph
/// @param args Values for the arguments (the independent variables) /// @param args Values for the arguments (the independent variables)
/// @param delta increment for the variables /// @param delta increment for the variables
/// @returns vector of dy/dvar, where each dy/dvar's shape is concat(y.shape(), var.shape()) /// @returns vector of dy/dvar, where each dy/dvar's shape is concat(y.shape(), var.shape())
template <typename ET> template <typename T>
std::vector<std::shared_ptr<runtime::ParameterizedTensorView<ET>>> numeric_derivative( std::vector<std::shared_ptr<runtime::TensorView>>
const std::shared_ptr<runtime::Manager>& manager, numeric_derivative(const std::shared_ptr<runtime::Manager>& manager,
const std::shared_ptr<runtime::Backend>& backend, const std::shared_ptr<runtime::Backend>& backend,
const std::shared_ptr<Function>& f, const std::shared_ptr<Function>& f,
const std::vector<std::shared_ptr<runtime::ParameterizedTensorView<ET>>>& args, const std::vector<std::shared_ptr<runtime::TensorView>>& args,
typename ET::type delta) T delta)
{ {
auto y = f->get_result(); auto y = f->get_result();
...@@ -49,7 +49,7 @@ namespace ngraph ...@@ -49,7 +49,7 @@ namespace ngraph
auto params = f->get_parameters(); auto params = f->get_parameters();
// Results for each derivative, shape Y|X_i // Results for each derivative, shape Y|X_i
std::vector<std::shared_ptr<runtime::ParameterizedTensorView<ET>>> results; std::vector<std::shared_ptr<runtime::TensorView>> results;
for (auto param : params) for (auto param : params)
{ {
Shape s = y_shape; Shape s = y_shape;
...@@ -57,39 +57,36 @@ namespace ngraph ...@@ -57,39 +57,36 @@ namespace ngraph
std::dynamic_pointer_cast<const TensorViewType>(param->get_value_type()) std::dynamic_pointer_cast<const TensorViewType>(param->get_value_type())
->get_shape(); ->get_shape();
s.insert(s.end(), param_shape.begin(), param_shape.end()); s.insert(s.end(), param_shape.begin(), param_shape.end());
results.push_back(backend->make_parameterized_tensor_view<ET>(s)); results.push_back(backend->make_primary_tensor_view<T>(s));
} }
auto external = manager->compile(f); auto external = manager->compile(f);
auto cf = backend->make_call_frame(external); auto cf = backend->make_call_frame(external);
// ref_y is the function evaluated at the args // ref_y is the function evaluated at the args
auto ref_y = backend->make_parameterized_tensor_view<ET>(y_shape); auto ref_y = backend->make_primary_tensor_view<T>(y_shape);
ngraph::runtime::TensorViewPtrs args_tv; cf->tensor_call(args, std::vector<std::shared_ptr<ngraph::runtime::TensorView>>{ref_y});
args_tv.insert(args_tv.begin(), args.begin(), args.end()); auto ref_vec = ref_y->template get_vector<T>();
cf->tensor_call(args_tv, runtime::TensorViewPtrs{ref_y});
auto& ref_vec = ref_y->get_vector();
// inc_y will hold f(x+dx) values // inc_y will hold f(x+dx) values
auto inc_y = backend->make_parameterized_tensor_view<ET>(y_shape); auto inc_y = backend->make_primary_tensor_view<T>(y_shape);
auto& inc_vec = inc_y->get_vector();
// Assuming vars, y, and results are row-major // Assuming vars, y, and results are row-major
typename ET::type inv_delta = 1 / delta; T inv_delta = 1 / delta;
for (size_t i = 0; i < args.size(); ++i) for (size_t i = 0; i < args.size(); ++i)
{ {
auto arg = args[i]; auto arg = args[i];
auto res = results[i]->get_vector(); auto res = results[i]->get_vector<T>();
auto vec = arg->get_vector(); auto vec = arg->get_vector<T>();
for (size_t j = 0; j < vec.size(); j++) for (size_t j = 0; j < vec.size(); j++)
{ {
auto old_val = vec[j]; auto old_val = vec[j];
vec[j] += delta; vec[j] += delta;
arg->write(vec); arg->write(vec);
cf->tensor_call(args_tv, {inc_y}); cf->tensor_call(args, {inc_y});
auto inc_vec = inc_y->template get_vector<T>();
vec[j] = old_val; vec[j] = old_val;
arg->write(vec); arg->write(vec);
size_t res_k = j; size_t res_k = j;
......
...@@ -25,13 +25,11 @@ namespace ngraph ...@@ -25,13 +25,11 @@ namespace ngraph
{ {
/// @brief A predictable pseudo-random number generator /// @brief A predictable pseudo-random number generator
/// The seed is initialized so that we get repeatable pseudo-random numbers for tests /// The seed is initialized so that we get repeatable pseudo-random numbers for tests
template <typename ET> template <typename T>
class Uniform class Uniform
{ {
public: public:
using type = typename ET::type; Uniform(T min, T max, T seed = 0)
Uniform(type min, type max, type seed = 0)
: m_engine(seed) : m_engine(seed)
, m_distribution(min, max) , m_distribution(min, max)
, m_r(std::bind(m_distribution, m_engine)) , m_r(std::bind(m_distribution, m_engine))
...@@ -40,11 +38,11 @@ namespace ngraph ...@@ -40,11 +38,11 @@ namespace ngraph
/// @brief Randomly initialize a tensor /// @brief Randomly initialize a tensor
/// @param ptv The tensor to initialize /// @param ptv The tensor to initialize
const std::shared_ptr<runtime::ParameterizedTensorView<ET>> const std::shared_ptr<runtime::TensorView>
initialize(const std::shared_ptr<runtime::ParameterizedTensorView<ET>>& ptv) initialize(const std::shared_ptr<runtime::TensorView>& ptv)
{ {
auto vec = ptv->get_vector(); std::vector<T> vec = ptv->get_vector<T>();
for (auto& elt : vec) for (T& elt : vec)
{ {
elt = m_r(); elt = m_r();
} }
...@@ -54,8 +52,8 @@ namespace ngraph ...@@ -54,8 +52,8 @@ namespace ngraph
protected: protected:
std::default_random_engine m_engine; std::default_random_engine m_engine;
std::uniform_real_distribution<type> m_distribution; std::uniform_real_distribution<T> m_distribution;
std::function<type()> m_r; std::function<T()> m_r;
}; };
} }
} }
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment