Commit 0c681f1f authored by Adam Procter

Merge remote-tracking branch 'origin' into aprocter/de-eigenize-partial

parents 174ca317 e6cc7d8b
......@@ -89,15 +89,29 @@ set (SRC
runtime/tensor_view.cpp
runtime/tuple.cpp
runtime/utils.cpp
serializer.cpp
shape.cpp
types/element_type.cpp
types/type.cpp
util.cpp
)
message(STATUS ${CMAKE_CURRENT_SOURCE_DIR}/ops)
file(GLOB_RECURSE OPS "${CMAKE_CURRENT_SOURCE_DIR}/ops" "${CMAKE_CURRENT_SOURCE_DIR}/ops/*.hpp")
foreach(OP ${OPS})
file(STRINGS ${OP} OP_CLASS REGEX "class [A-Za-z0-9_]+ :")
foreach(LINE ${OP_CLASS})
string(REGEX REPLACE ".*class ([A-Za-z0-9_]+) : public ([A-Za-z0-9_]+).*" "\\1:\\2" CLASS_FOUND ${LINE})
set(OP_CLASS_LIST ${OP_CLASS_LIST} ${CLASS_FOUND})
endforeach(LINE ${OP_CLASS})
endforeach()
message(STATUS "${CMAKE_CURRENT_BINARY_DIR}/ops_list.txt")
string(REPLACE ";" "\n" OP_CLASS_LINES "${OP_CLASS_LIST}")
file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/ops_list.txt" "${OP_CLASS_LINES}")
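For illustration: the regex above turns each op class declaration into one Derived:Base line in ops_list.txt. A plausible excerpt, assuming only the classes visible elsewhere in this commit:

    FunctionCall:Node
    GetTupleElement:Node
    Tuple:Node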
# find_program (GRAPHVIZ dot)
# message (STATUS "graphviz '${GRAPHVIZ}'")
-find_package(Graphviz)
+find_package(Graphviz QUIET)
if (GRAPHVIZ_FOUND)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DGRAPHVIZ_FOUND")
endif()
......
......@@ -40,6 +40,7 @@ namespace ngraph
const std::string& name = "");
std::shared_ptr<Node> get_result() { return m_result; }
std::shared_ptr<const Node> get_result() const { return m_result; }
const std::vector<std::shared_ptr<op::Parameter>>& get_parameters() const
{
return m_parameters;
......
// clang-format off
#pragma clang diagnostic ignored "-Weverything"
/*
__ _____ _____ _____
__| | __| | | | JSON for Modern C++
......
......@@ -181,6 +181,11 @@ std::shared_ptr<Node> Node::backprop_node(const std::shared_ptr<Node>& x,
return adjoints_it->second.get(x);
}
std::shared_ptr<Function> Node::get_function() const
{
return nullptr;
}
namespace ngraph
{
ostream& operator<<(ostream& out, const Node& node)
......
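The nullptr default added here, overridden only by ops that carry a nested Function (FunctionCall and Reduce in this commit), lets generic traversal code probe any node uniformly. A minimal sketch, assuming some std::shared_ptr<Node> n:

    std::shared_ptr<Function> fp = n->get_function();
    if (fp)
    {
        // only ops wrapping a Function (FunctionCall, Reduce) land here
    }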
......@@ -111,6 +111,8 @@ namespace ngraph
virtual std::shared_ptr<Node>
copy_with_new_args(const std::vector<std::shared_ptr<Node>>& new_args) const = 0;
virtual std::shared_ptr<Function> get_function() const;
protected:
std::string m_node_type;
Nodes m_arguments;
......
......@@ -45,7 +45,7 @@ namespace ngraph
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
-        class FunctionCall : public ngraph::Node
+        class FunctionCall : public Node
{
public:
/// \brief Constructs a function call operation.
......@@ -62,7 +62,7 @@ namespace ngraph
}
/// \return The function to be called.
-            std::shared_ptr<Function> get_function() const { return m_function; }
+            std::shared_ptr<Function> get_function() const override { return m_function; }
protected:
std::shared_ptr<Function> m_function;
};
......
......@@ -20,8 +20,6 @@ namespace ngraph
{
namespace op
{
class Node;
/// \brief Operation to get an element from a tuple.
///
/// ## Parameters
......@@ -47,7 +45,7 @@ namespace ngraph
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
-        class GetTupleElement : public ngraph::Node
+        class GetTupleElement : public Node
{
public:
/// \brief Constructs a get-tuple-element operation.
......
......@@ -111,10 +111,7 @@ namespace ngraph
}
/// \return The function to use for reduction.
-            std::shared_ptr<Function> get_reduction_function() const
-            {
-                return m_reduction_function;
-            }
+            std::shared_ptr<Function> get_function() const override { return m_reduction_function; }
/// \return The axis positions (0-based) to be eliminated through reduction.
const AxisSet& get_reduction_axes() const { return m_reduction_axes; }
protected:
......
......@@ -39,7 +39,7 @@ namespace ngraph
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
-        class Tuple : public ngraph::Node
+        class Tuple : public Node
{
public:
/// \brief Constructs a tuple construction operation.
......
......@@ -16,12 +16,12 @@
#include <memory>
#include "ngraph/function.hpp"
#include "ngraph/log.hpp"
#include "ngraph/node.hpp"
#include "ngraph/ops/function_call.hpp"
#include "ngraph/ops/reduce.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/pass.hpp"
#include "ngraph/util.hpp"
using namespace std;
using namespace ngraph;
......@@ -38,30 +38,11 @@ void ngraph::pass::Manager::initialize_default_passes()
{
}
-static void find_functions(shared_ptr<Function> f, set<shared_ptr<Function>>& funcs)
-{
-    funcs.insert(f);
-    for (shared_ptr<Node> node : f->get_ops())
-    {
-        shared_ptr<op::FunctionCall> fc = dynamic_pointer_cast<op::FunctionCall>(node);
-        if (fc)
-        {
-            find_functions(fc->get_function(), funcs);
-        }
-        shared_ptr<op::Reduce> reduce = dynamic_pointer_cast<op::Reduce>(node);
-        if (reduce)
-        {
-            find_functions(reduce->get_reduction_function(), funcs);
-        }
-    }
-}
void ngraph::pass::Manager::run_passes(shared_ptr<Function> func)
{
// find all functions
set<shared_ptr<Function>> tfs;
-    find_functions(func, tfs);
+    traverse_functions(func, [&](shared_ptr<Function> f) { tfs.insert(f); });
get_state().set_functions(tfs);
vector<shared_ptr<Function>> fs;
......
......@@ -1001,7 +1001,7 @@ void Emitter::EmitReduce(const ngraph::Node* n,
const std::vector<TensorViewInfo>& outputs)
{
auto reduce = static_cast<const op::Reduce*>(n);
-    auto reduction_function = reduce->get_reduction_function();
+    auto reduction_function = reduce->get_function();
auto reductee_type = reduce->get_arguments().at(0)->get_value_type();
auto reductee_tensor_view_type = dynamic_pointer_cast<const TensorViewType>(reductee_type);
......
......@@ -248,9 +248,9 @@ using namespace ngraph::runtime::cpu::eigen;
{
for (descriptor::Tensor* tensor : node->liveness_new_list)
{
-                TU << tensor->get_element_type() << "* " << tensor->get_name() << " = ("
-                   << tensor->get_element_type() << "*)(memory_handler.get_ptr("
-                   << tensor->get_pool_offset() << "));\n";
+                TU << tensor->get_element_type().c_type_string() << "* " << tensor->get_name()
+                   << " = (" << tensor->get_element_type().c_type_string()
+                   << "*)(memory_handler.get_ptr(" << tensor->get_pool_offset() << "));\n";
}
}
TU << "\n";
......
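For context: with element::Type's operator<< now printing a debug form (see the element_type.cpp hunk below) rather than the C type name, the emitter must call c_type_string() explicitly to keep the generated source valid C. A hypothetical emitted line for a float tensor t0 at pool offset 0:

    float* t0 = (float*)(memory_handler.get_ptr(0));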
......@@ -662,7 +662,7 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
REGISTER_TO_OP_MAP(op::Reduce)
{
auto reduce = static_cast<const op::Reduce*>(n);
-        auto reduction_function = reduce->get_reduction_function();
+        auto reduction_function = reduce->get_function();
std::shared_ptr<ExternalFunction> external;
......
This diff is collapsed.
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <memory>
#include <unordered_map>
#include "ngraph/function.hpp"
#include "ngraph/json.hpp"
#include "ngraph/node.hpp"
namespace ngraph
{
std::string serialize(std::shared_ptr<ngraph::Function>);
std::shared_ptr<ngraph::Function> deserialize(std::istream&);
}
......@@ -21,7 +21,7 @@
using namespace ngraph;
-const element::Type element::boolean(8, false, false, "bool");
+const element::Type element::boolean(8, false, false, "char");
const element::Type element::f32(32, true, true, "float");
const element::Type element::f64(64, true, true, "double");
const element::Type element::i8(8, false, true, "int8_t");
......@@ -39,7 +39,6 @@ element::Type::Type(size_t bitwidth, bool is_real, bool is_signed, const std::st
, m_is_signed{is_signed}
, m_cname{cname}
{
-    assert(m_bitwidth % 8 == 0);
}
const std::string& element::Type::c_type_string() const
......@@ -53,13 +52,35 @@ bool element::Type::operator==(const element::Type& other) const
m_is_signed == other.m_is_signed && m_cname == other.m_cname;
}
bool element::Type::operator<(const Type& other) const
{
size_t v1 = m_bitwidth << 2;
v1 |= (m_is_real ? 2 : 0);
v1 |= (m_is_signed ? 1 : 0);
size_t v2 = other.m_bitwidth << 2;
v2 |= (other.m_is_real ? 2 : 0);
v2 |= (other.m_is_signed ? 1 : 0);
return v1 < v2;
}
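A worked comparison, as a sanity check on the packed key (bitwidth shifted past the two flag bits):

    // f32: (32 << 2) | 2 | 1 = 131
    // i8:  ( 8 << 2) | 0 | 1 = 33   -> i8 < f32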
size_t element::Type::size() const
{
return std::ceil(static_cast<float>(m_bitwidth) / 8.0f);
}
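This rounds up to whole bytes, matching the tests added below: 1- through 8-bit types report a size of 1, while a 9-bit type reports 2.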
size_t element::Type::hash() const
{
size_t h1 = std::hash<size_t>{}(m_bitwidth);
size_t h2 = std::hash<bool>{}(m_is_real);
size_t h3 = std::hash<bool>{}(m_is_signed);
return h1 ^ ((h2 ^ (h3 << 1)) << 1);
}
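The shift-before-xor combine here mirrors the common hash_combine idiom: without the shifts, equal field hashes (e.g. is_real == is_signed) would cancel each other out.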
std::ostream& element::operator<<(std::ostream& out, const element::Type& obj)
{
-    out << obj.m_cname;
+    out << "element::Type(" << obj.m_bitwidth << ", " << obj.m_is_real << ", " << obj.m_is_signed
+        << ")";
return out;
}
......@@ -47,23 +47,20 @@ namespace ngraph
class Type
{
-    Type(const Type&) = delete;
-    Type& operator=(const Type&) = delete;
public:
-    virtual ~Type() {}
+    Type() = delete;
+    Type(const Type&) = default;
    Type(size_t bitwidth, bool is_real, bool is_signed, const std::string& cname);
+    virtual ~Type() {}
    const std::string& c_type_string() const;
    size_t size() const;
-    size_t hash() const
-    {
-        std::hash<std::string> h;
-        return h(m_cname);
-    }
+    size_t hash() const;
bool is_real() const { return m_is_real; }
bool is_signed() const { return m_is_signed; }
size_t bitwidth() const { return m_bitwidth; }
bool operator==(const Type& other) const;
bool operator!=(const Type& other) const { return !(*this == other); }
+    bool operator<(const Type& other) const;
friend std::ostream& operator<<(std::ostream&, const Type&);
private:
......
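Taken together, the defaulted copy constructor and the new operator< are what allow element::Type to serve as a std::map key (std::map needs copyable keys with a strict weak ordering). A minimal sketch, mirroring the mapable test added below:

    std::map<element::Type, std::string> cnames;
    cnames.insert({element::f32, "float"}); // exercises both the copy ctor and operator<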
......@@ -145,7 +145,6 @@ void ngraph::traverse_nodes(std::shared_ptr<ngraph::Function> p,
}
void ngraph::traverse_nodes(ngraph::Function* p, std::function<void(shared_ptr<Node>)> f)
{
std::unordered_set<shared_ptr<Node>> instances_seen;
deque<shared_ptr<Node>> stack;
......@@ -172,6 +171,34 @@ void ngraph::traverse_nodes(ngraph::Function* p, std::function<void(shared_ptr<N
}
}
void ngraph::traverse_functions(std::shared_ptr<ngraph::Function> p,
std::function<void(shared_ptr<Function>)> f)
{
std::unordered_set<shared_ptr<Function>> instances_seen;
deque<shared_ptr<Function>> stack;
stack.push_front(p);
while (stack.size() > 0)
{
shared_ptr<Function> func = stack.front();
if (instances_seen.find(func) == instances_seen.end())
{
instances_seen.insert(func);
f(func);
}
stack.pop_front();
for (shared_ptr<Node> op : func->get_ops())
{
shared_ptr<Function> fp = op->get_function();
if (fp)
{
stack.push_front(fp);
}
}
}
}
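A minimal usage sketch, mirroring the test added below: because instances_seen deduplicates, a Function reachable along several call edges (f is called twice by g, and g twice by h, in the tests) still triggers the callback exactly once:

    std::set<std::shared_ptr<Function>> funcs;
    traverse_functions(h, [&](std::shared_ptr<Function> fp) { funcs.insert(fp); });
    // funcs now holds exactly {h, g, f}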
void ngraph::free_nodes(shared_ptr<Function> p)
{
std::deque<Node*> sorted_list;
......
......@@ -18,6 +18,7 @@
#include <chrono>
#include <functional>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <sstream>
......@@ -239,8 +240,9 @@ namespace ngraph
}
void traverse_nodes(Function* p, std::function<void(std::shared_ptr<Node>)> f);
void traverse_nodes(std::shared_ptr<Function> p, std::function<void(std::shared_ptr<Node>)> f);
void traverse_functions(std::shared_ptr<Function> p,
std::function<void(std::shared_ptr<Function>)> f);
void free_nodes(std::shared_ptr<Function>);
} // end namespace ngraph
......@@ -14,9 +14,10 @@
#include <algorithm>
#include <cinttypes>
+#include <cmath>
#include "gtest/gtest.h"
-#include <cmath>
+#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
......
......@@ -358,7 +358,7 @@ TEST(copy, reduce)
ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->get_arguments());
-    ASSERT_TRUE(f == node_cast->get_reduction_function());
+    ASSERT_TRUE(f == node_cast->get_function());
ASSERT_TRUE(axes == node_cast->get_reduction_axes());
}
......
......@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <map>
#include "gtest/gtest.h"
#include "ngraph/types/element_type.hpp"
......@@ -33,3 +35,50 @@ TEST(element_type, from)
EXPECT_EQ(element::from<uint32_t>(), element::u32);
EXPECT_EQ(element::from<uint64_t>(), element::u64);
}
TEST(element_type, mapable)
{
std::map<element::Type, std::string> test_map;
test_map.insert({element::f32, "float"});
}
TEST(element_type, size)
{
{
element::Type t1{1, false, false, ""};
EXPECT_EQ(1, t1.size());
}
{
element::Type t1{2, false, false, ""};
EXPECT_EQ(1, t1.size());
}
{
element::Type t1{3, false, false, ""};
EXPECT_EQ(1, t1.size());
}
{
element::Type t1{4, false, false, ""};
EXPECT_EQ(1, t1.size());
}
{
element::Type t1{5, false, false, ""};
EXPECT_EQ(1, t1.size());
}
{
element::Type t1{6, false, false, ""};
EXPECT_EQ(1, t1.size());
}
{
element::Type t1{7, false, false, ""};
EXPECT_EQ(1, t1.size());
}
{
element::Type t1{2, false, false, ""};
EXPECT_EQ(1, t1.size());
}
{
element::Type t1{9, false, false, ""};
EXPECT_EQ(2, t1.size());
}
}
......@@ -12,4 +12,88 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <fstream>
#include <sstream>
#include "gtest/gtest.h"
#include "ngraph/json.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/serializer.hpp"
#include "ngraph/util.hpp"
using namespace std;
using namespace ngraph;
template <typename T>
static void copy_data(shared_ptr<runtime::TensorView> tv, const vector<T>& data)
{
size_t data_size = data.size() * sizeof(T);
tv->write(data.data(), 0, data_size);
}
TEST(serialize, main)
{
// First create "f(A,B,C) = (A+B)*C".
auto shape = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_f = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>((A + B) * C, rt_f, op::Parameters{A, B, C}, "f");
// Now make "g(X,Y,Z) = f(X,Y,Z) + f(X,Y,Z)"
auto X = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Y = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Z = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_g = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto g = make_shared<Function>(make_shared<op::FunctionCall>(f, Nodes{X, Y, Z}) +
make_shared<op::FunctionCall>(f, Nodes{X, Y, Z}),
rt_g,
op::Parameters{X, Y, Z},
"g");
// Now make "h(X,Y,Z) = g(X,Y,Z) + g(X,Y,Z)"
auto X1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Y1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Z1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_h = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto h = make_shared<Function>(make_shared<op::FunctionCall>(g, Nodes{X1, Y1, Z1}) +
make_shared<op::FunctionCall>(g, Nodes{X1, Y1, Z1}),
rt_h,
op::Parameters{X1, Y1, Z1},
"h");
string js = serialize(h);
{
ofstream f("serialize_function.js");
f << js;
}
istringstream in(js);
shared_ptr<Function> sfunc = deserialize(in);
// Now call g on some test vectors.
auto manager = runtime::Manager::get("CPU");
auto external = manager->compile(sfunc);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
auto x = backend->make_primary_tensor_view(element::Float32::element_type(), shape);
copy_data(x, vector<float>{1, 2, 3, 4});
auto y = backend->make_primary_tensor_view(element::Float32::element_type(), shape);
copy_data(y, vector<float>{5, 6, 7, 8});
auto z = backend->make_primary_tensor_view(element::Float32::element_type(), shape);
copy_data(z, vector<float>{9, 10, 11, 12});
auto result = backend->make_primary_tensor_view(element::Float32::element_type(), shape);
cf->call({x, y, z}, {result});
EXPECT_EQ((vector<float>{54, 80, 110, 144}), result->get_vector<float>());
cf->call({y, x, z}, {result});
EXPECT_EQ((vector<float>{54, 80, 110, 144}), result->get_vector<float>());
cf->call({x, z, y}, {result});
EXPECT_EQ((vector<float>{50, 72, 98, 128}), result->get_vector<float>());
}
......@@ -18,6 +18,7 @@
#include "gtest/gtest.h"
#include "ngraph/function.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/util.hpp"
#include "util/all_close.hpp"
......@@ -202,3 +203,40 @@ TEST(util, all_close)
EXPECT_FALSE(ngraph::test::all_close<float>(c, a, .05f, 0));
EXPECT_TRUE(ngraph::test::all_close<float>(c, a, .11f, 0));
}
TEST(util, traverse_functions)
{
// First create "f(A,B,C) = (A+B)*C".
auto shape = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_f = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>((A + B) * C, rt_f, op::Parameters{A, B, C}, "f");
// Now make "g(X,Y,Z) = f(X,Y,Z) + f(X,Y,Z)"
auto X = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Y = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Z = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_g = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto g = make_shared<Function>(make_shared<op::FunctionCall>(f, Nodes{X, Y, Z}) +
make_shared<op::FunctionCall>(f, Nodes{X, Y, Z}),
rt_g,
op::Parameters{X, Y, Z},
"g");
// Now make "h(X,Y,Z) = g(X,Y,Z) + g(X,Y,Z)"
auto X1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Y1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Z1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_h = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto h = make_shared<Function>(make_shared<op::FunctionCall>(g, Nodes{X1, Y1, Z1}) +
make_shared<op::FunctionCall>(g, Nodes{X1, Y1, Z1}),
rt_h,
op::Parameters{X1, Y1, Z1},
"h");
vector<Function*> functions;
traverse_functions(h, [&](shared_ptr<Function> fp) { functions.push_back(fp.get()); });
ASSERT_EQ(3, functions.size());
}