Unverified commit 174402a4, authored by Scott Cyphers, committed by GitHub

Merge branch 'master' into cyphers/dochow

parents 5aea8126 8e5c9404
@@ -46,7 +46,7 @@ descriptor::Tensor::Tensor(const element::Type& element_type,

 string descriptor::Tensor::make_tensor_name(const Node* node, size_t value_index)
 {
-    return node->get_node_id() + "_" + to_string(value_index);
+    return node->get_name() + "_" + to_string(value_index);
 }

 string descriptor::Tensor::get_next_view_name()
......
@@ -32,9 +32,10 @@ Function::Function(const NodeVector& results,
                    const std::string& name)
     : m_results(results)
     , m_parameters(parameters)
-    , m_name(name)
     , m_temporary_pool_size(0)
     , m_instance_id(m_next_instance_id.fetch_add(1))
+    , m_name(name)
+    , m_unique_name("Function_" + to_string(m_instance_id))
 {
     traverse_nodes(this, [&](shared_ptr<Node> node) {
         std::shared_ptr<op::Parameter> p = std::dynamic_pointer_cast<op::Parameter>(node);
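A note on the initializer reorder above: C++ initializes members in declaration order, not in initializer-list order, so m_name now appears after m_instance_id to match the new declaration order in the header, and m_unique_name (which reads m_instance_id) is guaranteed to run after the id is assigned. A minimal standalone sketch of the pattern (toy class, not ngraph code):

#include <atomic>
#include <cstddef>
#include <string>

struct Counted
{
    Counted()
        : m_instance_id(s_next_instance_id.fetch_add(1))
        , m_unique_name("Counted_" + std::to_string(m_instance_id)) // safe: id already set
    {
    }
    static std::atomic<std::size_t> s_next_instance_id;
    const std::size_t m_instance_id; // declared first, so initialized first
    const std::string m_unique_name; // derived from m_instance_id
};
std::atomic<std::size_t> Counted::s_next_instance_id{0};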
@@ -63,18 +64,18 @@ std::list<shared_ptr<Node>> Function::get_ordered_ops()
     return topological_sort(get_ops());
 }

-std::string Function::get_name() const
+const std::string& Function::get_friendly_name() const
 {
-    string rc;
     if (m_name.empty())
     {
-        rc = "Function_" + to_string(m_instance_id);
-    }
-    else
-    {
-        rc = m_name;
+        return m_unique_name;
     }
-    return rc;
+    return m_name;
 }

+const std::string& Function::get_name() const
+{
+    return m_unique_name;
+}
+
 void Function::set_name(const string& name)
......
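Taken together, the two accessors above define the new naming contract: get_name() always returns the auto-generated, immutable unique name, while get_friendly_name() returns the user-assigned name when one exists and falls back to the unique name otherwise. A usage sketch (assumes the ngraph API in this diff and Parameter nodes A and B as in the tests below; "Function_42" is illustrative):

auto f = make_shared<Function>(A + B, op::ParameterVector{A, B});
f->get_name();          // "Function_42" -- unique, stable for f's lifetime
f->get_friendly_name(); // "Function_42" -- falls back to the unique name
f->set_name("my_func");
f->get_friendly_name(); // "my_func"
f->get_name();          // still "Function_42"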
@@ -67,25 +67,25 @@ namespace ngraph
         /// Check that there is a single result and return it.
         std::shared_ptr<Node> get_result() const;

-        std::string get_name() const;
-        void set_name(
-            const std::string&
-                name); //so we can use `dynamic_cast` in FunctionCall to double check if we are dealing with an XLA or regular function
+        const std::string& get_friendly_name() const;
+        const std::string& get_name() const;
+        // so we can use `dynamic_cast` in FunctionCall to double check if we are dealing with
+        // an XLA or regular function
+        void set_name(const std::string& name);
         std::list<std::shared_ptr<Node>> get_ops() const;
         std::list<std::shared_ptr<Node>> get_ordered_ops();

         friend std::ostream& operator<<(std::ostream&, const Function&);
         size_t get_instance_id() { return m_instance_id; }
         size_t get_temporary_pool_size();
         void set_temporary_pool_size(size_t);
-        //updates old w/ repl in m_results list
+        // updates old w/ repl in m_results list
         void replace_output_op(std::shared_ptr<Node> old, std::shared_ptr<Node> repl);
-        //updates graph and m_results list
+        // updates graph and m_results list
         void replace_node(std::shared_ptr<Node> old, std::shared_ptr<Node> repl);

     protected:
         NodeVector m_results;
         op::ParameterVector m_parameters;
-        std::string m_name;
         size_t m_temporary_pool_size;

     private:
@@ -94,5 +94,7 @@ namespace ngraph
         static std::atomic<size_t> m_next_instance_id;
         size_t m_instance_id;
+        std::string m_name;
+        const std::string m_unique_name;
     };
 }
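Why the accessors now return const std::string& rather than std::string: the old get_name() assembled a fresh string on every call, whereas the new implementations hand back a reference to a stored member (m_name or the const m_unique_name), so repeated calls neither allocate nor copy, and the reference stays valid for the object's lifetime. A self-contained sketch of the idiom (toy class, not ngraph code):

#include <iostream>
#include <string>

class Named
{
public:
    explicit Named(std::string unique)
        : m_unique_name(std::move(unique))
    {
    }
    // Returns a reference to a const member: no per-call allocation, and the
    // referent lives as long as the object does.
    const std::string& get_name() const { return m_unique_name; }

private:
    const std::string m_unique_name;
};

int main()
{
    Named n{"Function_0"};
    const std::string& name = n.get_name(); // valid for n's lifetime
    std::cout << name << '\n';
}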
@@ -33,6 +33,7 @@ atomic<size_t> Node::m_next_instance_id(0);
 Node::Node(const std::string& node_type, const NodeVector& arguments)
     : m_node_type(node_type)
     , m_instance_id(m_next_instance_id.fetch_add(1))
+    , m_unique_name(description() + "_" + to_string(m_instance_id))
     , m_is_output(false)
     , m_arguments(arguments)
 {
@@ -112,25 +113,18 @@ bool Node::is_constant() const
     return false;
 }

-std::string Node::get_node_id() const
+const std::string& Node::get_friendly_name() const
 {
-    stringstream ss;
-    ss << description() << "_" << m_instance_id;
-    return ss.str();
-}
-
-std::string Node::get_name() const
-{
-    string rc;
     if (m_name.empty())
     {
-        rc = description() + "_" + to_string(m_instance_id);
+        return m_unique_name;
     }
-    else
-    {
-        rc = m_name;
-    }
-    return rc;
+    return m_name;
 }

+const std::string& Node::get_name() const
+{
+    return m_unique_name;
+}
+
 void Node::set_name(const string& name)
@@ -207,11 +201,11 @@ namespace ngraph
         auto parameter_tmp = dynamic_cast<const op::Parameter*>(&node);
         if (parameter_tmp)
         {
-            out << "Parameter(" << parameter_tmp->get_node_id() << ")";
+            out << "Parameter(" << parameter_tmp->get_name() << ")";
         }
         else
         {
-            out << "Node(" << node.get_node_id() << ")";
+            out << "Node(" << node.get_name() << ")";
         }
         return out;
     }
......
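The streaming operator above now prints get_name(), which yields the same description-plus-id text that get_node_id() used to build, so log output is unchanged while the redundant accessor goes away. A usage sketch for Node naming (assumes the ngraph API in this diff and Parameter nodes A and B; "Add_7" is illustrative):

auto C = A + B;          // unique name assigned at construction, e.g. "Add_7"
std::cout << *C << "\n"; // prints "Node(Add_7)" via the operator<< above
C->set_name("my_add");
C->get_friendly_name();  // "my_add"
C->get_name();           // still "Add_7" -- set_name() does not touch it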
@@ -80,12 +80,11 @@ namespace ngraph
     public:
         /// The class name, must not contain spaces
        std::string description() const { return m_node_type; }
-        std::string get_name() const;
+        const std::string& get_friendly_name() const;
+        const std::string& get_name() const;
         void set_name(const std::string& name);
         void clear_arguments() { m_arguments.clear(); }
         const std::multiset<Node*>& users() const { return m_users; }
-        std::string get_node_id() const;
-
         /// Return true if this has the same implementing class as node. This
         /// will be used by the pattern matcher when comparing a pattern
         /// graph against the graph.
@@ -95,7 +94,6 @@ namespace ngraph
             return std::type_index(typeid(*this)) == std::type_index(typeid(*n));
         }

-    public:
         // Set the value type if it has not already been set; otherwise, ensure that
         // value_type agrees with the value type that was set.
         // This is used when the framework specifies a value type for the value, and we
@@ -122,7 +120,6 @@ namespace ngraph
         // TODO: Remove from unit tests.
         const std::deque<descriptor::Output>& get_outputs() const;

-    public:
         /// Returns the number of outputs of the node.
         size_t get_output_size() const;
@@ -197,8 +194,9 @@ namespace ngraph
         std::string m_node_type;
         std::multiset<Node*> m_users;
-        std::string m_name;
         size_t m_instance_id;
+        std::string m_name;
+        const std::string m_unique_name;
         static std::atomic<size_t> m_next_instance_id;
         std::deque<descriptor::Input> m_inputs;
         std::deque<descriptor::Output> m_outputs;
......
@@ -43,6 +43,58 @@ static const vector<element::Type> s_known_element_types = {element::from<float>
     element::from<uint32_t>(),
     element::from<uint64_t>()};

+TEST(${BACKEND_NAME}, function_name)
+{
+    Shape shape{2, 2};
+    auto A = make_shared<op::Parameter>(element::f32, shape);
+    auto B = make_shared<op::Parameter>(element::f32, shape);
+    auto f = make_shared<Function>(A + B, op::ParameterVector{A, B}, "funky func name");
+
+    auto manager = runtime::Manager::get("${BACKEND_NAME}");
+    auto external = manager->compile(f);
+    auto backend = manager->allocate_backend();
+    auto cf = backend->make_call_frame(external);
+
+    // Create some tensors for input/output
+    shared_ptr<runtime::TensorView> a = backend->make_primary_tensor_view(element::f32, shape);
+    shared_ptr<runtime::TensorView> b = backend->make_primary_tensor_view(element::f32, shape);
+    shared_ptr<runtime::TensorView> result = backend->make_primary_tensor_view(element::f32, shape);
+
+    copy_data(a, test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector());
+    copy_data(b, test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector());
+
+    cf->call({a, b}, {result});
+    EXPECT_EQ(read_vector<float>(result),
+              (test::NDArray<float, 2>({{6, 8}, {10, 12}})).get_vector());
+}
+
+TEST(${BACKEND_NAME}, node_name)
+{
+    Shape shape{2, 2};
+    auto A = make_shared<op::Parameter>(element::f32, shape);
+    auto B = make_shared<op::Parameter>(element::f32, shape);
+    auto C = A + B;
+    C->set_name("a node name");
+    auto f = make_shared<Function>(C, op::ParameterVector{A, B});
+
+    auto manager = runtime::Manager::get("${BACKEND_NAME}");
+    auto external = manager->compile(f);
+    auto backend = manager->allocate_backend();
+    auto cf = backend->make_call_frame(external);
+
+    // Create some tensors for input/output
+    shared_ptr<runtime::TensorView> a = backend->make_primary_tensor_view(element::f32, shape);
+    shared_ptr<runtime::TensorView> b = backend->make_primary_tensor_view(element::f32, shape);
+    shared_ptr<runtime::TensorView> result = backend->make_primary_tensor_view(element::f32, shape);
+
+    copy_data(a, test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector());
+    copy_data(b, test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector());
+
+    cf->call({a, b}, {result});
+    EXPECT_EQ(read_vector<float>(result),
+              (test::NDArray<float, 2>({{6, 8}, {10, 12}})).get_vector());
+}
+
 TEST(${BACKEND_NAME}, component_cleanup)
 {
     shared_ptr<runtime::Backend> backend;
......
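The two tests added above exercise named functions and nodes end to end, but the shown assertions only check the computed sums, not the names themselves. A direct check of the naming contract could look like this (a hypothetical extra test, not part of this commit):

TEST(${BACKEND_NAME}, friendly_name)
{
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    auto C = A + B;

    EXPECT_EQ(C->get_name(), C->get_friendly_name()); // no name set yet
    C->set_name("a node name");
    EXPECT_EQ(C->get_friendly_name(), "a node name"); // friendly name updated
    EXPECT_NE(C->get_name(), "a node name");          // unique name unchanged
}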