Commit 2f776ef0 authored by Jayaram Bobba, committed by Scott Cyphers

Bug fix to graph control logic to always compute output tensors (#1053)

* Bug fix to graph control logic to always compute output tensors

* Remove stale comments
parent 7d29490f
@@ -44,7 +44,7 @@ void Input::replace_output(std::shared_ptr<Node> node, size_t i)
     replace_output(node->m_outputs.at(i));
 }
 
-std::shared_ptr<Node> Input::get_node()
+std::shared_ptr<Node> Input::get_node() const
 {
     return m_node->shared_from_this();
 }
@@ -41,7 +41,7 @@ namespace ngraph
             Input(Node* node, size_t index, Output& output);
 
             /// @return the node that this is an input of
-            std::shared_ptr<Node> get_node();
+            std::shared_ptr<Node> get_node() const;
 
             /// @return the position within all supplied tensors of this input
             size_t get_index() const { return m_index; }
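A note on the const qualifier: the codegen change below iterates over an output's consumers as const descriptor::Input* and calls get_node() through those pointers, which only compiles if the member function is const-qualified. A minimal standalone sketch of that constraint (illustrative types only, not the actual nGraph classes):

    #include <memory>

    struct Node
    {
    };

    struct Input
    {
        std::shared_ptr<Node> m_node;

        // const-qualified so it can be called through a const Input*
        std::shared_ptr<Node> get_node() const { return m_node; }
    };

    void visit(const Input* input)
    {
        auto n = input->get_node(); // would not compile if get_node() were non-const
        (void)n;
    }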
@@ -794,6 +794,32 @@ using namespace ngraph::runtime;
                    writer << " || t_en[" << tensor_index_map[input_name] << "]";
                }
            }
+
+            auto computes_output = [&]() {
+                if (std::dynamic_pointer_cast<ngraph::op::Result>(node))
+                {
+                    return true;
+                }
+                // Check if node feeds a result node that has been copy eliminated
+                for (const descriptor::Output& output : node->get_outputs())
+                {
+                    for (const descriptor::Input* input : output.get_inputs())
+                    {
+                        auto res =
+                            std::dynamic_pointer_cast<ngraph::op::Result>(input->get_node());
+                        if (res && !res->needs_copy())
+                        {
+                            return true;
+                        }
+                    }
+                }
+                return false;
+            };
+            // Always enable nodes computing output tensors
+            if (computes_output())
+            {
+                writer << " || 1";
+            }
            writer << ") {\n";
            writer.indent++;
        }
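For context, this part of the codegen emits a per-node guard built from tensor-enable flags (t_en[...]), and the node's kernel only runs when that guard is true. The added computes_output check ORs a constant 1 into the guard for any node that is a Result, or that directly feeds a Result whose copy was eliminated, so nodes producing function output tensors can no longer be skipped. A rough sketch of the emitted condition, with made-up tensor indices and the front of the guard abbreviated since it lies outside this hunk:

    // Before: the node runs only when one of its input tensors is enabled.
    if (/* ... */ t_en[3] || t_en[7])
    {
        // ... emitted node computation ...
    }

    // After, for a node that computes an output tensor, the guard is forced true:
    if (/* ... */ t_en[3] || t_en[7] || 1)
    {
        // ... emitted node computation ...
    }

Without the forced guard, a node whose inputs never change (a Constant feeding the function output, for instance) could be skipped on later calls, leaving the caller's result tensor unwritten; the new test below exercises exactly that case.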
@@ -1498,6 +1498,21 @@ NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_with_op)
     EXPECT_EQ((vector<float>{1, 2, 3, 4, 5, 6, 7, 8}), read_vector<float>(result));
 }
 
+NGRAPH_TEST(${BACKEND_NAME}, constant_multi_use)
+{
+    auto A = make_shared<op::Constant>(element::i32, Shape{}, std::vector<std::string>{"388"});
+    auto f = make_shared<Function>(A, op::ParameterVector{});
+    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+
+    std::shared_ptr<runtime::TensorView> r1 = backend->create_tensor(element::i32, Shape{});
+    backend->call(f, {r1}, std::vector<std::shared_ptr<runtime::TensorView>>{});
+    EXPECT_EQ(read_vector<int>(r1), std::vector<int>{388});
+
+    std::shared_ptr<runtime::TensorView> r2 = backend->create_tensor(element::i32, Shape{});
+    backend->call(f, {r2}, std::vector<std::shared_ptr<runtime::TensorView>>{});
+    EXPECT_EQ(read_vector<int>(r2), std::vector<int>{388});
+}
+
 NGRAPH_TEST(${BACKEND_NAME}, constant_broadcast)
 {
     const string js =
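The regression test keys on the guard change above: the function has no parameters, so after the first call nothing marks any input tensor as enabled, and each call passes a freshly created result tensor. Without the forced "|| 1", the second call could skip the constant's kernel entirely and leave r2 unwritten; calling the backend twice and checking the same value both times is what catches that.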