Unverified commit 5c7e9844, authored by Nick Korovaiko, committed by GitHub

op::Result ver3 (#594)

* the first stab at op::Result

format fixes

disabling logging

op::Result, 2nd attempt

purge stale code

disable logging

fix copyright header

* initial cleanup

* cleanup2

* remove dead code

* result.cpp, fix comments

* fix comment
parent 456db623
@@ -67,6 +67,7 @@ set (SRC
     ops/replace_slice.cpp
     ops/reshape.cpp
     ops/reverse.cpp
+    ops/result.cpp
     ops/select.cpp
     ops/select_and_scatter.cpp
     ops/sin.cpp
...
@@ -27,7 +27,7 @@ using namespace ngraph;
 atomic<size_t> Function::m_next_instance_id(0);

-Function::Function(const NodeVector& results,
+Function::Function(const ResultVector& results,
                    const op::ParameterVector& parameters,
                    const std::string& name)
     : m_results(results)
@@ -37,14 +37,50 @@ Function::Function(const ResultVector& results,
     , m_name(name)
     , m_unique_name("Function_" + to_string(m_instance_id))
 {
+    init();
+}
+
+Function::Function(const NodeVector& results,
+                   const op::ParameterVector& parameters,
+                   const std::string& name)
+    : m_results(results.size())
+    , m_parameters(parameters)
+    , m_temporary_pool_size(0)
+    , m_instance_id(m_next_instance_id.fetch_add(1))
+    , m_name(name)
+    , m_unique_name("Function_" + to_string(m_instance_id))
+{
+    std::transform(results.begin(), results.end(), m_results.begin(), [](std::shared_ptr<Node> n) {
+        return std::make_shared<op::Result>(n);
+    });
+
+    init();
+}
+
+Function::Function(const std::shared_ptr<Node>& result,
+                   const op::ParameterVector& parameters,
+                   const std::string& name)
+    : Function(NodeVector{result}, parameters, name)
+{
+}
+
+void Function::init()
+{
+    for (auto r : m_results)
+    {
+        for (descriptor::Output& output : r->get_outputs())
+        {
+            output.get_tensor().set_is_output();
+        }
+    }
+
     traverse_nodes(this, [&](shared_ptr<Node> node) {
         std::shared_ptr<op::Parameter> p = std::dynamic_pointer_cast<op::Parameter>(node);
         if (nullptr != p)
         {
-            auto it = std::find_if(parameters.begin(),
-                                   parameters.end(),
+            auto it = std::find_if(m_parameters.begin(),
+                                   m_parameters.end(),
                                    [p](std::shared_ptr<op::Parameter> q) { return (p == q); });
-            if (it == parameters.end())
+            if (it == m_parameters.end())
             {
                 throw ngraph_error("Function references undeclared parameter");
             }
@@ -52,13 +88,6 @@ Function::Function(const NodeVector& results,
     });
 }

-Function::Function(const std::shared_ptr<Node>& result,
-                   const op::ParameterVector& parameters,
-                   const std::string& name)
-    : Function(NodeVector{result}, parameters, name)
-{
-}
-
 std::list<shared_ptr<Node>> Function::get_ordered_ops()
 {
     return topological_sort(get_ops());
@@ -156,18 +185,7 @@ std::list<shared_ptr<Node>> Function::get_ops() const
     return ops;
 }

-void Function::replace_output_op(std::shared_ptr<Node> old, std::shared_ptr<Node> repl)
-{
-    auto it = std::find(begin(m_results), end(m_results), old);
-    if (it != end(m_results))
-    {
-        NGRAPH_DEBUG << "Replacing output " << old->get_name() << " w/ " << repl->get_name();
-        *it = repl;
-    }
-}
-
 void Function::replace_node(std::shared_ptr<Node> old, std::shared_ptr<Node> repl)
 {
-    replace_output_op(old, repl);
-    ngraph::replace_node(old, repl, true);
+    ngraph::replace_node(old, repl);
 }
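
Note: after this change every Function output is wrapped in an op::Result, whether the caller passes a ResultVector or a plain NodeVector. A minimal sketch of the observable behavior (hypothetical variable names; assumes the element::f32 / Shape test-style API used elsewhere in this repository):

    #include "ngraph/function.hpp"
    #include "ngraph/ops/add.hpp"
    #include "ngraph/ops/parameter.hpp"

    using namespace ngraph;

    int main()
    {
        auto a = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});
        auto b = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});
        auto sum = std::make_shared<op::Add>(a, b);

        // The NodeVector overload wraps `sum` in an op::Result internally.
        auto f = std::make_shared<Function>(NodeVector{sum}, op::ParameterVector{a, b});

        // get_results() now yields op::Result nodes; the computed value is one hop in.
        auto r = f->get_results().at(0);    // op::Result
        auto computed = r->get_input_op(0); // == sum
        return computed == sum ? 0 : 1;
    }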
@@ -25,6 +25,7 @@
 #include "ngraph/node.hpp"
 #include "ngraph/ops/parameter_vector.hpp"
+#include "ngraph/ops/result_vector.hpp"
 #include "ngraph/types/type.hpp"

 namespace ngraph
@@ -41,6 +42,12 @@ namespace ngraph
                  const op::ParameterVector& parameters,
                  const std::string& name = "");

+        Function(const ResultVector& results,
+                 const op::ParameterVector& parameters,
+                 const std::string& name = "");
+
+        void init();
+
         virtual ~Function() {}
     public:
         /// Return the number of outputs for this function.
@@ -57,8 +64,8 @@ namespace ngraph
         /// Return the function parameters
         const op::ParameterVector& get_parameters() const { return m_parameters; }
-        /// Return the ops that generate the results
-        const NodeVector get_results() const { return m_results; }
+        /// Return a list of function's outputs
+        const ResultVector& get_results() const { return m_results; }
         /// Check that there is a single result and return it.
         std::shared_ptr<Node> get_result() const;
@@ -73,13 +80,11 @@ namespace ngraph
         size_t get_instance_id() { return m_instance_id; }
         size_t get_temporary_pool_size();
         void set_temporary_pool_size(size_t);
-        // updates old w/ repl in m_results list
-        void replace_output_op(std::shared_ptr<Node> old, std::shared_ptr<Node> repl);
         // updates graph and m_results list
         void replace_node(std::shared_ptr<Node> old, std::shared_ptr<Node> repl);

     protected:
-        NodeVector m_results;
+        ResultVector m_results;
         op::ParameterVector m_parameters;
         size_t m_temporary_pool_size;
...
@@ -29,6 +29,8 @@
 #include "ngraph/node_vector.hpp"
 #include "ngraph/ops/constant.hpp"
 #include "ngraph/ops/parameter.hpp"
+#include "ngraph/ops/result.hpp"
+#include "ngraph/ops/result_vector.hpp"
 #include "ngraph/placement.hpp"
 #include "ngraph/util.hpp"
@@ -114,13 +116,11 @@ void ngraph::free_nodes(shared_ptr<Function> p)
     }
 }

-void ngraph::replace_node(std::shared_ptr<Node> target,
-                          std::shared_ptr<Node> replacement,
-                          bool replace_output)
+void ngraph::replace_node(std::shared_ptr<Node> target, std::shared_ptr<Node> replacement)
 {
-    if (target->is_output() && !replace_output)
+    if (target->is_output())
     {
-        return;
+        throw ngraph_error("Result nodes cannot be replaced.");
     }

     // Fix input/output descriptors
@@ -197,6 +197,15 @@ std::list<std::shared_ptr<ngraph::Node>>
     return result_list;
 }

+void ngraph::NodeMap::update(std::shared_ptr<ngraph::Node> orig, std::shared_ptr<ngraph::Node> val)
+{
+    if (!exists(orig))
+    {
+        throw ngraph_error("Node doesn't exist!");
+    }
+    m_node_map[orig] = val;
+}
+
 void ngraph::NodeMap::add(std::shared_ptr<ngraph::Node> orig,
                           std::shared_ptr<ngraph::Node> replacement)
 {
@@ -252,10 +261,15 @@ std::shared_ptr<ngraph::Function> ngraph::clone_function(std::shared_ptr<ngraph:
     clone_nodes(func->get_ops(), node_map);

     // get cloned function results and parameters
-    NodeVector cloned_results;
+    ResultVector cloned_results;
     for (shared_ptr<Node> node : func->get_results())
     {
-        cloned_results.push_back(node_map.get(node));
+        auto result = std::dynamic_pointer_cast<op::Result>(node_map.get(node));
+        if (!result)
+        {
+            throw ngraph_error("Results should be of type op::Result");
+        }
+        cloned_results.push_back(result);
     }
     std::vector<std::shared_ptr<op::Parameter>> cloned_params;
     for (auto param : func->get_parameters())
@@ -435,8 +449,8 @@ static shared_ptr<Function> build_largest_colocated_function(
             }
         }
     }
-    return make_shared<Function>(outputs, collected_parameters);
+    auto func = make_shared<Function>(outputs, collected_parameters);
+    return func;
 }

 // The returned nodes contains the node N with highest order. If N is placed at P, the returned
@@ -528,7 +542,7 @@ vector<shared_ptr<Function>> ngraph::split_function_by_placement(
         // Remove input-output and constant-output aliasing
         if (f_parameters.count(node) == 0 && node->description() != "Constant")
         {
-            unvisited_outputs.insert(node);
+            unvisited_outputs.insert(node->get_input_op(0));
         }
     }
@@ -571,6 +585,24 @@ vector<shared_ptr<Function>> ngraph::split_function_by_placement(
         unvisited_outputs = updated_unvisited_outputs;
     }

+    unordered_map<shared_ptr<Node>, shared_ptr<Node>> map_source_node_to_result;
+    for (auto cf : colocated_functions)
+    {
+        for (auto r : cf->get_results())
+        {
+            map_source_node_to_result[r->get_input_op(0)] = r;
+        }
+    }
+
+    for (auto it = map_parameter_to_source_node.begin(); it != map_parameter_to_source_node.end();
+         ++it)
+    {
+        if (map_source_node_to_result.count(it->second) != 0)
+        {
+            it->second = map_source_node_to_result[it->second];
+        }
+    }
+
     // The colocated_functions should be called in reversed order
     reverse(colocated_functions.begin(), colocated_functions.end());
     return colocated_functions;
...
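
Note: the new NodeMap::update rebinds an existing key and throws when the key was never added, while add() inserts a fresh pair. A small sketch of the intended semantics (hypothetical nodes, not from this patch):

    #include "ngraph/graph_util.hpp"
    #include "ngraph/ops/parameter.hpp"

    using namespace ngraph;

    int main()
    {
        auto orig = std::make_shared<op::Parameter>(element::f32, Shape{});
        auto first = std::make_shared<op::Parameter>(element::f32, Shape{});
        auto second = std::make_shared<op::Parameter>(element::f32, Shape{});

        NodeMap node_map;
        node_map.add(orig, first);     // insert a fresh orig -> clone mapping
        node_map.update(orig, second); // rebind; throws if orig were never added
        return node_map.get(orig) == second ? 0 : 1;
    }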
@@ -48,9 +48,8 @@ namespace ngraph
     void free_nodes(std::shared_ptr<Function>);

-    void replace_node(std::shared_ptr<Node> target,
-                      std::shared_ptr<Node> replacement,
-                      bool replace_output = false);
+    void replace_node(std::shared_ptr<Node> target, std::shared_ptr<Node> replacement);

     void replace_node_users_arguments(std::shared_ptr<Node> target,
                                       std::shared_ptr<Node> replacement);
@@ -78,6 +77,8 @@ namespace ngraph
             return (m_node_map.count(orig) != 0);
         }

+        void update(std::shared_ptr<ngraph::Node> orig, std::shared_ptr<ngraph::Node> val);
+
         const std::unordered_map<std::shared_ptr<ngraph::Node>, std::shared_ptr<ngraph::Node>>&
             get_node_map() const
         {
...
@@ -23,6 +23,7 @@
 #include "ngraph/descriptor/layout/tensor_view_layout.hpp"
 #include "ngraph/descriptor/primary_tensor_view.hpp"
 #include "ngraph/ops/parameter.hpp"
+#include "ngraph/ops/result.hpp"
 #include "ngraph/placement.hpp"

 using namespace std;
@@ -34,7 +35,6 @@ Node::Node(const std::string& node_type, const NodeVector& arguments)
     : m_node_type(node_type)
     , m_instance_id(m_next_instance_id.fetch_add(1))
     , m_unique_name(description() + "_" + to_string(m_instance_id))
-    , m_is_output(false)
     , m_arguments(arguments)
 {
     // Add this node as a user of each argument.
@@ -68,7 +68,7 @@ void Node::add_output(const element::Type& element_type, const Shape& shape)
         auto tensor_view_descriptor = make_shared<descriptor::PrimaryTensorView>(
             tensor_view_type,
             ngraph::descriptor::Tensor::make_tensor_name(this, i),
-            is_output(),
+            false,
             is_parameter(),
             is_constant());
         m_outputs.emplace_back(this, i, tensor_view_descriptor);
@@ -96,16 +96,7 @@ bool Node::is_parameter() const
 bool Node::is_output() const
 {
-    return m_is_output;
-}
-
-void Node::set_is_output()
-{
-    m_is_output = true;
-    for (descriptor::Output& output : get_outputs())
-    {
-        output.get_tensor().set_is_output();
-    }
+    return false;
 }

 bool Node::is_constant() const
...
@@ -102,8 +102,7 @@ namespace ngraph
         void set_value_type_checked(const element::Type& element_type, const Shape& shape);

         bool is_parameter() const;
-        bool is_output() const;
-        void set_is_output();
+        virtual bool is_output() const;
         virtual bool is_constant() const;
         virtual bool is_commutative() { return false; }
         size_t get_instance_id() const { return m_instance_id; }
@@ -200,7 +199,6 @@ namespace ngraph
         static std::atomic<size_t> m_next_instance_id;
         std::deque<descriptor::Input> m_inputs;
         std::deque<descriptor::Output> m_outputs;
-        bool m_is_output;
         std::unordered_map<Node*, autodiff::Adjoints> m_adjoint_map;
         Placement m_placement = Placement::DEFAULT;
...
@@ -23,6 +23,11 @@ namespace ngraph
 {
     class Node;

+    namespace op
+    {
+        class Result;
+    }
+
     /// \brief Zero or more nodes.
     class NodeVector : public std::vector<std::shared_ptr<Node>>
     {
...
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

#include <memory>
#include <typeindex>
#include <typeinfo>

#include "ngraph/node.hpp"
#include "ngraph/ops/result.hpp"

using namespace std;
using namespace ngraph;

op::Result::Result(const std::shared_ptr<Node>& arg)
    : RequiresTensorViewArgs("Result", {arg})
{
    if (arg->get_outputs().size() != 1)
    {
        throw ngraph_error("Expected a single-output argument");
    }

    // Always borrow the placement configuration, even the default one
    set_placement(arg->get_placement());
    set_value_type_checked(arg->get_element_type(), arg->get_shape());
}

std::shared_ptr<Node> op::Result::copy_with_new_args(const NodeVector& new_args) const
{
    if (new_args.size() != 1)
    {
        throw ngraph_error("Incorrect number of new arguments");
    }

    if (new_args.at(0)->get_outputs().size() != 1)
    {
        throw ngraph_error("Expected a single-output argument");
    }

    return std::make_shared<Result>(new_args.at(0));
}
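
Note: op::Result is deliberately restrictive: it accepts exactly one argument with exactly one output, and it inherits that argument's element type, shape, and placement. A sketch of the contract (hypothetical nodes):

    #include "ngraph/ops/parameter.hpp"
    #include "ngraph/ops/result.hpp"

    using namespace ngraph;

    int main()
    {
        auto arg = std::make_shared<op::Parameter>(element::f32, Shape{4});
        auto res = std::make_shared<op::Result>(arg); // ok: single-output argument
        // res borrows shape, element type, and placement from arg; a
        // multi-output argument would throw "Expected a single-output argument".
        return res->get_shape() == arg->get_shape() ? 0 : 1;
    }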
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

#pragma once

#include <memory>

#include "ngraph/ops/util/requires_tensor_view_args.hpp"

namespace ngraph
{
    namespace op
    {
        class Result : public util::RequiresTensorViewArgs
        {
        public:
            /// \brief Constructs a Result operation.
            ///
            /// \param arg Node that produces the input tensor.
            Result(const std::shared_ptr<Node>& arg);

            virtual std::shared_ptr<Node>
                copy_with_new_args(const NodeVector& new_args) const override;

            virtual bool is_output() const override { return true; }
        protected:
            virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                           const std::shared_ptr<Node>& delta) override
            {
                adjoints.add_delta(get_input_op(0), delta);
            }
        };
    }
}
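
Note: is_output() is now a virtual predicate that only op::Result overrides to true (the mutable m_is_output flag on Node is removed in this commit), and autodiff treats Result as a pass-through: generate_adjoints forwards delta unchanged to the wrapped op. Sketch (hypothetical nodes):

    #include "ngraph/ops/parameter.hpp"
    #include "ngraph/ops/result.hpp"

    using namespace ngraph;

    int main()
    {
        auto arg = std::make_shared<op::Parameter>(element::f32, Shape{});
        std::shared_ptr<Node> n = std::make_shared<op::Result>(arg);
        // true only via op::Result's override; false for every other Node
        return (n->is_output() && !arg->is_output()) ? 0 : 1;
    }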
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

#pragma once

#include <memory>
#include <vector>

#include "ngraph/ops/result.hpp"

namespace ngraph
{
    /// \brief Zero or more results.
    class ResultVector : public std::vector<std::shared_ptr<op::Result>>
    {
    public:
        ResultVector(size_t size)
            : std::vector<std::shared_ptr<op::Result>>(size)
        {
        }

        ResultVector(const std::initializer_list<std::shared_ptr<op::Result>>& nodes)
            : std::vector<std::shared_ptr<op::Result>>(nodes)
        {
        }

        ResultVector(const std::vector<std::shared_ptr<op::Result>>& nodes)
            : std::vector<std::shared_ptr<op::Result>>(nodes)
        {
        }

        ResultVector(const ResultVector& nodes)
            : std::vector<std::shared_ptr<op::Result>>(nodes)
        {
        }

        ResultVector() {}
    };
}
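
Note: building a ResultVector from arbitrary nodes follows the same transform the new Function(NodeVector, ...) constructor uses above; a sketch (the helper name is hypothetical):

    #include <algorithm>
    #include <memory>
    #include <vector>

    #include "ngraph/ops/result_vector.hpp"

    using namespace ngraph;

    ResultVector wrap_as_results(const std::vector<std::shared_ptr<Node>>& nodes)
    {
        // Pre-size, then wrap each node in an op::Result in place.
        ResultVector results(nodes.size());
        std::transform(nodes.begin(), nodes.end(), results.begin(), [](std::shared_ptr<Node> n) {
            return std::make_shared<op::Result>(n);
        });
        return results;
    }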
@@ -30,12 +30,10 @@ using namespace std;
 using namespace ngraph;

 ngraph::pass::Manager::Manager()
-    : m_to_set_is_output(true)
 {
 }

 ngraph::pass::Manager::Manager(bool to_set_is_output)
-    : m_to_set_is_output(to_set_is_output)
 {
 }
@@ -56,17 +54,6 @@ void ngraph::pass::Manager::run_passes(shared_ptr<Function> func)
     set<shared_ptr<Function>> tfs(begin(fs), end(fs));
     get_state().set_functions(tfs);

-    if (m_to_set_is_output)
-    {
-        for (shared_ptr<Function> f : get_state().get_functions())
-        {
-            for (size_t i = 0; i < f->get_output_size(); ++i)
-            {
-                f->get_output_op(i)->set_is_output();
-            }
-        }
-    }
-
     for (shared_ptr<PassBase> pass : m_pass_list)
     {
         pass->set_state(get_state());
...
@@ -57,5 +57,4 @@ public:
 private:
     std::vector<std::shared_ptr<PassBase>> m_pass_list;
     ManagerState m_state;
-    bool m_to_set_is_output;
 };
@@ -72,6 +72,7 @@
 #include "ngraph/ops/remainder.hpp"
 #include "ngraph/ops/replace_slice.hpp"
 #include "ngraph/ops/reshape.hpp"
+#include "ngraph/ops/result.hpp"
 #include "ngraph/ops/reverse.hpp"
 #include "ngraph/ops/select.hpp"
 #include "ngraph/ops/select_and_scatter.hpp"
@@ -3389,6 +3390,15 @@ namespace ngraph
                 }
             }

+            template <>
+            void CPU_Emitter::EMITTER_DECL(ngraph::op::Result)
+            {
+                writer << "kernel::result<" << out[0].get_type() << ">(" << args[0].get_name()
+                       << ",\n";
+                writer << " " << out[0].get_name() << ",\n";
+                writer << " " << shape_size(node->get_shape()) << ");\n";
+            }
         }
     }
 }
...
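
Note: for, say, a float32 output of 8 elements, the emitter above writes a plain copy call into the generated source, roughly like the following (illustrative only; arg0/out0 stand in for the generated tensor variable names):

    kernel::result<float>(arg0,
     out0,
     8);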
@@ -82,6 +82,7 @@
 #include "ngraph/ops/remainder.hpp"
 #include "ngraph/ops/replace_slice.hpp"
 #include "ngraph/ops/reshape.hpp"
+#include "ngraph/ops/result.hpp"
 #include "ngraph/ops/reverse.hpp"
 #include "ngraph/ops/select.hpp"
 #include "ngraph/ops/select_and_scatter.hpp"
@@ -227,6 +228,7 @@ static const runtime::cpu::OpMap dispatcher{
     {TI(ngraph::op::Not), &runtime::cpu::CPU_Emitter::emit<op::Not>},
     {TI(ngraph::op::MaxPool), &runtime::cpu::CPU_Emitter::emit<op::MaxPool>},
     {TI(ngraph::op::Reverse), &runtime::cpu::CPU_Emitter::emit<op::Reverse>},
+    {TI(ngraph::op::Result), &runtime::cpu::CPU_Emitter::emit<op::Result>},
     {TI(ngraph::op::ReduceWindow), &runtime::cpu::CPU_Emitter::emit<op::ReduceWindow>},
     {TI(ngraph::op::SelectAndScatter), &runtime::cpu::CPU_Emitter::emit<op::SelectAndScatter>},
     {TI(ngraph::op::AvgPool), &runtime::cpu::CPU_Emitter::emit<op::AvgPool>},
@@ -316,6 +318,7 @@ void runtime::cpu::CPU_ExternalFunction::compile()
 #include "ngraph/runtime/kernel/relu.hpp"
 #include "ngraph/runtime/kernel/replace_slice.hpp"
 #include "ngraph/runtime/kernel/reshape.hpp"
+#include "ngraph/runtime/kernel/result.hpp"
 #include "ngraph/runtime/kernel/reverse.hpp"
 #include "ngraph/runtime/kernel/select_and_scatter.hpp"
 #include "ngraph/runtime/kernel/slice.hpp"
@@ -604,6 +607,7 @@ using namespace ngraph::runtime;
         }

         // create output alias map
+        /*
         size_t output_index = 0;
         unordered_map<descriptor::TensorView*, vector<size_t>> output_alias_map;
         vector<size_t> aliases;
@@ -619,48 +623,17 @@ using namespace ngraph::runtime;
             }
             output_index++;
         }
+        */

         // Add outputs to the variable name map
-        output_index = 0;
         for (size_t i = 0; i < current_function->get_output_size(); ++i)
         {
             shared_ptr<Node> op = current_function->get_output_op(i);
             shared_ptr<descriptor::TensorView> tv = op->get_output_tensor_view();
-            const element::Type& et = tv->get_tensor_view_type()->get_element_type();
-            bool parameter_as_output = false;
-            for (shared_ptr<ngraph::op::Parameter> param : current_function->get_parameters())
-            {
-                for (const descriptor::Output& pout : param->get_outputs())
-                {
-                    shared_ptr<descriptor::TensorView> ptv = pout.get_tensor_view();
-                    if (tv == ptv)
-                    {
-                        parameter_as_output = true;
-                        writer << "memcpy(static_cast<" << et.c_type_string() << "*>(outputs["
-                               << output_index << "]), "
-                               << m_variable_name_map[ptv->get_tensor().get_name()] << ", "
-                               << ptv->get_tensor().size() << ");\n";
-                        break;
-                    }
-                }
-            }
-            if (!parameter_as_output && !contains(aliases, output_index))
-            {
-                if (contains(constants, tv.get()))
-                {
-                    writer << "memcpy(outputs[" << output_index << "], "
-                           << tv->get_tensor().get_name() << ", " << tv->get_tensor().size()
-                           << ");\n";
-                }
-                else
-                {
-                    string type = et.c_type_string();
-                    stringstream ss;
-                    ss << "((" << type << "*)(outputs[" << output_index << "]))";
-                    m_variable_name_map[tv->get_tensor().get_name()] = ss.str();
-                }
-            }
-            output_index++;
+            string type = tv->get_tensor_view_type()->get_element_type().c_type_string();
+            stringstream ss;
+            ss << "((" << type << "*)(outputs[" << i << "]))";
+            m_variable_name_map[tv->get_tensor().get_name()] = ss.str();
         }

         for (shared_ptr<Node> node : current_function->get_ordered_ops())
@@ -751,7 +724,6 @@ using namespace ngraph::runtime;
             // Emit operation epilogue
             if (!node->is_parameter() && !node->is_constant())
             {
-                handle_output_alias(writer, *node, output_alias_map);
                 if (m_emit_timing)
                 {
                     emit_debug_function_exit(writer, node.get(), in, out);
@@ -888,35 +860,6 @@ using namespace ngraph::runtime;
     }
 }

-void runtime::cpu::CPU_ExternalFunction::handle_output_alias(
-    codegen::CodeWriter& writer,
-    const Node& node,
-    const unordered_map<descriptor::TensorView*, vector<size_t>>& output_alias_map)
-{
-    for (const descriptor::Output& output : node.get_outputs())
-    {
-        shared_ptr<descriptor::TensorView> otv = output.get_tensor_view();
-        auto it = output_alias_map.find(otv.get());
-        if (it != output_alias_map.end())
-        {
-            const vector<size_t>& outputs = it->second;
-            if (outputs.size() > 1)
-            {
-                writer << "{ // handle output alias for previous op\n";
-                writer.indent++;
-                for (size_t i = 1; i < outputs.size(); i++)
-                {
-                    writer << "memcpy(static_cast<void*>(outputs[" << outputs[i]
-                           << "]), static_cast<void*>(outputs[" << outputs[0] << "]), "
-                           << otv->get_tensor().size() << ");\n";
-                }
-                writer.indent--;
-                writer << "}\n";
-            }
-        }
-    }
-}
-
 shared_ptr<ngraph::runtime::CallFrame> runtime::cpu::CPU_ExternalFunction::make_call_frame()
 {
     if (!m_is_compiled)
...
@@ -31,6 +31,7 @@
 #include "ngraph/ops/convolution.hpp"
 #include "ngraph/ops/op.hpp"
 #include "ngraph/ops/relu.hpp"
+#include "ngraph/ops/result.hpp"
 #include "ngraph/runtime/cpu/cpu_layout_descriptor.hpp"
 #include "ngraph/runtime/cpu/cpu_op_annotations.hpp"
 #include "ngraph/runtime/cpu/mkldnn_utils.hpp"
@@ -629,6 +630,16 @@ namespace ngraph
                 }
             }

+            template <>
+            void CPULayout::LAYOUT_DECL(ngraph::op::Result)
+            {
+                auto input_layout =
+                    runtime::cpu::mkldnn_utils::get_input_mkldnn_format(node.get(), 0);
+                vector<memory::format> prim_output_formats;
+                prim_output_formats.push_back(input_layout);
+                set_output_layouts(node, prim_output_formats);
+            }
+
             template <>
             void CPULayout::LAYOUT_DECL(ngraph::op::Relu)
             {
@@ -708,6 +719,7 @@ static const runtime::cpu::pass::LayoutOpMap s_dispatcher{
     {TI(ngraph::op::AvgPoolBackprop),
      &runtime::cpu::pass::CPULayout::layout<ngraph::op::AvgPoolBackprop>},
     {TI(ngraph::op::Relu), &runtime::cpu::pass::CPULayout::layout<ngraph::op::Relu>},
+    {TI(ngraph::op::Result), &runtime::cpu::pass::CPULayout::layout<ngraph::op::Result>},
     {TI(ngraph::op::ReluBackprop),
      &runtime::cpu::pass::CPULayout::layout<ngraph::op::ReluBackprop>},
 };
...
@@ -18,6 +18,7 @@
 #include <cstdlib>
 #include <iomanip>

+#include "ngraph/ops/result.hpp"
 #include "ngraph/runtime/host_tensor_view.hpp"
 #include "ngraph/runtime/interpreter/int_call_frame.hpp"
@@ -52,31 +53,16 @@ void runtime::interpreter::INT_CallFrame::call(
             tensor_map.insert({tv, input_tvs[arg_index++]});
         }
     }

-    std::vector<size_t> aliased_outputs;
-    for (size_t i = 0; i < output_tvs.size(); i++)
+    for (size_t i = 0; i < function->get_output_size(); i++)
     {
-        shared_ptr<Node> op = function->get_output_op(i);
-        descriptor::TensorView* tv = op->get_output_tensor_view(0).get();
-        string name = tv->get_tensor().get_name();
-        if (contains_key(tensor_map, tv))
-        {
-            if (op->description() == "Parameter")
-            {
-                // Here we handle the special case where an output is just a copy of an input
-                memcpy(output_tvs[i]->get_data_ptr(),
-                       tensor_map.at(tv)->get_data_ptr(),
-                       tv->get_tensor().size());
-            }
-            else
-            {
-                // This is a computed value returned more than once and will need to be copied at the end
-                aliased_outputs.push_back(i);
-            }
-        }
-        else
+        auto output_op = function->get_output_op(i);
+        if (!std::dynamic_pointer_cast<op::Result>(output_op))
         {
-            tensor_map.insert({tv, output_tvs[i]});
+            throw ngraph_error("One of function's outputs isn't op::Result");
         }
+        descriptor::TensorView* tv = function->get_output_op(i)->get_output_tensor_view(0).get();
+        tensor_map.insert({tv, output_tvs[i]});
     }

     // Invoke computation
@@ -163,29 +149,6 @@ void runtime::interpreter::INT_CallFrame::call(
             }
         }
     }
-
-    for (size_t i : aliased_outputs)
-    {
-        shared_ptr<Node> op = function->get_output_op(i);
-        size_t first_output;
-        for (first_output = 0; first_output <= i; ++first_output)
-        {
-            if (function->get_output_op(first_output) == op)
-            {
-                break;
-            }
-        }
-        if (first_output == i)
-        {
-            throw ngraph_error("Internal error: duplicate output missing");
-        }
-        descriptor::TensorView* tv = op->get_output_tensor_view(0).get();
-        string name = tv->get_tensor().get_name();
-        // Here we handle the special case where an output is just a copy of an input
-        memcpy(output_tvs[i]->get_data_ptr(),
-               output_tvs[first_output]->get_data_ptr(),
-               tv->get_tensor().size());
-    }
 }

 void runtime::interpreter::INT_CallFrame::generate_calls(
...
@@ -39,6 +39,7 @@
 #include "ngraph/ops/reduce_window.hpp"
 #include "ngraph/ops/replace_slice.hpp"
 #include "ngraph/ops/reshape.hpp"
+#include "ngraph/ops/result.hpp"
 #include "ngraph/ops/reverse.hpp"
 #include "ngraph/ops/select_and_scatter.hpp"
 #include "ngraph/ops/slice.hpp"
@@ -89,6 +90,7 @@
 #include "ngraph/runtime/kernel/relu.hpp"
 #include "ngraph/runtime/kernel/replace_slice.hpp"
 #include "ngraph/runtime/kernel/reshape.hpp"
+#include "ngraph/runtime/kernel/result.hpp"
 #include "ngraph/runtime/kernel/reverse.hpp"
 #include "ngraph/runtime/kernel/select.hpp"
 #include "ngraph/runtime/kernel/select_and_scatter.hpp"
@@ -720,6 +722,13 @@ private:
                             reshape->get_input_order(),
                             out[0]->get_shape());
         }
+        else if (node_op == "Result")
+        {
+            ngraph::op::Result* res = dynamic_cast<ngraph::op::Result*>(&node);
+            kernel::result(reinterpret_cast<T*>(args[0]->get_data_ptr()),
+                           reinterpret_cast<T*>(out[0]->get_data_ptr()),
+                           shape_size(res->get_shape()));
+        }
         else if (node_op == "Reverse")
         {
             ngraph::op::Reverse* reverse = dynamic_cast<ngraph::op::Reverse*>(&node);
...
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------

#pragma once

#include <algorithm>
#include <cmath>
#include <cstring> // for memcpy
#include <numeric>
#include <vector>

#include "ngraph/shape.hpp"

namespace ngraph
{
    namespace runtime
    {
        namespace kernel
        {
            template <typename T>
            void result(T* arg, T* out, size_t count)
            {
                memcpy(out, arg, sizeof(T) * count);
            }
        }
    }
}
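
Note: kernel::result is just a typed memcpy; a self-contained usage sketch:

    #include <cassert>
    #include <vector>

    #include "ngraph/runtime/kernel/result.hpp"

    int main()
    {
        std::vector<float> in{1.0f, 2.0f, 3.0f};
        std::vector<float> out(in.size());
        // Copies in.size() floats from in to out.
        ngraph::runtime::kernel::result(in.data(), out.data(), in.size());
        assert(out[2] == 3.0f);
        return 0;
    }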
@@ -64,6 +64,7 @@
 #include "ngraph/ops/remainder.hpp"
 #include "ngraph/ops/replace_slice.hpp"
 #include "ngraph/ops/reshape.hpp"
+#include "ngraph/ops/result.hpp"
 #include "ngraph/ops/reverse.hpp"
 #include "ngraph/ops/select.hpp"
 #include "ngraph/ops/select_and_scatter.hpp"
@@ -667,6 +668,10 @@ static shared_ptr<ngraph::Function>
         auto output_shape = node_js.at("output_shape").get<vector<size_t>>();
         node = make_shared<op::Reshape>(args[0], input_order, output_shape);
     }
+    else if (node_op == "Result")
+    {
+        node = make_shared<op::Result>(args[0]);
+    }
     else if (node_op == "Reverse")
     {
         auto reversed_axes = node_js.at("reversed_axes").get<set<size_t>>();
@@ -1061,6 +1066,9 @@ static json write(const Node& n)
         node["input_order"] = tmp->get_input_order();
         node["output_shape"] = tmp->get_output_shape();
     }
+    else if (node_op == "Result")
+    {
+    }
     else if (node_op == "Reverse")
     {
         auto tmp = dynamic_cast<const op::Reverse*>(&n);
...
@@ -25,9 +25,12 @@
 #include "ngraph/graph_util.hpp"
 #include "ngraph/log.hpp"
 #include "ngraph/node.hpp"
+#include "ngraph/ops/result_vector.hpp"
 #include "ngraph/runtime/backend.hpp"
 #include "ngraph/util.hpp"

+#include <iostream>
+
 using namespace std;

 std::string ngraph::to_cplusplus_sourcecode_literal(bool val)
@@ -239,10 +242,21 @@ ngraph::FpropCache ngraph::cache_fprop(std::shared_ptr<ngraph::Function> fprop,
     }

     // create the new outputs for fprop and the new fprop function
-    NodeVector fprop_outputs{fprop->get_results()};
-    fprop_outputs.insert(fprop_outputs.end(),
-                         fprop_cache.fprop_output_nodes.begin(),
-                         fprop_cache.fprop_output_nodes.end());
+    ResultVector fprop_outputs;
+
+    for (auto fpr : fprop->get_results())
+    {
+        fprop_outputs.push_back(fpr);
+    }
+
+    for (auto fpir : fprop_cache.fprop_output_nodes)
+    {
+        if (std::dynamic_pointer_cast<op::Result>(fpir))
+        {
+            throw ngraph_error("Expected op::Result in fprop->get_results()");
+        }
+        fprop_outputs.push_back(std::make_shared<op::Result>(fpir));
+    }

     fprop_cache.fprop = std::make_shared<Function>(fprop_outputs, fprop->get_parameters());
@@ -251,10 +265,15 @@ ngraph::FpropCache ngraph::cache_fprop(std::shared_ptr<ngraph::Function> fprop,
     ngraph::clone_nodes(bprop->get_ops(), node_param_map);

     // get cloned bprop results
-    NodeVector cloned_results;
+    ResultVector cloned_results;
     for (auto node : bprop->get_results())
     {
-        cloned_results.push_back(node_param_map.get(node));
+        auto result = std::dynamic_pointer_cast<op::Result>(node_param_map.get(node));
+        if (!result)
+        {
+            throw ngraph_error("Expected op::Result values for op::Result keys in node_param_map");
+        }
+        cloned_results.push_back(result);
     }

     // get clone bprop parameters
...
@@ -40,7 +40,7 @@ TEST(build_graph, build_simple)
     auto cluster_0 = make_shared<Function>(dot, op::ParameterVector{arg0, arg1, arg2, arg3});

-    ASSERT_EQ(cluster_0->get_output_op(0), dot);
+    ASSERT_EQ(cluster_0->get_output_op(0)->get_input_op(0), dot);
 }

 // Check node comparisons
...
@@ -218,11 +218,6 @@ public:
         {
             map_parameter_to_index[f->get_parameters().at(i)] = i;
         }
-        unordered_map<shared_ptr<Node>, size_t> map_result_to_index;
-        for (size_t i = 0; i < f->get_results().size(); ++i)
-        {
-            map_result_to_index[f->get_results().at(i)] = i;
-        }

         // Parameter's source is either itself, or the output node of the upstream function
         unordered_map<shared_ptr<op::Parameter>, shared_ptr<Node>> map_parameter_to_source_node;
@@ -231,6 +226,13 @@ public:
         vector<shared_ptr<Function>> funcs =
             split_function_by_placement(f, map_parameter_to_source_node);

+        auto main_func = funcs.back();
+        unordered_map<shared_ptr<Node>, size_t> map_result_to_index;
+        for (size_t i = 0; i < main_func->get_results().size(); ++i)
+        {
+            map_result_to_index[main_func->get_results().at(i)] = i;
+        }
+
         // Make call frames
         vector<shared_ptr<runtime::CallFrame>> call_frames;
         for (auto func : funcs)
...
@@ -47,14 +47,22 @@ TEST(liveness, constant)
     auto tmp = f->get_ordered_ops();
     vector<shared_ptr<Node>> sorted{tmp.begin(), tmp.end()};
-    ASSERT_EQ(2, sorted.size());
+    ASSERT_EQ(3, sorted.size());
     EXPECT_EQ(0, sorted[0]->liveness_live_list.size());
     EXPECT_EQ(0, sorted[0]->liveness_new_list.size());
     EXPECT_EQ(0, sorted[0]->liveness_free_list.size());
-    EXPECT_EQ(0, sorted[1]->liveness_live_list.size());
-    EXPECT_EQ(0, sorted[1]->liveness_new_list.size());
+    // op::Negative is live on output to op::Result
+    EXPECT_EQ(1, sorted[1]->liveness_live_list.size());
+    // op::Negative is new
+    EXPECT_EQ(1, sorted[1]->liveness_new_list.size());
     EXPECT_EQ(0, sorted[1]->liveness_free_list.size());
+
+    // op::Negative is live on input to op::Result
+    EXPECT_EQ(1, sorted[2]->liveness_live_list.size());
+    EXPECT_EQ(0, sorted[2]->liveness_new_list.size());
+    // op::Negative is freed
+    EXPECT_EQ(1, sorted[2]->liveness_free_list.size());
 }

 TEST(liveness, liveness)
...
@@ -234,5 +234,5 @@ TEST(memory_layout, constant)
     pass_manager.run_passes(f);
     auto sorted = f->get_ordered_ops();
     size_t temporary_pool_size = f->get_temporary_pool_size();
-    EXPECT_EQ(0, temporary_pool_size);
+    EXPECT_EQ(4, temporary_pool_size);
 }
@@ -42,6 +42,7 @@
 #include "ngraph/runtime/cpu/pass/cpu_fusion.hpp"
 #include "ngraph/serializer.hpp"
 #include "util/matcher.hpp"
+#include "util/test_tools.hpp"

 using namespace ngraph;
 using namespace std;
@@ -89,9 +90,9 @@ bool sum_predicate(std::shared_ptr<Node> gn)
             return false;
         }

-        NGRAPH_DEBUG << "looking at function's result "
-                     << r->get_functions()[0]->get_result()->get_name();
-        if (auto sum = std::dynamic_pointer_cast<op::Add>(r->get_functions()[0]->get_result()))
+        auto result = r->get_functions()[0]->get_result()->get_input_op(0);
+        NGRAPH_DEBUG << "looking at function's result " << result->get_name();
+        if (auto sum = std::dynamic_pointer_cast<op::Add>(result))
         {
             auto parm1 = std::dynamic_pointer_cast<op::Parameter>(sum->get_input_op(0));
             auto parm2 = std::dynamic_pointer_cast<op::Parameter>(sum->get_input_op(1));
@@ -297,7 +298,7 @@ TEST(pattern, graph_rewrite)
         ASSERT_TRUE(graph_b->get_output_inputs(0).empty());

-        auto expected = ngraph::NodeVector{a, b, a, c, b};
-        ASSERT_TRUE(f->get_results() == expected);
+        ASSERT_TRUE(count_ops_of_type<op::Add>(f) == 0);
     }
     {
...
@@ -33,12 +33,13 @@ bool validate_list(const list<shared_ptr<Node>>& nodes)
         auto node_tmp = *it;
         auto dependencies_tmp = node_tmp->get_input_ops();
         vector<Node*> dependencies;
+
         for (shared_ptr<Node> n : dependencies_tmp)
         {
             dependencies.push_back(n.get());
         }
-        auto tmp = it++;
-        for (; tmp != nodes.rend(); tmp++)
+        auto tmp = it;
+        for (tmp++; tmp != nodes.rend(); tmp++)
         {
             auto dep_tmp = *tmp;
             auto found = find(dependencies.begin(), dependencies.end(), dep_tmp.get());
...