Commit dd5bd9ad authored by Louis Feng

Merge branch 'master' into louisfeng/NGMX-296-conv_bias

parents 97c2ce20 ad58cb29
@@ -36,7 +36,7 @@ General Instructions
These instructions assume that your system has been prepared in accordance
with the above prerequisites.
-$ cd private-ngraph-cpp
$ cd ngraph-cpp
$ mkdir build
$ cd build
$ cmake .. \
...
set(FILE_NAME ${CMAKE_BINARY_DIR}/include/nlohmann/detail/macro_scope.hpp)
file(READ ${FILE_NAME} FILE_CONTENTS)
string(REPLACE
"#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40900"
"#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40805"
REWRITTEN_FILE
"${FILE_CONTENTS}"
)
file(WRITE ${FILE_NAME} "${REWRITTEN_FILE}")
message(STATUS "json library gcc minimum version number patched")
@@ -34,6 +34,9 @@ if (${CMAKE_VERSION} VERSION_LESS 3.2)
BUILD_COMMAND ""
INSTALL_COMMAND ""
UPDATE_COMMAND ""
# cmake does not allow calling cmake functions so we call a cmake script in the Module
# directory.
PATCH_COMMAND ${CMAKE_COMMAND} -P ${CMAKE_MODULE_PATH}patch_json.cmake
)
else()
ExternalProject_Add(
@@ -44,6 +47,9 @@ else()
BUILD_COMMAND ""
INSTALL_COMMAND ""
UPDATE_COMMAND ""
# cmake does not allow calling cmake functions so we call a cmake script in the Module
# directory.
PATCH_COMMAND ${CMAKE_COMMAND} -P ${CMAKE_MODULE_PATH}patch_json.cmake
)
endif()
...
@@ -67,6 +67,7 @@ set (SRC
ops/replace_slice.cpp
ops/reshape.cpp
ops/reverse.cpp
ops/result.cpp
ops/select.cpp
ops/select_and_scatter.cpp
ops/sin.cpp
@@ -184,8 +185,8 @@ if (NGRAPH_CPU_ENABLE AND LLVM_INCLUDE_DIR AND
runtime/cpu/mkldnn_emitter.cpp
runtime/cpu/mkldnn_invoke.cpp
runtime/cpu/mkldnn_utils.cpp
-runtime/cpu/ops/convert_layout.cpp
runtime/cpu/ops/conv_bias.cpp
runtime/cpu/ops/convert_layout.cpp
runtime/cpu/ops/matmul_bias.cpp
runtime/cpu/pass/cpu_assignment.cpp
runtime/cpu/pass/cpu_fusion.cpp
...
@@ -27,7 +27,7 @@ using namespace ngraph;
atomic<size_t> Function::m_next_instance_id(0);
-Function::Function(const NodeVector& results,
Function::Function(const ResultVector& results,
const op::ParameterVector& parameters,
const std::string& name)
: m_results(results)
@@ -37,14 +37,50 @@ Function::Function(const NodeVector& results,
, m_name(name)
, m_unique_name("Function_" + to_string(m_instance_id))
{
init();
}
Function::Function(const NodeVector& results,
const op::ParameterVector& parameters,
const std::string& name)
: m_results(results.size())
, m_parameters(parameters)
, m_temporary_pool_size(0)
, m_instance_id(m_next_instance_id.fetch_add(1))
, m_name(name)
, m_unique_name("Function_" + to_string(m_instance_id))
{
std::transform(results.begin(), results.end(), m_results.begin(), [](std::shared_ptr<Node> n) {
return std::make_shared<op::Result>(n);
});
init();
}
Function::Function(const std::shared_ptr<Node>& result,
const op::ParameterVector& parameters,
const std::string& name)
: Function(NodeVector{result}, parameters, name)
{
}
void Function::init()
{
for (auto r : m_results)
{
for (descriptor::Output& output : r->get_outputs())
{
output.get_tensor().set_is_output();
}
}
traverse_nodes(this, [&](shared_ptr<Node> node) {
std::shared_ptr<op::Parameter> p = std::dynamic_pointer_cast<op::Parameter>(node);
if (nullptr != p)
{
-auto it = std::find_if(parameters.begin(),
-parameters.end(),
auto it = std::find_if(m_parameters.begin(),
m_parameters.end(),
[p](std::shared_ptr<op::Parameter> q) { return (p == q); });
-if (it == parameters.end())
if (it == m_parameters.end())
{
throw ngraph_error("Function references undeclared parameter");
}
@@ -52,13 +88,6 @@ Function::Function(const NodeVector& results,
});
}
-Function::Function(const std::shared_ptr<Node>& result,
-const op::ParameterVector& parameters,
-const std::string& name)
-: Function(NodeVector{result}, parameters, name)
-{
-}
std::list<shared_ptr<Node>> Function::get_ordered_ops()
{
return topological_sort(get_ops());
@@ -156,18 +185,7 @@ std::list<shared_ptr<Node>> Function::get_ops() const
return ops;
}
-void Function::replace_output_op(std::shared_ptr<Node> old, std::shared_ptr<Node> repl)
-{
-auto it = std::find(begin(m_results), end(m_results), old);
-if (it != end(m_results))
-{
-NGRAPH_DEBUG << "Replacing output " << old->get_name() << " w/ " << repl->get_name();
-*it = repl;
-}
-}
void Function::replace_node(std::shared_ptr<Node> old, std::shared_ptr<Node> repl)
{
-replace_output_op(old, repl);
-ngraph::replace_node(old, repl, true);
ngraph::replace_node(old, repl);
}
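Editor's note on the constructors above: a Function built from a plain NodeVector now wraps each output node in an op::Result via std::transform, and init() marks the result tensors as outputs. A minimal standalone sketch of the wrapping idiom (the toy Node/Result types here are illustrative stand-ins, not the ngraph classes):

```cpp
#include <algorithm>
#include <memory>
#include <vector>

struct Node { virtual ~Node() = default; };
struct Result : Node {
    explicit Result(std::shared_ptr<Node> arg) : arg(std::move(arg)) {}
    std::shared_ptr<Node> arg; // the node whose value this result exposes
};

int main() {
    std::vector<std::shared_ptr<Node>> outputs{std::make_shared<Node>(), std::make_shared<Node>()};
    // Wrap every output node in a Result, mirroring the new NodeVector constructor.
    std::vector<std::shared_ptr<Result>> results(outputs.size());
    std::transform(outputs.begin(), outputs.end(), results.begin(),
                   [](const std::shared_ptr<Node>& n) { return std::make_shared<Result>(n); });
    return results.size() == outputs.size() ? 0 : 1;
}
```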
@@ -25,6 +25,7 @@
#include "ngraph/node.hpp"
#include "ngraph/ops/parameter_vector.hpp"
#include "ngraph/ops/result_vector.hpp"
#include "ngraph/types/type.hpp"
namespace ngraph
@@ -41,6 +42,12 @@ namespace ngraph
const op::ParameterVector& parameters,
const std::string& name = "");
Function(const ResultVector& results,
const op::ParameterVector& parameters,
const std::string& name = "");
void init();
virtual ~Function() {}
public:
/// Return the number of outputs for this function.
@@ -57,8 +64,8 @@ namespace ngraph
/// Return the function parameters
const op::ParameterVector& get_parameters() const { return m_parameters; }
-/// Return the ops that generate the results
/// Return a list of function's outputs
-const NodeVector get_results() const { return m_results; }
const ResultVector& get_results() const { return m_results; }
/// Check that there is a single result and return it.
std::shared_ptr<Node> get_result() const;
@@ -73,13 +80,11 @@ namespace ngraph
size_t get_instance_id() { return m_instance_id; }
size_t get_temporary_pool_size();
void set_temporary_pool_size(size_t);
-// updates old w/ repl in m_results list
-void replace_output_op(std::shared_ptr<Node> old, std::shared_ptr<Node> repl);
// updates graph and m_results list
void replace_node(std::shared_ptr<Node> old, std::shared_ptr<Node> repl);
protected:
-NodeVector m_results;
ResultVector m_results;
op::ParameterVector m_parameters;
size_t m_temporary_pool_size;
...
@@ -29,6 +29,8 @@
#include "ngraph/node_vector.hpp"
#include "ngraph/ops/constant.hpp"
#include "ngraph/ops/parameter.hpp"
#include "ngraph/ops/result.hpp"
#include "ngraph/ops/result_vector.hpp"
#include "ngraph/placement.hpp"
#include "ngraph/util.hpp"
@@ -114,13 +116,11 @@ void ngraph::free_nodes(shared_ptr<Function> p)
}
}
-void ngraph::replace_node(std::shared_ptr<Node> target,
-std::shared_ptr<Node> replacement,
-bool replace_output)
void ngraph::replace_node(std::shared_ptr<Node> target, std::shared_ptr<Node> replacement)
{
-if (target->is_output() && !replace_output)
if (target->is_output())
{
-return;
throw ngraph_error("Result nodes cannot be replaced.");
}
// Fix input/output descriptors
@@ -197,6 +197,15 @@ std::list<std::shared_ptr<ngraph::Node>>
return result_list;
}
void ngraph::NodeMap::update(std::shared_ptr<ngraph::Node> orig, std::shared_ptr<ngraph::Node> val)
{
if (!exists(orig))
{
throw ngraph_error("Node doesn't exist!");
}
m_node_map[orig] = val;
}
void ngraph::NodeMap::add(std::shared_ptr<ngraph::Node> orig,
std::shared_ptr<ngraph::Node> replacement)
{
@@ -252,10 +261,15 @@ std::shared_ptr<ngraph::Function> ngraph::clone_function(std::shared_ptr<ngraph:
clone_nodes(func->get_ops(), node_map);
// get cloned function results and parameters
-NodeVector cloned_results;
ResultVector cloned_results;
for (shared_ptr<Node> node : func->get_results())
{
-cloned_results.push_back(node_map.get(node));
auto result = std::dynamic_pointer_cast<op::Result>(node_map.get(node));
if (!result)
{
throw ngraph_error("Results should be of type op::Result");
}
cloned_results.push_back(result);
}
std::vector<std::shared_ptr<op::Parameter>> cloned_params;
for (auto param : func->get_parameters())
@@ -435,8 +449,8 @@ static shared_ptr<Function> build_largest_colocated_function(
}
}
}
auto func = make_shared<Function>(outputs, collected_parameters);
-return make_shared<Function>(outputs, collected_parameters);
return func;
}
// The returned nodes contains the node N with highest order. If N is placed at P, the returned
@@ -528,7 +542,7 @@ vector<shared_ptr<Function>> ngraph::split_function_by_placement(
// Remove input-output and constant-output aliasing
if (f_parameters.count(node) == 0 && node->description() != "Constant")
{
-unvisited_outputs.insert(node);
unvisited_outputs.insert(node->get_input_op(0));
}
}
@@ -571,6 +585,24 @@ vector<shared_ptr<Function>> ngraph::split_function_by_placement(
unvisited_outputs = updated_unvisited_outputs;
}
unordered_map<shared_ptr<Node>, shared_ptr<Node>> map_source_node_to_result;
for (auto cf : colocated_functions)
{
for (auto r : cf->get_results())
{
map_source_node_to_result[r->get_input_op(0)] = r;
}
}
for (auto it = map_parameter_to_source_node.begin(); it != map_parameter_to_source_node.end();
++it)
{
if (map_source_node_to_result.count(it->second) != 0)
{
it->second = map_source_node_to_result[it->second];
}
}
// The colocated_functions should be called in reversed order
reverse(colocated_functions.begin(), colocated_functions.end());
return colocated_functions;
...
@@ -48,9 +48,8 @@ namespace ngraph
void free_nodes(std::shared_ptr<Function>);
-void replace_node(std::shared_ptr<Node> target,
-std::shared_ptr<Node> replacement,
-bool replace_output = false);
void replace_node(std::shared_ptr<Node> target, std::shared_ptr<Node> replacement);
void replace_node_users_arguments(std::shared_ptr<Node> target,
std::shared_ptr<Node> replacement);
@@ -78,6 +77,8 @@ namespace ngraph
return (m_node_map.count(orig) != 0);
}
void update(std::shared_ptr<ngraph::Node> orig, std::shared_ptr<ngraph::Node> val);
const std::unordered_map<std::shared_ptr<ngraph::Node>, std::shared_ptr<ngraph::Node>>&
get_node_map() const
{
...
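Editor's note: the new NodeMap::update declared above refuses to remap a node that was never added. A toy illustration of that contract (simplified types, not the ngraph implementation):

```cpp
#include <memory>
#include <stdexcept>
#include <unordered_map>

struct Node {};
using NodePtr = std::shared_ptr<Node>;

// Simplified NodeMap: update() may only overwrite an existing entry.
struct NodeMap {
    void add(NodePtr orig, NodePtr repl) { m_map.emplace(orig, repl); }
    void update(NodePtr orig, NodePtr val) {
        if (m_map.count(orig) == 0) throw std::runtime_error("Node doesn't exist!");
        m_map[orig] = val;
    }
    std::unordered_map<NodePtr, NodePtr> m_map;
};

int main() {
    NodeMap nm;
    auto a = std::make_shared<Node>();
    auto b = std::make_shared<Node>();
    nm.add(a, a);
    nm.update(a, b);    // fine: 'a' was added earlier
    // nm.update(b, a); // would throw: 'b' was never added
    return 0;
}
```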
@@ -23,6 +23,7 @@
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
#include "ngraph/descriptor/primary_tensor_view.hpp"
#include "ngraph/ops/parameter.hpp"
#include "ngraph/ops/result.hpp"
#include "ngraph/placement.hpp"
using namespace std;
@@ -34,7 +35,6 @@ Node::Node(const std::string& node_type, const NodeVector& arguments)
: m_node_type(node_type)
, m_instance_id(m_next_instance_id.fetch_add(1))
, m_unique_name(description() + "_" + to_string(m_instance_id))
-, m_is_output(false)
, m_arguments(arguments)
{
// Add this node as a user of each argument.
@@ -68,7 +68,7 @@ void Node::add_output(const element::Type& element_type, const Shape& shape)
auto tensor_view_descriptor = make_shared<descriptor::PrimaryTensorView>(
tensor_view_type,
ngraph::descriptor::Tensor::make_tensor_name(this, i),
-is_output(),
false,
is_parameter(),
is_constant());
m_outputs.emplace_back(this, i, tensor_view_descriptor);
@@ -96,16 +96,7 @@ bool Node::is_parameter() const
bool Node::is_output() const
{
-return m_is_output;
-}
-void Node::set_is_output()
-{
-m_is_output = true;
-for (descriptor::Output& output : get_outputs())
-{
-output.get_tensor().set_is_output();
-}
return false;
}
bool Node::is_constant() const
...
@@ -102,8 +102,7 @@ namespace ngraph
void set_value_type_checked(const element::Type& element_type, const Shape& shape);
bool is_parameter() const;
-bool is_output() const;
-void set_is_output();
virtual bool is_output() const;
virtual bool is_constant() const;
virtual bool is_commutative() { return false; }
size_t get_instance_id() const { return m_instance_id; }
@@ -200,7 +199,6 @@ namespace ngraph
static std::atomic<size_t> m_next_instance_id;
std::deque<descriptor::Input> m_inputs;
std::deque<descriptor::Output> m_outputs;
-bool m_is_output;
std::unordered_map<Node*, autodiff::Adjoints> m_adjoint_map;
Placement m_placement = Placement::DEFAULT;
...
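Editor's note: with the mutable m_is_output flag gone, output-ness is decided by the node's dynamic type: the base returns false and op::Result overrides it to true. A toy illustration of that dispatch (illustrative types, not the ngraph classes):

```cpp
#include <cassert>
#include <memory>

struct Node { virtual bool is_output() const { return false; } virtual ~Node() = default; };
struct Result : Node { bool is_output() const override { return true; } };

int main() {
    std::shared_ptr<Node> plain = std::make_shared<Node>();
    std::shared_ptr<Node> result = std::make_shared<Result>();
    assert(!plain->is_output());
    assert(result->is_output()); // dispatched to the Result override
    return 0;
}
```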
@@ -23,6 +23,11 @@ namespace ngraph
{
class Node;
namespace op
{
class Result;
}
/// \brief Zero or more nodes.
class NodeVector : public std::vector<std::shared_ptr<Node>>
{
...
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <memory>
#include <typeindex>
#include <typeinfo>
#include "ngraph/node.hpp"
#include "ngraph/ops/result.hpp"
using namespace std;
using namespace ngraph;
op::Result::Result(const std::shared_ptr<Node>& arg)
: RequiresTensorViewArgs("Result", {arg})
{
if (arg->get_outputs().size() != 1)
{
throw ngraph_error("Expected a single-output argument");
}
// Always borrow the placement configuration, even if it is the default one
set_placement(arg->get_placement());
set_value_type_checked(arg->get_element_type(), arg->get_shape());
}
std::shared_ptr<Node> op::Result::copy_with_new_args(const NodeVector& new_args) const
{
if (new_args.size() != 1)
{
throw ngraph_error("Incorrect number of new arguments");
}
if (new_args.at(0)->get_outputs().size() != 1)
{
throw ngraph_error("Expected a single-output argument");
}
return std::make_shared<Result>(new_args.at(0));
}
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include <memory>
#include "ngraph/ops/util/requires_tensor_view_args.hpp"
namespace ngraph
{
namespace op
{
class Result : public util::RequiresTensorViewArgs
{
public:
/// \brief Constructs a result operation.
///
/// \param arg Node that produces the input tensor.
Result(const std::shared_ptr<Node>& arg);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual bool is_output() const override { return true; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override
{
adjoints.add_delta(get_input_op(0), delta);
}
};
}
}
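Editor's note on generate_adjoints above: op::Result is the identity map on its single input, so its backprop rule is a pass-through of the incoming delta. For $y = x$ and a scalar loss $L$,

$$\bar{x} \mathrel{+}= \bar{y}, \qquad \text{where } \bar{v} := \frac{\partial L}{\partial v},$$

which is exactly what the `adjoints.add_delta(get_input_op(0), delta)` call records.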
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include <memory>
#include <vector>
#include "ngraph/ops/result.hpp"
namespace ngraph
{
/// \brief Zero or more nodes.
class ResultVector : public std::vector<std::shared_ptr<op::Result>>
{
public:
ResultVector(size_t size)
: std::vector<std::shared_ptr<op::Result>>(size)
{
}
ResultVector(const std::initializer_list<std::shared_ptr<op::Result>>& nodes)
: std::vector<std::shared_ptr<op::Result>>(nodes)
{
}
ResultVector(const std::vector<std::shared_ptr<op::Result>>& nodes)
: std::vector<std::shared_ptr<op::Result>>(nodes)
{
}
ResultVector(const ResultVector& nodes)
: std::vector<std::shared_ptr<op::Result>>(nodes)
{
}
ResultVector() {}
};
}
@@ -30,12 +30,10 @@ using namespace std;
using namespace ngraph;
ngraph::pass::Manager::Manager()
-: m_to_set_is_output(true)
{
}
ngraph::pass::Manager::Manager(bool to_set_is_output)
-: m_to_set_is_output(to_set_is_output)
{
}
@@ -56,17 +54,6 @@ void ngraph::pass::Manager::run_passes(shared_ptr<Function> func)
set<shared_ptr<Function>> tfs(begin(fs), end(fs));
get_state().set_functions(tfs);
-if (m_to_set_is_output)
-{
-for (shared_ptr<Function> f : get_state().get_functions())
-{
-for (size_t i = 0; i < f->get_output_size(); ++i)
-{
-f->get_output_op(i)->set_is_output();
-}
-}
-}
for (shared_ptr<PassBase> pass : m_pass_list)
{
pass->set_state(get_state());
...
@@ -57,5 +57,4 @@ public:
private:
std::vector<std::shared_ptr<PassBase>> m_pass_list;
ManagerState m_state;
-bool m_to_set_is_output;
};
@@ -150,3 +150,50 @@ void ngraph::pass::ReshapeElimination::construct_reshapex2_pattern()
auto m = std::make_shared<ngraph::pattern::Matcher>(reshape2, callback);
this->add_matcher(m);
}
void ngraph::pass::ReshapeElimination::construct_dot_transpose_pattern()
{
//dot(A,B).T = dot (B.T, A.T)
auto dot_pred = [](std::shared_ptr<Node> n) {
return static_cast<bool>(std::dynamic_pointer_cast<op::Dot>(n));
};
auto pdot = std::make_shared<pattern::op::Label>(element::f32, Shape{2, 1}, dot_pred);
auto preshape = std::make_shared<op::Reshape>(pdot, AxisVector{1, 0}, Shape{1, 2});
ngraph::pattern::gr_callback_fn callback = [](pattern::Matcher& m) {
NGRAPH_DEBUG << "In callback for construct_dot_transpose_pattern against node = "
<< m.match_root()->get_name();
std::shared_ptr<Node> nn;
auto mtranspose = std::dynamic_pointer_cast<op::Reshape>(m.match_root());
//this also checks the rank
if (mtranspose->get_input_order() != AxisVector{1, 0})
{
NGRAPH_DEBUG << "Reshape isn't transpose. "
<< vector_to_string(mtranspose->get_input_order());
return nn;
}
auto mdot = mtranspose->get_input_op(0);
if (mdot->get_shape().size() != 2)
{
NGRAPH_DEBUG << "Dot has the wrong shape. " << vector_to_string(mdot->get_shape());
return nn;
}
auto arg0 = mdot->get_input_op(0);
auto reshape0_shape = Shape{arg0->get_shape().at(1), arg0->get_shape().at(0)};
auto reshape0 = std::make_shared<op::Reshape>(arg0, AxisVector{1, 0}, reshape0_shape);
auto arg1 = mdot->get_input_op(1);
auto reshape1_shape = Shape{arg1->get_shape().at(1), arg1->get_shape().at(0)};
auto reshape1 = std::make_shared<op::Reshape>(arg1, AxisVector{1, 0}, reshape1_shape);
auto tdot = std::shared_ptr<Node>(new op::Dot(reshape1, reshape0));
return tdot;
};
auto m = std::make_shared<ngraph::pattern::Matcher>(preshape, callback);
this->add_matcher(m);
}
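Editor's note: the pattern above rests on the transpose identity for matrix products; a rank-2 Reshape with input order {1, 0} is precisely a transpose, so the matched graph can be rewritten with the operands swapped:

$$(AB)^{\mathsf T} = B^{\mathsf T} A^{\mathsf T}, \qquad \big((AB)^{\mathsf T}\big)_{ij} = \sum_k A_{jk} B_{ki} = \sum_k (B^{\mathsf T})_{ik} (A^{\mathsf T})_{kj},$$

hence the callback builds `op::Dot(reshape1, reshape0)` with reshape1 derived from arg1 and reshape0 from arg0.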
@@ -32,11 +32,13 @@ public:
ReshapeElimination()
: GraphRewrite()
{
construct_dot_transpose_pattern();
construct_identity_reshape_pattern();
construct_reshapex2_pattern();
}
private:
void construct_dot_transpose_pattern();
void construct_identity_reshape_pattern();
void construct_reshapex2_pattern();
};
@@ -72,6 +72,7 @@
#include "ngraph/ops/remainder.hpp"
#include "ngraph/ops/replace_slice.hpp"
#include "ngraph/ops/reshape.hpp"
#include "ngraph/ops/result.hpp"
#include "ngraph/ops/reverse.hpp"
#include "ngraph/ops/select.hpp"
#include "ngraph/ops/select_and_scatter.hpp"
@@ -240,7 +241,7 @@ namespace ngraph
const Shape& arg0_shape = cg->get_arg0_shape(); //W
const Shape& arg1_shape = cg->get_arg1_shape(); //x
-const Shape& arg2_shape = args[2].get_shape(); //bias (C)
const Shape& arg2_shape = node->get_shape(); //bias (C)
static const char* ctranspose = "cblas::Transpose::Transpose, ";
static const char* cnotranspose = "cblas::Transpose::None, ";
@@ -270,16 +271,23 @@
writer << "{ // " << node->get_name() << "\n";
writer.indent++;
const char* cbeta = "0.0f";
if (args.size() > 2)
{
writer << "memcpy(" << out[0].get_name() << ", " << args[2].get_name() << ", " writer << "memcpy(" << out[0].get_name() << ", " << args[2].get_name() << ", "
<< out[0].get_size() * out[0].get_element_type().size() << ");\n"; << out[0].get_size() * out[0].get_element_type().size() << ");\n";
cbeta = "1.0f";
}
writer << "cblas::cblas_sgemm(" writer << "cblas::cblas_sgemm("
<< "cblas::Layout::RowMajor, " << tranpose_a << tranpose_b << m << ", " << n << "cblas::Layout::RowMajor, " << tranpose_a << tranpose_b << m << ", " << n
<< ", " << k << ",\n" << ", " << k << ",\n"
<< " 1.0f, " << args[0].get_name() << ", " << max(1UL, lda) << ", " << " 1.0f, " << args[0].get_name() << ", " << max(1UL, lda) << ", "
<< args[1].get_name() << ", " << max(1UL, ldb) << ", 1.0f,\n" << args[1].get_name() << ", " << max(1UL, ldb) << ", " << cbeta << ",\n"
<< " " << out[0].get_name() << ", " << max(1UL, arg2_shape[1]) << " " << out[0].get_name() << ", " << max(1UL, arg2_shape[1])
<< ");\n"; << ");\n";
writer.indent--; writer.indent--;
writer << "}\n"; writer << "}\n";
} }
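Editor's note: the cbeta switch above leans on the standard GEMM contract C = alpha*A*B + beta*C. With a bias, the bias is memcpy'd into the output first and beta = 1.0f accumulates the product on top of it; without a bias, beta = 0.0f overwrites the output. A plain-C++ sketch of that semantics (a naive reference loop, not the cblas routine):

```cpp
#include <cassert>
#include <vector>

// Reference GEMM: C = alpha * A(m x k) * B(k x n) + beta * C(m x n), row-major.
void gemm(int m, int n, int k, float alpha, const float* A, const float* B,
          float beta, float* C) {
    for (int i = 0; i < m; ++i)
        for (int j = 0; j < n; ++j) {
            float acc = 0.0f;
            for (int p = 0; p < k; ++p)
                acc += A[i * k + p] * B[p * n + j];
            C[i * n + j] = alpha * acc + beta * C[i * n + j];
        }
}

int main() {
    std::vector<float> A{1, 2, 3, 4};     // 2x2
    std::vector<float> B{1, 0, 0, 1};     // 2x2 identity
    std::vector<float> C{10, 10, 10, 10}; // bias copied into the output beforehand
    gemm(2, 2, 2, 1.0f, A.data(), B.data(), /*beta=*/1.0f, C.data());
    assert(C[0] == 11 && C[1] == 12 && C[2] == 13 && C[3] == 14);
    return 0;
}
```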
@@ -3526,6 +3534,15 @@ namespace ngraph
}
}
}
template <>
void CPU_Emitter::EMITTER_DECL(ngraph::op::Result)
{
writer << "kernel::result<" << out[0].get_type() << ">(" << args[0].get_name()
<< ",\n";
writer << " " << out[0].get_name() << ",\n";
writer << " " << shape_size(node->get_shape()) << ");\n";
}
}
}
}
...
@@ -82,6 +82,7 @@
#include "ngraph/ops/remainder.hpp"
#include "ngraph/ops/replace_slice.hpp"
#include "ngraph/ops/reshape.hpp"
#include "ngraph/ops/result.hpp"
#include "ngraph/ops/reverse.hpp"
#include "ngraph/ops/select.hpp"
#include "ngraph/ops/select_and_scatter.hpp"
@@ -234,6 +235,7 @@ static const runtime::cpu::OpMap dispatcher{
{TI(ngraph::op::Not), &runtime::cpu::CPU_Emitter::emit<op::Not>},
{TI(ngraph::op::MaxPool), &runtime::cpu::CPU_Emitter::emit<op::MaxPool>},
{TI(ngraph::op::Reverse), &runtime::cpu::CPU_Emitter::emit<op::Reverse>},
{TI(ngraph::op::Result), &runtime::cpu::CPU_Emitter::emit<op::Result>},
{TI(ngraph::op::ReduceWindow), &runtime::cpu::CPU_Emitter::emit<op::ReduceWindow>},
{TI(ngraph::op::SelectAndScatter), &runtime::cpu::CPU_Emitter::emit<op::SelectAndScatter>},
{TI(ngraph::op::AvgPool), &runtime::cpu::CPU_Emitter::emit<op::AvgPool>},
@@ -323,6 +325,7 @@ void runtime::cpu::CPU_ExternalFunction::compile()
#include "ngraph/runtime/kernel/relu.hpp"
#include "ngraph/runtime/kernel/replace_slice.hpp"
#include "ngraph/runtime/kernel/reshape.hpp"
#include "ngraph/runtime/kernel/result.hpp"
#include "ngraph/runtime/kernel/reverse.hpp"
#include "ngraph/runtime/kernel/select_and_scatter.hpp"
#include "ngraph/runtime/kernel/slice.hpp"
@@ -611,6 +614,7 @@ using namespace ngraph::runtime;
}
// create output alias map
/*
size_t output_index = 0;
unordered_map<descriptor::TensorView*, vector<size_t>> output_alias_map;
vector<size_t> aliases;
@@ -626,49 +630,18 @@ using namespace ngraph::runtime;
}
output_index++;
}
*/
// Add outputs to the variable name map
-output_index = 0;
for (size_t i = 0; i < current_function->get_output_size(); ++i)
{
shared_ptr<Node> op = current_function->get_output_op(i);
shared_ptr<descriptor::TensorView> tv = op->get_output_tensor_view();
-const element::Type& et = tv->get_tensor_view_type()->get_element_type();
string type = tv->get_tensor_view_type()->get_element_type().c_type_string();
-bool parameter_as_output = false;
-for (shared_ptr<ngraph::op::Parameter> param : current_function->get_parameters())
-{
-for (const descriptor::Output& pout : param->get_outputs())
-{
-shared_ptr<descriptor::TensorView> ptv = pout.get_tensor_view();
-if (tv == ptv)
-{
-parameter_as_output = true;
-writer << "memcpy(static_cast<" << et.c_type_string() << "*>(outputs["
-<< output_index << "]), "
-<< m_variable_name_map[ptv->get_tensor().get_name()] << ", "
-<< ptv->get_tensor().size() << ");\n";
-break;
-}
-}
-}
-if (!parameter_as_output && !contains(aliases, output_index))
-{
-if (contains(constants, tv.get()))
-{
-writer << "memcpy(outputs[" << output_index << "], "
-<< tv->get_tensor().get_name() << ", " << tv->get_tensor().size()
-<< ");\n";
-}
-else
-{
-string type = et.c_type_string();
stringstream ss;
-ss << "((" << type << "*)(outputs[" << output_index << "]))";
ss << "((" << type << "*)(outputs[" << i << "]))";
m_variable_name_map[tv->get_tensor().get_name()] = ss.str();
}
-}
-output_index++;
-}
for (shared_ptr<Node> node : current_function->get_ordered_ops())
{
@@ -758,7 +731,6 @@ using namespace ngraph::runtime;
// Emit operation epilogue
if (!node->is_parameter() && !node->is_constant())
{
-handle_output_alias(writer, *node, output_alias_map);
if (m_emit_timing)
{
emit_debug_function_exit(writer, node.get(), in, out);
@@ -895,35 +867,6 @@ using namespace ngraph::runtime;
}
}
-void runtime::cpu::CPU_ExternalFunction::handle_output_alias(
-codegen::CodeWriter& writer,
-const Node& node,
-const unordered_map<descriptor::TensorView*, vector<size_t>>& output_alias_map)
-{
-for (const descriptor::Output& output : node.get_outputs())
-{
-shared_ptr<descriptor::TensorView> otv = output.get_tensor_view();
-auto it = output_alias_map.find(otv.get());
-if (it != output_alias_map.end())
-{
-const vector<size_t>& outputs = it->second;
-if (outputs.size() > 1)
-{
-writer << "{ // handle output alias for previous op\n";
-writer.indent++;
-for (size_t i = 1; i < outputs.size(); i++)
-{
-writer << "memcpy(static_cast<void*>(outputs[" << outputs[i]
-<< "]), static_cast<void*>(outputs[" << outputs[0] << "]), "
-<< otv->get_tensor().size() << ");\n";
-}
-writer.indent--;
-writer << "}\n";
-}
-}
-}
-}
shared_ptr<ngraph::runtime::CallFrame> runtime::cpu::CPU_ExternalFunction::make_call_frame()
{
if (!m_is_compiled)
...
@@ -113,6 +113,7 @@ namespace ngraph
size_t build_relu_forward(const mkldnn::memory::desc& input_desc,
const mkldnn::memory::desc& result_desc);
size_t build_elementwise_add(
const mkldnn::memory::desc& input0_data_desc,
const mkldnn::memory::desc& input1_data_desc,
...
@@ -21,13 +21,14 @@
std::shared_ptr<ngraph::Node>
ngraph::op::MatmulBias::copy_with_new_args(const NodeVector& new_args) const
{
-if (new_args.size() != 2)
if (new_args.size() != 2 && new_args.size() != 3)
{
throw ngraph_error("Incorrect number of new arguments");
}
return std::make_shared<MatmulBias>(new_args.at(0),
new_args.at(1),
-new_args.at(1),
new_args.size() == 3 ? new_args.at(2) : nullptr,
m_shape_w,
m_shape_x,
m_transpose_w,
@@ -41,7 +42,9 @@ ngraph::op::MatmulBias::MatmulBias(std::shared_ptr<ngraph::Node> W,
Shape shape_x,
bool transpose_w,
bool transpose_x)
-: RequiresTensorViewArgs("MatMulBias", {W, x, b})
: RequiresTensorViewArgs("MatMulBias",
b == nullptr ? std::vector<std::shared_ptr<Node>>{W, x}
: std::vector<std::shared_ptr<Node>>{W, x, b})
, m_shape_w(shape_w)
, m_shape_x(shape_x)
, m_transpose_w(transpose_w)
@@ -74,8 +77,12 @@ ngraph::op::MatmulBias::MatmulBias(std::shared_ptr<ngraph::Node> W,
}
Shape dot_shape{shape_w.at(1 - dot_dimension_w), shape_x.at(1 - dot_dimension_x)};
-NGRAPH_DEBUG << "dot_shape shape = " << vector_to_string(dot_shape)
-<< " , b shape = " << vector_to_string(b->get_shape());
NGRAPH_DEBUG << "dot_shape shape = " << vector_to_string(dot_shape);
if (b)
{
NGRAPH_DEBUG << "b shape = " << vector_to_string(b->get_shape());
}
add_output(W->get_element_type(), dot_shape);
}
...
@@ -38,13 +38,19 @@ public:
CPUFusion()
: GraphRewrite()
{
-construct_gemm_pattern();
construct_matmul_pattern();
construct_matmulbias_pattern();
construct_fprop_bn();
construct_zero_padded_reshaped_conv();
construct_zero_padded_conv();
construct_conv_bias();
}
private:
-void construct_gemm_pattern();
void construct_matmul_pattern();
void construct_matmulbias_pattern();
void construct_fprop_bn();
void construct_zero_padded_reshaped_conv();
void construct_zero_padded_conv();
void construct_conv_bias();
};
@@ -31,6 +31,7 @@
#include "ngraph/ops/convolution.hpp"
#include "ngraph/ops/op.hpp"
#include "ngraph/ops/relu.hpp"
#include "ngraph/ops/result.hpp"
#include "ngraph/runtime/cpu/cpu_layout_descriptor.hpp"
#include "ngraph/runtime/cpu/cpu_op_annotations.hpp"
#include "ngraph/runtime/cpu/mkldnn_utils.hpp"
@@ -629,6 +630,16 @@ namespace ngraph
}
}
template <>
void CPULayout::LAYOUT_DECL(ngraph::op::Result)
{
auto input_layout =
runtime::cpu::mkldnn_utils::get_input_mkldnn_format(node.get(), 0);
vector<memory::format> prim_output_formats;
prim_output_formats.push_back(input_layout);
set_output_layouts(node, prim_output_formats);
}
template <>
void CPULayout::LAYOUT_DECL(ngraph::op::Relu)
{
@@ -708,6 +719,7 @@ static const runtime::cpu::pass::LayoutOpMap s_dispatcher{
{TI(ngraph::op::AvgPoolBackprop),
&runtime::cpu::pass::CPULayout::layout<ngraph::op::AvgPoolBackprop>},
{TI(ngraph::op::Relu), &runtime::cpu::pass::CPULayout::layout<ngraph::op::Relu>},
{TI(ngraph::op::Result), &runtime::cpu::pass::CPULayout::layout<ngraph::op::Result>},
{TI(ngraph::op::ReluBackprop),
&runtime::cpu::pass::CPULayout::layout<ngraph::op::ReluBackprop>},
};
...
@@ -18,6 +18,7 @@
#include <cstdlib>
#include <iomanip>
#include "ngraph/ops/result.hpp"
#include "ngraph/runtime/host_tensor_view.hpp"
#include "ngraph/runtime/interpreter/int_call_frame.hpp"
@@ -52,32 +53,17 @@ void runtime::interpreter::INT_CallFrame::call(
tensor_map.insert({tv, input_tvs[arg_index++]});
}
}
-std::vector<size_t> aliased_outputs;
-for (size_t i = 0; i < output_tvs.size(); i++)
-{
-shared_ptr<Node> op = function->get_output_op(i);
-descriptor::TensorView* tv = op->get_output_tensor_view(0).get();
-string name = tv->get_tensor().get_name();
-if (contains_key(tensor_map, tv))
-{
-if (op->description() == "Parameter")
-{
-// Here we handle the special case where an output is just a copy of an input
-memcpy(output_tvs[i]->get_data_ptr(),
-tensor_map.at(tv)->get_data_ptr(),
-tv->get_tensor().size());
-}
-else
-{
-// This is a computed value returned more than once and will need to be copied at the end
-aliased_outputs.push_back(i);
-}
-}
-else
-{
-tensor_map.insert({tv, output_tvs[i]});
-}
-}
for (size_t i = 0; i < function->get_output_size(); i++)
{
auto output_op = function->get_output_op(i);
if (!std::dynamic_pointer_cast<op::Result>(output_op))
{
throw ngraph_error("One of function's outputs isn't op::Result");
}
descriptor::TensorView* tv = function->get_output_op(i)->get_output_tensor_view(0).get();
tensor_map.insert({tv, output_tvs[i]});
}
// Invoke computation
for (shared_ptr<Node> op : function->get_ordered_ops())
@@ -163,29 +149,6 @@ void runtime::interpreter::INT_CallFrame::call(
}
}
}
-for (size_t i : aliased_outputs)
-{
-shared_ptr<Node> op = function->get_output_op(i);
-size_t first_output;
-for (first_output = 0; first_output <= i; ++first_output)
-{
-if (function->get_output_op(first_output) == op)
-{
-break;
-}
-}
-if (first_output == i)
-{
-throw ngraph_error("Internal error: duplicate output missing");
-}
-descriptor::TensorView* tv = op->get_output_tensor_view(0).get();
-string name = tv->get_tensor().get_name();
-// Here we handle the special case where an output is just a copy of an input
-memcpy(output_tvs[i]->get_data_ptr(),
-output_tvs[first_output]->get_data_ptr(),
-tv->get_tensor().size());
-}
}
void runtime::interpreter::INT_CallFrame::generate_calls(
...
@@ -39,6 +39,7 @@
#include "ngraph/ops/reduce_window.hpp"
#include "ngraph/ops/replace_slice.hpp"
#include "ngraph/ops/reshape.hpp"
#include "ngraph/ops/result.hpp"
#include "ngraph/ops/reverse.hpp"
#include "ngraph/ops/select_and_scatter.hpp"
#include "ngraph/ops/slice.hpp"
@@ -89,6 +90,7 @@
#include "ngraph/runtime/kernel/relu.hpp"
#include "ngraph/runtime/kernel/replace_slice.hpp"
#include "ngraph/runtime/kernel/reshape.hpp"
#include "ngraph/runtime/kernel/result.hpp"
#include "ngraph/runtime/kernel/reverse.hpp"
#include "ngraph/runtime/kernel/select.hpp"
#include "ngraph/runtime/kernel/select_and_scatter.hpp"
@@ -720,6 +722,13 @@ private:
reshape->get_input_order(),
out[0]->get_shape());
}
else if (node_op == "Result")
{
ngraph::op::Result* res = dynamic_cast<ngraph::op::Result*>(&node);
kernel::result(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
shape_size(res->get_shape()));
}
else if (node_op == "Reverse") else if (node_op == "Reverse")
{ {
ngraph::op::Reverse* reverse = dynamic_cast<ngraph::op::Reverse*>(&node); ngraph::op::Reverse* reverse = dynamic_cast<ngraph::op::Reverse*>(&node);
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <algorithm>
#include <cmath>
#include <cstring> // for memcpy (missing from the original listing)
#include <numeric>
#include <vector>
#include "ngraph/shape.hpp"
namespace ngraph
{
namespace runtime
{
namespace kernel
{
template <typename T>
void result(T* arg, T* out, size_t count)
{
memcpy(out, arg, sizeof(T) * count);
}
}
}
}
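Editor's note: kernel::result is a typed element copy. A self-contained check of its contract (the template is restated locally so the snippet compiles on its own):

```cpp
#include <cassert>
#include <cstring>
#include <vector>

// Same contract as kernel::result above: copy 'count' elements from arg to out.
template <typename T>
void result(T* arg, T* out, size_t count) {
    std::memcpy(out, arg, sizeof(T) * count);
}

int main() {
    std::vector<float> in{1.0f, 2.0f, 3.0f};
    std::vector<float> out(in.size(), 0.0f);
    result(in.data(), out.data(), in.size());
    assert(out == in);
    return 0;
}
```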
@@ -64,6 +64,7 @@
#include "ngraph/ops/remainder.hpp"
#include "ngraph/ops/replace_slice.hpp"
#include "ngraph/ops/reshape.hpp"
#include "ngraph/ops/result.hpp"
#include "ngraph/ops/reverse.hpp"
#include "ngraph/ops/select.hpp"
#include "ngraph/ops/select_and_scatter.hpp"
@@ -667,6 +668,10 @@ static shared_ptr<ngraph::Function>
auto output_shape = node_js.at("output_shape").get<vector<size_t>>();
node = make_shared<op::Reshape>(args[0], input_order, output_shape);
}
else if (node_op == "Result")
{
node = make_shared<op::Result>(args[0]);
}
else if (node_op == "Reverse") else if (node_op == "Reverse")
{ {
auto reversed_axes = node_js.at("reversed_axes").get<set<size_t>>(); auto reversed_axes = node_js.at("reversed_axes").get<set<size_t>>();
...@@ -1061,6 +1066,9 @@ static json write(const Node& n) ...@@ -1061,6 +1066,9 @@ static json write(const Node& n)
node["input_order"] = tmp->get_input_order(); node["input_order"] = tmp->get_input_order();
node["output_shape"] = tmp->get_output_shape(); node["output_shape"] = tmp->get_output_shape();
} }
else if (node_op == "Result")
{
}
else if (node_op == "Reverse") else if (node_op == "Reverse")
{ {
auto tmp = dynamic_cast<const op::Reverse*>(&n); auto tmp = dynamic_cast<const op::Reverse*>(&n);
......
@@ -25,9 +25,12 @@
#include "ngraph/graph_util.hpp"
#include "ngraph/log.hpp"
#include "ngraph/node.hpp"
#include "ngraph/ops/result_vector.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/util.hpp"
#include <iostream>
using namespace std;
std::string ngraph::to_cplusplus_sourcecode_literal(bool val)
@@ -239,10 +242,21 @@ ngraph::FpropCache ngraph::cache_fprop(std::shared_ptr<ngraph::Function> fprop,
}
// create the new outputs for fprop and the new fprop function
-NodeVector fprop_outputs{fprop->get_results()};
-fprop_outputs.insert(fprop_outputs.end(),
-fprop_cache.fprop_output_nodes.begin(),
-fprop_cache.fprop_output_nodes.end());
ResultVector fprop_outputs;
for (auto fpr : fprop->get_results())
{
fprop_outputs.push_back(fpr);
}
for (auto fpir : fprop_cache.fprop_output_nodes)
{
if (std::dynamic_pointer_cast<op::Result>(fpir))
{
throw ngraph_error("Expected op::Result in fprop->get_results()");
}
fprop_outputs.push_back(std::make_shared<op::Result>(fpir));
}
fprop_cache.fprop = std::make_shared<Function>(fprop_outputs, fprop->get_parameters());
@@ -251,10 +265,15 @@ ngraph::FpropCache ngraph::cache_fprop(std::shared_ptr<ngraph::Function> fprop,
ngraph::clone_nodes(bprop->get_ops(), node_param_map);
// get cloned bprop results
-NodeVector cloned_results;
ResultVector cloned_results;
for (auto node : bprop->get_results())
{
-cloned_results.push_back(node_param_map.get(node));
auto result = std::dynamic_pointer_cast<op::Result>(node_param_map.get(node));
if (!result)
{
throw ngraph_error("Expected op::Result values for op::Result keys in node_param_map");
}
cloned_results.push_back(result);
}
// get clone bprop parameters
...
@@ -40,7 +40,7 @@ TEST(build_graph, build_simple)
auto cluster_0 = make_shared<Function>(dot, op::ParameterVector{arg0, arg1, arg2, arg3});
-ASSERT_EQ(cluster_0->get_output_op(0), dot);
ASSERT_EQ(cluster_0->get_output_op(0)->get_input_op(0), dot);
}
// Check node comparisons
...
@@ -134,6 +134,42 @@ TEST(cpu_fusion, gemm_cpu)
ASSERT_TRUE(read_vector<float>(result) == expected);
}
TEST(cpu_fusion, gemm_cpu_no_bias)
{
auto shapeA = Shape{3, 2};
auto shapeB = Shape{2, 3};
auto shapeC = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::f32, shapeA);
auto B = make_shared<op::Parameter>(element::f32, shapeB);
auto reshape_w = make_shared<op::Reshape>(A, AxisVector{1, 0}, Shape{2, 3});
auto reshape_x = make_shared<op::Reshape>(B, AxisVector{1, 0}, Shape{3, 2});
auto cg =
make_shared<op::MatmulBias>(A, B, nullptr, A->get_shape(), B->get_shape(), true, true);
auto f = make_shared<Function>(cg, op::ParameterVector{A, B});
auto manager = runtime::Manager::get("CPU");
auto external = manager->compile(f);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
shared_ptr<runtime::TensorView> a = backend->make_primary_tensor_view(element::f32, shapeA);
shared_ptr<runtime::TensorView> b = backend->make_primary_tensor_view(element::f32, shapeB);
shared_ptr<runtime::TensorView> result =
backend->make_primary_tensor_view(element::f32, shapeC);
vector<float> dataA{1.0f, 4.0f, 1.0f, 4.0f, 1.0f, 4.0f};
vector<float> dataB{3.0f, 3.0f, 3.0f, 9.0f, 9.0f, 9.0f};
copy_data(a, dataA);
copy_data(b, dataB);
cf->call({a, b}, {result});
vector<float> expected{9, 27, 36, 108};
ASSERT_TRUE(read_vector<float>(result) == expected);
}
TEST(cpu_fusion, cpu_fusion_pass_basic)
{
Shape shape{};
@@ -155,6 +191,50 @@ TEST(cpu_fusion, cpu_fusion_pass_basic)
ASSERT_NE(std::dynamic_pointer_cast<op::MatmulBias>(graph->get_input_op(0)), nullptr);
}
TEST(cpu_fusion, cpu_fusion_pass_matmul_bias)
{
Shape shape_w{2, 4};
Shape shape_x{4, 1};
Shape shape_b{1};
auto W = make_shared<op::Parameter>(element::f32, shape_w);
auto x = make_shared<op::Parameter>(element::f32, shape_x);
auto b = make_shared<op::Parameter>(element::f32, shape_b);
auto mmb = std::make_shared<op::MatmulBias>(
W, x, nullptr, W->get_shape(), x->get_shape(), false, false);
auto broadcast = std::make_shared<op::Broadcast>(b, mmb->get_shape(), AxisSet{0});
auto add = mmb + broadcast;
auto graph = make_shared<op::Abs>(add);
pass::Manager pass_manager;
pass_manager.register_pass<runtime::cpu::pass::CPUFusion>();
auto func = make_shared<Function>(graph, op::ParameterVector{W, x, b});
pass_manager.run_passes(func);
auto gmm = graph->get_input_op(0);
ASSERT_TRUE(std::dynamic_pointer_cast<op::MatmulBias>(gmm));
ASSERT_EQ(gmm->get_input_op(2), broadcast);
}
TEST(cpu_fusion, cpu_fusion_pass_matmul_no_bias)
{
Shape shape_w{4, 2};
Shape shape_x{1, 4};
auto W = make_shared<op::Parameter>(element::f32, shape_w);
auto x = make_shared<op::Parameter>(element::f32, shape_x);
auto reshape_w = std::make_shared<op::Reshape>(W, AxisVector{1, 0}, Shape{2, 4});
auto reshape_x = std::make_shared<op::Reshape>(x, AxisVector{1, 0}, Shape{4, 1});
auto re_dot = make_shared<op::Dot>(reshape_w, reshape_x);
auto graph = make_shared<op::Abs>(re_dot);
pass::Manager pass_manager;
pass_manager.register_pass<runtime::cpu::pass::CPUFusion>();
auto func = make_shared<Function>(graph, op::ParameterVector{W, x});
pass_manager.run_passes(func);
size_t mmb = count_ops_of_type<op::MatmulBias>(func);
ASSERT_EQ(mmb, 1);
}
TEST(cpu_fusion, gemm_mlp)
{
const string json_path = file_util::path_join(SERIALIZED_ZOO, "mxnet/mnist_mlp_forward.json");
@@ -164,8 +244,8 @@ TEST(cpu_fusion, gemm_mlp)
pass::Manager pass_manager;
pass_manager.register_pass<runtime::cpu::pass::CPUFusion>();
pass_manager.run_passes(func);
-size_t ccg = count_ops_of_type<op::MatmulBias>(func);
-ASSERT_EQ(ccg, 3);
size_t mmb = count_ops_of_type<op::MatmulBias>(func);
ASSERT_EQ(mmb, 3);
}
//TODO: Move this test to backend_test.in.cpp once we have the INTERPRETER
@@ -403,6 +483,98 @@ TEST(cpu_fusion, bn_bprop_n4c3h2w2)
vector<float> expected_dbeta{320.f, 320.f, 320.f};
ASSERT_TRUE(ngraph::test::all_close(read_vector<float>(_dbeta), expected_dbeta, 1e-4f, 1e-8f));
}
TEST(cpu_fusion, zero_padded_reshaped_conv)
{
auto X = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2, 1});
auto F = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 1});
auto pad_value = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{0.0f});
auto pad =
make_shared<op::Pad>(X, pad_value, Shape{0, 1, 0, 0}, Shape{0, 0, 1, 0}, Shape{0, 0, 0, 0});
auto reshape = make_shared<op::Reshape>(pad, AxisVector{0, 3, 1, 2}, Shape{1, 1, 3, 3});
auto conv = make_shared<op::Convolution>(reshape,
F,
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{0, 0},
CoordinateDiff{0, 0},
Strides{1, 1});
auto func = make_shared<Function>(conv, op::ParameterVector{X, F});
ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
auto manager = runtime::Manager::get("CPU");
auto external = manager->compile(func);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
ASSERT_EQ(count_ops_of_type<op::Pad>(func), 0);
}
TEST(cpu_fusion, zero_padded_conv)
{
auto X = make_shared<op::Parameter>(element::f32, Shape{1, 1, 2, 2});
auto F = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 1});
auto pad_value = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{0.0f});
auto pad =
make_shared<op::Pad>(X, pad_value, Shape{0, 0, 0, 1}, Shape{0, 0, 1, 0}, Shape{0, 0, 0, 0});
auto conv = make_shared<op::Convolution>(pad,
F,
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{0, 0},
CoordinateDiff{0, 0},
Strides{1, 1});
auto func = make_shared<Function>(conv, op::ParameterVector{X, F});
ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
auto manager = runtime::Manager::get("CPU");
auto external = manager->compile(func);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
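    // As above, the zero Pad (this time with no intervening Reshape) should be
    // absorbed into the Convolution during compilation.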
ASSERT_EQ(count_ops_of_type<op::Pad>(func), 0);
}
TEST(cpu_fusion, non_zero_padded_conv)
{
auto X = make_shared<op::Parameter>(element::f32, Shape{1, 1, 2, 2});
auto F = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 1});
auto pad_value = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{1.0f});
auto pad =
make_shared<op::Pad>(X, pad_value, Shape{0, 0, 0, 1}, Shape{0, 0, 1, 0}, Shape{0, 0, 0, 0});
auto conv = make_shared<op::Convolution>(pad,
F,
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{0, 0},
CoordinateDiff{0, 0},
Strides{1, 1});
auto func = make_shared<Function>(conv, op::ParameterVector{X, F});
ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
auto manager = runtime::Manager::get("CPU");
auto external = manager->compile(func);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
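    // Here the pad value is 1.0f rather than 0.0f; convolution padding is
    // implicitly zero-filled, so this Pad cannot be folded and must survive.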
ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
}
TEST(cpu_fusion, fuse_conv_bias)
{
    pass::Manager pass_manager;
...
@@ -218,11 +218,6 @@ public:
        {
            map_parameter_to_index[f->get_parameters().at(i)] = i;
        }
-       unordered_map<shared_ptr<Node>, size_t> map_result_to_index;
-       for (size_t i = 0; i < f->get_results().size(); ++i)
-       {
-           map_result_to_index[f->get_results().at(i)] = i;
-       }
        // Parameter's source is either itself, or the output node of the upstream function
        unordered_map<shared_ptr<op::Parameter>, shared_ptr<Node>> map_parameter_to_source_node;
@@ -231,6 +226,13 @@ public:
        vector<shared_ptr<Function>> funcs =
            split_function_by_placement(f, map_parameter_to_source_node);
+       auto main_func = funcs.back();
+       unordered_map<shared_ptr<Node>, size_t> map_result_to_index;
+       for (size_t i = 0; i < main_func->get_results().size(); ++i)
+       {
+           map_result_to_index[main_func->get_results().at(i)] = i;
+       }
        // Make call frames
        vector<shared_ptr<runtime::CallFrame>> call_frames;
        for (auto func : funcs)
...
@@ -47,14 +47,22 @@ TEST(liveness, constant)
    auto tmp = f->get_ordered_ops();
    vector<shared_ptr<Node>> sorted{tmp.begin(), tmp.end()};
-   ASSERT_EQ(2, sorted.size());
+   ASSERT_EQ(3, sorted.size());
    EXPECT_EQ(0, sorted[0]->liveness_live_list.size());
    EXPECT_EQ(0, sorted[0]->liveness_new_list.size());
    EXPECT_EQ(0, sorted[0]->liveness_free_list.size());
-   EXPECT_EQ(0, sorted[1]->liveness_live_list.size());
-   EXPECT_EQ(0, sorted[1]->liveness_new_list.size());
+   //op::Negative is live on output to op::Result
+   EXPECT_EQ(1, sorted[1]->liveness_live_list.size());
+   //op::Negative is new
+   EXPECT_EQ(1, sorted[1]->liveness_new_list.size());
    EXPECT_EQ(0, sorted[1]->liveness_free_list.size());
+   //op::Negative is live on input to op::Result
+   EXPECT_EQ(1, sorted[2]->liveness_live_list.size());
+   EXPECT_EQ(0, sorted[2]->liveness_new_list.size());
+   //op::Negative is freed
+   EXPECT_EQ(1, sorted[2]->liveness_free_list.size());
}
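// note: with the explicit op::Result op the function now has three ordered ops
// (presumably Constant, Negative, Result); the Negative output stays live into
// the Result step and is only freed there, which the updated counts reflect.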
TEST(liveness, liveness)
...
@@ -234,5 +234,5 @@ TEST(memory_layout, constant)
    pass_manager.run_passes(f);
    auto sorted = f->get_ordered_ops();
    size_t temporary_pool_size = f->get_temporary_pool_size();
-   EXPECT_EQ(0, temporary_pool_size);
+   EXPECT_EQ(4, temporary_pool_size);
}
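// note: the expected pool size grows from 0 to 4 bytes, presumably because the
// value feeding the new op::Result node now needs a temporary (one f32 element)
// instead of being written directly to the function output.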
@@ -42,6 +42,7 @@
#include "ngraph/runtime/cpu/pass/cpu_fusion.hpp"
#include "ngraph/serializer.hpp"
#include "util/matcher.hpp"
+#include "util/test_tools.hpp"
using namespace ngraph;
using namespace std;
@@ -89,9 +90,9 @@ bool sum_predicate(std::shared_ptr<Node> gn)
        return false;
    }
-   NGRAPH_DEBUG << "looking at function's result "
-                << r->get_functions()[0]->get_result()->get_name();
-   if (auto sum = std::dynamic_pointer_cast<op::Add>(r->get_functions()[0]->get_result()))
+   auto result = r->get_functions()[0]->get_result()->get_input_op(0);
+   NGRAPH_DEBUG << "looking at function's result " << result->get_name();
+   if (auto sum = std::dynamic_pointer_cast<op::Add>(result))
    {
        auto parm1 = std::dynamic_pointer_cast<op::Parameter>(sum->get_input_op(0));
        auto parm2 = std::dynamic_pointer_cast<op::Parameter>(sum->get_input_op(1));
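        // note: get_result() now returns the trailing op::Result node, so the
        // actual computation is reached through its input, get_input_op(0).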
@@ -297,7 +298,7 @@ TEST(pattern, graph_rewrite)
        ASSERT_TRUE(graph_b->get_output_inputs(0).empty());
        auto expected = ngraph::NodeVector{a, b, a, c, b};
-       ASSERT_TRUE(f->get_results() == expected);
+       ASSERT_TRUE(count_ops_of_type<op::Add>(f) == 0);
    }
    {
...
@@ -82,3 +82,27 @@ TEST(reshape_elimination, bn_bprop_rewrite)
    size_t count_after = count_ops_of_type<op::Reshape>(func);
    ASSERT_TRUE(count_after < count_before);
}
TEST(reshape_elimination, dot_transpose_to_dot_w_transpose_args)
{
Shape shape_w{2, 4};
Shape shape_x{4, 1};
auto W = make_shared<op::Parameter>(element::f32, shape_w);
auto x = make_shared<op::Parameter>(element::f32, shape_x);
auto dot = make_shared<op::Dot>(W, x);
auto reshape_dot = std::make_shared<op::Reshape>(dot, AxisVector{1, 0}, Shape{1, 2});
auto graph = make_shared<op::Abs>(reshape_dot);
pass::Manager pass_manager;
pass_manager.register_pass<pass::ReshapeElimination>();
auto func = make_shared<Function>(graph, op::ParameterVector{W, x});
pass_manager.run_passes(func);
auto gdot = graph->get_input_op(0);
ASSERT_TRUE(std::dynamic_pointer_cast<op::Dot>(gdot));
ASSERT_TRUE(std::dynamic_pointer_cast<op::Reshape>(gdot->get_input_op(0)));
ASSERT_TRUE(std::dynamic_pointer_cast<op::Reshape>(gdot->get_input_op(1)));
ASSERT_EQ(gdot->get_input_op(0)->get_input_op(0), x);
ASSERT_EQ(gdot->get_input_op(1)->get_input_op(0), W);
ASSERT_EQ(gdot->get_shape(), (Shape{1, 2}));
}
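// note: the rewrite relies on the identity (W * x)^T = x^T * W^T: the transpose
// of the Dot output is replaced by a Dot whose arguments are the transposed
// (reshaped) inputs in swapped order, which the assertions above check.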
@@ -33,12 +33,13 @@ bool validate_list(const list<shared_ptr<Node>>& nodes)
        auto node_tmp = *it;
        auto dependencies_tmp = node_tmp->get_input_ops();
        vector<Node*> dependencies;
        for (shared_ptr<Node> n : dependencies_tmp)
        {
            dependencies.push_back(n.get());
        }
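        // note: the old code advanced the outer iterator as a side effect of
        // "auto tmp = it++;"; the fix below copies it first and advances only
        // the inner cursor tmp, leaving the outer traversal intact.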
-       auto tmp = it++;
-       for (; tmp != nodes.rend(); tmp++)
+       auto tmp = it;
+       for (tmp++; tmp != nodes.rend(); tmp++)
        {
            auto dep_tmp = *tmp;
            auto found = find(dependencies.begin(), dependencies.end(), dep_tmp.get());
...