Commit 749a1c6b authored by amy.zhuang

Rename the CPUPostLayoutAssignment pass to CPUMemoryOptimization and change its type from a CallGraphPass to a FunctionPass.

parent f97b41c7
@@ -107,7 +107,7 @@ set(SRC
     pass/cpu_layout.cpp
     pass/cpu_loop_kernel_fusion.cpp
     pass/cpu_mat_fusion.cpp
-    pass/cpu_post_layout_assignment.cpp
+    pass/cpu_memory_optimization.cpp
     pass/cpu_post_layout_optimizations.cpp
     pass/cpu_rnn_fusion.cpp
     pass/cpu_workspace_insertion.cpp
...
@@ -165,7 +165,7 @@
 #include "ngraph/runtime/cpu/pass/cpu_horizontal_fusion.hpp"
 #include "ngraph/runtime/cpu/pass/cpu_layout.hpp"
 #include "ngraph/runtime/cpu/pass/cpu_mat_fusion.hpp"
-#include "ngraph/runtime/cpu/pass/cpu_post_layout_assignment.hpp"
+#include "ngraph/runtime/cpu/pass/cpu_memory_optimization.hpp"
 #include "ngraph/runtime/cpu/pass/cpu_post_layout_optimizations.hpp"
 #include "ngraph/runtime/cpu/pass/cpu_rnn_fusion.hpp"
 #include "ngraph/runtime/cpu/pass/cpu_workspace_insertion.hpp"
@@ -1057,7 +1057,7 @@ void runtime::cpu::CPU_ExternalFunction::register_common_passes(ngraph::pass::Ma
     pass_manager.register_pass<runtime::cpu::pass::CPUAssignment>(this);
     pass_manager.register_pass<runtime::cpu::pass::CPULayout>(this);
     pass_manager.register_pass<runtime::cpu::pass::CPUPostLayoutOptimizations>();
-    pass_manager.register_pass<runtime::cpu::pass::CPUPostLayoutAssignment>(this);
+    pass_manager.register_pass<runtime::cpu::pass::CPUMemoryOptimization>();
     pass_manager.register_pass<ngraph::pass::GetOutputElementElimination>();
     pass_manager.get_state().set_visualize_tree_ops_map(runtime::cpu::get_visualize_tree_ops_map());
 }
...
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/runtime/cpu/pass/cpu_memory_optimization.hpp"
#include "ngraph/descriptor/output.hpp"
#include "ngraph/op/concat.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/runtime/cpu/cpu_op_annotations.hpp"
using namespace ngraph;
bool runtime::cpu::pass::CPUMemoryOptimization::run_on_function(std::shared_ptr<Function> function)
{
    for (auto n : function->get_ordered_ops())
    {
        if (auto concat = std::dynamic_pointer_cast<op::Concat>(n))
        {
            auto shape = concat->get_input_shape(0);
            auto axis = concat->get_concatenation_axis();

            // In-place concat is only considered when every dimension before the
            // concatenation axis is 1, so that each argument corresponds to one
            // contiguous slice of the concatenated output buffer.
            auto product = 1;
            for (int i = 0; i < axis; i++)
            {
                product *= shape[i];
            }
            if (product != 1)
            {
                NGRAPH_DEBUG << "cpu_post_layout_assignment: The product of Concat's shape "
                                "before concat axis is not 1, no in place concat";
                continue;
            }

            bool in_place_concat = true;
            for (descriptor::Input& input : concat->get_inputs())
            {
                if (shape_size(input.get_shape()) == 0)
                {
                    NGRAPH_DEBUG << "cpu_post_layout_assignment: 0 length tensor, no in "
                                    "place concat";
                    in_place_concat = false;
                    break;
                }

                const auto& output = input.get_output();
                auto arg = output.get_node();
                if (std::dynamic_pointer_cast<op::Constant>(arg) ||
                    std::dynamic_pointer_cast<op::Parameter>(arg))
                {
                    NGRAPH_DEBUG << "cpu_post_layout_assignment: " << arg->get_name()
                                 << ": constant or parameter, no in place concat";
                    in_place_concat = false;
                    break;
                }

                if (output.get_inputs().size() != 1)
                {
                    // check if we can do in place concat
                    auto concat_count = 0;
                    for (auto output_input : output.get_inputs())
                    {
                        auto user = output_input->get_node();
                        if (std::dynamic_pointer_cast<op::Concat>(user))
                        {
                            concat_count++;
                            if (concat_count == 2)
                            {
                                NGRAPH_DEBUG << "cpu_post_layout_assignment: multiple "
                                                "concat users, no in place concat";
                                in_place_concat = false;
                                break;
                            }
                        }
                    }
                    if (!in_place_concat)
                    {
                        break;
                    }

                    // The argument has other users: walk it and its downstream users
                    // (stopping at this concat) and give up if any of them is a graph
                    // output or carries a destructive in-place annotation.
                    std::unordered_set<Node*> visited;
                    std::deque<Node*> stack;
                    stack.push_front(arg.get());

                    while (stack.size() > 0)
                    {
                        ngraph::Node* curr = stack.front();
                        visited.insert(curr);
                        if (curr->is_output())
                        {
                            NGRAPH_DEBUG << "cpu_post_layout_assignment: not post "
                                            "dominated, no in place concat";
                            in_place_concat = false;
                            break;
                        }
                        else
                        {
                            if (auto op = dynamic_cast<op::Op*>(curr))
                            {
                                if (auto op_annotations = op->get_op_annotations())
                                {
                                    for (auto oi_pair : op_annotations->get_in_place_oi_pairs())
                                    {
                                        if (oi_pair.destructive)
                                        {
                                            NGRAPH_DEBUG << "cpu_post_layout_assignment: "
                                                            "destructive in place oi, no "
                                                            "in place concat";
                                            in_place_concat = false;
                                            break;
                                        }
                                    }
                                }
                            }
                        }

                        stack.pop_front();
                        if (curr != concat.get())
                        {
                            for (auto next : curr->get_users())
                            {
                                if (visited.count(next.get()) == 0)
                                {
                                    stack.push_front(next.get());
                                }
                            }
                        }
                    }
                    if (!in_place_concat)
                    {
                        break;
                    }
                }
            }

            // If every argument passed the checks, record a non-destructive in-place
            // pair between output 0 and input 0 on the concat.
            if (in_place_concat)
            {
                auto op_annotations = concat->get_op_annotations();
                if (op_annotations)
                {
                    op_annotations->add_in_place_oi_pair({0, 0, false});
                }
                else
                {
                    op_annotations = std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
                    op_annotations->add_in_place_oi_pair({0, 0, false});
                    concat->set_op_annotations(op_annotations);
                }
            }
        }
    }
    return false;
}
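
The precondition that run_on_function checks first, namely that the product of the dimensions before the concatenation axis is 1, is easiest to see on concrete shapes. Below is a minimal standalone sketch, not part of the commit, of the same check; the helper name outer_dims_are_unit is ours, for illustration only.

#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical helper mirroring the product check above: in-place concat is
// only attempted when every dimension before the concatenation axis is 1, so
// each argument occupies one contiguous slice of the concatenated output.
bool outer_dims_are_unit(const std::vector<std::size_t>& shape, std::size_t concat_axis)
{
    std::size_t product = 1;
    for (std::size_t i = 0; i < concat_axis; i++)
    {
        product *= shape[i];
    }
    return product == 1;
}

int main()
{
    // Shape {1, 2, 3} concatenated along axis 1: outer product is 1, so it is a candidate.
    std::cout << outer_dims_are_unit({1, 2, 3}, 1) << "\n"; // prints 1
    // Shape {4, 2, 3} concatenated along axis 1: outer product is 4, so the pass skips it.
    std::cout << outer_dims_are_unit({4, 2, 3}, 1) << "\n"; // prints 0
    return 0;
}

With an outer extent greater than 1, each argument's elements would be interleaved across the output rather than forming one contiguous block, so the argument tensor could not simply alias a sub-range of the concat's buffer.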
@@ -17,12 +17,6 @@
 #pragma once

 #include "ngraph/pass/pass.hpp"
-#include "ngraph/runtime/cpu/cpu_external_function.hpp"
-#include "ngraph/runtime/cpu/cpu_tensor_view.hpp"
-
-#define ASSIGN_DECL(op_name)                                                              \
-    assign<op_name>(ngraph::runtime::cpu::CPU_ExternalFunction * external_function,       \
-                    ngraph::Node * node)

 namespace ngraph
 {
@@ -32,34 +26,10 @@ namespace ngraph
         {
             namespace pass
             {
-                using PostLayoutAssignFunction =
-                    std::function<void(CPU_ExternalFunction*, ngraph::Node*)>;
-                using PostLayoutAssignOpMap =
-                    std::unordered_map<std::type_index, PostLayoutAssignFunction>;
-
-                class CPUPostLayoutAssignment : public ngraph::pass::CallGraphPass
+                class CPUMemoryOptimization : public ngraph::pass::FunctionPass
                 {
                 public:
-                    CPUPostLayoutAssignment(CPU_ExternalFunction* external_function)
-                        : m_external_function(external_function)
-                    {
-                    }
-
-                    virtual bool
-                        run_on_call_graph(const std::list<std::shared_ptr<Node>>& nodes) override;
-
-                    template <typename OP>
-                    static void
-                        assign(ngraph::runtime::cpu::CPU_ExternalFunction* external_function,
-                               ngraph::Node* node)
-                    {
-                        throw std::runtime_error("Unimplemented op '" + node->description() +
-                                                 "' in CPU post layout assignment");
-                    }
-
-                private:
-                    CPU_ExternalFunction* m_external_function;
+                    bool run_on_function(std::shared_ptr<ngraph::Function> function) override;
                 };
             }
         }
...
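
Because CPUMemoryOptimization is now a FunctionPass rather than a CallGraphPass, it no longer stores a CPU_ExternalFunction pointer and is handed a whole Function by the pass manager, which is why the register_common_passes hunk above registers it without the CPU_ExternalFunction argument. The following is a minimal standalone sketch, not part of the commit, of driving the pass on its own; run_memory_optimization is a hypothetical wrapper and the ngraph::pass::Manager calls are assumed to be the usual registration API.

#include <memory>

#include "ngraph/function.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/runtime/cpu/pass/cpu_memory_optimization.hpp"

// Hypothetical driver: the pass manager invokes the pass once per Function,
// and no CPU_ExternalFunction pointer is required at registration time.
void run_memory_optimization(std::shared_ptr<ngraph::Function> f)
{
    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<ngraph::runtime::cpu::pass::CPUMemoryOptimization>();
    pass_manager.run_passes(f);
}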
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/runtime/cpu/pass/cpu_post_layout_assignment.hpp"
#include <typeindex>
#include <typeinfo>
#include "ngraph/descriptor/output.hpp"
#include "ngraph/op/concat.hpp"
#include "ngraph/runtime/cpu/cpu_op_annotations.hpp"
using namespace std;
using namespace ngraph;
namespace ngraph
{
    namespace runtime
    {
        namespace cpu
        {
            namespace pass
            {
                template <>
                void CPUPostLayoutAssignment::ASSIGN_DECL(ngraph::op::Concat)
                {
                    auto concat = static_cast<op::Concat*>(node);
                    auto shape = concat->get_input_shape(0);
                    auto axis = concat->get_concatenation_axis();
                    auto product = 1;
                    for (int i = 0; i < axis; i++)
                    {
                        product *= shape[i];
                    }
                    if (product != 1)
                    {
                        NGRAPH_DEBUG << "cpu_post_layout_assignment: The product of Concat's shape "
                                        "before concat axis is not 1, no in place concat";
                        return;
                    }

                    bool in_place_concat = false;
                    for (descriptor::Input& input : concat->get_inputs())
                    {
                        if (shape_size(input.get_shape()) == 0)
                        {
                            NGRAPH_DEBUG << "cpu_post_layout_assignment: 0 length tensor, no in "
                                            "place concat";
                            return;
                        }

                        const auto& output = input.get_output();
                        auto arg = output.get_node();
                        if (std::dynamic_pointer_cast<op::Constant>(arg) ||
                            std::dynamic_pointer_cast<op::Parameter>(arg))
                        {
                            NGRAPH_DEBUG << "cpu_post_layout_assignment: " << arg->get_name()
                                         << ": constant or parameter, no in place concat";
                            return;
                        }
                        else if (output.get_inputs().size() != 1)
                        {
                            // check if we can do in place concat
                            auto concat_count = 0;
                            for (auto output_input : output.get_inputs())
                            {
                                auto user = output_input->get_node();
                                if (std::dynamic_pointer_cast<op::Concat>(user))
                                {
                                    concat_count++;
                                    if (concat_count == 2)
                                    {
                                        NGRAPH_DEBUG << "cpu_post_layout_assignment: multiple "
                                                        "concat users, no in place concat";
                                        return;
                                    }
                                }
                            }

                            std::unordered_set<Node*> visited;
                            std::deque<Node*> stack;
                            stack.push_front(arg.get());

                            while (stack.size() > 0)
                            {
                                ngraph::Node* curr = stack.front();
                                visited.insert(curr);
                                if (curr->is_output())
                                {
                                    NGRAPH_DEBUG << "cpu_post_layout_assignment: not post "
                                                    "dominated, no in place concat";
                                    return;
                                }
                                else
                                {
                                    if (auto op = dynamic_cast<op::Op*>(curr))
                                    {
                                        if (auto op_annotations = op->get_op_annotations())
                                        {
                                            for (auto oi_pair :
                                                 op_annotations->get_in_place_oi_pairs())
                                            {
                                                if (oi_pair.destructive)
                                                {
                                                    NGRAPH_DEBUG << "cpu_post_layout_assignment: "
                                                                    "destructive in place oi, no "
                                                                    "in place concat";
                                                    return;
                                                }
                                            }
                                        }
                                    }
                                }

                                stack.pop_front();
                                if (curr != concat)
                                {
                                    for (auto next : curr->get_users())
                                    {
                                        if (visited.count(next.get()) == 0)
                                        {
                                            stack.push_front(next.get());
                                        }
                                    }
                                }
                            }
                            in_place_concat = true;
                        }
                        else
                        {
                            in_place_concat = true;
                        }
                    }

                    if (in_place_concat)
                    {
                        auto op_annotations = concat->get_op_annotations();
                        if (op_annotations)
                        {
                            op_annotations->add_in_place_oi_pair({0, 0, false});
                        }
                        else
                        {
                            op_annotations =
                                std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
                            op_annotations->add_in_place_oi_pair({0, 0, false});
                            concat->set_op_annotations(op_annotations);
                        }
                    }
                }
            }
        }
    }
}

#define TI(x) type_index(typeid(x))

static const runtime::cpu::pass::PostLayoutAssignOpMap s_dispatcher{
    {TI(ngraph::op::Concat),
     &runtime::cpu::pass::CPUPostLayoutAssignment::assign<ngraph::op::Concat>},
};

bool runtime::cpu::pass::CPUPostLayoutAssignment::run_on_call_graph(
    const std::list<std::shared_ptr<Node>>& nodes)
{
    for (const auto& node : nodes)
    {
        auto& n = *node;
        auto handler = s_dispatcher.find(TI(n));
        if (handler != s_dispatcher.end())
        {
            handler->second(m_external_function, node.get());
        }
    }
    return false;
}