Commit 24b72581 authored by fenglei.tian's avatar fenglei.tian

clang format

parent 8d0768c5
...@@ -346,7 +346,8 @@ using namespace std; ...@@ -346,7 +346,8 @@ using namespace std;
shared_ptr<descriptor::TensorView> tv = node->get_outputs()[0].get_tensor_view(); shared_ptr<descriptor::TensorView> tv = node->get_outputs()[0].get_tensor_view();
auto c_value_strings = c->get_value_strings(); auto c_value_strings = c->get_value_strings();
writer << "static " << tv->get_tensor().get_element_type().c_type_string() << " " writer << "static " << tv->get_tensor().get_element_type().c_type_string() << " "
<< tv->get_tensor().get_name() << "_cpu[" << c_value_strings.size() << "] =\n"; << tv->get_tensor().get_name() << "_cpu[" << c_value_strings.size()
<< "] =\n";
writer << "{\n"; writer << "{\n";
writer.indent++; writer.indent++;
writer << emit_string_array(c_value_strings, 100 - writer.indent * 4); writer << emit_string_array(c_value_strings, 100 - writer.indent * 4);
...@@ -487,23 +488,24 @@ using namespace std; ...@@ -487,23 +488,24 @@ using namespace std;
writer << "{\n"; writer << "{\n";
writer.indent++; writer.indent++;
for (shared_ptr<Function> current_function : pass_manager.get_state().get_functions()) for (shared_ptr<Function> current_function : pass_manager.get_state().get_functions())
{
for (shared_ptr<Node> node : current_function->get_ordered_ops())
{ {
const op::Constant* c = dynamic_cast<op::Constant*>(node.get()); for (shared_ptr<Node> node : current_function->get_ordered_ops())
if (c)
{ {
shared_ptr<descriptor::TensorView> tv = node->get_outputs()[0].get_tensor_view(); const op::Constant* c = dynamic_cast<op::Constant*>(node.get());
writer << "if(" << tv->get_tensor().get_name() << " == NULL)\n"; if (c)
writer << "{\n"; {
writer << "runtime::gpu::cuda_memcpyHtD(" << tv->get_tensor().get_name() << ", " shared_ptr<descriptor::TensorView> tv =
<< tv->get_tensor().get_name() << "_cpu, " << tv->get_tensor().size() node->get_outputs()[0].get_tensor_view();
writer << "if(" << tv->get_tensor().get_name() << " == NULL)\n";
writer << "{\n";
writer << "runtime::gpu::cuda_memcpyHtD(" << tv->get_tensor().get_name() << ", "
<< tv->get_tensor().get_name() << "_cpu, " << tv->get_tensor().size()
<< ");\n"; << ");\n";
writer << "}\n"; writer << "}\n";
}
} }
} }
}
bool temporaries_used = false; bool temporaries_used = false;
size_t worst_case_tmp_size = 0; size_t worst_case_tmp_size = 0;
for (shared_ptr<Node> node : current_function->get_ordered_ops()) for (shared_ptr<Node> node : current_function->get_ordered_ops())
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment