Commit 9b46e945 authored by Robert Kimball

change codegened functions to take void** instead of vector<void*>

parent 3bb93eb3
......@@ -45,7 +45,7 @@ void CallFrame::tensor_call(
}
// Invoke compiled computation
m_compiled_function(inputs, outputs);
m_compiled_function(inputs.data(), outputs.data());
}
void CallFrame::operator()(const std::vector<std::shared_ptr<ngraph::runtime::Value>>& arguments,
......
......@@ -32,8 +32,7 @@ namespace ngraph
{
class CallFrame;
using EntryPoint_t = void(const std::vector<void*>& inputs,
const std::vector<void*>& outputs);
using EntryPoint_t = void(void** inputs, void** outputs);
using EntryPoint = std::function<EntryPoint_t>;
......
......@@ -967,8 +967,8 @@ void Emitter::EmitReduce(const ngraph::Node* n,
TU.indent++;
TU << "\n";
TU << type << " result;\n";
TU << "std::vector<void*> inputs = {&x, &y};\n";
TU << "std::vector<void*> outputs = {&result};\n";
TU << "void* inputs[] = {&x, &y};\n";
TU << "void* outputs[] = {&result};\n";
TU << reduction_function->get_name() << "(inputs, outputs);\n";
TU << "return result;\n";
TU.indent--;
......@@ -1003,8 +1003,8 @@ void Emitter::EmitReduce(const ngraph::Node* n,
TU.indent++;
TU << "\n";
TU << type << " result;\n";
TU << "std::vector<void*> inputs = {&x, &y};\n";
TU << "std::vector<void*> outputs = {&result};\n";
TU << "void* inputs[] = {&x, &y};\n";
TU << "void* outputs[] = {&result};\n";
TU << reduction_function->get_name() << "(inputs, outputs);\n";
TU << "return result;\n";
TU.indent--;
......@@ -1035,8 +1035,8 @@ void Emitter::EmitReduce(const ngraph::Node* n,
TU.indent++;
TU << "\n";
TU << type << " result;\n";
TU << "std::vector<void*> inputs = {&x, &y};\n";
TU << "std::vector<void*> outputs = {&result};\n";
TU << "void* inputs[] = {&x, &y};\n";
TU << "void* outputs[] = {&result};\n";
TU << reduction_function->get_name() << "(inputs, outputs);\n";
TU << "return result;\n";
TU.indent--;
......@@ -1365,13 +1365,13 @@ void Emitter::generate_call(const std::vector<TensorViewInfo>& inputs,
output_names.push_back(output.get_tensor().get_name());
}
TU << "std::vector<void*> inputs =\n{";
TU << "void* inputs[] =\n{";
TU.indent++;
TU << "\n" << join(input_names, ",\n");
TU.indent--;
TU << "\n};\n";
TU << "std::vector<void*> outputs =\n{";
TU << "void* outputs[] =\n{";
TU.indent++;
TU << "\n" << join(output_names, ",\n");
TU.indent--;
......
......@@ -186,7 +186,6 @@ void ExternalFunction::compile()
TU +=
R"(// Generated by the NGraph CPU backend
#include <cmath>
#include <vector>
#include <Eigen/Dense>
......@@ -201,17 +200,14 @@ using namespace ngraph::runtime::cpu::eigen;
TU << "// Declare all functions\n";
for (shared_ptr<Function> f : pass_manager.get_state().get_functions())
{
TU << "extern \"C\" void " << f->get_name() << "(\n";
TU << " const std::vector<void*>& inputs,\n";
TU << " const std::vector<void*>& outputs);\n";
TU << "extern \"C\" void " << f->get_name() << "(void** inputs, void** outputs);\n";
}
TU << "\n";
for (shared_ptr<Function> current_function : pass_manager.get_state().get_functions())
{
TU << "extern \"C\" void " << current_function->get_name() << "(\n";
TU << " const std::vector<void*>& inputs,\n";
TU << " const std::vector<void*>& outputs)\n";
TU << "extern \"C\" void " << current_function->get_name();
TU << "(void** inputs, void** outputs)\n";
TU << "{\n";
TU.indent++;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment