Commit 6da5d38d authored by Jaikrishnan Menon

CPU: More WiP changes

parent 3128f36c
@@ -46,7 +46,7 @@
#include <llvm/ExecutionEngine/ExecutionEngine.h>
#include <llvm/ExecutionEngine/SectionMemoryManager.h>
#include "compiler.hpp"
#include "ngraph/codegen/compiler.hpp"
// TODO: Fix leaks
@@ -55,7 +55,7 @@ using namespace llvm;
using namespace llvm::opt;
using namespace std;
-using namespace nervana::cpu;
+using namespace ngraph::codegen;
static std::string GetExecutablePath(const char* Argv0)
{
@@ -122,6 +122,7 @@ std::unique_ptr<llvm::Module> execution_state::compile(const string& source, con
// But that's a private header and isn't part of the public libclang API.
// Instead of re-implementing all of that functionality in a custom toolchain,
// just hardcode the paths relevant to frequently used build/test machines for now.
HSO.AddPath("/localdisk/menonjai/build/third-party/ext_llvm-prefix/src/ext_llvm/lib/clang/5.0.0/include", clang::frontend::System, false, false);
HSO.AddPath("/usr/include/x86_64-linux-gnu", clang::frontend::System, false, false);
HSO.AddPath("/usr/include", clang::frontend::System, false, false);
// Add C++ standard library headers
@@ -138,6 +139,16 @@ std::unique_ptr<llvm::Module> execution_state::compile(const string& source, con
false);
HSO.AddPath("/home/menonjai/ngraph-cpp/src", clang::frontend::System, false, false);
// Language options
auto LO = Clang->getInvocation().getLangOpts();
LO->CPlusPlus = 1;
LO->CPlusPlus11 = 1;
LO->Bool = 1;
LO->Exceptions = 1;
LO->CXXExceptions = 1;
LO->WChar = 1;
LO->RTTI = 1;
// Map code filename to a MemoryBuffer
StringRef source_ref(source);
unique_ptr<MemoryBuffer> buffer = MemoryBuffer::getMemBufferCopy(source_ref);
......
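For orientation, a minimal usage sketch of the compile() entry point configured above, mirroring the tests at the bottom of this commit; the function name sketch_main is just a placeholder, everything else comes from signatures visible in this diff.

// Sketch: compile a C++ string in memory and get back an LLVM module.
#include <memory>
#include <llvm/IR/Module.h>
#include "ngraph/codegen/compiler.hpp"

int sketch_main()
{
    constexpr auto source = R"(extern "C" int test() { return 2 + 5; })";
    ngraph::codegen::execution_state estate;
    // compile() returns a unique_ptr<llvm::Module>, or nullptr on failure.
    std::unique_ptr<llvm::Module> module = estate.compile(source, "test.cpp");
    return module ? 0 : 1;
}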
@@ -22,23 +22,23 @@
#include <llvm/ExecutionEngine/SectionMemoryManager.h>
#include <llvm/Option/Arg.h>
-namespace nervana
+namespace ngraph
{
-namespace cpu
+namespace codegen
{
class module;
class execution_state;
}
}
-class nervana::cpu::module
+class ngraph::codegen::module
{
public:
private:
std::unique_ptr<llvm::Module> m_module;
};
-class nervana::cpu::execution_state : public llvm::SectionMemoryManager
+class ngraph::codegen::execution_state : public llvm::SectionMemoryManager
{
public:
execution_state();
......
@@ -33,7 +33,7 @@ namespace ngraph
class CPUManager : public Manager
{
protected:
-nervana::cpu::execution_state exec_state;
+ngraph::codegen::execution_state exec_state;
public:
virtual std::shared_ptr<Backend> allocate_backend() override;
......
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <iostream>
#include <vector>
#include "ngraph/node.hpp"
@@ -22,21 +23,85 @@
using namespace std;
using namespace ngraph::runtime::cpu;
-void Emitter::EmitAdd(const ngraph::Node*,
-ExternalFunction*,
-FunctionMap&,
+void Emitter::EmitNop(const ngraph::Node* n,
+ExternalFunction* ef,
+FunctionMap& function_map,
const std::vector<TensorViewInfo>& inputs,
const std::vector<TensorViewInfo>& outputs) const
{
}
-void Emitter::EmitDot(const ngraph::Node*,
-ExternalFunction*,
-FunctionMap&,
+void Emitter::EmitAdd(const ngraph::Node* n,
+ExternalFunction* ef,
+FunctionMap& function_map,
const std::vector<TensorViewInfo>& inputs,
const std::vector<TensorViewInfo>& outputs) const
{
}
+void Emitter::EmitDot(const ngraph::Node* n,
+ExternalFunction* ef,
+FunctionMap& function_map,
+const std::vector<TensorViewInfo>& inputs,
+const std::vector<TensorViewInfo>& outputs) const
+{
+auto& arg_nodes = n->get_arguments();
+assert(arg_nodes.size() == 2);
+auto arg0_tensor_type =
+dynamic_pointer_cast<const TensorViewType>(arg_nodes.at(0)->get_value_type());
+assert(nullptr != arg0_tensor_type);
+auto arg1_tensor_type =
+dynamic_pointer_cast<const TensorViewType>(arg_nodes.at(1)->get_value_type());
+assert(nullptr != arg1_tensor_type);
+auto arg0_shape = arg0_tensor_type->get_shape();
+auto arg1_shape = arg1_tensor_type->get_shape();
+auto& arg0_element_type = arg0_tensor_type->get_element_type();
+// If arg0 or arg1 is a scalar, emit a scalar-tensor product.
+if (arg0_shape.size() == 0)
+{
+cout << "Emitting scalar-tensor product\n";
+}
+else if (arg1_shape.size() == 0)
+{
+cout << "Emitting scalar-tensor product\n";
+}
+// If arg0 and arg1 are both vectors, emit a dot product.
+else if (arg0_shape.size() == 1 && arg1_shape.size() == 1)
+{
+cout << "Emitting dot product\n";
+}
+// If arg0 is a matrix and arg1 is a vector, emit a matrix-vector product.
+else if (arg0_shape.size() == 2 && arg1_shape.size() == 1)
+{
+cout << "Emitting matrix-vector product\n";
+}
+// If arg0 and arg1 are both matrices, emit a matrix product.
+else if (arg0_shape.size() == 2 && arg1_shape.size() == 2)
+{
+cout << "Emitting matrix multiply\n";
+}
+else
+{
+throw ngraph_error("Dot product for tensors with rank>2 not implemented yet.");
+}
+}
+void Emitter::EmitMultiply(const ngraph::Node* n,
+ExternalFunction* ef,
+FunctionMap& function_map,
+const std::vector<TensorViewInfo>& inputs,
+const std::vector<TensorViewInfo>& outputs) const
+{
+}
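EmitDot above only logs which shape case it hit; the actual kernels will be appended to the Emitter's TU string and compiled through the codegen path later in this commit. A minimal sketch of a helper that could emit the vector-vector case, assuming float element types and the <Eigen/Dense> header the generated TU already includes; in0, in1 and out are hypothetical C++ expressions that resolve each tensor's float* inside the generated __entrypoint.

#include <cstddef>
#include <string>

// Sketch: append Eigen-based code for a rank-1 x rank-1 dot product to the TU.
static void emit_vector_dot_sketch(std::string& TU,
                                   const std::string& in0,
                                   const std::string& in1,
                                   const std::string& out,
                                   std::size_t n)
{
    const std::string len = std::to_string(n);
    TU += "    {\n";
    TU += "        Eigen::Map<Eigen::VectorXf> a(" + in0 + ", " + len + ");\n";
    TU += "        Eigen::Map<Eigen::VectorXf> b(" + in1 + ", " + len + ");\n";
    TU += "        " + out + "[0] = a.dot(b);\n";
    TU += "    }\n";
}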
@@ -15,6 +15,7 @@
#pragma once
#include <vector>
+#include <string>
#include "ngraph/node.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
@@ -28,8 +29,18 @@ namespace ngraph
{
class Emitter
{
+protected:
+std::string TU;
public:
-Emitter() { }
+Emitter() : TU("") { }
+std::string& GetTU() { return TU; }
+void EmitNop(const ngraph::Node*,
+ExternalFunction*,
+FunctionMap&,
+const std::vector<TensorViewInfo>& inputs,
+const std::vector<TensorViewInfo>& outputs) const;
void EmitAdd(const ngraph::Node*,
ExternalFunction*,
@@ -43,6 +54,12 @@ namespace ngraph
const std::vector<TensorViewInfo>& inputs,
const std::vector<TensorViewInfo>& outputs) const;
+void EmitMultiply(const ngraph::Node*,
+ExternalFunction*,
+FunctionMap&,
+const std::vector<TensorViewInfo>& inputs,
+const std::vector<TensorViewInfo>& outputs) const;
};
}
}
......
@@ -17,6 +17,7 @@
#include <typeindex>
#include <typeinfo>
#include <unordered_map>
+#include <string>
#include "ngraph/codegen/compiler.hpp"
#include "ngraph/descriptor/input.hpp"
@@ -64,8 +65,10 @@ using ngraph::descriptor::layout::DenseTensorViewLayout;
#define TI(x) type_index(typeid(x))
-static const OpMap dispatch{{TI(ngraph::op::Add), &Emitter::EmitAdd},
-{TI(ngraph::op::Dot), &Emitter::EmitDot}};
+static const OpMap dispatcher{{TI(ngraph::op::Add), &Emitter::EmitAdd},
+{TI(ngraph::op::Dot), &Emitter::EmitDot},
+{TI(ngraph::op::Multiply), &Emitter::EmitMultiply},
+{TI(ngraph::op::Parameter), &Emitter::EmitNop}};
#undef TI
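The dispatch table keys op handlers by the dynamic type of each node. A self-contained sketch of the same type_index pattern, showing why typeid applied to a dereferenced polymorphic reference selects the right handler:

#include <cstdio>
#include <typeindex>
#include <typeinfo>
#include <unordered_map>

struct Op { virtual ~Op() = default; };
struct Add : Op {};
struct Dot : Op {};

using Handler = void (*)(const Op*);
static void handle_add(const Op*) { std::puts("Add"); }
static void handle_dot(const Op*) { std::puts("Dot"); }

static const std::unordered_map<std::type_index, Handler> table{
    {std::type_index(typeid(Add)), handle_add},
    {std::type_index(typeid(Dot)), handle_dot}};

void dispatch(const Op& op)
{
    // typeid on a dereferenced polymorphic reference yields the dynamic type,
    // so the lookup picks the handler registered for the concrete op class.
    auto it = table.find(std::type_index(typeid(op)));
    if (it != table.end())
    {
        it->second(&op);
    }
}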
@@ -146,10 +149,59 @@ void ExternalFunction::compile(FunctionMap& function_map)
}
+// Now we build the TU
+Emitter emitter;
+auto& TU = emitter.GetTU();
+TU += R"(
+#include <vector>
+#include <memory>
+#include <Eigen/Dense>
+#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
+#include "ngraph/runtime/tensor_view.hpp"
+#include "ngraph/runtime/cpu/call_frame.hpp"
+void *__dso_handle = 0;
+extern "C" void __entrypoint(ngraph::runtime::cpu::CallFrame* call_frame,
+ngraph::runtime::TensorViewPtrs& tensor_views)
+{
+)";
+for (shared_ptr<Node> node : m_function->get_ordered_ops())
+{
+auto& n = *node; // Work around a compiler warning (*node inside typeid may have side effects
+// with shared pointers, which is fine here but clang doesn't like it.)
+auto handler = dispatcher.find(type_index(typeid(n)));
+if (handler == dispatcher.end())
+{
+throw ngraph_error("Unhandled op during code generation: " + node->description());
+}
+std::vector<TensorViewInfo> in;
+for (const descriptor::Input& input : node->get_inputs())
+{
+const descriptor::Output& output = input.get_output();
+auto tv = output.get_tensor_view();
+in.push_back({tensor_index.at(tv), tv});
+}
+std::vector<TensorViewInfo> out;
+for (const descriptor::Output& output : node->get_outputs())
+{
+auto tv = output.get_tensor_view();
+out.push_back({tensor_index.at(tv), tv});
+}
+handler->second(&emitter, node.get(), this, function_map, in, out);
+}
+// End TU
+TU += "}\n";
+ngraph::codegen::execution_state estate;
+auto llvm_module = estate.compile(TU, "ExternalFunction");
+assert(llvm_module);
+estate.add_module(llvm_module);
+estate.finalize();
+//auto llvm_func = estate.find_function
m_is_compiled = true;
if (m_release_function)
......
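The commented-out find_function line marks where the JITed entrypoint will eventually be fetched (the __dso_handle definition in the TU preamble exists so the JITed module has no unresolved reference when static destructors are registered). A hedged sketch of the lookup and call: getFunctionAddress is real LLVM API, but how execution_state exposes its llvm::ExecutionEngine is an assumption, so the engine is passed in explicitly here.

#include <cstdint>
#include <llvm/ExecutionEngine/ExecutionEngine.h>
#include "ngraph/runtime/cpu/call_frame.hpp"
#include "ngraph/runtime/tensor_view.hpp"

using EntryPointFn = void (*)(ngraph::runtime::cpu::CallFrame*,
                              ngraph::runtime::TensorViewPtrs&);

// Sketch: resolve the generated __entrypoint symbol and invoke it.
void call_entrypoint(llvm::ExecutionEngine& engine,
                     ngraph::runtime::cpu::CallFrame* frame,
                     ngraph::runtime::TensorViewPtrs& views)
{
    // getFunctionAddress triggers lazy compilation if needed and returns the
    // address of the finalized symbol.
    std::uint64_t addr = engine.getFunctionAddress("__entrypoint");
    auto fn = reinterpret_cast<EntryPointFn>(addr);
    fn(frame, views);
}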
@@ -33,6 +33,7 @@ namespace ngraph
class Instruction;
class ExternalFunction;
class Emitter;
+class CallFrame;
using FunctionMap = std::unordered_map<std::shared_ptr<Function>,
std::shared_ptr<ExternalFunction>>;
@@ -46,6 +47,8 @@
using OpMap = std::unordered_map<std::type_index, OpFunction>;
+using EntryPoint = std::function<void(ngraph::runtime::cpu::CallFrame*, ngraph::runtime::TensorViewPtrs&)>;
class ExternalFunction : public ngraph::runtime::ExternalFunction
{
public:
......
@@ -27,7 +27,7 @@ TEST(codegen, simple_return)
constexpr auto name = "test.cpp";
constexpr auto source = R"(extern "C" int test() { return 2+5; })";
-nervana::cpu::execution_state estate;
+ngraph::codegen::execution_state estate;
auto module = estate.compile(source, name);
ASSERT_NE(nullptr, module);
@@ -47,7 +47,7 @@ TEST(codegen, pass_args)
constexpr auto name = "test.cpp";
constexpr auto source = R"(extern "C" int test(int a, int b) { return a+b; })";
-nervana::cpu::execution_state estate;
+ngraph::codegen::execution_state estate;
auto module = estate.compile(source, name);
ASSERT_NE(nullptr, module);
@@ -74,7 +74,7 @@ TEST(codegen, include)
}
)";
-nervana::cpu::execution_state estate;
+ngraph::codegen::execution_state estate;
auto module = estate.compile(source, name);
ASSERT_NE(nullptr, module);
......