Unverified Commit af18be14 authored by Robert Kimball's avatar Robert Kimball Committed by GitHub

Merge pull request #206 from NervanaSystems/jmenon/cpu

CPU Backend
parents 88986222 60ea252d
......@@ -11,7 +11,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
cmake_minimum_required (VERSION 2.8)
cmake_minimum_required (VERSION 3.1)
set(NGRAPH_INCLUDE_PATH
${CMAKE_CURRENT_SOURCE_DIR}/src
......
# Copyright 2017 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
include(ExternalProject)

# Fetch a prebuilt LLVM + Clang 5.0.0 release for the CPU backend's JIT
# compiler.  The CPU backend is only built on Linux, so the fetch is skipped
# on Darwin and Windows.  Variable expansions inside if() are quoted so the
# condition still parses if CMAKE_SYSTEM_NAME is ever empty.
if(NGRAPH_CPU_ENABLE AND (NOT "${CMAKE_SYSTEM_NAME}" MATCHES "Darwin") AND
   (NOT "${CMAKE_SYSTEM_NAME}" MATCHES "Windows"))
    message(STATUS "Fetching LLVM from llvm.org")

    # Default binary release; may be overridden below via PREBUILT_LLVM.
    set(LLVM_RELEASE_URL http://releases.llvm.org/5.0.0/clang+llvm-5.0.0-linux-x86_64-ubuntu16.04.tar.xz)
    set(LLVM_SHA1_HASH 9cb81c92aa4d3f9707a9b8413c4d24b8dee90c59)

    # Override default LLVM binaries with a locally mirrored tarball.
    # A matching SHA1 must be supplied so the download is still verified.
    if(PREBUILT_LLVM)
        if(NOT DEFINED PREBUILT_LLVM_HASH)
            message(FATAL_ERROR "SHA1 hash of prebuilt llvm tarball not provided in PREBUILT_LLVM_HASH.")
        endif()
        set(LLVM_RELEASE_URL ${PREBUILT_LLVM})
        set(LLVM_SHA1_HASH ${PREBUILT_LLVM_HASH})
    endif()

    # The 'BUILD_BYPRODUCTS' argument was introduced in CMake 3.2, so omit it
    # on older versions.  It lets the Ninja generator track the extracted
    # static library as an output of this external project.
    if("${CMAKE_VERSION}" VERSION_LESS 3.2)
        ExternalProject_Add(
            ext_llvm
            URL ${LLVM_RELEASE_URL}
            URL_HASH SHA1=${LLVM_SHA1_HASH}
            CONFIGURE_COMMAND ""
            BUILD_COMMAND ""
            INSTALL_COMMAND ""
            UPDATE_COMMAND ""
            )
    else()
        ExternalProject_Add(
            ext_llvm
            URL ${LLVM_RELEASE_URL}
            URL_HASH SHA1=${LLVM_SHA1_HASH}
            CONFIGURE_COMMAND ""
            BUILD_COMMAND ""
            INSTALL_COMMAND ""
            UPDATE_COMMAND ""
            BUILD_BYPRODUCTS "${CMAKE_CURRENT_BINARY_DIR}/ext_llvm-prefix/src/ext_llvm/lib/libLLVMCore.a"
            )
    endif()

    ExternalProject_Get_Property(ext_llvm source_dir)

    # Export the include/lib locations and the full (LLVM-5.0-specific) link
    # line to the parent scope for the main build to consume.
    set(LLVM_INCLUDE_DIR "${source_dir}/include" PARENT_SCOPE)
    set(LLVM_LIB_DIR "${source_dir}/lib" PARENT_SCOPE)

    set(LLVM_LINK_LIBS clangTooling clangFrontendTool clangFrontend clangDriver clangSerialization clangCodeGen clangParse clangSema clangStaticAnalyzerFrontend clangStaticAnalyzerCheckers clangStaticAnalyzerCore clangAnalysis clangARCMigrate clangRewriteFrontend clangEdit clangAST clangLex clangBasic LLVMLTO LLVMPasses LLVMObjCARCOpts LLVMSymbolize LLVMDebugInfoPDB LLVMDebugInfoDWARF LLVMMIRParser LLVMCoverage LLVMTableGen LLVMDlltoolDriver LLVMOrcJIT LLVMXCoreDisassembler LLVMXCoreCodeGen LLVMXCoreDesc LLVMXCoreInfo LLVMXCoreAsmPrinter LLVMSystemZDisassembler LLVMSystemZCodeGen LLVMSystemZAsmParser LLVMSystemZDesc LLVMSystemZInfo LLVMSystemZAsmPrinter LLVMSparcDisassembler LLVMSparcCodeGen LLVMSparcAsmParser LLVMSparcDesc LLVMSparcInfo LLVMSparcAsmPrinter LLVMPowerPCDisassembler LLVMPowerPCCodeGen LLVMPowerPCAsmParser LLVMPowerPCDesc LLVMPowerPCInfo LLVMPowerPCAsmPrinter LLVMNVPTXCodeGen LLVMNVPTXDesc LLVMNVPTXInfo LLVMNVPTXAsmPrinter LLVMMSP430CodeGen LLVMMSP430Desc LLVMMSP430Info LLVMMSP430AsmPrinter LLVMMipsDisassembler LLVMMipsCodeGen LLVMMipsAsmParser LLVMMipsDesc LLVMMipsInfo LLVMMipsAsmPrinter LLVMLanaiDisassembler LLVMLanaiCodeGen LLVMLanaiAsmParser LLVMLanaiDesc LLVMLanaiAsmPrinter LLVMLanaiInfo LLVMHexagonDisassembler LLVMHexagonCodeGen LLVMHexagonAsmParser LLVMHexagonDesc LLVMHexagonInfo LLVMBPFDisassembler LLVMBPFCodeGen LLVMBPFDesc LLVMBPFInfo LLVMBPFAsmPrinter LLVMARMDisassembler LLVMARMCodeGen LLVMARMAsmParser LLVMARMDesc LLVMARMInfo LLVMARMAsmPrinter LLVMAMDGPUDisassembler LLVMAMDGPUCodeGen LLVMAMDGPUAsmParser LLVMAMDGPUDesc LLVMAMDGPUInfo LLVMAMDGPUAsmPrinter LLVMAMDGPUUtils LLVMAArch64Disassembler LLVMAArch64CodeGen LLVMAArch64AsmParser LLVMAArch64Desc LLVMAArch64Info LLVMAArch64AsmPrinter LLVMAArch64Utils LLVMObjectYAML LLVMLibDriver LLVMOption LLVMX86Disassembler LLVMX86AsmParser LLVMX86CodeGen LLVMGlobalISel LLVMSelectionDAG LLVMAsmPrinter LLVMDebugInfoCodeView LLVMDebugInfoMSF LLVMX86Desc LLVMMCDisassembler LLVMX86Info LLVMX86AsmPrinter
        LLVMX86Utils LLVMMCJIT LLVMLineEditor LLVMInterpreter LLVMExecutionEngine LLVMRuntimeDyld LLVMCodeGen LLVMTarget LLVMCoroutines LLVMipo LLVMInstrumentation LLVMVectorize LLVMScalarOpts LLVMLinker LLVMIRReader LLVMAsmParser LLVMInstCombine LLVMTransformUtils LLVMBitWriter LLVMAnalysis LLVMProfileData LLVMObject LLVMMCParser LLVMMC LLVMBitReader LLVMCore LLVMBinaryFormat LLVMSupport LLVMDemangle tinfo z m PARENT_SCOPE)
endif()
......@@ -99,9 +99,38 @@ include_directories(
"${EIGEN_INCLUDE_DIR}"
)
# Only build the CPU backend when an LLVM toolchain is available
# (LLVM_INCLUDE_DIR is exported by the external LLVM project).
if(LLVM_INCLUDE_DIR)
include_directories(SYSTEM ${LLVM_INCLUDE_DIR})
link_directories(${LLVM_LIB_DIR})
# Add sources for the CPU backend
# and all its dependencies
set(SRC ${SRC}
codegen/compiler.cpp
runtime/cpu/call_frame.cpp
runtime/cpu/cpu_manager.cpp
runtime/cpu/cpu_backend.cpp
runtime/cpu/emitter.cpp
runtime/cpu/external_function.cpp
)
# LLVM binary builds are typically built without RTTI
# The built-in headers are in a version-specific directory
# This must be kept in sync with the LLVM + Clang version in use
set_source_files_properties(codegen/compiler.cpp PROPERTIES COMPILE_FLAGS "-fno-rtti")
# Bake the header search paths the in-process JIT compiler needs (Eigen,
# Clang builtin headers, ngraph's own headers) into compiler.cpp as
# preprocessor definitions.
set_source_files_properties(codegen/compiler.cpp PROPERTIES COMPILE_DEFINITIONS
"EIGEN_HEADERS_PATH=\"${EIGEN_INCLUDE_DIR}\";CLANG_BUILTIN_HEADERS_PATH=\"${LLVM_LIB_DIR}/clang/5.0.0/include\";NGRAPH_HEADERS_PATH=\"${NGRAPH_INCLUDE_PATH}\"")
# User-togglable knobs for the CPU backend's JIT pipeline; forwarded to
# external_function.cpp as NGCPU_PCH / NGCPU_DEBUGINFO.
set(NGRAPH_CPU_PCH_ENABLE 0 CACHE STRING "Enable pre-compiled headers in the CPU backend")
set(NGRAPH_CPU_DEBUGINFO_ENABLE 0 CACHE STRING "Enable debuginfo in the CPU backend")
set_source_files_properties(runtime/cpu/external_function.cpp PROPERTIES COMPILE_DEFINITIONS
"NGCPU_PCH=${NGRAPH_CPU_PCH_ENABLE};NGCPU_DEBUGINFO=${NGRAPH_CPU_DEBUGINFO_ENABLE}")
endif()
# The main ngraph shared library.  SRC includes the CPU backend sources when
# they were appended above.
add_library(ngraph SHARED ${SRC})
target_include_directories(ngraph PUBLIC "${NGRAPH_INCLUDE_PATH}")
# LLVM_LINK_LIBS is only defined when the CPU backend (and hence LLVM) is
# enabled.
if(LLVM_LINK_LIBS)
target_link_libraries(ngraph LINK_PRIVATE ${LLVM_LINK_LIBS})
endif()
if (APPLE)
set_property(TARGET ngraph PROPERTY PREFIX "lib")
set_property(TARGET ngraph PROPERTY OUTPUT_NAME "ngraph.so")
......@@ -146,3 +175,7 @@ install(DIRECTORY
endif()
add_dependencies(ngraph eigen)
if(NOT LLVM_PACKAGED AND LLVM_INCLUDE_DIR)
add_dependencies(ngraph ext_llvm)
endif()
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <clang/CodeGen/ObjectFilePCHContainerOperations.h>
#include <clang/Driver/DriverDiagnostic.h>
#include <clang/Driver/Options.h>
#include <clang/Frontend/CompilerInstance.h>
#include <clang/Frontend/CompilerInvocation.h>
#include <clang/Frontend/FrontendDiagnostic.h>
#include <clang/Frontend/TextDiagnosticBuffer.h>
#include <clang/Frontend/TextDiagnosticPrinter.h>
#include <clang/Frontend/Utils.h>
#include <clang/FrontendTool/Utils.h>
#include <clang/Lex/Preprocessor.h>
#include <clang/Lex/PreprocessorOptions.h>
#include <llvm/ADT/Statistic.h>
#include <llvm/LinkAllPasses.h>
#include <llvm/Option/Arg.h>
#include <llvm/Option/ArgList.h>
#include <llvm/Option/OptTable.h>
#include <llvm/Support/ErrorHandling.h>
#include <llvm/Support/ManagedStatic.h>
#include <llvm/Support/Signals.h>
#include <llvm/Support/TargetSelect.h>
#include <llvm/Support/Timer.h>
#include <llvm/Support/raw_ostream.h>
#include <clang/Basic/DiagnosticOptions.h>
#include <clang/Basic/TargetInfo.h>
#include <clang/CodeGen/CodeGenAction.h>
#include <clang/Frontend/CompilerInstance.h>
#include <clang/Frontend/TextDiagnosticPrinter.h>
#include <llvm/Support/TargetSelect.h>
#include <llvm/ExecutionEngine/ExecutionEngine.h>
#include <llvm/ExecutionEngine/SectionMemoryManager.h>
#include "ngraph/codegen/compiler.hpp"
// TODO: Fix leaks
using namespace clang;
using namespace llvm;
using namespace llvm::opt;
using namespace std;
using namespace ngraph::codegen;
// Resolve the absolute path of the running executable, given argv[0].
static std::string GetExecutablePath(const char* Argv0)
{
    // Any symbol that lives inside this binary works as the anchor address;
    // C++ doesn't allow taking the address of ::main, so use this function.
    void* anchor_symbol = reinterpret_cast<void*>(GetExecutablePath);
    return llvm::sys::fs::getMainExecutable(Argv0, anchor_symbol);
}
// Start with no JIT engine; one is created lazily by add_module().
// PCH and debug-info support are opt-in via the setters in the header.
execution_state::execution_state()
: m_execution_engine{nullptr}
, precompiled_headers_enabled(false)
, debuginfo_enabled(false)
{
}
// NOTE(review): m_execution_engine is not deleted here — see the file-level
// "TODO: Fix leaks"; confirm whether the engine is meant to outlive this
// object or is simply leaked.
execution_state::~execution_state()
{
}
// Compile the C++ program text in `source` to an LLVM module using an
// in-process Clang.  `name` is the pseudo-filename the in-memory buffer is
// registered under (it appears in diagnostics).  Returns the module produced
// by the code-generation action.
// NOTE(review): several heap allocations below are never freed — see the
// file-level "TODO: Fix leaks".
std::unique_ptr<llvm::Module> execution_state::compile(const string& source, const string& name)
{
// Register every target/asm printer/parser so the JIT can pick the host.
llvm::InitializeAllTargets();
llvm::InitializeAllTargetMCs();
llvm::InitializeAllAsmPrinters();
llvm::InitializeAllAsmParsers();
// Prepare compilation arguments
vector<const char*> args;
args.push_back(name.c_str());
// Prepare DiagnosticEngine
// NOTE(review): textDiagPrinter and pDiagnosticsEngine are heap-allocated
// and never deleted, and both reference the stack-allocated DiagOpts.
DiagnosticOptions DiagOpts;
TextDiagnosticPrinter* textDiagPrinter = new clang::TextDiagnosticPrinter(errs(), &DiagOpts);
IntrusiveRefCntPtr<clang::DiagnosticIDs> pDiagIDs;
DiagnosticsEngine* pDiagnosticsEngine =
new DiagnosticsEngine(pDiagIDs, &DiagOpts, textDiagPrinter);
// Create and initialize CompilerInstance
std::unique_ptr<CompilerInstance> Clang(new CompilerInstance());
Clang->createDiagnostics();
// Initialize CompilerInvocation
CompilerInvocation::CreateFromArgs(
Clang->getInvocation(), &args[0], &args[0] + args.size(), *pDiagnosticsEngine);
// Infer the builtin include path if unspecified.
if (Clang->getHeaderSearchOpts().UseBuiltinIncludes &&
Clang->getHeaderSearchOpts().ResourceDir.empty())
{
void* MainAddr = reinterpret_cast<void*>(GetExecutablePath);
auto path = CompilerInvocation::GetResourcesPath(args[0], MainAddr);
Clang->getHeaderSearchOpts().ResourceDir = path;
}
auto& HSO = Clang->getInvocation().getHeaderSearchOpts();
// Add base toolchain-supplied header paths
// Ideally one would use the Linux toolchain definition in clang/lib/Driver/ToolChains.h
// But that's a private header and isn't part of the public libclang API
// Instead of re-implementing all of that functionality in a custom toolchain
// just hardcode the paths relevant to frequently used build/test machines for now
// CLANG_BUILTIN_HEADERS_PATH, EIGEN_HEADERS_PATH and NGRAPH_HEADERS_PATH are
// injected as compile definitions by the build system.
HSO.AddPath(CLANG_BUILTIN_HEADERS_PATH, clang::frontend::System, false, false);
HSO.AddPath("/usr/include/x86_64-linux-gnu", clang::frontend::System, false, false);
HSO.AddPath("/usr/include", clang::frontend::System, false, false);
// Add C++ standard library headers
// Debian-like + GCC 4.8 libstdc++
HSO.AddPath("/usr/include/x86_64-linux-gnu/c++/4.8", clang::frontend::System, false, false);
HSO.AddPath("/usr/include/c++/4.8", clang::frontend::System, false, false);
// Debian-like + GCC 5 libstdc++
HSO.AddPath("/usr/include/x86_64-linux-gnu/c++/5", clang::frontend::System, false, false);
HSO.AddPath("/usr/include/c++/5", clang::frontend::System, false, false);
HSO.AddPath(EIGEN_HEADERS_PATH, clang::frontend::System, false, false);
HSO.AddPath(NGRAPH_HEADERS_PATH, clang::frontend::System, false, false);
// Language options
// These are the C++ features needed to compile ngraph headers
// and any dependencies like Eigen
auto LO = Clang->getInvocation().getLangOpts();
LO->CPlusPlus = 1;
LO->CPlusPlus11 = 1;
LO->Bool = 1;
LO->Exceptions = 1;
LO->CXXExceptions = 1;
LO->WChar = 1;
LO->RTTI = 1;
// Enable OpenMP for Eigen
LO->OpenMP = 1;
LO->OpenMPUseTLS = 1;
if (debuginfo_enabled)
{
// CodeGen options
CGO.setDebugInfo is only enabled on request via set_debuginfo_enabled().
auto& CGO = Clang->getInvocation().getCodeGenOpts();
CGO.setDebugInfo(codegenoptions::FullDebugInfo);
}
if (precompiled_headers_enabled)
{
// Preprocessor options
// "ngcpu.pch" must exist beforehand; validation is disabled.
auto& PPO = Clang->getInvocation().getPreprocessorOpts();
PPO.ImplicitPCHInclude = "ngcpu.pch";
PPO.DisablePCHValidation = 1;
}
// Enable various target features
// Most of these are for Eigen
// NOTE(review): assumes an x86-64 host with AVX2/FMA support — confirm on
// older build/test machines.
auto& TO = Clang->getInvocation().getTargetOpts();
TO.FeaturesAsWritten.emplace_back("+sse4.1");
TO.FeaturesAsWritten.emplace_back("+sse4.2");
TO.FeaturesAsWritten.emplace_back("+avx");
TO.FeaturesAsWritten.emplace_back("+avx2");
TO.FeaturesAsWritten.emplace_back("+fma");
// Map code filename to a memoryBuffer
StringRef source_ref(source);
unique_ptr<MemoryBuffer> buffer = MemoryBuffer::getMemBufferCopy(source_ref);
Clang->getInvocation().getPreprocessorOpts().addRemappedFile(name, buffer.get());
// Create and execute action
// NOTE(review): compilerAction is never deleted (leak).  The buffer is
// release()d, not freed — presumably the preprocessor owns the remapped
// buffer; verify against the Clang version in use.
CodeGenAction* compilerAction = new EmitCodeGenOnlyAction();
Clang->ExecuteAction(*compilerAction);
buffer.release();
return compilerAction->takeModule();
}
// Hand a compiled module to the JIT.  On the first successful call this
// creates the ExecutionEngine (taking ownership of the module); any
// engine-creation error message is captured in jit_error for finalize().
// Returns false when the module is null or engine creation fails.
// NOTE(review): if an engine already exists, `module` is neither added nor
// moved, yet true is returned — subsequent modules are silently ignored.
// Confirm whether multi-module support was intended here.
bool execution_state::add_module(std::unique_ptr<llvm::Module>& module)
{
if (module)
{
if (!m_execution_engine)
{
m_execution_engine = llvm::EngineBuilder(move(module))
.setEngineKind(llvm::EngineKind::JIT)
.setOptLevel(llvm::CodeGenOpt::Aggressive)
.setErrorStr(&jit_error)
.create();
if (!m_execution_engine)
{
return false;
}
}
}
else
{
return false;
}
return true;
}
// Finalize JIT code generation and run the module's static constructors.
// Throws when no execution engine was ever created, surfacing the error
// message captured by add_module() when available.
void execution_state::finalize()
{
    if (m_execution_engine == nullptr)
    {
        throw std::runtime_error(
            "Error in finalize: " +
            (jit_error.empty() ? "Could not create an execution engine" : jit_error));
    }

    m_execution_engine->finalizeObject();
    m_execution_engine->runStaticConstructorsDestructors(false);
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <functional>
#include <memory>
#include <string>
#include <llvm/ExecutionEngine/MCJIT.h> // forces JIT to link in
#include <llvm/ExecutionEngine/SectionMemoryManager.h>
#include <llvm/Option/Arg.h>
namespace ngraph
{
namespace codegen
{
class module;
class execution_state;
}
}
// Thin wrapper around an llvm::Module.  Currently a placeholder: it exposes
// no public API and only owns the module.
class ngraph::codegen::module
{
public:
private:
std::unique_ptr<llvm::Module> m_module;
};
// Drives in-process compilation (via Clang) and JIT execution (via LLVM's
// ExecutionEngine) of generated C++ source.  Also serves as the JIT's
// memory manager through the SectionMemoryManager base.
class ngraph::codegen::execution_state : public llvm::SectionMemoryManager
{
public:
    execution_state();
    ~execution_state();

    // Toggle use of an implicit pre-compiled header ("ngcpu.pch") in compile().
    void set_precompiled_headers_enabled(bool state) { precompiled_headers_enabled = state; }
    bool is_precompiled_headers_enabled() { return precompiled_headers_enabled; }

    // Toggle emission of full debug info in compile().
    void set_debuginfo_enabled(bool state) { debuginfo_enabled = state; }
    bool is_debuginfo_enabled() { return debuginfo_enabled; }

    // Compile C++ program text to an LLVM module; `name` is the
    // pseudo-filename used in diagnostics.
    std::unique_ptr<llvm::Module> compile(const std::string& source, const std::string& name = "");

    // Hand a compiled module to the JIT; the engine is created on first use.
    // Returns false when the module is null or engine creation fails.
    bool add_module(std::unique_ptr<llvm::Module>&);

    // Finalize code generation and run static constructors; throws if no
    // engine was ever created.
    void finalize();

    // Look up a JIT-compiled symbol by name and wrap it as a std::function
    // with the requested signature.
    template <typename ftype>
    std::function<ftype> find_function(const std::string& func_name)
    {
        auto f = m_execution_engine->getPointerToNamedFunction(func_name);
        return f_cast<ftype>(f);
    }

private:
    llvm::ExecutionEngine* m_execution_engine;
    std::string jit_error;
    bool precompiled_headers_enabled;
    bool debuginfo_enabled;

    // Convert a raw JIT symbol address to a callable of the requested
    // signature.  (The previous version layered a redundant static_cast on
    // top of the reinterpret_cast; one conversion suffices.)
    template <typename signature>
    std::function<signature> f_cast(void* f)
    {
        return reinterpret_cast<signature*>(f);
    }
};
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <exception>
#include <sstream>
#include "ngraph/descriptor/output.hpp"
#include "ngraph/pass/pass.hpp"
namespace ngraph
{
namespace pass
{
/// @brief Call-graph pass that assigns a layout of type LT to every tensor
/// view in the graph that does not already have one.
template <typename LT>
class AssignLayout : public CallGraphPass
{
public:
virtual bool run_on_call_graph(std::list<std::shared_ptr<Node>>& nodes) override
{
for (const std::shared_ptr<Node>& node : nodes)
{
try
{
for (const descriptor::Output& output : node->get_outputs())
{
auto tv = output.get_tensor_view();
// Only fill in a layout where none exists, so layouts
// assigned earlier are preserved.
if (nullptr == tv->get_tensor_view_layout())
{
auto layout = std::make_shared<LT>(*tv);
tv->set_tensor_view_layout(layout);
}
}
}
catch (const std::exception& e)
{
// Re-throw with the offending node identified in the message.
std::stringstream ss;
ss << "Error with node " << *node << ": ";
ss << e.what();
throw std::invalid_argument(ss.str());
}
}
// NOTE(review): presumably false means "graph not modified" per the
// pass framework's convention — confirm against CallGraphPass.
return false;
}
};
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <algorithm>
#include "call_frame.hpp"
using namespace std;
using namespace ngraph::runtime::cpu;
// Build the flat tensor-view table used during calls.  Slot layout:
// [0, n_outputs) outputs, then [n_outputs, n_outputs + n_inputs) inputs,
// then the temporaries, which are installed once here and kept for the
// frame's lifetime.
CallFrame::CallFrame(EntryPoint compiled_function,
size_t n_outputs,
size_t n_inputs,
const TensorViewPtrs& temps)
: m_n_outputs(n_outputs)
, m_n_inputs(n_inputs)
, m_tensor_views(n_inputs + n_outputs + temps.size())
, m_compiled_function(compiled_function)
{
copy(temps.begin(), temps.end(), m_tensor_views.begin() + m_n_outputs + m_n_inputs);
}
// Install outputs then inputs into the view table (matching the slot layout
// set up by the constructor), run the compiled function, and clear those
// slots afterwards so the frame does not keep the caller's tensors alive.
void CallFrame::tensor_call(
const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& inputs,
const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& outputs)
{
copy(outputs.begin(), outputs.end(), m_tensor_views.begin());
copy(inputs.begin(), inputs.end(), m_tensor_views.begin() + m_n_outputs);
// Invoke compiled computation
m_compiled_function(this, m_tensor_views);
// Don't hold onto inputs/outputs
fill_n(m_tensor_views.begin(), m_n_outputs + m_n_inputs, nullptr);
}
// Flatten each argument/result Value into the tensor views it contains and
// dispatch to the tensor-level call.
void CallFrame::operator()(const std::vector<std::shared_ptr<ngraph::runtime::Value>>& arguments,
                           const std::vector<std::shared_ptr<ngraph::runtime::Value>>& results)
{
    // TODO: Check types of args and result
    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> inputs;
    // Iterate by const reference: the original copied each shared_ptr per
    // iteration, paying needless refcount traffic.
    for (const auto& argument : arguments)
    {
        argument->collect_tensor_views(inputs, argument);
    }
    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> outputs;
    for (const auto& result : results)
    {
        result->collect_tensor_views(outputs, result);
    }
    tensor_call(inputs, outputs);
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <functional>
#include <memory>
#include <vector>
#include "ngraph/function.hpp"
#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
    namespace runtime
    {
        class PrimaryTensorView;

        namespace cpu
        {
            class CallFrame;

            // Signature of a JIT-compiled entry point: it receives the owning
            // call frame plus the flat tensor-view table it operates on.
            using EntryPoint = std::function<void(ngraph::runtime::cpu::CallFrame*,
                                                  ngraph::runtime::TensorViewPtrs&)>;

            // Compile and execute graphs
            class CallFrame : public ngraph::runtime::CallFrame
            {
            public:
                CallFrame(EntryPoint compiled_function,
                          size_t n_outputs,
                          size_t n_inputs,
                          const TensorViewPtrs& temps);

                /// @brief Invoke the function with values matching the signature of the function.
                ///
                /// Tuples will be expanded into their tensor views to build the call frame.
                void
                    operator()(const std::vector<std::shared_ptr<ngraph::runtime::Value>>& inputs,
                               const std::vector<std::shared_ptr<ngraph::runtime::Value>>& outputs);

                /// @brief Invoke the function with tuples pre-expanded to their underlying tensor views.
                void tensor_call(const TensorViewPtrs& inputs, const TensorViewPtrs& outputs);

                void set_return() { m_return = true; }
                std::shared_ptr<TensorView> get_tensor_view(size_t i) { return m_tensor_views[i]; }

                // Typed access to the tensor view in slot i.
                template <typename ET>
                ParameterizedTensorView<ET>* get_parameterized_tensor_view(size_t i)
                {
                    return m_tensor_views[i]->get_parameterized_tensor_view<ET>();
                }

                // Raw pointer to the first element of the tensor in slot i.
                template <typename ET>
                typename ET::type* get_tensor_view_data(size_t i)
                {
                    return &get_parameterized_tensor_view<ET>(i)->get_vector()[0];
                }

            protected:
                size_t m_n_outputs;
                size_t m_n_inputs;
                TensorViewPtrs m_tensor_views;
                // Default-initialized here: the constructor's init list does
                // not mention m_return, which previously left it indeterminate.
                bool m_return = false;
                EntryPoint m_compiled_function;
            };
        }
    }
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include "ngraph/runtime/cpu/cpu_backend.hpp"
#include "ngraph/runtime/external_function.hpp"
using namespace ngraph::runtime::cpu;
// Delegate call-frame creation to the external function, which holds the
// compiled form of the graph.
std::shared_ptr<ngraph::runtime::CallFrame>
CPUBackend::make_call_frame(const std::shared_ptr<ExternalFunction>& external_function)
{
return external_function->make_call_frame();
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/backend.hpp"
namespace ngraph
{
namespace runtime
{
namespace cpu
{
// Backend for the LLVM-JIT-based CPU runtime.
class CPUBackend : public Backend
{
public:
// Create a call frame from a compiled external function.
// NOTE(review): presumably overrides Backend::make_call_frame; consider
// adding `override` once the base signature is confirmed.
virtual std::shared_ptr<ngraph::runtime::CallFrame> make_call_frame(
const std::shared_ptr<ngraph::runtime::ExternalFunction>& external_function);
};
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <memory>
#include "ngraph/runtime/cpu/cpu_backend.hpp"
#include "ngraph/runtime/cpu/cpu_manager.hpp"
#include "ngraph/runtime/cpu/external_function.hpp"
using namespace ngraph::runtime::cpu;
// Return a freshly allocated CPU backend instance.
std::shared_ptr<ngraph::runtime::Backend> CPUManager::allocate_backend()
{
return std::make_shared<CPUBackend>();
}
// Wrap the graph function in an ExternalFunction.
// NOTE(review): no compilation work is visible here — presumably deferred to
// ExternalFunction (e.g. when a call frame is made); confirm.
std::shared_ptr<ngraph::runtime::ExternalFunction>
CPUManager::compile(const std::shared_ptr<ngraph::Function>& fun)
{
return std::make_shared<ExternalFunction>(fun);
}
// Register this manager under the name "CPU" at static-initialization time
// so it is discoverable through the runtime::Manager factory registry.
ngraph::runtime::Manager::Factory CPUManager::factory = ngraph::runtime::Manager::register_factory(
"CPU", [](const std::string& name) -> std::shared_ptr<ngraph::runtime::Manager> {
return std::make_shared<CPUManager>();
});
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <memory>
#include "ngraph/codegen/compiler.hpp"
#include "ngraph/runtime/manager.hpp"
namespace ngraph
{
    class Function;

    namespace runtime
    {
        class ExternalFunction;

        namespace cpu
        {
            /// @brief Manager for the LLVM-JIT-based CPU backend.
            /// (The previous comment said "interpreted backend", which this
            /// is not.)
            class CPUManager : public Manager
            {
            protected:
                // Compiler/JIT state shared across compilations.
                ngraph::codegen::execution_state exec_state;

            public:
                virtual std::shared_ptr<Backend> allocate_backend() override;

                virtual std::shared_ptr<ngraph::runtime::ExternalFunction>
                    compile(const std::shared_ptr<ngraph::Function>& fun) override;

                // Registers this manager with the runtime under the name "CPU".
                static Factory factory;
            };
        } // removed stray ';' that previously followed this namespace brace
    }
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <memory>
#include <Eigen/Dense>
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/runtime/cpu/call_frame.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
namespace ngraph
{
namespace runtime
{
class TensorViewInfo;
namespace cpu
{
class CallFrame;
// Helpers that wrap raw tensor storage in Eigen Map views so generated
// kernels can operate on it with Eigen expressions.
namespace eigen
{
// Both strides chosen at runtime.
using DynamicStrides = Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>;
// Runtime outer stride with fixed unit inner stride.
using VectorStrides = Eigen::Stride<Eigen::Dynamic, 1>;
// ET is an element-type tag; ET::type is the underlying scalar type.
template <typename ET>
using DynamicArray =
Eigen::Array<typename ET::type, Eigen::Dynamic, Eigen::Dynamic>;
template <typename ET>
using EigenArrayBase = Eigen::Map<DynamicArray<ET>, 0, DynamicStrides>;
template <typename ET>
using DynamicMatrix = Eigen::
Matrix<typename ET::type, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
template <typename ET>
using EigenMatrixBase = Eigen::Map<DynamicMatrix<ET>, 0, DynamicStrides>;
template <typename ET>
using DynamicVector = Eigen::Matrix<typename ET::type, Eigen::Dynamic, 1>;
template <typename ET>
using EigenVectorBase = Eigen::Map<DynamicVector<ET>, 0, VectorStrides>;
namespace fmt
{
/// @brief vector format for Eigen wrappers.
class V
{
public:
// Length (l0) is the dense layout's total element count.
V(const TensorViewInfo& tensor_view_info)
: l0(tensor_view_info
.get_layout<
ngraph::descriptor::layout::DenseTensorViewLayout>()
->get_size())
{
}
V(size_t s)
: l0(s)
{
}
public:
size_t l0; // length
size_t l1{1}; // trailing dimension fixed at 1 for vectors
size_t s0{1}; // unit strides: vectors are treated as contiguous
size_t s1{1};
};
/// @brief 2-D (matrix) format for Eigen wrappers.
class M
{
// Private delegating constructor; reachable only through the public
// TensorViewInfo constructor below.
M(const std::shared_ptr<ngraph::descriptor::layout::DenseTensorViewLayout>&
layout)
: M(layout->get_shape(), layout->get_strides())
{
}
public:
M(const Shape& shape, const Strides& strides)
: l0(shape.at(0))
, l1(shape.at(1))
, s0(strides.at(0))
, s1(strides.at(1))
{
}
M(const TensorViewInfo& tensor_view_info)
: M(tensor_view_info.get_layout<
ngraph::descriptor::layout::DenseTensorViewLayout>())
{
}
public:
size_t l0; // shape[0]
size_t l1; // shape[1]
size_t s0; // strides[0]
size_t s1; // strides[1]
};
}
// ET element type
// FMT array format (fmt::V for vector, etc.)
// BASE select array/matrix
template <typename ET,
typename FMT,
typename BASE,
typename STRIDES = DynamicStrides>
class EigenWrapper : public BASE
{
using base = BASE;
public:
// Map raw storage `t` with the extents/strides described by `fmt`.
EigenWrapper(typename ET::type* t, const FMT& fmt)
: base(t, fmt.l0, fmt.l1, STRIDES(fmt.s0, fmt.s1))
{
}
// Flat view over a dense layout: get_size() x 1 with unit strides.
EigenWrapper(
typename ET::type* t,
const std::shared_ptr<ngraph::descriptor::layout::DenseTensorViewLayout>&
layout)
: base(t, layout->get_size(), 1, DynamicStrides(1, 1))
{
}
// Convenience: fetch the raw data pointer for a tensor view from the
// call frame and derive the format from its layout.
EigenWrapper(CallFrame* call_frame, const TensorViewInfo& tensor_view_info)
: EigenWrapper(
call_frame->get_tensor_view_data<ET>(tensor_view_info.get_index()),
FMT(tensor_view_info))
{
}
// Forward assignment to the underlying Eigen Map so expression results
// are written straight into the mapped storage.
template <typename U>
EigenWrapper& operator=(const U& other)
{
this->base::operator=(other);
return *this;
}
};
template <typename ET, typename FMT = fmt::V>
using EigenArray1d = EigenWrapper<ET, FMT, EigenArrayBase<ET>>;
template <typename ET, typename FMT = fmt::M>
using EigenArray2d = EigenWrapper<ET, FMT, EigenArrayBase<ET>>;
template <typename ET, typename FMT = fmt::M>
using EigenMatrix = EigenWrapper<ET, FMT, EigenMatrixBase<ET>>;
template <typename ET, typename FMT = fmt::V>
using EigenVector = EigenWrapper<ET, FMT, EigenVectorBase<ET>, VectorStrides>;
}
}
}
}
This diff is collapsed.
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <string>
#include <vector>
#include "ngraph/node.hpp"
#include "ngraph/runtime/cpu/external_function.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
// EMITTER_DECL(E) expands E into the parameter list shared by every emitter
// method below, keeping the many declarations compact.
#define EMITTER_DECL(E) \
E(const ngraph::Node* n, \
ExternalFunction* ef, \
FunctionMap& function_map, \
const std::vector<TensorViewInfo>& inputs, \
const std::vector<TensorViewInfo>& outputs)
namespace ngraph
{
namespace runtime
{
namespace cpu
{
// Accumulates the generated C++ translation unit for a graph, one
// Emit* method per supported op.
class Emitter
{
protected:
// The translation-unit text built up so far.
std::string TU;
public:
Emitter()
: TU("")
{
}
// Mutable reference: callers both read and extend the text.
std::string& GetTU() { return TU; }
// One method per op type.  NOTE(review): method bodies are outside
// this file; presumably each appends code for node `n` to TU —
// confirm in emitter.cpp.
void EMITTER_DECL(EmitNop);
void EMITTER_DECL(EmitAdd);
void EMITTER_DECL(EmitDot);
void EMITTER_DECL(EmitMultiply);
void EMITTER_DECL(EmitGetTupleElement);
void EMITTER_DECL(EmitTuple);
void EMITTER_DECL(EmitAbs);
void EMITTER_DECL(EmitConcat);
void EMITTER_DECL(EmitDivide);
void EMITTER_DECL(EmitEqual);
void EMITTER_DECL(EmitGreater);
void EMITTER_DECL(EmitGreaterEq);
void EMITTER_DECL(EmitLess);
void EMITTER_DECL(EmitLessEq);
void EMITTER_DECL(EmitLog);
void EMITTER_DECL(EmitMaximum);
void EMITTER_DECL(EmitNegative);
void EMITTER_DECL(EmitNotEqual);
void EMITTER_DECL(EmitSelect);
void EMITTER_DECL(EmitSubtract);
void EMITTER_DECL(EmitParameterizedConstantBool);
void EMITTER_DECL(EmitParameterizedConstantFloat32);
void EMITTER_DECL(EmitParameterizedConstantInt8);
void EMITTER_DECL(EmitParameterizedConstantInt32);
void EMITTER_DECL(EmitParameterizedConstantInt64);
void EMITTER_DECL(EmitParameterizedConstantUInt8);
void EMITTER_DECL(EmitParameterizedConstantUInt32);
void EMITTER_DECL(EmitParameterizedConstantUInt64);
void EMITTER_DECL(EmitBroadcast);
void EMITTER_DECL(EmitConvert);
};
}
}
}
This diff is collapsed.
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <functional>
#include <memory>
#include <typeindex>
#include <typeinfo>
#include <unordered_map>
#include "ngraph/codegen/compiler.hpp"
#include "ngraph/function.hpp"
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
namespace ngraph
{
namespace runtime
{
namespace cpu
{
// Forward declarations to keep this header light.
class ExternalFunction;
class Emitter;
class CallFrame;
// Maps an ngraph Function to the ExternalFunction compiled from it,
// so nested/shared functions are compiled only once.
using FunctionMap =
std::unordered_map<std::shared_ptr<Function>, std::shared_ptr<ExternalFunction>>;
// Signature of a per-op code-generation routine (bound to an Emitter
// member; see the Emit* methods declared in the emitter header).
using OpFunction = std::function<void(Emitter*,
const ngraph::Node*,
ExternalFunction*,
FunctionMap&,
const std::vector<TensorViewInfo>& inputs,
const std::vector<TensorViewInfo>& outputs)>;
// Dispatch table: node's concrete C++ type -> its emitter routine.
using OpMap = std::unordered_map<std::type_index, OpFunction>;
// Entry point of the compiled code: invoked with the call frame and
// the tensor views it operates on.
using EntryPoint = std::function<void(ngraph::runtime::cpu::CallFrame*,
ngraph::runtime::TensorViewPtrs&)>;
// CPU-backend external function: compiles an ngraph Function to native
// code and exposes it through a call frame.
class ExternalFunction : public ngraph::runtime::ExternalFunction
{
public:
// NOTE(review): release_function presumably lets the source Function
// be freed after compilation — confirm against the base class.
ExternalFunction(const std::shared_ptr<ngraph::Function>& function,
bool release_function = true);
// Creates a call frame through which the compiled code is invoked.
std::shared_ptr<ngraph::runtime::CallFrame> make_call_frame();
protected:
// Compiles this function (and any nested ones via function_map).
void compile(FunctionMap& function_map);
size_t m_n_inputs;
size_t m_n_outputs;
// Temporary tensor views allocated for intermediate results.
ngraph::descriptor::TensorViewPtrs m_temp_views;
// Native entry point produced by compile(); null until compiled.
EntryPoint compiled_function;
};
}
}
}
......@@ -27,7 +27,7 @@ namespace ngraph
{
public:
TensorViewInfo(size_t index,
const std::shared_ptr<ngraph::descriptor::TensorView>& descriptor)
const std::shared_ptr<const ngraph::descriptor::TensorView>& descriptor)
: m_index(index)
, m_layout(descriptor->get_tensor_view_layout())
{
......
......@@ -25,7 +25,6 @@ set (SRC
autodiff.cpp
build_graph.cpp
eigen.cpp
execute.cpp
input_output_assign.cpp
main.cpp
op.cpp
......@@ -42,12 +41,32 @@ set (SRC
uuid.cpp
)
#================================================================================================
# To auto generate a suite of unit tests for a backend add a line like this
# set(BACKEND_NAMES ${BACKEND_NAMES} "BACKEND_NAME_GOES_HERE")
# and replace BACKEND_NAME_GOES_HERE with your backend name.
# The code for the unit test suite is in test/backend_test.in.cpp
#================================================================================================
# The NGVM backend is always built, so its test suite is always generated.
set(BACKEND_NAMES ${BACKEND_NAMES} "NGVM")
# MKL-DNN tests are only compiled when the library was found at configure time.
if(MKLDNN_INCLUDE_DIR)
include_directories(SYSTEM ${MKLDNN_INCLUDE_DIR})
link_directories(${MKLDNN_LIB_DIR})
set(SRC ${SRC} mkldnn.cpp)
endif()
# Codegen tests and the CPU backend suite require LLVM headers.
if(LLVM_INCLUDE_DIR)
include_directories(SYSTEM ${LLVM_INCLUDE_DIR})
set(SRC ${SRC} codegen.cpp)
set(BACKEND_NAMES ${BACKEND_NAMES} "CPU")
endif()
# Instantiate the shared backend test template once per enabled backend;
# configure_file substitutes ${BACKEND_NAME} inside backend_test.in.cpp.
foreach(BACKEND_NAME ${BACKEND_NAMES})
configure_file(backend_test.in.cpp backend_test_${BACKEND_NAME}.cpp)
set(SRC ${SRC} ${CMAKE_CURRENT_BINARY_DIR}/backend_test_${BACKEND_NAME}.cpp)
message(STATUS "Adding unit test for backend ${BACKEND_NAME}")
endforeach()
# NOTE(review): appending to CMAKE_CXX_FLAGS is directory-scoped; modern CMake
# would use target_compile_features/target_compile_options on the test target.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
# CURDIR lets tests locate fixture files relative to this source directory.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DCURDIR=\\\"${CMAKE_CURRENT_SOURCE_DIR}\\\"")
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <sstream>
#include <string>
#include <vector>
#include "gtest/gtest.h"
#include "ngraph/codegen/compiler.hpp"
using namespace std;
// Compile a trivial extern "C" function at runtime and verify that
// calling it through the JIT yields the expected constant.
TEST(codegen, simple_return)
{
constexpr auto name = "test.cpp";
constexpr auto source = R"(extern "C" int test() { return 2+5; })";

ngraph::codegen::execution_state state;

auto compiled = state.compile(source, name);
ASSERT_NE(nullptr, compiled);

state.add_module(compiled);
state.finalize();

auto entry = state.find_function<int()>("test");
ASSERT_NE(nullptr, entry);

EXPECT_EQ(7, entry());
}
// Verify that arguments are passed correctly into a runtime-compiled
// extern "C" function.
TEST(codegen, pass_args)
{
constexpr auto name = "test.cpp";
constexpr auto source = R"(extern "C" int test(int a, int b) { return a+b; })";

ngraph::codegen::execution_state state;

auto compiled = state.compile(source, name);
ASSERT_NE(nullptr, compiled);

state.add_module(compiled);
state.finalize();

auto entry = state.find_function<int(int, int)>("test");
ASSERT_NE(nullptr, entry);

EXPECT_EQ(42, entry(20, 22));
}
// Verify that runtime-compiled code can #include standard headers and
// call into the C standard library (here: <cmath>'s pow).
TEST(codegen, include)
{
constexpr auto name = "test.cpp";
constexpr auto source =
R"(
#include <cmath>
extern "C" int test(int a, int b)
{
return (int)pow((double)a,(double)b);
}
)";

ngraph::codegen::execution_state state;

auto compiled = state.compile(source, name);
ASSERT_NE(nullptr, compiled);

state.add_module(compiled);
state.finalize();

auto entry = state.find_function<int(int, int)>("test");
ASSERT_NE(nullptr, entry);

EXPECT_EQ(400, entry(20, 2));
}
......@@ -14,3 +14,4 @@
# Pull in the ExternalProject definitions for third-party dependencies
# used by the test suite (gtest, Eigen, MKL-DNN, LLVM).
include( ../cmake/external_gtest.cmake )
include( ../cmake/external_eigen.cmake )
include( ../cmake/external_mkldnn.cmake )
include( ../cmake/external_llvm.cmake )
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment