Commit 40fa0208 authored by fenglei.tian

fix header setting in cmake

parent 2cf7bff2
@@ -3,5 +3,5 @@
./build/test/unit-test --gtest_filter=GPU.maximum
./build/test/unit-test --gtest_filter=GPU.minimum
./build/test/unit-test --gtest_filter=GPU.multiple*
#./build/test/unit-test --gtest_filter=GPU.abs
##./build/test/unit-test --gtest_filter=GPU.abs
#./build/test/unit-test --gtest_filter=GPU.dot*
@@ -194,14 +194,12 @@ if (NGRAPH_CPU_ENABLE AND LLVM_INCLUDE_DIR AND
"NGRAPH_TBB_ENABLE"
)
endif()
set_source_files_properties(codegen/compiler.cpp PROPERTIES COMPILE_DEFINITIONS "${HEADER_SEARCH_DEFINES}")
set(NGRAPH_CPU_DEBUGINFO_ENABLE 0 CACHE STRING "Enable debuginfo in the CPU backend")
# GPU backend currently requires CPU because they share compiler.cpp,
# and compiler.cpp requires MKLDNN
if(NGRAPH_GPU_ENABLE)
include_directories(SYSTEM ${CUDA_INCLUDE_DIRS} ${CUDNN_INCLUDE_DIR})
message(STATUS "cuda --------------- ${CUDA_INCLUDE_DIRS}")
# Add sources for the GPU backend
# and all its dependencies
set(SRC ${SRC}
@@ -215,9 +213,11 @@ if (NGRAPH_CPU_ENABLE AND LLVM_INCLUDE_DIR AND
runtime/gpu/gpu_tensor_view_wrapper.cpp
runtime/gpu/gpu_util.cpp
)
set_property(SOURCE codegen/compiler.cpp APPEND_STRING PROPERTY COMPILE_DEFINITIONS
"CUDA_HEADER_PATHS=\"${CUDA_INCLUDE_DIRS}\";")
set(HEADER_SEARCH_DEFINES ${HEADER_SEARCH_DEFINES}
"CUDA_HEADER_PATHS=\"${CUDA_INCLUDE_DIRS}\""
)
endif()
set_source_files_properties(codegen/compiler.cpp PROPERTIES COMPILE_DEFINITIONS "${HEADER_SEARCH_DEFINES}")
endif()
......
@@ -65,7 +65,7 @@
#error "This source file interfaces with LLVM and Clang and must be compiled with RTTI disabled"
#endif
//#define USE_BUILTIN
#define USE_BUILTIN
using namespace clang;
using namespace llvm;
@@ -360,7 +360,6 @@ void codegen::StaticCompiler::configure_search_path()
add_header_search_path(CLANG_BUILTIN_HEADERS_PATH);
add_header_search_path("/usr/include/x86_64-linux-gnu");
add_header_search_path("/usr/include");
add_header_search_path("/usr/local/cuda/include");
// Search for headers in
// /usr/include/x86_64-linux-gnu/c++/N.N
......
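The hardcoded /usr/local/cuda/include entry removed from configure_search_path() above appears to be superseded by the CUDA_HEADER_PATHS value that the CMake change now folds into HEADER_SEARCH_DEFINES. A minimal sketch of that pattern, assuming the definition arrives as a CUDA_HEADER_PATHS macro and using a hypothetical helper collect_header_search_paths() (neither name is taken verbatim from this commit):

#include <string>
#include <vector>

// Hypothetical sketch: gather header search paths the way
// configure_search_path() does, but take the CUDA include directory from
// the CUDA_HEADER_PATHS compile definition (set per-source in CMake via
// HEADER_SEARCH_DEFINES) rather than the hardcoded /usr/local/cuda/include
// that this commit removes.
std::vector<std::string> collect_header_search_paths()
{
    std::vector<std::string> paths = {
        "/usr/include/x86_64-linux-gnu",
        "/usr/include",
    };
#ifdef CUDA_HEADER_PATHS
    // Only defined when the GPU backend is enabled, e.g.
    //   -DCUDA_HEADER_PATHS="/usr/local/cuda/include"
    paths.push_back(CUDA_HEADER_PATHS);
#endif
    return paths;
}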
@@ -50,26 +50,26 @@ using namespace std;
using namespace ngraph;
#define NVRTC_SAFE_CALL(x) \
do { \
nvrtcResult result = x; \
if (result != NVRTC_SUCCESS) { \
std::cerr << "\nerror: " #x " failed with error " \
#define NVRTC_SAFE_CALL(x) \
do { \
nvrtcResult result = x; \
if (result != NVRTC_SUCCESS) { \
std::cerr << "\nerror: " #x " failed with error " \
<< nvrtcGetErrorString(result) << '\n'; \
exit(1); \
exit(1); \
} \
} while(0)
#define CUDA_SAFE_CALL(x) \
do { \
CUresult result = x; \
if (result != CUDA_SUCCESS) { \
const char *msg; \
cuGetErrorName(result, &msg); \
std::cerr << "\nerror: " #x " failed with error " \
<< msg << '\n'; \
exit(1); \
} \
} while(0)
#define CUDA_SAFE_CALL(x) \
do { \
CUresult result = x; \
if (result != CUDA_SUCCESS) { \
const char *msg; \
cuGetErrorName(result, &msg); \
std::cerr << "\nerror: " #x " failed with error " \
<< msg << '\n'; \
exit(1); \
} \
} while(0)
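For context, these macros abort with the failing expression and its error string whenever an NVRTC or CUDA driver call does not succeed. A self-contained sketch of how they are typically used to JIT-compile and load a kernel; the kernel source and names below are illustrative only, not taken from this commit:

#include <cuda.h>
#include <nvrtc.h>
#include <iostream>
#include <string>

// NVRTC_SAFE_CALL and CUDA_SAFE_CALL as defined above.

int main()
{
    const char* source =
        "extern \"C\" __global__ void fill(float* out, float v, int n)\n"
        "{\n"
        "    int i = blockIdx.x * blockDim.x + threadIdx.x;\n"
        "    if (i < n) out[i] = v;\n"
        "}\n";

    // Compile CUDA C++ to PTX with NVRTC.
    nvrtcProgram prog;
    NVRTC_SAFE_CALL(nvrtcCreateProgram(&prog, source, "fill.cu", 0, nullptr, nullptr));
    NVRTC_SAFE_CALL(nvrtcCompileProgram(prog, 0, nullptr));
    size_t ptx_size;
    NVRTC_SAFE_CALL(nvrtcGetPTXSize(prog, &ptx_size));
    std::string ptx(ptx_size, '\0');
    NVRTC_SAFE_CALL(nvrtcGetPTX(prog, &ptx[0]));
    NVRTC_SAFE_CALL(nvrtcDestroyProgram(&prog));

    // Load the PTX and resolve the kernel through the driver API.
    CUdevice device;
    CUcontext context;
    CUmodule module;
    CUfunction kernel;
    CUDA_SAFE_CALL(cuInit(0));
    CUDA_SAFE_CALL(cuDeviceGet(&device, 0));
    CUDA_SAFE_CALL(cuCtxCreate(&context, 0, device));
    CUDA_SAFE_CALL(cuModuleLoadDataEx(&module, ptx.c_str(), 0, nullptr, nullptr));
    CUDA_SAFE_CALL(cuModuleGetFunction(&kernel, module, "fill"));
    std::cout << "compiled and loaded kernel 'fill'\n";

    CUDA_SAFE_CALL(cuModuleUnload(module));
    CUDA_SAFE_CALL(cuCtxDestroy(context));
    return 0;
}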
@@ -151,7 +151,7 @@ void runtime::gpu::GPU_Emitter::EmitDot(codegen::CodeWriter& writer,
const vector<runtime::gpu::GPU_TensorViewWrapper>& out)
{
writer << " // " << n->get_name() << "\n return;\n";
/*
const Shape& arg0_shape = args[0].get_shape();
const Shape& arg1_shape = args[1].get_shape();
if (arg0_shape.empty() || arg1_shape.empty())
@@ -256,7 +256,7 @@ else
{
// General ND Call?
}
*/
}
void runtime::gpu::GPU_Emitter::EmitDivide(codegen::CodeWriter& writer,
......
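EmitDot is stubbed out above: the emitter now writes only an op-name comment and an early return, and the previous shape-dispatch body is commented out ("General ND Call?"). For the common 2-D case such a kernel would typically be lowered to a GEMM; below is a self-contained cuBLAS sketch of that lowering, purely illustrative and not code emitted by this commit:

#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <iostream>
#include <vector>

// Illustrative 2-D Dot (m x k) . (k x n) lowered to cuBLAS SGEMM.
// cuBLAS is column-major, so row-major C = A * B is computed as
// C^T = B^T * A^T by passing B first; error checking omitted for brevity.
int main()
{
    const int m = 2, k = 3, n = 2;
    std::vector<float> a = {1, 2, 3, 4, 5, 6};    // m x k, row-major
    std::vector<float> b = {7, 8, 9, 10, 11, 12}; // k x n, row-major
    std::vector<float> c(m * n, 0.0f);

    float *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, a.size() * sizeof(float));
    cudaMalloc(&d_b, b.size() * sizeof(float));
    cudaMalloc(&d_c, c.size() * sizeof(float));
    cudaMemcpy(d_a, a.data(), a.size() * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b.data(), b.size() * sizeof(float), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);
    const float alpha = 1.0f, beta = 0.0f;
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                n, m, k, &alpha, d_b, n, d_a, k, &beta, d_c, n);

    cudaMemcpy(c.data(), d_c, c.size() * sizeof(float), cudaMemcpyDeviceToHost);
    for (float v : c)
        std::cout << v << " "; // expected: 58 64 139 154
    std::cout << "\n";

    cublasDestroy(handle);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}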