Commit 79802dcf authored by Robert Kimball, committed by Scott Cyphers

address issues which surface with clang 6.0 (#1980)

* address issues which surface with clang 6.0

* revert changes due to new clang warning and disable new warning
parent 8bd3846f
@@ -64,7 +64,7 @@ elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
message(FATAL_ERROR "Apple Clang version must be at least ${APPLE_CLANG_MIN_VERSION}!")
endif()
else()
-message(WARNING "You are using an unsupported compiler. Compilation has only been tested with Clang (${CLANG_MIN_VERSION} and up), Apple Clang (${APPLE_CLANG_MIN_VERSION} and up), and GCC (${GCC_MIN_VERSION} and up).")
+message(WARNING "You are using an unsupported compiler.")
endif()
# Prevent Eigen from using any LGPL3 code
@@ -168,7 +168,7 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (DEFINED NGRAPH_USE_CXX_ABI)
message( STATUS "nGraph using CXX11 ABI: " ${NGRAPH_USE_CXX_ABI} )
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_GLIBCXX_USE_CXX11_ABI=${NGRAPH_USE_CXX_ABI}")
endif()
endif()
endif()
ngraph_var(NGRAPH_WARNINGS_AS_ERRORS DEFAULT "OFF")
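For context on the `_GLIBCXX_USE_CXX11_ABI` hunk above: this macro selects between libstdc++'s old copy-on-write `std::string`/`std::list` and the C++11-conforming implementations, and every object linked into one binary must agree on it. A minimal sketch (hypothetical demo program, not nGraph code; assumes libstdc++) that reports which ABI a translation unit was compiled against:

```cpp
#include <iostream>

int main()
{
    // libstdc++ defines this macro in its headers: 1 selects the new
    // C++11-conforming ABI, 0 the old copy-on-write string ABI.
#ifdef _GLIBCXX_USE_CXX11_ABI
    std::cout << "_GLIBCXX_USE_CXX11_ABI=" << _GLIBCXX_USE_CXX11_ABI << "\n";
#else
    std::cout << "not building against libstdc++\n";
#endif
    return 0;
}
```

Building this once with `-D_GLIBCXX_USE_CXX11_ABI=0` and once with `=1` shows the flag taking effect, which is exactly what the `set(CMAKE_CXX_FLAGS ...)` line propagates.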
@@ -225,7 +225,7 @@ if(WIN32)
endif()
if (NGRAPH_CPU_ENABLE)
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DNGRAPH_CPU_ENABLE")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DNGRAPH_CPU_ENABLE")
endif()
if (NGRAPH_PLAIDML_ENABLE)
......
@@ -34,6 +34,11 @@ if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-zero-as-null-pointer-constant")
endif()
endif()
+if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "4.0.0")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-lambda-capture")
+endif()
+endif()
# # should remove these
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-old-style-cast")
......
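The two warnings being suppressed here are the ones clang 5/6 made noisy. A minimal sketch (hypothetical code, not from the nGraph sources) that trips both `-Wzero-as-null-pointer-constant` and `-Wunused-lambda-capture`:

```cpp
// Compile with: clang++ -std=c++11 -Wzero-as-null-pointer-constant
//                       -Wunused-lambda-capture demo.cpp
int main()
{
    // Warns: a literal 0 used where a pointer is expected; nullptr is the fix.
    const char* p = 0; // -Wzero-as-null-pointer-constant

    // Warns (clang 5+): 'n' is captured but never used in the lambda body.
    int n = 42;
    auto f = [n]() { return 7; }; // -Wunused-lambda-capture

    return f() + (p == nullptr ? 0 : 1);
}
```

The commit message's "revert changes due to new clang warning and disable new warning" matches the second case: rather than churn every lambda capture list, the warning is disabled for clang newer than 4.0.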
@@ -22,7 +22,7 @@ include(ExternalProject)
#------------------------------------------------------------------------------
SET(GTEST_GIT_REPO_URL https://github.com/google/googletest.git)
-SET(GTEST_GIT_LABEL release-1.8.0)
+SET(GTEST_GIT_LABEL release-1.8.1)
# The 'BUILD_BYPRODUCTS' argument was introduced in CMake 3.2.
if (${CMAKE_VERSION} VERSION_LESS 3.2)
......
@@ -158,7 +158,7 @@ void ngraph::pass::Manager::run_passes(shared_ptr<Function> func, bool transitiv
string name = typeid(*p).name();
#ifndef WIN32
int status;
-name = abi::__cxa_demangle(name.c_str(), 0, 0, &status);
+name = abi::__cxa_demangle(name.c_str(), nullptr, nullptr, &status);
#endif
cout << setw(7) << pass_timer.get_milliseconds() << "ms " << name << "\n";
}
......
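In `abi::__cxa_demangle`, the middle parameters are `char* output_buffer` and `size_t* length`, so the literal `0`s were exactly the pattern `-Wzero-as-null-pointer-constant` flags. A standalone sketch of the call (hypothetical wrapper; note the returned buffer is `malloc`'d and the result can be null on failure, which the hunk above does not check):

```cpp
#include <cxxabi.h>
#include <cstdlib>
#include <iostream>
#include <string>
#include <typeinfo>

// Demangle a type name, falling back to the mangled form on failure.
std::string demangle(const char* mangled)
{
    int status = 0;
    // nullptr for output_buffer/length asks the runtime to malloc a
    // buffer for us; we own it and must free it.
    char* buf = abi::__cxa_demangle(mangled, nullptr, nullptr, &status);
    std::string result = (status == 0 && buf) ? buf : mangled;
    std::free(buf); // free(nullptr) is a no-op
    return result;
}

int main()
{
    std::cout << demangle(typeid(std::string).name()) << "\n";
}
```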
@@ -202,9 +202,9 @@ size_t runtime::gpu::CUDAEmitter::build_concat(const std::string& dtype,
1,
1, // block dim
0,
-NULL, // shared mem and stream
+nullptr, // shared mem and stream
args_list.data(),
-0)); // arguments
+nullptr)); // arguments
debug_sync();
}
}});
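The long run of GPU-emitter hunks that follows is one mechanical change: in `cuLaunchKernel(f, gridX, gridY, gridZ, blockX, blockY, blockZ, sharedMemBytes, hStream, kernelParams, extra)`, the `hStream` (`CUstream`) and `kernelParams`/`extra` (`void**`) slots are pointer parameters, so clang 6's `-Wzero-as-null-pointer-constant` rejects a literal `0` or `NULL` there. A self-contained sketch of such a launch (hypothetical helper, with a stand-in for the project's `CUDA_SAFE_CALL` macro):

```cpp
#include <cuda.h>
#include <cstdio>
#include <cstdlib>

// Minimal stand-in for the project's CUDA_SAFE_CALL checking macro.
#define CUDA_SAFE_CALL(call)                                         \
    do                                                               \
    {                                                                \
        CUresult rc = (call);                                        \
        if (rc != CUDA_SUCCESS)                                      \
        {                                                            \
            std::fprintf(stderr, "CUDA driver error %d\n", (int)rc); \
            std::abort();                                            \
        }                                                            \
    } while (0)

// kernelParams is an array of pointers to the argument values; the
// stream and extra parameters are pointers, so nullptr (not 0) is the
// correct "empty" literal.
void launch(CUfunction f, CUdeviceptr in, CUdeviceptr out, unsigned n)
{
    void* args[] = {&in, &out, &n};
    CUDA_SAFE_CALL(cuLaunchKernel(f,
                                  (n + 255) / 256, 1, 1, // grid dim
                                  256, 1, 1,             // block dim
                                  0,                     // shared mem bytes
                                  nullptr,               // stream (default)
                                  args,                  // kernel arguments
                                  nullptr));             // extra (unused)
}
```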
@@ -310,9 +310,9 @@ size_t runtime::gpu::CUDAEmitter::build_topk(const std::vector<element::Type>& d
1,
1,
0,
-NULL, // stream
+nullptr, // stream
args_list,
-0)); // arguments
+nullptr)); // arguments
debug_sync();
}});
primitive_index = this->m_primitive_emitter->insert(std::move(kernel_launch));
@@ -334,9 +334,9 @@ size_t runtime::gpu::CUDAEmitter::build_topk(const std::vector<element::Type>& d
1,
1,
shared_data_bytes, // shared mem
-NULL, //stream
+nullptr, //stream
args_list,
-0)); // arguments
+nullptr)); // arguments
debug_sync();
}});
primitive_index = this->m_primitive_emitter->insert(std::move(kernel_launch));
@@ -406,9 +406,9 @@ size_t runtime::gpu::CUDAEmitter::build_onehot(const std::array<std::string, 2>&
1,
1, // block dim
0,
-NULL, // shared mem and stream
+nullptr, // shared mem and stream
args_list.data(),
-0)); // arguments
+nullptr)); // arguments
debug_sync();
}});
@@ -477,9 +477,9 @@ size_t runtime::gpu::CUDAEmitter::build_reverse(const std::array<std::string, 2>
1,
1, // block dim
0,
-NULL, // shared mem and stream
+nullptr, // shared mem and stream
args_list.data(),
-0)); // arguments
+nullptr)); // arguments
debug_sync();
}});
@@ -562,9 +562,9 @@ size_t runtime::gpu::CUDAEmitter::build_pad(const std::vector<std::string>& dtyp
1,
1, // block dim
0,
-NULL, // shared mem and stream
+nullptr, // shared mem and stream
args_list,
-0)); // arguments
+nullptr)); // arguments
debug_sync();
}});
@@ -650,9 +650,9 @@ size_t runtime::gpu::CUDAEmitter::build_pad_fill(const std::vector<std::string>&
1,
1, // block dim
0,
-NULL, // shared mem and stream
+nullptr, // shared mem and stream
args_list,
-0)); // arguments
+nullptr)); // arguments
debug_sync();
}});
@@ -734,9 +734,9 @@ size_t runtime::gpu::CUDAEmitter::build_reshape(const std::array<std::string, 2>
1,
1, // block dim
0,
-NULL, // shared mem and stream
+nullptr, // shared mem and stream
args_list,
-0)); // arguments
+nullptr)); // arguments
debug_sync();
}});
@@ -820,9 +820,9 @@ size_t runtime::gpu::CUDAEmitter::build_reshape_2d(const std::array<std::string,
block_size,
1, // block dim
0,
-NULL, // shared mem and stream
+nullptr, // shared mem and stream
args_list,
-0)); // arguments
+nullptr)); // arguments
debug_sync();
}});
@@ -910,9 +910,9 @@ size_t runtime::gpu::CUDAEmitter::build_reshape_3d(const std::array<std::string,
block_size[1],
block_size[2], // block dim
0,
-NULL, // shared mem and stream
+nullptr, // shared mem and stream
args_list,
-0)); // arguments
+nullptr)); // arguments
debug_sync();
}});
@@ -997,9 +997,9 @@ size_t runtime::gpu::CUDAEmitter::build_slice(const std::array<std::string, 2>&
1,
1, // block dim
0,
-NULL, // shared mem and stream
+nullptr, // shared mem and stream
args_list.data(),
-0)); // arguments
+nullptr)); // arguments
debug_sync();
}});
@@ -1078,9 +1078,9 @@ size_t runtime::gpu::CUDAEmitter::build_reverse_sequence(const std::array<std::s
1,
1, // block dim
0,
-NULL, // shared mem and stream
+nullptr, // shared mem and stream
args_list.data(),
-0)); // arguments
+nullptr)); // arguments
debug_sync();
}});
@@ -1136,9 +1136,9 @@ size_t runtime::gpu::CUDAEmitter::build_1d_max_pool(const std::array<std::string
1,
1, // block dim
0,
-NULL, // shared mem and stream
+nullptr, // shared mem and stream
args_list,
-0)); // arguments
+nullptr)); // arguments
debug_sync();
}});
@@ -1322,9 +1322,9 @@ size_t runtime::gpu::CUDAEmitter::build_avg_pool(const std::array<std::string, 2
1,
1,
0,
-NULL,
+nullptr,
args_list,
-0));
+nullptr));
debug_sync();
}});
@@ -1396,9 +1396,9 @@ size_t runtime::gpu::CUDAEmitter::build_elementwise_n_to_1(const std::vector<std
1,
1, // block dim
0,
-NULL, // shared mem and stream
+nullptr, // shared mem and stream
args_list.data(),
-0)); // arguments
+nullptr)); // arguments
debug_sync();
}});
@@ -1461,9 +1461,9 @@ size_t runtime::gpu::CUDAEmitter::build_cudnn_bn_inv_var(const std::vector<std::
1,
1, // block dim
0,
-NULL, // shared mem and stream
+nullptr, // shared mem and stream
args_list,
-0)); // arguments
+nullptr)); // arguments
debug_sync();
}});
@@ -1634,9 +1634,9 @@ size_t runtime::gpu::CUDAEmitter::build_softmax_divide(const std::vector<std::st
1,
1, // block dim
0,
-NULL, // shared mem and stream
+nullptr, // shared mem and stream
arg_list.data(),
-0)); // arguments
+nullptr)); // arguments
debug_sync();
}});
@@ -1736,9 +1736,9 @@ size_t runtime::gpu::CUDAEmitter::build_reduce_to_nd(const std::vector<std::stri
1,
1,
0,
-NULL,
+nullptr,
args_list,
-0));
+nullptr));
debug_sync();
}});
@@ -1812,9 +1812,9 @@ size_t runtime::gpu::CUDAEmitter::build_reduce_to_scalar(const std::vector<std::
1,
1,
shared_data_bytes,
-NULL,
+nullptr,
args_list,
-0));
+nullptr));
debug_sync();
}});
@@ -1880,9 +1880,9 @@ size_t runtime::gpu::CUDAEmitter::build_reduce_to_scalar_acc(const std::vector<s
1,
1,
0,
-NULL,
+nullptr,
args_list,
-0));
+nullptr));
}});
return this->m_primitive_emitter->register_primitive(reduce_acc, hash);
@@ -2179,9 +2179,9 @@ size_t
1,
1,
0,
-NULL,
+nullptr,
args_list,
-0));
+nullptr));
debug_sync();
}});
@@ -2195,8 +2195,8 @@ size_t runtime::gpu::CUDAEmitter::build_reduce_window(const OpName op_name,
NVShape reduce_window_shape,
NVShape reduce_window_strides)
{
-const char* op = NULL;
-const char* kernel = NULL;
+const char* op = nullptr;
+const char* kernel = nullptr;
switch (op_name)
{
case OpName::add:
@@ -2274,7 +2274,7 @@ size_t runtime::gpu::CUDAEmitter::build_reduce_window(const OpName op_name,
void* param_reduce_window_strides =
runtime::gpu::invoke_memory_primitive(m_ctx, idx_reduce_window_strides);
-std::vector<void*> args_list(7, NULL);
+std::vector<void*> args_list(7, nullptr);
args_list[0] = &inputs[0];
args_list[1] = &outputs[0];
args_list[2] = &param_input_strides;
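One detail worth noting in this hunk: each `args_list` element holds the address of the variable storing an argument, not the argument value itself; the driver dereferences each slot at launch time. A small hypothetical sketch of the same pattern:

```cpp
#include <cuda.h>
#include <vector>

// The caller must keep 'in', 'out', and 'nthreads' alive until the
// launch has consumed them.
std::vector<void*> make_args(CUdeviceptr& in, CUdeviceptr& out, unsigned& nthreads)
{
    std::vector<void*> args_list(3, nullptr); // nullptr, not NULL or 0
    args_list[0] = &in;       // address of the device pointer
    args_list[1] = &out;
    args_list[2] = &nthreads; // scalar arguments work the same way
    return args_list;
}
```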
@@ -2291,9 +2291,9 @@ size_t runtime::gpu::CUDAEmitter::build_reduce_window(const OpName op_name,
1,
1, // block dim
0,
-NULL, // shared mem and stream
+nullptr, // shared mem and stream
args_list.data(),
-0)); // arguments
+nullptr)); // arguments
debug_sync();
}});
@@ -2391,9 +2391,9 @@ size_t runtime::gpu::CUDAEmitter::build_broadcast(const std::array<std::string,
1,
1,
0,
-NULL,
+nullptr,
args_list,
-0));
+nullptr));
debug_sync();
}});
@@ -2797,9 +2797,9 @@ size_t runtime::gpu::CUDAEmitter::build_convolution(const std::array<std::string
threads.y,
threads.z,
0,
-NULL,
+nullptr,
args_list,
-0));
+nullptr));
debug_sync();
}});
......
@@ -77,7 +77,7 @@ std::shared_ptr<CUfunction> runtime::gpu::CudaFunctionBuilder::get(const std::st
// extract the compiled function
CUmodule module;
CUfunction function;
-CUDA_SAFE_CALL(cuModuleLoadDataEx(&module, ptx, 0, 0, 0));
+CUDA_SAFE_CALL(cuModuleLoadDataEx(&module, ptx, 0, nullptr, nullptr));
CUDA_SAFE_CALL(cuModuleGetFunction(&function, module, name.c_str()));
return std::make_shared<CUfunction>(function);
}
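`cuModuleLoadDataEx(&module, image, numOptions, options, optionValues)` takes `CUjit_option*` and `void**` for its last two parameters, so with `numOptions == 0` the idiomatic empty value is `nullptr`. A sketch of the surrounding flow (hypothetical helper, reusing the stand-in `CUDA_SAFE_CALL` from the earlier sketch):

```cpp
#include <cuda.h>

// Load a PTX image already in memory and pull out one kernel handle.
CUfunction load_kernel(const char* ptx, const char* name)
{
    CUmodule module;
    CUfunction function;
    // numOptions = 0, so both option arrays are absent: pass nullptr.
    CUDA_SAFE_CALL(cuModuleLoadDataEx(&module, ptx, 0, nullptr, nullptr));
    CUDA_SAFE_CALL(cuModuleGetFunction(&function, module, name));
    return function;
}
```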
@@ -152,7 +152,7 @@ namespace ngraph
static false_type check(...);
public:
-static const bool value = sizeof(check<T>(0)) == sizeof(true_type);
+static const bool value = sizeof(check<T>(nullptr)) == sizeof(true_type);
typedef T type;
};
......
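This hunk is the classic `sizeof`-based member-detection idiom: the preferred `check` overload takes a pointer parameter, so `nullptr` is the honest literal for the probe argument. A self-contained sketch of the same idiom (hypothetical trait mirroring the structure above, not the nGraph one):

```cpp
#include <iostream>
#include <vector>

using true_type = char;
using false_type = long;

// Detects whether T has a nested type T::value_type.
template <typename T>
class has_value_type
{
    // Preferred when U::value_type exists; the parameter is a pointer,
    // hence the nullptr probe below.
    template <typename U>
    static true_type check(typename U::value_type*);
    // Fallback for everything else.
    template <typename U>
    static false_type check(...);

public:
    static const bool value = sizeof(check<T>(nullptr)) == sizeof(true_type);
};

int main()
{
    std::cout << has_value_type<std::vector<int>>::value << "\n"; // 1
    std::cout << has_value_type<int>::value << "\n";              // 0
}
```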
@@ -38,7 +38,7 @@ class ngraph::runtime::gpu::GPUTensor : public ngraph::runtime::Tensor
public:
GPUTensor(const ngraph::element::Type& element_type, const Shape& shape);
GPUTensor(const ngraph::element::Type& element_type, const Shape& shape, void* memory_pointer);
-virtual ~GPUTensor();
+virtual ~GPUTensor() override;
/// \brief Write bytes directly into the tensor
/// \param p Pointer to source of data
......
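Marking the destructor `override` makes the compiler verify that the base class destructor really is virtual; newer clangs flag inconsistent usage via checks like `-Winconsistent-missing-destructor-override`. A toy illustration of what the change asserts:

```cpp
struct Tensor
{
    virtual ~Tensor() = default; // virtual base destructor
};

struct MyGPUTensor : Tensor
{
    // 'override' compiles only because ~Tensor() is virtual, so that
    // assumption is now compiler-checked. ('virtual' is redundant next
    // to 'override' but matches the declaration in the hunk above.)
    virtual ~MyGPUTensor() override = default;
};

int main()
{
    Tensor* t = new MyGPUTensor;
    delete t; // dispatches to ~MyGPUTensor thanks to the virtual dtor
    return 0;
}
```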
@@ -27,7 +27,7 @@ namespace ngraph
{
void print_gpu_f32_tensor(const void* p, size_t element_count, size_t element_size);
void check_cuda_errors(CUresult err);
-void* create_gpu_buffer(size_t buffer_size, const void* data = NULL);
+void* create_gpu_buffer(size_t buffer_size, const void* data = nullptr);
void free_gpu_buffer(void* buffer);
void cuda_memcpyDtD(void* dst, const void* src, size_t buffer_size);
void cuda_memcpyHtD(void* dst, const void* src, size_t buffer_size);
......
@@ -14,6 +14,14 @@
# limitations under the License.
# ******************************************************************************
+if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "4.0.0")
+# gtest has issues with this with v1.8.x
+# gtest issue is supposed to be addressed after v1.8.x
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-zero-as-null-pointer-constant")
+endif()
+endif()
set(SRC
algebraic_simplification.cpp
all_close_f.cpp
......
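The suppression above is scoped to the test directory because the warning comes from gtest 1.8.x's own headers rather than from nGraph's test sources; merely expanding the assertion macros can trigger it. A hypothetical minimal test illustrating the setup:

```cpp
#include <gtest/gtest.h>

// Nothing here uses 0-as-null, yet with gtest 1.8.x headers the macro
// expansions themselves could surface -Wzero-as-null-pointer-constant,
// hence the per-directory -Wno-... flag above.
TEST(WarningsDemo, NullptrComparison)
{
    int* p = nullptr;
    EXPECT_EQ(nullptr, p);
}
```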