Unverified Commit aa915d4f authored by Scott Cyphers, committed by GitHub

Enable some warnings (#3608)

* Enable some warnings

* Unused param

* Unused params

* Unused params

* Unused params

* Unused params

* Unused param

* Unused params

* unused params

* unused params

* unused params

* unused params

* unused params

* unused params

* unused params
parent 5e607081
...@@ -38,10 +38,8 @@ endif()
 # should remove these
 add_compile_options(-Wno-float-conversion)
-add_compile_options(-Wno-sign-conversion)
 add_compile_options(-Wno-padded)
 add_compile_options(-Wno-sign-compare)
-add_compile_options(-Wno-unused-parameter)
 add_compile_options(-Wno-conversion)
 add_compile_options(-Wno-double-promotion)
 add_compile_options(-Wno-undefined-func-template)
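For context on the two lines removed above: dropping `-Wno-sign-conversion` and `-Wno-unused-parameter` re-enables those warnings, which is why every file below now silences its unused parameters. A minimal sketch of the two idioms the commit uses (function names here are hypothetical, not from the diff):

```cpp
#include <cstddef>

// Idiom 1: comment out the parameter name. The type stays in the
// signature (so overrides still match), but no named parameter is
// left to trigger -Wunused-parameter.
void recv_stub(void* /* in */, std::size_t /* count */)
{
    // intentionally unimplemented
}

// Idiom 2: cast the parameter to void in the body. Used when the
// name cannot be removed, e.g. when a macro declares the signature.
void layout_stub(int compiled_function)
{
    (void)compiled_function; // explicitly discarded
}

int main()
{
    recv_stub(nullptr, 0);
    layout_stub(42);
}
```

Commenting out the name is preferred at declarations; the `(void)` cast appears where the name is fixed by a macro.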
...@@ -138,15 +138,18 @@ namespace ngraph
            env.DeleteDistribution(distribution);
        }
-       void recv(void* in, element::Type_t element_type, size_t count, int src_id) override
+       void recv(void* /* in */,
+                 element::Type_t /* element_type */,
+                 size_t /* count */,
+                 int /* src_id */) override
        {
            throw ngraph_error("recv not supported/mentioned in MLSL");
        }
-       void send(const void* in,
-                 element::Type_t element_type,
-                 size_t count,
-                 int dest_id) override
+       void send(const void* /* in */,
+                 element::Type_t /* element_type */,
+                 size_t /* count */,
+                 int /* dest_id */) override
        {
            throw ngraph_error("send not supported/mentioned in MLSL");
        }
......
...@@ -1691,7 +1691,7 @@ size_t runtime::gpu::CUDAEmitter::build_softmax(const std::vector<element::Type> ...@@ -1691,7 +1691,7 @@ size_t runtime::gpu::CUDAEmitter::build_softmax(const std::vector<element::Type>
GPUAllocator allocator = this->m_primitive_emitter->get_memory_allocator(); GPUAllocator allocator = this->m_primitive_emitter->get_memory_allocator();
// (lazy) allocation for kernel arguments // (lazy) allocation for kernel arguments
size_t idx_init_value = allocator.reserve_argspace(init_value, dtypes[0].size()); size_t idx_init_value = allocator.reserve_argspace(init_value, dtypes[0].size());
std::unique_ptr<gpu::primitive> memset(new gpu::primitive{[=](void** inputs, std::unique_ptr<gpu::primitive> memset(new gpu::primitive{[=](void** /* inputs */,
void** outputs) mutable { void** outputs) mutable {
void* init_value_buff = runtime::gpu::invoke_memory_primitive(m_ctx, idx_init_value); void* init_value_buff = runtime::gpu::invoke_memory_primitive(m_ctx, idx_init_value);
gpu::invoke_primitive(m_ctx, gpu::invoke_primitive(m_ctx,
...@@ -2128,7 +2128,7 @@ size_t runtime::gpu::CUDAEmitter::build_reduce(const std::vector<element::Type>&
     // (lazy) allocation for kernel arguments
     size_t idx_init_value = allocator.reserve_argspace(init_value, data_bytes);
     std::unique_ptr<gpu::primitive> memset(
-        new gpu::primitive{[=](void** inputs, void** outputs) mutable {
+        new gpu::primitive{[=](void** /* inputs */, void** outputs) mutable {
             void* init_value_buff =
                 runtime::gpu::invoke_memory_primitive(m_ctx, idx_init_value);
             gpu::invoke_primitive(m_ctx,
......
...@@ -85,7 +85,9 @@ cudnnTensorDescriptor_t& runtime::gpu::CUDNNEmitter::tensor_descriptor_from_shap
 }
 cudnnTensorDescriptor_t& runtime::gpu::CUDNNEmitter::get_nd_tensor_descriptor(
-    const Shape& shape, const cudnnDataType_t data_type, const cudnnTensorFormat_t tensor_format)
+    const Shape& shape,
+    const cudnnDataType_t data_type,
+    const cudnnTensorFormat_t /* tensor_format */)
 {
     cudnnTensorDescriptor_t& desc = m_descriptors.build<cudnnTensorDescriptor_t>();
     std::vector<int> dimensions(shape.size());
...@@ -269,8 +271,8 @@ size_t runtime::gpu::CUDNNEmitter::build_reduce_forward(const cudnnReduceTensorO
     auto& cuda_emitter = m_primitive_emitter->get_cuda_emitter();
-    std::function<void(void**, void**)> convert_output = [](void** inputs, void** outputs) {
-    };
+    std::function<void(void**, void**)> convert_output = [](void** /* inputs */,
+                                                            void** /* outputs */) {};
     std::function<void*(void*)> convert_output_space = [](void* ptr) { return ptr; };
     if (output_type == element::i64)
     {
...@@ -280,13 +282,13 @@ size_t runtime::gpu::CUDNNEmitter::build_reduce_forward(const cudnnReduceTensorO
         convert_output = [=](void** inputs, void** outputs) {
             gpu::invoke_primitive(m_ctx, convert_idx, inputs, outputs);
         };
-        convert_output_space = [=](void* ptr) {
+        convert_output_space = [=](void* /* ptr */) {
             return runtime::gpu::invoke_memory_primitive(m_ctx, workspace_indices_idx);
         };
     }
-    std::function<void(void**, void**)> convert_input = [](void** inputs, void** outputs) {
-    };
+    std::function<void(void**, void**)> convert_input = [](void** /* inputs */,
+                                                           void** /* outputs */) {};
     std::function<void*(void*)> convert_input_space = [](void* ptr) { return ptr; };
     if (input_type == element::i32)
     {
...@@ -298,7 +300,7 @@ size_t runtime::gpu::CUDNNEmitter::build_reduce_forward(const cudnnReduceTensorO
         convert_input = [=](void** inputs, void** outputs) {
             gpu::invoke_primitive(m_ctx, convert_input_idx, inputs, outputs);
         };
-        convert_input_space = [=](void* ptr) {
+        convert_input_space = [=](void* /* ptr */) {
             return runtime::gpu::invoke_memory_primitive(m_ctx, input_idx);
         };
     }
...@@ -968,7 +970,7 @@ size_t runtime::gpu::CUDNNEmitter::build_primitive(const op::Max* node)
     size_t idx_float_inf =
         allocator.reserve_argspace(negative_inf.data(), negative_inf.size() * sizeof(float));
-    kernel_launch.reset(new gpu::primitive{[=](void** inputs, void** outputs) mutable {
+    kernel_launch.reset(new gpu::primitive{[=](void** /* inputs */, void** outputs) mutable {
         void* temp_d = runtime::gpu::invoke_memory_primitive(m_ctx, idx_float_inf);
         runtime::gpu::cuda_memcpyDtD(outputs[0], temp_d, output_size * output_element_size);
     }});
...@@ -1031,7 +1033,7 @@ size_t runtime::gpu::CUDNNEmitter::build_primitive(const op::Min* node)
     size_t idx_float_inf =
         allocator.reserve_argspace(negative_inf.data(), negative_inf.size() * sizeof(float));
-    kernel_launch.reset(new gpu::primitive{[=](void** inputs, void** outputs) mutable {
+    kernel_launch.reset(new gpu::primitive{[=](void** /* inputs */, void** outputs) mutable {
         void* temp_d = runtime::gpu::invoke_memory_primitive(m_ctx, idx_float_inf);
         runtime::gpu::cuda_memcpyDtD(outputs[0], temp_d, output_size * output_element_size);
     }});
......
...@@ -38,7 +38,7 @@ extern "C" runtime::BackendConstructor* get_backend_constructor_pointer()
    class LocalBackendConstructor : public runtime::BackendConstructor
    {
    public:
-       std::shared_ptr<runtime::Backend> create(const std::string& config) override
+       std::shared_ptr<runtime::Backend> create(const std::string& /* config */) override
        {
            return std::make_shared<runtime::gpu::GPU_Backend>();
        }
......
...@@ -60,7 +60,7 @@ void runtime::gpu::CudaKernelBuilder::get_elementwise_op(CodeWriter& writer,
 void runtime::gpu::CudaKernelBuilder::get_memset_op(CodeWriter& writer,
                                                     const std::string& name,
-                                                    const std::string& data_type,
+                                                    const std::string& /* data_type */,
                                                     runtime::gpu::GPUKernelArgs& args)
 {
     writer << "extern \"C\" __global__ void cuda_" << name << args.get_input_signature();
...@@ -1126,11 +1126,12 @@ void runtime::gpu::CudaKernelBuilder::get_onehot_op(CodeWriter& writer,
     writer.block_end();
 }
-void runtime::gpu::CudaKernelBuilder::get_reshape_op(CodeWriter& writer,
-                                                     const std::string& name,
-                                                     runtime::gpu::GPUKernelArgs& args,
-                                                     const std::array<std::string, 2>& data_types,
-                                                     size_t rank)
+void runtime::gpu::CudaKernelBuilder::get_reshape_op(
+    CodeWriter& writer,
+    const std::string& name,
+    runtime::gpu::GPUKernelArgs& args,
+    const std::array<std::string, 2>& /* data_types */,
+    size_t rank)
 {
     writer << "extern \"C\" __global__ void cuda_" << name << args.get_input_signature();
     writer.block_begin();
......
...@@ -44,6 +44,7 @@ namespace ngraph
                template <typename T>
                static std::string emit_elementwise(EMIT_ARGS)
                {
+                   (void)node;
                    if (out[0].get_size() == 0)
                    {
                        return "";
......
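A note on the `(void)node;` line added above: `emit_elementwise` takes its parameters via the EMIT_ARGS macro, so the unused `node` name cannot simply be commented out in the signature; the void-cast marks it as used instead. A self-contained sketch of the pattern, with a hypothetical macro standing in for nGraph's actual EMIT_ARGS:

```cpp
#include <string>

// Hypothetical stand-in for nGraph's EMIT_ARGS; the real macro expands
// to the emitter's full parameter list. The point is that the macro,
// not the function author, owns the parameter names.
#define EMIT_ARGS const char* node, const std::string& out

static std::string emit_stub(EMIT_ARGS)
{
    (void)node; // the name lives in the macro, so cast it away instead
    return out;
}

int main()
{
    std::string code = emit_stub("Add", "// kernel body");
    return code.empty() ? 1 : 0;
}
```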
...@@ -164,7 +164,7 @@ runtime::gpu::GPUExternalFunction::~GPUExternalFunction() ...@@ -164,7 +164,7 @@ runtime::gpu::GPUExternalFunction::~GPUExternalFunction()
std::string runtime::gpu::GPUExternalFunction::add_to_runtime( std::string runtime::gpu::GPUExternalFunction::add_to_runtime(
size_t primitive_index, size_t primitive_index,
const std::string& function_name, const std::string& /* function_name */,
const std::vector<runtime::gpu::GPUTensorWrapper>& args, const std::vector<runtime::gpu::GPUTensorWrapper>& args,
const std::vector<runtime::gpu::GPUTensorWrapper>& out) const std::vector<runtime::gpu::GPUTensorWrapper>& out)
{ {
...@@ -180,7 +180,7 @@ std::string runtime::gpu::GPUExternalFunction::add_to_runtime( ...@@ -180,7 +180,7 @@ std::string runtime::gpu::GPUExternalFunction::add_to_runtime(
} }
std::string runtime::gpu::GPUExternalFunction::add_call_to_runtime( std::string runtime::gpu::GPUExternalFunction::add_call_to_runtime(
const std::string& caller, const std::string& /* caller */,
const std::string& callee, const std::string& callee,
const std::vector<runtime::gpu::GPUTensorWrapper>& args, const std::vector<runtime::gpu::GPUTensorWrapper>& args,
const std::vector<runtime::gpu::GPUTensorWrapper>& out) const std::vector<runtime::gpu::GPUTensorWrapper>& out)
......
...@@ -377,7 +377,7 @@ void runtime::gpu::GPUInternalFunction::build_functions() ...@@ -377,7 +377,7 @@ void runtime::gpu::GPUInternalFunction::build_functions()
} }
} }
void runtime::gpu::GPUInternalFunction::add_passes(ngraph::pass::Manager& pass_manager) void runtime::gpu::GPUInternalFunction::add_passes(ngraph::pass::Manager& /* pass_manager */)
{ {
} }
...@@ -419,8 +419,8 @@ void runtime::gpu::GPUInternalFunction::save_manifest_to_disk() const ...@@ -419,8 +419,8 @@ void runtime::gpu::GPUInternalFunction::save_manifest_to_disk() const
out.close(); out.close();
} }
void runtime::gpu::GPUInternalFunction::propagate_in_place_input(ngraph::descriptor::Output* output, void runtime::gpu::GPUInternalFunction::propagate_in_place_input(
const std::string& input_name) ngraph::descriptor::Output* /* output */, const std::string& /* input_name */)
{ {
// std::deque<ngraph::descriptor::Output*> stack; // std::deque<ngraph::descriptor::Output*> stack;
// stack.push_front(output); // stack.push_front(output);
...@@ -459,7 +459,7 @@ void runtime::gpu::GPUInternalFunction::propagate_in_place_input(ngraph::descrip ...@@ -459,7 +459,7 @@ void runtime::gpu::GPUInternalFunction::propagate_in_place_input(ngraph::descrip
} }
void runtime::gpu::GPUInternalFunction::propagate_in_place_output( void runtime::gpu::GPUInternalFunction::propagate_in_place_output(
ngraph::descriptor::Output* res_src_output, const std::string& output_name) ngraph::descriptor::Output* /* res_src_output */, const std::string& /* output_name */)
{ {
// // we start with a particular output // // we start with a particular output
// // which is an argument to a given op::Result // // which is an argument to a given op::Result
...@@ -502,7 +502,7 @@ void runtime::gpu::GPUInternalFunction::propagate_in_place_output( ...@@ -502,7 +502,7 @@ void runtime::gpu::GPUInternalFunction::propagate_in_place_output(
} }
void runtime::gpu::GPUInternalFunction::get_performance_data( void runtime::gpu::GPUInternalFunction::get_performance_data(
std::vector<runtime::PerformanceCounter>& rc) const std::vector<runtime::PerformanceCounter>& /* rc */) const
{ {
// auto* engine = this->m_execution_engine.get(); // auto* engine = this->m_execution_engine.get();
// if (engine) // if (engine)
......
...@@ -74,14 +74,15 @@ size_t runtime::gpu::HostEmitter::build_zero_out(size_t dst, size_t size, bool i ...@@ -74,14 +74,15 @@ size_t runtime::gpu::HostEmitter::build_zero_out(size_t dst, size_t size, bool i
std::unique_ptr<gpu::primitive> launch_kernel; std::unique_ptr<gpu::primitive> launch_kernel;
if (is_local) if (is_local)
{ {
launch_kernel.reset(new gpu::primitive{[=](void** inputs, void** outputs) mutable { launch_kernel.reset(
void* tensor = gpu::invoke_memory_primitive(m_ctx, dst); new gpu::primitive{[=](void** /* inputs */, void** /* outputs */) mutable {
CUDA_RT_SAFE_CALL(cudaMemset(tensor, 0, size)); void* tensor = gpu::invoke_memory_primitive(m_ctx, dst);
}}); CUDA_RT_SAFE_CALL(cudaMemset(tensor, 0, size));
}});
} }
else else
{ {
launch_kernel.reset(new gpu::primitive{[=](void** inputs, void** outputs) mutable { launch_kernel.reset(new gpu::primitive{[=](void** /* inputs */, void** outputs) mutable {
CUDA_RT_SAFE_CALL(cudaMemset(outputs[dst], 0, size)); CUDA_RT_SAFE_CALL(cudaMemset(outputs[dst], 0, size));
}}); }});
} }
......
...@@ -41,6 +41,7 @@ namespace ngraph ...@@ -41,6 +41,7 @@ namespace ngraph
template <> template <>
void GPULayout::LAYOUT_DECL(ngraph::op::ReplaceSlice) void GPULayout::LAYOUT_DECL(ngraph::op::ReplaceSlice)
{ {
(void)compiled_function;
auto rep_slice = static_cast<ngraph::op::ReplaceSlice*>(node.get()); auto rep_slice = static_cast<ngraph::op::ReplaceSlice*>(node.get());
auto op_annotations = rep_slice->get_op_annotations(); auto op_annotations = rep_slice->get_op_annotations();
...@@ -60,6 +61,7 @@ namespace ngraph ...@@ -60,6 +61,7 @@ namespace ngraph
template <> template <>
void GPULayout::LAYOUT_DECL(ngraph::op::Reshape) void GPULayout::LAYOUT_DECL(ngraph::op::Reshape)
{ {
(void)compiled_function;
auto reshape = static_cast<ngraph::op::Reshape*>(node.get()); auto reshape = static_cast<ngraph::op::Reshape*>(node.get());
if (reshape->get_is_transpose()) if (reshape->get_is_transpose())
{ {
...@@ -84,6 +86,7 @@ namespace ngraph ...@@ -84,6 +86,7 @@ namespace ngraph
template <> template <>
void GPULayout::LAYOUT_DECL(ngraph::op::TopK) void GPULayout::LAYOUT_DECL(ngraph::op::TopK)
{ {
(void)compiled_function;
auto topk = std::dynamic_pointer_cast<ngraph::op::TopK>(node); auto topk = std::dynamic_pointer_cast<ngraph::op::TopK>(node);
auto topk_axis = topk->get_top_k_axis(); auto topk_axis = topk->get_top_k_axis();
auto topk_k = topk->get_k(); auto topk_k = topk->get_k();
......
...@@ -23,7 +23,7 @@ using namespace std; ...@@ -23,7 +23,7 @@ using namespace std;
// start 23,749,645 in 1,912 files // start 23,749,645 in 1,912 files
void skip_comment(istream& s) void skip_comment(istream& /* s */)
{ {
} }
......
...@@ -33,7 +33,7 @@ using namespace ngraph; ...@@ -33,7 +33,7 @@ using namespace ngraph;
TEST(gpu_test, gpu_shape_from_64bit_shape) TEST(gpu_test, gpu_shape_from_64bit_shape)
{ {
Shape shape{1UL << 33}; Shape shape{1UL << 33};
ASSERT_ANY_THROW([](NVShape s) {}(shape);); ASSERT_ANY_THROW([](NVShape /* s */) {}(shape););
} }
TEST(gpu_test, memory_manager_unallocated) TEST(gpu_test, memory_manager_unallocated)
......
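The `gpu_shape_from_64bit_shape` test above uses an immediately-invoked lambda so that `ASSERT_ANY_THROW` exercises the implicit `Shape` to `NVShape` conversion, which must reject dimensions that overflow 32 bits; the parameter name is commented out for the same warning. A runnable sketch of that pattern, with a hypothetical `Narrow32` standing in for `NVShape`:

```cpp
#include <gtest/gtest.h>
#include <cstdint>
#include <stdexcept>
#include <vector>

using Shape = std::vector<uint64_t>;

// Hypothetical 32-bit shape type standing in for NVShape: narrowing a
// dimension that does not fit in 32 bits throws.
struct Narrow32
{
    std::vector<uint32_t> dims;
    Narrow32(const Shape& s)
    {
        for (uint64_t d : s)
        {
            if (d > UINT32_MAX)
            {
                throw std::out_of_range("dimension exceeds 32 bits");
            }
            dims.push_back(static_cast<uint32_t>(d));
        }
    }
};

TEST(example, narrowing_shape_throws)
{
    Shape shape{1UL << 33};
    // The immediately-invoked lambda forces the implicit Shape -> Narrow32
    // conversion at the call site; the unnamed parameter keeps
    // -Wunused-parameter quiet.
    ASSERT_ANY_THROW([](Narrow32 /* s */) {}(shape));
}
```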