Unverified Commit aa915d4f authored by Scott Cyphers, committed by GitHub

Enable some warnings (#3608)

* Enable some warnings

* Unused param

* Unused params

* Unused params

* Unused params

* Unused params

* Unused param

* Unused params

* unused params

* unused params

* unused params

* unused params

* unused params

* unused params

* unused params
parent 5e607081
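
The CMake hunk below shortens the block of suppressed warnings (it drops two of the `-Wno-...` entries, presumably including `-Wno-unused-parameter`), and the remaining hunks annotate the parameters that are now reported as unused. A minimal sketch of the main idiom, with hypothetical names rather than nGraph code: keep the parameter in the signature, since an override cannot drop it, and comment out its name so the compiler treats it as deliberately unused; where the parameter list is fixed by a macro, the body casts each unused argument to void instead (see the sketch further down).

#include <cstddef>
#include <stdexcept>

// Hypothetical interface, not nGraph code.
class Distributed
{
public:
    virtual ~Distributed() = default;
    virtual void recv(void* in, std::size_t count, int src_id) = 0;
};

class NullDistributed : public Distributed
{
public:
    // The override must keep the full signature; commenting out the names is
    // enough to satisfy -Wunused-parameter when the body never touches them.
    void recv(void* /* in */, std::size_t /* count */, int /* src_id */) override
    {
        throw std::runtime_error("recv not supported");
    }
};
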
@@ -38,10 +38,8 @@ endif()
# should remove these
add_compile_options(-Wno-float-conversion)
add_compile_options(-Wno-sign-conversion)
add_compile_options(-Wno-padded)
add_compile_options(-Wno-sign-compare)
add_compile_options(-Wno-unused-parameter)
add_compile_options(-Wno-conversion)
add_compile_options(-Wno-double-promotion)
add_compile_options(-Wno-undefined-func-template)
@@ -138,15 +138,18 @@ namespace ngraph
env.DeleteDistribution(distribution);
}
void recv(void* in, element::Type_t element_type, size_t count, int src_id) override
void recv(void* /* in */,
element::Type_t /* element_type */,
size_t /* count */,
int /* src_id */) override
{
throw ngraph_error("recv not supported/mentioned in MLSL");
}
void send(const void* in,
element::Type_t element_type,
size_t count,
int dest_id) override
void send(const void* /* in */,
element::Type_t /* element_type */,
size_t /* count */,
int /* dest_id */) override
{
throw ngraph_error("send not supported/mentioned in MLSL");
}
@@ -1691,7 +1691,7 @@ size_t runtime::gpu::CUDAEmitter::build_softmax(const std::vector<element::Type>
GPUAllocator allocator = this->m_primitive_emitter->get_memory_allocator();
// (lazy) allocation for kernel arguments
size_t idx_init_value = allocator.reserve_argspace(init_value, dtypes[0].size());
std::unique_ptr<gpu::primitive> memset(new gpu::primitive{[=](void** inputs,
std::unique_ptr<gpu::primitive> memset(new gpu::primitive{[=](void** /* inputs */,
void** outputs) mutable {
void* init_value_buff = runtime::gpu::invoke_memory_primitive(m_ctx, idx_init_value);
gpu::invoke_primitive(m_ctx,
@@ -2128,7 +2128,7 @@ size_t runtime::gpu::CUDAEmitter::build_reduce(const std::vector<element::Type>&
// (lazy) allocation for kernel arguments
size_t idx_init_value = allocator.reserve_argspace(init_value, data_bytes);
std::unique_ptr<gpu::primitive> memset(
new gpu::primitive{[=](void** inputs, void** outputs) mutable {
new gpu::primitive{[=](void** /* inputs */, void** outputs) mutable {
void* init_value_buff =
runtime::gpu::invoke_memory_primitive(m_ctx, idx_init_value);
gpu::invoke_primitive(m_ctx,
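
The CUDA emitter hunks above build `gpu::primitive` callables from lambdas; when a lambda ignores its `inputs` (or `outputs`) array, its parameter name is simply commented out inside the lambda's parameter list. A simplified sketch with stand-in types (the real primitives capture emitter and CUDA context state that is omitted here):

#include <cstddef>
#include <cstring>
#include <functional>
#include <memory>

// Stand-in for the primitive callable type: takes input and output buffers.
using primitive = std::function<void(void**, void**)>;

// Builds a primitive that zero-fills its first output and never reads any
// input, so the first parameter's name is commented out in the lambda.
std::unique_ptr<primitive> make_zero_fill(std::size_t bytes)
{
    return std::make_unique<primitive>(
        [bytes](void** /* inputs */, void** outputs) {
            std::memset(outputs[0], 0, bytes);
        });
}
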
@@ -85,7 +85,9 @@ cudnnTensorDescriptor_t& runtime::gpu::CUDNNEmitter::tensor_descriptor_from_shap
}
cudnnTensorDescriptor_t& runtime::gpu::CUDNNEmitter::get_nd_tensor_descriptor(
const Shape& shape, const cudnnDataType_t data_type, const cudnnTensorFormat_t tensor_format)
const Shape& shape,
const cudnnDataType_t data_type,
const cudnnTensorFormat_t /* tensor_format */)
{
cudnnTensorDescriptor_t& desc = m_descriptors.build<cudnnTensorDescriptor_t>();
std::vector<int> dimensions(shape.size());
@@ -269,8 +271,8 @@ size_t runtime::gpu::CUDNNEmitter::build_reduce_forward(const cudnnReduceTensorO
auto& cuda_emitter = m_primitive_emitter->get_cuda_emitter();
std::function<void(void**, void**)> convert_output = [](void** inputs, void** outputs) {
};
std::function<void(void**, void**)> convert_output = [](void** /* inputs */,
void** /* outputs */) {};
std::function<void*(void*)> convert_output_space = [](void* ptr) { return ptr; };
if (output_type == element::i64)
{
@@ -280,13 +282,13 @@ size_t runtime::gpu::CUDNNEmitter::build_reduce_forward(const cudnnReduceTensorO
convert_output = [=](void** inputs, void** outputs) {
gpu::invoke_primitive(m_ctx, convert_idx, inputs, outputs);
};
convert_output_space = [=](void* ptr) {
convert_output_space = [=](void* /* ptr */) {
return runtime::gpu::invoke_memory_primitive(m_ctx, workspace_indices_idx);
};
}
std::function<void(void**, void**)> convert_input = [](void** inputs, void** outputs) {
};
std::function<void(void**, void**)> convert_input = [](void** /* inputs */,
void** /* outputs */) {};
std::function<void*(void*)> convert_input_space = [](void* ptr) { return ptr; };
if (input_type == element::i32)
{
@@ -298,7 +300,7 @@ size_t runtime::gpu::CUDNNEmitter::build_reduce_forward(const cudnnReduceTensorO
convert_input = [=](void** inputs, void** outputs) {
gpu::invoke_primitive(m_ctx, convert_input_idx, inputs, outputs);
};
convert_input_space = [=](void* ptr) {
convert_input_space = [=](void* /* ptr */) {
return runtime::gpu::invoke_memory_primitive(m_ctx, input_idx);
};
}
@@ -968,7 +970,7 @@ size_t runtime::gpu::CUDNNEmitter::build_primitive(const op::Max* node)
size_t idx_float_inf =
allocator.reserve_argspace(negative_inf.data(), negative_inf.size() * sizeof(float));
kernel_launch.reset(new gpu::primitive{[=](void** inputs, void** outputs) mutable {
kernel_launch.reset(new gpu::primitive{[=](void** /* inputs */, void** outputs) mutable {
void* temp_d = runtime::gpu::invoke_memory_primitive(m_ctx, idx_float_inf);
runtime::gpu::cuda_memcpyDtD(outputs[0], temp_d, output_size * output_element_size);
}});
@@ -1031,7 +1033,7 @@ size_t runtime::gpu::CUDNNEmitter::build_primitive(const op::Min* node)
size_t idx_float_inf =
allocator.reserve_argspace(negative_inf.data(), negative_inf.size() * sizeof(float));
kernel_launch.reset(new gpu::primitive{[=](void** inputs, void** outputs) mutable {
kernel_launch.reset(new gpu::primitive{[=](void** /* inputs */, void** outputs) mutable {
void* temp_d = runtime::gpu::invoke_memory_primitive(m_ctx, idx_float_inf);
runtime::gpu::cuda_memcpyDtD(outputs[0], temp_d, output_size * output_element_size);
}});
@@ -38,7 +38,7 @@ extern "C" runtime::BackendConstructor* get_backend_constructor_pointer()
class LocalBackendConstructor : public runtime::BackendConstructor
{
public:
std::shared_ptr<runtime::Backend> create(const std::string& config) override
std::shared_ptr<runtime::Backend> create(const std::string& /* config */) override
{
return std::make_shared<runtime::gpu::GPU_Backend>();
}
@@ -60,7 +60,7 @@ void runtime::gpu::CudaKernelBuilder::get_elementwise_op(CodeWriter& writer,
void runtime::gpu::CudaKernelBuilder::get_memset_op(CodeWriter& writer,
const std::string& name,
const std::string& data_type,
const std::string& /* data_type */,
runtime::gpu::GPUKernelArgs& args)
{
writer << "extern \"C\" __global__ void cuda_" << name << args.get_input_signature();
@@ -1126,11 +1126,12 @@ void runtime::gpu::CudaKernelBuilder::get_onehot_op(CodeWriter& writer,
writer.block_end();
}
void runtime::gpu::CudaKernelBuilder::get_reshape_op(CodeWriter& writer,
const std::string& name,
runtime::gpu::GPUKernelArgs& args,
const std::array<std::string, 2>& data_types,
size_t rank)
void runtime::gpu::CudaKernelBuilder::get_reshape_op(
CodeWriter& writer,
const std::string& name,
runtime::gpu::GPUKernelArgs& args,
const std::array<std::string, 2>& /* data_types */,
size_t rank)
{
writer << "extern \"C\" __global__ void cuda_" << name << args.get_input_signature();
writer.block_begin();
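
The GPU_Emitter hunks that follow all share a parameter list spelled out by the `EMIT_ARGS` macro, so an emitter that uses none of the arguments cannot rename or drop them; the commit marks them as deliberately unused with `(void)` casts instead. A sketch of that pattern (the macro expansion below is invented for illustration and does not match the real `EMIT_ARGS`):

#include <string>

// Invented stand-in for EMIT_ARGS: a macro that fixes the parameter list
// shared by every emitter function.
#define EMIT_ARGS_SKETCH                                                      \
    (void* compiled_function, const std::string& function_name, void* args,  \
     void* out)

std::string emit_Unsupported EMIT_ARGS_SKETCH
{
    // The signature is dictated by the macro; casting each argument to void
    // silences -Wunused-parameter without touching the shared parameter list.
    (void)compiled_function;
    (void)function_name;
    (void)args;
    (void)out;
    return "";
}
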
@@ -181,11 +181,19 @@ std::string runtime::gpu::GPU_Emitter::emit_Add(EMIT_ARGS)
std::string runtime::gpu::GPU_Emitter::emit_All(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_AllReduce(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
@@ -196,6 +204,10 @@ std::string runtime::gpu::GPU_Emitter::emit_And(EMIT_ARGS)
std::string runtime::gpu::GPU_Emitter::emit_Any(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
@@ -343,6 +355,10 @@ std::string runtime::gpu::GPU_Emitter::emit_AvgPoolBackprop(EMIT_ARGS)
std::string runtime::gpu::GPU_Emitter::emit_BatchMatMul(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
@@ -449,6 +465,10 @@ std::string runtime::gpu::GPU_Emitter::emit_Broadcast(EMIT_ARGS)
std::string runtime::gpu::GPU_Emitter::emit_BroadcastLike(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
@@ -481,6 +501,11 @@ std::string runtime::gpu::GPU_Emitter::emit_Concat(EMIT_ARGS)
std::string runtime::gpu::GPU_Emitter::emit_Constant(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)node;
(void)args;
(void)out;
return "";
}
@@ -567,6 +592,10 @@ std::string runtime::gpu::GPU_Emitter::emit_Cosh(EMIT_ARGS)
std::string runtime::gpu::GPU_Emitter::emit_BroadcastDistributed(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
@@ -577,6 +606,10 @@ std::string runtime::gpu::GPU_Emitter::emit_Divide(EMIT_ARGS)
std::string runtime::gpu::GPU_Emitter::emit_Dequantize(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
@@ -616,21 +649,38 @@ std::string runtime::gpu::GPU_Emitter::emit_Dot(EMIT_ARGS)
std::string runtime::gpu::GPU_Emitter::emit_DynReplaceSlice(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_DynReshape(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_DynSlice(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_EmbeddingLookup(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
(void)node;
throw ngraph_error("EmbeddingLookup is not yet implemented for NVIDIA GPU");
}
@@ -641,6 +691,10 @@ std::string runtime::gpu::GPU_Emitter::emit_Equal(EMIT_ARGS)
std::string runtime::gpu::GPU_Emitter::emit_Erf(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
@@ -656,16 +710,29 @@ std::string runtime::gpu::GPU_Emitter::emit_Floor(EMIT_ARGS)
std::string runtime::gpu::GPU_Emitter::emit_Gather(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_GatherND(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_GenerateMask(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)node;
(void)args;
(void)out;
throw ngraph_error("GenerateMask is not supported yet on NVIDIA GPU");
}
@@ -922,11 +989,20 @@ std::string runtime::gpu::GPU_Emitter::emit_Pad(EMIT_ARGS)
std::string runtime::gpu::GPU_Emitter::emit_Parameter(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)node;
(void)args;
(void)out;
return "";
}
std::string runtime::gpu::GPU_Emitter::emit_Passthrough(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
@@ -956,61 +1032,109 @@ std::string runtime::gpu::GPU_Emitter::emit_Product(EMIT_ARGS)
std::string runtime::gpu::GPU_Emitter::emit_Quantize(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_QuantizedAvgPool(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_QuantizedConvolution(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_QuantizedConvolutionBias(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_QuantizedConvolutionBiasAdd(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_QuantizedConvolutionBiasSignedAdd(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_QuantizedConvolutionRelu(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_QuantizedDot(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_QuantizedDotBias(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_QuantizedMaxPool(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_Recv(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_Range(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
@@ -1246,16 +1370,28 @@ std::string runtime::gpu::GPU_Emitter::emit_Rnn(EMIT_ARGS)
std::string runtime::gpu::GPU_Emitter::emit_ScalarConstantLike(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_ScatterAdd(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_ScatterNDAdd(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
@@ -1266,11 +1402,19 @@ std::string runtime::gpu::GPU_Emitter::emit_Select(EMIT_ARGS)
std::string runtime::gpu::GPU_Emitter::emit_Send(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_ShapeOf(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
@@ -1353,6 +1497,10 @@ std::string runtime::gpu::GPU_Emitter::emit_Sqrt(EMIT_ARGS)
std::string runtime::gpu::GPU_Emitter::emit_StopGradient(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
@@ -1466,26 +1614,46 @@ std::string runtime::gpu::GPU_Emitter::emit_TopK(EMIT_ARGS)
std::string runtime::gpu::GPU_Emitter::emit_Xor(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_DynBroadcast(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_DynPad(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_Tile(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_Transpose(EMIT_ARGS)
{
(void)compiled_function;
(void)function_name;
(void)args;
(void)out;
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
@@ -44,6 +44,7 @@ namespace ngraph
template <typename T>
static std::string emit_elementwise(EMIT_ARGS)
{
(void)node;
if (out[0].get_size() == 0)
{
return "";
@@ -164,7 +164,7 @@ runtime::gpu::GPUExternalFunction::~GPUExternalFunction()
std::string runtime::gpu::GPUExternalFunction::add_to_runtime(
size_t primitive_index,
const std::string& function_name,
const std::string& /* function_name */,
const std::vector<runtime::gpu::GPUTensorWrapper>& args,
const std::vector<runtime::gpu::GPUTensorWrapper>& out)
{
@@ -180,7 +180,7 @@ std::string runtime::gpu::GPUExternalFunction::add_to_runtime(
}
std::string runtime::gpu::GPUExternalFunction::add_call_to_runtime(
const std::string& caller,
const std::string& /* caller */,
const std::string& callee,
const std::vector<runtime::gpu::GPUTensorWrapper>& args,
const std::vector<runtime::gpu::GPUTensorWrapper>& out)
@@ -377,7 +377,7 @@ void runtime::gpu::GPUInternalFunction::build_functions()
}
}
void runtime::gpu::GPUInternalFunction::add_passes(ngraph::pass::Manager& pass_manager)
void runtime::gpu::GPUInternalFunction::add_passes(ngraph::pass::Manager& /* pass_manager */)
{
}
@@ -419,8 +419,8 @@ void runtime::gpu::GPUInternalFunction::save_manifest_to_disk() const
out.close();
}
void runtime::gpu::GPUInternalFunction::propagate_in_place_input(ngraph::descriptor::Output* output,
const std::string& input_name)
void runtime::gpu::GPUInternalFunction::propagate_in_place_input(
ngraph::descriptor::Output* /* output */, const std::string& /* input_name */)
{
// std::deque<ngraph::descriptor::Output*> stack;
// stack.push_front(output);
@@ -459,7 +459,7 @@ void runtime::gpu::GPUInternalFunction::propagate_in_place_input(ngraph::descrip
}
void runtime::gpu::GPUInternalFunction::propagate_in_place_output(
ngraph::descriptor::Output* res_src_output, const std::string& output_name)
ngraph::descriptor::Output* /* res_src_output */, const std::string& /* output_name */)
{
// // we start with a particular output
// // which is an argument to a given op::Result
@@ -502,7 +502,7 @@ void runtime::gpu::GPUInternalFunction::propagate_in_place_output(
}
void runtime::gpu::GPUInternalFunction::get_performance_data(
std::vector<runtime::PerformanceCounter>& rc) const
std::vector<runtime::PerformanceCounter>& /* rc */) const
{
// auto* engine = this->m_execution_engine.get();
// if (engine)
@@ -74,14 +74,15 @@ size_t runtime::gpu::HostEmitter::build_zero_out(size_t dst, size_t size, bool i
std::unique_ptr<gpu::primitive> launch_kernel;
if (is_local)
{
launch_kernel.reset(new gpu::primitive{[=](void** inputs, void** outputs) mutable {
void* tensor = gpu::invoke_memory_primitive(m_ctx, dst);
CUDA_RT_SAFE_CALL(cudaMemset(tensor, 0, size));
}});
launch_kernel.reset(
new gpu::primitive{[=](void** /* inputs */, void** /* outputs */) mutable {
void* tensor = gpu::invoke_memory_primitive(m_ctx, dst);
CUDA_RT_SAFE_CALL(cudaMemset(tensor, 0, size));
}});
}
else
{
launch_kernel.reset(new gpu::primitive{[=](void** inputs, void** outputs) mutable {
launch_kernel.reset(new gpu::primitive{[=](void** /* inputs */, void** outputs) mutable {
CUDA_RT_SAFE_CALL(cudaMemset(outputs[dst], 0, size));
}});
}
@@ -41,6 +41,7 @@ namespace ngraph
template <>
void GPULayout::LAYOUT_DECL(ngraph::op::ReplaceSlice)
{
(void)compiled_function;
auto rep_slice = static_cast<ngraph::op::ReplaceSlice*>(node.get());
auto op_annotations = rep_slice->get_op_annotations();
@@ -60,6 +61,7 @@ namespace ngraph
template <>
void GPULayout::LAYOUT_DECL(ngraph::op::Reshape)
{
(void)compiled_function;
auto reshape = static_cast<ngraph::op::Reshape*>(node.get());
if (reshape->get_is_transpose())
{
@@ -84,6 +86,7 @@ namespace ngraph
template <>
void GPULayout::LAYOUT_DECL(ngraph::op::TopK)
{
(void)compiled_function;
auto topk = std::dynamic_pointer_cast<ngraph::op::TopK>(node);
auto topk_axis = topk->get_top_k_axis();
auto topk_k = topk->get_k();
@@ -23,7 +23,7 @@ using namespace std;
// start 23,749,645 in 1,912 files
void skip_comment(istream& s)
void skip_comment(istream& /* s */)
{
}
@@ -33,7 +33,7 @@ using namespace ngraph;
TEST(gpu_test, gpu_shape_from_64bit_shape)
{
Shape shape{1UL << 33};
ASSERT_ANY_THROW([](NVShape s) {}(shape););
ASSERT_ANY_THROW([](NVShape /* s */) {}(shape););
}
TEST(gpu_test, memory_manager_unallocated)