Unverified Commit a97e26e1 authored by Scott Cyphers, committed by GitHub

Cyphers/params (#3613)

* Enable unused parameter warnings

* Unused params
parent 706e705e
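For context, a minimal sketch (not taken from the nGraph sources) of what the re-enabled warning catches. Once `-Wno-unused-parameter` is dropped from the compile options below, GCC's and Clang's `-Wunused-parameter` (enabled by `-Wextra`, or by Clang's `-Weverything`, which the surrounding `-Wno-*` options suggest is in use) fires on any named parameter that the function body never reads, which is common in stub implementations that only throw:

```cpp
// hypothetical_example.cpp -- illustration only, not part of this commit.
// Assumed build flags: g++ -Wall -Wextra -c hypothetical_example.cpp
#include <stdexcept>

// warning: unused parameter 'in'     [-Wunused-parameter]
// warning: unused parameter 'src_id' [-Wunused-parameter]
void recv_stub(void* in, int src_id)
{
    throw std::runtime_error("recv not supported");
}
```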
......@@ -38,10 +38,8 @@ endif()
# should remove these
add_compile_options(-Wno-float-conversion)
add_compile_options(-Wno-sign-conversion)
add_compile_options(-Wno-padded)
add_compile_options(-Wno-sign-compare)
add_compile_options(-Wno-unused-parameter)
add_compile_options(-Wno-conversion)
add_compile_options(-Wno-double-promotion)
add_compile_options(-Wno-undefined-func-template)
......@@ -138,15 +138,18 @@ namespace ngraph
env.DeleteDistribution(distribution);
}
void recv(void* in, element::Type_t element_type, size_t count, int src_id) override
void recv(void* /* in */,
element::Type_t /* element_type */,
size_t /* count */,
int /* src_id */) override
{
throw ngraph_error("recv not supported/mentioned in MLSL");
}
void send(const void* in,
element::Type_t element_type,
size_t count,
int dest_id) override
void send(const void* /* in */,
element::Type_t /* element_type */,
size_t /* count */,
int /* dest_id */) override
{
throw ngraph_error("send not supported/mentioned in MLSL");
}
......
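The fix applied in the hunk above, and in most of the files that follow, is to comment out the parameter name while keeping its type. Parameter names are optional in C++ function definitions, so the code still compiles, the warning disappears because nothing is named yet unread, and the comment keeps the name visible as documentation for the overridden interface. A before/after sketch under the same assumed flags; `Distributed` and `MLSLStub` are hypothetical stand-ins for the real classes:

```cpp
#include <cstddef>
#include <stdexcept>

// Hypothetical interface standing in for the real distributed API.
struct Distributed
{
    virtual ~Distributed() = default;
    virtual void recv(void* in, std::size_t count, int src_id) = 0;
};

struct MLSLStub : public Distributed
{
    // Before the change, 'in', 'count' and 'src_id' would all trigger
    // -Wunused-parameter; commenting the names out keeps the signature
    // readable without leaving anything named-but-unused.
    void recv(void* /* in */, std::size_t /* count */, int /* src_id */) override
    {
        throw std::runtime_error("recv not supported/mentioned in MLSL");
    }
};

int main()
{
    MLSLStub stub;
    try { stub.recv(nullptr, 0, 0); } catch (const std::exception&) { }
    return 0;
}
```

C++17's `[[maybe_unused]]` attribute is another way to mark a parameter as intentionally unused, but the comment and cast idioms used in this commit also work on pre-C++17 toolchains.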
......@@ -217,7 +217,8 @@ shared_ptr<Node> op::QuantizedDot::copy_with_new_args(const NodeVector& new_args
m_output_axes));
}
void op::QuantizedDot::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
void op::QuantizedDot::generate_adjoints(autodiff::Adjoints& /* adjoints */,
const NodeVector& /* deltas */)
{
throw ngraph_error("Forward-propagation-only operation");
}
......@@ -141,7 +141,8 @@ shared_ptr<Node> op::v1::Softmax::copy_with_new_args(const NodeVector& new_args)
return make_shared<op::v1::Softmax>(new_args.at(0), m_axis);
}
void op::v1::Softmax::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
void op::v1::Softmax::generate_adjoints(autodiff::Adjoints& /* adjoints */,
const NodeVector& /* deltas */)
{
throw ngraph_error("op::v1::Softmax::generate_adjoints function is not implemented yet");
......
......@@ -79,7 +79,7 @@ namespace ngraph
arg2_buffer_index,
arg3_buffer_index,
out0_buffer_index](CPURuntimeContext* ctx,
CPUExecutionContext* ectx) mutable {
CPUExecutionContext* /* ectx */) mutable {
if (ctx->first_iteration)
{
vector<float> dyn_scales;
......@@ -121,6 +121,7 @@ namespace ngraph
template <>
void Builder::BUILDER_DECL(ngraph::op::QuantizedDot)
{
(void)node;
auto& functors = external_function->get_functors();
auto arg0_shape = args[0].get_shape();
......@@ -161,7 +162,7 @@ namespace ngraph
arg6_buffer_index,
arg7_buffer_index,
out0_buffer_index](CPURuntimeContext* ctx,
CPUExecutionContext* ectx) {
CPUExecutionContext* /* ectx */) {
kernel(ctx->buffer_data[arg0_buffer_index],
ctx->buffer_data[arg1_buffer_index],
......@@ -203,7 +204,7 @@ namespace ngraph
arg6_buffer_index,
arg7_buffer_index,
out0_buffer_index](CPURuntimeContext* ctx,
CPUExecutionContext* ectx) {
CPUExecutionContext* /* ectx */) {
kernel(ctx->buffer_data[arg0_buffer_index],
ctx->buffer_data[arg1_buffer_index],
......@@ -245,7 +246,7 @@ namespace ngraph
arg6_buffer_index,
arg7_buffer_index,
out0_buffer_index](CPURuntimeContext* ctx,
CPUExecutionContext* ectx) {
CPUExecutionContext* /* ectx */) {
kernel(ctx->buffer_data[arg0_buffer_index],
ctx->buffer_data[arg1_buffer_index],
......
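One hunk above takes a different route: `(void)node;` at the top of the `QuantizedDot` builder. Casting a parameter to `void` counts as a use, so `-Wunused-parameter` is satisfied without touching the signature; that is useful here presumably because `BUILDER_DECL` looks like a macro that expands to the whole parameter list, so the `node` name cannot be commented out at this one definition site. A minimal sketch of the idiom, with a hypothetical macro standing in for the real one:

```cpp
#include <cstdio>

// Hypothetical stand-in for a macro-generated signature (as BUILDER_DECL appears
// to be): the macro owns the parameter names, so a single definition site cannot
// simply comment one of them out.
#define HANDLER_DECL(name) void name(const char* node, int arg_count)

HANDLER_DECL(build_quantized_dot)
{
    (void)node; // explicit "use" of the otherwise-unused parameter; silences the warning
    std::printf("building with %d args\n", arg_count);
}

int main()
{
    build_quantized_dot("QuantizedDot", 2);
    return 0;
}
```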
......@@ -175,7 +175,7 @@ namespace ngraph
arg_buffer_index,
out_indices_buffer_index,
out_values_buffer_index](CPURuntimeContext* ctx,
CPUExecutionContext* ectx) {
CPUExecutionContext* /* ectx */) {
ngraph::runtime::reference::topk<int32_t, int64_t>(
static_cast<int32_t*>(ctx->buffer_data[arg_buffer_index]),
static_cast<int64_t*>(ctx->buffer_data[out_indices_buffer_index]),
......@@ -200,7 +200,7 @@ namespace ngraph
arg_buffer_index,
out_indices_buffer_index,
out_values_buffer_index](CPURuntimeContext* ctx,
CPUExecutionContext* ectx) {
CPUExecutionContext* /* ectx */) {
ngraph::runtime::reference::topk<int32_t, int32_t>(
static_cast<int32_t*>(ctx->buffer_data[arg_buffer_index]),
static_cast<int32_t*>(ctx->buffer_data[out_indices_buffer_index]),
......
......@@ -38,7 +38,7 @@ using namespace std;
extern "C" CPU_BACKEND_API void ngraph_register_cpu_backend()
{
runtime::BackendManager::register_backend("CPU", [](const std::string& config) {
runtime::BackendManager::register_backend("CPU", [](const std::string& /* config */) {
static bool is_initialized = false;
if (!is_initialized)
{
......@@ -202,7 +202,7 @@ vector<runtime::PerformanceCounter> runtime::cpu::CPU_Executable::get_performanc
return rc;
}
bool runtime::cpu::CPU_Backend::is_supported(const Node& op) const
bool runtime::cpu::CPU_Backend::is_supported(const Node& /* op */) const
{
return true;
}
......
......@@ -1600,9 +1600,9 @@ namespace ngraph
template <bool with_bias>
void build_convolution_forward(
std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::convolution_forward::desc& desc,
const mkldnn::primitive_attr& attr,
const mkldnn::engine& engine,
......@@ -1652,9 +1652,9 @@ namespace ngraph
template <bool with_bias>
void build_inner_product_forward(
std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::inner_product_forward::desc& desc,
const mkldnn::primitive_attr& attr,
const mkldnn::engine& engine,
......
......@@ -33,8 +33,11 @@ extern "C" void ngraph::runtime::cpu::mkldnn_utils::set_memory_ptr(CPURuntimeCon
primitive->set_data_handle(ptr);
}
extern "C" void ngraph::runtime::cpu::mkldnn_utils::mkldnn_invoke_primitive(
CPURuntimeContext* ctx, size_t primitive_index, std::vector<size_t>& deps, OpType type)
extern "C" void
ngraph::runtime::cpu::mkldnn_utils::mkldnn_invoke_primitive(CPURuntimeContext* ctx,
size_t primitive_index,
std::vector<size_t>& /* deps */,
OpType /* type */)
{
mkldnn::stream s(mkldnn::stream::kind::eager);
try
......
......@@ -113,7 +113,7 @@ bool runtime::cpu::mkldnn_utils::can_create_mkldnn_md(const ngraph::element::Typ
}
bool runtime::cpu::mkldnn_utils::can_create_mkldnn_md(const Shape& dims,
const Strides& strides,
const Strides& /* strides */,
const ngraph::element::Type type)
{
auto it = get_mkldnn_data_type_map().find(type);
......
......@@ -31,7 +31,7 @@ using namespace ngraph;
extern "C" INTERPRETER_BACKEND_API void ngraph_register_interpreter_backend()
{
runtime::BackendManager::register_backend("INTERPRETER", [](const std::string& config) {
runtime::BackendManager::register_backend("INTERPRETER", [](const std::string& /* config */) {
return std::make_shared<runtime::interpreter::INTBackend>();
});
}
......
......@@ -36,7 +36,7 @@ using descriptor::layout::DenseTensorLayout;
extern "C" NOP_BACKEND_API void ngraph_register_nop_backend()
{
runtime::BackendManager::register_backend("NOP", [](const std::string& config) {
runtime::BackendManager::register_backend("NOP", [](const std::string& /* config */) {
return std::make_shared<runtime::nop::NOPBackend>();
});
}
......@@ -62,7 +62,7 @@ shared_ptr<runtime::Executable>
}
runtime::nop::NOPExecutable::NOPExecutable(shared_ptr<Function> function,
bool enable_performance_collection)
bool /* enable_performance_collection */)
{
pass::Manager pass_manager;
pass_manager.register_pass<pass::AssignLayout<DenseTensorLayout>>();
......@@ -71,8 +71,8 @@ runtime::nop::NOPExecutable::NOPExecutable(shared_ptr<Function> function,
set_parameters_and_results(*function);
}
bool runtime::nop::NOPExecutable::call(const vector<shared_ptr<runtime::Tensor>>& outputs,
const vector<shared_ptr<runtime::Tensor>>& inputs)
bool runtime::nop::NOPExecutable::call(const vector<shared_ptr<runtime::Tensor>>& /* outputs */,
const vector<shared_ptr<runtime::Tensor>>& /* inputs */)
{
return true;
}
......@@ -23,7 +23,7 @@ using namespace std;
// start 23,749,645 in 1,912 files
void skip_comment(istream& s)
void skip_comment(istream& /* s */)
{
}
......