Unverified Commit a97e26e1 authored by Scott Cyphers's avatar Scott Cyphers Committed by GitHub

Cyphers/params (#3613)

* Enable unuser parameter warnings

* Unused params
parent 706e705e
......@@ -38,10 +38,8 @@ endif()
# should remove these
add_compile_options(-Wno-float-conversion)
add_compile_options(-Wno-sign-conversion)
add_compile_options(-Wno-padded)
add_compile_options(-Wno-sign-compare)
add_compile_options(-Wno-unused-parameter)
add_compile_options(-Wno-conversion)
add_compile_options(-Wno-double-promotion)
add_compile_options(-Wno-undefined-func-template)
......@@ -138,15 +138,18 @@ namespace ngraph
env.DeleteDistribution(distribution);
}
void recv(void* in, element::Type_t element_type, size_t count, int src_id) override
void recv(void* /* in */,
element::Type_t /* element_type */,
size_t /* count */,
int /* src_id */) override
{
throw ngraph_error("recv not supported/mentioned in MLSL");
}
void send(const void* in,
element::Type_t element_type,
size_t count,
int dest_id) override
void send(const void* /* in */,
element::Type_t /* element_type */,
size_t /* count */,
int /* dest_id */) override
{
throw ngraph_error("send not supported/mentioned in MLSL");
}
......
......@@ -217,7 +217,8 @@ shared_ptr<Node> op::QuantizedDot::copy_with_new_args(const NodeVector& new_args
m_output_axes));
}
void op::QuantizedDot::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
void op::QuantizedDot::generate_adjoints(autodiff::Adjoints& /* adjoints */,
const NodeVector& /* deltas */)
{
throw ngraph_error("Forward-propagation-only operation");
}
......@@ -141,7 +141,8 @@ shared_ptr<Node> op::v1::Softmax::copy_with_new_args(const NodeVector& new_args)
return make_shared<op::v1::Softmax>(new_args.at(0), m_axis);
}
void op::v1::Softmax::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
void op::v1::Softmax::generate_adjoints(autodiff::Adjoints& /* adjoints */,
const NodeVector& /* deltas */)
{
throw ngraph_error("op::v1::Softmax::generate_adjoints function is not implemented yet");
......
......@@ -79,7 +79,7 @@ namespace ngraph
arg2_buffer_index,
arg3_buffer_index,
out0_buffer_index](CPURuntimeContext* ctx,
CPUExecutionContext* ectx) mutable {
CPUExecutionContext* /* ectx */) mutable {
if (ctx->first_iteration)
{
vector<float> dyn_scales;
......@@ -121,6 +121,7 @@ namespace ngraph
template <>
void Builder::BUILDER_DECL(ngraph::op::QuantizedDot)
{
(void)node;
auto& functors = external_function->get_functors();
auto arg0_shape = args[0].get_shape();
......@@ -161,7 +162,7 @@ namespace ngraph
arg6_buffer_index,
arg7_buffer_index,
out0_buffer_index](CPURuntimeContext* ctx,
CPUExecutionContext* ectx) {
CPUExecutionContext* /* ectx */) {
kernel(ctx->buffer_data[arg0_buffer_index],
ctx->buffer_data[arg1_buffer_index],
......@@ -203,7 +204,7 @@ namespace ngraph
arg6_buffer_index,
arg7_buffer_index,
out0_buffer_index](CPURuntimeContext* ctx,
CPUExecutionContext* ectx) {
CPUExecutionContext* /* ectx */) {
kernel(ctx->buffer_data[arg0_buffer_index],
ctx->buffer_data[arg1_buffer_index],
......@@ -245,7 +246,7 @@ namespace ngraph
arg6_buffer_index,
arg7_buffer_index,
out0_buffer_index](CPURuntimeContext* ctx,
CPUExecutionContext* ectx) {
CPUExecutionContext* /* ectx */) {
kernel(ctx->buffer_data[arg0_buffer_index],
ctx->buffer_data[arg1_buffer_index],
......
......@@ -175,7 +175,7 @@ namespace ngraph
arg_buffer_index,
out_indices_buffer_index,
out_values_buffer_index](CPURuntimeContext* ctx,
CPUExecutionContext* ectx) {
CPUExecutionContext* /* ectx */) {
ngraph::runtime::reference::topk<int32_t, int64_t>(
static_cast<int32_t*>(ctx->buffer_data[arg_buffer_index]),
static_cast<int64_t*>(ctx->buffer_data[out_indices_buffer_index]),
......@@ -200,7 +200,7 @@ namespace ngraph
arg_buffer_index,
out_indices_buffer_index,
out_values_buffer_index](CPURuntimeContext* ctx,
CPUExecutionContext* ectx) {
CPUExecutionContext* /* ectx */) {
ngraph::runtime::reference::topk<int32_t, int32_t>(
static_cast<int32_t*>(ctx->buffer_data[arg_buffer_index]),
static_cast<int32_t*>(ctx->buffer_data[out_indices_buffer_index]),
......
......@@ -38,7 +38,7 @@ using namespace std;
extern "C" CPU_BACKEND_API void ngraph_register_cpu_backend()
{
runtime::BackendManager::register_backend("CPU", [](const std::string& config) {
runtime::BackendManager::register_backend("CPU", [](const std::string& /* config */) {
static bool is_initialized = false;
if (!is_initialized)
{
......@@ -202,7 +202,7 @@ vector<runtime::PerformanceCounter> runtime::cpu::CPU_Executable::get_performanc
return rc;
}
bool runtime::cpu::CPU_Backend::is_supported(const Node& op) const
bool runtime::cpu::CPU_Backend::is_supported(const Node& /* op */) const
{
return true;
}
......
......@@ -1667,9 +1667,9 @@ mkldnn::sum::primitive_desc MKLDNNEmitter::get_elementwise_add_desc(const ngraph
}
void MKLDNNEmitter::build_quantize_reorder(
std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::memory::desc& input_desc,
const mkldnn::memory::desc& result_desc,
const std::vector<float>& scales,
......@@ -1727,9 +1727,9 @@ mkldnn::memory::format MKLDNNEmitter::query_convolution_forward_weight_format(
}
void MKLDNNEmitter::build_deconvolutionbias_forward(
std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::deconvolution_forward::desc& deconv_desc,
const std::vector<size_t>& deps,
size_t deconv_index,
......@@ -1753,9 +1753,9 @@ void MKLDNNEmitter::build_deconvolutionbias_forward(
}
void MKLDNNEmitter::build_convolution_backward_weights_bias(
std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::convolution_backward_weights::desc& bwd_desc,
const mkldnn::convolution_forward::desc& fwd_desc,
const std::vector<size_t>& deps,
......@@ -1784,9 +1784,9 @@ void MKLDNNEmitter::build_convolution_backward_weights_bias(
}
void MKLDNNEmitter::build_convolution_backward_weights(
std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::convolution_backward_weights::desc& bwd_desc,
const mkldnn::convolution_forward::desc& fwd_desc,
const std::vector<size_t>& deps,
......@@ -1810,9 +1810,9 @@ void MKLDNNEmitter::build_convolution_backward_weights(
}
void MKLDNNEmitter::build_convolution_backward_data(
std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::convolution_backward_data::desc& bwd_desc,
const mkldnn::convolution_forward::desc& fwd_desc,
const std::vector<size_t>& deps,
......@@ -1835,12 +1835,13 @@ void MKLDNNEmitter::build_convolution_backward_data(
*mkldnn_primitives[diff_src_index]);
}
void MKLDNNEmitter::build_pooling_forward(std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
const mkldnn::pooling_forward::desc& pool_desc,
const std::vector<size_t>& deps,
size_t pool_index)
void MKLDNNEmitter::build_pooling_forward(
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::pooling_forward::desc& pool_desc,
const std::vector<size_t>& deps,
size_t pool_index)
{
size_t input_index = deps[0];
build_memory_primitive(mkldnn_primitives, pool_desc.data.src_desc, input_index);
......@@ -1854,9 +1855,9 @@ void MKLDNNEmitter::build_pooling_forward(std::vector<mkldnn::memory*>& mkldnn_m
}
void MKLDNNEmitter::build_pooling_backward(
std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::pooling_backward::desc& pool_desc,
const mkldnn::pooling_forward::desc& pool_fwd_desc,
const std::vector<size_t>& deps,
......@@ -1877,9 +1878,9 @@ void MKLDNNEmitter::build_pooling_backward(
}
void MKLDNNEmitter::build_max_pooling_backward(
std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
std::vector<char*>& mkldnn_workspaces,
const mkldnn::pooling_backward::desc& bwd_pool_desc,
const mkldnn::pooling_forward::desc& fwd_pool_desc,
......@@ -1925,9 +1926,9 @@ void MKLDNNEmitter::build_max_pooling_backward(
}
void MKLDNNEmitter::build_max_pooling_with_indices_forward(
std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::pooling_forward::desc& max_pool_desc,
const std::vector<size_t>& deps,
size_t max_pool_index)
......@@ -1949,9 +1950,9 @@ void MKLDNNEmitter::build_max_pooling_with_indices_forward(
}
void MKLDNNEmitter::build_max_pooling_with_indices_backward(
std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::pooling_backward::desc& bwd_pool_desc,
const mkldnn::pooling_forward::desc& fwd_pool_desc,
const std::vector<size_t>& deps,
......@@ -1975,9 +1976,9 @@ void MKLDNNEmitter::build_max_pooling_with_indices_backward(
*mkldnn_primitives[diff_src_index]);
}
void MKLDNNEmitter::build_reorder(std::vector<mkldnn::memory*>& mkldnn_memories,
void MKLDNNEmitter::build_reorder(std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::memory::desc& input_desc,
const mkldnn::memory::desc& result_desc,
const std::vector<size_t>& deps,
......@@ -1992,12 +1993,13 @@ void MKLDNNEmitter::build_reorder(std::vector<mkldnn::memory*>& mkldnn_memories,
new mkldnn::reorder(*mkldnn_primitives[input_index], *mkldnn_primitives[result_index]);
}
void MKLDNNEmitter::build_lrn_forward(std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
const mkldnn::lrn_forward::desc& lrn_desc,
const std::vector<size_t>& deps,
size_t lrn_index)
void MKLDNNEmitter::build_lrn_forward(
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::lrn_forward::desc& lrn_desc,
const std::vector<size_t>& deps,
size_t lrn_index)
{
size_t input_index = deps[0];
build_memory_primitive(mkldnn_primitives, lrn_desc.data.data_desc, input_index);
......@@ -2010,12 +2012,13 @@ void MKLDNNEmitter::build_lrn_forward(std::vector<mkldnn::memory*>& mkldnn_memor
lrn_prim_desc, *mkldnn_primitives[input_index], *mkldnn_primitives[result_index]);
}
void MKLDNNEmitter::build_relu_forward(std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
const mkldnn::eltwise_forward::desc& relu_desc,
const std::vector<size_t>& deps,
size_t relu_index)
void MKLDNNEmitter::build_relu_forward(
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::eltwise_forward::desc& relu_desc,
const std::vector<size_t>& deps,
size_t relu_index)
{
size_t input_index = deps[0];
build_memory_primitive(mkldnn_primitives, relu_desc.data.data_desc, input_index);
......@@ -2028,13 +2031,14 @@ void MKLDNNEmitter::build_relu_forward(std::vector<mkldnn::memory*>& mkldnn_memo
*mkldnn_primitives[result_index]);
}
void MKLDNNEmitter::build_relu_backward(std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
const mkldnn::eltwise_backward::desc& bwd_desc,
const mkldnn::eltwise_forward::desc& fwd_desc,
const std::vector<size_t>& deps,
size_t relu_index)
void MKLDNNEmitter::build_relu_backward(
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::eltwise_backward::desc& bwd_desc,
const mkldnn::eltwise_forward::desc& fwd_desc,
const std::vector<size_t>& deps,
size_t relu_index)
{
size_t input_index = deps[0];
build_memory_primitive(mkldnn_primitives, bwd_desc.data.data_desc, input_index);
......@@ -2056,12 +2060,13 @@ void MKLDNNEmitter::build_relu_backward(std::vector<mkldnn::memory*>& mkldnn_mem
*mkldnn_primitives[result_index]);
}
void MKLDNNEmitter::build_sigmoid_forward(std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
const mkldnn::eltwise_forward::desc& sigmoid_desc,
const std::vector<size_t>& deps,
size_t sigmoid_index)
void MKLDNNEmitter::build_sigmoid_forward(
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::eltwise_forward::desc& sigmoid_desc,
const std::vector<size_t>& deps,
size_t sigmoid_index)
{
size_t input_index = deps[0];
build_memory_primitive(mkldnn_primitives, sigmoid_desc.data.data_desc, input_index);
......@@ -2075,9 +2080,9 @@ void MKLDNNEmitter::build_sigmoid_forward(std::vector<mkldnn::memory*>& mkldnn_m
}
void MKLDNNEmitter::build_sigmoid_backward(
std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::eltwise_backward::desc& bwd_desc,
const mkldnn::eltwise_forward::desc& fwd_desc,
const std::vector<size_t>& deps,
......@@ -2101,12 +2106,13 @@ void MKLDNNEmitter::build_sigmoid_backward(
*mkldnn_primitives[result_index]);
}
void MKLDNNEmitter::build_elementwise_add(std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
const mkldnn::sum::primitive_desc& sum_pd,
const std::vector<size_t>& deps,
size_t add_index)
void MKLDNNEmitter::build_elementwise_add(
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::sum::primitive_desc& sum_pd,
const std::vector<size_t>& deps,
size_t add_index)
{
std::vector<mkldnn::memory::primitive::at> inputs_primitive;
......@@ -2128,9 +2134,9 @@ void MKLDNNEmitter::build_elementwise_add(std::vector<mkldnn::memory*>& mkldnn_m
}
void MKLDNNEmitter::build_batchnorm_forward(
std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::batch_normalization_forward::desc& batchnorm_desc,
const mkldnn::memory::desc& weights_desc,
bool bn_training_flag,
......@@ -2188,14 +2194,14 @@ void MKLDNNEmitter::build_batchnorm_forward(
}
void MKLDNNEmitter::build_batchnorm_backward(
std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::batch_normalization_backward::desc& batchnorm_desc,
const mkldnn::memory::desc& input_desc,
const mkldnn::memory::desc& /* input_desc */,
const mkldnn::memory::desc& weights_desc,
const mkldnn::memory::desc& dweights_desc,
float epsilon,
float /* epsilon */,
const std::vector<size_t>& deps,
size_t batchnorm_index)
{
......@@ -2231,13 +2237,14 @@ void MKLDNNEmitter::build_batchnorm_backward(
*mkldnn_primitives[dweights_index]);
}
void MKLDNNEmitter::build_rnn_forward(std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<char*>& mkldnn_workspaces,
const mkldnn::rnn_forward::desc& rnn_desc,
std::vector<size_t>& deps,
size_t rnn_index)
void MKLDNNEmitter::build_rnn_forward(
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
std::vector<char*>& mkldnn_workspaces,
const mkldnn::rnn_forward::desc& rnn_desc,
std::vector<size_t>& deps,
size_t rnn_index)
{
size_t src_layer_index = deps[0];
build_memory_primitive(mkldnn_primitives, rnn_desc.data.src_layer_desc, src_layer_index);
......@@ -2278,9 +2285,9 @@ void MKLDNNEmitter::build_rnn_forward(std::vector<mkldnn::memory*>& mkldnn_memor
static_cast<mkldnn::memory>(*mkldnn_primitives[workspace_index]));
}
void MKLDNNEmitter::build_concat(std::vector<mkldnn::memory*>& mkldnn_memories,
void MKLDNNEmitter::build_concat(std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::concat::primitive_desc& concat_pd,
const std::vector<mkldnn::memory::desc>& inputs_data_desc,
const std::vector<size_t>& deps,
......@@ -2309,9 +2316,9 @@ void MKLDNNEmitter::build_concat(std::vector<mkldnn::memory*>& mkldnn_memories,
new mkldnn::concat(concat_pd, inputs_primitive, *mkldnn_primitives[result_index]);
}
void MKLDNNEmitter::build_slice(std::vector<mkldnn::memory*>& mkldnn_memories,
void MKLDNNEmitter::build_slice(std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
mkldnn::memory::desc input_desc,
const mkldnn::memory::desc& result_desc,
const ngraph::Coordinate& lower_bounds,
......@@ -2342,12 +2349,13 @@ void MKLDNNEmitter::build_slice(std::vector<mkldnn::memory*>& mkldnn_memories,
reorder_pd, *mkldnn_primitives[input_index], *mkldnn_primitives[result_index]);
}
void MKLDNNEmitter::build_softmax_forward(std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
const mkldnn::softmax_forward::desc& softmax_desc,
const std::vector<size_t>& deps,
size_t softmax_index)
void MKLDNNEmitter::build_softmax_forward(
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::softmax_forward::desc& softmax_desc,
const std::vector<size_t>& deps,
size_t softmax_index)
{
size_t input_index = deps[0];
build_memory_primitive(mkldnn_primitives, softmax_desc.data.data_desc, input_index);
......@@ -2360,12 +2368,13 @@ void MKLDNNEmitter::build_softmax_forward(std::vector<mkldnn::memory*>& mkldnn_m
*mkldnn_primitives[result_index]);
}
void MKLDNNEmitter::build_leaky_relu(std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
const mkldnn::eltwise_forward::desc& leaky_relu_desc,
const std::vector<size_t>& deps,
size_t leaky_relu_index)
void MKLDNNEmitter::build_leaky_relu(
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::eltwise_forward::desc& leaky_relu_desc,
const std::vector<size_t>& deps,
size_t leaky_relu_index)
{
size_t input_index = deps[0];
build_memory_primitive(mkldnn_primitives, leaky_relu_desc.data.data_desc, input_index);
......@@ -2378,12 +2387,13 @@ void MKLDNNEmitter::build_leaky_relu(std::vector<mkldnn::memory*>& mkldnn_memori
*mkldnn_primitives[result_index]);
}
void MKLDNNEmitter::build_bounded_relu(std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
const mkldnn::eltwise_forward::desc& bounded_relu_desc,
const std::vector<size_t>& deps,
size_t bounded_relu_index)
void MKLDNNEmitter::build_bounded_relu(
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::eltwise_forward::desc& bounded_relu_desc,
const std::vector<size_t>& deps,
size_t bounded_relu_index)
{
size_t input_index = deps[0];
build_memory_primitive(mkldnn_primitives, bounded_relu_desc.data.data_desc, input_index);
......
......@@ -1600,9 +1600,9 @@ namespace ngraph
template <bool with_bias>
void build_convolution_forward(
std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::convolution_forward::desc& desc,
const mkldnn::primitive_attr& attr,
const mkldnn::engine& engine,
......@@ -1652,9 +1652,9 @@ namespace ngraph
template <bool with_bias>
void build_inner_product_forward(
std::vector<mkldnn::memory*>& mkldnn_memories,
std::vector<mkldnn::memory*>& /* mkldnn_memories */,
std::vector<mkldnn::primitive*>& mkldnn_primitives,
std::vector<mkldnn::memory::desc*>& mkldnn_scratchpad_mds,
std::vector<mkldnn::memory::desc*>& /* mkldnn_scratchpad_mds */,
const mkldnn::inner_product_forward::desc& desc,
const mkldnn::primitive_attr& attr,
const mkldnn::engine& engine,
......
......@@ -33,8 +33,11 @@ extern "C" void ngraph::runtime::cpu::mkldnn_utils::set_memory_ptr(CPURuntimeCon
primitive->set_data_handle(ptr);
}
extern "C" void ngraph::runtime::cpu::mkldnn_utils::mkldnn_invoke_primitive(
CPURuntimeContext* ctx, size_t primitive_index, std::vector<size_t>& deps, OpType type)
extern "C" void
ngraph::runtime::cpu::mkldnn_utils::mkldnn_invoke_primitive(CPURuntimeContext* ctx,
size_t primitive_index,
std::vector<size_t>& /* deps */,
OpType /* type */)
{
mkldnn::stream s(mkldnn::stream::kind::eager);
try
......
......@@ -113,7 +113,7 @@ bool runtime::cpu::mkldnn_utils::can_create_mkldnn_md(const ngraph::element::Typ
}
bool runtime::cpu::mkldnn_utils::can_create_mkldnn_md(const Shape& dims,
const Strides& strides,
const Strides& /* strides */,
const ngraph::element::Type type)
{
auto it = get_mkldnn_data_type_map().find(type);
......
......@@ -77,6 +77,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::Add)
{
(void)external_function;
auto arg0_shape = node->get_input_shape(0);
auto arg1_shape = node->get_input_shape(1);
auto arg0_rank = arg0_shape.size();
......@@ -98,6 +99,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::Concat)
{
(void)external_function;
if ((node->get_input_element_type(0) == element::f32 ||
node->get_input_element_type(0) == element::i8 ||
node->get_input_element_type(0) == element::u8) &&
......@@ -127,6 +129,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::Convolution)
{
(void)external_function;
if (mkldnn_utils::can_use_mkldnn_conv<ngraph::op::Convolution>(node))
{
runtime::cpu::mkldnn_utils::assign_mkldnn_kernel(node);
......@@ -136,6 +139,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::GroupConvolution)
{
(void)external_function;
if (mkldnn_utils::can_use_mkldnn_conv<ngraph::op::GroupConvolution>(node))
{
runtime::cpu::mkldnn_utils::assign_mkldnn_kernel(node);
......@@ -145,6 +149,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::GroupConvolutionBias)
{
(void)external_function;
if (mkldnn_utils::can_use_mkldnn_conv<ngraph::op::GroupConvolutionBias>(node))
{
runtime::cpu::mkldnn_utils::assign_mkldnn_kernel(node);
......@@ -154,6 +159,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::ConvolutionRelu)
{
(void)external_function;
if (mkldnn_utils::can_use_mkldnn_conv<ngraph::op::ConvolutionRelu>(node))
{
runtime::cpu::mkldnn_utils::assign_mkldnn_kernel(node);
......@@ -163,6 +169,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::ConvolutionBiasAdd)
{
(void)external_function;
auto convolution = static_cast<ngraph::op::ConvolutionBiasAdd*>(node);
if (mkldnn_utils::can_use_mkldnn_conv<ngraph::op::ConvolutionBiasAdd>(node))
......@@ -180,6 +187,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::GetOutputElement)
{
(void)external_function;
auto goe = static_cast<ngraph::op::GetOutputElement*>(node);
auto op_annotations =
std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
......@@ -190,6 +198,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::ConvolutionAdd)
{
(void)external_function;
auto convolution = static_cast<ngraph::op::ConvolutionAdd*>(node);
if (mkldnn_utils::can_use_mkldnn_conv<ngraph::op::ConvolutionAdd>(node))
......@@ -207,6 +216,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::BatchNormInferenceRelu)
{
(void)external_function;
if (mkldnn_utils::can_use_mkldnn_batchnorm_fprop(node))
{
runtime::cpu::mkldnn_utils::assign_mkldnn_kernel(node);
......@@ -216,6 +226,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::BatchNormTrainingRelu)
{
(void)external_function;
if (mkldnn_utils::can_use_mkldnn_batchnorm_fprop(node))
{
runtime::cpu::mkldnn_utils::assign_mkldnn_kernel(node);
......@@ -225,6 +236,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::DeconvolutionBias)
{
(void)external_function;
auto convolution = static_cast<ngraph::op::DeconvolutionBias*>(node);
auto arg0_shape = node->get_input_shape(0);
......@@ -259,6 +271,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::ConvolutionBackpropData)
{
(void)external_function;
auto convolution = static_cast<ngraph::op::ConvolutionBackpropData*>(node);
auto arg0_shape = node->get_input_shape(0);
......@@ -284,6 +297,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::ConvolutionBackpropFilters)
{
(void)external_function;
auto convolution = static_cast<ngraph::op::ConvolutionBackpropFilters*>(node);
auto arg0_shape = node->get_input_shape(0);
......@@ -309,6 +323,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::ConvolutionBias)
{
(void)external_function;
if (mkldnn_utils::can_use_mkldnn_conv<ngraph::op::ConvolutionBias>(node))
{
runtime::cpu::mkldnn_utils::assign_mkldnn_kernel(node);
......@@ -318,6 +333,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::ConvolutionBiasBackpropFiltersBias)
{
(void)external_function;
auto convolution =
static_cast<ngraph::op::ConvolutionBiasBackpropFiltersBias*>(node);
......@@ -343,6 +359,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::AvgPool)
{
(void)external_function;
auto avg_pool = static_cast<ngraph::op::AvgPool*>(node);
auto arg0_shape = node->get_input_shape(0);
......@@ -362,6 +379,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::AvgPoolBackprop)
{
(void)external_function;
auto avg_pool = static_cast<ngraph::op::AvgPoolBackprop*>(node);
auto arg0_shape = node->get_input_shape(0);
......@@ -379,6 +397,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::MaxPool)
{
(void)external_function;
auto max_pool = static_cast<ngraph::op::MaxPool*>(node);
auto arg0_shape = node->get_input_shape(0);
......@@ -398,6 +417,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::MaxPoolWithIndices)
{
(void)external_function;
auto max_pool = static_cast<ngraph::op::MaxPoolWithIndices*>(node);
auto arg0_shape = node->get_input_shape(0);
......@@ -414,6 +434,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::MaxPoolBackprop)
{
(void)external_function;
auto max_pool = static_cast<ngraph::op::MaxPoolBackprop*>(node);
auto arg1_shape = node->get_input_shape(1);
......@@ -431,6 +452,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::MaxPoolWithIndicesBackprop)
{
(void)external_function;
auto max_pool = static_cast<ngraph::op::MaxPoolWithIndicesBackprop*>(node);
auto arg1_shape = node->get_input_shape(1);
......@@ -447,6 +469,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::Relu)
{
(void)external_function;
auto relu = static_cast<ngraph::op::Relu*>(node);
auto arg0_shape = node->get_input_shape(0);
......@@ -471,6 +494,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::ReplaceSlice)
{
(void)external_function;
auto replace_slice = static_cast<ngraph::op::ReplaceSlice*>(node);
// ReplaceSlice is independent of data type. Hence not checking type
......@@ -487,6 +511,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::UpdateSlice)
{
(void)external_function;
auto update_slice = static_cast<ngraph::op::UpdateSlice*>(node);
auto op_annotations =
......@@ -502,6 +527,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::ScatterAdd)
{
(void)external_function;
auto update_slice = static_cast<ngraph::op::ScatterAdd*>(node);
auto op_annotations =
......@@ -517,6 +543,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::LRN)
{
(void)external_function;
auto arg0_shape = node->get_input_shape(0);
auto arg0_rank = arg0_shape.size();
auto result_shape = node->get_output_shape(0);
......@@ -530,6 +557,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::Sigmoid)
{
(void)external_function;
if (node->get_input_element_type(0) == element::f32)
{
runtime::cpu::mkldnn_utils::assign_mkldnn_kernel(node);
......@@ -539,6 +567,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::SigmoidBackprop)
{
(void)external_function;
if (node->get_input_element_type(0) == element::f32)
{
runtime::cpu::mkldnn_utils::assign_mkldnn_kernel(node);
......@@ -548,6 +577,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::ReluBackprop)
{
(void)external_function;
auto arg0_shape = node->get_input_shape(0);
auto arg0_rank = arg0_shape.size();
auto result_shape = node->get_output_shape(0);
......@@ -562,6 +592,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::BatchNormTraining)
{
(void)external_function;
if (mkldnn_utils::can_use_mkldnn_batchnorm_fprop(node))
{
runtime::cpu::mkldnn_utils::assign_mkldnn_kernel(node);
......@@ -571,6 +602,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::BatchNormInference)
{
(void)external_function;
if (mkldnn_utils::can_use_mkldnn_batchnorm_fprop(node))
{
runtime::cpu::mkldnn_utils::assign_mkldnn_kernel(node);
......@@ -580,6 +612,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::BatchNormTrainingBackprop)
{
(void)external_function;
if (mkldnn_utils::can_use_mkldnn_batchnorm_bprop(node))
{
runtime::cpu::mkldnn_utils::assign_mkldnn_kernel(node);
......@@ -589,6 +622,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::Lstm)
{
(void)external_function;
auto src_layer_rank = node->get_input_shape(0).size();
auto src_iter_rank = node->get_input_shape(1).size();
#if MKLDNN_VERSION_MAJOR < 1
......@@ -620,6 +654,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::Rnn)
{
(void)external_function;
auto src_layer_rank = node->get_input_shape(0).size();
auto src_iter_rank = node->get_input_shape(1).size();
#if MKLDNN_VERSION_MAJOR < 1
......@@ -651,6 +686,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::Softmax)
{
(void)external_function;
auto softmax = static_cast<ngraph::op::Softmax*>(node);
auto arg0_shape = node->get_input_shape(0);
......@@ -668,6 +704,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::Slice)
{
(void)external_function;
auto slice = static_cast<ngraph::op::Slice*>(node);
auto strides = slice->get_strides();
if (!is_strided(strides) && node->get_input_element_type(0) == element::f32)
......@@ -679,6 +716,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::BoundedRelu)
{
(void)external_function;
auto bounded_relu = static_cast<ngraph::op::BoundedRelu*>(node);
auto arg0_shape = node->get_input_shape(0);
......@@ -703,6 +741,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::CPULeakyRelu)
{
(void)external_function;
auto leaky_relu = static_cast<ngraph::op::CPULeakyRelu*>(node);
auto arg0_shape = node->get_input_shape(0);
......@@ -727,6 +766,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::QuantizedConvolution)
{
(void)external_function;
auto qconv = static_cast<ngraph::op::QuantizedConvolution*>(node);
auto input_zero_point =
dynamic_pointer_cast<ngraph::op::Constant>(qconv->get_argument(3));
......@@ -753,18 +793,21 @@ namespace ngraph
// Kernel assignment for QuantizedConvolutionRelu.
// The parameter list is generated by the ASSIGN_DECL macro, which provides
// `external_function` and `node`; the void-cast exists only to silence the
// unused-parameter warning (this commit removes -Wno-unused-parameter).
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::QuantizedConvolutionRelu)
{
(void)external_function;
// Route this op to the MKL-DNN kernel unconditionally — unlike e.g. the
// Sigmoid assignment above, no element-type check is performed here.
runtime::cpu::mkldnn_utils::assign_mkldnn_kernel(node);
}
// Kernel assignment for QuantizedConvolutionBias.
// Signature comes from the ASSIGN_DECL macro (supplies `external_function`
// and `node`); the void-cast only suppresses the unused-parameter warning
// now that -Wno-unused-parameter has been removed from the build flags.
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::QuantizedConvolutionBias)
{
(void)external_function;
// Always use the MKL-DNN kernel for this op; no type/shape gating needed.
runtime::cpu::mkldnn_utils::assign_mkldnn_kernel(node);
}
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::QuantizedConvolutionBiasAdd)
{
(void)external_function;
auto quantized_conv_bias =
static_cast<ngraph::op::QuantizedConvolutionBiasAdd*>(node);
auto op_annotations =
......@@ -779,6 +822,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::QuantizedConvolutionBiasSignedAdd)
{
(void)external_function;
auto quantized_conv_bias =
static_cast<ngraph::op::QuantizedConvolutionBiasSignedAdd*>(node);
auto op_annotations =
......@@ -793,6 +837,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::QuantizedDotBias)
{
(void)external_function;
if (node->get_input_element_type(0) == element::u8 &&
node->get_input_element_type(1) == element::i8)
{
......@@ -803,6 +848,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::QuantizedMatmul)
{
(void)external_function;
if (node->get_input_element_type(0) == element::u8 &&
node->get_input_element_type(1) == element::i8)
{
......@@ -813,6 +859,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::Dequantize)
{
(void)external_function;
auto dequantize = static_cast<ngraph::op::Dequantize*>(node);
// TODO(nbpatel): Support dynamic offset via mkldnn
// Go through reference if the offset is not a constant
......@@ -851,6 +898,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::Quantize)
{
(void)external_function;
auto quantize = static_cast<ngraph::op::Quantize*>(node);
// TODO(nbpatel): Support dynamic offset via mkldnn
// Go through reference if the offset is not a constant
......@@ -900,6 +948,7 @@ namespace ngraph
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::Convert)
{
(void)external_function;
auto convert = static_cast<ngraph::op::Convert*>(node);
if ((node->get_input_element_type(0) == element::i8 &&
node->get_output_element_type(0) == element::u8) ||
......
......@@ -31,7 +31,7 @@ using namespace ngraph;
// Registers the INTERPRETER backend factory with the BackendManager.
// The factory ignores its config string (accepted only for interface
// uniformity with other backends), hence the commented-out parameter name.
// Fix: the rendered diff left both the pre- and post-change register_backend
// lines in place, which is not valid C++; keep only the post-change line.
extern "C" INTERPRETER_BACKEND_API void ngraph_register_interpreter_backend()
{
    runtime::BackendManager::register_backend(
        "INTERPRETER", [](const std::string& /* config */) {
            return std::make_shared<runtime::interpreter::INTBackend>();
        });
}
......
......@@ -36,7 +36,7 @@ using descriptor::layout::DenseTensorLayout;
// Registers the NOP backend factory with the BackendManager.
// The factory ignores its config string (kept for interface uniformity).
// Fix: the rendered diff duplicated the pre- and post-change register_backend
// lines; a valid translation unit keeps only the post-change line.
extern "C" NOP_BACKEND_API void ngraph_register_nop_backend()
{
    runtime::BackendManager::register_backend(
        "NOP", [](const std::string& /* config */) {
            return std::make_shared<runtime::nop::NOPBackend>();
        });
}
......@@ -62,7 +62,7 @@ shared_ptr<runtime::Executable>
}
runtime::nop::NOPExecutable::NOPExecutable(shared_ptr<Function> function,
bool enable_performance_collection)
bool /* enable_performance_collection */)
{
pass::Manager pass_manager;
pass_manager.register_pass<pass::AssignLayout<DenseTensorLayout>>();
......@@ -71,8 +71,8 @@ runtime::nop::NOPExecutable::NOPExecutable(shared_ptr<Function> function,
set_parameters_and_results(*function);
}
// NOP backend "execution": performs no work and unconditionally reports
// success.  Both tensor lists are deliberately ignored (commented-out names
// silence -Wunused-parameter, which this commit enables).
// Fix: the rendered diff left both the old and new signatures in the span,
// which is not valid C++; keep only the post-change signature.
bool runtime::nop::NOPExecutable::call(const vector<shared_ptr<runtime::Tensor>>& /* outputs */,
                                       const vector<shared_ptr<runtime::Tensor>>& /* inputs */)
{
    return true;
}
......@@ -23,7 +23,7 @@ using namespace std;
// start 23,749,645 in 1,912 files
// No-op placeholder: the stream parameter is intentionally ignored and the
// stream is left untouched.
// NOTE(review): despite the name, no comment-skipping is performed — confirm
// with callers that an empty stub is the intended behavior.
// Fix: the rendered diff left the stale pre-change signature above the new
// one; keep a single valid definition.  The parameter type is written as
// std::istream explicitly (same type; the file's `using namespace std;`
// previously made the qualification implicit).
void skip_comment(std::istream& /* s */)
{
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment