Unverified Commit 5e607081 authored by Scott Cyphers, committed by GitHub

Unused parameter cleanup (#3603)

parent 5dd1e07d
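The change is mechanical: unused parameter names are commented out inside the signature, or cast to void in the body, so builds stay clean under warnings such as -Wunused-parameter without changing any function's type. A minimal illustrative sketch of the two idioms applied throughout the hunks below (simplified, not copied verbatim from any one file):

// Idiom 1: comment out the name; the parameter type stays part of the signature.
void generate_adjoints(autodiff::Adjoints& /* adjoints */, const NodeVector& /* deltas */) override
{
    throw ngraph_error("Forward-propagation-only operation");
}

// Idiom 2: keep the name visible (e.g. so a documented \param still resolves) and
// silence the warning with a void cast in the body.
virtual void set_host_memory_allocator(Allocator* allocator) { (void)allocator; }

A few hunks also simply delete locals that no longer have any use, such as the unused rank variable in the PlaidML group-convolution implementation.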
@@ -133,8 +133,8 @@ namespace ngraph
 const Shape& padding_below,
 const Shape& padding_above,
 bool include_padding_in_avg_computation,
-const Output<Node>& min,
-const Output<Node>& max)
+const Output<Node>& /* min */,
+const Output<Node>& /* max */)
 {
 return make_shared<op::QuantizedAvgPool>(input,
 window_shape,
@@ -222,8 +222,8 @@ namespace ngraph
 const Strides& window_movement_strides,
 const Shape& padding_below,
 const Shape& padding_above,
-const Output<Node>& min,
-const Output<Node>& max)
+const Output<Node>& /* min */,
+const Output<Node>& /* max */)
 {
 return make_shared<op::QuantizedMaxPool>(
 input, window_shape, window_movement_strides, padding_below, padding_above);
...
@@ -35,32 +35,35 @@ namespace ngraph
 {
 std::printf("%s: %s\n", timestamp.c_str(), buf.data());
 }
-void all_reduce(void* in,
-void* out,
-element::Type_t element_type,
-reduction::Type reduce_type,
-size_t count) override
+void all_reduce(void* /* in */,
+void* /* out */,
+element::Type_t /* element_type */,
+reduction::Type /* reduce_type */,
+size_t /* count */) override
 {
 throw ngraph_error("Distributed Library not supported/mentioned");
 }
-void broadcast(void* in,
-element::Type_t element_type,
-size_t count,
-int root_id) override
+void broadcast(void* /* in */,
+element::Type_t /* element_type */,
+size_t /* count */,
+int /* root_id */) override
 {
 throw ngraph_error("Distributed Library not supported/mentioned");
 }
-void recv(void* in, element::Type_t element_type, size_t count, int src_id) override
+void recv(void* /* in */,
+element::Type_t /* element_type */,
+size_t /* count */,
+int /* src_id*/) override
 {
 throw ngraph_error("Distributed Library not supported/mentioned");
 }
-void send(const void* in,
-element::Type_t element_type,
-size_t count,
-int dest_id) override
+void send(const void* /* in */,
+element::Type_t /* element_type */,
+size_t /* count */,
+int /* dest_id */) override
 {
 throw ngraph_error("Distributed Library not supported/mentioned");
 }
...
@@ -25,7 +25,7 @@ namespace ngraph
 {
 const std::string NullNode::type_name{"NullNode"};
-std::shared_ptr<Node> NullNode::copy_with_new_args(const NodeVector& new_args) const
+std::shared_ptr<Node> NullNode::copy_with_new_args(const NodeVector& /* new_args */) const
 {
 return std::make_shared<NullNode>();
 }
...
@@ -48,85 +48,93 @@ ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI
 }
 }
-ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI
-onnxReleaseBackendID(onnxBackendID backendID)
+ONNXIFI_PUBLIC
+ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxReleaseBackendID(onnxBackendID /* backendID */)
 {
 return ONNXIFI_STATUS_INTERNAL_ERROR;
 }
-ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxGetBackendInfo(
-onnxBackendID backendID, onnxBackendInfo infoType, void* infoValue, std::size_t* infoValueSize)
+ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI
+onnxGetBackendInfo(onnxBackendID /* backendID */,
+onnxBackendInfo /* infoType */,
+void* /* infoValue */,
+std::size_t* /* infoValueSize */)
 {
 return ONNXIFI_STATUS_BACKEND_UNAVAILABLE;
 }
 ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxGetBackendCompatibility(
-onnxBackendID backendID, std::size_t onnxModelSize, const void* onnxModel)
+onnxBackendID /* backendID */, std::size_t /* onnxModelSize */, const void* /* onnxModel */)
 {
 return ONNXIFI_STATUS_BACKEND_UNAVAILABLE;
 }
-ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxInitBackend(
-onnxBackendID backendID, const uint64_t* auxPropertiesList, onnxBackend* backend)
+ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI
+onnxInitBackend(onnxBackendID /* backendID */,
+const uint64_t* /* auxPropertiesList */,
+onnxBackend* /* backend */)
 {
 return ONNXIFI_STATUS_BACKEND_UNAVAILABLE;
 }
-ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxReleaseBackend(onnxBackend backend)
+ONNXIFI_PUBLIC
+ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxReleaseBackend(onnxBackend /* backend */)
 {
 return ONNXIFI_STATUS_INTERNAL_ERROR;
 }
-ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxInitEvent(onnxBackend backend,
-onnxEvent* event)
+ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxInitEvent(onnxBackend /* backend */,
+onnxEvent* /* event */)
 {
 return ONNXIFI_STATUS_BACKEND_UNAVAILABLE;
 }
-ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxSignalEvent(onnxEvent event)
+ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxSignalEvent(onnxEvent /* event */)
 {
 return ONNXIFI_STATUS_BACKEND_UNAVAILABLE;
 }
-ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxWaitEvent(onnxEvent event)
+ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxWaitEvent(onnxEvent /* event */)
 {
 return ONNXIFI_STATUS_BACKEND_UNAVAILABLE;
 }
-ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxReleaseEvent(onnxEvent event)
+ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxReleaseEvent(onnxEvent /* event */)
 {
 return ONNXIFI_STATUS_INTERNAL_ERROR;
 }
 ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI
-onnxInitGraph(onnxBackend backend,
-const uint64_t* auxPropertiesList,
-std::size_t onnxModelSize,
-const void* onnxModel,
-uint32_t weightsCount,
-const onnxTensorDescriptorV1* weightDescriptors,
-onnxGraph* graph)
+onnxInitGraph(onnxBackend /* backend */,
+const uint64_t* /* auxPropertiesList */,
+std::size_t /* onnxModelSize */,
+const void* /* onnxModel */,
+uint32_t /* weightsCount */,
+const onnxTensorDescriptorV1* /* weightDescriptors */,
+onnxGraph* /* graph */)
 {
 return ONNXIFI_STATUS_BACKEND_UNAVAILABLE;
 }
 ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI
-onnxSetGraphIO(onnxGraph graph,
-std::uint32_t inputsCount,
-const onnxTensorDescriptorV1* inputDescriptors,
-std::uint32_t outputsCount,
-const onnxTensorDescriptorV1* outputDescriptors)
+onnxSetGraphIO(onnxGraph /* graph */,
+std::uint32_t /* inputsCount */,
+const onnxTensorDescriptorV1* /* inputDescriptors */,
+std::uint32_t /* outputsCount */,
+const onnxTensorDescriptorV1* /* outputDescriptors */)
 {
 return ONNXIFI_STATUS_BACKEND_UNAVAILABLE;
 }
-ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxRunGraph(
-onnxGraph graph, const onnxMemoryFenceV1* inputFence, onnxMemoryFenceV1* outputFence)
+ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI
+onnxRunGraph(onnxGraph /* graph */,
+const onnxMemoryFenceV1* /* inputFence */,
+onnxMemoryFenceV1* /* outputFence */)
 {
 return ONNXIFI_STATUS_BACKEND_UNAVAILABLE;
 }
-ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxReleaseGraph(onnxGraph graph)
+ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxReleaseGraph(onnxGraph /* graph */)
 {
 return ONNXIFI_STATUS_INTERNAL_ERROR;
 }
...
@@ -130,7 +130,10 @@ namespace ngraph
 /// \param output_size Number of outputs for this node
 Node(const NodeVector& arguments, size_t output_size = 1);
-virtual void generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) {}
+virtual void generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */)
+{
+}
 /// \brief Moves nodes that would be deleted from inputs to nodes to avoid stack overflows on deep networks.
 void safe_delete(NodeVector& nodes, bool recurse);
...
@@ -143,8 +143,8 @@ namespace ngraph
 copy_with_new_args(const NodeVector& new_args) const override;
 protected:
-virtual void generate_adjoints(autodiff::Adjoints& adjoints,
-const NodeVector& deltas) override
+virtual void generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */) override
 {
 throw ngraph_error("Invalid operation");
 }
...
@@ -364,11 +364,11 @@ namespace ngraph
 namespace op
 {
 template <>
-void Constant::write_to_buffer<string>(const element::Type& target_type,
-const Shape& target_shape,
-const vector<string>& source,
-void* target,
-size_t target_element_count)
+void Constant::write_to_buffer<string>(const element::Type& /* target_type */,
+const Shape& /* target_shape */,
+const vector<string>& /* source */,
+void* /* target */,
+size_t /* target_element_count */)
 {
 }
 }
...
@@ -275,7 +275,7 @@ namespace ngraph
 template <typename T>
 void write_to_buffer(const element::Type& target_type,
-const Shape& target_shape,
+const Shape& /* target_shape */,
 const std::vector<T>& source,
 void* target,
 size_t target_element_count)
...
@@ -157,7 +157,8 @@ shared_ptr<Node> op::Dequantize::copy_with_new_args(const NodeVector& new_args)
 return make_shared<Dequantize>(new_args.at(0), new_args.at(1), new_args.at(2), m_type, m_axes);
 }
-void op::Dequantize::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
+void op::Dequantize::generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */)
 {
 throw ngraph_error("Forward-propagation-only operation");
 }
@@ -49,7 +49,8 @@ namespace ngraph
 void validate_and_infer_types() override;
-void generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) override
+void generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */) override
 {
 throw ngraph_error("Not yet implemented");
 }
...
@@ -133,7 +133,8 @@ shared_ptr<Node> op::DynBroadcast::copy_with_new_args(const NodeVector& new_args
 }
 // TODO: This function is not implemented!
-void op::DynBroadcast::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
+void op::DynBroadcast::generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */)
 {
 throw ngraph_error("generate_adjoints not implemented for DynBroadcast");
 }
@@ -25,7 +25,7 @@ op::DynPad::DynPad(const std::shared_ptr<Node>& arg,
 const std::shared_ptr<Node>& padding_below,
 const std::shared_ptr<Node>& padding_above,
 const std::shared_ptr<Node>& padding_value,
-op::PadMode pad_mode)
+op::PadMode /* pad_mode */)
 : Op(check_single_output_args({arg, padding_below, padding_above, padding_value}))
 {
 constructor_validate_and_infer_types();
@@ -110,7 +110,8 @@ shared_ptr<Node> op::DynPad::copy_with_new_args(const NodeVector& new_args) cons
 }
 // TODO: This function is not implemented!
-void op::DynPad::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
+void op::DynPad::generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */)
 {
 throw ngraph_error("generate_adjoints not implemented for DynPad");
 }
@@ -154,7 +154,8 @@ shared_ptr<Node> op::DynReplaceSlice::copy_with_new_args(const NodeVector& new_a
 m_ellipsis_mask);
 }
-void op::DynReplaceSlice::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
+void op::DynReplaceSlice::generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */)
 {
 throw ngraph_error("generate_adjoints not implemented for DynReplaceSlice");
 }
@@ -156,7 +156,8 @@ shared_ptr<Node> op::DynReshape::copy_with_new_args(const NodeVector& new_args)
 return make_shared<DynReshape>(new_args.at(0), new_args.at(1), m_zero_flag);
 }
-void op::DynReshape::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
+void op::DynReshape::generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */)
 {
 throw ngraph_error("generate_adjoints not implemented for DynReshape");
 }
@@ -125,7 +125,8 @@ shared_ptr<Node> op::DynSlice::copy_with_new_args(const NodeVector& new_args) co
 m_ellipsis_mask);
 }
-void op::DynSlice::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
+void op::DynSlice::generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */)
 {
 throw ngraph_error("generate_adjoints not implemented for DynSlice");
 }
@@ -77,8 +77,8 @@ namespace ngraph
 void validate_and_infer_types() override;
 protected:
-virtual void generate_adjoints(autodiff::Adjoints& adjoints,
-const NodeVector& deltas) override
+virtual void generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */) override
 {
 }
...
@@ -36,14 +36,14 @@ op::Range::Range(const Output<Node>& start, const Output<Node>& stop, const Outp
 template <typename T>
 static typename std::enable_if<std::is_integral<T>::value, void>::type
-check_start(const op::Range* node, T start)
+check_start(const op::Range* /* node */, T /* start */)
 {
 // Nothing to check for integral types.
 }
 template <typename T>
 static typename std::enable_if<std::is_integral<T>::value, void>::type
-check_stop(const op::Range* node, T stop)
+check_stop(const op::Range* /* node */, T /* stop */)
 {
 // Nothing to check for integral types.
 }
@@ -125,7 +125,7 @@ static
 }
 template <typename T>
-static PartialShape infer_output_shape(const op::Range* node, const element::Type& et)
+static PartialShape infer_output_shape(const op::Range* node, const element::Type& /* et */)
 {
 auto const_start = dynamic_pointer_cast<op::Constant>(node->get_argument(0));
 auto const_stop = dynamic_pointer_cast<op::Constant>(node->get_argument(1));
...
@@ -94,7 +94,7 @@ shared_ptr<Node> op::Tile::copy_with_new_args(const NodeVector& new_args) const
 }
 // TODO: This function is not implemented!
-void op::Tile::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
+void op::Tile::generate_adjoints(autodiff::Adjoints& /* adjoints */, const NodeVector& /* deltas */)
 {
 throw ngraph_error("generate_adjoints not implemented for Tile");
 }
@@ -73,7 +73,8 @@ shared_ptr<Node> op::Transpose::copy_with_new_args(const NodeVector& new_args) c
 // TODO(amprocte): This will require some way of inverting the permutation in-graph. (TensorFlow,
 // for example, has an InvertPermutation op, but that doesn't feel very nGraph-y somehow.)
-void op::Transpose::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
+void op::Transpose::generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */)
 {
 throw ngraph_error("generate_adjoints not implemented for Transpose");
 }
@@ -178,7 +178,8 @@ NodeVector op::GroupConvolution::decompose_op() const
 return {std::make_shared<ngraph::op::Concat>(convolution_nodes, concatenation_axis)};
 }
-void op::GroupConvolution::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
+void op::GroupConvolution::generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */)
 {
 throw ngraph_error("NYI");
 }
@@ -328,8 +328,8 @@ NodeVector op::GroupConvolutionTranspose::decompose_op() const
 }
 }
-void op::GroupConvolutionTranspose::generate_adjoints(autodiff::Adjoints& adjoints,
-const NodeVector& deltas)
+void op::GroupConvolutionTranspose::generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */)
 {
 throw ngraph_error(
 "Generating adjoints is not yet implemented for GroupConvolutionTranspose node.");
...
@@ -42,7 +42,8 @@ namespace ngraph
 void validate_and_infer_types() override;
-void generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) override
+void generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */) override
 {
 throw ngraph_error("Not yet implemented");
 }
...
@@ -40,7 +40,8 @@ namespace ngraph
 void validate_and_infer_types() override;
-void generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) override
+void generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */) override
 {
 throw ngraph_error("Not yet implemented");
 }
...
@@ -52,7 +52,7 @@ shared_ptr<Node> op::LRN::copy_with_new_args(const NodeVector& new_args) const
 return make_shared<op::LRN>(new_args.at(0), m_alpha, m_beta, m_bias, m_size);
 }
-void op::LRN::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
+void op::LRN::generate_adjoints(autodiff::Adjoints& /* adjoints */, const NodeVector& /* deltas */)
 {
 throw ngraph_error("NYI");
 }
@@ -161,7 +161,7 @@ shared_ptr<Node> op::Pad::copy_with_new_args(const NodeVector& new_args) const
 and push that back.
 */
-void op::Pad::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
+void op::Pad::generate_adjoints(autodiff::Adjoints& /* adjoints */, const NodeVector& /* deltas */)
 {
 throw invalid_argument("Autodiff is not yet implemented for Pad");
 }
...
@@ -47,7 +47,7 @@ shared_ptr<Node> op::Parameter::copy_with_new_args(const NodeVector& new_args) c
 return make_shared<Parameter>(m_element_type, m_partial_shape);
 }
-void op::Parameter::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
+void op::Parameter::generate_adjoints(autodiff::Adjoints& /* adjoints */, const NodeVector& deltas)
 {
 auto delta = deltas.at(0);
 }
...
@@ -160,7 +160,8 @@ shared_ptr<Node> op::Quantize::copy_with_new_args(const NodeVector& new_args) co
 new_args.at(0), new_args.at(1), new_args.at(2), m_type, m_axes, m_round_mode);
 }
-void op::Quantize::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
+void op::Quantize::generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */)
 {
 throw ngraph_error("Forward-propagation-only operation");
 }
@@ -196,8 +196,8 @@ shared_ptr<Node> op::QuantizedConvolution::copy_with_new_args(const NodeVector&
 m_output_axes));
 }
-void op::QuantizedConvolution::generate_adjoints(autodiff::Adjoints& adjoints,
-const NodeVector& deltas)
+void op::QuantizedConvolution::generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */)
 {
 throw ngraph_error("Forward-propagation-only operation");
 }
@@ -42,7 +42,8 @@ namespace ngraph
 void validate_and_infer_types() override;
-void generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) override
+void generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */) override
 {
 throw ngraph_error("Not yet implemented");
 }
...
@@ -42,7 +42,8 @@ namespace ngraph
 void validate_and_infer_types() override;
-void generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) override
+void generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */) override
 {
 throw ngraph_error("Not yet implemented");
 }
...
@@ -137,7 +137,7 @@ shared_ptr<Node> op::TopK::copy_with_new_args(const NodeVector& new_args) const
 new_args.at(0), new_args.at(1), m_top_k_axis, m_index_element_type, m_compute_max, m_sort);
 }
-void op::TopK::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
+void op::TopK::generate_adjoints(autodiff::Adjoints& /* adjoints */, const NodeVector& /* deltas */)
 {
 throw ngraph_error("Forward-propagation-only operation");
 }
@@ -29,17 +29,17 @@
 using namespace std;
 using namespace ngraph;
-static shared_ptr<Node> sigmoid(const shared_ptr<Node>& arg, float alpha, float beta)
+static shared_ptr<Node> sigmoid(const shared_ptr<Node>& arg, float /* alpha */, float /* beta */)
 {
 return make_shared<op::Sigmoid>(arg);
 }
-static shared_ptr<Node> tanh(const shared_ptr<Node>& arg, float alpha, float beta)
+static shared_ptr<Node> tanh(const shared_ptr<Node>& arg, float /* alpha */, float /* beta */)
 {
 return make_shared<op::Tanh>(arg);
 }
-static shared_ptr<Node> relu(const shared_ptr<Node>& arg, float alpha, float beta)
+static shared_ptr<Node> relu(const shared_ptr<Node>& arg, float /* alpha */, float /* beta */)
 {
 return make_shared<op::Relu>(arg);
 }
...
@@ -65,7 +65,8 @@ void op::util::FusedOp::validate_and_infer_types()
 post_validate_and_infer_types();
 }
-void op::util::FusedOp::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
+void op::util::FusedOp::generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /*deltas*/)
 {
 // TODO
 throw ngraph_error("Autodiff on fused ops not supported yet");
...
@@ -120,8 +120,8 @@ void op::util::IndexReduction::validate_and_infer_types()
 set_output_type(0, m_index_element_type, output_shape);
 }
-void op::util::IndexReduction::generate_adjoints(autodiff::Adjoints& adjoints,
-const NodeVector& deltas)
+void op::util::IndexReduction::generate_adjoints(autodiff::Adjoints& /* adjoints */,
+const NodeVector& /* deltas */)
 {
 throw ngraph_error("Forward-propagation-only operation");
 }
@@ -53,7 +53,7 @@ pass::Manager::~Manager()
 {
 }
-void pass::Manager::run_passes(shared_ptr<Function> func, bool transitive)
+void pass::Manager::run_passes(shared_ptr<Function> func, bool /* transitive */)
 {
 static bool profile_enabled = getenv("NGRAPH_PROFILE_PASS_ENABLE") != nullptr;
...
@@ -250,17 +250,17 @@ int pass::MemoryVisualize::compute_op_weight(const shared_ptr<Node> exop)
 return mass;
 }
-size_t pass::MemoryVisualize::memory_usage(shared_ptr<Node> node)
+size_t pass::MemoryVisualize::memory_usage(shared_ptr<Node> /* node */)
 {
 return 0;
 }
-size_t pass::MemoryVisualize::memory_footprint(shared_ptr<Node> node)
+size_t pass::MemoryVisualize::memory_footprint(shared_ptr<Node> /* node */)
 {
 return 0;
 }
-size_t pass::MemoryVisualize::memory_footprint(const std::list<shared_ptr<Node>>& nodes)
+size_t pass::MemoryVisualize::memory_footprint(const std::list<shared_ptr<Node>>& /* nodes */)
 {
 return 0;
 }
@@ -326,7 +326,7 @@ static void sink_reshape(shared_ptr<op::Reshape> reshape,
 static void sink_unary(shared_ptr<op::util::UnaryElementwiseArithmetic> n,
 ReshapeMap& reorders,
-set<shared_ptr<Node>>& reshapes_to_delete)
+set<shared_ptr<Node>>& /* reshapes_to_delete */)
 {
 auto arg_reshape = read_reshapemap(reorders, n->get_argument(0));
 NGRAPH_DEBUG << "Propagating " << describe_reshape(arg_reshape) << " for " << n->get_name();
@@ -373,7 +373,7 @@ static void sink_binary(shared_ptr<op::util::BinaryElementwiseArithmetic> binary
 static void sink_slice(shared_ptr<op::Slice> n,
 ReshapeMap& reorders,
-set<shared_ptr<Node>>& reshapes_to_delete)
+set<shared_ptr<Node>>& /* reshapes_to_delete */)
 {
 auto arg_reshape = reorders.at(n->get_argument(0));
 auto order = arg_reshape->get_input_order();
@@ -399,8 +399,9 @@ static void sink_slice(shared_ptr<op::Slice> n,
 write_reshapemap(reorders, new_slice, new_reshape);
 }
-static void
-sink_pad(shared_ptr<op::Pad> n, ReshapeMap& reorders, set<shared_ptr<Node>>& reshapes_to_delete)
+static void sink_pad(shared_ptr<op::Pad> n,
+ReshapeMap& reorders,
+set<shared_ptr<Node>>& /* reshapes_to_delete */)
 {
 auto arg_reshape = reorders.at(n->get_argument(0));
 auto order = arg_reshape->get_input_order();
@@ -425,7 +426,7 @@ static void
 }
 static void sink_quantize(shared_ptr<op::Quantize> quantize,
 ReshapeMap& reorders,
-set<shared_ptr<Node>>& reshapes_to_delete)
+set<shared_ptr<Node>>& /* reshapes_to_delete */)
 {
 auto arg_reshape = reorders.at(quantize->get_argument(0));
 AxisSet axes_in_def_order =
@@ -492,7 +493,7 @@ static void sink_concat(shared_ptr<op::Concat> n,
 static void sink_dequantize(shared_ptr<op::Dequantize> dequantize,
 ReshapeMap& reorders,
-set<shared_ptr<Node>>& reshapes_to_delete)
+set<shared_ptr<Node>>& /* reshapes_to_delete */)
 {
 auto arg_reshape = reorders.at(dequantize->get_argument(0));
 AxisSet axes_in_def_order =
...
@@ -148,7 +148,7 @@ private:
 std::unordered_map<Node*, int64_t> m_heights;
 };
-static std::string label_edge(const std::shared_ptr<Node>& src,
+static std::string label_edge(const std::shared_ptr<Node>& /* src */,
 const std::shared_ptr<Node>& dst,
 size_t arg_index,
 int64_t jump_distance)
...
@@ -40,7 +40,7 @@ namespace ngraph
 }
 virtual std::shared_ptr<Node>
-copy_with_new_args(const NodeVector& new_args) const override
+copy_with_new_args(const NodeVector& /* new_args */) const override
 {
 throw ngraph_error("Uncopyable");
 }
...
@@ -23,7 +23,7 @@ ngraph::runtime::Allocator::~Allocator()
 class ngraph::runtime::DefaultAllocator : public ngraph::runtime::Allocator
 {
 public:
-void* malloc(size_t size, size_t alignment)
+void* malloc(size_t size, size_t /* alignment */)
 {
 // If allocation succeeds, returns a pointer to the lowest (first) byte in the
 // allocated memory block that is suitably aligned for any scalar type.
...
@@ -62,7 +62,8 @@ runtime::Backend::~Backend()
 {
 }
-std::shared_ptr<ngraph::Node> runtime::Backend::get_backend_op(const std::string& op_name, ...)
+std::shared_ptr<ngraph::Node> runtime::Backend::get_backend_op(const std::string& /* op_name */,
+...)
 {
 std::shared_ptr<ngraph::Node> dummy_node(nullptr);
 return dummy_node;
@@ -89,42 +90,42 @@ vector<string> runtime::Backend::get_registered_devices()
 }
 std::shared_ptr<ngraph::runtime::Tensor>
-runtime::Backend::create_dynamic_tensor(const ngraph::element::Type& element_type,
-const PartialShape& shape)
+runtime::Backend::create_dynamic_tensor(const ngraph::element::Type& /* element_type */,
+const PartialShape& /* shape */)
 {
 throw std::invalid_argument("This backend does not support dynamic tensors");
 }
 std::shared_ptr<runtime::Executable>
 runtime::Backend::compile(std::shared_ptr<Function> func,
-ngraph::pass::PassConfig& pass_config,
+ngraph::pass::PassConfig& /* pass_config */,
 bool enable_performance_data)
 {
 return compile(func, enable_performance_data);
 }
-bool runtime::Backend::is_supported(const Node& node) const
+bool runtime::Backend::is_supported(const Node& /* node */) const
 {
 // The default behavior is that a backend does not support any ops. If this is not the case
 // then override this method and enhance.
 return false;
 }
-bool runtime::Backend::is_supported_property(const Property prop) const
+bool runtime::Backend::is_supported_property(const Property /* prop */) const
 {
 return false;
 }
-void runtime::Backend::remove_compiled_function(std::shared_ptr<Executable> exec)
+void runtime::Backend::remove_compiled_function(std::shared_ptr<Executable> /* exec */)
 {
 }
-std::shared_ptr<runtime::Executable> runtime::Backend::load(istream& input_stream)
+std::shared_ptr<runtime::Executable> runtime::Backend::load(istream& /* input_stream */)
 {
 throw runtime_error("load operation unimplemented.");
 }
-bool runtime::Backend::is_device_memory(void* ptr)
+bool runtime::Backend::is_device_memory(void* /* ptr */)
 {
 // override this method for each supported backend to determine if the passed pointer is in
 // device pinned memory or not
@@ -146,7 +147,7 @@ const string& runtime::Backend::get_backend_shared_library_search_directory()
 return s_backend_shared_library_search_directory;
 }
-bool runtime::Backend::set_config(const map<string, string>& config, string& error)
+bool runtime::Backend::set_config(const map<string, string>& /* config */, string& error)
 {
 error = "set_config not supported";
 return false;
...
@@ -159,7 +159,7 @@ public:
 virtual Allocator* get_host_memory_allocator() { return nullptr; }
 /// \brief Set the host memory allocator to be used by the backend
 /// \param allocator is pointer to host memory allocator object
-virtual void set_host_memory_allocator(Allocator* allocator) {}
+virtual void set_host_memory_allocator(Allocator* allocator) { (void)allocator; }
 /// \brief Returns memory allocator used by backend for device allocations
 virtual Allocator* get_device_memory_allocator()
 {
...
@@ -119,29 +119,30 @@ vector<runtime::PerformanceCounter> runtime::Executable::get_performance_data()
 return vector<PerformanceCounter>();
 }
-void runtime::Executable::save(std::ostream& output_stream)
+void runtime::Executable::save(std::ostream& /* output_stream */)
 {
 throw runtime_error("save opertion unimplemented.");
 }
-shared_ptr<runtime::Tensor> runtime::Executable::create_input_tensor(size_t input_index)
+shared_ptr<runtime::Tensor> runtime::Executable::create_input_tensor(size_t /* input_index */)
 {
 throw runtime_error("create_input_tensor unimplemented");
 }
-shared_ptr<runtime::Tensor> runtime::Executable::create_output_tensor(size_t output_index)
+shared_ptr<runtime::Tensor> runtime::Executable::create_output_tensor(size_t /* output_index */)
 {
 throw runtime_error("create_output_tensor unimplemented");
 }
-vector<shared_ptr<runtime::Tensor>> runtime::Executable::create_input_tensor(size_t input_index,
-size_t pipeline_depth)
+vector<shared_ptr<runtime::Tensor>>
+runtime::Executable::create_input_tensor(size_t /* input_index */, size_t /* pipeline_depth */)
 {
 throw runtime_error("create_input_tensor unimplemented");
 }
-vector<shared_ptr<runtime::Tensor>> runtime::Executable::create_output_tensor(size_t output_index,
-size_t pipeline_depth)
+vector<shared_ptr<runtime::Tensor>>
+runtime::Executable::create_output_tensor(size_t /* output_index */,
+size_t /* pipeline_depth */)
 {
 throw runtime_error("create_output_tensor unimplemented");
 }
@@ -32,7 +32,7 @@ runtime::BackendConstructor* runtime::interpreter::get_backend_constructor_point
 class INTBackendConstructor : public runtime::BackendConstructor
 {
 public:
-std::shared_ptr<runtime::Backend> create(const std::string& config) override
+std::shared_ptr<runtime::Backend> create(const std::string& /* config */) override
 {
 return std::make_shared<runtime::interpreter::INTBackend>();
 }
...
@@ -37,7 +37,7 @@ extern "C" runtime::BackendConstructor* get_backend_constructor_pointer()
 class LocalBackendConstructor : public runtime::BackendConstructor
 {
 public:
-std::shared_ptr<runtime::Backend> create(const std::string& config) override
+std::shared_ptr<runtime::Backend> create(const std::string& /* config */) override
 {
 return std::make_shared<runtime::nop::NOPBackend>();
 }
@@ -69,7 +69,7 @@ shared_ptr<runtime::Executable>
 }
 runtime::nop::NOPExecutable::NOPExecutable(shared_ptr<Function> function,
-bool enable_performance_collection)
+bool /* enable_performance_collection */)
 {
 pass::Manager pass_manager;
 pass_manager.register_pass<pass::AssignLayout<DenseTensorLayout>>();
@@ -78,8 +78,8 @@ runtime::nop::NOPExecutable::NOPExecutable(shared_ptr<Function> function,
 set_parameters_and_results(*function);
 }
-bool runtime::nop::NOPExecutable::call(const vector<shared_ptr<runtime::Tensor>>& outputs,
-const vector<shared_ptr<runtime::Tensor>>& inputs)
+bool runtime::nop::NOPExecutable::call(const vector<shared_ptr<runtime::Tensor>>& /* outputs */,
+const vector<shared_ptr<runtime::Tensor>>& /* inputs */)
 {
 return true;
 }
@@ -53,7 +53,8 @@ bool ngraph::runtime::plaidml::PlaidML_Backend::is_supported(const Node& node) c
 return m_compiler.is_supported(node);
 }
-bool ngraph::runtime::plaidml::PlaidML_Backend::is_supported_property(const Property prop) const
+bool ngraph::runtime::plaidml::PlaidML_Backend::is_supported_property(
+const Property /* prop */) const
 {
 return false;
 }
...
@@ -40,7 +40,6 @@ void ngraph::runtime::plaidml::ImplGroupConvolution::Apply()
 const auto& image = op_input(0);
 const auto& filter = op_input(1);
-auto rank = op().get_input_shape(0).size() - 2;
 const auto& groups = op().get_groups();
 const auto& padding_above = op().get_padding_above();
 const auto& padding_below = op().get_padding_below();
...
@@ -127,7 +127,7 @@ namespace ngraph
 template <typename T>
 void batch_norm_backprop(double eps,
 const T* gamma,
-const T* beta,
+const T* /* beta */,
 const T* input,
 const T* mean,
 const T* variance,
...
@@ -45,7 +45,7 @@ namespace ngraph
 }
 template <typename T>
-typename std::enable_if<std::is_integral<T>::value, bool>::type is_finite(T x)
+typename std::enable_if<std::is_integral<T>::value, bool>::type is_finite(T /* x */)
 {
 return true;
 }
...
@@ -115,6 +115,7 @@ namespace ngraph
 void write(const void* p, size_t offset, size_t n)
 NGRAPH_DEPRECATED("Use two-parameter write")
 {
+(void)offset;
 write(p, n);
 }
@@ -126,6 +127,7 @@ namespace ngraph
 void read(void* p, size_t offset, size_t n) const
 NGRAPH_DEPRECATED("Use two-parameter read")
 {
+(void)offset;
 read(p, n);
 }
...
@@ -107,8 +107,11 @@ std::vector<const element::Type*> element::Type::get_known_types()
 return rc;
 }
-element::Type::Type(
-size_t bitwidth, bool is_real, bool is_signed, bool is_quantized, const std::string& cname)
+element::Type::Type(size_t bitwidth,
+bool is_real,
+bool is_signed,
+bool is_quantized,
+const std::string& /* cname */)
 {
 for (auto& t : get_type_info_map())
 {
...
@@ -20,7 +20,7 @@
 using namespace std;
 using namespace ngraph;
-Strides ngraph::conv_default_strides(const Node* node,
+Strides ngraph::conv_default_strides(const Node* /* node */,
 const PartialShape& data_batch_shape,
 const PartialShape& filters_shape)
 {
@@ -42,7 +42,7 @@ Strides ngraph::conv_default_strides(const Node* node,
 return Strides(rank, 1);
 }
-CoordinateDiff ngraph::conv_default_padding(const Node* node,
+CoordinateDiff ngraph::conv_default_padding(const Node* /* node */,
 const PartialShape& data_batch_shape,
 const PartialShape& filters_shape)
 {
...
@@ -109,7 +109,7 @@ vector<runtime::PerformanceCounter> run_benchmark_pipelined(shared_ptr<Function>
 size_t iterations,
 bool timing_detail,
 int warmup_iterations,
-bool copy_data)
+bool /* copy_data */)
 {
 constexpr size_t pipeline_depth = 2;
 s_iterations = iterations;
...
@@ -308,7 +308,7 @@ TEST(tracer, basic)
 ngraph::runtime::cpu::CPU_Debugger dbg(*cf);
 int good_or_bad_value = -777;
-auto add_tracer = [&good_or_bad_value](void** values, const std::string& name) {
+auto add_tracer = [&good_or_bad_value](void** values, const std::string& /* name */) {
 ASSERT_EQ(static_cast<int*>(values[0])[0], good_or_bad_value);
 };
@@ -344,7 +344,7 @@ TEST(tracer, count_tracepoint)
 size_t offset = 5;
 std::function<void(void**, const std::string&)> callback =
-[&num_iterations, offset](void** values, const std::string& name) {
+[&num_iterations, offset](void** values, const std::string& /* name */) {
 ASSERT_EQ(static_cast<int*>(values[0])[0], num_iterations - 1 + offset);
 };
@@ -385,7 +385,8 @@ TEST(tracer, conditional_tracepoint)
 size_t offset = 5;
 int countdown = num_iterations;
-auto add_tracer = [&countdown, num_iterations, offset](void** values, const std::string& name) {
+auto add_tracer = [&countdown, num_iterations, offset](void** values,
+const std::string& /* name */) {
 if (countdown-- == 0)
 {
 ASSERT_EQ(static_cast<int*>(values[0])[0], num_iterations - 1 + offset);
...
@@ -35,7 +35,7 @@ TEST(pass_manager, add)
 auto graph = make_test_graph();
 size_t node_count = 0;
-traverse_nodes(graph, [&](shared_ptr<Node> node) { node_count++; });
+traverse_nodes(graph, [&](shared_ptr<Node> /* node */) { node_count++; });
 pass_manager.run_passes(graph);
 auto sorted = graph->get_ordered_ops();
 EXPECT_EQ(node_count, sorted.size());
@@ -51,7 +51,7 @@ namespace
 : FunctionPass()
 {
 }
-bool run_on_function(std::shared_ptr<ngraph::Function> f) override { return false; }
+bool run_on_function(std::shared_ptr<ngraph::Function> /* f */) override { return false; }
 };
 }
...
@@ -305,7 +305,7 @@ TEST(pattern, matcher)
 ASSERT_TRUE(n.match(any, abs));
 ASSERT_EQ(n.get_matched_nodes(), (NodeVector{abs, a}));
-auto false_pred = [](std::shared_ptr<Node> no) { return false; };
+auto false_pred = [](std::shared_ptr<Node> /* no */) { return false; };
 auto any_false = std::make_shared<pattern::op::Skip>(a, false_pred);
 ASSERT_TRUE(n.match(any_false, a));
 ASSERT_EQ(n.get_matched_nodes(), (NodeVector{a, a}));
...
@@ -24,7 +24,7 @@ using namespace ngraph;
 //
 // Tests for binary elementwise ops.
 //
-void test_binary(std::string node_type,
+void test_binary(std::string /* node_type */,
 shared_ptr<Node>(f)(const shared_ptr<Node>& x, const shared_ptr<Node>& y))
 {
 // Check for bad arguments
@@ -115,7 +115,7 @@ TEST(type_prop, subtract_bad_arguments)
 //
 // Tests for binary elementwise logical ops.
 //
-void test_binary_logical(std::string node_type,
+void test_binary_logical(std::string /* node_type */,
 shared_ptr<Node>(f)(const shared_ptr<Node>& x, const shared_ptr<Node>& y))
 {
 // Check for bad arguments
...
@@ -67,14 +67,14 @@ namespace ngraph
 // For a scalar, nothing to do.
 template <typename T, size_t N>
 typename std::enable_if<(N == 0), void>::type
-fill_shape(Shape& shape, const NestedInitializerList<T, N>& inits)
+fill_shape(Shape& /* shape */, const NestedInitializerList<T, N>& /* inits */)
 {
 }
 // Check that the inits match the shape
 template <typename T, size_t N>
 typename std::enable_if<(N == 0), void>::type
-check_shape(const Shape& shape, const NestedInitializerList<T, N>& inits)
+check_shape(const Shape& shape, const NestedInitializerList<T, N>& /* inits */)
 {
 if (shape.size() != 0)
 {
...