Skip to content
Projects
Groups
Snippets
Help
Loading...
Sign in / Register
Toggle navigation
N
ngraph
Project
Project
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Packages
Packages
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
submodule
ngraph
Commits
5e607081
Unverified
Commit
5e607081
authored
Sep 12, 2019
by
Scott Cyphers
Committed by
GitHub
Sep 12, 2019
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Unnused parameter cleanup (#3603)
parent
5dd1e07d
Hide whitespace changes
Inline
Side-by-side
Showing
58 changed files
with
184 additions
and
146 deletions
+184
-146
quantization.cpp
src/ngraph/builder/quantization.cpp
+4
-4
null.hpp
src/ngraph/distributed/null.hpp
+17
-14
null_node.cpp
src/ngraph/frontend/onnx_import/core/null_node.cpp
+1
-1
onnxifi.cpp
src/ngraph/frontend/onnxifi/onnxifi.cpp
+36
-28
node.hpp
src/ngraph/node.hpp
+4
-1
batch_norm.hpp
src/ngraph/op/batch_norm.hpp
+2
-2
constant.cpp
src/ngraph/op/constant.cpp
+5
-5
constant.hpp
src/ngraph/op/constant.hpp
+1
-1
dequantize.cpp
src/ngraph/op/dequantize.cpp
+2
-1
embedding_lookup.hpp
src/ngraph/op/embedding_lookup.hpp
+2
-1
dyn_broadcast.cpp
src/ngraph/op/experimental/dyn_broadcast.cpp
+2
-1
dyn_pad.cpp
src/ngraph/op/experimental/dyn_pad.cpp
+3
-2
dyn_replace_slice.cpp
src/ngraph/op/experimental/dyn_replace_slice.cpp
+2
-1
dyn_reshape.cpp
src/ngraph/op/experimental/dyn_reshape.cpp
+2
-1
dyn_slice.cpp
src/ngraph/op/experimental/dyn_slice.cpp
+2
-1
generate_mask.hpp
src/ngraph/op/experimental/generate_mask.hpp
+2
-2
range.cpp
src/ngraph/op/experimental/range.cpp
+3
-3
tile.cpp
src/ngraph/op/experimental/tile.cpp
+1
-1
transpose.cpp
src/ngraph/op/experimental/transpose.cpp
+2
-1
group_conv.cpp
src/ngraph/op/fused/group_conv.cpp
+2
-1
group_conv_transpose.cpp
src/ngraph/op/fused/group_conv_transpose.cpp
+2
-2
gather.hpp
src/ngraph/op/gather.hpp
+2
-1
gather_nd.hpp
src/ngraph/op/gather_nd.hpp
+2
-1
lrn.cpp
src/ngraph/op/lrn.cpp
+1
-1
pad.cpp
src/ngraph/op/pad.cpp
+1
-1
parameter.cpp
src/ngraph/op/parameter.cpp
+1
-1
quantize.cpp
src/ngraph/op/quantize.cpp
+2
-1
quantized_convolution.cpp
src/ngraph/op/quantized_convolution.cpp
+2
-2
scatter_add.hpp
src/ngraph/op/scatter_add.hpp
+2
-1
scatter_nd_add.hpp
src/ngraph/op/scatter_nd_add.hpp
+2
-1
topk.cpp
src/ngraph/op/topk.cpp
+1
-1
activation_functions.cpp
src/ngraph/op/util/activation_functions.cpp
+3
-3
fused_op.cpp
src/ngraph/op/util/fused_op.cpp
+2
-1
index_reduction.cpp
src/ngraph/op/util/index_reduction.cpp
+2
-2
manager.cpp
src/ngraph/pass/manager.cpp
+1
-1
memory_visualize.cpp
src/ngraph/pass/memory_visualize.cpp
+3
-3
reshape_sinking.cpp
src/ngraph/pass/reshape_sinking.cpp
+7
-6
visualize_tree.cpp
src/ngraph/pass/visualize_tree.cpp
+1
-1
pattern.hpp
src/ngraph/pattern/op/pattern.hpp
+1
-1
allocator.cpp
src/ngraph/runtime/allocator.cpp
+1
-1
backend.cpp
src/ngraph/runtime/backend.cpp
+11
-10
backend.hpp
src/ngraph/runtime/backend.hpp
+1
-1
executable.cpp
src/ngraph/runtime/executable.cpp
+8
-7
int_backend.cpp
src/ngraph/runtime/interpreter/int_backend.cpp
+1
-1
nop_backend.cpp
src/ngraph/runtime/nop/nop_backend.cpp
+4
-4
plaidml_backend.cpp
src/ngraph/runtime/plaidml/plaidml_backend.cpp
+2
-1
plaidml_ops_group_convolution.cpp
src/ngraph/runtime/plaidml/plaidml_ops_group_convolution.cpp
+0
-1
batch_norm.hpp
src/ngraph/runtime/reference/batch_norm.hpp
+1
-1
sum.hpp
src/ngraph/runtime/reference/sum.hpp
+1
-1
tensor.hpp
src/ngraph/runtime/tensor.hpp
+2
-0
element_type.cpp
src/ngraph/type/element_type.cpp
+5
-2
validation_util.cpp
src/ngraph/validation_util.cpp
+2
-2
benchmark_pipelined.cpp
src/tools/nbench/benchmark_pipelined.cpp
+1
-1
cpu_debugger.cpp
test/cpu_debugger.cpp
+4
-3
pass_manager.cpp
test/pass_manager.cpp
+2
-2
pattern.cpp
test/pattern.cpp
+1
-1
binary_elementwise.cpp
test/type_prop/binary_elementwise.cpp
+2
-2
ndarray.hpp
test/util/ndarray.hpp
+2
-2
No files found.
src/ngraph/builder/quantization.cpp
View file @
5e607081
...
...
@@ -133,8 +133,8 @@ namespace ngraph
const
Shape
&
padding_below
,
const
Shape
&
padding_above
,
bool
include_padding_in_avg_computation
,
const
Output
<
Node
>&
min
,
const
Output
<
Node
>&
max
)
const
Output
<
Node
>&
/* min */
,
const
Output
<
Node
>&
/* max */
)
{
return
make_shared
<
op
::
QuantizedAvgPool
>
(
input
,
window_shape
,
...
...
@@ -222,8 +222,8 @@ namespace ngraph
const
Strides
&
window_movement_strides
,
const
Shape
&
padding_below
,
const
Shape
&
padding_above
,
const
Output
<
Node
>&
min
,
const
Output
<
Node
>&
max
)
const
Output
<
Node
>&
/* min */
,
const
Output
<
Node
>&
/* max */
)
{
return
make_shared
<
op
::
QuantizedMaxPool
>
(
input
,
window_shape
,
window_movement_strides
,
padding_below
,
padding_above
);
...
...
src/ngraph/distributed/null.hpp
View file @
5e607081
...
...
@@ -35,32 +35,35 @@ namespace ngraph
{
std
::
printf
(
"%s: %s
\n
"
,
timestamp
.
c_str
(),
buf
.
data
());
}
void
all_reduce
(
void
*
in
,
void
*
out
,
element
::
Type_t
element_type
,
reduction
::
Type
reduce_type
,
size_t
count
)
override
void
all_reduce
(
void
*
/* in */
,
void
*
/* out */
,
element
::
Type_t
/* element_type */
,
reduction
::
Type
/* reduce_type */
,
size_t
/* count */
)
override
{
throw
ngraph_error
(
"Distributed Library not supported/mentioned"
);
}
void
broadcast
(
void
*
in
,
element
::
Type_t
element_type
,
size_t
count
,
int
root_id
)
override
void
broadcast
(
void
*
/* in */
,
element
::
Type_t
/* element_type */
,
size_t
/* count */
,
int
/* root_id */
)
override
{
throw
ngraph_error
(
"Distributed Library not supported/mentioned"
);
}
void
recv
(
void
*
in
,
element
::
Type_t
element_type
,
size_t
count
,
int
src_id
)
override
void
recv
(
void
*
/* in */
,
element
::
Type_t
/* element_type */
,
size_t
/* count */
,
int
/* src_id*/
)
override
{
throw
ngraph_error
(
"Distributed Library not supported/mentioned"
);
}
void
send
(
const
void
*
in
,
element
::
Type_t
element_type
,
size_t
count
,
int
dest_id
)
override
void
send
(
const
void
*
/* in */
,
element
::
Type_t
/* element_type */
,
size_t
/* count */
,
int
/* dest_id */
)
override
{
throw
ngraph_error
(
"Distributed Library not supported/mentioned"
);
}
...
...
src/ngraph/frontend/onnx_import/core/null_node.cpp
View file @
5e607081
...
...
@@ -25,7 +25,7 @@ namespace ngraph
{
const
std
::
string
NullNode
::
type_name
{
"NullNode"
};
std
::
shared_ptr
<
Node
>
NullNode
::
copy_with_new_args
(
const
NodeVector
&
new_args
)
const
std
::
shared_ptr
<
Node
>
NullNode
::
copy_with_new_args
(
const
NodeVector
&
/* new_args */
)
const
{
return
std
::
make_shared
<
NullNode
>
();
}
...
...
src/ngraph/frontend/onnxifi/onnxifi.cpp
View file @
5e607081
...
...
@@ -48,85 +48,93 @@ ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI
}
}
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxReleaseBackendID
(
onnxBackendID
backendID
)
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxReleaseBackendID
(
onnxBackendID
/* backendID */
)
{
return
ONNXIFI_STATUS_INTERNAL_ERROR
;
}
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxGetBackendInfo
(
onnxBackendID
backendID
,
onnxBackendInfo
infoType
,
void
*
infoValue
,
std
::
size_t
*
infoValueSize
)
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxGetBackendInfo
(
onnxBackendID
/* backendID */
,
onnxBackendInfo
/* infoType */
,
void
*
/* infoValue */
,
std
::
size_t
*
/* infoValueSize */
)
{
return
ONNXIFI_STATUS_BACKEND_UNAVAILABLE
;
}
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxGetBackendCompatibility
(
onnxBackendID
backendID
,
std
::
size_t
onnxModelSize
,
const
void
*
onnxModel
)
onnxBackendID
/* backendID */
,
std
::
size_t
/* onnxModelSize */
,
const
void
*
/* onnxModel */
)
{
return
ONNXIFI_STATUS_BACKEND_UNAVAILABLE
;
}
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxInitBackend
(
onnxBackendID
backendID
,
const
uint64_t
*
auxPropertiesList
,
onnxBackend
*
backend
)
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxInitBackend
(
onnxBackendID
/* backendID */
,
const
uint64_t
*
/* auxPropertiesList */
,
onnxBackend
*
/* backend */
)
{
return
ONNXIFI_STATUS_BACKEND_UNAVAILABLE
;
}
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxReleaseBackend
(
onnxBackend
backend
)
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxReleaseBackend
(
onnxBackend
/* backend */
)
{
return
ONNXIFI_STATUS_INTERNAL_ERROR
;
}
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxInitEvent
(
onnxBackend
backend
,
onnxEvent
*
event
)
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxInitEvent
(
onnxBackend
/* backend */
,
onnxEvent
*
/* event */
)
{
return
ONNXIFI_STATUS_BACKEND_UNAVAILABLE
;
}
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxSignalEvent
(
onnxEvent
event
)
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxSignalEvent
(
onnxEvent
/* event */
)
{
return
ONNXIFI_STATUS_BACKEND_UNAVAILABLE
;
}
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxWaitEvent
(
onnxEvent
event
)
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxWaitEvent
(
onnxEvent
/* event */
)
{
return
ONNXIFI_STATUS_BACKEND_UNAVAILABLE
;
}
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxReleaseEvent
(
onnxEvent
event
)
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxReleaseEvent
(
onnxEvent
/* event */
)
{
return
ONNXIFI_STATUS_INTERNAL_ERROR
;
}
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxInitGraph
(
onnxBackend
backend
,
const
uint64_t
*
auxPropertiesList
,
std
::
size_t
onnxModelSize
,
const
void
*
onnxModel
,
uint32_t
weightsCount
,
const
onnxTensorDescriptorV1
*
weightDescriptors
,
onnxGraph
*
graph
)
onnxInitGraph
(
onnxBackend
/* backend */
,
const
uint64_t
*
/* auxPropertiesList */
,
std
::
size_t
/* onnxModelSize */
,
const
void
*
/* onnxModel */
,
uint32_t
/* weightsCount */
,
const
onnxTensorDescriptorV1
*
/* weightDescriptors */
,
onnxGraph
*
/* graph */
)
{
return
ONNXIFI_STATUS_BACKEND_UNAVAILABLE
;
}
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxSetGraphIO
(
onnxGraph
graph
,
std
::
uint32_t
inputsCount
,
const
onnxTensorDescriptorV1
*
inputDescriptors
,
std
::
uint32_t
outputsCount
,
const
onnxTensorDescriptorV1
*
outputDescriptors
)
onnxSetGraphIO
(
onnxGraph
/* graph */
,
std
::
uint32_t
/* inputsCount */
,
const
onnxTensorDescriptorV1
*
/* inputDescriptors */
,
std
::
uint32_t
/* outputsCount */
,
const
onnxTensorDescriptorV1
*
/* outputDescriptors */
)
{
return
ONNXIFI_STATUS_BACKEND_UNAVAILABLE
;
}
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxRunGraph
(
onnxGraph
graph
,
const
onnxMemoryFenceV1
*
inputFence
,
onnxMemoryFenceV1
*
outputFence
)
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxRunGraph
(
onnxGraph
/* graph */
,
const
onnxMemoryFenceV1
*
/* inputFence */
,
onnxMemoryFenceV1
*
/* outputFence */
)
{
return
ONNXIFI_STATUS_BACKEND_UNAVAILABLE
;
}
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxReleaseGraph
(
onnxGraph
graph
)
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT
onnxStatus
ONNXIFI_ABI
onnxReleaseGraph
(
onnxGraph
/* graph */
)
{
return
ONNXIFI_STATUS_INTERNAL_ERROR
;
}
...
...
src/ngraph/node.hpp
View file @
5e607081
...
...
@@ -130,7 +130,10 @@ namespace ngraph
/// \param output_size Number of outputs for this node
Node
(
const
NodeVector
&
arguments
,
size_t
output_size
=
1
);
virtual
void
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
{}
virtual
void
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
{
}
/// \brief Moves nodes that would be deleted from inputs to nodes to avoid stack overflows on deep networks.
void
safe_delete
(
NodeVector
&
nodes
,
bool
recurse
);
...
...
src/ngraph/op/batch_norm.hpp
View file @
5e607081
...
...
@@ -143,8 +143,8 @@ namespace ngraph
copy_with_new_args
(
const
NodeVector
&
new_args
)
const
override
;
protected
:
virtual
void
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
override
virtual
void
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
override
{
throw
ngraph_error
(
"Invalid operation"
);
}
...
...
src/ngraph/op/constant.cpp
View file @
5e607081
...
...
@@ -364,11 +364,11 @@ namespace ngraph
namespace
op
{
template
<>
void
Constant
::
write_to_buffer
<
string
>
(
const
element
::
Type
&
target_type
,
const
Shape
&
target_shape
,
const
vector
<
string
>&
source
,
void
*
target
,
size_t
target_element_count
)
void
Constant
::
write_to_buffer
<
string
>
(
const
element
::
Type
&
/* target_type */
,
const
Shape
&
/* target_shape */
,
const
vector
<
string
>&
/* source */
,
void
*
/* target */
,
size_t
/* target_element_count */
)
{
}
}
...
...
src/ngraph/op/constant.hpp
View file @
5e607081
...
...
@@ -275,7 +275,7 @@ namespace ngraph
template
<
typename
T
>
void
write_to_buffer
(
const
element
::
Type
&
target_type
,
const
Shape
&
target_shape
,
const
Shape
&
/* target_shape */
,
const
std
::
vector
<
T
>&
source
,
void
*
target
,
size_t
target_element_count
)
...
...
src/ngraph/op/dequantize.cpp
View file @
5e607081
...
...
@@ -157,7 +157,8 @@ shared_ptr<Node> op::Dequantize::copy_with_new_args(const NodeVector& new_args)
return
make_shared
<
Dequantize
>
(
new_args
.
at
(
0
),
new_args
.
at
(
1
),
new_args
.
at
(
2
),
m_type
,
m_axes
);
}
void
op
::
Dequantize
::
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
void
op
::
Dequantize
::
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
{
throw
ngraph_error
(
"Forward-propagation-only operation"
);
}
src/ngraph/op/embedding_lookup.hpp
View file @
5e607081
...
...
@@ -49,7 +49,8 @@ namespace ngraph
void
validate_and_infer_types
()
override
;
void
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
override
void
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
override
{
throw
ngraph_error
(
"Not yet implemented"
);
}
...
...
src/ngraph/op/experimental/dyn_broadcast.cpp
View file @
5e607081
...
...
@@ -133,7 +133,8 @@ shared_ptr<Node> op::DynBroadcast::copy_with_new_args(const NodeVector& new_args
}
// TODO: This function is not implemented!
void
op
::
DynBroadcast
::
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
void
op
::
DynBroadcast
::
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
{
throw
ngraph_error
(
"generate_adjoints not implemented for DynBroadcast"
);
}
src/ngraph/op/experimental/dyn_pad.cpp
View file @
5e607081
...
...
@@ -25,7 +25,7 @@ op::DynPad::DynPad(const std::shared_ptr<Node>& arg,
const
std
::
shared_ptr
<
Node
>&
padding_below
,
const
std
::
shared_ptr
<
Node
>&
padding_above
,
const
std
::
shared_ptr
<
Node
>&
padding_value
,
op
::
PadMode
pad_mode
)
op
::
PadMode
/* pad_mode */
)
:
Op
(
check_single_output_args
({
arg
,
padding_below
,
padding_above
,
padding_value
}))
{
constructor_validate_and_infer_types
();
...
...
@@ -110,7 +110,8 @@ shared_ptr<Node> op::DynPad::copy_with_new_args(const NodeVector& new_args) cons
}
// TODO: This function is not implemented!
void
op
::
DynPad
::
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
void
op
::
DynPad
::
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
{
throw
ngraph_error
(
"generate_adjoints not implemented for DynPad"
);
}
src/ngraph/op/experimental/dyn_replace_slice.cpp
View file @
5e607081
...
...
@@ -154,7 +154,8 @@ shared_ptr<Node> op::DynReplaceSlice::copy_with_new_args(const NodeVector& new_a
m_ellipsis_mask
);
}
void
op
::
DynReplaceSlice
::
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
void
op
::
DynReplaceSlice
::
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
{
throw
ngraph_error
(
"generate_adjoints not implemented for DynReplaceSlice"
);
}
src/ngraph/op/experimental/dyn_reshape.cpp
View file @
5e607081
...
...
@@ -156,7 +156,8 @@ shared_ptr<Node> op::DynReshape::copy_with_new_args(const NodeVector& new_args)
return
make_shared
<
DynReshape
>
(
new_args
.
at
(
0
),
new_args
.
at
(
1
),
m_zero_flag
);
}
void
op
::
DynReshape
::
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
void
op
::
DynReshape
::
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
{
throw
ngraph_error
(
"generate_adjoints not implemented for DynReshape"
);
}
src/ngraph/op/experimental/dyn_slice.cpp
View file @
5e607081
...
...
@@ -125,7 +125,8 @@ shared_ptr<Node> op::DynSlice::copy_with_new_args(const NodeVector& new_args) co
m_ellipsis_mask
);
}
void
op
::
DynSlice
::
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
void
op
::
DynSlice
::
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
{
throw
ngraph_error
(
"generate_adjoints not implemented for DynSlice"
);
}
src/ngraph/op/experimental/generate_mask.hpp
View file @
5e607081
...
...
@@ -77,8 +77,8 @@ namespace ngraph
void
validate_and_infer_types
()
override
;
protected
:
virtual
void
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
override
virtual
void
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
override
{
}
...
...
src/ngraph/op/experimental/range.cpp
View file @
5e607081
...
...
@@ -36,14 +36,14 @@ op::Range::Range(const Output<Node>& start, const Output<Node>& stop, const Outp
template
<
typename
T
>
static
typename
std
::
enable_if
<
std
::
is_integral
<
T
>::
value
,
void
>::
type
check_start
(
const
op
::
Range
*
node
,
T
start
)
check_start
(
const
op
::
Range
*
/* node */
,
T
/* start */
)
{
// Nothing to check for integral types.
}
template
<
typename
T
>
static
typename
std
::
enable_if
<
std
::
is_integral
<
T
>::
value
,
void
>::
type
check_stop
(
const
op
::
Range
*
node
,
T
stop
)
check_stop
(
const
op
::
Range
*
/* node */
,
T
/* stop */
)
{
// Nothing to check for integral types.
}
...
...
@@ -125,7 +125,7 @@ static
}
template
<
typename
T
>
static
PartialShape
infer_output_shape
(
const
op
::
Range
*
node
,
const
element
::
Type
&
et
)
static
PartialShape
infer_output_shape
(
const
op
::
Range
*
node
,
const
element
::
Type
&
/* et */
)
{
auto
const_start
=
dynamic_pointer_cast
<
op
::
Constant
>
(
node
->
get_argument
(
0
));
auto
const_stop
=
dynamic_pointer_cast
<
op
::
Constant
>
(
node
->
get_argument
(
1
));
...
...
src/ngraph/op/experimental/tile.cpp
View file @
5e607081
...
...
@@ -94,7 +94,7 @@ shared_ptr<Node> op::Tile::copy_with_new_args(const NodeVector& new_args) const
}
// TODO: This function is not implemented!
void
op
::
Tile
::
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
void
op
::
Tile
::
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
{
throw
ngraph_error
(
"generate_adjoints not implemented for Tile"
);
}
src/ngraph/op/experimental/transpose.cpp
View file @
5e607081
...
...
@@ -73,7 +73,8 @@ shared_ptr<Node> op::Transpose::copy_with_new_args(const NodeVector& new_args) c
// TODO(amprocte): This will require some way of inverting the permutation in-graph. (TensorFlow,
// for example, has an InvertPermutation op, but that doesn't feel very nGraph-y somehow.)
void
op
::
Transpose
::
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
void
op
::
Transpose
::
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
{
throw
ngraph_error
(
"generate_adjoints not implemented for Transpose"
);
}
src/ngraph/op/fused/group_conv.cpp
View file @
5e607081
...
...
@@ -178,7 +178,8 @@ NodeVector op::GroupConvolution::decompose_op() const
return
{
std
::
make_shared
<
ngraph
::
op
::
Concat
>
(
convolution_nodes
,
concatenation_axis
)};
}
void
op
::
GroupConvolution
::
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
void
op
::
GroupConvolution
::
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
{
throw
ngraph_error
(
"NYI"
);
}
src/ngraph/op/fused/group_conv_transpose.cpp
View file @
5e607081
...
...
@@ -328,8 +328,8 @@ NodeVector op::GroupConvolutionTranspose::decompose_op() const
}
}
void
op
::
GroupConvolutionTranspose
::
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
void
op
::
GroupConvolutionTranspose
::
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
{
throw
ngraph_error
(
"Generating adjoints is not yet implemented for GroupConvolutionTranspose node."
);
...
...
src/ngraph/op/gather.hpp
View file @
5e607081
...
...
@@ -42,7 +42,8 @@ namespace ngraph
void
validate_and_infer_types
()
override
;
void
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
override
void
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
override
{
throw
ngraph_error
(
"Not yet implemented"
);
}
...
...
src/ngraph/op/gather_nd.hpp
View file @
5e607081
...
...
@@ -40,7 +40,8 @@ namespace ngraph
void
validate_and_infer_types
()
override
;
void
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
override
void
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
override
{
throw
ngraph_error
(
"Not yet implemented"
);
}
...
...
src/ngraph/op/lrn.cpp
View file @
5e607081
...
...
@@ -52,7 +52,7 @@ shared_ptr<Node> op::LRN::copy_with_new_args(const NodeVector& new_args) const
return
make_shared
<
op
::
LRN
>
(
new_args
.
at
(
0
),
m_alpha
,
m_beta
,
m_bias
,
m_size
);
}
void
op
::
LRN
::
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
void
op
::
LRN
::
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
{
throw
ngraph_error
(
"NYI"
);
}
src/ngraph/op/pad.cpp
View file @
5e607081
...
...
@@ -161,7 +161,7 @@ shared_ptr<Node> op::Pad::copy_with_new_args(const NodeVector& new_args) const
and push that back.
*/
void
op
::
Pad
::
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
void
op
::
Pad
::
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
{
throw
invalid_argument
(
"Autodiff is not yet implemented for Pad"
);
}
...
...
src/ngraph/op/parameter.cpp
View file @
5e607081
...
...
@@ -47,7 +47,7 @@ shared_ptr<Node> op::Parameter::copy_with_new_args(const NodeVector& new_args) c
return
make_shared
<
Parameter
>
(
m_element_type
,
m_partial_shape
);
}
void
op
::
Parameter
::
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
void
op
::
Parameter
::
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
deltas
)
{
auto
delta
=
deltas
.
at
(
0
);
}
...
...
src/ngraph/op/quantize.cpp
View file @
5e607081
...
...
@@ -160,7 +160,8 @@ shared_ptr<Node> op::Quantize::copy_with_new_args(const NodeVector& new_args) co
new_args
.
at
(
0
),
new_args
.
at
(
1
),
new_args
.
at
(
2
),
m_type
,
m_axes
,
m_round_mode
);
}
void
op
::
Quantize
::
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
void
op
::
Quantize
::
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
{
throw
ngraph_error
(
"Forward-propagation-only operation"
);
}
src/ngraph/op/quantized_convolution.cpp
View file @
5e607081
...
...
@@ -196,8 +196,8 @@ shared_ptr<Node> op::QuantizedConvolution::copy_with_new_args(const NodeVector&
m_output_axes
));
}
void
op
::
QuantizedConvolution
::
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
void
op
::
QuantizedConvolution
::
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
{
throw
ngraph_error
(
"Forward-propagation-only operation"
);
}
src/ngraph/op/scatter_add.hpp
View file @
5e607081
...
...
@@ -42,7 +42,8 @@ namespace ngraph
void
validate_and_infer_types
()
override
;
void
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
override
void
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
override
{
throw
ngraph_error
(
"Not yet implemented"
);
}
...
...
src/ngraph/op/scatter_nd_add.hpp
View file @
5e607081
...
...
@@ -42,7 +42,8 @@ namespace ngraph
void
validate_and_infer_types
()
override
;
void
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
override
void
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
override
{
throw
ngraph_error
(
"Not yet implemented"
);
}
...
...
src/ngraph/op/topk.cpp
View file @
5e607081
...
...
@@ -137,7 +137,7 @@ shared_ptr<Node> op::TopK::copy_with_new_args(const NodeVector& new_args) const
new_args
.
at
(
0
),
new_args
.
at
(
1
),
m_top_k_axis
,
m_index_element_type
,
m_compute_max
,
m_sort
);
}
void
op
::
TopK
::
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
void
op
::
TopK
::
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
{
throw
ngraph_error
(
"Forward-propagation-only operation"
);
}
src/ngraph/op/util/activation_functions.cpp
View file @
5e607081
...
...
@@ -29,17 +29,17 @@
using
namespace
std
;
using
namespace
ngraph
;
static
shared_ptr
<
Node
>
sigmoid
(
const
shared_ptr
<
Node
>&
arg
,
float
alpha
,
float
beta
)
static
shared_ptr
<
Node
>
sigmoid
(
const
shared_ptr
<
Node
>&
arg
,
float
/* alpha */
,
float
/* beta */
)
{
return
make_shared
<
op
::
Sigmoid
>
(
arg
);
}
static
shared_ptr
<
Node
>
tanh
(
const
shared_ptr
<
Node
>&
arg
,
float
alpha
,
float
beta
)
static
shared_ptr
<
Node
>
tanh
(
const
shared_ptr
<
Node
>&
arg
,
float
/* alpha */
,
float
/* beta */
)
{
return
make_shared
<
op
::
Tanh
>
(
arg
);
}
static
shared_ptr
<
Node
>
relu
(
const
shared_ptr
<
Node
>&
arg
,
float
alpha
,
float
beta
)
static
shared_ptr
<
Node
>
relu
(
const
shared_ptr
<
Node
>&
arg
,
float
/* alpha */
,
float
/* beta */
)
{
return
make_shared
<
op
::
Relu
>
(
arg
);
}
...
...
src/ngraph/op/util/fused_op.cpp
View file @
5e607081
...
...
@@ -65,7 +65,8 @@ void op::util::FusedOp::validate_and_infer_types()
post_validate_and_infer_types
();
}
void
op
::
util
::
FusedOp
::
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
void
op
::
util
::
FusedOp
::
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/*deltas*/
)
{
// TODO
throw
ngraph_error
(
"Autodiff on fused ops not supported yet"
);
...
...
src/ngraph/op/util/index_reduction.cpp
View file @
5e607081
...
...
@@ -120,8 +120,8 @@ void op::util::IndexReduction::validate_and_infer_types()
set_output_type
(
0
,
m_index_element_type
,
output_shape
);
}
void
op
::
util
::
IndexReduction
::
generate_adjoints
(
autodiff
::
Adjoints
&
adjoints
,
const
NodeVector
&
deltas
)
void
op
::
util
::
IndexReduction
::
generate_adjoints
(
autodiff
::
Adjoints
&
/* adjoints */
,
const
NodeVector
&
/* deltas */
)
{
throw
ngraph_error
(
"Forward-propagation-only operation"
);
}
src/ngraph/pass/manager.cpp
View file @
5e607081
...
...
@@ -53,7 +53,7 @@ pass::Manager::~Manager()
{
}
void
pass
::
Manager
::
run_passes
(
shared_ptr
<
Function
>
func
,
bool
transitive
)
void
pass
::
Manager
::
run_passes
(
shared_ptr
<
Function
>
func
,
bool
/* transitive */
)
{
static
bool
profile_enabled
=
getenv
(
"NGRAPH_PROFILE_PASS_ENABLE"
)
!=
nullptr
;
...
...
src/ngraph/pass/memory_visualize.cpp
View file @
5e607081
...
...
@@ -250,17 +250,17 @@ int pass::MemoryVisualize::compute_op_weight(const shared_ptr<Node> exop)
return
mass
;
}
size_t
pass
::
MemoryVisualize
::
memory_usage
(
shared_ptr
<
Node
>
node
)
size_t
pass
::
MemoryVisualize
::
memory_usage
(
shared_ptr
<
Node
>
/* node */
)
{
return
0
;
}
size_t
pass
::
MemoryVisualize
::
memory_footprint
(
shared_ptr
<
Node
>
node
)
size_t
pass
::
MemoryVisualize
::
memory_footprint
(
shared_ptr
<
Node
>
/* node */
)
{
return
0
;
}
size_t
pass
::
MemoryVisualize
::
memory_footprint
(
const
std
::
list
<
shared_ptr
<
Node
>>&
nodes
)
size_t
pass
::
MemoryVisualize
::
memory_footprint
(
const
std
::
list
<
shared_ptr
<
Node
>>&
/* nodes */
)
{
return
0
;
}
src/ngraph/pass/reshape_sinking.cpp
View file @
5e607081
...
...
@@ -326,7 +326,7 @@ static void sink_reshape(shared_ptr<op::Reshape> reshape,
static
void
sink_unary
(
shared_ptr
<
op
::
util
::
UnaryElementwiseArithmetic
>
n
,
ReshapeMap
&
reorders
,
set
<
shared_ptr
<
Node
>>&
reshapes_to_delete
)
set
<
shared_ptr
<
Node
>>&
/* reshapes_to_delete */
)
{
auto
arg_reshape
=
read_reshapemap
(
reorders
,
n
->
get_argument
(
0
));
NGRAPH_DEBUG
<<
"Propagating "
<<
describe_reshape
(
arg_reshape
)
<<
" for "
<<
n
->
get_name
();
...
...
@@ -373,7 +373,7 @@ static void sink_binary(shared_ptr<op::util::BinaryElementwiseArithmetic> binary
static
void
sink_slice
(
shared_ptr
<
op
::
Slice
>
n
,
ReshapeMap
&
reorders
,
set
<
shared_ptr
<
Node
>>&
reshapes_to_delete
)
set
<
shared_ptr
<
Node
>>&
/* reshapes_to_delete */
)
{
auto
arg_reshape
=
reorders
.
at
(
n
->
get_argument
(
0
));
auto
order
=
arg_reshape
->
get_input_order
();
...
...
@@ -399,8 +399,9 @@ static void sink_slice(shared_ptr<op::Slice> n,
write_reshapemap
(
reorders
,
new_slice
,
new_reshape
);
}
static
void
sink_pad
(
shared_ptr
<
op
::
Pad
>
n
,
ReshapeMap
&
reorders
,
set
<
shared_ptr
<
Node
>>&
reshapes_to_delete
)
static
void
sink_pad
(
shared_ptr
<
op
::
Pad
>
n
,
ReshapeMap
&
reorders
,
set
<
shared_ptr
<
Node
>>&
/* reshapes_to_delete */
)
{
auto
arg_reshape
=
reorders
.
at
(
n
->
get_argument
(
0
));
auto
order
=
arg_reshape
->
get_input_order
();
...
...
@@ -425,7 +426,7 @@ static void
}
static
void
sink_quantize
(
shared_ptr
<
op
::
Quantize
>
quantize
,
ReshapeMap
&
reorders
,
set
<
shared_ptr
<
Node
>>&
reshapes_to_delete
)
set
<
shared_ptr
<
Node
>>&
/* reshapes_to_delete */
)
{
auto
arg_reshape
=
reorders
.
at
(
quantize
->
get_argument
(
0
));
AxisSet
axes_in_def_order
=
...
...
@@ -492,7 +493,7 @@ static void sink_concat(shared_ptr<op::Concat> n,
static
void
sink_dequantize
(
shared_ptr
<
op
::
Dequantize
>
dequantize
,
ReshapeMap
&
reorders
,
set
<
shared_ptr
<
Node
>>&
reshapes_to_delete
)
set
<
shared_ptr
<
Node
>>&
/* reshapes_to_delete */
)
{
auto
arg_reshape
=
reorders
.
at
(
dequantize
->
get_argument
(
0
));
AxisSet
axes_in_def_order
=
...
...
src/ngraph/pass/visualize_tree.cpp
View file @
5e607081
...
...
@@ -148,7 +148,7 @@ private:
std
::
unordered_map
<
Node
*
,
int64_t
>
m_heights
;
};
static
std
::
string
label_edge
(
const
std
::
shared_ptr
<
Node
>&
src
,
static
std
::
string
label_edge
(
const
std
::
shared_ptr
<
Node
>&
/* src */
,
const
std
::
shared_ptr
<
Node
>&
dst
,
size_t
arg_index
,
int64_t
jump_distance
)
...
...
src/ngraph/pattern/op/pattern.hpp
View file @
5e607081
...
...
@@ -40,7 +40,7 @@ namespace ngraph
}
virtual
std
::
shared_ptr
<
Node
>
copy_with_new_args
(
const
NodeVector
&
new_args
)
const
override
copy_with_new_args
(
const
NodeVector
&
/* new_args */
)
const
override
{
throw
ngraph_error
(
"Uncopyable"
);
}
...
...
src/ngraph/runtime/allocator.cpp
View file @
5e607081
...
...
@@ -23,7 +23,7 @@ ngraph::runtime::Allocator::~Allocator()
class
ngraph
::
runtime
::
DefaultAllocator
:
public
ngraph
::
runtime
::
Allocator
{
public
:
void
*
malloc
(
size_t
size
,
size_t
alignment
)
void
*
malloc
(
size_t
size
,
size_t
/* alignment */
)
{
// If allocation succeeds, returns a pointer to the lowest (first) byte in the
// allocated memory block that is suitably aligned for any scalar type.
...
...
src/ngraph/runtime/backend.cpp
View file @
5e607081
...
...
@@ -62,7 +62,8 @@ runtime::Backend::~Backend()
{
}
std
::
shared_ptr
<
ngraph
::
Node
>
runtime
::
Backend
::
get_backend_op
(
const
std
::
string
&
op_name
,
...)
std
::
shared_ptr
<
ngraph
::
Node
>
runtime
::
Backend
::
get_backend_op
(
const
std
::
string
&
/* op_name */
,
...)
{
std
::
shared_ptr
<
ngraph
::
Node
>
dummy_node
(
nullptr
);
return
dummy_node
;
...
...
@@ -89,42 +90,42 @@ vector<string> runtime::Backend::get_registered_devices()
}
// Default implementation: dynamic tensors are an optional backend capability.
// Backends that support them override this method.
// \throws std::invalid_argument always, in the base class.
std::shared_ptr<ngraph::runtime::Tensor> runtime::Backend::create_dynamic_tensor(
    const ngraph::element::Type& /* element_type */, const PartialShape& /* shape */)
{
    throw std::invalid_argument("This backend does not support dynamic tensors");
}
// Compile overload taking a PassConfig. The base class ignores the pass
// configuration and forwards to the two-argument overload; backends that
// honor pass configuration override this.
std::shared_ptr<runtime::Executable> runtime::Backend::compile(
    std::shared_ptr<Function> func,
    ngraph::pass::PassConfig& /* pass_config */,
    bool enable_performance_data)
{
    return compile(func, enable_performance_data);
}
bool
runtime
::
Backend
::
is_supported
(
const
Node
&
node
)
const
bool
runtime
::
Backend
::
is_supported
(
const
Node
&
/* node */
)
const
{
// The default behavior is that a backend does not support any ops. If this is not the case
// then override this method and enhance.
return
false
;
}
bool
runtime
::
Backend
::
is_supported_property
(
const
Property
prop
)
const
bool
runtime
::
Backend
::
is_supported_property
(
const
Property
/* prop */
)
const
{
return
false
;
}
// Drop any backend-side state associated with a compiled Executable.
// The base class keeps no such state, so this is intentionally a no-op;
// caching backends override it.
void runtime::Backend::remove_compiled_function(std::shared_ptr<Executable> /* exec */)
{
}
std
::
shared_ptr
<
runtime
::
Executable
>
runtime
::
Backend
::
load
(
istream
&
input_stream
)
std
::
shared_ptr
<
runtime
::
Executable
>
runtime
::
Backend
::
load
(
istream
&
/* input_stream */
)
{
throw
runtime_error
(
"load operation unimplemented."
);
}
bool
runtime
::
Backend
::
is_device_memory
(
void
*
ptr
)
bool
runtime
::
Backend
::
is_device_memory
(
void
*
/* ptr */
)
{
// override this method for each supported backend to determine if the passed pointer is in
// device pinned memory or not
...
...
@@ -146,7 +147,7 @@ const string& runtime::Backend::get_backend_shared_library_search_directory()
return
s_backend_shared_library_search_directory
;
}
bool
runtime
::
Backend
::
set_config
(
const
map
<
string
,
string
>&
config
,
string
&
error
)
bool
runtime
::
Backend
::
set_config
(
const
map
<
string
,
string
>&
/* config */
,
string
&
error
)
{
error
=
"set_config not supported"
;
return
false
;
...
...
src/ngraph/runtime/backend.hpp
View file @
5e607081
...
...
@@ -159,7 +159,7 @@ public:
virtual
Allocator
*
get_host_memory_allocator
()
{
return
nullptr
;
}
/// \brief Set the host memory allocator to be used by the backend
/// \param allocator is pointer to host memory allocator object
virtual void set_host_memory_allocator(Allocator* allocator)
{
    // Base class does not use a custom allocator; the cast silences the
    // unused-parameter warning while keeping the named parameter for Doxygen.
    (void)allocator;
}
/// \brief Returns memory allocator used by backend for device allocations
virtual
Allocator
*
get_device_memory_allocator
()
{
...
...
src/ngraph/runtime/executable.cpp
View file @
5e607081
...
...
@@ -119,29 +119,30 @@ vector<runtime::PerformanceCounter> runtime::Executable::get_performance_data()
return
vector
<
PerformanceCounter
>
();
}
void
runtime
::
Executable
::
save
(
std
::
ostream
&
output_stream
)
void
runtime
::
Executable
::
save
(
std
::
ostream
&
/* output_stream */
)
{
throw
runtime_error
(
"save opertion unimplemented."
);
}
// Create a tensor matching the executable's input at the given index.
// Optional capability; unimplemented in the base class.
// \throws std::runtime_error always, in the base class.
shared_ptr<runtime::Tensor> runtime::Executable::create_input_tensor(size_t /* input_index */)
{
    throw runtime_error("create_input_tensor unimplemented");
}
// Create a tensor matching the executable's output at the given index.
// Optional capability; unimplemented in the base class.
// \throws std::runtime_error always, in the base class.
shared_ptr<runtime::Tensor> runtime::Executable::create_output_tensor(size_t /* output_index */)
{
    throw runtime_error("create_output_tensor unimplemented");
}
// Pipelined variant: create one input tensor per pipeline stage.
// Optional capability; unimplemented in the base class.
// \throws std::runtime_error always, in the base class.
vector<shared_ptr<runtime::Tensor>>
    runtime::Executable::create_input_tensor(size_t /* input_index */,
                                             size_t /* pipeline_depth */)
{
    throw runtime_error("create_input_tensor unimplemented");
}
// Pipelined variant: create one output tensor per pipeline stage.
// Optional capability; unimplemented in the base class.
// \throws std::runtime_error always, in the base class.
vector<shared_ptr<runtime::Tensor>>
    runtime::Executable::create_output_tensor(size_t /* output_index */,
                                              size_t /* pipeline_depth */)
{
    throw runtime_error("create_output_tensor unimplemented");
}
src/ngraph/runtime/interpreter/int_backend.cpp
View file @
5e607081
...
...
@@ -32,7 +32,7 @@ runtime::BackendConstructor* runtime::interpreter::get_backend_constructor_point
class
INTBackendConstructor
:
public
runtime
::
BackendConstructor
{
public
:
std
::
shared_ptr
<
runtime
::
Backend
>
create
(
const
std
::
string
&
config
)
override
std
::
shared_ptr
<
runtime
::
Backend
>
create
(
const
std
::
string
&
/* config */
)
override
{
return
std
::
make_shared
<
runtime
::
interpreter
::
INTBackend
>
();
}
...
...
src/ngraph/runtime/nop/nop_backend.cpp
View file @
5e607081
...
...
@@ -37,7 +37,7 @@ extern "C" runtime::BackendConstructor* get_backend_constructor_pointer()
class
LocalBackendConstructor
:
public
runtime
::
BackendConstructor
{
public
:
std
::
shared_ptr
<
runtime
::
Backend
>
create
(
const
std
::
string
&
config
)
override
std
::
shared_ptr
<
runtime
::
Backend
>
create
(
const
std
::
string
&
/* config */
)
override
{
return
std
::
make_shared
<
runtime
::
nop
::
NOPBackend
>
();
}
...
...
@@ -69,7 +69,7 @@ shared_ptr<runtime::Executable>
}
runtime
::
nop
::
NOPExecutable
::
NOPExecutable
(
shared_ptr
<
Function
>
function
,
bool
enable_performance_collection
)
bool
/* enable_performance_collection */
)
{
pass
::
Manager
pass_manager
;
pass_manager
.
register_pass
<
pass
::
AssignLayout
<
DenseTensorLayout
>>
();
...
...
@@ -78,8 +78,8 @@ runtime::nop::NOPExecutable::NOPExecutable(shared_ptr<Function> function,
set_parameters_and_results
(
*
function
);
}
// The NOP backend performs no computation: inputs are ignored, outputs are
// left untouched, and every call trivially reports success.
bool runtime::nop::NOPExecutable::call(const vector<shared_ptr<runtime::Tensor>>& /* outputs */,
                                       const vector<shared_ptr<runtime::Tensor>>& /* inputs */)
{
    return true;
}
src/ngraph/runtime/plaidml/plaidml_backend.cpp
View file @
5e607081
...
...
@@ -53,7 +53,8 @@ bool ngraph::runtime::plaidml::PlaidML_Backend::is_supported(const Node& node) c
return
m_compiler
.
is_supported
(
node
);
}
bool
ngraph
::
runtime
::
plaidml
::
PlaidML_Backend
::
is_supported_property
(
const
Property
prop
)
const
bool
ngraph
::
runtime
::
plaidml
::
PlaidML_Backend
::
is_supported_property
(
const
Property
/* prop */
)
const
{
return
false
;
}
...
...
src/ngraph/runtime/plaidml/plaidml_ops_group_convolution.cpp
View file @
5e607081
...
...
@@ -40,7 +40,6 @@ void ngraph::runtime::plaidml::ImplGroupConvolution::Apply()
const
auto
&
image
=
op_input
(
0
);
const
auto
&
filter
=
op_input
(
1
);
auto
rank
=
op
().
get_input_shape
(
0
).
size
()
-
2
;
const
auto
&
groups
=
op
().
get_groups
();
const
auto
&
padding_above
=
op
().
get_padding_above
();
const
auto
&
padding_below
=
op
().
get_padding_below
();
...
...
src/ngraph/runtime/reference/batch_norm.hpp
View file @
5e607081
...
...
@@ -127,7 +127,7 @@ namespace ngraph
template
<
typename
T
>
void
batch_norm_backprop
(
double
eps
,
const
T
*
gamma
,
const
T
*
beta
,
const
T
*
/* beta */
,
const
T
*
input
,
const
T
*
mean
,
const
T
*
variance
,
...
...
src/ngraph/runtime/reference/sum.hpp
View file @
5e607081
...
...
@@ -45,7 +45,7 @@ namespace ngraph
}
// Integral overload of is_finite: integral types have no NaN/Inf
// representation, so every value is finite. The parameter is unnamed
// because only the type matters for overload selection.
template <typename T>
typename std::enable_if<std::is_integral<T>::value, bool>::type is_finite(T /* x */)
{
    return true;
}
...
...
src/ngraph/runtime/tensor.hpp
View file @
5e607081
...
...
@@ -115,6 +115,7 @@ namespace ngraph
/// \brief Deprecated three-parameter write; the offset argument is ignored
///        and the call forwards to the two-parameter overload.
/// \param p Source buffer to copy from
/// \param offset Unused; kept only for source compatibility
/// \param n Number of bytes to write
void write(const void* p, size_t offset, size_t n) NGRAPH_DEPRECATED("Use two-parameter write")
{
    (void)offset; // silence unused-parameter warning
    write(p, n);
}
...
...
@@ -126,6 +127,7 @@ namespace ngraph
/// \brief Deprecated three-parameter read; the offset argument is ignored
///        and the call forwards to the two-parameter overload.
/// \param p Destination buffer to copy into
/// \param offset Unused; kept only for source compatibility
/// \param n Number of bytes to read
void read(void* p, size_t offset, size_t n) const NGRAPH_DEPRECATED("Use two-parameter read")
{
    (void)offset; // silence unused-parameter warning
    read(p, n);
}
...
...
src/ngraph/type/element_type.cpp
View file @
5e607081
...
...
@@ -107,8 +107,11 @@ std::vector<const element::Type*> element::Type::get_known_types()
return
rc
;
}
element
::
Type
::
Type
(
size_t
bitwidth
,
bool
is_real
,
bool
is_signed
,
bool
is_quantized
,
const
std
::
string
&
cname
)
element
::
Type
::
Type
(
size_t
bitwidth
,
bool
is_real
,
bool
is_signed
,
bool
is_quantized
,
const
std
::
string
&
/* cname */
)
{
for
(
auto
&
t
:
get_type_info_map
())
{
...
...
src/ngraph/validation_util.cpp
View file @
5e607081
...
...
@@ -20,7 +20,7 @@
using
namespace
std
;
using
namespace
ngraph
;
Strides
ngraph
::
conv_default_strides
(
const
Node
*
node
,
Strides
ngraph
::
conv_default_strides
(
const
Node
*
/* node */
,
const
PartialShape
&
data_batch_shape
,
const
PartialShape
&
filters_shape
)
{
...
...
@@ -42,7 +42,7 @@ Strides ngraph::conv_default_strides(const Node* node,
return
Strides
(
rank
,
1
);
}
CoordinateDiff
ngraph
::
conv_default_padding
(
const
Node
*
node
,
CoordinateDiff
ngraph
::
conv_default_padding
(
const
Node
*
/* node */
,
const
PartialShape
&
data_batch_shape
,
const
PartialShape
&
filters_shape
)
{
...
...
src/tools/nbench/benchmark_pipelined.cpp
View file @
5e607081
...
...
@@ -109,7 +109,7 @@ vector<runtime::PerformanceCounter> run_benchmark_pipelined(shared_ptr<Function>
size_t
iterations
,
bool
timing_detail
,
int
warmup_iterations
,
bool
copy_data
)
bool
/* copy_data */
)
{
constexpr
size_t
pipeline_depth
=
2
;
s_iterations
=
iterations
;
...
...
test/cpu_debugger.cpp
View file @
5e607081
...
...
@@ -308,7 +308,7 @@ TEST(tracer, basic)
ngraph
::
runtime
::
cpu
::
CPU_Debugger
dbg
(
*
cf
);
int
good_or_bad_value
=
-
777
;
auto
add_tracer
=
[
&
good_or_bad_value
](
void
**
values
,
const
std
::
string
&
name
)
{
auto
add_tracer
=
[
&
good_or_bad_value
](
void
**
values
,
const
std
::
string
&
/* name */
)
{
ASSERT_EQ
(
static_cast
<
int
*>
(
values
[
0
])[
0
],
good_or_bad_value
);
};
...
...
@@ -344,7 +344,7 @@ TEST(tracer, count_tracepoint)
size_t
offset
=
5
;
std
::
function
<
void
(
void
**
,
const
std
::
string
&
)
>
callback
=
[
&
num_iterations
,
offset
](
void
**
values
,
const
std
::
string
&
name
)
{
[
&
num_iterations
,
offset
](
void
**
values
,
const
std
::
string
&
/* name */
)
{
ASSERT_EQ
(
static_cast
<
int
*>
(
values
[
0
])[
0
],
num_iterations
-
1
+
offset
);
};
...
...
@@ -385,7 +385,8 @@ TEST(tracer, conditional_tracepoint)
size_t
offset
=
5
;
int
countdown
=
num_iterations
;
auto
add_tracer
=
[
&
countdown
,
num_iterations
,
offset
](
void
**
values
,
const
std
::
string
&
name
)
{
auto
add_tracer
=
[
&
countdown
,
num_iterations
,
offset
](
void
**
values
,
const
std
::
string
&
/* name */
)
{
if
(
countdown
--
==
0
)
{
ASSERT_EQ
(
static_cast
<
int
*>
(
values
[
0
])[
0
],
num_iterations
-
1
+
offset
);
...
...
test/pass_manager.cpp
View file @
5e607081
...
...
@@ -35,7 +35,7 @@ TEST(pass_manager, add)
auto
graph
=
make_test_graph
();
size_t
node_count
=
0
;
traverse_nodes
(
graph
,
[
&
](
shared_ptr
<
Node
>
node
)
{
node_count
++
;
});
traverse_nodes
(
graph
,
[
&
](
shared_ptr
<
Node
>
/* node */
)
{
node_count
++
;
});
pass_manager
.
run_passes
(
graph
);
auto
sorted
=
graph
->
get_ordered_ops
();
EXPECT_EQ
(
node_count
,
sorted
.
size
());
...
...
@@ -51,7 +51,7 @@ namespace
:
FunctionPass
()
{
}
// Do-nothing pass body used to exercise the pass manager in tests;
// returning false reports "function not modified".
bool run_on_function(std::shared_ptr<ngraph::Function> /* f */) override { return false; }
};
}
...
...
test/pattern.cpp
View file @
5e607081
...
...
@@ -305,7 +305,7 @@ TEST(pattern, matcher)
ASSERT_TRUE
(
n
.
match
(
any
,
abs
));
ASSERT_EQ
(
n
.
get_matched_nodes
(),
(
NodeVector
{
abs
,
a
}));
auto
false_pred
=
[](
std
::
shared_ptr
<
Node
>
no
)
{
return
false
;
};
auto
false_pred
=
[](
std
::
shared_ptr
<
Node
>
/* no */
)
{
return
false
;
};
auto
any_false
=
std
::
make_shared
<
pattern
::
op
::
Skip
>
(
a
,
false_pred
);
ASSERT_TRUE
(
n
.
match
(
any_false
,
a
));
ASSERT_EQ
(
n
.
get_matched_nodes
(),
(
NodeVector
{
a
,
a
}));
...
...
test/type_prop/binary_elementwise.cpp
View file @
5e607081
...
...
@@ -24,7 +24,7 @@ using namespace ngraph;
//
// Tests for binary elementwise ops.
//
void
test_binary
(
std
::
string
node_type
,
void
test_binary
(
std
::
string
/* node_type */
,
shared_ptr
<
Node
>
(
f
)(
const
shared_ptr
<
Node
>&
x
,
const
shared_ptr
<
Node
>&
y
))
{
// Check for bad arguments
...
...
@@ -115,7 +115,7 @@ TEST(type_prop, subtract_bad_arguments)
//
// Tests for binary elementwise logical ops.
//
void
test_binary_logical
(
std
::
string
node_type
,
void
test_binary_logical
(
std
::
string
/* node_type */
,
shared_ptr
<
Node
>
(
f
)(
const
shared_ptr
<
Node
>&
x
,
const
shared_ptr
<
Node
>&
y
))
{
// Check for bad arguments
...
...
test/util/ndarray.hpp
View file @
5e607081
...
...
@@ -67,14 +67,14 @@ namespace ngraph
// For a scalar, nothing to do.
template
<
typename
T
,
size_t
N
>
typename
std
::
enable_if
<
(
N
==
0
),
void
>::
type
fill_shape
(
Shape
&
shape
,
const
NestedInitializerList
<
T
,
N
>&
inits
)
fill_shape
(
Shape
&
/* shape */
,
const
NestedInitializerList
<
T
,
N
>&
/* inits */
)
{
}
// Check that the inits match the shape
template
<
typename
T
,
size_t
N
>
typename
std
::
enable_if
<
(
N
==
0
),
void
>::
type
check_shape
(
const
Shape
&
shape
,
const
NestedInitializerList
<
T
,
N
>&
inits
)
check_shape
(
const
Shape
&
shape
,
const
NestedInitializerList
<
T
,
N
>&
/* inits */
)
{
if
(
shape
.
size
()
!=
0
)
{
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment