Skip to content
Projects
Groups
Snippets
Help
Loading...
Sign in / Register
Toggle navigation
N
ngraph
Project
Project
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Packages
Packages
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
submodule
ngraph
Commits
61d877ff
Commit
61d877ff
authored
5 years ago
by
Robert Kimball
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
running?
parent
0bb34c03
Show whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
32 additions
and
1 deletion
+32
-1
benchmark_pipelined.cpp
src/tools/nbench/benchmark_pipelined.cpp
+32
-1
No files found.
src/tools/nbench/benchmark_pipelined.cpp
View file @
61d877ff
...
...
@@ -34,6 +34,7 @@ class TensorCollection
{
public
:
vector
<
shared_ptr
<
runtime
::
HostTensor
>>
parameter_data
;
vector
<
shared_ptr
<
runtime
::
HostTensor
>>
result_data
;
vector
<
shared_ptr
<
runtime
::
Tensor
>>
input_tensors
;
vector
<
shared_ptr
<
runtime
::
Tensor
>>
output_tensors
;
...
...
@@ -49,6 +50,26 @@ static size_t s_warmup_iterations;
// Execute one pipelined benchmark iteration: refresh any device input
// tensors that report themselves stale from their paired host buffers,
// invoke the compiled executable, then copy every result tensor back
// into its host-side buffer.
//
// exec    - compiled executable to call (borrowed pointer, not owned)
// tensors - stage-local collection pairing host data buffers
//           (parameter_data / result_data) with the device tensors
//           they shadow (input_tensors / output_tensors)
static void do_iteration(runtime::Executable* exec, const TensorCollection& tensors)
{
    const vector<shared_ptr<runtime::Tensor>>& args = tensors.input_tensors;
    const vector<shared_ptr<runtime::Tensor>>& results = tensors.output_tensors;

    // Upload host data only where get_stale() says the device copy is
    // out of date; fresh tensors are left untouched.
    for (size_t i = 0; i < args.size(); ++i)
    {
        const shared_ptr<runtime::Tensor>& arg = args[i];
        if (arg->get_stale())
        {
            const shared_ptr<runtime::HostTensor>& host = tensors.parameter_data[i];
            const size_t byte_size = host->get_element_count() * host->get_element_type().size();
            arg->write(host->get_data_ptr(), byte_size);
        }
    }

    exec->call(results, args);

    // Drain every output back into the matching host buffer.
    for (size_t i = 0; i < results.size(); ++i)
    {
        const shared_ptr<runtime::HostTensor>& host = tensors.result_data[i];
        const size_t byte_size = host->get_element_count() * host->get_element_type().size();
        results[i]->read(host->get_data_ptr(), byte_size);
    }
}
static
void
...
...
@@ -96,7 +117,6 @@ vector<runtime::PerformanceCounter> run_benchmark_pipelined(shared_ptr<Function>
set_denormals_flush_to_zero
();
// Create random input data for all input tensors
array
<
vector
<
shared_ptr
<
runtime
::
HostTensor
>>
,
pipeline_depth
>
parameters_data_set
;
for
(
size_t
i
=
0
;
i
<
pipeline_depth
;
i
++
)
{
for
(
shared_ptr
<
op
::
Parameter
>
param
:
f
->
get_parameters
())
...
...
@@ -108,6 +128,17 @@ vector<runtime::PerformanceCounter> run_benchmark_pipelined(shared_ptr<Function>
}
}
// Create output tensors for all outputs
for
(
size_t
i
=
0
;
i
<
pipeline_depth
;
i
++
)
{
for
(
shared_ptr
<
Node
>
result
:
f
->
get_results
())
{
auto
tensor_data
=
make_shared
<
runtime
::
HostTensor
>
(
result
->
get_element_type
(),
result
->
get_shape
());
tensor_collections
[
i
].
result_data
.
push_back
(
tensor_data
);
}
}
// Create input tensors for all Parameters
array
<
vector
<
shared_ptr
<
runtime
::
Tensor
>>
,
pipeline_depth
>
input_tensors_array
;
size_t
input_index
=
0
;
...
...
This diff is collapsed.
Click to expand it.
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment