Skip to content
Projects
Groups
Snippets
Help
Loading...
Sign in / Register
Toggle navigation
N
ngraph
Project
Project
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Packages
Packages
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
submodule
ngraph
Commits
d3326687
Commit
d3326687
authored
5 years ago
by
Robert Kimball
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
wip
parent
40822ddc
Show whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
15 additions
and
4 deletions
+15
-4
benchmark_pipelined.cpp
src/tools/nbench/benchmark_pipelined.cpp
+15
-4
No files found.
src/tools/nbench/benchmark_pipelined.cpp
View file @
d3326687
...
@@ -15,17 +15,28 @@
...
@@ -15,17 +15,28 @@
//*****************************************************************************
//*****************************************************************************
#include "benchmark.hpp"
#include "benchmark.hpp"
#include "benchmark_utils.hpp"
#include "ngraph/file_util.hpp"
#include "ngraph/file_util.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/serializer.hpp"
#include "ngraph/serializer.hpp"
#include "ngraph/util.hpp"
#include "ngraph/util.hpp"
#include "benchmark_utils.hpp"
using
namespace
std
;
using
namespace
std
;
using
namespace
ngraph
;
using
namespace
ngraph
;
// Groups the tensors belonging to one stage of the benchmark pipeline.
// run_benchmark_pipelined keeps an array<TensorCollection, pipeline_depth>
// so each in-flight iteration has its own set of tensors.
// NOTE(review): removed an empty trailing `private:` section that declared
// nothing — it was dead text with no effect on the class.
class TensorCollection
{
public:
    // Host-side data for each function parameter; presumably staged here
    // before being copied into input_tensors — confirm against the copy loop
    // in run_benchmark_pipelined.
    vector<shared_ptr<runtime::HostTensor>> parameter_data;
    // Backend input tensors for this pipeline stage (one per parameter,
    // created via Executable::create_input_tensor).
    vector<shared_ptr<runtime::Tensor>> input_tensors;
    // Backend output tensors for this pipeline stage (one per result,
    // created via Executable::create_output_tensor).
    vector<shared_ptr<runtime::Tensor>> output_tensors;
};
vector
<
runtime
::
PerformanceCounter
>
run_benchmark_pipelined
(
shared_ptr
<
Function
>
f
,
vector
<
runtime
::
PerformanceCounter
>
run_benchmark_pipelined
(
shared_ptr
<
Function
>
f
,
const
string
&
backend_name
,
const
string
&
backend_name
,
size_t
iterations
,
size_t
iterations
,
...
@@ -34,6 +45,7 @@ vector<runtime::PerformanceCounter> run_benchmark_pipelined(shared_ptr<Function>
...
@@ -34,6 +45,7 @@ vector<runtime::PerformanceCounter> run_benchmark_pipelined(shared_ptr<Function>
bool
copy_data
)
bool
copy_data
)
{
{
constexpr
size_t
pipeline_depth
=
2
;
constexpr
size_t
pipeline_depth
=
2
;
array
<
TensorCollection
,
pipeline_depth
>
tensor_collections
;
stopwatch
timer
;
stopwatch
timer
;
timer
.
start
();
timer
.
start
();
auto
backend
=
runtime
::
Backend
::
create
(
backend_name
);
auto
backend
=
runtime
::
Backend
::
create
(
backend_name
);
...
@@ -45,7 +57,6 @@ vector<runtime::PerformanceCounter> run_benchmark_pipelined(shared_ptr<Function>
...
@@ -45,7 +57,6 @@ vector<runtime::PerformanceCounter> run_benchmark_pipelined(shared_ptr<Function>
// Create random input data for all input tensors
// Create random input data for all input tensors
array
<
vector
<
shared_ptr
<
runtime
::
HostTensor
>>
,
pipeline_depth
>
parameters_data_set
;
array
<
vector
<
shared_ptr
<
runtime
::
HostTensor
>>
,
pipeline_depth
>
parameters_data_set
;
array
<
vector
<
shared_ptr
<
runtime
::
HostTensor
>>
,
pipeline_depth
>
results_data_set
;
for
(
size_t
i
=
0
;
i
<
pipeline_depth
;
i
++
)
for
(
size_t
i
=
0
;
i
<
pipeline_depth
;
i
++
)
{
{
vector
<
shared_ptr
<
runtime
::
HostTensor
>>
parameters_data
;
vector
<
shared_ptr
<
runtime
::
HostTensor
>>
parameters_data
;
...
@@ -65,7 +76,7 @@ vector<runtime::PerformanceCounter> run_benchmark_pipelined(shared_ptr<Function>
...
@@ -65,7 +76,7 @@ vector<runtime::PerformanceCounter> run_benchmark_pipelined(shared_ptr<Function>
for
(
shared_ptr
<
op
::
Parameter
>
param
:
f
->
get_parameters
())
for
(
shared_ptr
<
op
::
Parameter
>
param
:
f
->
get_parameters
())
{
{
auto
input_tensors
=
exec
->
create_input_tensor
(
input_index
++
,
pipeline_depth
);
auto
input_tensors
=
exec
->
create_input_tensor
(
input_index
++
,
pipeline_depth
);
for
(
size_t
i
=
0
;
i
<
pipeline_depth
;
i
++
)
for
(
size_t
i
=
0
;
i
<
pipeline_depth
;
i
++
)
{
{
input_tensors_array
[
i
].
push_back
(
input_tensors
[
i
]);
input_tensors_array
[
i
].
push_back
(
input_tensors
[
i
]);
}
}
...
@@ -77,7 +88,7 @@ vector<runtime::PerformanceCounter> run_benchmark_pipelined(shared_ptr<Function>
...
@@ -77,7 +88,7 @@ vector<runtime::PerformanceCounter> run_benchmark_pipelined(shared_ptr<Function>
for
(
shared_ptr
<
Node
>
result
:
f
->
get_results
())
for
(
shared_ptr
<
Node
>
result
:
f
->
get_results
())
{
{
auto
output_tensors
=
exec
->
create_output_tensor
(
output_index
++
,
pipeline_depth
);
auto
output_tensors
=
exec
->
create_output_tensor
(
output_index
++
,
pipeline_depth
);
for
(
size_t
i
=
0
;
i
<
pipeline_depth
;
i
++
)
for
(
size_t
i
=
0
;
i
<
pipeline_depth
;
i
++
)
{
{
output_tensors_array
[
i
].
push_back
(
output_tensors
[
i
]);
output_tensors_array
[
i
].
push_back
(
output_tensors
[
i
]);
}
}
...
...
This diff is collapsed.
Click to expand it.
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment