submodule / ngraph / Commits / 3f9b5ff1

Commit 3f9b5ff1, authored Jul 21, 2019 by Robert Kimball
Parent: 61d877ff

    cleanup

Showing 3 changed files with 6 additions and 65 deletions:

    src/tools/nbench/benchmark.cpp              +1   -1
    src/tools/nbench/benchmark_pipelined.cpp    +0   -59
    src/tools/nbench/benchmark_pipelined.hpp    +5   -5
src/tools/nbench/benchmark.cpp  (+1, -1)

@@ -15,13 +15,13 @@
 //*****************************************************************************
 #include "benchmark.hpp"
+#include "benchmark_utils.hpp"
 #include "ngraph/file_util.hpp"
 #include "ngraph/runtime/backend.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
 #include "ngraph/runtime/tensor.hpp"
 #include "ngraph/serializer.hpp"
 #include "ngraph/util.hpp"
-#include "benchmark_utils.hpp"

 using namespace std;
 using namespace ngraph;
src/tools/nbench/benchmark_pipelined.cpp  (+0, -59)

@@ -78,7 +78,6 @@ static void
 {
     while (current_iteration < s_iterations + s_warmup_iterations)
     {
-        NGRAPH_INFO;
         unique_lock<mutex> lock(s_mutex);
         if ((current_iteration & 1) != pipeline_stage)
         {

@@ -102,7 +101,6 @@ vector<runtime::PerformanceCounter> run_benchmark_pipelined(shared_ptr<Function>
                                                             int warmup_iterations,
                                                             bool copy_data)
 {
-    NGRAPH_INFO;
     constexpr size_t pipeline_depth = 2;
     s_iterations = iterations;
     s_warmup_iterations = warmup_iterations;

@@ -170,7 +168,6 @@ vector<runtime::PerformanceCounter> run_benchmark_pipelined(shared_ptr<Function>
     for (size_t i = 0; i < pipeline_depth; i++)
     {
         threads[i] = thread(thread_entry, exec.get(), tensor_collections[i], i);
-        // threads[i] = thread(thread_entry, i);
     }
     for (size_t i = 0; i < pipeline_depth; i++)

@@ -178,62 +175,6 @@ vector<runtime::PerformanceCounter> run_benchmark_pipelined(shared_ptr<Function>
         threads[i].join();
     }
-    // // Before we start we write the first iteration's data
-    // size_t buffer_number = 0;
-    // auto args = input_tensors_array[buffer_number];
-    // auto args_data = parameters_data_set[buffer_number];
-    // for (size_t arg_index = 0; arg_index < args.size(); arg_index++)
-    // {
-    //     const shared_ptr<runtime::Tensor>& arg = args[arg_index];
-    //     const shared_ptr<runtime::HostTensor>& data = args_data[arg_index];
-    //     arg->begin_write(data->get_data_ptr(),
-    //                      data->get_element_count() * data->get_element_type().size(),
-    //                      buffer_number);
-    // }
-    // const vector<shared_ptr<runtime::Tensor>>& results = output_tensors[buffer_number];
-    // const vector<shared_ptr<runtime::HostTensor>>& results_data = results_data_set[buffer_number];
-    // for (size_t i = 0; i < iterations + warmup_iterations; i++)
-    // {
-    //     if (i == warmup_iterations)
-    //     {
-    //         t1.start();
-    //     }
-    //     future<void> exec_future = exec->begin_execute(results, args);
-    //     if (i > 0)
-    //     {
-    //         for (size_t result_index = 0; result_index < results.size(); result_index++)
-    //         {
-    //             const shared_ptr<runtime::HostTensor>& data = results_data[result_index];
-    //             const shared_ptr<runtime::Tensor>& result = results[result_index];
-    //             result->begin_read(data->get_data_ptr(),
-    //                                data->get_element_count() * data->get_element_type().size(),
-    //                                (buffer_number - 1) & 1);
-    //         }
-    //     }
-    //     buffer_number = (buffer_number + 1) & 1;
-    //     for (size_t arg_index = 0; arg_index < args.size(); arg_index++)
-    //     {
-    //         const shared_ptr<runtime::Tensor>& arg = args[arg_index];
-    //         const shared_ptr<runtime::HostTensor>& data = args_data[arg_index];
-    //         arg->begin_write(data->get_data_ptr(),
-    //                          data->get_element_count() * data->get_element_type().size(),
-    //                          buffer_number);
-    //     }
-    //     exec_future.get();
-    // }
-    // for (size_t result_index = 0; result_index < results.size(); result_index++)
-    // {
-    //     const shared_ptr<runtime::HostTensor>& data = results_data[result_index];
-    //     const shared_ptr<runtime::Tensor>& result = results[result_index];
-    //     result->begin_read(data->get_data_ptr(),
-    //                        data->get_element_count() * data->get_element_type().size(),
-    //                        (buffer_number - 1) & 1);
-    // }
-    // t1.stop();
-    // float time = t1.get_milliseconds();
-    // cout << time / iterations << "ms per iteration" << endl;

     vector<runtime::PerformanceCounter> perf_data = exec->get_performance_data();
     return perf_data;
 }
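The surviving code sketches the shape of the pipelined benchmark: pipeline_depth is fixed at 2, one thread is spawned per stage, and each thread claims the iterations whose parity matches its stage, serialized through s_mutex. Below is a minimal, self-contained sketch of that even/odd handoff. The condition-variable wiring and the do_work placeholder are assumptions for illustration only; the full thread_entry body, which drives the backend's execute call on its tensor collection, is not shown in this diff.

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

static std::mutex s_mutex;
static std::condition_variable s_cv; // assumed; the diff only shows s_mutex
static size_t current_iteration = 0;
static size_t s_iterations = 8;
static size_t s_warmup_iterations = 2;

// Stand-in for the real per-iteration work (the backend execute call).
static void do_work(size_t iteration, size_t stage)
{
    std::printf("stage %zu ran iteration %zu\n", stage, iteration);
}

static void thread_entry(size_t pipeline_stage)
{
    while (true)
    {
        std::unique_lock<std::mutex> lock(s_mutex);
        if (current_iteration >= s_iterations + s_warmup_iterations)
        {
            break;
        }
        // Same parity test as the hunk at line 78: even iterations belong
        // to stage 0, odd iterations to stage 1.
        if ((current_iteration & 1) != pipeline_stage)
        {
            s_cv.wait(lock); // not this stage's turn; wait for the other stage
            continue;
        }
        size_t iteration = current_iteration++;
        lock.unlock();
        s_cv.notify_one();
        do_work(iteration, pipeline_stage); // overlaps with the other stage's work
    }
    s_cv.notify_one(); // wake the peer so it can observe completion
}

int main()
{
    std::thread t0(thread_entry, 0);
    std::thread t1(thread_entry, 1);
    t0.join();
    t1.join();
    return 0;
}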
src/tools/nbench/benchmark_pipelined.hpp  (+5, -5)

@@ -26,8 +26,8 @@
 std::vector<ngraph::runtime::PerformanceCounter>
     run_benchmark_pipelined(std::shared_ptr<ngraph::Function> f,
-                            const std::string& backend_name,
-                            size_t iterations,
-                            bool timing_detail,
-                            int warmup_iterations,
-                            bool copy_data);
+                            const std::string& backend_name,
+                            size_t iterations,
+                            bool timing_detail,
+                            int warmup_iterations,
+                            bool copy_data);

(The five parameter lines read identically on both sides of the diff; given the file's +5/-5 stat, this appears to be a whitespace-only realignment whose exact indentation is not recoverable from the rendered page.)
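For orientation, here is a hypothetical call site for the declaration above. The model path, backend name, and iteration counts are placeholder values (nbench normally parses these from its command line), and the sketch assumes the stream overload of ngraph::deserialize from ngraph/serializer.hpp, which benchmark.cpp above also includes.

#include "benchmark_pipelined.hpp"
#include "ngraph/serializer.hpp"

#include <cstdio>
#include <fstream>
#include <memory>
#include <vector>

int main()
{
    // Placeholder model file; nbench normally receives this via -f.
    std::ifstream model_stream("model.json");
    std::shared_ptr<ngraph::Function> f = ngraph::deserialize(model_stream);

    std::vector<ngraph::runtime::PerformanceCounter> perf =
        run_benchmark_pipelined(f,
                                "CPU", // backend_name
                                100,   // iterations
                                true,  // timing_detail
                                10,    // warmup_iterations
                                true); // copy_data

    // With timing_detail enabled, per-op counters come back to the caller.
    std::printf("%zu performance counters collected\n", perf.size());
    return 0;
}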