Commit 9b3846f4 authored by Robert Kimball

revert nbench changes for now

parent 8ba7eb9e
@@ -109,11 +109,6 @@ void init_real_tv(shared_ptr<runtime::Tensor> tv, T min, T max)
static void random_init(shared_ptr<runtime::Tensor> tv)
{
element::Type et = tv->get_element_type();
#if !(defined(__GNUC__) && (__GNUC__ == 4 && __GNUC_MINOR__ == 8))
#pragma GCC diagnostic push
#pragma GCC diagnostic error "-Wswitch"
#pragma GCC diagnostic error "-Wswitch-enum"
#endif
switch (et.get_type_enum())
{
case element::Type_t::boolean: init_int_tv<char>(tv, 0, 1); break;
@@ -127,15 +122,8 @@ static void random_init(shared_ptr<runtime::Tensor> tv)
case element::Type_t::u16: init_int_tv<uint16_t>(tv, 0, 1); break;
case element::Type_t::u32: init_int_tv<uint32_t>(tv, 0, 1); break;
case element::Type_t::u64: init_int_tv<uint64_t>(tv, 0, 1); break;
case element::Type_t::undefined:
case element::Type_t::dynamic:
case element::Type_t::bf16:
case element::Type_t::f16:
default: throw runtime_error("unsupported type");
}
#if !(defined(__GNUC__) && (__GNUC__ == 4 && __GNUC_MINOR__ == 8))
#pragma GCC diagnostic pop
#endif
}
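
The removed `#pragma` block promotes `-Wswitch` and `-Wswitch-enum` to hard errors around the `switch`, so adding a new `element::Type_t` enumerator without extending the switch fails the build; the `#if` guard skips the pragmas on GCC 4.8, which handles them poorly. A minimal standalone sketch of the same pattern, with a hypothetical `Type_t` standing in for `element::Type_t`:

```cpp
#include <stdexcept>

// Hypothetical stand-in for ngraph's element::Type_t.
enum class Type_t { boolean, f32, i32, undefined };

int byte_size(Type_t t)
{
// GCC 4.8 mishandles these diagnostic pragmas, hence the version guard.
#if !(defined(__GNUC__) && (__GNUC__ == 4 && __GNUC_MINOR__ == 8))
#pragma GCC diagnostic push
// Make "enumerator not handled in switch" a compile error rather than a warning.
#pragma GCC diagnostic error "-Wswitch"
#pragma GCC diagnostic error "-Wswitch-enum"
#endif
    switch (t)
    {
    case Type_t::boolean: return 1;
    case Type_t::f32: return 4;
    case Type_t::i32: return 4;
    case Type_t::undefined: throw std::runtime_error("unsupported type");
    }
#if !(defined(__GNUC__) && (__GNUC__ == 4 && __GNUC_MINOR__ == 8))
#pragma GCC diagnostic pop
#endif
    throw std::runtime_error("unreachable"); // silence -Wreturn-type
}
```
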
vector<runtime::PerformanceCounter> run_benchmark(shared_ptr<Function> f,
@@ -228,117 +216,3 @@ vector<runtime::PerformanceCounter> run_benchmark(shared_ptr<Function> f,
vector<runtime::PerformanceCounter> perf_data = compiled_func->get_performance_data();
return perf_data;
}
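
Both benchmark paths call `set_denormals_flush_to_zero()` before timing. Denormal (subnormal) floats can be orders of magnitude slower on x86, so benchmarks typically flush them to zero to keep timings representative. A sketch of a typical x86 implementation using SSE intrinsics, assumed here and not necessarily how nGraph implements it:

```cpp
#include <pmmintrin.h> // _MM_SET_DENORMALS_ZERO_MODE
#include <xmmintrin.h> // _MM_SET_FLUSH_ZERO_MODE

// Flush denormal results to zero and treat denormal operands as zero,
// avoiding the slow microcoded paths they trigger on x86.
static void set_denormals_flush_to_zero()
{
    _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
    _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
}
```
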
vector<runtime::PerformanceCounter> run_benchmark_double_buffered(shared_ptr<Function> f,
const string& backend_name,
size_t iterations,
bool timing_detail,
int warmup_iterations,
bool copy_data)
{
stopwatch timer;
timer.start();
auto backend = runtime::Backend::create(backend_name);
auto compiled_func = backend->compile(f, timing_detail);
timer.stop();
cout.imbue(locale(""));
cout << "compile time: " << timer.get_milliseconds() << "ms" << endl;
set_denormals_flush_to_zero();
// array<vector<shared_ptr<runtime::HostTensor>>, 2> args_data_set;
// array<vector<shared_ptr<runtime::Tensor>>, 2> args_set;
// array<vector<shared_ptr<runtime::HostTensor>>, 2> results_data_set;
// array<vector<shared_ptr<runtime::Tensor>>, 2> results_set;
// for (size_t i = 0; i < 2; i++)
// {
// vector<shared_ptr<runtime::HostTensor>> args_data;
// vector<shared_ptr<runtime::Tensor>> args;
// for (shared_ptr<op::Parameter> param : f->get_parameters())
// {
// auto tensor = backend->create_tensor(param->get_element_type(), param->get_shape());
// auto tensor_data =
// make_shared<runtime::HostTensor>(param->get_element_type(), param->get_shape());
// random_init(tensor_data);
// tensor->write(tensor_data->get_data_ptr(),
// tensor_data->get_element_count() *
// tensor_data->get_element_type().size());
// args.push_back(tensor);
// args_data.push_back(tensor_data);
// }
// args_set[i] = args;
// args_data_set[i] = args_data;
// vector<shared_ptr<runtime::Tensor>> results;
// vector<shared_ptr<runtime::HostTensor>> results_data;
// for (shared_ptr<Node> out : f->get_results())
// {
// auto result = backend->create_tensor(out->get_element_type(), out->get_shape());
// auto result_data =
// make_shared<runtime::HostTensor>(out->get_element_type(), out->get_shape());
// results.push_back(result);
// results_data.push_back(result_data);
// }
// results_set[i] = results;
// results_data_set[i] = results_data;
// }
// stopwatch t1;
// // Before we start we write the first iteration's data
// size_t buffer_number = 0;
// auto args = args_set[buffer_number];
// auto args_data = args_data_set[buffer_number];
// for (size_t arg_index = 0; arg_index < args.size(); arg_index++)
// {
// const shared_ptr<runtime::Tensor>& arg = args[arg_index];
// const shared_ptr<runtime::HostTensor>& data = args_data[arg_index];
// arg->begin_write(data->get_data_ptr(),
// data->get_element_count() * data->get_element_type().size(),
// buffer_number);
// }
// const vector<shared_ptr<runtime::Tensor>>& results = results_set[buffer_number];
// const vector<shared_ptr<runtime::HostTensor>>& results_data = results_data_set[buffer_number];
// for (size_t i = 0; i < iterations + warmup_iterations; i++)
// {
// if (i == warmup_iterations)
// {
// t1.start();
// }
// future<void> exec_future = compiled_func->begin_execute(results, args);
// if (i > 0)
// {
// for (size_t result_index = 0; result_index < results.size(); result_index++)
// {
// const shared_ptr<runtime::HostTensor>& data = results_data[result_index];
// const shared_ptr<runtime::Tensor>& result = results[result_index];
// result->begin_read(data->get_data_ptr(),
// data->get_element_count() * data->get_element_type().size(),
// (buffer_number - 1) & 1);
// }
// }
// buffer_number = (buffer_number + 1) & 1;
// for (size_t arg_index = 0; arg_index < args.size(); arg_index++)
// {
// const shared_ptr<runtime::Tensor>& arg = args[arg_index];
// const shared_ptr<runtime::HostTensor>& data = args_data[arg_index];
// arg->begin_write(data->get_data_ptr(),
// data->get_element_count() * data->get_element_type().size(),
// buffer_number);
// }
// exec_future.get();
// }
// for (size_t result_index = 0; result_index < results.size(); result_index++)
// {
// const shared_ptr<runtime::HostTensor>& data = results_data[result_index];
// const shared_ptr<runtime::Tensor>& result = results[result_index];
// result->begin_read(data->get_data_ptr(),
// data->get_element_count() * data->get_element_type().size(),
// (buffer_number - 1) & 1);
// }
// t1.stop();
// float time = t1.get_milliseconds();
// cout << time / iterations << "ms per iteration" << endl;
vector<runtime::PerformanceCounter> perf_data = compiled_func->get_performance_data();
return perf_data;
}
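
The commented-out body above is the double-buffering scheme this commit reverts: two sets of input/output tensors alternate, so while one buffer executes, the other is loaded with the next iteration's inputs and drained of the previous iteration's outputs, overlapping data movement with compute. A generic sketch of that pipeline, using `std::async` and illustrative stand-ins for the backend's `begin_write`/`begin_execute`/`begin_read` calls:

```cpp
#include <array>
#include <cstddef>
#include <functional>
#include <future>

// Illustrative stand-ins for one slot's device tensors and the
// backend's asynchronous copy/execute calls.
struct Buffers { /* input and output tensors for one slot */ };
void write_inputs(Buffers&) {}  // host -> device input copy (stand-in)
void read_outputs(Buffers&) {}  // device -> host result copy (stand-in)
void execute(Buffers&) {}       // run the compiled function (stand-in)

// "& 1" is a two-slot ring index, exactly as in the reverted code.
void run_double_buffered(std::size_t iterations)
{
    std::array<Buffers, 2> buf;
    std::size_t n = 0;
    write_inputs(buf[n]); // prime the first slot before the loop
    for (std::size_t i = 0; i < iterations; i++)
    {
        std::future<void> done =
            std::async(std::launch::async, execute, std::ref(buf[n]));
        if (i > 0)
        {
            read_outputs(buf[(n - 1) & 1]); // drain the previous iteration
        }
        n = (n + 1) & 1;
        write_inputs(buf[n]); // stage the next iteration's inputs
        done.get();
    }
    read_outputs(buf[(n - 1) & 1]); // drain the final iteration
}
```
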
@@ -34,11 +34,3 @@ std::vector<ngraph::runtime::PerformanceCounter> run_benchmark(std::shared_ptr<n
bool timing_detail,
int warmup_iterations,
bool copy_data);
std::vector<ngraph::runtime::PerformanceCounter>
run_benchmark_double_buffered(std::shared_ptr<ngraph::Function> f,
const std::string& backend_name,
size_t iterations,
bool timing_detail,
int warmup_iterations,
bool copy_data);
@@ -181,7 +181,6 @@ int main(int argc, char** argv)
int warmup_iterations = 1;
bool copy_data = true;
bool dot_file = false;
bool double_buffer = false;
for (size_t i = 1; i < argc; i++)
{
@@ -230,10 +229,6 @@ int main(int argc, char** argv)
{
directory = argv[++i];
}
else if (arg == "--double_buffer")
{
double_buffer = true;
}
else if (arg == "-w" || arg == "--warmup_iterations")
{
try
@@ -288,7 +283,6 @@ OPTIONS
-w|--warmup_iterations Number of warm-up iterations
--no_copy_data Disable copy of input/result data every iteration
--dot Generate Graphviz dot file
--double_buffer Double buffer inputs and outputs
)###";
return 1;
}
@@ -426,17 +420,8 @@ OPTIONS
{
cout << "\n---- Benchmark ----\n";
shared_ptr<Function> f = deserialize(model);
vector<runtime::PerformanceCounter> perf_data;
if (double_buffer)
{
perf_data = run_benchmark_double_buffered(
f, backend, iterations, timing_detail, warmup_iterations, copy_data);
}
else
{
perf_data = run_benchmark(
f, backend, iterations, timing_detail, warmup_iterations, copy_data);
}
auto perf_data = run_benchmark(
f, backend, iterations, timing_detail, warmup_iterations, copy_data);
auto perf_shape = to_perf_shape(f, perf_data);
aggregate_perf_data.insert(
aggregate_perf_data.end(), perf_shape.begin(), perf_shape.end());