Unverified Commit fe676f72 authored by Robert Kimball, committed by GitHub

Handle unsupported op in nbench (#1531)

* add unsupported_op exception

* unsupported_op test

* add printout of unsupported op in model

* fix GPU dispatcher check

* fix test designation

* catch exceptions on single file runs too

* add unsupported_op exception where needed

* remove unsupported_op class

* add unassigned op exception

* add unit test

* catch unsupported op in nbench

* add cpu test back

* update all latest merges

* mode change
parent b0e4d8cb
@@ -22,8 +22,9 @@
 namespace ngraph
 {
     /// Base error for ngraph runtime errors.
-    struct ngraph_error : std::runtime_error
+    class ngraph_error : public std::runtime_error
     {
+    public:
         explicit ngraph_error(const std::string& what_arg)
             : std::runtime_error(what_arg)
         {
@@ -39,4 +40,13 @@ namespace ngraph
         {
         }
     };
+
+    class unsupported_op : public std::runtime_error
+    {
+    public:
+        unsupported_op(const std::string& what_arg)
+            : std::runtime_error(what_arg)
+        {
+        }
+    };
 }
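For orientation, a minimal sketch of how the new exception type is meant to be used end to end. Only the unsupported_op class itself comes from the hunk above; the main function and the op name are illustrative:

    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Same shape as the class added in the hunk above.
    class unsupported_op : public std::runtime_error
    {
    public:
        unsupported_op(const std::string& what_arg)
            : std::runtime_error(what_arg)
        {
        }
    };

    int main()
    {
        try
        {
            // A backend with no handler for an op throws the op's name.
            throw unsupported_op("StopGradient");
        }
        catch (const unsupported_op& e)
        {
            // A driver such as nbench can report the op and move on.
            std::cout << "Unsupported op '" << e.what() << "'\n";
        }
        return 0;
    }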
@@ -250,7 +250,8 @@ namespace ngraph
                 const std::vector<TensorViewWrapper>& args,
                 const std::vector<TensorViewWrapper>& out)
             {
-                throw std::runtime_error("Unimplemented op in CPU builder");
+                throw unsupported_op("Unimplemented op '" + node->description() +
+                                     "' in CPU builder");
             }

             static void nop(CPU_ExternalFunction* external_function,
...
@@ -47,7 +47,8 @@ namespace ngraph
                 const std::vector<TensorViewWrapper>& args,
                 const std::vector<TensorViewWrapper>& out)
             {
-                throw std::runtime_error("Unimplemented op in CPU emitter");
+                throw std::runtime_error("Unimplemented op '" + node->description() +
+                                         "' in CPU emitter");
             }

             static void nop(CPU_ExternalFunction* external_function,
...
@@ -728,7 +728,7 @@ using namespace ngraph::runtime;
             auto handler = dispatcher.find(type_index(typeid(n)));
             if (handler == dispatcher.end())
             {
-                throw ngraph_error("Unhandled op during code generation : " + node->description());
+                throw unsupported_op(node->description());
             }
             vector<TensorViewWrapper> in;
             vector<string> node_input_names;
@@ -1276,8 +1276,7 @@ void runtime::cpu::CPU_ExternalFunction::build()
         auto handler = build_dispatcher.find(type_index(typeid(n)));
         if (handler == build_dispatcher.end())
         {
-            throw ngraph_error("Unhandled op during executor construction : " +
-                               node->description());
+            throw unsupported_op(node->description());
         }
         vector<TensorViewWrapper> in;
         vector<string> in_names;
@@ -1621,7 +1620,7 @@ string runtime::cpu::CPU_ExternalFunction::emit_op_as_function(const Node& node,
     auto handler = dispatcher.find(type_index(typeid(node)));
     if (handler == dispatcher.end())
     {
-        throw ngraph_error("Unhandled op during function emit : " + node.description());
+        throw unsupported_op(node.description());
     }
     vector<TensorViewWrapper> in;
     size_t arg_index = 0;
...
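The three hunks above all follow the same dispatch-table pattern. A condensed, self-contained sketch of that pattern, where Node, Handler, and emit are stand-ins rather than the real CPU_ExternalFunction code:

    #include <functional>
    #include <stdexcept>
    #include <string>
    #include <typeindex>
    #include <unordered_map>

    struct Node
    {
        virtual ~Node() = default;
        virtual std::string description() const = 0;
    };

    class unsupported_op : public std::runtime_error
    {
    public:
        using std::runtime_error::runtime_error;
    };

    using Handler = std::function<void(const Node&)>;
    static std::unordered_map<std::type_index, Handler> dispatcher;

    void emit(const Node& n)
    {
        // Handlers are keyed by the concrete node type; a dispatch miss now
        // raises unsupported_op carrying just the op name, instead of a
        // generic ngraph_error with a longer message.
        auto handler = dispatcher.find(std::type_index(typeid(n)));
        if (handler == dispatcher.end())
        {
            throw unsupported_op(n.description());
        }
        handler->second(n);
    }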
File mode changed from 100755 to 100644
File mode changed from 100755 to 100644
File mode changed from 100755 to 100644
@@ -52,7 +52,8 @@ namespace ngraph
                 assign(ngraph::runtime::cpu::CPU_ExternalFunction* external_function,
                        ngraph::Node* node)
                 {
-                    throw std::runtime_error("Unimplemented op in CPU assignment");
+                    throw std::runtime_error("Unimplemented op '" + node->description() +
+                                             "' in CPU assignment");
                 }

             private:
...
File mode changed from 100755 to 100644
@@ -46,8 +46,8 @@ namespace ngraph
                 const std::vector<GPU_TensorViewWrapper>& args,
                 const std::vector<GPU_TensorViewWrapper>& out)
             {
-                throw std::runtime_error("Unimplemented op in GPU emitter for " +
-                                         node->get_name());
+                throw std::runtime_error("Unimplemented op '" + node->description() +
+                                         "' in GPU emitter");
             }

             static void nop(GPU_ExternalFunction* external_function,
...
@@ -548,14 +548,13 @@ void runtime::gpu::GPU_ExternalFunction::emit_functions()
         for (shared_ptr<Node> node : m_function_ordered_ops.at(current_function))
         {
-            auto& n =
-                *node; // Work around a compiler warning (*node inside typeid may have effects
-                       // with shared pointers, which is fine here but clang doesn't like it.)
+            auto& n = *node;
+            // Work around a compiler warning (*node inside typeid may have effects
+            // with shared pointers, which is fine here but clang doesn't like it.)
             auto handler = dispatcher.find(type_index(typeid(n)));
             if (handler == dispatcher.end())
             {
-                throw ngraph_error("Unhandled op during code generation : " +
-                                   node->description());
+                throw ngraph::unsupported_op(node->description());
             }
             vector<GPU_TensorViewWrapper> in;
             vector<string> node_input_names;
@@ -753,6 +752,10 @@ string runtime::gpu::GPU_ExternalFunction::emit_op_as_function(const Node& node,
     // Work around a compiler warning (*node inside typeid may have effects
     // with shared pointers, which is fine here but clang doesn't like it.)
     auto handler = dispatcher.find(type_index(typeid(node)));
+    if (handler == dispatcher.end())
+    {
+        throw ngraph::unsupported_op(node.description());
+    }
     vector<GPU_TensorViewWrapper> in;
     size_t arg_index = 0;
     set<string> arg_names;
...
@@ -1030,7 +1030,7 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
         }
         else
         {
-            throw invalid_argument("IntelGPU: Unsupported operation \"" + op->description() + "\"");
+            throw unsupported_op(op->description());
         }
     }
...
@@ -135,7 +135,7 @@ bool runtime::interpreter::INTBackend::call(shared_ptr<Function> function,
     {
         const Node* op = &wrapped.get_node();
         auto type_id = wrapped.get_typeid();
-        if (op->description() == "Parameter")
+        if (type_id == OP_TYPEID::Parameter)
         {
             continue;
         }
...
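The interpreter change above replaces a per-node string comparison with an enum comparison; in isolation the idea is simply this (the OP_TYPEID values are abbreviated for illustration):

    // Abbreviated stand-in for the interpreter's op-type enum.
    enum class OP_TYPEID { Parameter, Add, Subtract };

    // Comparing a small enum per node is cheaper and typo-proof compared
    // with matching the op's description string every time.
    bool is_parameter(OP_TYPEID type_id)
    {
        return type_id == OP_TYPEID::Parameter;
    }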
@@ -1083,10 +1083,7 @@ private:
                 args[0]->get_data_ptr<T>(), out[0]->get_data_ptr<T>(), out[0]->get_element_count());
             break;
         }
-        case OP_TYPEID::StopGradient:
-        {
-            // TODO: Throw a real unsupported_op when available
-            throw std::runtime_error("Unsupported op 'StopGradient'");
-        }
+        case OP_TYPEID::StopGradient: { throw unsupported_op("Unsupported op 'StopGradient'");
+        }
         case OP_TYPEID::Subtract:
         {
@@ -1147,7 +1144,9 @@ private:
             {
                 throw ngraph_error("Unexpected type");
             }
+            break;
         }
+        default: throw unsupported_op("Unsupported op '" + node.description() + "'");
#pragma GCC diagnostic pop
         }
     }
...
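With the default case added, the interpreter's big switch reports any op it does not implement by name. The pattern in condensed, compilable form (the types and op set here are illustrative, not the real interpreter):

    #include <stdexcept>
    #include <string>

    enum class OP_TYPEID { Parameter, StopGradient, Subtract };

    class unsupported_op : public std::runtime_error
    {
    public:
        using std::runtime_error::runtime_error;
    };

    void execute(OP_TYPEID type_id, const std::string& description)
    {
        switch (type_id)
        {
        case OP_TYPEID::Subtract:
            // ... run the Subtract kernel ...
            break;
        case OP_TYPEID::StopGradient:
            // Known op, deliberately unimplemented.
            throw unsupported_op("Unsupported op 'StopGradient'");
        default:
            // Any op without a case above is reported by name rather than
            // silently falling through.
            throw unsupported_op("Unsupported op '" + description + "'");
        }
    }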
@@ -39,7 +39,6 @@ runtime::interpreter::NodeWrapper::NodeWrapper(const shared_ptr<const Node>& node)
     }
     else
     {
-        // TODO: use unsupported_op when that is merged to master
-        throw runtime_error(m_node->description());
+        throw unsupported_op("Unsupported op '" + m_node->description() + "'");
     }
 }
@@ -24,6 +24,7 @@
 #include <iomanip>

 #include "benchmark.hpp"
+#include "ngraph/except.hpp"
 #include "ngraph/file_util.hpp"
 #include "ngraph/graph_util.hpp"
 #include "ngraph/pass/manager.hpp"
@@ -328,8 +329,14 @@ OPTIONS
             },
             true);
         unordered_map<string, Shape> shape_info;
+        cout << "Benchmarking " << endl;
+        cout << "    Backend: " << backend << endl;
+        cout << "    Iterations: " << iterations << endl;
+        cout << "    Warmup: " << warmup_iterations << endl;
+        cout << "    Copy Data: " << (copy_data ? "true" : "false") << endl;
         for (const string& m : models)
         {
+            cout << "Benchmarking " << m << endl;
             try
             {
                 shared_ptr<Function> f = deserialize(m);
@@ -339,6 +346,10 @@ OPTIONS
                 aggregate_perf_data.insert(
                     aggregate_perf_data.end(), perf_shape.begin(), perf_shape.end());
             }
+            catch (ngraph::unsupported_op ue)
+            {
+                cout << "Unsupported op '" << ue.what() << "' in model " << m << endl;
+            }
             catch (exception e)
             {
                 cout << "Exception caught on '" << m << "'\n" << e.what() << endl;
@@ -348,16 +359,27 @@ OPTIONS
     }
     else if (iterations > 0)
     {
-        shared_ptr<Function> f = deserialize(model);
-        cout << "Benchmarking " << model << endl;
-        cout << "    Backend: " << backend << endl;
-        cout << "    Iterations: " << iterations << endl;
-        cout << "    Warmup: " << warmup_iterations << endl;
-        cout << "    Copy Data: " << (copy_data ? "true" : "false") << endl;
-        auto perf_data =
-            run_benchmark(f, backend, iterations, timing_detail, warmup_iterations, copy_data);
-        auto perf_shape = to_perf_shape(f, perf_data);
-        print_results(perf_shape, timing_detail);
+        try
+        {
+            shared_ptr<Function> f = deserialize(model);
+            cout << "Benchmarking " << model << endl;
+            cout << "    Backend: " << backend << endl;
+            cout << "    Iterations: " << iterations << endl;
+            cout << "    Warmup: " << warmup_iterations << endl;
+            cout << "    Copy Data: " << (copy_data ? "true" : "false") << endl;
+            auto perf_data =
+                run_benchmark(f, backend, iterations, timing_detail, warmup_iterations, copy_data);
+            auto perf_shape = to_perf_shape(f, perf_data);
+            print_results(perf_shape, timing_detail);
+        }
+        catch (ngraph::unsupported_op ue)
+        {
+            cout << "Unsupported op '" << ue.what() << "'" << endl;
+        }
+        catch (exception e)
+        {
+            cout << e.what() << endl;
+        }
     }

     return 0;
...
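The net effect of the nbench changes is that one problematic model no longer aborts a multi-model run. Schematically, with run_one, the model names, and unsupported_op standing in for the real deserialize/benchmark code:

    #include <exception>
    #include <iostream>
    #include <stdexcept>
    #include <string>
    #include <vector>

    class unsupported_op : public std::runtime_error
    {
    public:
        using std::runtime_error::runtime_error;
    };

    // Stand-in for deserialize + run_benchmark + print_results.
    void run_one(const std::string& model)
    {
        if (model == "bad.json")
        {
            throw unsupported_op("StopGradient");
        }
    }

    int main()
    {
        std::vector<std::string> models{"good.json", "bad.json", "also_good.json"};
        for (const std::string& m : models)
        {
            try
            {
                run_one(m);
            }
            catch (const unsupported_op& ue)
            {
                // Report the offending op and continue with the next model.
                std::cout << "Unsupported op '" << ue.what() << "' in model " << m << "\n";
            }
            catch (const std::exception& e)
            {
                std::cout << "Exception caught on '" << m << "'\n" << e.what() << "\n";
            }
        }
        return 0;
    }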
@@ -51,6 +51,33 @@ static const vector<element::Type> s_known_element_types = {element::from<float>(),
     element::from<uint32_t>(),
     element::from<uint64_t>()};

+class UnhandledOp : public ngraph::op::Op
+{
+public:
+    UnhandledOp(const std::shared_ptr<Node>& arg)
+        : Op("Unsupported_op", {})
+    {
+        set_output_type(0, arg->get_element_type(), arg->get_shape());
+    }
+
+    shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override
+    {
+        return make_shared<UnhandledOp>(new_args[0]);
+    }
+};
+
+NGRAPH_TEST(${BACKEND_NAME}, unhandled_op)
+{
+    Shape shape{2, 2};
+    auto A = make_shared<op::Parameter>(element::f32, shape);
+    auto unhandled = make_shared<UnhandledOp>(A);
+    auto f = make_shared<Function>(unhandled, op::ParameterVector{A});
+    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+    shared_ptr<runtime::TensorView> a = backend->create_tensor<float>(shape);
+    shared_ptr<runtime::TensorView> result = backend->create_tensor<float>(shape);
+    ASSERT_THROW(backend->call_with_validate(f, {result}, {a}), unsupported_op);
+}
+
 NGRAPH_TEST(${BACKEND_NAME}, function_name)
 {
     Shape shape{2, 2};
...
File mode changed from 100755 to 100644
@@ -60,7 +60,7 @@ TEST(cpu_test, unhandled_op)
     auto unhandled = make_shared<UnhandledOp>(A);
     auto f = make_shared<Function>(unhandled, op::ParameterVector{A});
     auto backend = runtime::Backend::create("CPU");
-    ASSERT_THROW(backend->compile(f), ngraph_error);
+    ASSERT_THROW(backend->compile(f), unsupported_op);
 }

 TEST(cpu_test, trivial_in_place_relu)
...