Commit 54403cdc authored by shssf's avatar shssf Committed by Robert Kimball

IntelGPU backend. Change Intel GPU backend to use op_tbl (#1626)

* IntelGPU backend. Compile function moved to use switch for operations.

* PR1626. IntelGPU backend. Test unhandled_op fixed.

* PR1626. Comments addressed
parent 8970915b
...@@ -67,6 +67,36 @@ ...@@ -67,6 +67,36 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
// This expands the op list in op_tbl.hpp into a list of enumerations that look like this:
// Abs,
// Acos,
// ...
// Each NGRAPH_OP(x) invocation in op_tbl.hpp becomes one enumerator, so the
// enum automatically stays in sync with the master op table.
#define NGRAPH_OP(a) a,
enum class OP_TYPEID
{
#include "ngraph/op/op_tbl.hpp"
};
// Undefine immediately so later includes of op_tbl.hpp can redefine NGRAPH_OP
// for a different expansion (see get_typeid below).
#undef NGRAPH_OP
// Translates an op description string (e.g. "Abs") into its OP_TYPEID
// enumerator. Throws unsupported_op for names not present in op_tbl.hpp.
static OP_TYPEID get_typeid(const string& s)
{
// This expands the op list in op_tbl.hpp into name->enumerator map entries:
// {"Abs", OP_TYPEID::Abs},
// {"Acos", OP_TYPEID::Acos},
// ...
#define NGRAPH_OP(a) {#a, OP_TYPEID::a},
    // Built once on first call and reused for every subsequent lookup.
    static const unordered_map<string, OP_TYPEID> typeid_map{
#include "ngraph/op/op_tbl.hpp"
    };
#undef NGRAPH_OP
    const auto entry = typeid_map.find(s);
    if (entry != typeid_map.end())
    {
        return entry->second;
    }
    throw unsupported_op("Unsupported op '" + s + "'");
}
static void arguments_check(const shared_ptr<Node>& op, size_t input, size_t output) static void arguments_check(const shared_ptr<Node>& op, size_t input, size_t output)
{ {
if (op->get_input_size() != input || op->get_output_size() != output) if (op->get_input_size() != input || op->get_output_size() != output)
...@@ -230,7 +260,15 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -230,7 +260,15 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
for (shared_ptr<Node> op : func->get_ops()) for (shared_ptr<Node> op : func->get_ops())
{ {
if ("Parameter" == op->description()) // We want to check that every OP_TYPEID enumeration is included in the list.
// These GCC flags enable compile-time checking so that if an enumeration
// is not in the list an error is generated.
#pragma GCC diagnostic push
#pragma GCC diagnostic error "-Wswitch"
#pragma GCC diagnostic error "-Wswitch-enum"
switch (get_typeid(op->description()))
{
case OP_TYPEID::Parameter:
{ {
arguments_check(op, 0, 1); arguments_check(op, 0, 1);
...@@ -240,14 +278,16 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -240,14 +278,16 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
const cldnn::input_layout op_layout(element_name, element_layout); const cldnn::input_layout op_layout(element_name, element_layout);
topology.add(op_layout); topology.add(op_layout);
break;
} }
else if ("Result" == op->description()) case OP_TYPEID::Result:
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
do_equal_propagation(topology, get_input_name(op), get_output_name(op)); do_equal_propagation(topology, get_input_name(op), get_output_name(op));
break;
} }
else if ("GetOutputElement" == op->description()) case OP_TYPEID::GetOutputElement:
{ {
if (op->get_inputs().empty() || op->get_outputs().size() != 1) if (op->get_inputs().empty() || op->get_outputs().size() != 1)
{ {
...@@ -258,8 +298,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -258,8 +298,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
static_pointer_cast<op::GetOutputElement>(op); static_pointer_cast<op::GetOutputElement>(op);
do_equal_propagation(topology, get_input_name(op, elem->get_n()), get_output_name(op)); do_equal_propagation(topology, get_input_name(op, elem->get_n()), get_output_name(op));
break;
} }
else if ("Slice" == op->description()) case OP_TYPEID::Slice:
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
...@@ -285,8 +326,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -285,8 +326,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
upper_bounds, upper_bounds,
strides); strides);
} }
break;
} }
else if ("Select" == op->description()) case OP_TYPEID::Select:
{ {
arguments_check(op, 3, 1); arguments_check(op, 3, 1);
...@@ -300,8 +342,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -300,8 +342,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
get_output_name(op), get_output_name(op),
get_output_shape(op), get_output_shape(op),
get_output_type(op)); get_output_type(op));
break;
} }
else if ("Reverse" == op->description()) case OP_TYPEID::Reverse:
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
...@@ -322,8 +365,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -322,8 +365,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
get_output_type(op), get_output_type(op),
reversed_axes); reversed_axes);
} }
break;
} }
else if ("Convert" == op->description()) case OP_TYPEID::Convert:
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
...@@ -341,8 +385,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -341,8 +385,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
get_output_shape(op), get_output_shape(op),
get_output_type(op)); get_output_type(op));
} }
break;
} }
else if ("Concat" == op->description()) case OP_TYPEID::Concat:
{ {
if (op->get_inputs().empty() || op->get_outputs().size() != 1) if (op->get_inputs().empty() || op->get_outputs().size() != 1)
{ {
...@@ -368,8 +413,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -368,8 +413,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
const cldnn::concatenation cldnn_concat(get_output_name(op), inputs, cldnn_axis); const cldnn::concatenation cldnn_concat(get_output_name(op), inputs, cldnn_axis);
topology.add(cldnn_concat); topology.add(cldnn_concat);
break;
} }
else if ("Softmax" == op->description()) case OP_TYPEID::Softmax:
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
...@@ -412,28 +458,34 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -412,28 +458,34 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
get_output_name(op), get_input_name(op), dimension); get_output_name(op), get_input_name(op), dimension);
topology.add(cldnn_softmax); topology.add(cldnn_softmax);
} }
break;
} }
else if ("Add" == op->description()) case OP_TYPEID::Add:
{ {
do_eltwise_operation(topology, op, cldnn::eltwise_mode::sum); do_eltwise_operation(topology, op, cldnn::eltwise_mode::sum);
break;
} }
else if ("Multiply" == op->description()) case OP_TYPEID::Multiply:
{ {
do_eltwise_operation(topology, op, cldnn::eltwise_mode::prod); do_eltwise_operation(topology, op, cldnn::eltwise_mode::prod);
break;
} }
else if ("Divide" == op->description()) case OP_TYPEID::Divide:
{ {
do_eltwise_operation(topology, op, cldnn::eltwise_mode::div); do_eltwise_operation(topology, op, cldnn::eltwise_mode::div);
break;
} }
else if ("Maximum" == op->description()) case OP_TYPEID::Maximum:
{ {
do_eltwise_operation(topology, op, cldnn::eltwise_mode::max); do_eltwise_operation(topology, op, cldnn::eltwise_mode::max);
break;
} }
else if ("Minimum" == op->description()) case OP_TYPEID::Minimum:
{ {
do_eltwise_operation(topology, op, cldnn::eltwise_mode::min); do_eltwise_operation(topology, op, cldnn::eltwise_mode::min);
break;
} }
else if ("Constant" == op->description()) case OP_TYPEID::Constant:
{ {
arguments_check(op, 0, 1); arguments_check(op, 0, 1);
...@@ -447,8 +499,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -447,8 +499,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
const cldnn::data op_const(get_output_name(op), mem); const cldnn::data op_const(get_output_name(op), mem);
topology.add(op_const); topology.add(op_const);
break;
} }
else if ("Dot" == op->description()) case OP_TYPEID::Dot:
{ {
arguments_check(op, 2, 1); arguments_check(op, 2, 1);
...@@ -460,8 +513,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -460,8 +513,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
get_output_name(op), get_output_name(op),
get_output_shape(op), get_output_shape(op),
get_output_type(op)); get_output_type(op));
break;
} }
else if ("MaxPool" == op->description()) case OP_TYPEID::MaxPool:
{ {
const shared_ptr<op::MaxPool> max_pool = static_pointer_cast<op::MaxPool>(op); const shared_ptr<op::MaxPool> max_pool = static_pointer_cast<op::MaxPool>(op);
...@@ -471,8 +525,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -471,8 +525,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
max_pool->get_window_movement_strides(), max_pool->get_window_movement_strides(),
max_pool->get_padding_below(), max_pool->get_padding_below(),
cldnn::pooling_mode::max); cldnn::pooling_mode::max);
break;
} }
else if ("MaxPoolBackprop" == op->description()) case OP_TYPEID::MaxPoolBackprop:
{ {
arguments_check(op, 2, 1); arguments_check(op, 2, 1);
...@@ -490,8 +545,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -490,8 +545,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
max_pool_b->get_window_shape(), max_pool_b->get_window_shape(),
max_pool_b->get_window_movement_strides(), max_pool_b->get_window_movement_strides(),
max_pool_b->get_padding_below()); max_pool_b->get_padding_below());
break;
} }
else if ("AvgPool" == op->description()) case OP_TYPEID::AvgPool:
{ {
const shared_ptr<op::AvgPool> avg_pool = static_pointer_cast<op::AvgPool>(op); const shared_ptr<op::AvgPool> avg_pool = static_pointer_cast<op::AvgPool>(op);
const cldnn::pooling_mode mode = avg_pool->get_include_padding_in_avg_computation() const cldnn::pooling_mode mode = avg_pool->get_include_padding_in_avg_computation()
...@@ -504,8 +560,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -504,8 +560,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
avg_pool->get_window_movement_strides(), avg_pool->get_window_movement_strides(),
avg_pool->get_padding_below(), avg_pool->get_padding_below(),
mode); mode);
break;
} }
else if ("AvgPoolBackprop" == op->description()) case OP_TYPEID::AvgPoolBackprop:
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
...@@ -522,8 +579,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -522,8 +579,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
avg_pool_b->get_window_movement_strides(), avg_pool_b->get_window_movement_strides(),
avg_pool_b->get_padding_below(), avg_pool_b->get_padding_below(),
avg_pool_b->get_include_padding_in_avg_computation()); avg_pool_b->get_include_padding_in_avg_computation());
break;
} }
else if ("Broadcast" == op->description()) case OP_TYPEID::Broadcast:
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
...@@ -546,8 +604,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -546,8 +604,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
axis, axis,
true); true);
} }
break;
} }
else if ("Sum" == op->description()) case OP_TYPEID::Sum:
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
...@@ -570,8 +629,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -570,8 +629,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
axis, axis,
false); false);
} }
break;
} }
else if ("Product" == op->description()) case OP_TYPEID::Product:
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
...@@ -592,8 +652,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -592,8 +652,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
get_output_type(op), get_output_type(op),
axis); axis);
} }
break;
} }
else if ("Reshape" == op->description()) case OP_TYPEID::Reshape:
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
...@@ -625,17 +686,20 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -625,17 +686,20 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
{ {
do_equal_propagation(topology, get_input_name(op), get_output_name(op)); do_equal_propagation(topology, get_input_name(op), get_output_name(op));
} }
break;
} }
else if ("Negative" == op->description()) case OP_TYPEID::Negative:
{ {
const cldnn_activation_additional_params param = {-1.f, 0.f}; const cldnn_activation_additional_params param = {-1.f, 0.f};
do_unary_operation(topology, op, activation_linear, param); do_unary_operation(topology, op, activation_linear, param);
break;
} }
else if ("Relu" == op->description()) case OP_TYPEID::Relu:
{ {
do_unary_operation(topology, op, activation_relu); do_unary_operation(topology, op, activation_relu);
break;
} }
else if ("ReluBackprop" == op->description()) case OP_TYPEID::ReluBackprop:
{ {
arguments_check(op, 2, 1); arguments_check(op, 2, 1);
...@@ -646,56 +710,69 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -646,56 +710,69 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
activation_grad_relu, activation_grad_relu,
param); param);
topology.add(cldnn_activ_grad); topology.add(cldnn_activ_grad);
break;
} }
else if ("Abs" == op->description()) case OP_TYPEID::Abs:
{ {
do_unary_operation(topology, op, activation_abs); do_unary_operation(topology, op, activation_abs);
break;
} }
else if ("Sqrt" == op->description()) case OP_TYPEID::Sqrt:
{ {
do_unary_operation(topology, op, activation_sqrt); do_unary_operation(topology, op, activation_sqrt);
break;
} }
else if ("Tanh" == op->description()) case OP_TYPEID::Tanh:
{ {
do_unary_operation(topology, op, activation_hyperbolic_tan); do_unary_operation(topology, op, activation_hyperbolic_tan);
break;
} }
else if ("Sin" == op->description()) case OP_TYPEID::Sin:
{ {
do_unary_operation(topology, op, activation_sin); do_unary_operation(topology, op, activation_sin);
break;
} }
else if ("Asin" == op->description()) case OP_TYPEID::Asin:
{ {
do_unary_operation(topology, op, activation_asin); do_unary_operation(topology, op, activation_asin);
break;
} }
else if ("Sinh" == op->description()) case OP_TYPEID::Sinh:
{ {
do_unary_operation(topology, op, activation_sinh); do_unary_operation(topology, op, activation_sinh);
break;
} }
else if ("Cos" == op->description()) case OP_TYPEID::Cos:
{ {
do_unary_operation(topology, op, activation_cos); do_unary_operation(topology, op, activation_cos);
break;
} }
else if ("Acos" == op->description()) case OP_TYPEID::Acos:
{ {
do_unary_operation(topology, op, activation_acos); do_unary_operation(topology, op, activation_acos);
break;
} }
else if ("Cosh" == op->description()) case OP_TYPEID::Cosh:
{ {
do_unary_operation(topology, op, activation_cosh); do_unary_operation(topology, op, activation_cosh);
break;
} }
else if ("Log" == op->description()) case OP_TYPEID::Log:
{ {
do_unary_operation(topology, op, activation_log); do_unary_operation(topology, op, activation_log);
break;
} }
else if ("Exp" == op->description()) case OP_TYPEID::Exp:
{ {
do_unary_operation(topology, op, activation_exp); do_unary_operation(topology, op, activation_exp);
break;
} }
else if ("Sigmoid" == op->description()) case OP_TYPEID::Sigmoid:
{ {
do_unary_operation(topology, op, activation_logistic); do_unary_operation(topology, op, activation_logistic);
break;
} }
else if ("SigmoidBackprop" == op->description()) case OP_TYPEID::SigmoidBackprop:
{ {
arguments_check(op, 2, 1); arguments_check(op, 2, 1);
...@@ -707,8 +784,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -707,8 +784,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
get_output_name(op), get_output_name(op),
get_output_shape(op), get_output_shape(op),
get_output_type(op)); get_output_type(op));
break;
} }
else if ("Not" == op->description()) case OP_TYPEID::Not:
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
...@@ -718,48 +796,59 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -718,48 +796,59 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
get_output_name(op), get_output_name(op),
get_output_shape(op), get_output_shape(op),
get_output_type(op)); get_output_type(op));
break;
} }
else if ("Greater" == op->description()) case OP_TYPEID::Greater:
{ {
do_logical_operation(topology, op, " > "); do_logical_operation(topology, op, " > ");
break;
} }
else if ("GreaterEq" == op->description()) case OP_TYPEID::GreaterEq:
{ {
do_logical_operation(topology, op, " >= "); do_logical_operation(topology, op, " >= ");
break;
} }
else if ("Equal" == op->description()) case OP_TYPEID::Equal:
{ {
do_logical_operation(topology, op, " == "); do_logical_operation(topology, op, " == ");
break;
} }
else if ("NotEqual" == op->description()) case OP_TYPEID::NotEqual:
{ {
do_logical_operation(topology, op, " != "); do_logical_operation(topology, op, " != ");
break;
} }
else if ("Less" == op->description()) case OP_TYPEID::Less:
{ {
do_logical_operation(topology, op, " < "); do_logical_operation(topology, op, " < ");
break;
} }
else if ("LessEq" == op->description()) case OP_TYPEID::LessEq:
{ {
do_logical_operation(topology, op, " <= "); do_logical_operation(topology, op, " <= ");
break;
} }
else if ("And" == op->description()) case OP_TYPEID::And:
{ {
do_logical_operation(topology, op, " && "); do_logical_operation(topology, op, " && ");
break;
} }
else if ("Or" == op->description()) case OP_TYPEID::Or:
{ {
do_logical_operation(topology, op, " || "); do_logical_operation(topology, op, " || ");
break;
} }
else if ("Subtract" == op->description()) case OP_TYPEID::Subtract:
{ {
do_eltwise_operation(topology, op, cldnn::eltwise_mode::sub); do_eltwise_operation(topology, op, cldnn::eltwise_mode::sub);
break;
} }
else if ("Power" == op->description()) case OP_TYPEID::Power:
{ {
do_eltwise_operation(topology, op, cldnn::eltwise_mode::pow); do_eltwise_operation(topology, op, cldnn::eltwise_mode::pow);
break;
} }
else if ("Atan" == op->description()) case OP_TYPEID::Atan:
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
do_custom_eltwise_operation(topology, do_custom_eltwise_operation(topology,
...@@ -770,8 +859,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -770,8 +859,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
get_output_shape(op), get_output_shape(op),
get_output_type(op), get_output_type(op),
CUSTOM_ELTWISE::Atan); CUSTOM_ELTWISE::Atan);
break;
} }
else if ("Ceiling" == op->description()) case OP_TYPEID::Ceiling:
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
do_custom_eltwise_operation(topology, do_custom_eltwise_operation(topology,
...@@ -782,8 +872,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -782,8 +872,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
get_output_shape(op), get_output_shape(op),
get_output_type(op), get_output_type(op),
CUSTOM_ELTWISE::Ceil); CUSTOM_ELTWISE::Ceil);
break;
} }
else if ("Floor" == op->description()) case OP_TYPEID::Floor:
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
do_custom_eltwise_operation(topology, do_custom_eltwise_operation(topology,
...@@ -794,8 +885,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -794,8 +885,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
get_output_shape(op), get_output_shape(op),
get_output_type(op), get_output_type(op),
CUSTOM_ELTWISE::Floor); CUSTOM_ELTWISE::Floor);
break;
} }
else if ("Sign" == op->description()) case OP_TYPEID::Sign:
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
do_custom_eltwise_operation(topology, do_custom_eltwise_operation(topology,
...@@ -806,8 +898,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -806,8 +898,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
get_output_shape(op), get_output_shape(op),
get_output_type(op), get_output_type(op),
CUSTOM_ELTWISE::Sign); CUSTOM_ELTWISE::Sign);
break;
} }
else if ("Tan" == op->description()) case OP_TYPEID::Tan:
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
do_custom_eltwise_operation(topology, do_custom_eltwise_operation(topology,
...@@ -818,8 +911,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -818,8 +911,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
get_output_shape(op), get_output_shape(op),
get_output_type(op), get_output_type(op),
CUSTOM_ELTWISE::Tan); CUSTOM_ELTWISE::Tan);
break;
} }
else if ("Pad" == op->description()) case OP_TYPEID::Pad:
{ {
arguments_check(op, 2, 1); arguments_check(op, 2, 1);
...@@ -836,8 +930,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -836,8 +930,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
get_output_type(op), get_output_type(op),
pad_below, pad_below,
pad_interior); pad_interior);
break;
} }
else if ("BatchNormBackprop" == op->description()) case OP_TYPEID::BatchNormBackprop:
{ {
arguments_check(op, 6, 3); arguments_check(op, 6, 3);
...@@ -875,8 +970,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -875,8 +970,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
get_output_name(op, 0), get_output_name(op, 0),
get_output_name(op, 1), get_output_name(op, 1),
get_output_name(op, 2)); get_output_name(op, 2));
break;
} }
else if ("BatchNorm" == op->description()) case OP_TYPEID::BatchNorm:
{ {
const shared_ptr<op::BatchNorm> batch_norm = static_pointer_cast<op::BatchNorm>(op); const shared_ptr<op::BatchNorm> batch_norm = static_pointer_cast<op::BatchNorm>(op);
const double eps = batch_norm->get_eps_value(); const double eps = batch_norm->get_eps_value();
...@@ -935,8 +1031,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -935,8 +1031,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
{ {
arguments_check(op, 5, 1); // throw exception in this case arguments_check(op, 5, 1); // throw exception in this case
} }
break;
} }
else if ("Convolution" == op->description()) case OP_TYPEID::Convolution:
{ {
arguments_check(op, 2, 1); arguments_check(op, 2, 1);
...@@ -988,8 +1085,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -988,8 +1085,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
dilation); dilation);
topology.add(cldnn_conv); topology.add(cldnn_conv);
} }
break;
} }
else if ("ConvolutionBackpropFilters" == op->description()) case OP_TYPEID::ConvolutionBackpropFilters:
{ {
arguments_check(op, 2, 1); arguments_check(op, 2, 1);
...@@ -1015,8 +1113,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -1015,8 +1113,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
"filter[input_channel][output_channel]", "filter[input_channel][output_channel]",
"output[output_channel][batch]", "output[output_channel][batch]",
false); false);
break;
} }
else if ("ConvolutionBackpropData" == op->description()) case OP_TYPEID::ConvolutionBackpropData:
{ {
arguments_check(op, 2, 1); arguments_check(op, 2, 1);
...@@ -1042,8 +1141,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -1042,8 +1141,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
"filter[input_channel][output_channel]", "filter[input_channel][output_channel]",
"output[batch][output_channel]", "output[batch][output_channel]",
true); true);
break;
} }
else if ("Min" == op->description()) case OP_TYPEID::Min:
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
...@@ -1058,8 +1158,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -1058,8 +1158,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
get_output_type(op), get_output_type(op),
axis, axis,
true); true);
break;
} }
else if ("Max" == op->description()) case OP_TYPEID::Max:
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
...@@ -1074,8 +1175,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -1074,8 +1175,9 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
get_output_type(op), get_output_type(op),
axis, axis,
false); false);
break;
} }
else if ("OneHot" == op->description()) case OP_TYPEID::OneHot:
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
...@@ -1090,10 +1192,30 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func) ...@@ -1090,10 +1192,30 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
get_output_shape(op), get_output_shape(op),
get_output_type(op), get_output_type(op),
one_hot_axis); one_hot_axis);
} break;
else }
{ case OP_TYPEID::AllReduce:
throw unsupported_op(op->description()); case OP_TYPEID::ArgMax:
case OP_TYPEID::ArgMin:
case OP_TYPEID::Atan:
case OP_TYPEID::Ceiling:
case OP_TYPEID::Floor:
case OP_TYPEID::FunctionCall:
case OP_TYPEID::LRN:
case OP_TYPEID::Reduce:
case OP_TYPEID::ReduceWindow:
case OP_TYPEID::ReplaceSlice:
case OP_TYPEID::ReverseSequence:
case OP_TYPEID::SelectAndScatter:
case OP_TYPEID::Sign:
case OP_TYPEID::StopGradient:
case OP_TYPEID::Tan:
case OP_TYPEID::TopK:
{
throw unsupported_op("Unsupported op '" + op->description() +
"' in IntelGPU back end.");
}
#pragma GCC diagnostic pop
} }
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment