Commit 5927bbe4 authored by shssf, committed by Robert Kimball

IntelGPU backend: Dot operation (partially implemented) (#1275)

* IntelGPU backend: Dot operation (partially implemented)

* PR1275. Debug output deleted.

* PR1275. Comments addressed
parent c007740b
@@ -219,6 +219,28 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
             const cldnn::data op_const(output_name, mem);
             topology.add(op_const);
         }
else if ("Dot" == op->description())
{
arguments_check(op, 2, 1);
const string& inputA_name = op->get_inputs().at(0).get_tensor().get_name();
const Shape& inputA_shape = op->get_inputs().at(0).get_shape();
const string& inputB_name = op->get_inputs().at(1).get_tensor().get_name();
const Shape& inputB_shape = op->get_inputs().at(1).get_shape();
const string& output_name = op->get_outputs().begin()->get_tensor().get_name();
const Shape& output_shape = op->get_outputs().begin()->get_shape();
const element::Type& output_type =
op->get_outputs().begin()->get_tensor().get_element_type();
do_dot_operation(topology,
inputA_name,
inputA_shape,
inputB_name,
inputB_shape,
output_name,
output_shape,
output_type);
}
else if ("MaxPool" == op->description()) else if ("MaxPool" == op->description())
{ {
arguments_check(op, 1, 1); arguments_check(op, 1, 1);
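This hunk only wires Dot into the backend's op dispatch: it pulls the tensor names, shapes, and element type off the op and forwards them to the new do_dot_operation helper, whose kernel-building body is not part of this excerpt. For reference, the result that kernel must reproduce in the common 2D case is plain matrix multiplication. Below is a minimal CPU-side sketch of those semantics; the helper name dot_2d_reference and the row-major layout parameters are illustrative assumptions, not code from this PR.

#include <cstddef>
#include <iostream>
#include <vector>

// Plain-CPU reference for the 2D case of Dot: out[i][j] = sum_k a[i][k] * b[k][j].
// Row-major storage; dimensions are passed explicitly. Illustrative only.
static std::vector<float> dot_2d_reference(const std::vector<float>& a,
                                           const std::vector<float>& b,
                                           std::size_t rows_a,
                                           std::size_t cols_a,
                                           std::size_t cols_b)
{
    std::vector<float> out(rows_a * cols_b, 0.0f);
    for (std::size_t i = 0; i < rows_a; ++i)
    {
        for (std::size_t j = 0; j < cols_b; ++j)
        {
            float acc = 0.0f;
            for (std::size_t k = 0; k < cols_a; ++k)
            {
                acc += a[i * cols_a + k] * b[k * cols_b + j];
            }
            out[i * cols_b + j] = acc;
        }
    }
    return out;
}

int main()
{
    const std::vector<float> a{1, 2, 3, 4, 5, 6};    // 2x3 matrix
    const std::vector<float> b{7, 8, 9, 10, 11, 12}; // 3x2 matrix
    for (float v : dot_2d_reference(a, b, 2, 3, 2))
    {
        std::cout << v << ' '; // prints: 58 64 139 154
    }
    std::cout << '\n';
}

Note that ngraph's Dot is more general than this 2D case (by default it contracts the last axis of the first argument against the first axis of the second), which is why the dispatch above passes whole Shape objects rather than row and column counts.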
@@ -349,15 +371,15 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
             const Shape& pad_below = pad->get_padding_below();
             const Shape& pad_interior = pad->get_padding_interior();

-            do_pad_kernel(topology,
-                          input_name,
-                          input_shape,
-                          scalar_name,
-                          output_name,
-                          output_shape,
-                          output_type,
-                          pad_below,
-                          pad_interior);
+            do_pad_operation(topology,
+                             input_name,
+                             input_shape,
+                             scalar_name,
+                             output_name,
+                             output_shape,
+                             output_type,
+                             pad_below,
+                             pad_interior);
         }
         else if ("BatchNorm" == op->description())
         {
...
@@ -28,15 +28,24 @@ namespace ngraph
     {
         namespace intelgpu
         {
-            void do_pad_kernel(cldnn::topology& topology,
-                               const std::string& input_name,
-                               const Shape& input_shape,
-                               const std::string& scalar_name,
-                               const std::string& output_name,
-                               const Shape& output_shape,
-                               const element::Type& output_type,
-                               const Shape& pad_below,
-                               const Shape& pad_interior);
+            void do_pad_operation(cldnn::topology& topology,
+                                  const std::string& input_name,
+                                  const Shape& input_shape,
+                                  const std::string& scalar_name,
+                                  const std::string& output_name,
+                                  const Shape& output_shape,
+                                  const element::Type& output_type,
+                                  const Shape& pad_below,
+                                  const Shape& pad_interior);
+
+            void do_dot_operation(cldnn::topology& topology,
+                                  const std::string& inputA_name,
+                                  const Shape& inputA_shape,
+                                  const std::string& inputB_name,
+                                  const Shape& inputB_shape,
+                                  const std::string& output_name,
+                                  const Shape& output_shape,
+                                  const element::Type& output_type);
         }
     }
 }
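The header only declares the entry points; the definitions (in the corresponding source file, not shown in this excerpt) generate OpenCL kernels and attach them to the cldnn::topology. As a rough illustration of the kind of shape-specialized source such a generator could emit for the 2D case, here is a hypothetical sketch: the function generate_dot2d_source, the kernel name dot2d, and its argument order are all invented for illustration and do not appear in the PR.

#include <cstddef>
#include <string>

// Hypothetical generator: returns OpenCL C source for a naive 2D dot kernel,
// one work-item per output element, with the inner dimensions baked into the
// source text (they are known at compile() time from the input Shapes).
std::string generate_dot2d_source(std::size_t cols_a, std::size_t cols_b)
{
    const std::string ka = std::to_string(cols_a);
    const std::string kb = std::to_string(cols_b);
    return "__kernel void dot2d(const __global float* a,\n"
           "                    const __global float* b,\n"
           "                    __global float* c)\n"
           "{\n"
           "    const uint i = get_global_id(0);\n"
           "    const uint j = get_global_id(1);\n"
           "    float acc = 0.0f;\n"
           "    for (uint k = 0; k < " + ka + "; ++k)\n"
           "    {\n"
           "        acc += a[i * " + ka + " + k] * b[k * " + kb + " + j];\n"
           "    }\n"
           "    c[i * " + kb + " + j] = acc;\n"
           "}\n";
}

Specializing the kernel source per shape mirrors the information available at the call site shown earlier, where both input shapes are already known when the topology is built.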
@@ -34,13 +34,11 @@ backwards_concat_axis_1
 backwards_concat_vector
 backwards_cos
 backwards_cosh
-backwards_dot_scalar_scalar
 backwards_dot_scalar_tensor
 backwards_dot_tensor2_tensor2
 backwards_dot_tensor3_tensor3
 backwards_dot_tensor_scalar
 backwards_dot_tensor_vector
-backwards_dot_vector_vector
 backwards_exp
 backwards_floor
 backwards_log
@@ -139,21 +137,12 @@ cos
 cosh
 divide_by_zero_int32
 dot_0_0
-dot1d
-dot2d
 dot_2x0_0
-dot3d_2d
-dot3d_3d
 dot_matrix_0x2_2x0
 dot_matrix_2x0_0x2
 dot_matrix_3x2_2x0
-dot_matrix_vector
-dot_matrix_vector_4_3
 dot_matrix_vector_int64
 dot_scalar_0x2
-dot_scalar_scalar
-dot_scalar_tensor_arg0
-dot_scalar_tensor_arg1
 equal
 exp
 floor
@@ -216,14 +205,9 @@ one_hot_vector_1_barely_oob
 one_hot_vector_1_far_oob
 one_hot_vector_1_fp
 one_hot_vector_1_fp_nonint
-pad_exterior_1d
 pad_exterior_2d_0x0
 pad_exterior_2d_0x3
 pad_exterior_2d_3x0
-pad_exterior_4d_1x2x2x2
-pad_interior_1d
-pad_interior_exterior_1d
-pad_interior_exterior_2d
 pad_interior_exterior_4d_2x0x3x2
 power
 product_3d_eliminate_zero_dim
...
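The last three hunks edit the backend's unit-test manifest, the per-backend list of tests the harness should skip. Deleting the dot_* and pad_* names above re-enables those tests on the IntelGPU backend, matching the new Dot kernel and the renamed pad implementation. Entries left in the surrounding context, such as dot_matrix_vector_int64 and the zero-sized dot cases, appear to remain skipped, which squares with the "partially implemented" note in the commit title.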