Commit 8db7b24b authored by Anna Alberska, committed by Robert Kimball

IntelGPU backend: AvgPool operation (partially) (#1336)

* IntelGPU backend: AvgPool operation (partially)

* Code format update in intelgpu_backend.cpp

* Remove code duplication in pooling ops in intelgpu_backend.cpp
parent 8476dea0
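
Reviewer note: this change routes both MaxPool and the newly supported AvgPool through a shared do_pooling_operation helper; pad_above is threaded through but not yet consumed, which matches the "(partially)" in the title. As a minimal sketch (not part of this commit; it assumes the ngraph public API of this period and an available "INTELGPU" backend), the new AvgPool path could be exercised like this:

#include <memory>
#include <vector>

#include "ngraph/ngraph.hpp"

using namespace ngraph;

int main()
{
    // 1x1x4x4 input, 2x2 window, stride 2, no padding: with
    // include_padding_in_avg_computation left at its default (false),
    // the backend selects cldnn::pooling_mode::average_no_padding.
    const Shape shape_in{1, 1, 4, 4};
    auto A = std::make_shared<op::Parameter>(element::f32, shape_in);
    auto pool = std::make_shared<op::AvgPool>(A, Shape{2, 2}, Strides{2, 2});
    auto f = std::make_shared<Function>(pool, op::ParameterVector{A});

    auto backend = runtime::Backend::create("INTELGPU");
    auto a = backend->create_tensor(element::f32, shape_in);
    auto result = backend->create_tensor(element::f32, pool->get_shape());

    std::vector<float> input(shape_size(shape_in), 1.0f); // averages of ones stay 1.0
    a->write(input.data(), 0, input.size() * sizeof(float));

    backend->call(f, {result}, {a});

    std::vector<float> output(shape_size(pool->get_shape()));
    result->read(output.data(), 0, output.size() * sizeof(float));
    return 0;
}
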
src/ngraph/runtime/intelgpu/intelgpu_backend.cpp
@@ -37,6 +37,7 @@
 #include "ngraph/runtime/intelgpu/intelgpu_tensor_view.hpp"
 #include "ngraph/node.hpp"
+#include "ngraph/op/avg_pool.hpp"
 #include "ngraph/op/batch_norm.hpp"
 #include "ngraph/op/broadcast.hpp"
 #include "ngraph/op/constant.hpp"
@@ -111,6 +112,33 @@ static void do_unary_operation(cldnn::topology& topology,
     topology.add(cldnn_unary);
 }
 
+static void do_pooling_operation(cldnn::topology& topology,
+                                 const shared_ptr<Node>& op,
+                                 const Shape& pool_shape,
+                                 const Strides& pool_strides,
+                                 const Shape& pad_below,
+                                 const Shape& pad_above,
+                                 const cldnn::pooling_mode mode)
+{
+    arguments_check(op, 1, 1);
+
+    const string& input_name = op->get_inputs().begin()->get_tensor().get_name();
+    const string& output_name = op->get_outputs().begin()->get_tensor().get_name();
+    const Shape& out_shape = op->get_outputs().begin()->get_shape();
+    const cldnn::tensor output_size =
+        runtime::intelgpu::IntelGPULayout::create_cldnn_tensor(out_shape);
+    const cldnn::tensor input_offset =
+        runtime::intelgpu::IntelGPULayout::create_cldnn_offset(pad_below);
+    const cldnn::tensor size = runtime::intelgpu::IntelGPULayout::create_cldnn_tensor(pool_shape);
+    const cldnn::tensor stride =
+        runtime::intelgpu::IntelGPULayout::create_cldnn_tensor(pool_strides);
+
+    const cldnn::pooling cldnn_pooling(
+        output_name, input_name, mode, size, stride, input_offset, output_size);
+    topology.add(cldnn_pooling);
+}
+
 static void do_logical_operation(cldnn::topology& topology,
                                  const shared_ptr<Node>& op,
                                  const string& operation)
@@ -344,41 +372,35 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
         }
         else if ("MaxPool" == op->description())
         {
-            arguments_check(op, 1, 1);
-
-            const string& input_name = op->get_inputs().begin()->get_tensor().get_name();
-            const string& output_name = op->get_outputs().begin()->get_tensor().get_name();
-            const Shape& out_shape = op->get_outputs().begin()->get_shape();
-            const cldnn::tensor output_size =
-                runtime::intelgpu::IntelGPULayout::create_cldnn_tensor(out_shape);
-
             const shared_ptr<op::MaxPool> max_pool = static_pointer_cast<op::MaxPool>(op);
             const Shape& pool_shape = max_pool->get_window_shape();
             const Strides& pool_strides = max_pool->get_window_movement_strides();
-            const Shape& pad = max_pool->get_padding_below();
-
-            vector<cldnn::tensor::value_type> offset({0, 0, 0, 0}); // No action by default
-            size_t ridx = 4;
-            for (auto i = pad.crbegin(); i != pad.crend() && ridx > 0; ++i, --ridx)
-            {
-                offset.at(ridx - 1) = -(*i);
-            }
-
-            const cldnn::tensor input_offset(
-                offset.at(0), offset.at(1), offset.at(3), offset.at(2));
-            const cldnn::tensor size =
-                runtime::intelgpu::IntelGPULayout::create_cldnn_tensor(pool_shape);
-            const cldnn::tensor strides =
-                runtime::intelgpu::IntelGPULayout::create_cldnn_tensor(pool_strides);
-
-            const cldnn::pooling cldd_pooling(output_name,
-                                              input_name,
-                                              cldnn::pooling_mode::max,
-                                              size,
-                                              strides,
-                                              input_offset,
-                                              output_size);
-            topology.add(cldd_pooling);
+            const Shape& pad_below = max_pool->get_padding_below();
+            const Shape& pad_above = max_pool->get_padding_above();
+
+            do_pooling_operation(topology,
+                                 op,
+                                 pool_shape,
+                                 pool_strides,
+                                 pad_below,
+                                 pad_above,
+                                 cldnn::pooling_mode::max);
         }
+        else if ("AvgPool" == op->description())
+        {
+            const shared_ptr<op::AvgPool> avg_pool = static_pointer_cast<op::AvgPool>(op);
+            const Shape& pool_shape = avg_pool->get_window_shape();
+            const Strides& pool_strides = avg_pool->get_window_movement_strides();
+            const Shape& pad_below = avg_pool->get_padding_below();
+            const Shape& pad_above = avg_pool->get_padding_above();
+            const cldnn::pooling_mode mode = avg_pool->get_include_padding_in_avg_computation()
+                                                 ? cldnn::pooling_mode::average
+                                                 : cldnn::pooling_mode::average_no_padding;
+
+            do_pooling_operation(
+                topology, op, pool_shape, pool_strides, pad_below, pad_above, mode);
+        }
         else if ("Broadcast" == op->description())
         {

src/ngraph/runtime/intelgpu/intelgpu_layout.cpp
@@ -106,6 +106,20 @@ cldnn::tensor runtime::intelgpu::IntelGPULayout::create_cldnn_tensor(const Shape
     return tns;
 }
 
+cldnn::tensor runtime::intelgpu::IntelGPULayout::create_cldnn_offset(const Shape& pad_below)
+{
+    vector<cldnn::tensor::value_type> offset({0, 0, 0, 0});
+    size_t ridx = 4;
+
+    for (auto i = pad_below.crbegin(); i != pad_below.crend() && ridx > 0; ++i, --ridx)
+    {
+        offset.at(ridx - 1) = -(*i);
+    }
+
+    const cldnn::tensor input_offset(offset.at(0), offset.at(1), offset.at(3), offset.at(2));
+    return input_offset;
+}
+
 cldnn::layout runtime::intelgpu::IntelGPULayout::create_cldnn_layout(
     const ngraph::element::Type& element_type, const Shape& element_shape)
 {
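
As an aside, here is a standalone sketch (plain C++, no clDNN dependency; the pad values are illustrative) of the index mapping create_cldnn_offset performs. nGraph's pad_below is NCHW-ordered, the loop right-aligns it into a four-element array in reverse, and the cldnn::tensor constructor used above takes (batch, feature, x, y), so the two spatial entries swap and every pad becomes a negative input offset:

#include <iostream>
#include <vector>

int main()
{
    const std::vector<int> pad_below{0, 0, 1, 2}; // N, C, H, W padding
    std::vector<int> offset(4, 0);
    size_t ridx = 4;
    for (auto i = pad_below.crbegin(); i != pad_below.crend() && ridx > 0; ++i, --ridx)
    {
        offset.at(ridx - 1) = -(*i); // fills W, H, C, N from the back
    }
    // Constructor order (batch, feature, x, y) corresponds to indices 0, 1, 3, 2.
    std::cout << offset.at(0) << ' ' << offset.at(1) << ' '
              << offset.at(3) << ' ' << offset.at(2) << '\n'; // prints: 0 0 -2 -1
    return 0;
}
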

src/ngraph/runtime/intelgpu/intelgpu_layout.hpp
@@ -51,7 +51,7 @@ public:
     static cldnn::layout create_cldnn_layout(const ngraph::element::Type& element_type,
                                              const Shape& element_shape);
     static cldnn::tensor create_cldnn_tensor(const Shape& element_shape);
+    static cldnn::tensor create_cldnn_offset(const Shape& pad_below);
     // This function converts Shape dimension_id into cldnn::concatenation id
     static cldnn::concatenation::concatenation_axis get_cldnn_axis(size_t tensor_channel);
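
One more note on the mode selection in the AvgPool branch above. A toy sketch (plain C++, illustrative values only) of the semantic difference between the two clDNN average-pooling modes: with a 2x2 window hanging over a padded corner, "average" divides the window sum by the full window size, while "average_no_padding" divides only by the count of real input elements:

#include <iostream>

int main()
{
    // A 2x2 window covering one real element (8.0) and three zero pads.
    const float window_sum = 8.0f;
    const float real_elements = 1.0f;
    const float window_size = 4.0f;

    // pooling_mode::average (include_padding_in_avg_computation == true)
    const float avg_with_padding = window_sum / window_size; // 2.0

    // pooling_mode::average_no_padding (the default mapping in this commit)
    const float avg_without_padding = window_sum / real_elements; // 8.0

    std::cout << avg_with_padding << ' ' << avg_without_padding << '\n';
    return 0;
}
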