Commit f1c3e4ab authored by shssf's avatar shssf Committed by Robert Kimball

IntelGPU backend: Product operation (#1334)

parent 81216a9e
......@@ -47,6 +47,7 @@
#include "ngraph/op/max_pool.hpp"
#include "ngraph/op/min.hpp"
#include "ngraph/op/pad.hpp"
#include "ngraph/op/product.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/slice.hpp"
#include "ngraph/op/sum.hpp"
......@@ -461,6 +462,36 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
false);
}
}
// Dispatch branch for the nGraph Product op (multiplicative reduction
// over a set of axes), lowered to a custom IntelGPU kernel.
else if ("Product" == op->description())
{
// Product consumes exactly one input tensor and yields one output tensor.
arguments_check(op, 1, 1);
const string& input_name = op->get_inputs().begin()->get_tensor().get_name();
const Shape& input_shape = op->get_inputs().begin()->get_shape();
const string& output_name = op->get_outputs().begin()->get_tensor().get_name();
const Shape& output_shape = op->get_outputs().begin()->get_shape();
const element::Type& output_type =
op->get_outputs().begin()->get_tensor().get_element_type();
// Safe downcast: description() already identified the node as op::Product.
const shared_ptr<op::Product> prod = static_pointer_cast<op::Product>(op);
const AxisSet& axis = prod->get_reduction_axes();
if (axis.empty())
{
// No reduction axes: the op is an identity, so just alias the
// input tensor to the output instead of emitting a kernel.
do_equal_propagation(topology, input_name, output_name);
}
else
{
// Emit a generated OpenCL kernel that multiplies along the axes.
do_product_operation(topology,
input_name,
input_shape,
output_name,
output_shape,
output_type,
axis);
}
}
else if ("Reshape" == op->description())
{
arguments_check(op, 1, 1);
......
......@@ -247,3 +247,75 @@ void runtime::intelgpu::do_max_min_operation(cldnn::topology& topology,
{1});
topology.add(op_min_max);
}
// Emits a custom OpenCL kernel that reduces `input` into `output` by
// multiplication along the axes in `axis`, and registers it with the
// clDNN topology under the output tensor's name.
//
// topology     - clDNN topology the generated primitive is added to
// input_name   - tensor name of the single input
// input_shape  - shape of the input tensor
// output_name  - tensor name of the result (also used as primitive id)
// output_shape - shape of the reduced result
// output_type  - element type used to build the output cldnn::layout
// axis         - reduction axes (non-empty; the caller handles the
//                identity case via do_equal_propagation)
void runtime::intelgpu::do_product_operation(cldnn::topology& topology,
                                             const string& input_name,
                                             const Shape& input_shape,
                                             const string& output_name,
                                             const Shape& output_shape,
                                             const element::Type& output_type,
                                             const AxisSet& axis)
{
    // Kernel entry-point name; unique per output tensor.
    const string kernel_name = "product_" + output_name;
    const size_t element_count = shape_size<Shape>(input_shape);
    codegen::CodeWriter writer;

    // Writes one "for" header (and opening brace) per dimension of
    // dims, using loop variables i0, i1, ... in dimension order.
    auto open_loops = [&writer](const Shape& dims) {
        size_t dim = 0;
        for (const auto length : dims)
        {
            writer << "for (uint i" << dim << " = 0; i" << dim << " < " << length << "; ++i"
                   << dim << ")\n";
            writer.block_begin();
            ++dim;
        }
    };

    // Closes the braces opened by open_loops for the same shape.
    auto close_loops = [&writer](const Shape& dims) {
        for (size_t dim = 0; dim < dims.size(); ++dim)
        {
            writer.block_end();
        }
    };

    writer << "__kernel void " << kernel_name << "(const __global float input"
           << array_dims(input_shape) << ", __global float output" << array_dims(output_shape)
           << ")\n";
    writer.block_begin();
    {
        // Initialize every output element to the multiplicative identity.
        open_loops(output_shape);
        writer << "output" << access_dims(output_shape) << " = 1;\n";
        close_loops(output_shape);

        // Accumulate only when the input actually has elements; for an
        // empty input the output keeps the identity value 1.
        if (element_count && !input_shape.empty())
        {
            open_loops(input_shape);
            // NOTE(review): access_dims(input_shape, axis) presumably indexes the
            // output with the reduction axes dropped — confirm against its definition.
            writer << "output" << access_dims(input_shape, axis) << " *= input"
                   << access_dims(input_shape) << ";\n";
            close_loops(input_shape);
        }
    } // End of kernel body
    writer.block_end();

    const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(output_type, output_shape);
    const cldnn::custom_gpu_primitive op_product(output_name,
                                                 {input_name},
                                                 {writer.get_code()},
                                                 kernel_name,
                                                 get_kernel_args(1, 1),
                                                 "",
                                                 layout,
                                                 {1});
    topology.add(op_product);
}
......@@ -57,6 +57,15 @@ namespace ngraph
const element::Type& output_type,
const AxisSet& axis,
bool is_min);
// Implements the nGraph Product op: generates a custom OpenCL kernel
// that multiplies the input tensor's elements along the given reduction
// axes and adds the resulting primitive (named after the output tensor)
// to the clDNN topology. Callers handle the empty-axis identity case
// separately, so `axis` is expected to be non-empty here.
void do_product_operation(cldnn::topology& topology,
const std::string& input_name,
const Shape& input_shape,
const std::string& output_name,
const Shape& output_shape,
const element::Type& output_type,
const AxisSet& axis);
}
}
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment