Commit 4d64be33 authored by shssf's avatar shssf Committed by Robert Kimball

IntelGPU backend: helper function improvement (#1541)

* IntelGPU backend: helper function improvement

* PR1541. Change string to stringstream
parent 9a6c3f3e
......@@ -52,7 +52,7 @@ void runtime::intelgpu::do_bcast_sum_operation(cldnn::topology& topology,
gws = runtime::intelgpu::generate_loops(writer, output_shape, true);
writer << "output" << access_dims(output_shape) << " = input0"
<< access_dims(output_shape, axis) << ";\n";
<< access_dims(output_shape, "i", axis) << ";\n";
// Closing brackets for Broadcast loop
runtime::intelgpu::generate_loops(writer, output_shape, false);
......@@ -88,7 +88,7 @@ void runtime::intelgpu::do_bcast_sum_operation(cldnn::topology& topology,
++var_idx;
}
writer << "output" << access_dims(input_shape, axis) << " += input0"
writer << "output" << access_dims(input_shape, "i", axis) << " += input0"
<< access_dims(input_shape) << ";\n";
// Closing brackets for Sum loop
......@@ -164,10 +164,10 @@ void runtime::intelgpu::do_max_min_operation(cldnn::topology& topology,
}
writer << "if (input" << access_dims(input_shape) << operation << "output"
<< access_dims(input_shape, axis) << ")\n";
<< access_dims(input_shape, "i", axis) << ")\n";
writer.block_begin();
{
writer << "output" << access_dims(input_shape, axis) << " = input"
writer << "output" << access_dims(input_shape, "i", axis) << " = input"
<< access_dims(input_shape) << ";\n";
}
writer.block_end();
......@@ -241,7 +241,7 @@ void runtime::intelgpu::do_product_operation(cldnn::topology& topology,
++var_idx;
}
writer << "output" << access_dims(input_shape, axis) << " *= input"
writer << "output" << access_dims(input_shape, "i", axis) << " *= input"
<< access_dims(input_shape) << ";\n";
// Closing brackets for loop
......
......@@ -65,31 +65,33 @@ string runtime::intelgpu::array_dims(const Shape& dimentions, const AxisSet& axi
return buffer;
}
// Builds the OpenCL-style subscript string used to index a multi-dimensional
// array, e.g. "[i0][i1]" for a 2D shape with loop variable "i".
//
// Parameters:
//   dimentions  - the shape whose rank drives how many "[...]" terms are emitted
//                 (identifier spelling kept as-is to match the header declaration)
//   var         - base name of the loop variable; each term uses var + axis index
//   axis        - axes to treat specially: an axis in this set is either skipped
//                 entirely (broadcast/reduction axis) or, when is_reversed is
//                 true, indexed back-to-front as "[dim - var_idx - 1]"
//   is_reversed - when true, axes listed in `axis` are emitted with reversed
//                 indexing (used by the Reverse operation) instead of omitted
//
// Returns the concatenated subscript string; "[0]" when nothing was emitted,
// which represents a scalar access.
string runtime::intelgpu::access_dims(const Shape& dimentions,
                                      const string& var,
                                      const AxisSet& axis,
                                      bool is_reversed)
{
    size_t var_idx = 0;
    stringstream buffer;

    for (auto const& i : dimentions)
    {
        if (axis.find(var_idx) == axis.end())
        {
            // Normal axis: index with the loop variable, e.g. "[i0]".
            buffer << "[" << var << var_idx << "]";
        }
        else if (is_reversed)
        {
            // Reversed axis: index from the end, e.g. "[4 - i0 - 1]".
            buffer << "[" << i << " - " << var << var_idx << " - 1]";
        }
        // Axis in `axis` with is_reversed == false: intentionally emit nothing
        // (the dimension is dropped, as needed for broadcast/sum kernels).
        ++var_idx;
    }

    // in_avail() == 0 means no characters were written to the stream.
    if (!buffer.rdbuf()->in_avail())
    { // it means scalar
        buffer.str("[0]");
    }

    return buffer.str();
}
void runtime::intelgpu::gen_func_def(codegen::CodeWriter& writer,
......@@ -1028,7 +1030,7 @@ void runtime::intelgpu::do_reverse_operation(cldnn::topology& topology,
gws = generate_loops(writer, output_shape, true);
writer << "output" << access_dims(output_shape) << " = input0"
<< access_dims(output_shape, reversed_axes, true) << ";\n";
<< access_dims(output_shape, "i", reversed_axes, true) << ";\n";
generate_loops(writer, output_shape, false);
}
......
......@@ -152,6 +152,7 @@ namespace ngraph
std::vector<cldnn_arg> get_kernel_args(size_t input, size_t output);
std::string array_dims(const Shape& dimentions, const AxisSet& axis = {});
std::string access_dims(const Shape& dimentions,
const std::string& var = "i",
const AxisSet& axis = {},
bool is_reversed = false);
std::vector<size_t>
......
......@@ -132,7 +132,7 @@ void runtime::intelgpu::do_softmax_operation(cldnn::topology& topology,
const string entry_point_name = "softmax_" + output_name;
const string middle_name = entry_point_name + "_middle";
const string entry_point_middle_name = "softmax_middle_" + output_name;
const string expression = "output" + access_dims(input_shape, axes) + " = 0.0f;\n";
const string expression = "output" + access_dims(input_shape, "i", axes) + " = 0.0f;\n";
const Shape new_shape = shape_dims(output_shape, axes);
const cldnn::layout layout_middle = IntelGPULayout::create_cldnn_layout(output_type, new_shape);
codegen::CodeWriter writer0;
......@@ -147,7 +147,7 @@ void runtime::intelgpu::do_softmax_operation(cldnn::topology& topology,
{
gws = generate_loops_w_axes(writer0, output_shape, true, axes, expression);
writer0 << "output" << access_dims(input_shape, axes) << " += exp(input"
writer0 << "output" << access_dims(input_shape, "i", axes) << " += exp(input"
<< access_dims(input_shape) << ");\n";
generate_loops_w_axes(writer0, output_shape, false, axes, "");
......@@ -173,7 +173,7 @@ void runtime::intelgpu::do_softmax_operation(cldnn::topology& topology,
{
gws = generate_loops(writer1, output_shape, true);
writer1 << "output" << access_dims(input_shape) << " = exp(input0"
<< access_dims(input_shape) << ")/input1" << access_dims(input_shape, axes)
<< access_dims(input_shape) << ")/input1" << access_dims(input_shape, "i", axes)
<< ";\n";
generate_loops(writer1, output_shape, false);
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment