Commit 8ab89b29 authored by shssf's avatar shssf Committed by Robert Kimball

IntelGPU backend: code refactored; no algorithm changes. (#1328)

parent 2b26df18
......@@ -23,50 +23,13 @@
#include "ngraph/runtime/intelgpu/code_writer.hpp"
#include "ngraph/runtime/intelgpu/intelgpu_layout.hpp"
#include "ngraph/runtime/intelgpu/intelgpu_op_batchnorm.hpp"
#include "ngraph/runtime/intelgpu/intelgpu_op_custom_kernels.hpp"
#include "ngraph/op/batch_norm.hpp"
using namespace std;
using namespace ngraph;
// Argument-binding tables for cldnn::custom_gpu_primitive kernels:
// each entry maps a kernel parameter slot to an input or output buffer index.
// Named by arity, e.g. parameters_2inp_1out = two inputs followed by one output.
static vector<cldnn_arg> parameters_1inp_1out = {{arg_input, 0}, {arg_output, 0}};
static vector<cldnn_arg> parameters_2inp_1out = {{arg_input, 0}, {arg_input, 1}, {arg_output, 0}};
static vector<cldnn_arg> parameters_5inp_1out = {{arg_input, 0},
{arg_input, 1},
{arg_input, 2},
{arg_input, 3},
{arg_input, 4},
{arg_output, 0}};
// Renders a Shape as a C-style array-declarator suffix for generated OpenCL
// source, e.g. Shape{2, 3, 4} -> "[2][3][4]". An empty Shape yields "".
static string array_dims(const Shape& dimensions)
{
    string result;

    for (size_t idx = 0; idx < dimensions.size(); ++idx)
    {
        result.append("[").append(to_string(dimensions.at(idx))).append("]");
    }

    return result;
}
// Builds a generated-kernel index expression such as "[i0][i1][i2]", with one
// "[iN]" per dimension of 'dimensions'. Dimensions whose ordinal appears in
// 'axis' are skipped (used to elide reduced axes in generated OpenCL loops).
//
// Fix: the original iterated with a range-for whose element variable was never
// used (only the running index mattered), producing an unused-variable
// warning; a plain index loop expresses the intent directly.
static string access_dims(const Shape& dimensions, const AxisSet& axis = {})
{
    string buffer;

    for (size_t var_idx = 0; var_idx < dimensions.size(); ++var_idx)
    {
        if (axis.find(var_idx) == axis.end())
        {
            buffer += "[i" + to_string(var_idx) + "]";
        }
    }

    return buffer;
}
void runtime::intelgpu::do_create_mean(cldnn::topology& topology,
const string& output_name,
const Shape& output_shape,
......@@ -138,7 +101,7 @@ void runtime::intelgpu::do_create_mean(cldnn::topology& topology,
{input_name},
{writer.get_code()},
entry_point_name,
parameters_1inp_1out,
get_kernel_args(1, 1),
"",
layout,
{1});
......@@ -221,7 +184,7 @@ void runtime::intelgpu::do_create_variance(cldnn::topology& topology,
{input_name, mean_name},
{writer.get_code()},
entry_point_name,
parameters_2inp_1out,
get_kernel_args(2, 1),
"",
layout,
{1});
......@@ -313,7 +276,7 @@ void runtime::intelgpu::do_batch_norm_operation(cldnn::topology& topology,
inputs,
{writer.get_code()},
entry_point_name,
parameters_5inp_1out,
get_kernel_args(5, 1),
"",
layout,
{1});
......
......@@ -21,43 +21,13 @@
#include "ngraph/runtime/intelgpu/code_writer.hpp"
#include "ngraph/runtime/intelgpu/intelgpu_layout.hpp"
#include "ngraph/runtime/intelgpu/intelgpu_op_broadcast.hpp"
#include "ngraph/runtime/intelgpu/intelgpu_op_custom_kernels.hpp"
#include "ngraph/util.hpp"
using namespace std;
using namespace ngraph;
static vector<cldnn_arg> parameters_1inp_1out = {{arg_input, 0}, {arg_output, 0}};
// Formats a Shape as consecutive bracketed extents for generated OpenCL code:
// Shape{2, 3, 4} becomes "[2][3][4]"; an empty Shape becomes "".
static string array_dims(const Shape& dimensions)
{
    string result;

    for (size_t pos = 0; pos < dimensions.size(); ++pos)
    {
        result.append("[").append(to_string(dimensions.at(pos))).append("]");
    }

    return result;
}
// Produces an element-access suffix like "[i0][i1][i2]" — one "[iN]" per
// dimension of 'dimensions', omitting ordinals listed in 'axis' (reduced
// axes) — for use in generated OpenCL kernel source.
//
// Fix: the original range-for declared an element variable it never read
// (only the running ordinal was used), causing an unused-variable warning;
// an explicit index loop states the intent and silences the warning.
static string access_dims(const Shape& dimensions, const AxisSet& axis = {})
{
    string buffer;

    for (size_t var_idx = 0; var_idx < dimensions.size(); ++var_idx)
    {
        if (axis.find(var_idx) == axis.end())
        {
            buffer += "[i" + to_string(var_idx) + "]";
        }
    }

    return buffer;
}
void runtime::intelgpu::do_bcast_sum_operation_scalar(cldnn::topology& topology,
const string& input_name,
const Shape& input_shape,
......@@ -66,7 +36,8 @@ void runtime::intelgpu::do_bcast_sum_operation_scalar(cldnn::topology& topology,
const element::Type& output_type,
bool is_bcast)
{
const string function_name = is_bcast ? "broadcast_scalar" : "sum_scalar";
string function_name = is_bcast ? "broadcast_scalar" : "sum_scalar";
function_name += output_name;
const size_t input_count =
is_bcast ? shape_size<Shape>(output_shape) : shape_size<Shape>(input_shape);
codegen::CodeWriter writer;
......@@ -98,7 +69,7 @@ void runtime::intelgpu::do_bcast_sum_operation_scalar(cldnn::topology& topology,
{input_name},
{writer.get_code()},
function_name,
parameters_1inp_1out,
get_kernel_args(1, 1),
string("-DCOUNT=" + to_string(input_count)),
layout,
{1});
......@@ -170,7 +141,7 @@ void runtime::intelgpu::do_bcast_sum_operation(cldnn::topology& topology,
{input_name},
{writer.get_code()},
function_name,
parameters_1inp_1out,
get_kernel_args(1, 1),
"",
layout,
{1});
......
......@@ -69,6 +69,11 @@ namespace ngraph
const std::string& output_name,
const Shape& output_shape,
const element::Type& output_type);
// Helper functions used in cldnn::custom_gpu_primitive kernels
std::vector<cldnn_arg> get_kernel_args(size_t input, size_t output);
std::string array_dims(const Shape& dimentions);
std::string access_dims(const Shape& dimentions, const AxisSet& axis = {});
}
}
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment