ngraph

Commit e9e7f0f9, authored Apr 24, 2019 by Sergey Shalnov, committed by Scott Cyphers on Apr 24, 2019
IntelGPU backend: Need to keep intermediate code (#2796)
parent d4df5695
Showing 2 changed files with 58 additions and 1 deletion

src/ngraph/runtime/intelgpu/intelgpu_kernels.cpp  +56 -1
src/ngraph/runtime/intelgpu/intelgpu_kernels.hpp  +2 -0
src/ngraph/runtime/intelgpu/intelgpu_kernels.cpp
@@ -21,6 +21,7 @@
#include "ngraph/runtime/intelgpu/intelgpu_op_custom_kernels.hpp"
#include "ngraph/node.hpp"
#include "ngraph/util.hpp"

using namespace std;
using namespace ngraph;
@@ -28,8 +29,61 @@ using namespace ngraph;

void runtime::intelgpu::CustomKernels::queue_krnl(const krnl_info& krnl_info,
                                                  const shared_ptr<Node>& op)
{
-    for (const auto& kr : krnl_info)
+    for (const CustomKernelInfo& kr : krnl_info)
    {
        // Need to save this code to allow further work on it later
#if 0
        mkldnn::engine eng(0);
        shared_ptr<mkldnn::stream> mkldnn_stream = make_shared<mkldnn::stream>(eng);
        cl_device_id device = eng.get_ocl_device();
        const char* source_code = kr.m_code.c_str();
        const size_t source_code_length = strlen(source_code);
        cl_int errcode = CL_SUCCESS;
        cl_command_queue queue = mkldnn_stream->get_ocl_command_queue();
        cl_program program = clCreateProgramWithSource(
            eng.get_ocl_context(), 1, &source_code, &source_code_length, &errcode);
        if (errcode != CL_SUCCESS)
        {
            throw ngraph_error("Build OpenCL program error: " + to_string(errcode));
        }
        errcode = clBuildProgram(program, 1, &device, "", NULL, NULL);
        if (errcode != CL_SUCCESS)
        {
            size_t log_length = 0;
            int info_errcode =
                clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, 0, 0, &log_length);
            if (info_errcode != CL_SUCCESS)
            {
                throw ngraph_error("clGetProgramBuildInfo(log_length) error: " +
                                   to_string(info_errcode));
            }
            void* log = ngraph_malloc(log_length);
            info_errcode =
                clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, log_length, log, 0);
            if (info_errcode != CL_SUCCESS)
            {
                throw ngraph_error("clGetProgramBuildInfo(log) error: " + to_string(info_errcode));
            }
            string err_string((const char*)log);
            ngraph_free(log);
            throw ngraph_error("Error during the build of OpenCL program. Error: " +
                               to_string(errcode) + "\nBuild log:" + err_string);
        }
        cl_kernel kernel = clCreateKernel(program, kr.m_entry_point.c_str(), &errcode);
        if (errcode != CL_SUCCESS)
        {
            throw ngraph_error("Create OpenCL kernel error: " + to_string(errcode));
        }
        //kr.kernel = kernel;
#else
        const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(kr.m_type, kr.m_shape);
        const cldnn::custom_gpu_primitive kernel_item(kr.m_name,
@@ -42,6 +96,7 @@ void runtime::intelgpu::CustomKernels::queue_krnl(const krnl_info& krnl_info,
                                                      kr.m_gws,
                                                      kr.m_lws);
        stream.add(kernel_item);
+#endif
    }
}
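For context: the block preserved under #if 0 follows the standard OpenCL C API sequence of creating a program from source, building it, reading the build log on failure, and extracting a kernel by its entry point. Below is a minimal, self-contained sketch of that same sequence, assuming a context and device have already been obtained; it uses a hypothetical build_kernel helper and plain std::runtime_error instead of the mkldnn engine and the ngraph_error/ngraph_malloc helpers in the preserved code.

// Minimal sketch of the OpenCL build sequence preserved under #if 0 above.
// Assumes `context` and `device` were obtained elsewhere (e.g. via clGetDeviceIDs /
// clCreateContext). The helper name build_kernel is an assumption for illustration.
#include <CL/cl.h>
#include <stdexcept>
#include <string>
#include <vector>

cl_kernel build_kernel(cl_context context,
                       cl_device_id device,
                       const std::string& source,
                       const std::string& entry_point)
{
    const char* src = source.c_str();
    const size_t len = source.size();
    cl_int err = CL_SUCCESS;

    // 1. Create the program object from OpenCL C source.
    cl_program program = clCreateProgramWithSource(context, 1, &src, &len, &err);
    if (err != CL_SUCCESS)
    {
        throw std::runtime_error("clCreateProgramWithSource failed: " + std::to_string(err));
    }

    // 2. Build it for the target device; on failure, fetch the build log
    //    in two calls (length first, then the text itself).
    err = clBuildProgram(program, 1, &device, "", nullptr, nullptr);
    if (err != CL_SUCCESS)
    {
        size_t log_length = 0;
        clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, 0, nullptr, &log_length);
        std::vector<char> log(log_length, '\0');
        clGetProgramBuildInfo(
            program, device, CL_PROGRAM_BUILD_LOG, log_length, log.data(), nullptr);
        throw std::runtime_error("clBuildProgram failed: " + std::to_string(err) +
                                 "\nBuild log: " + std::string(log.data(), log.size()));
    }

    // 3. Extract the kernel by its entry point name.
    cl_kernel kernel = clCreateKernel(program, entry_point.c_str(), &err);
    if (err != CL_SUCCESS)
    {
        throw std::runtime_error("clCreateKernel failed: " + std::to_string(err));
    }
    return kernel;
}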
src/ngraph/runtime/intelgpu/intelgpu_kernels.hpp
@@ -78,6 +78,7 @@ public:
        m_entry_point = entry_point;
        m_gws = gws;
        m_lws = lws;
+       kernel = nullptr;
    }

    std::string m_name;
@@ -88,6 +89,7 @@ public:
    std::string m_entry_point;
    std::vector<size_t> m_gws;
    std::vector<size_t> m_lws;
+   void* kernel;
};

class ngraph::runtime::intelgpu::CustomKernels
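The two additions in this header (kernel = nullptr; in the constructor and the void* kernel; member) appear to reserve a slot for caching a compiled kernel handle, matching the commented-out //kr.kernel = kernel; line in the preserved .cpp block. A speculative sketch of how that member could be used once the raw OpenCL path is enabled; the struct and helper names here are illustrative assumptions, not the actual nGraph API:

// Speculative sketch only: illustrates caching a compiled OpenCL kernel in a
// void* member, as the new CustomKernelInfo::kernel field seems intended for.
#include <CL/cl.h>
#include <string>

// Helper sketched after the .cpp diff above: compiles `source` and returns a kernel.
cl_kernel build_kernel(cl_context context,
                       cl_device_id device,
                       const std::string& source,
                       const std::string& entry_point);

struct KernelInfoSketch
{
    std::string m_code;        // OpenCL C source of the kernel
    std::string m_entry_point; // kernel function name inside m_code
    void* kernel = nullptr;    // cached cl_kernel handle; nullptr until first build
};

// Build on first use, then reuse the cached handle on subsequent calls.
cl_kernel get_or_build(KernelInfoSketch& kr, cl_context ctx, cl_device_id dev)
{
    if (kr.kernel == nullptr)
    {
        kr.kernel = build_kernel(ctx, dev, kr.m_code, kr.m_entry_point);
    }
    return static_cast<cl_kernel>(kr.kernel);
}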