Skip to content
Projects
Groups
Snippets
Help
Loading...
Sign in / Register
Toggle navigation
N
ngraph
Project
Project
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Packages
Packages
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
submodule
ngraph
Commits
f9ded0b1
Commit
f9ded0b1
authored
Aug 05, 2018
by
shssf
Committed by
Robert Kimball
Aug 05, 2018
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
IntelGPU backend: Greater, Less, Equal operations (#1331)
parent
c5889b2b
Hide whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
135 additions
and
0 deletions
+135
-0
intelgpu_backend.cpp
src/ngraph/runtime/intelgpu/intelgpu_backend.cpp
+61
-0
intelgpu_op_custom_kernels.cpp
src/ngraph/runtime/intelgpu/intelgpu_op_custom_kernels.cpp
+64
-0
intelgpu_op_custom_kernels.hpp
src/ngraph/runtime/intelgpu/intelgpu_op_custom_kernels.hpp
+10
-0
No files found.
src/ngraph/runtime/intelgpu/intelgpu_backend.cpp
View file @
f9ded0b1
...
...
@@ -65,6 +65,16 @@ static void arguments_check(const shared_ptr<Node>& op, size_t input, size_t out
}
}
// Verifies that a kernel argument is f32; throws invalid_argument otherwise.
// The custom OpenCL kernels in this backend are generated for float data only.
static void argument_type_check(const element::Type& type)
{
    if (type == element::f32)
    {
        return;
    }

    ostringstream msg;
    msg << "Kernel data type " << type << " is not supported";
    throw invalid_argument(msg.str());
}
static
void
do_eltwise_operation
(
cldnn
::
topology
&
topology
,
const
shared_ptr
<
Node
>&
op
,
cldnn
::
eltwise_mode
mode
)
...
...
@@ -98,6 +108,33 @@ static void do_unary_operation(cldnn::topology& topology,
topology
.
add
(
cldnn_unary
);
}
// Validates a two-input/one-output comparison op and emits its custom
// OpenCL kernel into the topology. "operation" is the comparison operator
// text (e.g. " > ") spliced verbatim into the generated kernel source.
// Both inputs must be f32 (argument_type_check throws otherwise).
static void do_logical_operation(cldnn::topology& topology,
                                 const shared_ptr<Node>& op,
                                 const string& operation)
{
    arguments_check(op, 2, 1);

    const auto& input_a = op->get_inputs().at(0);
    const auto& input_b = op->get_inputs().at(1);
    const auto output_it = op->get_outputs().begin();

    argument_type_check(input_a.get_tensor().get_element_type());
    argument_type_check(input_b.get_tensor().get_element_type());

    runtime::intelgpu::do_logic_kernel(topology,
                                       input_a.get_tensor().get_name(),
                                       input_a.get_shape(),
                                       input_b.get_tensor().get_name(),
                                       input_b.get_shape(),
                                       output_it->get_tensor().get_name(),
                                       output_it->get_shape(),
                                       output_it->get_tensor().get_element_type(),
                                       operation);
}
// This function is needed only to change the name of the data in the topology.
// No real data copy is performed.
static
void
do_equal_propagation
(
cldnn
::
topology
&
topology
,
...
...
@@ -487,6 +524,30 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
{
do_unary_operation
(
topology
,
op
,
activation_logistic
);
}
else
if
(
"Greater"
==
op
->
description
())
{
do_logical_operation
(
topology
,
op
,
" > "
);
}
else
if
(
"GreaterEq"
==
op
->
description
())
{
do_logical_operation
(
topology
,
op
,
" >= "
);
}
else
if
(
"Equal"
==
op
->
description
())
{
do_logical_operation
(
topology
,
op
,
" == "
);
}
else
if
(
"NotEqual"
==
op
->
description
())
{
do_logical_operation
(
topology
,
op
,
" != "
);
}
else
if
(
"Less"
==
op
->
description
())
{
do_logical_operation
(
topology
,
op
,
" < "
);
}
else
if
(
"LessEq"
==
op
->
description
())
{
do_logical_operation
(
topology
,
op
,
" <= "
);
}
else
if
(
"Subtract"
==
op
->
description
())
{
do_eltwise_operation
(
topology
,
op
,
cldnn
::
eltwise_mode
::
sub
);
...
...
src/ngraph/runtime/intelgpu/intelgpu_op_custom_kernels.cpp
View file @
f9ded0b1
...
...
@@ -592,3 +592,67 @@ void runtime::intelgpu::do_select_operation(cldnn::topology& topology,
{
1
});
topology
.
add
(
op_select
);
}
// Generates a custom OpenCL kernel that applies an elementwise comparison
// ("operation" holds the operator text, e.g. " > " or " == ") between
// inputA and inputB, writing 1 or 0 into the char-typed output, and
// registers it in the topology as a cldnn::custom_gpu_primitive.
//
// NOTE(review): the generated kernel declares both inputs as float; the
// backend checks for f32 via argument_type_check before reaching here.
void runtime::intelgpu::do_logic_kernel(cldnn::topology& topology,
                                        const string& inputA_name,
                                        const Shape& inputA_shape,
                                        const string& inputB_name,
                                        const Shape& inputB_shape,
                                        const string& output_name,
                                        const Shape& output_shape,
                                        const element::Type& output_type,
                                        const string& operation)
{
    const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(output_type, output_shape);
    const string entry_point_name = "logic_" + output_name;
    codegen::CodeWriter writer;

    // Kernel signature: one array parameter per tensor, dimensioned by the
    // corresponding shape (array_dims emits the "[d0][d1]..." suffix).
    writer << "__kernel void " << entry_point_name << "(const __global float inputA"
           << array_dims(inputA_shape) << ", const __global float inputB"
           << array_dims(inputB_shape) << ", __global char output" << array_dims(output_shape)
           << ")\n";

    writer.block_begin();
    {
        // Open one "for" loop per output dimension, with indices i0, i1, ...
        size_t var_idx = 0;
        for (auto const& i : output_shape)
        {
            writer << "for (uint i" << var_idx << " = 0; i" << var_idx << " < " << i << "; ++i"
                   << var_idx << ")\n";
            writer.block_begin();
            ++var_idx;
        }

        // Elementwise comparison: output is 1 where it holds, 0 otherwise.
        writer << "if (inputA" << access_dims(inputA_shape) << operation << "inputB"
               << access_dims(inputB_shape) << ")\n";
        writer.block_begin();
        {
            writer << "output" << access_dims(output_shape) << " = 1;\n";
        }
        writer.block_end();
        writer << "else\n";
        writer.block_begin();
        {
            writer << "output" << access_dims(output_shape) << " = 0;\n";
        }
        writer.block_end();

        // Close the per-dimension loops. A counted loop avoids the unused
        // loop variable the original range-for declared here.
        for (size_t closed = 0; closed < output_shape.size(); ++closed)
        {
            writer.block_end();
        }
    }
    writer.block_end();

    // Two input buffers, one output buffer (get_kernel_args(2, 1)); empty
    // build options string; {1} presumably the work-group sizing — confirm
    // against the clDNN custom_gpu_primitive API.
    const cldnn::custom_gpu_primitive op_logical(output_name,
                                                 {inputA_name, inputB_name},
                                                 {writer.get_code()},
                                                 entry_point_name,
                                                 get_kernel_args(2, 1),
                                                 "",
                                                 layout,
                                                 {1});
    topology.add(op_logical);
}
src/ngraph/runtime/intelgpu/intelgpu_op_custom_kernels.hpp
View file @
f9ded0b1
...
...
@@ -70,6 +70,16 @@ namespace ngraph
const
Shape
&
output_shape
,
const
element
::
Type
&
output_type
);
// Builds and registers a custom OpenCL kernel that compares two tensors
// elementwise and writes 1/0 into the output tensor of "output_type".
// "operation" is the comparison operator text (e.g. " > ", " == ")
// inserted verbatim into the generated kernel source; the generated
// kernel reads the inputs as float.
void do_logic_kernel(cldnn::topology& topology,
                     const std::string& inputA_name,
                     const Shape& inputA_shape,
                     const std::string& inputB_name,
                     const Shape& inputB_shape,
                     const std::string& output_name,
                     const Shape& output_shape,
                     const element::Type& output_type,
                     const std::string& operation);

// Helper functions used in cldnn::custom_gpu_primitive kernels
// Describes "input" input buffers and "output" output buffers for a
// custom kernel's argument list.
std::vector<cldnn_arg> get_kernel_args(size_t input, size_t output);
// Emits the "[d0][d1]..." array-dimension suffix for a kernel parameter.
std::string array_dims(const Shape& dimentions);
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment