submodule / ngraph / Commits / 4f26640b

Commit 4f26640b authored Aug 05, 2018 by shssf, committed by Robert Kimball on Aug 05, 2018
IntelGPU backend: Max and Min operations (#1333)
parent f9ded0b1
Showing 4 changed files with 151 additions and 0 deletions
src/ngraph/runtime/intelgpu/intelgpu_backend.cpp            +50  -0
src/ngraph/runtime/intelgpu/intelgpu_op_broadcast.cpp       +81  -0
src/ngraph/runtime/intelgpu/intelgpu_op_broadcast.hpp       +10  -0
src/ngraph/runtime/intelgpu/intelgpu_op_custom_kernels.cpp  +10  -0
src/ngraph/runtime/intelgpu/intelgpu_backend.cpp
...
@@ -43,7 +43,9 @@
#include "ngraph/op/convolution.hpp"
#include "ngraph/op/dot.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/op/max.hpp"
#include "ngraph/op/max_pool.hpp"
#include "ngraph/op/min.hpp"
#include "ngraph/op/pad.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/slice.hpp"
...
@@ -714,6 +716,54 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
                conv_name, image_name, {weight_name}, strides, input_offset, dilation);
            topology.add(cldnn_conv);
        }
        else if ("Min" == op->description())
        {
            arguments_check(op, 1, 1);

            const string& input_name = op->get_inputs().begin()->get_tensor().get_name();
            const Shape& input_shape = op->get_inputs().begin()->get_shape();
            const string& output_name = op->get_outputs().begin()->get_tensor().get_name();
            const Shape& output_shape = op->get_outputs().begin()->get_shape();
            const element::Type& output_type =
                op->get_outputs().begin()->get_tensor().get_element_type();

            const shared_ptr<op::Min> min_op = static_pointer_cast<op::Min>(op);
            const AxisSet& axis = min_op->get_reduction_axes();

            do_max_min_operation(topology,
                                 input_name,
                                 input_shape,
                                 output_name,
                                 output_shape,
                                 output_type,
                                 axis,
                                 true);
        }
        else if ("Max" == op->description())
        {
            arguments_check(op, 1, 1);

            const string& input_name = op->get_inputs().begin()->get_tensor().get_name();
            const Shape& input_shape = op->get_inputs().begin()->get_shape();
            const string& output_name = op->get_outputs().begin()->get_tensor().get_name();
            const Shape& output_shape = op->get_outputs().begin()->get_shape();
            const element::Type& output_type =
                op->get_outputs().begin()->get_tensor().get_element_type();

            const shared_ptr<op::Max> max_op = static_pointer_cast<op::Max>(op);
            const AxisSet& axis = max_op->get_reduction_axes();

            do_max_min_operation(topology,
                                 input_name,
                                 input_shape,
                                 output_name,
                                 output_shape,
                                 output_type,
                                 axis,
                                 false);
        }
        else
        {
            ostringstream os;
...
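For reference, the two branches above lower nGraph's Min and Max reduction ops: each keeps the minimum or maximum of the input along the axes returned by get_reduction_axes(). A minimal, illustrative plain-C++ sketch of that semantics (a hypothetical 2x3 float input reduced over axis 1; not code from this commit):

#include <cmath>
#include <cstdio>

// Illustrative only: reference semantics of a Max reduction over AxisSet{1}
// for a 2x3 input, i.e. output[i0] = max over i1 of input[i0][i1].
// A Min reduction is the same with '<' and +INFINITY as the initial value,
// which is exactly what the is_min flag selects in do_max_min_operation.
int main()
{
    const float input[2][3] = {{1.f, 5.f, 3.f}, {7.f, 2.f, 4.f}};
    float output[2];

    for (unsigned i0 = 0; i0 < 2; ++i0)
    {
        output[i0] = -INFINITY; // same initial value the generated kernel uses
        for (unsigned i1 = 0; i1 < 3; ++i1)
        {
            if (input[i0][i1] > output[i0])
            {
                output[i0] = input[i0][i1];
            }
        }
    }

    std::printf("%g %g\n", output[0], output[1]); // prints: 5 7
}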
src/ngraph/runtime/intelgpu/intelgpu_op_broadcast.cpp
...
@@ -147,3 +147,84 @@ void runtime::intelgpu::do_bcast_sum_operation(cldnn::topology& topology,
                                                  {1});
    topology.add(op_bcast_sum);
}

void runtime::intelgpu::do_max_min_operation(cldnn::topology& topology,
                                             const string& input_name,
                                             const Shape& input_shape,
                                             const string& output_name,
                                             const Shape& output_shape,
                                             const element::Type& output_type,
                                             const AxisSet& axis,
                                             bool is_min)
{
    const string function_name = "min_max_" + output_name;
    const size_t input_size = shape_size<Shape>(input_shape);
    const string& init_value = is_min ? "INFINITY" : "-INFINITY";
    const string& operation = is_min ? " < " : " > ";
    codegen::CodeWriter writer;

    writer << "__kernel void " << function_name << "(const __global float input"
           << array_dims(input_shape) << ", __global float output" << array_dims(output_shape)
           << ")\n";

    writer.block_begin();
    {
        // Initialization loop
        size_t var_idx = 0;
        for (auto const& i : output_shape)
        {
            writer << "for (uint i" << var_idx << " = 0; i" << var_idx << " < " << i << "; ++i"
                   << var_idx << ")\n";
            writer.block_begin();
            ++var_idx;
        }

        writer << "output" << access_dims(output_shape) << " = " << init_value << ";\n";

        // Closing brackets for initialization loop
        for (auto const& i : output_shape)
        {
            writer.block_end();
        }

        if (input_size && !input_shape.empty())
        {
            // Main operation loop
            var_idx = 0;
            for (auto const& i : input_shape)
            {
                writer << "for (uint i" << var_idx << " = 0; i" << var_idx << " < " << i
                       << "; ++i" << var_idx << ")\n";
                writer.block_begin();
                ++var_idx;
            }

            writer << "if (input" << access_dims(input_shape) << operation << "output"
                   << access_dims(input_shape, axis) << ")\n";
            writer.block_begin();
            {
                writer << "output" << access_dims(input_shape, axis) << " = input"
                       << access_dims(input_shape) << ";\n";
            }
            writer.block_end();

            // Closing brackets for loop
            for (auto const& i : input_shape)
            {
                writer.block_end();
            }
        }
    }
    // End of function bracket
    writer.block_end();

    const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(output_type, output_shape);
    const cldnn::custom_gpu_primitive op_min_max(output_name,
                                                 {input_name},
                                                 {writer.get_code()},
                                                 function_name,
                                                 get_kernel_args(1, 1),
                                                 "",
                                                 layout,
                                                 {1});
    topology.add(op_min_max);
}
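To make the generated source concrete: for a hypothetical Max reduction (is_min == false) with input_shape {2, 3}, output_shape {2}, axis AxisSet{1} and output_name "out", and assuming array_dims() expands to "[2][3]" / "[2]" while access_dims(shape) and access_dims(shape, axis) expand to "[i0][i1]" and "[i0]" respectively (those helpers are only partially shown in this diff), the CodeWriter above would emit roughly the following OpenCL kernel. A Min reduction swaps the comparison to < and the initial value to INFINITY, which is what is_min toggles.

__kernel void min_max_out(const __global float input[2][3], __global float output[2])
{
    /* initialization loop: one loop per output dimension */
    for (uint i0 = 0; i0 < 2; ++i0)
    {
        output[i0] = -INFINITY;
    }
    /* main operation loop: one loop per input dimension */
    for (uint i0 = 0; i0 < 2; ++i0)
    {
        for (uint i1 = 0; i1 < 3; ++i1)
        {
            if (input[i0][i1] > output[i0])
            {
                output[i0] = input[i0][i1];
            }
        }
    }
}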
src/ngraph/runtime/intelgpu/intelgpu_op_broadcast.hpp
...
@@ -47,6 +47,16 @@ namespace ngraph
                                        const Shape& output_shape,
                                        const element::Type& output_type,
                                        bool is_bcast);

            // This implements the Min and Max operations, depending on the is_min parameter
            void do_max_min_operation(cldnn::topology& topology,
                                      const std::string& input_name,
                                      const Shape& input_shape,
                                      const std::string& output_name,
                                      const Shape& output_shape,
                                      const element::Type& output_type,
                                      const AxisSet& axis,
                                      bool is_min);
        }
    }
}
src/ngraph/runtime/intelgpu/intelgpu_op_custom_kernels.cpp
...
@@ -53,6 +53,11 @@ string runtime::intelgpu::array_dims(const Shape& dimentions)
        buffer += "[" + to_string(dim) + "]";
    }

    if (buffer.empty())
    { // it means scalar
        buffer = "[1]";
    }

    return buffer;
}
...
@@ -70,6 +75,11 @@ string runtime::intelgpu::access_dims(const Shape& dimentions, const AxisSet& ax
        ++var_idx;
    }

    if (buffer.empty())
    { // it means scalar
        buffer = "[0]";
    }

    return buffer;
}
...
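The two hunks above make the scalar case explicit: an empty Shape still yields a usable declaration suffix ("[1]", a one-element array) from array_dims() and a usable index ("[0]") from access_dims(). A self-contained, illustrative stand-in for the array_dims() behaviour (not the library code; array_dims_sketch is a hypothetical name):

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Stand-in for runtime::intelgpu::array_dims(): builds "[d0][d1]..." from a
// shape and falls back to "[1]" for a scalar (empty shape), mirroring the
// fallback added in the hunk above; access_dims() gets the analogous "[0]".
static std::string array_dims_sketch(const std::vector<std::size_t>& dimensions)
{
    std::string buffer;
    for (const std::size_t dim : dimensions)
    {
        buffer += "[" + std::to_string(dim) + "]";
    }
    if (buffer.empty())
    {
        buffer = "[1]"; // it means scalar
    }
    return buffer;
}

int main()
{
    assert(array_dims_sketch({2, 3}) == "[2][3]");
    assert(array_dims_sketch({}) == "[1]"); // the scalar case this commit handles
}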