submodule / ngraph · Commits · f1c3e4ab

Commit f1c3e4ab authored Aug 06, 2018 by shssf, committed Aug 06, 2018 by Robert Kimball

IntelGPU backend: Product operation (#1334)

parent 81216a9e

Showing 3 changed files with 112 additions and 0 deletions
src/ngraph/runtime/intelgpu/intelgpu_backend.cpp      +31 -0
src/ngraph/runtime/intelgpu/intelgpu_op_broadcast.cpp +72 -0
src/ngraph/runtime/intelgpu/intelgpu_op_broadcast.hpp  +9 -0
src/ngraph/runtime/intelgpu/intelgpu_backend.cpp
@@ -47,6 +47,7 @@
#include "ngraph/op/max_pool.hpp"
#include "ngraph/op/min.hpp"
#include "ngraph/op/pad.hpp"
#include "ngraph/op/product.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/slice.hpp"
#include "ngraph/op/sum.hpp"
@@ -461,6 +462,36 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
                                     false);
            }
        }
        else if ("Product" == op->description())
        {
            arguments_check(op, 1, 1);

            const string& input_name = op->get_inputs().begin()->get_tensor().get_name();
            const Shape& input_shape = op->get_inputs().begin()->get_shape();
            const string& output_name = op->get_outputs().begin()->get_tensor().get_name();
            const Shape& output_shape = op->get_outputs().begin()->get_shape();
            const element::Type& output_type =
                op->get_outputs().begin()->get_tensor().get_element_type();

            const shared_ptr<op::Product> prod = static_pointer_cast<op::Product>(op);
            const AxisSet& axis = prod->get_reduction_axes();

            if (axis.empty())
            {
                do_equal_propagation(topology, input_name, output_name);
            }
            else
            {
                do_product_operation(topology,
                                     input_name,
                                     input_shape,
                                     output_name,
                                     output_shape,
                                     output_type,
                                     axis);
            }
        }
        else if ("Reshape" == op->description())
        {
            arguments_check(op, 1, 1);
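For context, here is a minimal sketch of a graph that would reach the new "Product" branch above, assuming the nGraph graph-construction API of this period; the shapes, reduction axis, and function name below are hypothetical and not taken from the commit.

// Sketch only: builds a single Product node so that op->description() == "Product"
// when IntelGPUBackend::compile() walks the function. Shapes and axis are hypothetical.
#include "ngraph/ngraph.hpp"

using namespace ngraph;

std::shared_ptr<Function> make_product_graph()
{
    // 2x3 input reduced over axis 1 -> output shape {2}
    auto arg = std::make_shared<op::Parameter>(element::f32, Shape{2, 3});
    auto prod = std::make_shared<op::Product>(arg, AxisSet{1});
    return std::make_shared<Function>(prod, op::ParameterVector{arg});
}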
src/ngraph/runtime/intelgpu/intelgpu_op_broadcast.cpp
@@ -247,3 +247,75 @@ void runtime::intelgpu::do_max_min_operation(cldnn::topology& topology,
                                                 {1});
    topology.add(op_min_max);
}

void runtime::intelgpu::do_product_operation(cldnn::topology& topology,
                                             const string& input_name,
                                             const Shape& input_shape,
                                             const string& output_name,
                                             const Shape& output_shape,
                                             const element::Type& output_type,
                                             const AxisSet& axis)
{
    const string function_name = "product_" + output_name;
    const size_t input_size = shape_size<Shape>(input_shape);
    codegen::CodeWriter writer;

    writer << "__kernel void " << function_name << "(const __global float input"
           << array_dims(input_shape) << ", __global float output" << array_dims(output_shape)
           << ")\n";

    writer.block_begin();
    {
        // Initialization loop
        size_t var_idx = 0;
        for (auto const& i : output_shape)
        {
            writer << "for (uint i" << var_idx << " = 0; i" << var_idx << " < " << i << "; ++i"
                   << var_idx << ")\n";
            writer.block_begin();
            ++var_idx;
        }

        writer << "output" << access_dims(output_shape) << " = 1;\n";

        // Closing brackets for initialization loop
        for (auto const& i : output_shape)
        {
            writer.block_end();
        }

        if (input_size && !input_shape.empty())
        {
            // Main operation loop
            var_idx = 0;
            for (auto const& i : input_shape)
            {
                writer << "for (uint i" << var_idx << " = 0; i" << var_idx << " < " << i << "; ++i"
                       << var_idx << ")\n";
                writer.block_begin();
                ++var_idx;
            }

            writer << "output" << access_dims(input_shape, axis) << " *= input"
                   << access_dims(input_shape) << ";\n";

            // Closing brackets for loop
            for (auto const& i : input_shape)
            {
                writer.block_end();
            }
        }
    }
    // End of function bracket
    writer.block_end();

    const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(output_type, output_shape);
    const cldnn::custom_gpu_primitive op_product(output_name,
                                                 {input_name},
                                                 {writer.get_code()},
                                                 function_name,
                                                 get_kernel_args(1, 1),
                                                 "",
                                                 layout,
                                                 {1});
    topology.add(op_product);
}
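To make the code generation concrete, this is roughly the OpenCL C that the writer above would emit for input_shape {2, 3}, axis {1}, output_shape {2}. The kernel name suffix and the exact text produced by array_dims/access_dims are assumptions inferred from the stream operations, not output captured from the backend.

// Illustrative only: approximate generated kernel for input_shape {2, 3}, axis {1},
// output_shape {2}; "result" stands in for the real output tensor name.
__kernel void product_result(const __global float input[2][3], __global float output[2])
{
    // Initialization loop: seed each output element with the multiplicative identity
    for (uint i0 = 0; i0 < 2; ++i0)
    {
        output[i0] = 1;
    }
    // Main operation loop: accumulate the product over the reduced axis (axis 1)
    for (uint i0 = 0; i0 < 2; ++i0)
    {
        for (uint i1 = 0; i1 < 3; ++i1)
        {
            output[i0] *= input[i0][i1];
        }
    }
}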
src/ngraph/runtime/intelgpu/intelgpu_op_broadcast.hpp
@@ -57,6 +57,15 @@ namespace ngraph
                                      const element::Type& output_type,
                                      const AxisSet& axis,
                                      bool is_min);

            // This implements Product operation
            void do_product_operation(cldnn::topology& topology,
                                      const std::string& input_name,
                                      const Shape& input_shape,
                                      const std::string& output_name,
                                      const Shape& output_shape,
                                      const element::Type& output_type,
                                      const AxisSet& axis);
        }
    }
}