Commit 6ccfbeb5, authored Jun 14, 2018 by Chris Sullivan, committed by Adam Procter on Jun 14, 2018
unsigned int -> uint32_t (#1106)
parent 838ba3f1
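The substance of the change: unsigned int has an implementation-defined width (the C++ standard only guarantees at least 16 bits), while uint32_t from <cstdint> is exactly 32 bits wherever it is provided. Because these values are mirrored between host code and generated CUDA kernels as raw bytes, a fixed-width type keeps both sides in agreement by construction. A minimal standalone sketch, not part of the commit, illustrating the guarantee:

    #include <climits>
    #include <cstdint>

    // uint32_t is exactly 32 bits by definition. sizeof(unsigned int) is a
    // property of the target ABI; it is 4 on common 64-bit platforms, but
    // the language itself does not promise that.
    static_assert(sizeof(uint32_t) * CHAR_BIT == 32, "uint32_t is exactly 32 bits");

    int main()
    {
        return 0;
    }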
Showing 3 changed files with 20 additions and 20 deletions:

src/ngraph/runtime/gpu/cuda_emitter.cpp              +9 -9
src/ngraph/runtime/gpu/gpu_cuda_kernel_builder.cpp   +7 -7
src/ngraph/runtime/gpu/gpu_cuda_kernel_emitters.cpp  +4 -4
src/ngraph/runtime/gpu/cuda_emitter.cpp

@@ -268,8 +268,8 @@ size_t runtime::gpu::CUDAEmitter::build_pad_dynamic(const runtime::gpu::GPURunti
         compiled_kernel = ctx->compiled_kernel_pool->set(kernel_name.str(), writer.get_code());
     }
-    unsigned int rank = static_cast<unsigned int>(input_shape.size());
-    unsigned int nthreads = static_cast<unsigned int>(shape_size(input_shape));
+    uint32_t rank = static_cast<uint32_t>(input_shape.size());
+    uint32_t nthreads = static_cast<uint32_t>(shape_size(input_shape));
     GPUShape pad_below(input_shape.size(), 0);
     GPUShape pad_interior(input_shape.size(), 1);
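Note that these casts narrow a 64-bit size_t down to 32 bits, which is safe only while shapes stay below 2^32 elements. A hypothetical demonstration of the silent truncation (values chosen purely for illustration):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    int main()
    {
        std::size_t n = (1ull << 32) + 5;  // exceeds the 32-bit range
        auto narrowed = static_cast<std::uint32_t>(n);
        std::cout << narrowed << "\n";     // prints 5: the high bits are discarded
        return 0;
    }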
@@ -286,14 +286,14 @@ size_t runtime::gpu::CUDAEmitter::build_pad_dynamic(const runtime::gpu::GPURunti
     // get an allocator for transient per kernel gpu memory
     GPUAllocator allocator = this->m_primitive_emitter->get_memory_allocator();
-    size_t idx_input_strides = allocator.reserve_argspace(
-        input_strides.data(), input_strides.size() * sizeof(unsigned int));
-    size_t idx_output_strides = allocator.reserve_argspace(
-        output_strides.data(), output_strides.size() * sizeof(unsigned int));
+    size_t idx_input_strides = allocator.reserve_argspace(
+        input_strides.data(), input_strides.size() * sizeof(uint32_t));
+    size_t idx_output_strides = allocator.reserve_argspace(
+        output_strides.data(), output_strides.size() * sizeof(uint32_t));
     size_t idx_padding_below =
-        allocator.reserve_argspace(pad_below.data(), pad_below.size() * sizeof(unsigned int));
+        allocator.reserve_argspace(pad_below.data(), pad_below.size() * sizeof(uint32_t));
     size_t idx_padding_interior =
-        allocator.reserve_argspace(pad_interior.data(), pad_interior.size() * sizeof(unsigned int));
+        allocator.reserve_argspace(pad_interior.data(), pad_interior.size() * sizeof(uint32_t));
     // create the launch primitive
     std::unique_ptr<gpu::primitive> pad_dynamic(new gpu::primitive{[=](void** inputs,
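The sizeof changes keep the host-side byte count in step with the uint32_t* parameters the generated kernel dereferences. A rough sketch of that sizing contract, with fake_argspace as a hypothetical stand-in for reserve_argspace (the name and signature are illustrative, not nGraph API):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Copies count * sizeof(uint32_t) bytes of host data into a staging
    // buffer, the way the real allocator reserves argument space for the
    // kernel's uint32_t* parameters. If the element size used here disagreed
    // with the type the kernel dereferences, every stride after the first
    // would be read at the wrong offset.
    std::vector<char> fake_argspace(const std::uint32_t* data, std::size_t count)
    {
        std::vector<char> staging(count * sizeof(std::uint32_t));
        std::memcpy(staging.data(), data, staging.size());
        return staging;
    }

    int main()
    {
        std::vector<std::uint32_t> strides = {12, 4, 1};
        // 3 elements * 4 bytes each = 12 bytes of argument space
        return fake_argspace(strides.data(), strides.size()).size() == 12 ? 0 : 1;
    }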
@@ -1015,7 +1015,7 @@ size_t runtime::gpu::CUDAEmitter::build_reduce_window(const GPURuntimeContext* c
     args_list[6] = &nthreads;
     CUDA_SAFE_CALL(cuLaunchKernel(*compiled_kernel.get(),
-                                  static_cast<unsigned int>(nthreads),
+                                  static_cast<uint32_t>(nthreads),
                                   1,
                                   1, // grid dim
                                   1,
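For context, the CUDA driver API declares the launch dimensions as unsigned int, so the cast's only job is to shrink the 64-bit nthreads to a width that converts cleanly. The declaration from cuda.h, reproduced for reference:

    CUresult cuLaunchKernel(CUfunction f,
                            unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ,
                            unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ,
                            unsigned int sharedMemBytes, CUstream hStream,
                            void** kernelParams, void** extra);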
src/ngraph/runtime/gpu/gpu_cuda_kernel_builder.cpp

@@ -285,19 +285,19 @@ void runtime::gpu::CudaKernelBuilder::get_pad_dynamic_op(
     const std::array<std::string, 2>& data_types)
 {
     writer << "extern \"C\" __global__ void cuda_" << name << "(" << data_types[0] << "* in, "
-           << data_types[1] << "* out, unsigned int* input_strides, unsigned int* output_strides, "
-              "unsigned int* padding_below, unsigned int* "
-              "padding_interior, unsigned int rank, unsigned int n)\n";
+           << data_types[1] << "* out, uint32_t* input_strides, uint32_t* output_strides, "
+              "uint32_t* padding_below, uint32_t* "
+              "padding_interior, uint32_t rank, uint32_t n)\n";
     writer.block_begin();
     {
-        writer << "unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;\n";
+        writer << "uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x;\n";
         writer << "if (tid < n)\n";
         writer.block_begin();
         {
-            writer << "unsigned int output_idx = 0;\n";
-            writer << "unsigned int input_idx = tid;\n";
+            writer << "uint32_t output_idx = 0;\n";
+            writer << "uint32_t input_idx = tid;\n";
-            writer << "for(unsigned int i = 0; i < rank; i++)\n";
+            writer << "for(uint32_t i = 0; i < rank; i++)\n";
             writer.block_begin();
             {
                 writer << "output_idx += (input_idx / input_strides[i] * padding_interior[i] + "
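Stitching the writer calls together, the kernel this emitter generates after the change looks roughly as follows. This is a reconstruction from the hunk above, assuming both data types are float and using a placeholder kernel name; the body of the index loop falls outside the hunk and is elided as comments:

    extern "C" __global__ void cuda_pad_dynamic(float* in,
                                                float* out,
                                                uint32_t* input_strides,
                                                uint32_t* output_strides,
                                                uint32_t* padding_below,
                                                uint32_t* padding_interior,
                                                uint32_t rank,
                                                uint32_t n)
    {
        uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x;
        if (tid < n)
        {
            uint32_t output_idx = 0;
            uint32_t input_idx = tid;
            for (uint32_t i = 0; i < rank; i++)
            {
                // output_idx += (input_idx / input_strides[i] * padding_interior[i] + ...
                // (the remainder of the index computation is outside this hunk)
            }
            // the final store through out is likewise outside this hunk
        }
    }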
src/ngraph/runtime/gpu/gpu_cuda_kernel_emitters.cpp

@@ -47,7 +47,7 @@ void runtime::gpu::emit_onehot(const std::string& name,
     void* args_list[] = {&in, &out, &repeat_size, &repeat_times, &count};
     CUDA_SAFE_CALL(cuLaunchKernel(*compiled_kernel.get(),
-                                  static_cast<unsigned int>(count),
+                                  static_cast<uint32_t>(count),
                                   1,
                                   1, // grid dim
                                   1,
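These launches use the driver API's kernelParams convention: each entry of args_list points at the host variable backing one kernel parameter, and the driver copies sizeof(parameter) bytes through each pointer. Host variable and kernel parameter therefore have to agree byte-for-byte, which is the other reason fixed-width types help here. A self-contained sketch of the pattern, with the launch itself left commented out so the snippet compiles without CUDA:

    #include <cstdint>

    // Mirrors the args_list pattern in the hunks above; illustrative only.
    void launch_onehot_example(void* in, void* out,
                               std::uint32_t repeat_size,
                               std::uint32_t repeat_times,
                               std::uint32_t count)
    {
        // One pointer per kernel parameter, in declaration order.
        void* args_list[] = {&in, &out, &repeat_size, &repeat_times, &count};
        // CUDA_SAFE_CALL(cuLaunchKernel(kernel,
        //                               count, 1, 1, // grid dim
        //                               1, 1, 1,     // block dim
        //                               0, nullptr, args_list, nullptr));
        (void)args_list;
    }

    int main()
    {
        return 0;
    }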
@@ -84,7 +84,7 @@ void runtime::gpu::emit_reshape(const std::string& name,
     void* args_list[] = {&in, &out, &input_strides, &trans_strides, &rank, &count};
     CUDA_SAFE_CALL(cuLaunchKernel(*compiled_kernel.get(),
-                                  static_cast<unsigned int>(count),
+                                  static_cast<uint32_t>(count),
                                   1,
                                   1, // grid dim
                                   1,
@@ -124,7 +124,7 @@ void runtime::gpu::emit_slice(const std::string& name,
     void* args_list[] = {
         &in, &out, &input_strides, &lower_bounds, &slice_strides, &output_strides, &rank, &count};
     CUDA_SAFE_CALL(cuLaunchKernel(*compiled_kernel.get(),
-                                  static_cast<unsigned int>(count),
+                                  static_cast<uint32_t>(count),
                                   1,
                                   1, // grid dim
                                   1,
@@ -161,7 +161,7 @@ void runtime::gpu::emit_reverse(const std::string& name,
     void* args_list[] = {&in, &out, &input_shapes, &reverse_axes, &rank, &count};
     CUDA_SAFE_CALL(cuLaunchKernel(*compiled_kernel.get(),
-                                  static_cast<unsigned int>(count),
+                                  static_cast<uint32_t>(count),
                                   1,
                                   1, // grid dim
                                   1,