opencv / Commits

Commit 63a022dc authored Nov 26, 2012 by Vladislav Vinogradov
added explicit unroll to reduce implementation
parent 11c6eb63
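For context (an editorial note, not part of the commit): `#pragma unroll` is only a hint to the compiler, and the loops here iterate over the template parameter N, so the change replaces them with recursion over a compile-time index that the new Unroll struct halves at each step; the I == 0 specialization terminates the recursion, forcing straight-line code. The diff also fixes `#if __CUDA_ARCH >= 300` to `#if __CUDA_ARCH__ >= 300`; the misspelled macro expanded to 0, so the shuffle branch was never actually compiled (which is presumably how the `cml` typo in the removed lines went unnoticed). A minimal sketch of the pattern, with hypothetical names (UnrollSum) and the pre-CUDA-9 __shfl_down intrinsic (on CUDA 9+ this would be __shfl_down_sync(0xffffffff, val, I)):

    // Sketch only: unrolling a warp reduction by template recursion.
    // UnrollSum<16>::run(v) compiles to five straight-line shuffle-adds
    // (strides 16, 8, 4, 2, 1); UnrollSum<0> ends the recursion.
    template <unsigned int I>
    struct UnrollSum
    {
        static __device__ void run(int& val)
        {
            val += __shfl_down(val, I);  // pull the value I lanes above us
            UnrollSum<I / 2>::run(val);  // recurse at compile time
        }
    };
    template <>
    struct UnrollSum<0>
    {
        static __device__ void run(int&) {}  // recursion anchor: no-op
    };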
Showing 2 changed files with 68 additions and 62 deletions:

    modules/gpu/include/opencv2/gpu/device/detail/reduce.hpp (+34 -31)
    modules/gpu/include/opencv2/gpu/device/detail/reduce_key_val.hpp (+34 -31)
modules/gpu/include/opencv2/gpu/device/detail/reduce.hpp
@@ -243,29 +243,46 @@ namespace cv { namespace gpu { namespace device
     }
 };

+template <unsigned int I, typename Pointer, typename Reference, class Op>
+struct Unroll
+{
+    static __device__ void loopShfl(Reference val, Op op, unsigned int N)
+    {
+        mergeShfl(val, I, N, op);
+        Unroll<I / 2, Pointer, Reference, Op>::loopShfl(val, op, N);
+    }
+    static __device__ void loop(Pointer smem, Reference val, unsigned int tid, Op op)
+    {
+        merge(smem, val, tid, I, op);
+        Unroll<I / 2, Pointer, Reference, Op>::loop(smem, val, tid, op);
+    }
+};
+template <typename Pointer, typename Reference, class Op>
+struct Unroll<0, Pointer, Reference, Op>
+{
+    static __device__ void loopShfl(Reference, Op, unsigned int)
+    {
+    }
+    static __device__ void loop(Pointer, Reference, unsigned int, Op)
+    {
+    }
+};
+
 template <unsigned int N> struct WarpOptimized
 {
     template <typename Pointer, typename Reference, class Op>
     static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op)
     {
-    #if __CUDA_ARCH >= 300
+    #if __CUDA_ARCH__ >= 300
         (void) smem;
         (void) tid;

-        #pragma unroll
-        for (unsigned int i = N / 2; i >= 1; i /= 2)
-            mergeShfl(val, i, N, op);
+        Unroll<N / 2, Pointer, Reference, Op>::loopShfl(val, op, N);
     #else
         loadToSmem(smem, val, tid);

         if (tid < N / 2)
-        {
-        #if __CUDA_ARCH__ >= 200
-            #pragma unroll
-        #endif
-            for (unsigned int i = N / 2; i >= 1; i /= 2)
-                merge(smem, val, tid, i, op);
-        }
+            Unroll<N / 2, Pointer, Reference, Op>::loop(smem, val, tid, op);
     #endif
     }
 };
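For a warp-sized reduction (N = 32), the new call `Unroll<16, Pointer, Reference, Op>::loopShfl(val, op, 32)` unwinds at compile time into the same five halving steps the old loop expressed, but as guaranteed straight-line code:

    // Compile-time expansion of Unroll<16, ...>::loopShfl(val, op, 32):
    mergeShfl(val, 16, 32, op);
    mergeShfl(val,  8, 32, op);
    mergeShfl(val,  4, 32, op);
    mergeShfl(val,  2, 32, op);
    mergeShfl(val,  1, 32, op);
    // Unroll<0, ...>::loopShfl is empty, ending the chain.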
@@ -279,10 +296,8 @@ namespace cv { namespace gpu { namespace device
 {
     const unsigned int laneId = Warp::laneId();

-#if __CUDA_ARCH >= 300
-    #pragma unroll
-    for (int i = 16; i >= 1; i /= 2)
-        mergeShfl(val, i, warpSize, op);
+#if __CUDA_ARCH__ >= 300
+    Unroll<16, Pointer, Reference, Op>::loopShfl(val, op, warpSize);

     if (laneId == 0)
         loadToSmem(smem, val, tid / 32);
@@ -290,13 +305,7 @@ namespace cv { namespace gpu { namespace device
     loadToSmem(smem, val, tid);

     if (laneId < 16)
-    {
-    #if __CUDA_ARCH__ >= 200
-        #pragma unroll
-    #endif
-        for (int i = 16; i >= 1; i /= 2)
-            merge(smem, val, tid, i, op);
-    }
+        Unroll<16, Pointer, Reference, Op>::loop(smem, val, tid, op);

     __syncthreads();
@@ -310,16 +319,10 @@ namespace cv { namespace gpu { namespace device
     if (tid < 32)
     {
-    #if __CUDA_ARCH >= 300
-        #pragma unroll
-        for (int i = M / 2; i >= 1; i /= 2)
-            mergeShfl(val, i, M, op);
+    #if __CUDA_ARCH__ >= 300
+        Unroll<M / 2, Pointer, Reference, Op>::loopShfl(val, op, M);
     #else
-    #if __CUDA_ARCH__ >= 200
-        #pragma unroll
-    #endif
-        for (int i = M / 2; i >= 1; i /= 2)
-            merge(smem, val, tid, i, op);
+        Unroll<M / 2, Pointer, Reference, Op>::loop(smem, val, tid, op);
     #endif
     }
 }
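As a standalone illustration of the shuffle path (not OpenCV code; it assumes mergeShfl boils down to `val = op(val, __shfl_down(val, delta, width))` for plain arithmetic types, and uses the pre-CUDA-9 __shfl_down intrinsic):

    #include <cstdio>

    // Illustrative warp-sum kernel for sm_30+: each warp folds its 32
    // values with the same halving strides (16, 8, 4, 2, 1) that
    // Unroll<16> walks.
    __global__ void warpSum(const int* in, int* out)
    {
        int val = in[threadIdx.x];

        for (int delta = 16; delta >= 1; delta /= 2)
            val += __shfl_down(val, delta);   // __shfl_down_sync(...) on CUDA 9+

        if ((threadIdx.x & 31) == 0)          // lane 0 ends up with the warp sum
            out[threadIdx.x >> 5] = val;
    }

    int main()
    {
        int h[32], sum = 0, *dIn, *dOut;
        for (int i = 0; i < 32; ++i) h[i] = i;
        cudaMalloc(&dIn, sizeof(h));
        cudaMalloc(&dOut, sizeof(int));
        cudaMemcpy(dIn, h, sizeof(h), cudaMemcpyHostToDevice);
        warpSum<<<1, 32>>>(dIn, dOut);
        cudaMemcpy(&sum, dOut, sizeof(int), cudaMemcpyDeviceToHost);
        printf("warp sum = %d\n", sum);       // 0 + 1 + ... + 31 = 496
        cudaFree(dIn); cudaFree(dOut);
        return 0;
    }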
modules/gpu/include/opencv2/gpu/device/detail/reduce_key_val.hpp
@@ -369,31 +369,48 @@ namespace cv { namespace gpu { namespace device
     }
 };

+template <unsigned int I, class KP, class KR, class VP, class VR, class Cmp>
+struct Unroll
+{
+    static __device__ void loopShfl(KR key, VR val, Cmp cmp, unsigned int N)
+    {
+        mergeShfl(key, val, cmp, I, N);
+        Unroll<I / 2, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, N);
+    }
+    static __device__ void loop(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp)
+    {
+        merge(skeys, key, svals, val, cmp, tid, I);
+        Unroll<I / 2, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);
+    }
+};
+template <class KP, class KR, class VP, class VR, class Cmp>
+struct Unroll<0, KP, KR, VP, VR, Cmp>
+{
+    static __device__ void loopShfl(KR, VR, Cmp, unsigned int)
+    {
+    }
+    static __device__ void loop(KP, KR, VP, VR, unsigned int, Cmp)
+    {
+    }
+};
+
 template <unsigned int N> struct WarpOptimized
 {
     template <class KP, class KR, class VP, class VR, class Cmp>
     static __device__ void reduce(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp)
     {
-    #if __CUDA_ARCH >= 300
+    #if __CUDA_ARCH__ >= 300
         (void) skeys;
         (void) svals;
         (void) tid;

-        #pragma unroll
-        for (unsigned int i = N / 2; i >= 1; i /= 2)
-            mergeShfl(key, val, cml, i, N);
+        Unroll<N / 2, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, N);
     #else
         loadToSmem(skeys, key, tid);
         loadToSmem(svals, val, tid);

         if (tid < N / 2)
-        {
-        #if __CUDA_ARCH__ >= 200
-            #pragma unroll
-        #endif
-            for (unsigned int i = N / 2; i >= 1; i /= 2)
-                merge(skeys, key, svals, val, cmp, tid, i);
-        }
+            Unroll<N / 2, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);
     #endif
     }
 };
@@ -407,10 +424,8 @@ namespace cv { namespace gpu { namespace device
 {
     const unsigned int laneId = Warp::laneId();

-#if __CUDA_ARCH >= 300
-    #pragma unroll
-    for (unsigned int i = 16; i >= 1; i /= 2)
-        mergeShfl(key, val, cml, i, warpSize);
+#if __CUDA_ARCH__ >= 300
+    Unroll<16, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, warpSize);

     if (laneId == 0)
     {
@@ -422,13 +437,7 @@ namespace cv { namespace gpu { namespace device
     loadToSmem(svals, val, tid);

     if (laneId < 16)
-    {
-    #if __CUDA_ARCH__ >= 200
-        #pragma unroll
-    #endif
-        for (int i = 16; i >= 1; i /= 2)
-            merge(skeys, key, svals, val, cmp, tid, i);
-    }
+        Unroll<16, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);

     __syncthreads();
@@ -445,18 +454,12 @@ namespace cv { namespace gpu { namespace device
     if (tid < 32)
     {
-    #if __CUDA_ARCH >= 300
+    #if __CUDA_ARCH__ >= 300
         loadFromSmem(svals, val, tid);

-        #pragma unroll
-        for (unsigned int i = M / 2; i >= 1; i /= 2)
-            mergeShfl(key, val, cml, i, M);
+        Unroll<M / 2, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, M);
     #else
-    #if __CUDA_ARCH__ >= 200
-        #pragma unroll
-    #endif
-        for (unsigned int i = M / 2; i >= 1; i /= 2)
-            merge(skeys, key, svals, val, cmp, tid, i);
+        Unroll<M / 2, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);
     #endif
     }
 }
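The key/value variant shuffles both halves of each pair and keeps whichever side the comparator prefers. A hypothetical warp argmin in the same spirit (illustrative names and pre-CUDA-9 intrinsics; OpenCV's mergeShfl/merge do this generically over tuples of keys and values):

    // Sketch only: after the five halving strides, lane 0 of the warp
    // holds the smallest key and the value that travelled with it.
    __device__ void warpArgMin(float& key, int& val)
    {
        for (int delta = 16; delta >= 1; delta /= 2)
        {
            float otherKey = __shfl_down(key, delta);
            int   otherVal = __shfl_down(val, delta);
            if (otherKey < key)           // cmp(otherKey, key) in the generic code
            {
                key = otherKey;
                val = otherVal;
            }
        }
    }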