opencv: commit fc21b15d
Authored Oct 31, 2018 by Alexander Alekhin
Parent commit: 850053f9

samples(gpu): cleanup samples for legacy API
Showing 4 changed files with 5 additions and 1072 deletions (+5 / -1072):

    samples/gpu/CMakeLists.txt                    +0 / -1
    samples/gpu/bgfg_segm.cpp                     +5 / -32
    samples/gpu/cascadeclassifier_nvidia_api.cpp  +0 / -388
    samples/gpu/opticalflow_nvidia_api.cpp        +0 / -651
samples/gpu/CMakeLists.txt

@@ -21,7 +21,6 @@ set(OPENCV_CUDA_SAMPLES_REQUIRED_DEPS
     opencv_cudaoptflow
     opencv_cudabgsegm
     opencv_cudastereo
-    opencv_cudalegacy
     opencv_cudaobjdetect
     )
 ocv_check_dependencies(${OPENCV_CUDA_SAMPLES_REQUIRED_DEPS})
samples/gpu/bgfg_segm.cpp

@@ -4,7 +4,6 @@
 #include "opencv2/core.hpp"
 #include "opencv2/core/utility.hpp"
 #include "opencv2/cudabgsegm.hpp"
-#include "opencv2/cudalegacy.hpp"
 #include "opencv2/video.hpp"
 #include "opencv2/highgui.hpp"

@@ -16,8 +15,6 @@ enum Method
 {
     MOG,
     MOG2,
-    GMG,
-    FGD_STAT
 };

 int main(int argc, const char** argv)

@@ -25,7 +22,7 @@ int main(int argc, const char** argv)
     cv::CommandLineParser cmd(argc, argv,
         "{ c camera |                   | use camera }"
         "{ f file   | ../data/vtest.avi | input video file }"
-        "{ m method | mog               | method (mog, mog2, gmg, fgd) }"
+        "{ m method | mog               | method (mog, mog2) }"
         "{ h help   |                   | print help message }");

     if (cmd.has("help") || !cmd.check())

@@ -40,9 +37,7 @@ int main(int argc, const char** argv)
     string method = cmd.get<string>("method");

-    if (method != "mog" && method != "mog2" && method != "gmg" && method != "fgd")
+    if (method != "mog" && method != "mog2")
     {
         cerr << "Incorrect method" << endl;
         return -1;

@@ -50,8 +45,8 @@ int main(int argc, const char** argv)
     Method m = method == "mog" ? MOG :
                method == "mog2" ? MOG2 :
-               method == "fgd" ? FGD_STAT :
-                                 GMG;
+                                  (Method)-1;
+    CV_Assert(m != (Method)-1);

     VideoCapture cap;

@@ -73,8 +68,6 @@ int main(int argc, const char** argv)
     Ptr<BackgroundSubtractor> mog = cuda::createBackgroundSubtractorMOG();
     Ptr<BackgroundSubtractor> mog2 = cuda::createBackgroundSubtractorMOG2();
-    Ptr<BackgroundSubtractor> gmg = cuda::createBackgroundSubtractorGMG(40);
-    Ptr<BackgroundSubtractor> fgd = cuda::createBackgroundSubtractorFGD();

     GpuMat d_fgmask;
     GpuMat d_fgimg;

@@ -93,23 +86,12 @@ int main(int argc, const char** argv)
     case MOG2:
         mog2->apply(d_frame, d_fgmask);
         break;
-
-    case GMG:
-        gmg->apply(d_frame, d_fgmask);
-        break;
-
-    case FGD_STAT:
-        fgd->apply(d_frame, d_fgmask);
-        break;
     }

     namedWindow("image", WINDOW_NORMAL);
     namedWindow("foreground mask", WINDOW_NORMAL);
     namedWindow("foreground image", WINDOW_NORMAL);
-    if (m != GMG)
-    {
-        namedWindow("mean background image", WINDOW_NORMAL);
-    }
+    namedWindow("mean background image", WINDOW_NORMAL);

     for (;;)
     {

@@ -132,15 +114,6 @@ int main(int argc, const char** argv)
             mog2->apply(d_frame, d_fgmask);
             mog2->getBackgroundImage(d_bgimg);
             break;
-
-        case GMG:
-            gmg->apply(d_frame, d_fgmask);
-            break;
-
-        case FGD_STAT:
-            fgd->apply(d_frame, d_fgmask);
-            fgd->getBackgroundImage(d_bgimg);
-            break;
         }

         double fps = cv::getTickFrequency() / (cv::getTickCount() - start);
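With the GMG and FGD paths removed, bgfg_segm.cpp now only exercises the MOG and MOG2 background subtractors from opencv_cudabgsegm. A minimal sketch of the surviving MOG2 path, assuming a CUDA-capable build; the input file name is only a placeholder:

    #include "opencv2/core.hpp"
    #include "opencv2/core/cuda.hpp"
    #include "opencv2/cudabgsegm.hpp"
    #include "opencv2/highgui.hpp"
    #include "opencv2/videoio.hpp"

    int main()
    {
        cv::VideoCapture cap("../data/vtest.avi");            // placeholder input video
        cv::Ptr<cv::BackgroundSubtractor> mog2 =
            cv::cuda::createBackgroundSubtractorMOG2();       // non-legacy subtractor

        cv::Mat frame, fgmask;
        cv::cuda::GpuMat d_frame, d_fgmask;
        while (cap.read(frame))
        {
            d_frame.upload(frame);
            mog2->apply(d_frame, d_fgmask);                   // foreground mask on the GPU
            d_fgmask.download(fgmask);
            cv::imshow("foreground mask", fgmask);
            if (cv::waitKey(30) == 27)
                break;
        }
        return 0;
    }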
samples/gpu/cascadeclassifier_nvidia_api.cpp (deleted, 100644 → 0)
#if defined _MSC_VER && _MSC_VER >= 1400
#pragma warning( disable : 4201 4408 4127 4100)
#endif

#include <iostream>
#include <iomanip>
#include <cstdio>
#include "opencv2/core/cuda.hpp"
#include "opencv2/cudalegacy.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/objdetect/objdetect_c.h"

using namespace std;
using namespace cv;

#if !defined(HAVE_CUDA) || defined(__arm__)

int main(int, const char**)
{
#if !defined(HAVE_CUDA)
    std::cout << "CUDA support is required (CMake key 'WITH_CUDA' must be true)." << std::endl;
#endif
#if defined(__arm__)
    std::cout << "Unsupported for ARM CUDA library." << std::endl;
#endif
    return 0;
}

#else

const Size2i preferredVideoFrameSize(640, 480);
const cv::String wndTitle = "NVIDIA Computer Vision :: Haar Classifiers Cascade";

static void matPrint(Mat &img, int lineOffsY, Scalar fontColor, const string &ss)
{
    int fontFace = FONT_HERSHEY_DUPLEX;
    double fontScale = 0.8;
    int fontThickness = 2;
    Size fontSize = cv::getTextSize("T[]", fontFace, fontScale, fontThickness, 0);

    Point org;
    org.x = 1;
    org.y = 3 * fontSize.height * (lineOffsY + 1) / 2;
    putText(img, ss, org, fontFace, fontScale, Scalar(0,0,0), 5*fontThickness/2, 16);
    putText(img, ss, org, fontFace, fontScale, fontColor, fontThickness, 16);
}

static void displayState(Mat &canvas, bool bHelp, bool bGpu, bool bLargestFace, bool bFilter, double fps)
{
    Scalar fontColorRed(0,0,255);
    Scalar fontColorNV(0,185,118);

    ostringstream ss;
    ss << "FPS = " << setprecision(1) << fixed << fps;
    matPrint(canvas, 0, fontColorRed, ss.str());
    ss.str("");
    ss << "[" << canvas.cols << "x" << canvas.rows << "], " <<
        (bGpu ? "GPU, " : "CPU, ") <<
        (bLargestFace ? "OneFace, " : "MultiFace, ") <<
        (bFilter ? "Filter:ON" : "Filter:OFF");
    matPrint(canvas, 1, fontColorRed, ss.str());

    if (bHelp)
    {
        matPrint(canvas, 2, fontColorNV, "Space - switch GPU / CPU");
        matPrint(canvas, 3, fontColorNV, "M - switch OneFace / MultiFace");
        matPrint(canvas, 4, fontColorNV, "F - toggle rectangles Filter");
        matPrint(canvas, 5, fontColorNV, "H - toggle hotkeys help");
    }
    else
    {
        matPrint(canvas, 2, fontColorNV, "H - toggle hotkeys help");
    }
}

static NCVStatus process(Mat *srcdst,
                         Ncv32u width, Ncv32u height,
                         NcvBool bFilterRects, NcvBool bLargestFace,
                         HaarClassifierCascadeDescriptor &haar,
                         NCVVector<HaarStage64> &d_haarStages, NCVVector<HaarClassifierNode128> &d_haarNodes,
                         NCVVector<HaarFeature64> &d_haarFeatures, NCVVector<HaarStage64> &h_haarStages,
                         INCVMemAllocator &gpuAllocator,
                         INCVMemAllocator &cpuAllocator,
                         cudaDeviceProp &devProp)
{
    ncvAssertReturn(!((srcdst == NULL) ^ gpuAllocator.isCounting()), NCV_NULL_PTR);

    NCVStatus ncvStat;

    NCV_SET_SKIP_COND(gpuAllocator.isCounting());

    NCVMatrixAlloc<Ncv8u> d_src(gpuAllocator, width, height);
    ncvAssertReturn(d_src.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
    NCVMatrixAlloc<Ncv8u> h_src(cpuAllocator, width, height);
    ncvAssertReturn(h_src.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
    NCVVectorAlloc<NcvRect32u> d_rects(gpuAllocator, 100);
    ncvAssertReturn(d_rects.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);

    NCV_SKIP_COND_BEGIN

    for (Ncv32u i = 0; i < (Ncv32u)srcdst->rows; i++)
    {
        memcpy(h_src.ptr() + i * h_src.stride(), srcdst->ptr(i), srcdst->cols);
    }

    ncvStat = h_src.copySolid(d_src, 0);
    ncvAssertReturnNcvStat(ncvStat);
    ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);

    NCV_SKIP_COND_END

    NcvSize32u roi;
    roi.width = d_src.width();
    roi.height = d_src.height();

    Ncv32u numDetections;
    ncvStat = ncvDetectObjectsMultiScale_device(
        d_src, roi, d_rects, numDetections, haar, h_haarStages,
        d_haarStages, d_haarNodes, d_haarFeatures,
        haar.ClassifierSize,
        (bFilterRects || bLargestFace) ? 4 : 0,
        1.2f, 1,
        (bLargestFace ? NCVPipeObjDet_FindLargestObject : 0) | NCVPipeObjDet_VisualizeInPlace,
        gpuAllocator, cpuAllocator, devProp, 0);
    ncvAssertReturnNcvStat(ncvStat);
    ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);

    NCV_SKIP_COND_BEGIN

    ncvStat = d_src.copySolid(h_src, 0);
    ncvAssertReturnNcvStat(ncvStat);
    ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);

    for (Ncv32u i = 0; i < (Ncv32u)srcdst->rows; i++)
    {
        memcpy(srcdst->ptr(i), h_src.ptr() + i * h_src.stride(), srcdst->cols);
    }

    NCV_SKIP_COND_END

    return NCV_SUCCESS;
}

int main(int argc, const char** argv)
{
    cout << "OpenCV / NVIDIA Computer Vision" << endl;
    cout << "Face Detection in video and live feed" << endl;
    cout << "Syntax: exename <cascade_file> <image_or_video_or_cameraid>" << endl;
    cout << "=========================================" << endl;

    ncvAssertPrintReturn(cv::cuda::getCudaEnabledDeviceCount() != 0, "No GPU found or the library is compiled without CUDA support", -1);
    ncvAssertPrintReturn(argc == 3, "Invalid number of arguments", -1);

    cv::cuda::printShortCudaDeviceInfo(cv::cuda::getDevice());

    string cascadeName = argv[1];
    string inputName = argv[2];

    NCVStatus ncvStat;
    NcvBool bQuit = false;
    VideoCapture capture;
    Size2i frameSize;

    //open content source
    Mat image = imread(inputName);
    Mat frame;
    if (!image.empty())
    {
        frameSize.width = image.cols;
        frameSize.height = image.rows;
    }
    else
    {
        if (!capture.open(inputName))
        {
            int camid = -1;

            istringstream ss(inputName);
            int x = 0;
            ss >> x;

            ncvAssertPrintReturn(capture.open(camid) != 0, "Can't open source", -1);
        }

        capture >> frame;
        ncvAssertPrintReturn(!frame.empty(), "Empty video source", -1);

        frameSize.width = frame.cols;
        frameSize.height = frame.rows;
    }

    NcvBool bUseGPU = true;
    NcvBool bLargestObject = false;
    NcvBool bFilterRects = true;
    NcvBool bHelpScreen = false;

    CascadeClassifier classifierOpenCV;
    ncvAssertPrintReturn(classifierOpenCV.load(cascadeName) != 0, "Error (in OpenCV) opening classifier", -1);

    int devId;
    ncvAssertCUDAReturn(cudaGetDevice(&devId), -1);
    cudaDeviceProp devProp;
    ncvAssertCUDAReturn(cudaGetDeviceProperties(&devProp, devId), -1);
    cout << "Using GPU: " << devId << "(" << devProp.name <<
            "), arch=" << devProp.major << "." << devProp.minor << endl;

    //==============================================================================
    //
    // Load the classifier from file (assuming its size is about 1 mb)
    // using a simple allocator
    //
    //==============================================================================

    NCVMemNativeAllocator gpuCascadeAllocator(NCVMemoryTypeDevice, static_cast<Ncv32u>(devProp.textureAlignment));
    ncvAssertPrintReturn(gpuCascadeAllocator.isInitialized(), "Error creating cascade GPU allocator", -1);
    NCVMemNativeAllocator cpuCascadeAllocator(NCVMemoryTypeHostPinned, static_cast<Ncv32u>(devProp.textureAlignment));
    ncvAssertPrintReturn(cpuCascadeAllocator.isInitialized(), "Error creating cascade CPU allocator", -1);

    Ncv32u haarNumStages, haarNumNodes, haarNumFeatures;
    ncvStat = ncvHaarGetClassifierSize(cascadeName, haarNumStages, haarNumNodes, haarNumFeatures);
    ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error reading classifier size (check the file)", -1);

    NCVVectorAlloc<HaarStage64> h_haarStages(cpuCascadeAllocator, haarNumStages);
    ncvAssertPrintReturn(h_haarStages.isMemAllocated(), "Error in cascade CPU allocator", -1);
    NCVVectorAlloc<HaarClassifierNode128> h_haarNodes(cpuCascadeAllocator, haarNumNodes);
    ncvAssertPrintReturn(h_haarNodes.isMemAllocated(), "Error in cascade CPU allocator", -1);
    NCVVectorAlloc<HaarFeature64> h_haarFeatures(cpuCascadeAllocator, haarNumFeatures);
    ncvAssertPrintReturn(h_haarFeatures.isMemAllocated(), "Error in cascade CPU allocator", -1);

    HaarClassifierCascadeDescriptor haar;
    ncvStat = ncvHaarLoadFromFile_host(cascadeName, haar, h_haarStages, h_haarNodes, h_haarFeatures);
    ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error loading classifier", -1);

    NCVVectorAlloc<HaarStage64> d_haarStages(gpuCascadeAllocator, haarNumStages);
    ncvAssertPrintReturn(d_haarStages.isMemAllocated(), "Error in cascade GPU allocator", -1);
    NCVVectorAlloc<HaarClassifierNode128> d_haarNodes(gpuCascadeAllocator, haarNumNodes);
    ncvAssertPrintReturn(d_haarNodes.isMemAllocated(), "Error in cascade GPU allocator", -1);
    NCVVectorAlloc<HaarFeature64> d_haarFeatures(gpuCascadeAllocator, haarNumFeatures);
    ncvAssertPrintReturn(d_haarFeatures.isMemAllocated(), "Error in cascade GPU allocator", -1);

    ncvStat = h_haarStages.copySolid(d_haarStages, 0);
    ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", -1);
    ncvStat = h_haarNodes.copySolid(d_haarNodes, 0);
    ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", -1);
    ncvStat = h_haarFeatures.copySolid(d_haarFeatures, 0);
    ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", -1);

    //==============================================================================
    //
    // Calculate memory requirements and create real allocators
    //
    //==============================================================================

    NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
    ncvAssertPrintReturn(gpuCounter.isInitialized(), "Error creating GPU memory counter", -1);
    NCVMemStackAllocator cpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
    ncvAssertPrintReturn(cpuCounter.isInitialized(), "Error creating CPU memory counter", -1);

    ncvStat = process(NULL, frameSize.width, frameSize.height,
                      false, false, haar,
                      d_haarStages, d_haarNodes,
                      d_haarFeatures, h_haarStages,
                      gpuCounter, cpuCounter, devProp);
    ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error in memory counting pass", -1);

    NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, gpuCounter.maxSize(), static_cast<Ncv32u>(devProp.textureAlignment));
    ncvAssertPrintReturn(gpuAllocator.isInitialized(), "Error creating GPU memory allocator", -1);
    NCVMemStackAllocator cpuAllocator(NCVMemoryTypeHostPinned, cpuCounter.maxSize(), static_cast<Ncv32u>(devProp.textureAlignment));
    ncvAssertPrintReturn(cpuAllocator.isInitialized(), "Error creating CPU memory allocator", -1);

    printf("Initialized for frame size [%dx%d]\n", frameSize.width, frameSize.height);

    //==============================================================================
    //
    // Main processing loop
    //
    //==============================================================================

    namedWindow(wndTitle, 1);
    Mat frameDisp;

    do
    {
        Mat gray;
        cvtColor((image.empty() ? frame : image), gray, cv::COLOR_BGR2GRAY);

        //
        // process
        //

        NcvSize32u minSize = haar.ClassifierSize;
        if (bLargestObject)
        {
            Ncv32u ratioX = preferredVideoFrameSize.width / minSize.width;
            Ncv32u ratioY = preferredVideoFrameSize.height / minSize.height;
            Ncv32u ratioSmallest = min(ratioX, ratioY);
            ratioSmallest = max((Ncv32u)(ratioSmallest / 2.5f), (Ncv32u)1);
            minSize.width *= ratioSmallest;
            minSize.height *= ratioSmallest;
        }

        Ncv32f avgTime;
        NcvTimer timer = ncvStartTimer();

        if (bUseGPU)
        {
            ncvStat = process(&gray, frameSize.width, frameSize.height,
                              bFilterRects, bLargestObject, haar,
                              d_haarStages, d_haarNodes,
                              d_haarFeatures, h_haarStages,
                              gpuAllocator, cpuAllocator, devProp);
            ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error in memory counting pass", -1);
        }
        else
        {
            vector<Rect> rectsOpenCV;

            classifierOpenCV.detectMultiScale(
                gray,
                rectsOpenCV,
                1.2f,
                bFilterRects ? 4 : 0,
                (bLargestObject ? CV_HAAR_FIND_BIGGEST_OBJECT : 0) | CV_HAAR_SCALE_IMAGE,
                Size(minSize.width, minSize.height));

            for (size_t rt = 0; rt < rectsOpenCV.size(); ++rt)
                rectangle(gray, rectsOpenCV[rt], Scalar(255));
        }

        avgTime = (Ncv32f)ncvEndQueryTimerMs(timer);

        cvtColor(gray, frameDisp, cv::COLOR_GRAY2BGR);
        displayState(frameDisp, bHelpScreen, bUseGPU, bLargestObject, bFilterRects, 1000.0f / avgTime);
        imshow(wndTitle, frameDisp);

        //handle input
        switch (cv::waitKey(3))
        {
        case ' ':
            bUseGPU = !bUseGPU;
            break;
        case 'm':
        case 'M':
            bLargestObject = !bLargestObject;
            break;
        case 'f':
        case 'F':
            bFilterRects = !bFilterRects;
            break;
        case 'h':
        case 'H':
            bHelpScreen = !bHelpScreen;
            break;
        case 27:
            bQuit = true;
            break;
        }

        // For camera and video file, capture the next image
        if (capture.isOpened())
        {
            capture >> frame;
            if (frame.empty())
            {
                break;
            }
        }
    }
    while (!bQuit);

    cv::destroyWindow(wndTitle);

    return 0;
}

#endif //!defined(HAVE_CUDA)
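For reference only: the face-detection functionality this deleted sample demonstrated through the NCV/cudalegacy API is available in the opencv_cudaobjdetect module without any manual allocator management. A minimal sketch, assuming a CUDA build and a cascade file supported by the GPU detector; the file names are placeholders, not part of this commit:

    #include "opencv2/core/cuda.hpp"
    #include "opencv2/cudaobjdetect.hpp"
    #include "opencv2/imgcodecs.hpp"
    #include "opencv2/imgproc.hpp"

    #include <vector>

    int main()
    {
        cv::Ptr<cv::cuda::CascadeClassifier> cascade =
            cv::cuda::CascadeClassifier::create("cascade.xml");   // placeholder cascade file

        cv::Mat img = cv::imread("input.jpg"), gray;              // placeholder image
        cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);

        cv::cuda::GpuMat d_gray(gray), d_objects;
        cascade->detectMultiScale(d_gray, d_objects);   // detection runs on the GPU

        std::vector<cv::Rect> faces;
        cascade->convert(d_objects, faces);             // download results as cv::Rect
        for (size_t i = 0; i < faces.size(); ++i)
            cv::rectangle(img, faces[i], cv::Scalar(0, 255, 0));
        return 0;
    }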
samples/gpu/opticalflow_nvidia_api.cpp (deleted, 100644 → 0)
#if defined _MSC_VER && _MSC_VER >= 1400
#pragma warning( disable : 4201 4408 4127 4100)
#endif

#include <iostream>
#include <iomanip>
#include <memory>
#include <exception>
#include <ctime>
#include <ctype.h>
#include <iostream>
#include <iomanip>

#include "opencv2/core/cuda.hpp"
#include "opencv2/cudalegacy.hpp"
#include "opencv2/highgui.hpp"

#include "opencv2/core/core_c.h"        // FIXIT legacy API
#include "opencv2/highgui/highgui_c.h"  // FIXIT legacy API

#if !defined(HAVE_CUDA)
int main(int, const char**)
{
    std::cout << "Please compile the library with CUDA support" << std::endl;
    return -1;
}
#else

//using std::shared_ptr;
using cv::Ptr;

#define PARAM_LEFT "--left"
#define PARAM_RIGHT "--right"
#define PARAM_SCALE "--scale"
#define PARAM_ALPHA "--alpha"
#define PARAM_GAMMA "--gamma"
#define PARAM_INNER "--inner"
#define PARAM_OUTER "--outer"
#define PARAM_SOLVER "--solver"
#define PARAM_TIME_STEP "--time-step"
#define PARAM_HELP "--help"

Ptr<INCVMemAllocator> g_pGPUMemAllocator;
Ptr<INCVMemAllocator> g_pHostMemAllocator;

class RgbToMonochrome
{
public:
    float operator ()(unsigned char b, unsigned char g, unsigned char r)
    {
        float _r = static_cast<float>(r)/255.0f;
        float _g = static_cast<float>(g)/255.0f;
        float _b = static_cast<float>(b)/255.0f;
        return (_r + _g + _b)/3.0f;
    }
};

class RgbToR
{
public:
    float operator ()(unsigned char /*b*/, unsigned char /*g*/, unsigned char r)
    {
        return static_cast<float>(r)/255.0f;
    }
};

class RgbToG
{
public:
    float operator ()(unsigned char /*b*/, unsigned char g, unsigned char /*r*/)
    {
        return static_cast<float>(g)/255.0f;
    }
};

class RgbToB
{
public:
    float operator ()(unsigned char b, unsigned char /*g*/, unsigned char /*r*/)
    {
        return static_cast<float>(b)/255.0f;
    }
};

template<class T>
NCVStatus CopyData(IplImage *image, Ptr<NCVMatrixAlloc<Ncv32f> >& dst)
{
    dst = Ptr<NCVMatrixAlloc<Ncv32f> >(new NCVMatrixAlloc<Ncv32f>(*g_pHostMemAllocator, image->width, image->height));
    ncvAssertReturn(dst->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);

    unsigned char *row = reinterpret_cast<unsigned char*>(image->imageData);
    T convert;
    for (int i = 0; i < image->height; ++i)
    {
        for (int j = 0; j < image->width; ++j)
        {
            if (image->nChannels < 3)
            {
                dst->ptr()[j + i*dst->stride()] = static_cast<float>(*(row + j*image->nChannels))/255.0f;
            }
            else
            {
                unsigned char *color = row + j * image->nChannels;
                dst->ptr()[j + i*dst->stride()] = convert(color[0], color[1], color[2]);
            }
        }
        row += image->widthStep;
    }
    return NCV_SUCCESS;
}

template<class T>
NCVStatus CopyData(const IplImage *image, const NCVMatrixAlloc<Ncv32f>& dst)
{
    unsigned char *row = reinterpret_cast<unsigned char*>(image->imageData);
    T convert;
    for (int i = 0; i < image->height; ++i)
    {
        for (int j = 0; j < image->width; ++j)
        {
            if (image->nChannels < 3)
            {
                dst.ptr()[j + i*dst.stride()] = static_cast<float>(*(row + j*image->nChannels))/255.0f;
            }
            else
            {
                unsigned char *color = row + j * image->nChannels;
                dst.ptr()[j + i*dst.stride()] = convert(color[0], color[1], color[2]);
            }
        }
        row += image->widthStep;
    }
    return NCV_SUCCESS;
}

static NCVStatus LoadImages(const char *frame0Name,
                            const char *frame1Name,
                            int &width,
                            int &height,
                            Ptr<NCVMatrixAlloc<Ncv32f> > &src,
                            Ptr<NCVMatrixAlloc<Ncv32f> > &dst,
                            IplImage *&firstFrame,
                            IplImage *&lastFrame)
{
    IplImage *image;
    image = cvLoadImage(frame0Name);
    if (image == 0)
    {
        std::cout << "Could not open '" << frame0Name << "'\n";
        return NCV_FILE_ERROR;
    }

    firstFrame = image;
    // copy data to src
    ncvAssertReturnNcvStat(CopyData<RgbToMonochrome>(image, src));

    IplImage *image2;
    image2 = cvLoadImage(frame1Name);
    if (image2 == 0)
    {
        std::cout << "Could not open '" << frame1Name << "'\n";
        return NCV_FILE_ERROR;
    }
    lastFrame = image2;

    ncvAssertReturnNcvStat(CopyData<RgbToMonochrome>(image2, dst));

    width  = image->width;
    height = image->height;

    return NCV_SUCCESS;
}

template<typename T>
inline T Clamp(T x, T a, T b)
{
    return ((x) > (a) ? ((x) < (b) ? (x) : (b)) : (a));
}

template<typename T>
inline T MapValue(T x, T a, T b, T c, T d)
{
    x = Clamp(x, a, b);
    return c + (d - c) * (x - a) / (b - a);
}

static NCVStatus ShowFlow(NCVMatrixAlloc<Ncv32f> &u, NCVMatrixAlloc<Ncv32f> &v, const char *name)
{
    IplImage *flowField;

    NCVMatrixAlloc<Ncv32f> host_u(*g_pHostMemAllocator, u.width(), u.height());
    ncvAssertReturn(host_u.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);

    NCVMatrixAlloc<Ncv32f> host_v(*g_pHostMemAllocator, u.width(), u.height());
    ncvAssertReturn(host_v.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);

    ncvAssertReturnNcvStat(u.copySolid(host_u, 0));
    ncvAssertReturnNcvStat(v.copySolid(host_v, 0));

    float *ptr_u = host_u.ptr();
    float *ptr_v = host_v.ptr();

    float maxDisplacement = 1.0f;

    for (Ncv32u i = 0; i < u.height(); ++i)
    {
        for (Ncv32u j = 0; j < u.width(); ++j)
        {
            float d = std::max(fabsf(*ptr_u), fabsf(*ptr_v));
            if (d > maxDisplacement) maxDisplacement = d;
            ++ptr_u;
            ++ptr_v;
        }
        ptr_u += u.stride() - u.width();
        ptr_v += v.stride() - v.width();
    }

    CvSize image_size = cvSize(u.width(), u.height());
    flowField = cvCreateImage(image_size, IPL_DEPTH_8U, 4);
    if (flowField == 0) return NCV_NULL_PTR;

    unsigned char *row = reinterpret_cast<unsigned char *>(flowField->imageData);

    ptr_u = host_u.ptr();
    ptr_v = host_v.ptr();
    for (int i = 0; i < flowField->height; ++i)
    {
        for (int j = 0; j < flowField->width; ++j)
        {
            (row + j * flowField->nChannels)[0] = 0;
            (row + j * flowField->nChannels)[1] = static_cast<unsigned char>(MapValue(-(*ptr_v), -maxDisplacement, maxDisplacement, 0.0f, 255.0f));
            (row + j * flowField->nChannels)[2] = static_cast<unsigned char>(MapValue(*ptr_u, -maxDisplacement, maxDisplacement, 0.0f, 255.0f));
            (row + j * flowField->nChannels)[3] = 255;
            ++ptr_u;
            ++ptr_v;
        }
        row += flowField->widthStep;
        ptr_u += u.stride() - u.width();
        ptr_v += v.stride() - v.width();
    }

    cvShowImage(name, flowField);

    return NCV_SUCCESS;
}

static IplImage *CreateImage(NCVMatrixAlloc<Ncv32f> &h_r, NCVMatrixAlloc<Ncv32f> &h_g, NCVMatrixAlloc<Ncv32f> &h_b)
{
    CvSize imageSize = cvSize(h_r.width(), h_r.height());
    IplImage *image  = cvCreateImage(imageSize, IPL_DEPTH_8U, 4);
    if (image == 0) return 0;

    unsigned char *row = reinterpret_cast<unsigned char*>(image->imageData);

    for (int i = 0; i < image->height; ++i)
    {
        for (int j = 0; j < image->width; ++j)
        {
            int offset = j * image->nChannels;
            int pos    = i * h_r.stride() + j;
            row[offset + 0] = static_cast<unsigned char>(h_b.ptr()[pos] * 255.0f);
            row[offset + 1] = static_cast<unsigned char>(h_g.ptr()[pos] * 255.0f);
            row[offset + 2] = static_cast<unsigned char>(h_r.ptr()[pos] * 255.0f);
            row[offset + 3] = 255;
        }
        row += image->widthStep;
    }

    return image;
}

static void PrintHelp()
{
    std::cout << "Usage help:\n";
    std::cout << std::setiosflags(std::ios::left);
    std::cout << "\t" << std::setw(15) << PARAM_ALPHA << " - set alpha\n";
    std::cout << "\t" << std::setw(15) << PARAM_GAMMA << " - set gamma\n";
    std::cout << "\t" << std::setw(15) << PARAM_INNER << " - set number of inner iterations\n";
    std::cout << "\t" << std::setw(15) << PARAM_LEFT << " - specify left image\n";
    std::cout << "\t" << std::setw(15) << PARAM_RIGHT << " - specify right image\n";
    std::cout << "\t" << std::setw(15) << PARAM_OUTER << " - set number of outer iterations\n";
    std::cout << "\t" << std::setw(15) << PARAM_SCALE << " - set pyramid scale factor\n";
    std::cout << "\t" << std::setw(15) << PARAM_SOLVER << " - set number of basic solver iterations\n";
    std::cout << "\t" << std::setw(15) << PARAM_TIME_STEP << " - set frame interpolation time step\n";
    std::cout << "\t" << std::setw(15) << PARAM_HELP << " - display this help message\n";
}

static int ProcessCommandLine(int argc, char **argv,
                              Ncv32f &timeStep,
                              char *&frame0Name,
                              char *&frame1Name,
                              NCVBroxOpticalFlowDescriptor &desc)
{
    timeStep = 0.25f;
    for (int iarg = 1; iarg < argc; ++iarg)
    {
        if (strcmp(argv[iarg], PARAM_LEFT) == 0)
        {
            if (iarg + 1 < argc)
            {
                frame0Name = argv[++iarg];
            }
            else
                return -1;
        }
        if (strcmp(argv[iarg], PARAM_RIGHT) == 0)
        {
            if (iarg + 1 < argc)
            {
                frame1Name = argv[++iarg];
            }
            else
                return -1;
        }
        else if (strcmp(argv[iarg], PARAM_SCALE) == 0)
        {
            if (iarg + 1 < argc)
                desc.scale_factor = static_cast<Ncv32f>(atof(argv[++iarg]));
            else
                return -1;
        }
        else if (strcmp(argv[iarg], PARAM_ALPHA) == 0)
        {
            if (iarg + 1 < argc)
                desc.alpha = static_cast<Ncv32f>(atof(argv[++iarg]));
            else
                return -1;
        }
        else if (strcmp(argv[iarg], PARAM_GAMMA) == 0)
        {
            if (iarg + 1 < argc)
                desc.gamma = static_cast<Ncv32f>(atof(argv[++iarg]));
            else
                return -1;
        }
        else if (strcmp(argv[iarg], PARAM_INNER) == 0)
        {
            if (iarg + 1 < argc)
                desc.number_of_inner_iterations = static_cast<Ncv32u>(atoi(argv[++iarg]));
            else
                return -1;
        }
        else if (strcmp(argv[iarg], PARAM_OUTER) == 0)
        {
            if (iarg + 1 < argc)
                desc.number_of_outer_iterations = static_cast<Ncv32u>(atoi(argv[++iarg]));
            else
                return -1;
        }
        else if (strcmp(argv[iarg], PARAM_SOLVER) == 0)
        {
            if (iarg + 1 < argc)
                desc.number_of_solver_iterations = static_cast<Ncv32u>(atoi(argv[++iarg]));
            else
                return -1;
        }
        else if (strcmp(argv[iarg], PARAM_TIME_STEP) == 0)
        {
            if (iarg + 1 < argc)
                timeStep = static_cast<Ncv32f>(atof(argv[++iarg]));
            else
                return -1;
        }
        else if (strcmp(argv[iarg], PARAM_HELP) == 0)
        {
            PrintHelp();
            return 0;
        }
    }
    return 0;
}

int main(int argc, char **argv)
{
    char *frame0Name = 0, *frame1Name = 0;
    Ncv32f timeStep = 0.01f;

    NCVBroxOpticalFlowDescriptor desc;

    desc.alpha = 0.197f;
    desc.gamma = 50.0f;
    desc.number_of_inner_iterations  = 10;
    desc.number_of_outer_iterations  = 77;
    desc.number_of_solver_iterations = 10;
    desc.scale_factor = 0.8f;

    int result = ProcessCommandLine(argc, argv, timeStep, frame0Name, frame1Name, desc);
    if (argc == 1 || result)
    {
        PrintHelp();
        return result;
    }

    cv::cuda::printShortCudaDeviceInfo(cv::cuda::getDevice());

    std::cout << "OpenCV / NVIDIA Computer Vision\n";
    std::cout << "Optical Flow Demo: Frame Interpolation\n";
    std::cout << "=========================================\n";
    std::cout << "Press:\nESC to quit\n'a' to move to the previous frame\n's' to move to the next frame\n";

    int devId;
    ncvAssertCUDAReturn(cudaGetDevice(&devId), -1);
    cudaDeviceProp devProp;
    ncvAssertCUDAReturn(cudaGetDeviceProperties(&devProp, devId), -1);
    std::cout << "Using GPU: " << devId << "(" << devProp.name <<
        "), arch=" << devProp.major << "." << devProp.minor << std::endl;

    g_pGPUMemAllocator = Ptr<INCVMemAllocator>(new NCVMemNativeAllocator(NCVMemoryTypeDevice, static_cast<Ncv32u>(devProp.textureAlignment)));
    ncvAssertPrintReturn(g_pGPUMemAllocator->isInitialized(), "Device memory allocator isn't initialized", -1);

    g_pHostMemAllocator = Ptr<INCVMemAllocator>(new NCVMemNativeAllocator(NCVMemoryTypeHostPageable, static_cast<Ncv32u>(devProp.textureAlignment)));
    ncvAssertPrintReturn(g_pHostMemAllocator->isInitialized(), "Host memory allocator isn't initialized", -1);

    int width, height;

    Ptr<NCVMatrixAlloc<Ncv32f> > src_host;
    Ptr<NCVMatrixAlloc<Ncv32f> > dst_host;

    IplImage *firstFrame, *lastFrame;
    if (frame0Name != 0 && frame1Name != 0)
    {
        ncvAssertReturnNcvStat(LoadImages(frame0Name, frame1Name, width, height, src_host, dst_host, firstFrame, lastFrame));
    }
    else
    {
        ncvAssertReturnNcvStat(LoadImages("frame10.bmp", "frame11.bmp", width, height, src_host, dst_host, firstFrame, lastFrame));
    }

    Ptr<NCVMatrixAlloc<Ncv32f> > src(new NCVMatrixAlloc<Ncv32f>(*g_pGPUMemAllocator, src_host->width(), src_host->height()));
    ncvAssertReturn(src->isMemAllocated(), -1);

    Ptr<NCVMatrixAlloc<Ncv32f> > dst(new NCVMatrixAlloc<Ncv32f>(*g_pGPUMemAllocator, src_host->width(), src_host->height()));
    ncvAssertReturn(dst->isMemAllocated(), -1);

    ncvAssertReturnNcvStat(src_host->copySolid(*src, 0));
    ncvAssertReturnNcvStat(dst_host->copySolid(*dst, 0));

#if defined SAFE_MAT_DECL
#undef SAFE_MAT_DECL
#endif
#define SAFE_MAT_DECL(name, allocator, sx, sy) \
    NCVMatrixAlloc<Ncv32f> name(*allocator, sx, sy);\
    ncvAssertReturn(name.isMemAllocated(), -1);

    SAFE_MAT_DECL(u, g_pGPUMemAllocator, width, height);
    SAFE_MAT_DECL(v, g_pGPUMemAllocator, width, height);

    SAFE_MAT_DECL(uBck, g_pGPUMemAllocator, width, height);
    SAFE_MAT_DECL(vBck, g_pGPUMemAllocator, width, height);

    SAFE_MAT_DECL(h_r, g_pHostMemAllocator, width, height);
    SAFE_MAT_DECL(h_g, g_pHostMemAllocator, width, height);
    SAFE_MAT_DECL(h_b, g_pHostMemAllocator, width, height);

    std::cout << "Estimating optical flow\nForward...\n";

    if (NCV_SUCCESS != NCVBroxOpticalFlow(desc, *g_pGPUMemAllocator, *src, *dst, u, v, 0))
    {
        std::cout << "Failed\n";
        return -1;
    }

    std::cout << "Backward...\n";
    if (NCV_SUCCESS != NCVBroxOpticalFlow(desc, *g_pGPUMemAllocator, *dst, *src, uBck, vBck, 0))
    {
        std::cout << "Failed\n";
        return -1;
    }

    // matrix for temporary data
    SAFE_MAT_DECL(d_temp, g_pGPUMemAllocator, width, height);

    // first frame color components (GPU memory)
    SAFE_MAT_DECL(d_r, g_pGPUMemAllocator, width, height);
    SAFE_MAT_DECL(d_g, g_pGPUMemAllocator, width, height);
    SAFE_MAT_DECL(d_b, g_pGPUMemAllocator, width, height);

    // second frame color components (GPU memory)
    SAFE_MAT_DECL(d_rt, g_pGPUMemAllocator, width, height);
    SAFE_MAT_DECL(d_gt, g_pGPUMemAllocator, width, height);
    SAFE_MAT_DECL(d_bt, g_pGPUMemAllocator, width, height);

    // intermediate frame color components (GPU memory)
    SAFE_MAT_DECL(d_rNew, g_pGPUMemAllocator, width, height);
    SAFE_MAT_DECL(d_gNew, g_pGPUMemAllocator, width, height);
    SAFE_MAT_DECL(d_bNew, g_pGPUMemAllocator, width, height);

    // interpolated forward flow
    SAFE_MAT_DECL(ui, g_pGPUMemAllocator, width, height);
    SAFE_MAT_DECL(vi, g_pGPUMemAllocator, width, height);

    // interpolated backward flow
    SAFE_MAT_DECL(ubi, g_pGPUMemAllocator, width, height);
    SAFE_MAT_DECL(vbi, g_pGPUMemAllocator, width, height);

    // occlusion masks
    SAFE_MAT_DECL(occ0, g_pGPUMemAllocator, width, height);
    SAFE_MAT_DECL(occ1, g_pGPUMemAllocator, width, height);

    // prepare color components on host and copy them to device memory
    ncvAssertReturnNcvStat(CopyData<RgbToR>(firstFrame, h_r));
    ncvAssertReturnNcvStat(CopyData<RgbToG>(firstFrame, h_g));
    ncvAssertReturnNcvStat(CopyData<RgbToB>(firstFrame, h_b));

    ncvAssertReturnNcvStat(h_r.copySolid(d_r, 0));
    ncvAssertReturnNcvStat(h_g.copySolid(d_g, 0));
    ncvAssertReturnNcvStat(h_b.copySolid(d_b, 0));

    ncvAssertReturnNcvStat(CopyData<RgbToR>(lastFrame, h_r));
    ncvAssertReturnNcvStat(CopyData<RgbToG>(lastFrame, h_g));
    ncvAssertReturnNcvStat(CopyData<RgbToB>(lastFrame, h_b));

    ncvAssertReturnNcvStat(h_r.copySolid(d_rt, 0));
    ncvAssertReturnNcvStat(h_g.copySolid(d_gt, 0));
    ncvAssertReturnNcvStat(h_b.copySolid(d_bt, 0));

    std::cout << "Interpolating...\n";
    std::cout.precision(4);

    std::vector<IplImage*> frames;
    frames.push_back(firstFrame);

    // compute interpolated frames
    for (Ncv32f timePos = timeStep; timePos < 1.0f; timePos += timeStep)
    {
        ncvAssertCUDAReturn(cudaMemset(ui.ptr(), 0, ui.pitch() * ui.height()), NCV_CUDA_ERROR);
        ncvAssertCUDAReturn(cudaMemset(vi.ptr(), 0, vi.pitch() * vi.height()), NCV_CUDA_ERROR);

        ncvAssertCUDAReturn(cudaMemset(ubi.ptr(), 0, ubi.pitch() * ubi.height()), NCV_CUDA_ERROR);
        ncvAssertCUDAReturn(cudaMemset(vbi.ptr(), 0, vbi.pitch() * vbi.height()), NCV_CUDA_ERROR);

        ncvAssertCUDAReturn(cudaMemset(occ0.ptr(), 0, occ0.pitch() * occ0.height()), NCV_CUDA_ERROR);
        ncvAssertCUDAReturn(cudaMemset(occ1.ptr(), 0, occ1.pitch() * occ1.height()), NCV_CUDA_ERROR);

        NppStInterpolationState state;
        // interpolation state should be filled once except pSrcFrame0, pSrcFrame1, and pNewFrame
        // we will only need to reset buffers content to 0 since interpolator doesn't do this itself
        state.size = NcvSize32u(width, height);
        state.nStep = d_r.pitch();
        state.pSrcFrame0 = d_r.ptr();
        state.pSrcFrame1 = d_rt.ptr();
        state.pFU = u.ptr();
        state.pFV = v.ptr();
        state.pBU = uBck.ptr();
        state.pBV = vBck.ptr();
        state.pos = timePos;
        state.pNewFrame = d_rNew.ptr();
        state.ppBuffers[0] = occ0.ptr();
        state.ppBuffers[1] = occ1.ptr();
        state.ppBuffers[2] = ui.ptr();
        state.ppBuffers[3] = vi.ptr();
        state.ppBuffers[4] = ubi.ptr();
        state.ppBuffers[5] = vbi.ptr();

        // interpolate red channel
        nppiStInterpolateFrames(&state);

        // reset buffers
        ncvAssertCUDAReturn(cudaMemset(ui.ptr(), 0, ui.pitch() * ui.height()), NCV_CUDA_ERROR);
        ncvAssertCUDAReturn(cudaMemset(vi.ptr(), 0, vi.pitch() * vi.height()), NCV_CUDA_ERROR);

        ncvAssertCUDAReturn(cudaMemset(ubi.ptr(), 0, ubi.pitch() * ubi.height()), NCV_CUDA_ERROR);
        ncvAssertCUDAReturn(cudaMemset(vbi.ptr(), 0, vbi.pitch() * vbi.height()), NCV_CUDA_ERROR);

        ncvAssertCUDAReturn(cudaMemset(occ0.ptr(), 0, occ0.pitch() * occ0.height()), NCV_CUDA_ERROR);
        ncvAssertCUDAReturn(cudaMemset(occ1.ptr(), 0, occ1.pitch() * occ1.height()), NCV_CUDA_ERROR);

        // interpolate green channel
        state.pSrcFrame0 = d_g.ptr();
        state.pSrcFrame1 = d_gt.ptr();
        state.pNewFrame  = d_gNew.ptr();

        nppiStInterpolateFrames(&state);

        // reset buffers
        ncvAssertCUDAReturn(cudaMemset(ui.ptr(), 0, ui.pitch() * ui.height()), NCV_CUDA_ERROR);
        ncvAssertCUDAReturn(cudaMemset(vi.ptr(), 0, vi.pitch() * vi.height()), NCV_CUDA_ERROR);

        ncvAssertCUDAReturn(cudaMemset(ubi.ptr(), 0, ubi.pitch() * ubi.height()), NCV_CUDA_ERROR);
        ncvAssertCUDAReturn(cudaMemset(vbi.ptr(), 0, vbi.pitch() * vbi.height()), NCV_CUDA_ERROR);

        ncvAssertCUDAReturn(cudaMemset(occ0.ptr(), 0, occ0.pitch() * occ0.height()), NCV_CUDA_ERROR);
        ncvAssertCUDAReturn(cudaMemset(occ1.ptr(), 0, occ1.pitch() * occ1.height()), NCV_CUDA_ERROR);

        // interpolate blue channel
        state.pSrcFrame0 = d_b.ptr();
        state.pSrcFrame1 = d_bt.ptr();
        state.pNewFrame  = d_bNew.ptr();

        nppiStInterpolateFrames(&state);

        // copy to host memory
        ncvAssertReturnNcvStat(d_rNew.copySolid(h_r, 0));
        ncvAssertReturnNcvStat(d_gNew.copySolid(h_g, 0));
        ncvAssertReturnNcvStat(d_bNew.copySolid(h_b, 0));

        // convert to IplImage
        IplImage *newFrame = CreateImage(h_r, h_g, h_b);
        if (newFrame == 0)
        {
            std::cout << "Could not create new frame in host memory\n";
            break;
        }

        frames.push_back(newFrame);
        std::cout << timePos * 100.0f << "%\r";
    }

    std::cout << std::setw(5) << "100%\n";

    frames.push_back(lastFrame);

    Ncv32u currentFrame;
    currentFrame = 0;

    ShowFlow(u, v, "Forward flow");
    ShowFlow(uBck, vBck, "Backward flow");

    cvShowImage("Interpolated frame", frames[currentFrame]);

    bool qPressed = false;
    while (!qPressed)
    {
        int key = toupper(cvWaitKey(10));
        switch (key)
        {
        case 27:
            qPressed = true;
            break;
        case 'A':
            if (currentFrame > 0) --currentFrame;
            cvShowImage("Interpolated frame", frames[currentFrame]);
            break;
        case 'S':
            if (currentFrame < frames.size() - 1) ++currentFrame;
            cvShowImage("Interpolated frame", frames[currentFrame]);
            break;
        }
    }

    cvDestroyAllWindows();

    std::vector<IplImage*>::iterator iter;
    for (iter = frames.begin(); iter != frames.end(); ++iter)
    {
        cvReleaseImage(&(*iter));
    }

    return 0;
}

#endif
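The Brox optical flow itself is not going away; it remains exposed through opencv_cudaoptflow, and only the NCV-based frame-interpolation plumbing is dropped with this sample. A minimal sketch of dense Brox flow on two grayscale frames, assuming a CUDA build; the file names are placeholders and the parameter values simply mirror the defaults the removed sample used:

    #include "opencv2/core/cuda.hpp"
    #include "opencv2/cudaoptflow.hpp"
    #include "opencv2/imgcodecs.hpp"

    int main()
    {
        // Brox flow expects single-channel 32-bit float input in [0, 1].
        cv::Mat f0 = cv::imread("frame10.bmp", cv::IMREAD_GRAYSCALE);   // placeholder
        cv::Mat f1 = cv::imread("frame11.bmp", cv::IMREAD_GRAYSCALE);   // placeholder
        f0.convertTo(f0, CV_32F, 1.0 / 255.0);
        f1.convertTo(f1, CV_32F, 1.0 / 255.0);

        cv::cuda::GpuMat d_f0(f0), d_f1(f1), d_flow;
        cv::Ptr<cv::cuda::BroxOpticalFlow> brox =
            cv::cuda::BroxOpticalFlow::create(0.197f, 50.0f, 0.8f, 10, 77, 10);
        brox->calc(d_f0, d_f1, d_flow);   // d_flow is CV_32FC2: (u, v) per pixel

        cv::Mat flow;
        d_flow.download(flow);            // bring the flow field back to the host
        return 0;
    }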