opencv / Commits

Commit 2bfaf540 authored Apr 17, 2012 by Vladislav Vinogradov

added VideoWriter_GPU

parent 0824cf50

Showing 12 changed files with 1048 additions and 7 deletions (+1048, -7)
3rdparty/ffmpeg/opencv_ffmpeg.dll            +0    -0
3rdparty/ffmpeg/opencv_ffmpeg_64.dll         +0    -0
modules/gpu/CMakeLists.txt                   +16   -2
modules/gpu/include/opencv2/gpu/gpu.hpp      +95   -0
modules/gpu/src/cuda/rgb_to_yv12.cu          +171  -0
modules/gpu/src/precomp.hpp                  +11   -5
modules/gpu/src/video_writer.cpp             +0    -0
modules/gpu/test/test_video.cpp              +62   -0
modules/highgui/src/cap_ffmpeg_api.hpp       +12   -0
modules/highgui/src/cap_ffmpeg_impl.hpp      +292  -0
modules/highgui/src/cap_ffmpeg_impl_v2.hpp   +293  -0
samples/gpu/video_writer.cpp                 +96   -0
3rdparty/ffmpeg/opencv_ffmpeg.dll
No preview for this file type (binary).

3rdparty/ffmpeg/opencv_ffmpeg_64.dll
No preview for this file type (binary).
modules/gpu/CMakeLists.txt
...
@@ -7,6 +7,8 @@ ocv_add_module(gpu opencv_imgproc opencv_calib3d opencv_objdetect opencv_video o
 ocv_module_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/src/cuda")
+ocv_module_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../highgui/src")

 file(GLOB lib_hdrs      "include/opencv2/${name}/*.hpp" "include/opencv2/${name}/*.h")
 file(GLOB lib_int_hdrs  "src/*.hpp" "src/*.h")
 file(GLOB lib_cuda_hdrs "src/cuda/*.hpp" "src/cuda/*.h")
...
@@ -48,7 +50,19 @@ if (HAVE_CUDA)
   OCV_CUDA_COMPILE(cuda_objs ${lib_cuda} ${ncv_cuda})
   #CUDA_BUILD_CLEAN_TARGET()

-  set(cuda_link_libs ${CUDA_LIBRARIES} ${CUDA_npp_LIBRARY})
+  unset(CUDA_nvcuvid_LIBRARY CACHE)
+  find_cuda_helper_libs(nvcuvid)
+
+  if (WIN32)
+    unset(CUDA_nvcuvenc_LIBRARY CACHE)
+    find_cuda_helper_libs(nvcuvenc)
+  endif()
+
+  set(cuda_link_libs ${CUDA_LIBRARIES} ${CUDA_npp_LIBRARY} ${CUDA_nvcuvid_LIBRARY})
+
+  if (WIN32)
+    set(cuda_link_libs ${cuda_link_libs} ${CUDA_nvcuvenc_LIBRARY})
+  endif()
 else()
   set(lib_cuda "")
   set(cuda_objs "")
...
@@ -61,7 +75,7 @@ ocv_set_module_sources(
   SOURCES ${lib_int_hdrs} ${lib_cuda_hdrs} ${lib_device_hdrs} ${lib_device_hdrs_detail} ${lib_srcs} ${lib_cuda} ${ncv_files} ${cuda_objs})

-ocv_create_module(${cuda_link_libs})
+ocv_create_module(${cuda_link_libs} ${HIGHGUI_LIBRARIES})

 if (HAVE_CUDA)
   if (HAVE_CUFFT)
...
modules/gpu/include/opencv2/gpu/gpu.hpp
...
@@ -45,6 +45,7 @@
#ifndef SKIP_INCLUDES
#include <vector>
#include <memory>
#endif

#include "opencv2/core/gpumat.hpp"
...
@@ -1884,6 +1885,100 @@ CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1,
CV_EXPORTS void createOpticalFlowNeedleMap(const GpuMat& u, const GpuMat& v, GpuMat& vertex, GpuMat& colors);

////////////////////////////////// Video Encoding //////////////////////////////////////////

// Works only under Windows
// Supports only H264 video codec and AVI files
class CV_EXPORTS VideoWriter_GPU
{
public:
    struct EncoderParams;

    // Callbacks for video encoder, use it if you want to work with raw video stream
    class EncoderCallBack;

    VideoWriter_GPU();
    VideoWriter_GPU(const std::string& fileName, cv::Size frameSize, double fps);
    VideoWriter_GPU(const std::string& fileName, cv::Size frameSize, double fps, const EncoderParams& params);
    VideoWriter_GPU(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps);
    VideoWriter_GPU(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps, const EncoderParams& params);
    ~VideoWriter_GPU();

    // all methods throw cv::Exception if an error occurs
    void open(const std::string& fileName, cv::Size frameSize, double fps);
    void open(const std::string& fileName, cv::Size frameSize, double fps, const EncoderParams& params);
    void open(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps);
    void open(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps, const EncoderParams& params);

    bool isOpened() const;
    void close();

    void write(const cv::gpu::GpuMat& image, bool lastFrame = false);

    struct EncoderParams
    {
        int P_Interval;      // NVVE_P_INTERVAL,
        int IDR_Period;      // NVVE_IDR_PERIOD,
        int DynamicGOP;      // NVVE_DYNAMIC_GOP,
        int RCType;          // NVVE_RC_TYPE,
        int AvgBitrate;      // NVVE_AVG_BITRATE,
        int PeakBitrate;     // NVVE_PEAK_BITRATE,
        int QP_Level_Intra;  // NVVE_QP_LEVEL_INTRA,
        int QP_Level_InterP; // NVVE_QP_LEVEL_INTER_P,
        int QP_Level_InterB; // NVVE_QP_LEVEL_INTER_B,
        int DeblockMode;     // NVVE_DEBLOCK_MODE,
        int ProfileLevel;    // NVVE_PROFILE_LEVEL,
        int ForceIntra;      // NVVE_FORCE_INTRA,
        int ForceIDR;        // NVVE_FORCE_IDR,
        int ClearStat;       // NVVE_CLEAR_STAT,
        int DIMode;          // NVVE_SET_DEINTERLACE,
        int Presets;         // NVVE_PRESETS,
        int DisableCabac;    // NVVE_DISABLE_CABAC,
        int NaluFramingType; // NVVE_CONFIGURE_NALU_FRAMING_TYPE
        int DisableSPSPPS;   // NVVE_DISABLE_SPS_PPS

        EncoderParams();
        explicit EncoderParams(const std::string& configFile);

        void load(const std::string& configFile);
        void save(const std::string& configFile) const;
    };

    class EncoderCallBack
    {
    public:
        enum PicType
        {
            IFRAME = 1,
            PFRAME = 2,
            BFRAME = 3
        };

        virtual ~EncoderCallBack() {}

        // callback function to signal the start of bitstream that is to be encoded
        // must return pointer to buffer
        virtual unsigned char* acquireBitStream(int* bufferSize) = 0;

        // callback function to signal that the encoded bitstream is ready to be written to file
        virtual void releaseBitStream(unsigned char* data, int size) = 0;

        // callback function to signal that the encoding operation on the frame has started
        virtual void onBeginFrame(int frameNumber, PicType picType) = 0;

        // callback function signals that the encoding operation on the frame has finished
        virtual void onEndFrame(int frameNumber, PicType picType) = 0;
    };

private:
    VideoWriter_GPU(const VideoWriter_GPU&);
    VideoWriter_GPU& operator=(const VideoWriter_GPU&);

    class Impl;
    std::auto_ptr<Impl> impl_;
};

} // namespace gpu

} // namespace cv
...
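A minimal usage sketch of the new file-based API, modelled on the test and sample added later in this commit (the input and output file names are placeholders, not part of the header):

    #include "opencv2/gpu/gpu.hpp"
    #include "opencv2/highgui/highgui.hpp"

    // Decode on the CPU, upload each frame and feed the GPU H264 writer.
    void encodeWithGpuWriter()
    {
        cv::VideoCapture reader("input.avi");        // placeholder input file
        cv::gpu::VideoWriter_GPU d_writer;

        cv::Mat frame;
        cv::gpu::GpuMat d_frame;

        for (;;)
        {
            reader >> frame;
            if (frame.empty())
                break;

            d_frame.upload(frame);

            // open lazily, once the frame size is known
            if (!d_writer.isOpened())
                d_writer.open("output.avi", frame.size(), 25.0);   // placeholder output file

            d_writer.write(d_frame);
        }

        d_writer.close();
    }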
modules/gpu/src/cuda/rgb_to_yv12.cu
new file (mode 100644)
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
namespace cv { namespace gpu { namespace device
{
namespace video_encoding
{
__device__ __forceinline__ void rgbtoy(const uchar b, const uchar g, const uchar r, uchar& y)
{
y = static_cast<uchar>(((int)(30 * r) + (int)(59 * g) + (int)(11 * b)) / 100);
}
__device__ __forceinline__ void rgbtoyuv(const uchar b, const uchar g, const uchar r, uchar& y, uchar& u, uchar& v)
{
rgbtoy(b, g, r, y);
u = static_cast<uchar>(((int)(-17 * r) - (int)(33 * g) + (int)(50 * b) + 12800) / 100);
v = static_cast<uchar>(((int)(50 * r) - (int)(42 * g) - (int)(8 * b) + 12800) / 100);
}
__global__ void Gray_to_YV12(const DevMem2Db src, PtrStepb dst)
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
if (x + 1 >= src.cols || y + 1 >= src.rows)
return;
// get pointers to the data
const size_t planeSize = src.rows * dst.step;
PtrStepb y_plane(dst.data, dst.step);
PtrStepb u_plane(y_plane.data + planeSize, dst.step / 2);
PtrStepb v_plane(u_plane.data + (planeSize / 4), dst.step / 2);
uchar pix;
uchar y_val, u_val, v_val;
pix = src(y, x);
rgbtoy(pix, pix, pix, y_val);
y_plane(y, x) = y_val;
pix = src(y, x + 1);
rgbtoy(pix, pix, pix, y_val);
y_plane(y, x + 1) = y_val;
pix = src(y + 1, x);
rgbtoy(pix, pix, pix, y_val);
y_plane(y + 1, x) = y_val;
pix = src(y + 1, x + 1);
rgbtoyuv(pix, pix, pix, y_val, u_val, v_val);
y_plane(y + 1, x + 1) = y_val;
u_plane(y / 2, x / 2) = u_val;
v_plane(y / 2, x / 2) = v_val;
}
template <typename T>
__global__ void BGR_to_YV12(const DevMem2D_<T> src, PtrStepb dst)
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
if (x + 1 >= src.cols || y + 1 >= src.rows)
return;
// get pointers to the data
const size_t planeSize = src.rows * dst.step;
PtrStepb y_plane(dst.data, dst.step);
PtrStepb u_plane(y_plane.data + planeSize, dst.step / 2);
PtrStepb v_plane(u_plane.data + (planeSize / 4), dst.step / 2);
T pix;
uchar y_val, u_val, v_val;
pix = src(y, x);
rgbtoy(pix.z, pix.y, pix.x, y_val);
y_plane(y, x) = y_val;
pix = src(y, x + 1);
rgbtoy(pix.z, pix.y, pix.x, y_val);
y_plane(y, x + 1) = y_val;
pix = src(y + 1, x);
rgbtoy(pix.z, pix.y, pix.x, y_val);
y_plane(y + 1, x) = y_val;
pix = src(y + 1, x + 1);
rgbtoyuv(pix.z, pix.y, pix.x, y_val, u_val, v_val);
y_plane(y + 1, x + 1) = y_val;
u_plane(y / 2, x / 2) = u_val;
v_plane(y / 2, x / 2) = v_val;
}
void Gray_to_YV12_caller(const DevMem2Db src, PtrStepb dst)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x * 2), divUp(src.rows, block.y * 2));
Gray_to_YV12<<<grid, block>>>(src, dst);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int cn>
void BGR_to_YV12_caller(const DevMem2Db src, PtrStepb dst)
{
typedef typename TypeVec<uchar, cn>::vec_type src_t;
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x * 2), divUp(src.rows, block.y * 2));
BGR_to_YV12<<<grid, block>>>(static_cast< DevMem2D_<src_t> >(src), dst);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
void YV12_gpu(const DevMem2Db src, int cn, DevMem2Db dst)
{
typedef void (*func_t)(const DevMem2Db src, PtrStepb dst);
static const func_t funcs[] =
{
0, Gray_to_YV12_caller, 0, BGR_to_YV12_caller<3>, BGR_to_YV12_caller<4>
};
funcs[cn](src, dst);
}
}
}}}
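Each kernel above writes one 2x2 block of pixels: four luma samples into the full-resolution Y plane and one sample into each of the two quarter-resolution chroma planes packed right after it, so the destination buffer must hold src.rows * 3/2 lines of dst.step bytes. A rough host-side sketch of sizing the destination and dispatching YV12_gpu (the helper name and GpuMat-based allocation are assumptions; the real host wiring is in modules/gpu/src/video_writer.cpp, whose diff is collapsed below):

    // Hypothetical helper: convert a gray/BGR/BGRA GpuMat into a packed YV12 frame.
    // cn is the channel count of src (1, 3 or 4), matching the dispatch table in YV12_gpu.
    void convertToYV12(const cv::gpu::GpuMat& src, int cn, cv::gpu::GpuMat& yv12)
    {
        // Y plane occupies src.rows lines; the two chroma planes add src.rows / 2 more.
        yv12.create(src.rows * 3 / 2, src.cols, CV_8UC1);

        cv::gpu::device::video_encoding::YV12_gpu(src, cn, yv12);
    }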
modules/gpu/src/precomp.hpp
...
@@ -71,16 +71,22 @@
 #ifdef HAVE_CUDA

-    #include "cuda.h"
-    #include "cuda_runtime_api.h"
-    #include "npp.h"
+    #include <cuda.h>
+    #include <cuda_runtime.h>
+    #include <npp.h>

     #ifdef HAVE_CUFFT
-        #include "cufft.h"
+        #include <cufft.h>
     #endif

     #ifdef HAVE_CUBLAS
-        #include "cublas.h"
+        #include <cublas.h>
     #endif

+    #include <nvcuvid.h>
+
+    #ifdef WIN32
+        #include <NVEncoderAPI.h>
+    #endif
+
     #include "internal_shared.hpp"
...
modules/gpu/src/video_writer.cpp
new file (mode 100644)
This diff is collapsed and not shown here.
modules/gpu/test/test_video.cpp
...
@@ -384,4 +384,66 @@ INSTANTIATE_TEST_CASE_P(GPU_Video, FarnebackOpticalFlow, testing::Combine(
    testing::Values(FarnebackOptFlowFlags(0), FarnebackOptFlowFlags(cv::OPTFLOW_FARNEBACK_GAUSSIAN)),
    testing::Values(UseInitFlow(false), UseInitFlow(true))));

/////////////////////////////////////////////////////////////////////////////////////////////////
// VideoWriter

PARAM_TEST_CASE(VideoWriter, cv::gpu::DeviceInfo, std::string)
{
    cv::gpu::DeviceInfo devInfo;
    std::string inputFile;

    std::string outputFile;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        inputFile = GET_PARAM(1);

        cv::gpu::setDevice(devInfo.deviceID());

        inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + inputFile;
        outputFile = inputFile.substr(0, inputFile.find('.')) + "_test.avi";
    }
};

TEST_P(VideoWriter, Regression)
{
    const double FPS = 25.0;

    cv::VideoCapture reader(inputFile);
    ASSERT_TRUE( reader.isOpened() );

    cv::gpu::VideoWriter_GPU d_writer;

    cv::Mat frame;
    std::vector<cv::Mat> frames;
    cv::gpu::GpuMat d_frame;

    for (int i = 1; i < 10; ++i)
    {
        reader >> frame;

        if (frame.empty())
            break;

        frames.push_back(frame.clone());
        d_frame.upload(frame);

        if (!d_writer.isOpened())
            d_writer.open(outputFile, frame.size(), FPS);

        d_writer.write(d_frame);
    }

    reader.release();
    d_writer.close();

    reader.open(outputFile);
    ASSERT_TRUE( reader.isOpened() );
}

INSTANTIATE_TEST_CASE_P(GPU_Video, VideoWriter, testing::Combine(
    ALL_DEVICES,
    testing::Values("VID00003-20100701-2204.3GP", "big_buck_bunny.mpg")));

} // namespace
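The Regression test above exercises only the file-based open overloads; the callback-based overloads take a user-supplied EncoderCallBack instead. A purely illustrative sketch of such a callback that appends every encoded packet to a raw bitstream file (not part of this commit):

    #include <cstdio>
    #include <vector>
    #include "opencv2/gpu/gpu.hpp"

    // Hypothetical callback: hands the encoder a scratch buffer and writes each
    // returned packet straight to disk.
    class RawFileCallBack : public cv::gpu::VideoWriter_GPU::EncoderCallBack
    {
    public:
        explicit RawFileCallBack(const char* path) : file_(std::fopen(path, "wb")), buffer_(1024 * 1024) {}
        ~RawFileCallBack() { if (file_) std::fclose(file_); }

        unsigned char* acquireBitStream(int* bufferSize)
        {
            *bufferSize = static_cast<int>(buffer_.size());
            return &buffer_[0];
        }

        void releaseBitStream(unsigned char* data, int size)
        {
            std::fwrite(data, 1, size, file_);
        }

        void onBeginFrame(int /*frameNumber*/, PicType /*picType*/) {}
        void onEndFrame(int /*frameNumber*/, PicType /*picType*/) {}

    private:
        std::FILE* file_;
        std::vector<unsigned char> buffer_;
    };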
modules/highgui/src/cap_ffmpeg_api.hpp
...
@@ -65,6 +65,18 @@ typedef int (*CvWriteFrame_Plugin)( void* writer_handle, const unsigned char* da
                                    int width, int height, int cn, int origin);
typedef void (*CvReleaseVideoWriter_Plugin)( void** writer );

/*
 * For CUDA encoder
 */
OPENCV_FFMPEG_API struct OutputMediaStream_FFMPEG* create_OutputMediaStream_FFMPEG(const char* fileName, int width, int height, double fps);
OPENCV_FFMPEG_API void release_OutputMediaStream_FFMPEG(struct OutputMediaStream_FFMPEG* stream);
OPENCV_FFMPEG_API void write_OutputMediaStream_FFMPEG(struct OutputMediaStream_FFMPEG* stream, unsigned char* data, int size);

typedef struct OutputMediaStream_FFMPEG* (*Create_OutputMediaStream_FFMPEG_Plugin)(const char* fileName, int width, int height, double fps);
typedef void (*Release_OutputMediaStream_FFMPEG_Plugin)(struct OutputMediaStream_FFMPEG* stream);
typedef void (*Write_OutputMediaStream_FFMPEG_Plugin)(struct OutputMediaStream_FFMPEG* stream, unsigned char* data, int size);

#ifdef __cplusplus
}
#endif
...
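The *_Plugin typedefs above exist so a consumer can resolve the new CUDA-encoder entry points from the prebuilt opencv_ffmpeg DLL at runtime. A minimal sketch of that lookup on Windows (the loader function and error handling are assumptions, not part of this commit):

    #include <windows.h>
    #include "cap_ffmpeg_api.hpp"

    static Create_OutputMediaStream_FFMPEG_Plugin  create_stream_  = 0;
    static Release_OutputMediaStream_FFMPEG_Plugin release_stream_ = 0;
    static Write_OutputMediaStream_FFMPEG_Plugin   write_stream_   = 0;

    // Hypothetical loader for the FFMPEG wrapper shipped in 3rdparty/ffmpeg
    // (64-bit builds would use opencv_ffmpeg_64.dll).
    static bool loadEncoderPlugin()
    {
        HMODULE module = LoadLibraryA("opencv_ffmpeg.dll");
        if (!module)
            return false;

        create_stream_  = (Create_OutputMediaStream_FFMPEG_Plugin)  GetProcAddress(module, "create_OutputMediaStream_FFMPEG");
        release_stream_ = (Release_OutputMediaStream_FFMPEG_Plugin) GetProcAddress(module, "release_OutputMediaStream_FFMPEG");
        write_stream_   = (Write_OutputMediaStream_FFMPEG_Plugin)   GetProcAddress(module, "write_OutputMediaStream_FFMPEG");

        return create_stream_ && release_stream_ && write_stream_;
    }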
modules/highgui/src/cap_ffmpeg_impl.hpp
...
@@ -1446,3 +1446,295 @@ void CvVideoWriter_FFMPEG::close()
    return writer->writeFrame(data, step, width, height, cn, origin);
}

/*
 * For CUDA encoder
 */

struct OutputMediaStream_FFMPEG
{
    bool open(const char* fileName, int width, int height, double fps);
    void write(unsigned char* data, int size);
    void close();

    // add a video output stream to the container
    static AVStream* addVideoStream(AVFormatContext* oc, CodecID codec_id, int w, int h, int bitrate, double fps, PixelFormat pixel_format);

    AVOutputFormat* fmt_;
    AVFormatContext* oc_;
    AVStream* video_st_;
};

void OutputMediaStream_FFMPEG::close()
{
    // no more frame to compress. The codec has a latency of a few
    // frames if using B frames, so we get the last frames by
    // passing the same picture again

    // TODO -- do we need to account for latency here?

    if (oc_)
    {
        // write the trailer, if any
        av_write_trailer(oc_);

        // free the streams
        for (unsigned int i = 0; i < oc_->nb_streams; ++i)
        {
            av_freep(&oc_->streams[i]->codec);
            av_freep(&oc_->streams[i]);
        }

        if (!(fmt_->flags & AVFMT_NOFILE) && oc_->pb)
        {
            // close the output file
            #if LIBAVCODEC_VERSION_INT < ((52<<16)+(123<<8)+0)
                #if LIBAVCODEC_VERSION_INT >= ((51<<16)+(49<<8)+0)
                    url_fclose(oc_->pb);
                #else
                    url_fclose(&oc_->pb);
                #endif
            #else
                avio_close(oc_->pb);
            #endif
        }

        // free the stream
        av_free(oc_);
    }
}

AVStream* OutputMediaStream_FFMPEG::addVideoStream(AVFormatContext* oc, CodecID codec_id, int w, int h, int bitrate, double fps, PixelFormat pixel_format)
{
    #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 10, 0)
        AVStream* st = avformat_new_stream(oc, 0);
    #else
        AVStream* st = av_new_stream(oc, 0);
    #endif
    if (!st)
        return 0;

    #if LIBAVFORMAT_BUILD > 4628
        AVCodecContext* c = st->codec;
    #else
        AVCodecContext* c = &(st->codec);
    #endif

    c->codec_id = codec_id;
    c->codec_type = AVMEDIA_TYPE_VIDEO;

    // put sample parameters
    unsigned long long lbit_rate = static_cast<unsigned long long>(bitrate);
    lbit_rate += (bitrate / 4);
    lbit_rate = std::min(lbit_rate, static_cast<unsigned long long>(std::numeric_limits<int>::max()));
    c->bit_rate = bitrate;

    // took advice from
    // http://ffmpeg-users.933282.n4.nabble.com/warning-clipping-1-dct-coefficients-to-127-127-td934297.html
    c->qmin = 3;

    // resolution must be a multiple of two
    c->width = w;
    c->height = h;

    AVCodec* codec = avcodec_find_encoder(c->codec_id);

    // time base: this is the fundamental unit of time (in seconds) in terms
    // of which frame timestamps are represented. for fixed-fps content,
    // timebase should be 1/framerate and timestamp increments should be
    // identically 1
    int frame_rate = static_cast<int>(fps + 0.5);
    int frame_rate_base = 1;
    while (fabs(static_cast<double>(frame_rate) / frame_rate_base) - fps > 0.001)
    {
        frame_rate_base *= 10;
        frame_rate = static_cast<int>(fps * frame_rate_base + 0.5);
    }
    c->time_base.den = frame_rate;
    c->time_base.num = frame_rate_base;

    #if LIBAVFORMAT_BUILD > 4752
        // adjust time base for supported framerates
        if (codec && codec->supported_framerates)
        {
            AVRational req = { frame_rate, frame_rate_base };
            const AVRational* best = NULL;
            AVRational best_error = { INT_MAX, 1 };

            for (const AVRational* p = codec->supported_framerates; p->den != 0; ++p)
            {
                AVRational error = av_sub_q(req, *p);

                if (error.num < 0)
                    error.num *= -1;

                if (av_cmp_q(error, best_error) < 0)
                {
                    best_error = error;
                    best = p;
                }
            }

            c->time_base.den = best->num;
            c->time_base.num = best->den;
        }
    #endif

    c->gop_size = 12; // emit one intra frame every twelve frames at most
    c->pix_fmt = pixel_format;

    if (c->codec_id == CODEC_ID_MPEG2VIDEO)
        c->max_b_frames = 2;

    if (c->codec_id == CODEC_ID_MPEG1VIDEO || c->codec_id == CODEC_ID_MSMPEG4V3)
    {
        // needed to avoid using macroblocks in which some coeffs overflow
        // this doesn't happen with normal video, it just happens here as the
        // motion of the chroma plane doesn't match the luma plane

        // avoid FFMPEG warning 'clipping 1 dct coefficients...'
        c->mb_decision = 2;
    }

    #if LIBAVCODEC_VERSION_INT > 0x000409
        // some formats want stream headers to be separate
        if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        {
            c->flags |= CODEC_FLAG_GLOBAL_HEADER;
        }
    #endif

    return st;
}

bool OutputMediaStream_FFMPEG::open(const char* fileName, int width, int height, double fps)
{
    fmt_ = 0;
    oc_ = 0;
    video_st_ = 0;

    // tell FFMPEG to register codecs
    av_register_all();

    av_log_set_level(AV_LOG_ERROR);

    // auto detect the output format from the name and fourcc code
    #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
        fmt_ = av_guess_format(NULL, fileName, NULL);
    #else
        fmt_ = guess_format(NULL, fileName, NULL);
    #endif
    if (!fmt_)
        return false;

    CodecID codec_id = CODEC_ID_H264;

    // alloc memory for context
    #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
        oc_ = avformat_alloc_context();
    #else
        oc_ = av_alloc_format_context();
    #endif
    if (!oc_)
        return false;

    // set some options
    oc_->oformat = fmt_;
    snprintf(oc_->filename, sizeof(oc_->filename), "%s", fileName);

    oc_->max_delay = (int)(0.7 * AV_TIME_BASE); // This reduces buffer underrun warnings with MPEG

    // set a few optimal pixel formats for lossless codecs of interest..
    PixelFormat codec_pix_fmt = PIX_FMT_YUV420P;
    int bitrate_scale = 64;

    // TODO -- safe to ignore output audio stream?
    video_st_ = addVideoStream(oc_, codec_id, width, height, width * height * bitrate_scale, fps, codec_pix_fmt);
    if (!video_st_)
        return false;

    // set the output parameters (must be done even if no parameters)
    #if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 2, 0)
        if (av_set_parameters(oc_, NULL) < 0)
            return false;
    #endif

    // now that all the parameters are set, we can open the audio and
    // video codecs and allocate the necessary encode buffers

    #if LIBAVFORMAT_BUILD > 4628
        AVCodecContext* c = (video_st_->codec);
    #else
        AVCodecContext* c = &(video_st_->codec);
    #endif

    c->codec_tag = MKTAG('H', '2', '6', '4');
    c->bit_rate_tolerance = c->bit_rate;

    // open the output file, if needed
    if (!(fmt_->flags & AVFMT_NOFILE))
    {
        #if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 2, 0)
            int err = url_fopen(&oc_->pb, fileName, URL_WRONLY);
        #else
            int err = avio_open(&oc_->pb, fileName, AVIO_FLAG_WRITE);
        #endif

        if (err != 0)
            return false;
    }

    // write the stream header, if any
    #if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 2, 0)
        av_write_header(oc_);
    #else
        avformat_write_header(oc_, NULL);
    #endif

    return true;
}

void OutputMediaStream_FFMPEG::write(unsigned char* data, int size)
{
    // if zero size, it means the image was buffered
    if (size > 0)
    {
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.stream_index = video_st_->index;
        pkt.data = data;
        pkt.size = size;

        // write the compressed frame in the media file
        av_write_frame(oc_, &pkt);
    }
}

struct OutputMediaStream_FFMPEG* create_OutputMediaStream_FFMPEG(const char* fileName, int width, int height, double fps)
{
    OutputMediaStream_FFMPEG* stream = (OutputMediaStream_FFMPEG*) malloc(sizeof(OutputMediaStream_FFMPEG));

    if (stream->open(fileName, width, height, fps))
        return stream;

    stream->close();
    free(stream);

    return 0;
}

void release_OutputMediaStream_FFMPEG(struct OutputMediaStream_FFMPEG* stream)
{
    stream->close();
    free(stream);
}

void write_OutputMediaStream_FFMPEG(struct OutputMediaStream_FFMPEG* stream, unsigned char* data, int size)
{
    stream->write(data, size);
}
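The time-base loop in addVideoStream above turns a possibly non-integer fps into a rational frame_rate / frame_rate_base before FFmpeg snaps it to a supported frame rate. A standalone sketch of that approximation with two worked examples (the function name is mine, not part of the commit):

    #include <cmath>
    #include <cstdio>

    // Same loop as in OutputMediaStream_FFMPEG::addVideoStream: grow the denominator by
    // factors of 10 until frame_rate / frame_rate_base no longer exceeds fps by more than 0.001.
    static void approximateFps(double fps, int& frame_rate, int& frame_rate_base)
    {
        frame_rate = static_cast<int>(fps + 0.5);
        frame_rate_base = 1;
        while (fabs(static_cast<double>(frame_rate) / frame_rate_base) - fps > 0.001)
        {
            frame_rate_base *= 10;
            frame_rate = static_cast<int>(fps * frame_rate_base + 0.5);
        }
    }

    int main()
    {
        int num, den;
        approximateFps(25.0, num, den);    // 25 fps    -> 25/1
        std::printf("25.0  -> %d/%d\n", num, den);
        approximateFps(29.97, num, den);   // 29.97 fps -> 2997/100
        std::printf("29.97 -> %d/%d\n", num, den);
        return 0;
    }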
modules/highgui/src/cap_ffmpeg_impl_v2.hpp
...
@@ -43,6 +43,7 @@
 #include "cap_ffmpeg_api.hpp"
 #include <assert.h>
 #include <algorithm>
+#include <limits>

 #if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning( disable: 4244 4510 4512 4610 )
...
@@ -1611,3 +1612,295 @@ void CvVideoWriter_FFMPEG::close()
    return writer->writeFrame(data, step, width, height, cn, origin);
}

/*
 * For CUDA encoder
 */

(The remainder of this hunk adds the same OutputMediaStream_FFMPEG struct, its close/addVideoStream/open/write methods, and the create_/release_/write_OutputMediaStream_FFMPEG wrappers shown above for cap_ffmpeg_impl.hpp.)
samples/gpu/video_writer.cpp
new file (mode 100644)

#include <iostream>
#include <vector>
#include <numeric>

#include "opencv2/core/core.hpp"
#include "opencv2/gpu/gpu.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp"

int main(int argc, const char* argv[])
{
    if (argc != 2)
    {
        std::cerr << "Usage : video_writer <input video file>" << std::endl;
        return -1;
    }

    const double FPS = 25.0;

    cv::VideoCapture reader(argv[1]);

    if (!reader.isOpened())
    {
        std::cerr << "Can't open input video file" << std::endl;
        return -1;
    }

    cv::gpu::printShortCudaDeviceInfo(cv::gpu::getDevice());

    cv::VideoWriter writer;
    cv::gpu::VideoWriter_GPU d_writer;

    cv::Mat frame;
    cv::gpu::GpuMat d_frame;

    std::vector<double> cpu_times;
    std::vector<double> gpu_times;
    cv::TickMeter tm;

    for (int i = 1;; ++i)
    {
        std::cout << "Read " << i << " frame" << std::endl;

        reader >> frame;

        if (frame.empty())
        {
            std::cout << "Stop" << std::endl;
            break;
        }

        if (!writer.isOpened())
        {
            std::cout << "Frame Size : " << frame.cols << "x" << frame.rows << std::endl;

            std::cout << "Open CPU Writer" << std::endl;

            if (!writer.open("output_cpu.avi", CV_FOURCC('X', 'V', 'I', 'D'), FPS, frame.size()))
                return -1;
        }

        if (!d_writer.isOpened())
        {
            std::cout << "Open GPU Writer" << std::endl;

            d_writer.open("output_gpu.avi", frame.size(), FPS);
        }

        d_frame.upload(frame);

        std::cout << "Write " << i << " frame" << std::endl;

        tm.reset();
        tm.start();
        writer.write(frame);
        tm.stop();
        cpu_times.push_back(tm.getTimeMilli());

        tm.reset();
        tm.start();
        d_writer.write(d_frame);
        tm.stop();
        gpu_times.push_back(tm.getTimeMilli());
    }

    std::cout << std::endl << "Results:" << std::endl;

    std::sort(cpu_times.begin(), cpu_times.end());
    std::sort(gpu_times.begin(), gpu_times.end());

    double cpu_avg = std::accumulate(cpu_times.begin(), cpu_times.end(), 0.0) / cpu_times.size();
    double gpu_avg = std::accumulate(gpu_times.begin(), gpu_times.end(), 0.0) / gpu_times.size();

    std::cout << "CPU [XVID] : Avg : " << cpu_avg << " ms FPS : " << 1000.0 / cpu_avg << std::endl;
    std::cout << "GPU [H264] : Avg : " << gpu_avg << " ms FPS : " << 1000.0 / gpu_avg << std::endl;

    return 0;
}