Commit 2fe8fd39 authored by Stefano Sabatini

examples/demuxing: add audio decoding/demuxing

parent ec794685
@@ -25,75 +25,185 @@
 * libavformat demuxing API use example.
 *
 * Show how to use the libavformat and libavcodec API to demux and
 * decode audio and video data.
 */

#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>

static AVFormatContext *fmt_ctx = NULL;
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
static AVStream *video_stream = NULL, *audio_stream = NULL;
static const char *src_filename = NULL;
static const char *video_dst_filename = NULL;
static const char *audio_dst_filename = NULL;
static FILE *video_dst_file = NULL;
static FILE *audio_dst_file = NULL;

static uint8_t *video_dst_data[4] = {NULL};
static int video_dst_linesize[4];
static int video_dst_bufsize;

static uint8_t **audio_dst_data = NULL;
static int audio_dst_linesize;
static int audio_dst_bufsize;

static int video_stream_idx = -1, audio_stream_idx = -1;
static AVFrame *frame = NULL;
static AVPacket pkt;
static int video_frame_count = 0;
static int audio_frame_count = 0;
static int decode_packet(int *got_frame, int cached)
{
    int ret = 0;

    if (pkt.stream_index == video_stream_idx) {
        /* decode video frame */
        ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding video frame\n");
            return ret;
        }

        if (*got_frame) {
            printf("video_frame%s n:%d coded_n:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   video_frame_count++, frame->coded_picture_number,
                   av_ts2timestr(frame->pts, &video_dec_ctx->time_base));

            /* copy decoded frame to destination buffer:
             * this is required since rawvideo expects non aligned data */
            av_image_copy(video_dst_data, video_dst_linesize,
                          (const uint8_t **)(frame->data), frame->linesize,
                          video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);

            /* write to rawvideo file */
            fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
        }
    } else if (pkt.stream_index == audio_stream_idx) {
        /* decode audio frame */
        ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding audio frame\n");
            return ret;
        }

        if (*got_frame) {
            printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   audio_frame_count++, frame->nb_samples,
                   av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));

            ret = av_samples_alloc(audio_dst_data, &audio_dst_linesize, frame->channels,
                                   frame->nb_samples, frame->format, 1);
            if (ret < 0) {
                fprintf(stderr, "Could not allocate audio buffer\n");
                return AVERROR(ENOMEM);
            }

            /* TODO: extend return code of the av_samples_* functions so that this call is not needed */
            audio_dst_bufsize =
                av_samples_get_buffer_size(NULL, frame->channels,
                                           frame->nb_samples, frame->format, 1);

            /* copy audio data to destination buffer:
             * this is required since rawaudio expects non aligned data */
            av_samples_copy(audio_dst_data, frame->data, 0, 0,
                            frame->nb_samples, frame->channels, frame->format);

            /* write to rawaudio file */
            fwrite(audio_dst_data[0], 1, audio_dst_bufsize, audio_dst_file);
            av_freep(&audio_dst_data[0]);
        }
    }

    return ret;
}
static int open_codec_context(int *stream_idx,
                              AVFormatContext *fmt_ctx, enum AVMediaType type)
{
    int ret;
    AVStream *st;
    AVCodecContext *dec_ctx = NULL;
    AVCodec *dec = NULL;

    ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not find %s stream in input file '%s'\n",
                av_get_media_type_string(type), src_filename);
        return ret;
    } else {
        *stream_idx = ret;
        st = fmt_ctx->streams[*stream_idx];

        /* find decoder for the stream */
        dec_ctx = st->codec;
        dec = avcodec_find_decoder(dec_ctx->codec_id);
        if (!dec) {
            fprintf(stderr, "Failed to find %s codec\n",
                    av_get_media_type_string(type));
            return ret;
        }

        if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
            fprintf(stderr, "Failed to open %s codec\n",
                    av_get_media_type_string(type));
            return ret;
        }
    }

    return 0;
}
cached ? "(cached)" : "",
frame_count++, frame->coded_picture_number,
av_ts2timestr(frame->pts, &dec_ctx->time_base));
/* copy decoded frame to destination buffer: static int get_format_from_sample_fmt(const char **fmt,
* this is required since rawvideo expect non aligned data */ enum AVSampleFormat sample_fmt)
av_image_copy(dst_data, dst_linesize, {
(const uint8_t **)(frame->data), frame->linesize, int i;
dec_ctx->pix_fmt, dec_ctx->width, dec_ctx->height); struct sample_fmt_entry {
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
} sample_fmt_entries[] = {
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
};
*fmt = NULL;
/* write to rawvideo file */ for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
fwrite(dst_data[0], 1, dst_bufsize, dst_file); struct sample_fmt_entry *entry = &sample_fmt_entries[i];
if (sample_fmt == entry->sample_fmt) {
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
return 0;
}
} }
return ret; fprintf(stderr,
"sample format %s is not supported as output format\n",
av_get_sample_fmt_name(sample_fmt));
return -1;
} }
int main (int argc, char **argv)
{
    int ret = 0, got_frame;

    if (argc != 4) {
        fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
                "API example program to show how to read frames from an input file.\n"
                "This program reads frames from a file, decodes them, and writes decoded\n"
                "video frames to a rawvideo file named video_output_file, and decoded\n"
                "audio frames to a rawaudio file named audio_output_file.\n"
                "\n", argv[0]);
        exit(1);
    }
    src_filename = argv[1];
    video_dst_filename = argv[2];
    audio_dst_filename = argv[3];

    /* register all formats and codecs */
    av_register_all();
@@ -110,60 +220,75 @@ int main (int argc, char **argv)
        exit(1);
    }

    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;

        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

        /* allocate image where the decoded image will be put */
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             video_dec_ctx->width, video_dec_ctx->height,
                             video_dec_ctx->pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        video_dst_bufsize = ret;
    }

    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, src_filename, 0);

    if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
        int nb_planes;

        audio_stream = fmt_ctx->streams[audio_stream_idx];
        audio_dec_ctx = audio_stream->codec;
        audio_dst_file = fopen(audio_dst_filename, "wb");
        if (!audio_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
            ret = 1;
            goto end;
        }

        nb_planes = av_sample_fmt_is_planar(audio_dec_ctx->sample_fmt) ?
            audio_dec_ctx->channels : 1;
        audio_dst_data = av_mallocz(sizeof(uint8_t *) * nb_planes);
        if (!audio_dst_data) {
            fprintf(stderr, "Could not allocate audio data buffers\n");
            ret = AVERROR(ENOMEM);
            goto end;
        }
    }

    if (!audio_stream && !video_stream) {
        fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }

    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (video_stream)
        printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
    if (audio_stream)
        printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);

    /* read frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0)
@@ -176,18 +301,39 @@ int main (int argc, char **argv)
        decode_packet(&got_frame, 1);
    } while (got_frame);

    printf("Demuxing succeeded.\n");

    if (video_stream) {
        printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
               av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height,
               video_dst_filename);
    }

    if (audio_stream) {
        const char *fmt;

        if ((ret = get_format_from_sample_fmt(&fmt, audio_dec_ctx->sample_fmt)) < 0)
            goto end;
        printf("Play the output audio file with the command:\n"
               "ffplay -f %s -ac %d -ar %d %s\n",
               fmt, audio_dec_ctx->channels, audio_dec_ctx->sample_rate,
               audio_dst_filename);
    }

end:
    if (video_dec_ctx)
        avcodec_close(video_dec_ctx);
    if (audio_dec_ctx)
        avcodec_close(audio_dec_ctx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    if (audio_dst_file)
        fclose(audio_dst_file);
    av_free(frame);
    av_free(video_dst_data[0]);
    av_free(audio_dst_data);

    return ret < 0;
}
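The body of the packet-reading loop falls between the last two hunks and is not shown on this page. As a rough sketch, not the elided hunk itself, this is how decode_packet() is usually driven under the pre-send/receive decoding API used above: read each packet with av_read_frame(), decode it, free it, then flush the decoders' buffered frames with an empty packet.

/* Hypothetical reconstruction of the elided read/flush loop; the actual
 * hunk content is not shown on this page. */
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
    decode_packet(&got_frame, 0); /* decode a freshly demuxed packet */
    av_free_packet(&pkt);         /* release the packet's buffer */
}

/* flush cached frames: the decoders may hold frames internally, so keep
 * feeding them an empty packet until no more frames come out */
pkt.data = NULL;
pkt.size = 0;
do {
    decode_packet(&got_frame, 1);
} while (got_frame);

The second argument of decode_packet() is the cached flag, which only changes the log prefix so that frames flushed out of the decoder can be told apart from frames decoded straight from demuxed packets.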