Commit 728d90a0 authored by Anton Khirnov

h264: decouple h264_sei from the h264 decoder

Make the SEI parsing independent of the H264Context, to allow
decoupling the parser from the decoder.
parent c8dcff0c
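
In practice, the change replaces the H264Context-bound entry points ff_h264_decode_sei() / ff_h264_reset_sei() with ff_h264_sei_decode() / ff_h264_sei_uninit(), which operate only on a new standalone H264SEIContext. As a rough sketch of what a consumer can now look like (hypothetical: the MySEIClient type and my_sei_* helpers below are made up for illustration and assume compilation inside libavcodec, where h264_sei.h and h2645_parse.h are available):

/* Hypothetical SEI consumer sketch, not part of the commit. */
#include "h264_sei.h"
#include "h2645_parse.h"

typedef struct MySEIClient {
    H264SEIContext sei;   /* plain value member; no H264Context involved */
} MySEIClient;

/* Call at every access unit boundary, like the decoder and parser do. */
static void my_sei_start_frame(MySEIClient *c)
{
    ff_h264_sei_uninit(&c->sei);
}

/* Feed one SEI NAL unit; ps is whatever parameter-set context the caller owns. */
static int my_sei_feed_nal(MySEIClient *c, const H2645NAL *nal,
                           const struct H264ParamSets *ps, void *logctx)
{
    GetBitContext gb = nal->gb;   /* work on a copy of the NAL's bit reader */
    return ff_h264_sei_decode(&c->sei, &gb, ps, logctx);
}

/* Example query: a recovery point marks the next decoded frame as a key frame. */
static int my_sei_is_sync_point(const MySEIClient *c)
{
    return c->sei.recovery_point.recovery_frame_cnt >= 0;
}
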
@@ -400,7 +400,6 @@ static int h264_init_context(AVCodecContext *avctx, H264Context *h)
     h->workaround_bugs = avctx->workaround_bugs;
     h->flags           = avctx->flags;
     h->poc.prev_poc_msb = 1 << 16;
-    h->x264_build      = -1;
     h->recovery_frame  = -1;
     h->frame_recovered = 0;
@@ -408,7 +407,7 @@ static int h264_init_context(AVCodecContext *avctx, H264Context *h)
     for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
         h->last_pocs[i] = INT_MIN;
-    ff_h264_reset_sei(h);
+    ff_h264_sei_uninit(&h->sei);
     avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
@@ -545,7 +544,8 @@ static void decode_postinit(H264Context *h, int setup_finished)
      * decoding process if it exists. */
     if (sps->pic_struct_present_flag) {
-        switch (h->sei_pic_struct) {
+        H264SEIPictureTiming *pt = &h->sei.picture_timing;
+        switch (pt->pic_struct) {
         case SEI_PIC_STRUCT_FRAME:
             break;
         case SEI_PIC_STRUCT_TOP_FIELD:
@@ -575,9 +575,9 @@ static void decode_postinit(H264Context *h, int setup_finished)
             break;
         }
-        if ((h->sei_ct_type & 3) &&
-            h->sei_pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP)
-            cur->f->interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
+        if ((pt->ct_type & 3) &&
+            pt->pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP)
+            cur->f->interlaced_frame = (pt->ct_type & (1 << 1)) != 0;
     } else {
         /* Derive interlacing flag from used decoding process. */
         cur->f->interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
@@ -591,8 +591,8 @@ static void decode_postinit(H264Context *h, int setup_finished)
     if (cur->f->interlaced_frame || sps->pic_struct_present_flag) {
         /* Use picture timing SEI information. Even if it is a
          * information of a past frame, better than nothing. */
-        if (h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM ||
-            h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
+        if (h->sei.picture_timing.pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM ||
+            h->sei.picture_timing.pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
             cur->f->top_field_first = 1;
         else
             cur->f->top_field_first = 0;
@@ -602,16 +602,17 @@ static void decode_postinit(H264Context *h, int setup_finished)
         }
     }
-    if (h->sei_frame_packing_present &&
-        h->frame_packing_arrangement_type >= 0 &&
-        h->frame_packing_arrangement_type <= 6 &&
-        h->content_interpretation_type > 0 &&
-        h->content_interpretation_type < 3) {
+    if (h->sei.frame_packing.present &&
+        h->sei.frame_packing.arrangement_type >= 0 &&
+        h->sei.frame_packing.arrangement_type <= 6 &&
+        h->sei.frame_packing.content_interpretation_type > 0 &&
+        h->sei.frame_packing.content_interpretation_type < 3) {
+        H264SEIFramePacking *fp = &h->sei.frame_packing;
         AVStereo3D *stereo = av_stereo3d_create_side_data(cur->f);
         if (!stereo)
             return;
-        switch (h->frame_packing_arrangement_type) {
+        switch (fp->arrangement_type) {
         case 0:
             stereo->type = AV_STEREO3D_CHECKERBOARD;
             break;
@@ -622,7 +623,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
             stereo->type = AV_STEREO3D_LINES;
             break;
         case 3:
-            if (h->quincunx_subsampling)
+            if (fp->quincunx_subsampling)
                 stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
             else
                 stereo->type = AV_STEREO3D_SIDEBYSIDE;
@@ -638,13 +639,16 @@ static void decode_postinit(H264Context *h, int setup_finished)
             break;
         }
-        if (h->content_interpretation_type == 2)
+        if (fp->content_interpretation_type == 2)
             stereo->flags = AV_STEREO3D_FLAG_INVERT;
     }
-    if (h->sei_display_orientation_present &&
-        (h->sei_anticlockwise_rotation || h->sei_hflip || h->sei_vflip)) {
-        double angle = h->sei_anticlockwise_rotation * 360 / (double) (1 << 16);
+    if (h->sei.display_orientation.present &&
+        (h->sei.display_orientation.anticlockwise_rotation ||
+         h->sei.display_orientation.hflip ||
+         h->sei.display_orientation.vflip)) {
+        H264SEIDisplayOrientation *o = &h->sei.display_orientation;
+        double angle = o->anticlockwise_rotation * 360 / (double) (1 << 16);
         AVFrameSideData *rotation = av_frame_new_side_data(cur->f,
                                                            AV_FRAME_DATA_DISPLAYMATRIX,
                                                            sizeof(int32_t) * 9);
@@ -653,29 +657,30 @@ static void decode_postinit(H264Context *h, int setup_finished)
         av_display_rotation_set((int32_t *)rotation->data, angle);
         av_display_matrix_flip((int32_t *)rotation->data,
-                               h->sei_hflip, h->sei_vflip);
+                               o->hflip, o->vflip);
     }
-    if (h->sei_reguserdata_afd_present) {
+    if (h->sei.afd.present) {
         AVFrameSideData *sd = av_frame_new_side_data(cur->f, AV_FRAME_DATA_AFD,
                                                      sizeof(uint8_t));
         if (!sd)
             return;
-        *sd->data = h->active_format_description;
-        h->sei_reguserdata_afd_present = 0;
+        *sd->data = h->sei.afd.active_format_description;
+        h->sei.afd.present = 0;
     }
-    if (h->a53_caption) {
+    if (h->sei.a53_caption.a53_caption) {
+        H264SEIA53Caption *a53 = &h->sei.a53_caption;
         AVFrameSideData *sd = av_frame_new_side_data(cur->f,
                                                      AV_FRAME_DATA_A53_CC,
-                                                     h->a53_caption_size);
+                                                     a53->a53_caption_size);
         if (!sd)
             return;
-        memcpy(sd->data, h->a53_caption, h->a53_caption_size);
-        av_freep(&h->a53_caption);
-        h->a53_caption_size = 0;
+        memcpy(sd->data, a53->a53_caption, a53->a53_caption_size);
+        av_freep(&a53->a53_caption);
+        a53->a53_caption_size = 0;
     }
     // FIXME do something with unavailable reference frames
@@ -831,7 +836,7 @@ void ff_h264_flush_change(H264Context *h)
     if (h->cur_pic_ptr)
         h->cur_pic_ptr->reference = 0;
     h->first_field = 0;
-    ff_h264_reset_sei(h);
+    ff_h264_sei_uninit(&h->sei);
     h->recovery_frame = -1;
     h->frame_recovered = 0;
 }
@@ -927,7 +932,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
         h->current_slice = 0;
         if (!h->first_field)
             h->cur_pic_ptr = NULL;
-        ff_h264_reset_sei(h);
+        ff_h264_sei_uninit(&h->sei);
     }
     ret = ff_h2645_packet_split(&h->pkt, buf, buf_size, avctx, h->is_avc,
@@ -984,13 +989,13 @@ again:
             if ((err = ff_h264_decode_slice_header(h, sl)))
                 break;
-            if (h->sei_recovery_frame_cnt >= 0 && h->recovery_frame < 0) {
-                h->recovery_frame = (h->poc.frame_num + h->sei_recovery_frame_cnt) &
+            if (h->sei.recovery_point.recovery_frame_cnt >= 0 && h->recovery_frame < 0) {
+                h->recovery_frame = (h->poc.frame_num + h->sei.recovery_point.recovery_frame_cnt) &
                                     ((1 << h->ps.sps->log2_max_frame_num) - 1);
             }
             h->cur_pic_ptr->f->key_frame |=
-                (nal->type == NAL_IDR_SLICE) || (h->sei_recovery_frame_cnt >= 0);
+                (nal->type == NAL_IDR_SLICE) || (h->sei.recovery_point.recovery_frame_cnt >= 0);
             if (nal->type == NAL_IDR_SLICE || h->recovery_frame == h->poc.frame_num) {
                 h->recovery_frame = -1;
@@ -1034,8 +1039,7 @@ again:
                 goto end;
             break;
         case NAL_SEI:
-            h->gb = nal->gb;
-            ret = ff_h264_decode_sei(h);
+            ret = ff_h264_sei_decode(&h->sei, &nal->gb, &h->ps, avctx);
             if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                 goto end;
             break;
...
@@ -35,6 +35,7 @@
 #include "error_resilience.h"
 #include "get_bits.h"
 #include "h264_parse.h"
+#include "h264_sei.h"
 #include "h2645_parse.h"
 #include "h264chroma.h"
 #include "h264dsp.h"
@@ -127,34 +128,6 @@ enum {
     NAL_FF_IGNORE = 0xff0f001,
 };
-/**
- * SEI message types
- */
-typedef enum {
-    SEI_TYPE_BUFFERING_PERIOD       = 0,   ///< buffering period (H.264, D.1.1)
-    SEI_TYPE_PIC_TIMING             = 1,   ///< picture timing
-    SEI_TYPE_USER_DATA_REGISTERED   = 4,   ///< registered user data as specified by Rec. ITU-T T.35
-    SEI_TYPE_USER_DATA_UNREGISTERED = 5,   ///< unregistered user data
-    SEI_TYPE_RECOVERY_POINT         = 6,   ///< recovery point (frame # to decoder sync)
-    SEI_TYPE_FRAME_PACKING          = 45,  ///< frame packing arrangement
-    SEI_TYPE_DISPLAY_ORIENTATION    = 47,  ///< display orientation
-} SEI_Type;
-
-/**
- * pic_struct in picture timing SEI message
- */
-typedef enum {
-    SEI_PIC_STRUCT_FRAME             = 0, ///< 0: %frame
-    SEI_PIC_STRUCT_TOP_FIELD         = 1, ///< 1: top field
-    SEI_PIC_STRUCT_BOTTOM_FIELD      = 2, ///< 2: bottom field
-    SEI_PIC_STRUCT_TOP_BOTTOM        = 3, ///< 3: top field, bottom field, in that order
-    SEI_PIC_STRUCT_BOTTOM_TOP        = 4, ///< 4: bottom field, top field, in that order
-    SEI_PIC_STRUCT_TOP_BOTTOM_TOP    = 5, ///< 5: top field, bottom field, top field repeated, in that order
-    SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM = 6, ///< 6: bottom field, top field, bottom field repeated, in that order
-    SEI_PIC_STRUCT_FRAME_DOUBLING    = 7, ///< 7: %frame doubling
-    SEI_PIC_STRUCT_FRAME_TRIPLING    = 8  ///< 8: %frame tripling
-} SEI_PicStructType;
-
 /**
  * Sequence parameter set
  */
@@ -551,8 +524,6 @@ typedef struct H264Context {
     const uint8_t *field_scan8x8_q0;
     const uint8_t *field_scan8x8_cavlc_q0;
-    int x264_build;
-
     int mb_y;
     int mb_height, mb_width;
     int mb_stride;
@@ -635,11 +606,6 @@ typedef struct H264Context {
     /** @} */
-    /**
-     * pic_struct in picture timing SEI message
-     */
-    SEI_PicStructType sei_pic_struct;
-
     /**
      * Complement sei_pic_struct
     * SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced frames.
@@ -648,55 +614,6 @@ typedef struct H264Context {
      */
     int prev_interlaced_frame;
-    /**
-     * frame_packing_arrangment SEI message
-     */
-    int sei_frame_packing_present;
-    int frame_packing_arrangement_type;
-    int content_interpretation_type;
-    int quincunx_subsampling;
-
-    /**
-     * display orientation SEI message
-     */
-    int sei_display_orientation_present;
-    int sei_anticlockwise_rotation;
-    int sei_hflip, sei_vflip;
-
-    /**
-     * User data registered by Rec. ITU-T T.35 SEI
-     */
-    int sei_reguserdata_afd_present;
-    uint8_t active_format_description;
-    int a53_caption_size;
-    uint8_t *a53_caption;
-
-    /**
-     * Bit set of clock types for fields/frames in picture timing SEI message.
-     * For each found ct_type, appropriate bit is set (e.g., bit 1 for
-     * interlaced).
-     */
-    int sei_ct_type;
-
-    /**
-     * dpb_output_delay in picture timing SEI message, see H.264 C.2.2
-     */
-    int sei_dpb_output_delay;
-
-    /**
-     * cpb_removal_delay in picture timing SEI message, see H.264 C.1.2
-     */
-    int sei_cpb_removal_delay;
-
-    /**
-     * recovery_frame_cnt from SEI message
-     *
-     * Set to -1 if no recovery point SEI message found or to number of frames
-     * before playback synchronizes. Frames having recovery point are key
-     * frames.
-     */
-    int sei_recovery_frame_cnt;
-
     /**
      * recovery_frame is the frame_num at which the next frame should
      * be fully constructed.
@@ -724,12 +641,10 @@ typedef struct H264Context {
      * slices) anymore */
     int setup_finished;
-    // Timestamp stuff
-    int sei_buffering_period_present;   ///< Buffering period SEI flag
-    int initial_cpb_removal_delay[32];  ///< Initial timestamps for CPBs
-
     int enable_er;
+    H264SEIContext sei;
+
     AVBufferPool *qscale_table_pool;
     AVBufferPool *mb_type_pool;
     AVBufferPool *motion_val_pool;
@@ -742,11 +657,6 @@ typedef struct H264Context {
 extern const uint16_t ff_h264_mb_sizes[4];
-/**
- * Decode SEI
- */
-int ff_h264_decode_sei(H264Context *h);
-
 /**
  * Decode SPS
  */
@@ -828,13 +738,6 @@ void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, int mb_x, int
                        uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
                        unsigned int linesize, unsigned int uvlinesize);
-/**
- * Reset SEI values at the beginning of the frame.
- *
- * @param h H.264 context.
- */
-void ff_h264_reset_sei(H264Context *h);
-
 /*
  * o-o o-o
  *  / / /
...
@@ -390,7 +390,7 @@ single_col:
               (l1ref0[0] < 0 && !l1ref1[0] &&
                FFABS(l1mv1[0][0]) <= 1 &&
                FFABS(l1mv1[0][1]) <= 1 &&
-               h->x264_build > 33U))) {
+               h->sei.unregistered.x264_build > 33U))) {
            a = b = 0;
            if (ref[0] > 0)
                a = mv[0];
@@ -425,7 +425,7 @@ single_col:
                    (l1ref0[i8] == 0 ||
                     (l1ref0[i8] < 0 &&
                      l1ref1[i8] == 0 &&
-                     h->x264_build > 33U))) {
+                     h->sei.unregistered.x264_build > 33U))) {
                    const int16_t (*l1mv)[2] = l1ref0[i8] == 0 ? l1mv0 : l1mv1;
                    if (IS_SUB_8X8(sub_mb_type)) {
                        const int16_t *mv_col = l1mv[x8 * 3 + y8 * 3 * b4_stride];
...
@@ -37,6 +37,7 @@
 #include "get_bits.h"
 #include "golomb.h"
 #include "h264.h"
+#include "h264_sei.h"
 #include "h264data.h"
 #include "internal.h"
 #include "mpegutils.h"
@@ -48,6 +49,7 @@ typedef struct H264ParseContext {
     H264ParamSets ps;
     H264DSPContext h264dsp;
     H264POCContext poc;
+    H264SEIContext sei;
     int got_first;
 } H264ParseContext;
@@ -216,7 +218,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
     s->picture_structure = AV_PICTURE_STRUCTURE_UNKNOWN;
     h->avctx = avctx;
-    ff_h264_reset_sei(h);
+    ff_h264_sei_uninit(&p->sei);
     if (!buf_size)
         return 0;
@@ -270,7 +272,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
                            nal.size_bits);
            break;
        case NAL_SEI:
-            ff_h264_decode_sei(h);
+            ff_h264_sei_decode(&p->sei, &nal.gb, &p->ps, avctx);
            break;
        case NAL_IDR_SLICE:
            s->key_frame = 1;
@@ -284,7 +286,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
            get_ue_golomb(&nal.gb);  // skip first_mb_in_slice
            slice_type   = get_ue_golomb_31(&nal.gb);
            s->pict_type = ff_h264_golomb_to_pict_type[slice_type % 5];
-            if (h->sei_recovery_frame_cnt >= 0) {
+            if (p->sei.recovery_point.recovery_frame_cnt >= 0) {
                /* key frame, since recovery_frame_cnt is set */
                s->key_frame = 1;
            }
@@ -405,7 +407,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
            }
            if (sps->pic_struct_present_flag) {
-                switch (h->sei_pic_struct) {
+                switch (p->sei.picture_timing.pic_struct) {
                case SEI_PIC_STRUCT_TOP_FIELD:
                case SEI_PIC_STRUCT_BOTTOM_FIELD:
                    s->repeat_pict = 0;
@@ -436,7 +438,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
            if (h->picture_structure == PICT_FRAME) {
                s->picture_structure = AV_PICTURE_STRUCTURE_FRAME;
                if (sps->pic_struct_present_flag) {
-                    switch (h->sei_pic_struct) {
+                    switch (p->sei.picture_timing.pic_struct) {
                    case SEI_PIC_STRUCT_TOP_BOTTOM:
                    case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
                        s->field_order = AV_FIELD_TT;
@@ -521,10 +523,10 @@ static int h264_parse(AVCodecParserContext *s,
        parse_nal_units(s, avctx, buf, buf_size);
-        if (h->sei_cpb_removal_delay >= 0) {
-            s->dts_sync_point    = h->sei_buffering_period_present;
-            s->dts_ref_dts_delta = h->sei_cpb_removal_delay;
-            s->pts_dts_delta     = h->sei_dpb_output_delay;
+        if (p->sei.picture_timing.cpb_removal_delay >= 0) {
+            s->dts_sync_point    = p->sei.buffering_period.present;
+            s->dts_ref_dts_delta = p->sei.picture_timing.cpb_removal_delay;
+            s->pts_dts_delta     = p->sei.picture_timing.dpb_output_delay;
        } else {
            s->dts_sync_point    = INT_MIN;
            s->dts_ref_dts_delta = INT_MIN;
@@ -581,6 +583,8 @@ static void h264_close(AVCodecParserContext *s)
     av_free(pc->buffer);
     ff_h264_free_context(h);
+    ff_h264_sei_uninit(&p->sei);
+
     for (i = 0; i < FF_ARRAY_ELEMS(p->ps.sps_list); i++)
         av_buffer_unref(&p->ps.sps_list[i]);
...
/*
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_H264_SEI_H
#define AVCODEC_H264_SEI_H

#include "get_bits.h"

/**
 * SEI message types
 */
typedef enum {
    SEI_TYPE_BUFFERING_PERIOD       = 0,   ///< buffering period (H.264, D.1.1)
    SEI_TYPE_PIC_TIMING             = 1,   ///< picture timing
    SEI_TYPE_USER_DATA_REGISTERED   = 4,   ///< registered user data as specified by Rec. ITU-T T.35
    SEI_TYPE_USER_DATA_UNREGISTERED = 5,   ///< unregistered user data
    SEI_TYPE_RECOVERY_POINT         = 6,   ///< recovery point (frame # to decoder sync)
    SEI_TYPE_FRAME_PACKING          = 45,  ///< frame packing arrangement
    SEI_TYPE_DISPLAY_ORIENTATION    = 47,  ///< display orientation
} SEI_Type;

/**
 * pic_struct in picture timing SEI message
 */
typedef enum {
    SEI_PIC_STRUCT_FRAME             = 0, ///< 0: %frame
    SEI_PIC_STRUCT_TOP_FIELD         = 1, ///< 1: top field
    SEI_PIC_STRUCT_BOTTOM_FIELD      = 2, ///< 2: bottom field
    SEI_PIC_STRUCT_TOP_BOTTOM        = 3, ///< 3: top field, bottom field, in that order
    SEI_PIC_STRUCT_BOTTOM_TOP        = 4, ///< 4: bottom field, top field, in that order
    SEI_PIC_STRUCT_TOP_BOTTOM_TOP    = 5, ///< 5: top field, bottom field, top field repeated, in that order
    SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM = 6, ///< 6: bottom field, top field, bottom field repeated, in that order
    SEI_PIC_STRUCT_FRAME_DOUBLING    = 7, ///< 7: %frame doubling
    SEI_PIC_STRUCT_FRAME_TRIPLING    = 8  ///< 8: %frame tripling
} SEI_PicStructType;

typedef struct H264SEIPictureTiming {
    SEI_PicStructType pic_struct;

    /**
     * Bit set of clock types for fields/frames in picture timing SEI message.
     * For each found ct_type, appropriate bit is set (e.g., bit 1 for
     * interlaced).
     */
    int ct_type;

    /**
     * dpb_output_delay in picture timing SEI message, see H.264 C.2.2
     */
    int dpb_output_delay;

    /**
     * cpb_removal_delay in picture timing SEI message, see H.264 C.1.2
     */
    int cpb_removal_delay;
} H264SEIPictureTiming;

typedef struct H264SEIAFD {
    int present;
    uint8_t active_format_description;
} H264SEIAFD;

typedef struct H264SEIA53Caption {
    int a53_caption_size;
    uint8_t *a53_caption;
} H264SEIA53Caption;

typedef struct H264SEIUnregistered {
    int x264_build;
} H264SEIUnregistered;

typedef struct H264SEIRecoveryPoint {
    /**
     * recovery_frame_cnt
     *
     * Set to -1 if no recovery point SEI message found or to number of frames
     * before playback synchronizes. Frames having recovery point are key
     * frames.
     */
    int recovery_frame_cnt;
} H264SEIRecoveryPoint;

typedef struct H264SEIBufferingPeriod {
    int present;                        ///< Buffering period SEI flag
    int initial_cpb_removal_delay[32];  ///< Initial timestamps for CPBs
} H264SEIBufferingPeriod;

typedef struct H264SEIFramePacking {
    int present;
    int arrangement_type;
    int content_interpretation_type;
    int quincunx_subsampling;
} H264SEIFramePacking;

typedef struct H264SEIDisplayOrientation {
    int present;
    int anticlockwise_rotation;
    int hflip, vflip;
} H264SEIDisplayOrientation;

typedef struct H264SEIContext {
    H264SEIPictureTiming picture_timing;
    H264SEIAFD afd;
    H264SEIA53Caption a53_caption;
    H264SEIUnregistered unregistered;
    H264SEIRecoveryPoint recovery_point;
    H264SEIBufferingPeriod buffering_period;
    H264SEIFramePacking frame_packing;
    H264SEIDisplayOrientation display_orientation;
} H264SEIContext;

struct H264ParamSets;

int ff_h264_sei_decode(H264SEIContext *h, GetBitContext *gb,
                       const struct H264ParamSets *ps, void *logctx);

/**
 * Reset SEI values at the beginning of the frame.
 */
void ff_h264_sei_uninit(H264SEIContext *h);

#endif /* AVCODEC_H264_SEI_H */
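
For a sense of how the per-message structs above are meant to be read back, the following hedged sketch (a hypothetical helper, not part of the commit) mirrors the way the parser hunks earlier derive their timestamp fields from the picture timing and buffering period messages:

#include "h264_sei.h"

/* Hypothetical helper: returns 1 and fills the deltas when a picture timing
 * message was seen, 0 otherwise (callers then fall back to INT_MIN, as
 * h264_parse() does in the diff above). */
static int sei_timing_info(const H264SEIContext *sei,
                           int *dts_sync_point,
                           int *dts_ref_dts_delta,
                           int *pts_dts_delta)
{
    if (sei->picture_timing.cpb_removal_delay < 0)
        return 0;

    *dts_sync_point    = sei->buffering_period.present;
    *dts_ref_dts_delta = sei->picture_timing.cpb_removal_delay;
    *pts_dts_delta     = sei->picture_timing.dpb_output_delay;
    return 1;
}
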
@@ -826,7 +826,7 @@ static int h264_slice_header_init(H264Context *h)
     if (sps->timing_info_present_flag) {
         int64_t den = sps->time_scale;
-        if (h->x264_build < 44U)
+        if (h->sei.unregistered.x264_build < 44U)
             den *= 2;
         av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num,
                   sps->num_units_in_tick, den, 1 << 30);
...