Commit fd1a6a01 authored by Mark Thompson's avatar Mark Thompson

vaapi_mpeg4: Convert to use the new VAAPI hwaccel code

(cherry picked from commit ccd0316f)
parent 32b3812b
...@@ -20,11 +20,11 @@ ...@@ -20,11 +20,11 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/ */
#include "h263.h"
#include "internal.h"
#include "mpeg4video.h"
#include "mpegvideo.h"
#include "vaapi_decode.h"
/** Reconstruct bitstream intra_dc_vlc_thr */ /** Reconstruct bitstream intra_dc_vlc_thr */
static int mpeg4_get_intra_dc_vlc_thr(Mpeg4DecContext *s) static int mpeg4_get_intra_dc_vlc_thr(Mpeg4DecContext *s)
...@@ -45,121 +45,176 @@ static int mpeg4_get_intra_dc_vlc_thr(Mpeg4DecContext *s) ...@@ -45,121 +45,176 @@ static int mpeg4_get_intra_dc_vlc_thr(Mpeg4DecContext *s)
/**
 * Begin hardware decoding of an MPEG-4 part 2 / H.263 frame.
 *
 * Builds the VA-API picture parameter buffer (and, when the bitstream
 * uses the first inverse-quantisation method, the IQ matrix buffer)
 * for the current picture and attaches them to the per-frame
 * VAAPIDecodePicture. The compressed data itself arrives later via
 * the decode_slice hook.
 *
 * @param avctx   codec context (priv_data is a Mpeg4DecContext)
 * @param buffer  unused (slice data is passed per-slice)
 * @param size    unused
 * @return 0 on success, a negative error code on failure
 */
static int vaapi_mpeg4_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
{
    Mpeg4DecContext *ctx = avctx->priv_data;
    MpegEncContext *s = &ctx->m;
    VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
    VAPictureParameterBufferMPEG4 pic_param;
    int i, err;

    pic->output_surface = ff_vaapi_get_surface_id(s->current_picture_ptr->f);

    pic_param = (VAPictureParameterBufferMPEG4) {
        .vop_width                        = s->width,
        .vop_height                       = s->height,
        /* Reference surfaces are filled in below only for P/B frames. */
        .forward_reference_picture        = VA_INVALID_ID,
        .backward_reference_picture       = VA_INVALID_ID,
        .vol_fields.bits = {
            .short_video_header           = avctx->codec->id == AV_CODEC_ID_H263,
            .chroma_format                = CHROMA_420,
            .interlaced                   = !s->progressive_sequence,
            .obmc_disable                 = 1,
            .sprite_enable                = ctx->vol_sprite_usage,
            .sprite_warping_accuracy      = s->sprite_warping_accuracy,
            .quant_type                   = s->mpeg_quant,
            .quarter_sample               = s->quarter_sample,
            .data_partitioned             = s->data_partitioning,
            .reversible_vlc               = ctx->rvlc,
            .resync_marker_disable        = !ctx->resync_marker,
        },
        .no_of_sprite_warping_points      = ctx->num_sprite_warping_points,
        .quant_precision                  = s->quant_precision,
        .vop_fields.bits = {
            /* VA-API coding types are 0/1/2 for I/P/B. */
            .vop_coding_type              = s->pict_type - AV_PICTURE_TYPE_I,
            .backward_reference_vop_coding_type =
                s->pict_type == AV_PICTURE_TYPE_B ? s->next_picture.f->pict_type - AV_PICTURE_TYPE_I : 0,
            .vop_rounding_type            = s->no_rounding,
            .intra_dc_vlc_thr             = mpeg4_get_intra_dc_vlc_thr(ctx),
            .top_field_first              = s->top_field_first,
            .alternate_vertical_scan_flag = s->alternate_scan,
        },
        .vop_fcode_forward                = s->f_code,
        .vop_fcode_backward               = s->b_code,
        .vop_time_increment_resolution    = avctx->framerate.num,
        .num_macroblocks_in_gob           = s->mb_width * H263_GOB_HEIGHT(s->height),
        .num_gobs_in_vop                  =
            (s->mb_width * s->mb_height) / (s->mb_width * H263_GOB_HEIGHT(s->height)),
        .TRB                              = s->pb_time,
        .TRD                              = s->pp_time,
    };

    /* VA-API carries at most three sprite warping points. */
    for (i = 0; i < ctx->num_sprite_warping_points && i < 3; i++) {
        pic_param.sprite_trajectory_du[i] = ctx->sprite_traj[i][0];
        pic_param.sprite_trajectory_dv[i] = ctx->sprite_traj[i][1];
    }

    if (s->pict_type == AV_PICTURE_TYPE_B)
        pic_param.backward_reference_picture = ff_vaapi_get_surface_id(s->next_picture.f);
    if (s->pict_type != AV_PICTURE_TYPE_I)
        pic_param.forward_reference_picture  = ff_vaapi_get_surface_id(s->last_picture.f);

    err = ff_vaapi_decode_make_param_buffer(avctx, pic,
                                            VAPictureParameterBufferType,
                                            &pic_param, sizeof(pic_param));
    if (err < 0)
        goto fail;

    /* Only the first inverse quantisation method uses the weighting matrices */
    if (pic_param.vol_fields.bits.quant_type) {
        VAIQMatrixBufferMPEG4 iq_matrix;

        iq_matrix.load_intra_quant_mat     = 1;
        iq_matrix.load_non_intra_quant_mat = 1;

        /* Undo the IDCT permutation: VA-API expects matrices in
         * zigzag scan order. */
        for (i = 0; i < 64; i++) {
            int n = s->idsp.idct_permutation[ff_zigzag_direct[i]];
            iq_matrix.intra_quant_mat[i]     = s->intra_matrix[n];
            iq_matrix.non_intra_quant_mat[i] = s->inter_matrix[n];
        }

        err = ff_vaapi_decode_make_param_buffer(avctx, pic,
                                                VAIQMatrixBufferType,
                                                &iq_matrix, sizeof(iq_matrix));
        if (err < 0)
            goto fail;
    }

    return 0;

fail:
    /* Drop any buffers already attached to this picture. */
    ff_vaapi_decode_cancel(avctx, pic);
    return err;
}
/**
 * Finish the current frame: submit all accumulated parameter and
 * slice buffers to the hardware and signal the decoded band.
 *
 * @param avctx codec context (priv_data starts with a MpegEncContext)
 * @return 0 on success, a negative error code from decode_issue on failure
 */
static int vaapi_mpeg4_end_frame(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
    int ret;

    ret = ff_vaapi_decode_issue(avctx, pic);
    if (ret < 0)
        goto fail;

    ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);

fail:
    /* decode_issue already released the picture's buffers on error. */
    return ret;
}
/**
 * Add one slice of compressed data to the current picture.
 *
 * Builds a VASliceParameterBufferMPEG4 for the slice and queues it,
 * together with the raw bitstream bytes, on the VAAPIDecodePicture.
 *
 * @param avctx  codec context (priv_data starts with a MpegEncContext)
 * @param buffer pointer to the slice bitstream data
 * @param size   size of the slice data in bytes
 * @return 0 on success, a negative error code on failure
 */
static int vaapi_mpeg4_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
{
    MpegEncContext *s = avctx->priv_data;
    VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
    VASliceParameterBufferMPEG4 slice_param;
    int err;

    /* video_plane_with_short_video_header() contains all GOBs
     * in-order, and this is what VA API (Intel backend) expects: only
     * a single slice param. So fake macroblock_number for Libav so
     * that we don't call vaapi_mpeg4_decode_slice() again
     */
    if (avctx->codec->id == AV_CODEC_ID_H263)
        size = s->gb.buffer_end - buffer;

    slice_param = (VASliceParameterBufferMPEG4) {
        .slice_data_size   = size,
        .slice_data_offset = 0,
        .slice_data_flag   = VA_SLICE_DATA_FLAG_ALL,
        /* Slice data may start mid-byte; offset is in bits. */
        .macroblock_offset = get_bits_count(&s->gb) % 8,
        .macroblock_number = s->mb_y * s->mb_width + s->mb_x,
        .quant_scale       = s->qscale,
    };

    /* Pretend we consumed every GOB so the caller stops iterating. */
    if (avctx->codec->id == AV_CODEC_ID_H263)
        s->mb_y = s->mb_height;

    err = ff_vaapi_decode_make_slice_buffer(avctx, pic,
                                            &slice_param, sizeof(slice_param),
                                            buffer, size);
    if (err < 0) {
        ff_vaapi_decode_cancel(avctx, pic);
        return err;
    }

    return 0;
}
#if CONFIG_MPEG4_VAAPI_HWACCEL
/* VA-API hardware acceleration entry for MPEG-4 part 2. */
AVHWAccel ff_mpeg4_vaapi_hwaccel = {
    .name                 = "mpeg4_vaapi",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_MPEG4,
    .pix_fmt              = AV_PIX_FMT_VAAPI,
    .start_frame          = &vaapi_mpeg4_start_frame,
    .end_frame            = &vaapi_mpeg4_end_frame,
    .decode_slice         = &vaapi_mpeg4_decode_slice,
    .frame_priv_data_size = sizeof(VAAPIDecodePicture),
    .init                 = &ff_vaapi_decode_init,
    .uninit               = &ff_vaapi_decode_uninit,
    .priv_data_size       = sizeof(VAAPIDecodeContext),
};
#endif
#if CONFIG_H263_VAAPI_HWACCEL
/* VA-API hardware acceleration entry for H.263; shares the MPEG-4
 * hooks above (short video header path). */
AVHWAccel ff_h263_vaapi_hwaccel = {
    .name                 = "h263_vaapi",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_H263,
    .pix_fmt              = AV_PIX_FMT_VAAPI,
    .start_frame          = &vaapi_mpeg4_start_frame,
    .end_frame            = &vaapi_mpeg4_end_frame,
    .decode_slice         = &vaapi_mpeg4_decode_slice,
    .frame_priv_data_size = sizeof(VAAPIDecodePicture),
    .init                 = &ff_vaapi_decode_init,
    .uninit               = &ff_vaapi_decode_uninit,
    .priv_data_size       = sizeof(VAAPIDecodeContext),
};
#endif
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment