Commit 71acbea1 authored by Mark Thompson

vaapi_mpeg2: Convert to use the new VAAPI hwaccel code

(cherry picked from commit 102e13c3)
parent c8b26d59
...@@ -22,8 +22,8 @@ ...@@ -22,8 +22,8 @@
#include "mpegutils.h" #include "mpegutils.h"
#include "mpegvideo.h" #include "mpegvideo.h"
#include "vaapi_internal.h"
#include "internal.h" #include "internal.h"
#include "vaapi_decode.h"
/** Reconstruct bitstream f_code */ /** Reconstruct bitstream f_code */
static inline int mpeg2_get_f_code(const MpegEncContext *s) static inline int mpeg2_get_f_code(const MpegEncContext *s)
...@@ -41,71 +41,100 @@ static inline int mpeg2_get_is_frame_start(const MpegEncContext *s) ...@@ -41,71 +41,100 @@ static inline int mpeg2_get_is_frame_start(const MpegEncContext *s)
/**
 * Begin hardware decoding of the current MPEG-2 frame.
 *
 * Fills the VA picture-parameter and IQ-matrix buffers from the software
 * decoder state and attaches them to the per-picture decode context.
 * The bitstream arguments are unused here; slice data arrives later via
 * the decode_slice callback.
 *
 * Returns 0 on success or a negative error code; on failure the
 * in-flight picture is cancelled.
 */
static int vaapi_mpeg2_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
{
    const MpegEncContext *s = avctx->priv_data;
    VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
    VAPictureParameterBufferMPEG2 pic_param;
    VAIQMatrixBufferMPEG2 iq_matrix;
    int i, err;

    pic->output_surface = ff_vaapi_get_surface_id(s->current_picture_ptr->f);

    pic_param = (VAPictureParameterBufferMPEG2) {
        .horizontal_size            = s->width,
        .vertical_size              = s->height,
        .forward_reference_picture  = VA_INVALID_ID,
        .backward_reference_picture = VA_INVALID_ID,
        .picture_coding_type        = s->pict_type,
        .f_code                     = mpeg2_get_f_code(s),
        .picture_coding_extension.bits = {
            .intra_dc_precision         = s->intra_dc_precision,
            .picture_structure          = s->picture_structure,
            .top_field_first            = s->top_field_first,
            .frame_pred_frame_dct       = s->frame_pred_frame_dct,
            .concealment_motion_vectors = s->concealment_motion_vectors,
            .q_scale_type               = s->q_scale_type,
            .intra_vlc_format           = s->intra_vlc_format,
            .alternate_scan             = s->alternate_scan,
            .repeat_first_field         = s->repeat_first_field,
            .progressive_frame          = s->progressive_frame,
            .is_first_field             = mpeg2_get_is_frame_start(s),
        },
    };

    /* B-pictures reference in both directions; P-pictures only the past. */
    switch (s->pict_type) {
    case AV_PICTURE_TYPE_B:
        pic_param.backward_reference_picture = ff_vaapi_get_surface_id(s->next_picture.f);
        /* fallthrough */
    case AV_PICTURE_TYPE_P:
        pic_param.forward_reference_picture = ff_vaapi_get_surface_id(s->last_picture.f);
        break;
    }

    err = ff_vaapi_decode_make_param_buffer(avctx, pic,
                                            VAPictureParameterBufferType,
                                            &pic_param, sizeof(pic_param));
    if (err < 0)
        goto fail;

    /* Upload all four quantiser matrices, undoing the IDCT permutation
     * so the driver sees them in zigzag order. */
    iq_matrix.load_intra_quantiser_matrix            = 1;
    iq_matrix.load_non_intra_quantiser_matrix        = 1;
    iq_matrix.load_chroma_intra_quantiser_matrix     = 1;
    iq_matrix.load_chroma_non_intra_quantiser_matrix = 1;

    for (i = 0; i < 64; i++) {
        int n = s->idsp.idct_permutation[ff_zigzag_direct[i]];
        iq_matrix.intra_quantiser_matrix[i]            = s->intra_matrix[n];
        iq_matrix.non_intra_quantiser_matrix[i]        = s->inter_matrix[n];
        iq_matrix.chroma_intra_quantiser_matrix[i]     = s->chroma_intra_matrix[n];
        iq_matrix.chroma_non_intra_quantiser_matrix[i] = s->chroma_inter_matrix[n];
    }

    err = ff_vaapi_decode_make_param_buffer(avctx, pic,
                                            VAIQMatrixBufferType,
                                            &iq_matrix, sizeof(iq_matrix));
    if (err < 0)
        goto fail;

    return 0;

fail:
    ff_vaapi_decode_cancel(avctx, pic);
    return err;
}
/**
 * Finish the current frame: submit all queued buffers to the hardware
 * and, on success, signal the drawn region to the generic MPEG code.
 *
 * Returns the result of ff_vaapi_decode_issue() (negative on error).
 */
static int vaapi_mpeg2_end_frame(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
    int ret = ff_vaapi_decode_issue(avctx, pic);

    if (ret >= 0)
        ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);

    return ret;
}
/* NOTE(review): the lines below are a side-by-side diff rendering (old and
 * new text concatenated on each line). A diff hunk gap hides the middle of
 * this function — the parsing of quantiser_scale_code and intra_slice_flag
 * from the slice header is not visible here; consult the full file before
 * relying on this view. */
static int vaapi_mpeg2_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) static int vaapi_mpeg2_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
{ {
/* New side fetches the per-picture decode context instead of the old
 * FFVAContext. */
const MpegEncContext *s = avctx->priv_data; const MpegEncContext *s = avctx->priv_data;
FFVAContext *vactx = ff_vaapi_get_context(avctx); VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
/* Slice parameters now live on the stack; the helper copies them. */
VASliceParameterBufferMPEG2 *slice_param; VASliceParameterBufferMPEG2 slice_param;
GetBitContext gb; GetBitContext gb;
uint32_t quantiser_scale_code, intra_slice_flag, macroblock_offset; uint32_t quantiser_scale_code, intra_slice_flag, macroblock_offset;
int err;
/* Determine macroblock_offset */ /* Determine macroblock_offset */
init_get_bits(&gb, buffer, 8 * size); init_get_bits(&gb, buffer, 8 * size);
...@@ -120,27 +149,38 @@ static int vaapi_mpeg2_decode_slice(AVCodecContext *avctx, const uint8_t *buffer ...@@ -120,27 +149,38 @@ static int vaapi_mpeg2_decode_slice(AVCodecContext *avctx, const uint8_t *buffer
} }
/* Bit position after the slice header = start of macroblock data. */
macroblock_offset = get_bits_count(&gb); macroblock_offset = get_bits_count(&gb);
/* New side builds the slice parameter struct with a compound literal
 * and hands it, together with the raw slice data, to the generic
 * VAAPI decode helper. */
/* Fill in VASliceParameterBufferMPEG2 */ slice_param = (VASliceParameterBufferMPEG2) {
slice_param = (VASliceParameterBufferMPEG2 *)ff_vaapi_alloc_slice(vactx, buffer, size); .slice_data_size = size,
if (!slice_param) .slice_data_offset = 0,
return -1; .slice_data_flag = VA_SLICE_DATA_FLAG_ALL,
slice_param->macroblock_offset = macroblock_offset; .macroblock_offset = macroblock_offset,
slice_param->slice_horizontal_position = s->mb_x; .slice_horizontal_position = s->mb_x,
/* Field pictures address rows in field units, hence the shift. */
slice_param->slice_vertical_position = s->mb_y >> (s->picture_structure != PICT_FRAME); .slice_vertical_position = s->mb_y >> (s->picture_structure != PICT_FRAME),
slice_param->quantiser_scale_code = quantiser_scale_code; .quantiser_scale_code = quantiser_scale_code,
slice_param->intra_slice_flag = intra_slice_flag; .intra_slice_flag = intra_slice_flag,
};
err = ff_vaapi_decode_make_slice_buffer(avctx, pic,
&slice_param, sizeof(slice_param),
buffer, size);
/* On failure the whole in-flight picture is cancelled, not just the
 * slice. */
if (err < 0) {
ff_vaapi_decode_cancel(avctx, pic);
return err;
}
return 0; return 0;
} }
AVHWAccel ff_mpeg2_vaapi_hwaccel = { AVHWAccel ff_mpeg2_vaapi_hwaccel = {
.name = "mpeg2_vaapi", .name = "mpeg2_vaapi",
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_MPEG2VIDEO, .id = AV_CODEC_ID_MPEG2VIDEO,
.pix_fmt = AV_PIX_FMT_VAAPI, .pix_fmt = AV_PIX_FMT_VAAPI,
.start_frame = vaapi_mpeg2_start_frame, .start_frame = &vaapi_mpeg2_start_frame,
.end_frame = ff_vaapi_mpeg_end_frame, .end_frame = &vaapi_mpeg2_end_frame,
.decode_slice = vaapi_mpeg2_decode_slice, .decode_slice = &vaapi_mpeg2_decode_slice,
.init = ff_vaapi_context_init, .frame_priv_data_size = sizeof(VAAPIDecodePicture),
.uninit = ff_vaapi_context_fini, .init = &ff_vaapi_decode_init,
.priv_data_size = sizeof(FFVAContext), .uninit = &ff_vaapi_decode_uninit,
.priv_data_size = sizeof(VAAPIDecodeContext),
}; };
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment