Commit 51513b98 authored by Ronald S. Bultje, committed by Michael Niedermayer

h264/svq3: stop using draw_edges.

Instead, extend edges on demand, using ff_emulated_edge_mc(), only when a
motion vector actually reaches beyond the visible decoded area. This changes
decoding time for the cathedral sample from 8.722 sec to 8.706 sec, i.e.
about 0.2% faster overall. More generally (VP8 uses this approach as well),
low-motion content sees significant speed improvements, whereas high-motion
content tends to decode in approximately the same time.
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
parent f8942463
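
For orientation, below is a minimal standalone sketch of the on-demand edge
extension described in the commit message. It is an illustration only, not
code from this commit: the function name emulate_edge_block is made up, and
it assumes 8-bit samples and a single plane, whereas the decoder itself uses
ff_emulated_edge_mc() for this job. Instead of padding every reference frame
with draw_edges(), the caller copies just the block it needs into a small
scratch buffer, clamping source coordinates so samples outside the picture
repeat the nearest edge pixel.

    #include <stdint.h>

    /* Copy a block_w x block_h block starting at (src_x, src_y) of a
     * pic_w x pic_h plane into dst, clamping the source coordinates to
     * the picture so out-of-picture samples repeat the nearest edge
     * pixel.  (src_x, src_y) may lie partly or fully outside the plane. */
    static void emulate_edge_block(uint8_t *dst, int dst_stride,
                                   const uint8_t *src, int src_stride,
                                   int block_w, int block_h,
                                   int src_x, int src_y,
                                   int pic_w, int pic_h)
    {
        for (int y = 0; y < block_h; y++) {
            int sy = src_y + y;
            sy = sy < 0 ? 0 : (sy >= pic_h ? pic_h - 1 : sy);
            for (int x = 0; x < block_w; x++) {
                int sx = src_x + x;
                sx = sx < 0 ? 0 : (sx >= pic_w ? pic_w - 1 : sx);
                dst[y * dst_stride + x] = src[sy * src_stride + sx];
            }
        }
    }

A caller would first test whether the block, including its interpolation
margin, lies entirely inside the picture, and only route it through the
scratch buffer when it does not; that test is what the emu flag in the hunks
below stands for.
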
@@ -128,9 +128,9 @@ static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
 void ff_h264_draw_horiz_band(H264Context *h, int y, int height)
 {
-    ff_draw_horiz_band(h->avctx, &h->dsp, &h->cur_pic,
+    ff_draw_horiz_band(h->avctx, NULL, &h->cur_pic,
                        h->ref_list[0][0].f.data[0] ? &h->ref_list[0][0] : NULL,
-                       y, height, h->picture_structure, h->first_field, 1,
+                       y, height, h->picture_structure, h->first_field, 0,
                        h->low_delay, h->mb_height * 16, h->mb_width * 16);
 }
@@ -692,8 +692,8 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic,
     int offset = ((mx >> 2) << pixel_shift) + (my >> 2) * h->mb_linesize;
     uint8_t *src_y = pic->f.data[0] + offset;
     uint8_t *src_cb, *src_cr;
-    int extra_width = h->emu_edge_width;
-    int extra_height = h->emu_edge_height;
+    int extra_width = 0;
+    int extra_height = 0;
     int emu = 0;
     const int full_mx = mx >> 2;
     const int full_my = my >> 2;
@@ -2737,29 +2737,6 @@ static int field_end(H264Context *h, int in_setup)
         h->er.next_pic = h->ref_count[1] ? &h->ref_list[1][0] : NULL;
         ff_er_frame_end(&h->er);
     }
-    /* redraw edges for the frame if decoding didn't complete */
-    if (h->er.error_count &&
-        !h->avctx->hwaccel &&
-        !(h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
-        h->cur_pic_ptr->f.reference &&
-        !(h->flags & CODEC_FLAG_EMU_EDGE)) {
-        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(h->avctx->pix_fmt);
-        int hshift = desc->log2_chroma_w;
-        int vshift = desc->log2_chroma_h;
-        h->dsp.draw_edges(h->cur_pic.f.data[0], h->linesize,
-                          h->mb_width * 16, h->mb_height * 16,
-                          EDGE_WIDTH, EDGE_WIDTH,
-                          EDGE_TOP | EDGE_BOTTOM);
-        h->dsp.draw_edges(h->cur_pic.f.data[1], h->uvlinesize,
-                          (h->mb_width * 16) >> hshift, (h->mb_height * 16) >> vshift,
-                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
-                          EDGE_TOP | EDGE_BOTTOM);
-        h->dsp.draw_edges(h->cur_pic.f.data[2], h->uvlinesize,
-                          (h->mb_width * 16) >> hshift, (h->mb_height * 16) >> vshift,
-                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
-                          EDGE_TOP | EDGE_BOTTOM);
-    }
     emms_c();
     h->current_slice = 0;
@@ -3727,13 +3704,6 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
                     (h->ref_list[j][i].f.reference & 3);
     }
-    // FIXME: fix draw_edges + PAFF + frame threads
-    h->emu_edge_width = (h->flags & CODEC_FLAG_EMU_EDGE ||
-                         (!h->sps.frame_mbs_only_flag &&
-                          h->avctx->active_thread_type))
-                        ? 0 : 16;
-    h->emu_edge_height = (FRAME_MBAFF || FIELD_PICTURE) ? 0 : h->emu_edge_width;
     if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
         av_log(h->avctx, AV_LOG_DEBUG,
                "slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
@@ -4100,11 +4070,11 @@ static void decode_finish_row(H264Context *h)
         top -= deblock_border;
     }
-    if (top >= pic_height || (top + height) < h->emu_edge_height)
+    if (top >= pic_height || (top + height) < 0)
         return;
     height = FFMIN(height, pic_height - top);
-    if (top < h->emu_edge_height) {
+    if (top < 0) {
         height = top + height;
         top = 0;
     }
@@ -354,9 +354,6 @@ typedef struct H264Context {
     int mb_linesize; ///< may be equal to s->linesize or s->linesize * 2, for mbaff
    int mb_uvlinesize;
-    int emu_edge_width;
-    int emu_edge_height;
     unsigned current_sps_id; ///< id of the current SPS
     SPS sps; ///< current sps
@@ -295,9 +295,7 @@ static inline void svq3_mc_dir_part(SVQ3Context *s,
     if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
         my < 0 || my >= s->v_edge_pos - height - 1) {
-        if ((h->flags & CODEC_FLAG_EMU_EDGE))
-            emu = 1;
+        emu = 1;
         mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
         my = av_clip(my, -16, s->v_edge_pos - height + 15);
     }
@@ -1055,6 +1053,11 @@ static int get_buffer(AVCodecContext *avctx, Picture *pic)
     pic->f.reference = !(h->pict_type == AV_PICTURE_TYPE_B);
     ret = ff_get_buffer(avctx, &pic->f);
+    if (!h->edge_emu_buffer) {
+        h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
+        if (!h->edge_emu_buffer)
+            return AVERROR(ENOMEM);
+    }
     h->linesize = pic->f.linesize[0];
     h->uvlinesize = pic->f.linesize[1];
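
A note on the pic->f.linesize[0] * 17 scratch allocation in the hunk above:
the factor of 17 is presumably the largest SVQ3 block height (16 luma rows)
plus one extra row of support for sub-pel interpolation, each row a full
picture line wide so the same buffer also covers the smaller chroma blocks.
A hypothetical helper expressing that assumption (svq3_emu_buffer_size is not
a real FFmpeg function):

    #include <stddef.h>

    /* Assumed sizing rationale: 16 rows for the largest block plus one
     * row of interpolation support, each a full luma line wide. */
    static inline size_t svq3_emu_buffer_size(int luma_linesize)
    {
        return (size_t)luma_linesize * (16 + 1);
    }
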
@@ -1242,8 +1245,8 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
             (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
     }
-    ff_draw_horiz_band(avctx, &h->dsp, s->cur_pic, s->last_pic->f.data[0] ? s->last_pic : NULL,
-                       16 * h->mb_y, 16, h->picture_structure, 0, 1,
+    ff_draw_horiz_band(avctx, NULL, s->cur_pic, s->last_pic->f.data[0] ? s->last_pic : NULL,
+                       16 * h->mb_y, 16, h->picture_structure, 0, 0,
                        h->low_delay, h->mb_height * 16, h->mb_width * 16);
 }
@@ -1302,6 +1305,7 @@ static int svq3_decode_end(AVCodecContext *avctx)
     av_freep(&s->buf);
     s->buf_size = 0;
+    av_freep(&h->edge_emu_buffer);
     return 0;
 }