Commit fae6fd5b authored by Ronald S. Bultje, committed by Martin Storsjö

h264/svq3: Stop using draw_edges

Instead, only extend edges on-demand when the motion vector actually
crosses the visible decoded area using ff_emulated_edge_mc(). This
changes decoding time for cathedral from 8.722sec to 8.706sec, i.e.
0.2% faster overall. More generally (VP8 uses this also), low-motion
content gets significant speed improvements, whereas high-motion content
tends to decode in approximately the same time.
Signed-off-by: Martin Storsjö <martin@martin.st>
parent 7ebfb466
...@@ -106,9 +106,9 @@ static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, ...@@ -106,9 +106,9 @@ static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
void ff_h264_draw_horiz_band(H264Context *h, int y, int height) void ff_h264_draw_horiz_band(H264Context *h, int y, int height)
{ {
ff_draw_horiz_band(h->avctx, &h->dsp, &h->cur_pic, ff_draw_horiz_band(h->avctx, NULL, &h->cur_pic,
h->ref_list[0][0].f.data[0] ? &h->ref_list[0][0] : NULL, h->ref_list[0][0].f.data[0] ? &h->ref_list[0][0] : NULL,
y, height, h->picture_structure, h->first_field, 1, y, height, h->picture_structure, h->first_field, 0,
h->low_delay, h->mb_height * 16, h->mb_width * 16); h->low_delay, h->mb_height * 16, h->mb_width * 16);
} }
...@@ -659,8 +659,8 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, ...@@ -659,8 +659,8 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic,
int offset = ((mx >> 2) << pixel_shift) + (my >> 2) * h->mb_linesize; int offset = ((mx >> 2) << pixel_shift) + (my >> 2) * h->mb_linesize;
uint8_t *src_y = pic->f.data[0] + offset; uint8_t *src_y = pic->f.data[0] + offset;
uint8_t *src_cb, *src_cr; uint8_t *src_cb, *src_cr;
int extra_width = h->emu_edge_width; int extra_width = 0;
int extra_height = h->emu_edge_height; int extra_height = 0;
int emu = 0; int emu = 0;
const int full_mx = mx >> 2; const int full_mx = mx >> 2;
const int full_my = my >> 2; const int full_my = my >> 2;
...@@ -2670,29 +2670,6 @@ static int field_end(H264Context *h, int in_setup) ...@@ -2670,29 +2670,6 @@ static int field_end(H264Context *h, int in_setup)
h->er.next_pic = h->ref_count[1] ? &h->ref_list[1][0] : NULL; h->er.next_pic = h->ref_count[1] ? &h->ref_list[1][0] : NULL;
ff_er_frame_end(&h->er); ff_er_frame_end(&h->er);
} }
/* redraw edges for the frame if decoding didn't complete */
if (h->er.error_count &&
!h->avctx->hwaccel &&
!(h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
h->cur_pic_ptr->f.reference &&
!(h->flags & CODEC_FLAG_EMU_EDGE)) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(h->avctx->pix_fmt);
int hshift = desc->log2_chroma_w;
int vshift = desc->log2_chroma_h;
h->dsp.draw_edges(h->cur_pic.f.data[0], h->linesize,
h->mb_width * 16, h->mb_height * 16,
EDGE_WIDTH, EDGE_WIDTH,
EDGE_TOP | EDGE_BOTTOM);
h->dsp.draw_edges(h->cur_pic.f.data[1], h->uvlinesize,
(h->mb_width * 16) >> hshift, (h->mb_height * 16) >> vshift,
EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
EDGE_TOP | EDGE_BOTTOM);
h->dsp.draw_edges(h->cur_pic.f.data[2], h->uvlinesize,
(h->mb_width * 16) >> hshift, (h->mb_height * 16) >> vshift,
EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
EDGE_TOP | EDGE_BOTTOM);
}
emms_c(); emms_c();
h->current_slice = 0; h->current_slice = 0;
...@@ -3596,13 +3573,6 @@ static int decode_slice_header(H264Context *h, H264Context *h0) ...@@ -3596,13 +3573,6 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
(h->ref_list[j][i].f.reference & 3); (h->ref_list[j][i].f.reference & 3);
} }
// FIXME: fix draw_edges + PAFF + frame threads
h->emu_edge_width = (h->flags & CODEC_FLAG_EMU_EDGE ||
(!h->sps.frame_mbs_only_flag &&
h->avctx->active_thread_type))
? 0 : 16;
h->emu_edge_height = (FRAME_MBAFF || FIELD_PICTURE) ? 0 : h->emu_edge_width;
if (h->avctx->debug & FF_DEBUG_PICT_INFO) { if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
av_log(h->avctx, AV_LOG_DEBUG, av_log(h->avctx, AV_LOG_DEBUG,
"slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n", "slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
...@@ -3969,11 +3939,11 @@ static void decode_finish_row(H264Context *h) ...@@ -3969,11 +3939,11 @@ static void decode_finish_row(H264Context *h)
top -= deblock_border; top -= deblock_border;
} }
if (top >= pic_height || (top + height) < h->emu_edge_height) if (top >= pic_height || (top + height) < 0)
return; return;
height = FFMIN(height, pic_height - top); height = FFMIN(height, pic_height - top);
if (top < h->emu_edge_height) { if (top < 0) {
height = top + height; height = top + height;
top = 0; top = 0;
} }
......
...@@ -350,9 +350,6 @@ typedef struct H264Context { ...@@ -350,9 +350,6 @@ typedef struct H264Context {
int mb_linesize; ///< may be equal to s->linesize or s->linesize * 2, for mbaff int mb_linesize; ///< may be equal to s->linesize or s->linesize * 2, for mbaff
int mb_uvlinesize; int mb_uvlinesize;
int emu_edge_width;
int emu_edge_height;
unsigned current_sps_id; ///< id of the current SPS unsigned current_sps_id; ///< id of the current SPS
SPS sps; ///< current sps SPS sps; ///< current sps
......
...@@ -291,9 +291,7 @@ static inline void svq3_mc_dir_part(SVQ3Context *s, ...@@ -291,9 +291,7 @@ static inline void svq3_mc_dir_part(SVQ3Context *s,
if (mx < 0 || mx >= s->h_edge_pos - width - 1 || if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
my < 0 || my >= s->v_edge_pos - height - 1) { my < 0 || my >= s->v_edge_pos - height - 1) {
if ((h->flags & CODEC_FLAG_EMU_EDGE)) emu = 1;
emu = 1;
mx = av_clip(mx, -16, s->h_edge_pos - width + 15); mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
my = av_clip(my, -16, s->v_edge_pos - height + 15); my = av_clip(my, -16, s->v_edge_pos - height + 15);
} }
...@@ -1050,6 +1048,11 @@ static int get_buffer(AVCodecContext *avctx, Picture *pic) ...@@ -1050,6 +1048,11 @@ static int get_buffer(AVCodecContext *avctx, Picture *pic)
pic->f.reference = !(h->pict_type == AV_PICTURE_TYPE_B); pic->f.reference = !(h->pict_type == AV_PICTURE_TYPE_B);
ret = ff_get_buffer(avctx, &pic->f); ret = ff_get_buffer(avctx, &pic->f);
if (!h->edge_emu_buffer) {
h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
if (!h->edge_emu_buffer)
return AVERROR(ENOMEM);
}
h->linesize = pic->f.linesize[0]; h->linesize = pic->f.linesize[0];
h->uvlinesize = pic->f.linesize[1]; h->uvlinesize = pic->f.linesize[1];
...@@ -1225,8 +1228,8 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data, ...@@ -1225,8 +1228,8 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
(h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1; (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
} }
ff_draw_horiz_band(avctx, &h->dsp, s->cur_pic, s->last_pic->f.data[0] ? s->last_pic : NULL, ff_draw_horiz_band(avctx, NULL, s->cur_pic, s->last_pic->f.data[0] ? s->last_pic : NULL,
16 * h->mb_y, 16, h->picture_structure, 0, 1, 16 * h->mb_y, 16, h->picture_structure, 0, 0,
h->low_delay, h->mb_height * 16, h->mb_width * 16); h->low_delay, h->mb_height * 16, h->mb_width * 16);
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment