/*
 * H.26L/H.264/AVC/JVT/14496-10/... decoder
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part10 codec.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#define UNCHECKED_BITSTREAM_READER 1

#include "libavutil/avassert.h"
#include "libavutil/display.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/stereo3d.h"
#include "libavutil/timer.h"
#include "internal.h"
#include "bytestream.h"
#include "cabac.h"
#include "cabac_functions.h"
#include "error_resilience.h"
#include "avcodec.h"
#include "h264.h"
#include "h264dec.h"
#include "h2645_parse.h"
#include "h264data.h"
#include "h264chroma.h"
#include "h264_mvpred.h"
#include "h264_ps.h"
#include "golomb.h"
#include "mathops.h"
#include "me_cmp.h"
#include "mpegutils.h"
#include "profiles.h"
#include "rectangle.h"
#include "thread.h"
#include "vdpau_compat.h"

static int h264_decode_end(AVCodecContext *avctx);

const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };

62 63 64
/**
 * Report the number of reorder frames from the active SPS.
 * Returns 0 when no context or no SPS is available yet.
 */
int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;

    if (h && h->ps.sps)
        return h->ps.sps->num_reorder_frames;

    return 0;
}

68 69 70 71
/**
 * ERContext::decode_mb callback used for error concealment: reconstructs
 * one macroblock of slice context 0 by motion compensation from reference
 * list 0 with the motion vector supplied by the concealment code.
 * mv_dir/mv_type/mb_intra/mb_skipped are part of the ER callback signature
 * but are not used here.
 */
static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                              int (*mv)[2][4][2],
                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
{
    H264Context *h = opaque;
    H264SliceContext *sl = &h->slice_ctx[0];

    /* Position the slice context on the macroblock to conceal. */
    sl->mb_x = mb_x;
    sl->mb_y = mb_y;
    sl->mb_xy = mb_x + mb_y * h->mb_stride;
    memset(sl->non_zero_count_cache, 0, sizeof(sl->non_zero_count_cache));
    av_assert1(ref >= 0);
    /* FIXME: It is possible albeit uncommon that slice references
     * differ between slices. We take the easy approach and ignore
     * it for now. If this turns out to have any relevance in
     * practice then correct remapping should be added. */
    if (ref >= sl->ref_count[0])
        ref = 0;
    if (!sl->ref_list[0][ref].data[0]) {
        av_log(h->avctx, AV_LOG_DEBUG, "Reference not available for error concealing\n");
        ref = 0;
    }
    /* A fully usable reference has both field reference bits set. */
    if ((sl->ref_list[0][ref].reference&3) != 3) {
        av_log(h->avctx, AV_LOG_DEBUG, "Reference invalid\n");
        return;
    }
    fill_rectangle(&h->cur_pic.ref_index[0][4 * sl->mb_xy],
                   2, 2, 2, ref, 1);
    fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
    /* Apply the single concealment MV to the whole 16x16 macroblock. */
    fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8,
                   pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
    sl->mb_mbaff =
    sl->mb_field_decoding_flag = 0;
    ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);
}

104
/**
 * Notify the user application of a freshly decoded horizontal band via
 * AVCodecContext.draw_horiz_band.
 *
 * For field pictures y and height are doubled to address frame-height
 * coordinates, and the first field is only reported if the caller opted
 * in with SLICE_FLAG_ALLOW_FIELD.
 */
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl,
                             int y, int height)
{
    AVCodecContext *avctx = h->avctx;
    const AVFrame   *src  = h->cur_pic.f;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    int vshift = desc->log2_chroma_h;
    const int field_pic = h->picture_structure != PICT_FRAME;
    if (field_pic) {
        height <<= 1;
        y      <<= 1;
    }

    /* Clip the band to the visible picture. */
    height = FFMIN(height, avctx->height - y);

    if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
        return;

    if (avctx->draw_horiz_band) {
        int offset[AV_NUM_DATA_POINTERS];
        int i;

        /* Byte offset of the band start per plane; chroma rows are scaled
         * by the pixel format's vertical chroma shift. */
        offset[0] = y * src->linesize[0];
        offset[1] =
        offset[2] = (y >> vshift) * src->linesize[1];
        for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
            offset[i] = 0;

        emms_c();

        avctx->draw_horiz_band(avctx, src, offset,
                               y, h->picture_structure, height);
    }
}

139
/**
 * Free all per-stream decoding tables and the per-slice-context scratch
 * and error-resilience buffers. Safe on a partially allocated context:
 * av_freep()/av_buffer_pool_uninit() tolerate NULL and reset the pointers.
 */
void ff_h264_free_tables(H264Context *h)
{
    int i;

    av_freep(&h->intra4x4_pred_mode);
    av_freep(&h->chroma_pred_mode_table);
    av_freep(&h->cbp_table);
    av_freep(&h->mvd_table[0]);
    av_freep(&h->mvd_table[1]);
    av_freep(&h->direct_table);
    av_freep(&h->non_zero_count);
    av_freep(&h->slice_table_base);
    /* slice_table points into slice_table_base (see ff_h264_alloc_tables),
     * so it is now dangling and must be cleared. */
    h->slice_table = NULL;
    av_freep(&h->list_counts);

    av_freep(&h->mb2b_xy);
    av_freep(&h->mb2br_xy);

    av_buffer_pool_uninit(&h->qscale_table_pool);
    av_buffer_pool_uninit(&h->mb_type_pool);
    av_buffer_pool_uninit(&h->motion_val_pool);
    av_buffer_pool_uninit(&h->ref_index_pool);

    for (i = 0; i < h->nb_slice_ctx; i++) {
        H264SliceContext *sl = &h->slice_ctx[i];

        /* error-resilience buffers */
        av_freep(&sl->dc_val_base);
        av_freep(&sl->er.mb_index2xy);
        av_freep(&sl->er.error_status_table);
        av_freep(&sl->er.er_temp_buffer);

        /* lazily-allocated per-slice scratch buffers */
        av_freep(&sl->bipred_scratchpad);
        av_freep(&sl->edge_emu_buffer);
        av_freep(&sl->top_borders[0]);
        av_freep(&sl->top_borders[1]);

        sl->bipred_scratchpad_allocated = 0;
        sl->edge_emu_buffer_allocated   = 0;
        sl->top_borders_allocated[0]    = 0;
        sl->top_borders_allocated[1]    = 0;
    }
}

182 183
/**
 * Allocate the per-stream decoding tables, sized from the current
 * macroblock dimensions, and build the mb -> block coordinate lookup
 * tables.
 *
 * @return 0 on success; on allocation failure everything allocated so far
 *         is released via ff_h264_free_tables() and AVERROR(ENOMEM) is
 *         returned.
 */
int ff_h264_alloc_tables(H264Context *h)
{
    /* One extra macroblock row acts as a guard band. */
    const int big_mb_num = h->mb_stride * (h->mb_height + 1);
    /* Per-row tables exist once per slice context (slice threading). */
    const int row_mb_num = 2*h->mb_stride*FFMAX(h->nb_slice_ctx, 1);
    int x, y;

    FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->intra4x4_pred_mode,
                      row_mb_num, 8 * sizeof(uint8_t), fail)
    h->slice_ctx[0].intra4x4_pred_mode = h->intra4x4_pred_mode;

    FF_ALLOCZ_OR_GOTO(h->avctx, h->non_zero_count,
                      big_mb_num * 48 * sizeof(uint8_t), fail)
    FF_ALLOCZ_OR_GOTO(h->avctx, h->slice_table_base,
                      (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)
    FF_ALLOCZ_OR_GOTO(h->avctx, h->cbp_table,
                      big_mb_num * sizeof(uint16_t), fail)
    FF_ALLOCZ_OR_GOTO(h->avctx, h->chroma_pred_mode_table,
                      big_mb_num * sizeof(uint8_t), fail)
    FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->mvd_table[0],
                      row_mb_num, 16 * sizeof(uint8_t), fail);
    FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->mvd_table[1],
                      row_mb_num, 16 * sizeof(uint8_t), fail);
    h->slice_ctx[0].mvd_table[0] = h->mvd_table[0];
    h->slice_ctx[0].mvd_table[1] = h->mvd_table[1];

    FF_ALLOCZ_OR_GOTO(h->avctx, h->direct_table,
                      4 * big_mb_num * sizeof(uint8_t), fail);
    FF_ALLOCZ_OR_GOTO(h->avctx, h->list_counts,
                      big_mb_num * sizeof(uint8_t), fail)

    /* -1 marks "no slice"; slice_table skips the guard row and column. */
    memset(h->slice_table_base, -1,
           (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
    h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;

    FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2b_xy,
                      big_mb_num * sizeof(uint32_t), fail);
    FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2br_xy,
                      big_mb_num * sizeof(uint32_t), fail);
    /* Macroblock index -> 4x4-block coordinate lookup tables. */
    for (y = 0; y < h->mb_height; y++)
        for (x = 0; x < h->mb_width; x++) {
            const int mb_xy = x + y * h->mb_stride;
            const int b_xy  = 4 * x + 4 * y * h->b_stride;

            h->mb2b_xy[mb_xy]  = b_xy;
            /* Without FMO the br table only needs two rows of storage. */
            h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
        }

    return 0;

fail:
    ff_h264_free_tables(h);
    return AVERROR(ENOMEM);
}

236 237 238 239
/**
 * Init context
 * Allocate buffers which are not shared amongst multiple threads.
 *
 * Error-resilience state is only initialized for the first slice context
 * (sl == h->slice_ctx); other contexts get a zeroed ERContext.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
{
    ERContext *er = &sl->er;
    int mb_array_size = h->mb_height * h->mb_stride;
    int y_size  = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
    int c_size  = h->mb_stride * (h->mb_height + 1);
    int yc_size = y_size + 2   * c_size;
    int x, y, i;

    /* These cache positions can never hold a valid neighbour; mark them
     * unavailable once, here. */
    sl->ref_cache[0][scan8[5]  + 1] =
    sl->ref_cache[0][scan8[7]  + 1] =
    sl->ref_cache[0][scan8[13] + 1] =
    sl->ref_cache[1][scan8[5]  + 1] =
    sl->ref_cache[1][scan8[7]  + 1] =
    sl->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;

    if (sl != h->slice_ctx) {
        memset(er, 0, sizeof(*er));
    } else
    if (CONFIG_ERROR_RESILIENCE) {

        /* init ER */
        er->avctx          = h->avctx;
        er->decode_mb      = h264_er_decode_mb;
        er->opaque         = h;
        er->quarter_sample = 1;

        er->mb_num      = h->mb_num;
        er->mb_width    = h->mb_width;
        er->mb_height   = h->mb_height;
        er->mb_stride   = h->mb_stride;
        er->b8_stride   = h->mb_width * 2 + 1;

        // error resilience code looks cleaner with this
        FF_ALLOCZ_OR_GOTO(h->avctx, er->mb_index2xy,
                          (h->mb_num + 1) * sizeof(int), fail);

        for (y = 0; y < h->mb_height; y++)
            for (x = 0; x < h->mb_width; x++)
                er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;

        /* One-past-the-end sentinel entry. */
        er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
                                                      h->mb_stride + h->mb_width;

        FF_ALLOCZ_OR_GOTO(h->avctx, er->error_status_table,
                          mb_array_size * sizeof(uint8_t), fail);

        FF_ALLOC_OR_GOTO(h->avctx, er->er_temp_buffer,
                         h->mb_height * h->mb_stride, fail);

        /* DC value planes: luma followed by the two chroma planes. */
        FF_ALLOCZ_OR_GOTO(h->avctx, sl->dc_val_base,
                          yc_size * sizeof(int16_t), fail);
        er->dc_val[0] = sl->dc_val_base + h->mb_width * 2 + 2;
        er->dc_val[1] = sl->dc_val_base + y_size + h->mb_stride + 1;
        er->dc_val[2] = er->dc_val[1] + c_size;
        /* Neutral default DC value used by concealment. */
        for (i = 0; i < yc_size; i++)
            sl->dc_val_base[i] = 1024;
    }

    return 0;

fail:
    return AVERROR(ENOMEM); // ff_h264_free_tables will clean up for us
}

305
/**
 * Set an H264Context to its stream-independent defaults and allocate the
 * slice contexts and the DPB/current/error-concealment frame buffers.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure (partially
 *         allocated state is left for the caller's cleanup path to free)
 */
static int h264_init_context(AVCodecContext *avctx, H264Context *h)
{
    int i;

    h->avctx                 = avctx;
    h->backup_width          = -1;
    h->backup_height         = -1;
    h->backup_pix_fmt        = AV_PIX_FMT_NONE;
    h->cur_chroma_format_idc = -1;

    h->picture_structure     = PICT_FRAME;
    h->workaround_bugs       = avctx->workaround_bugs;
    h->flags                 = avctx->flags;
    h->poc.prev_poc_msb      = 1 << 16;
    h->recovery_frame        = -1;
    h->frame_recovered       = 0;
    h->poc.prev_frame_num    = -1;
    /* -1 == "not signalled yet" for these SEI fields. */
    h->sei.frame_packing.frame_packing_arrangement_cancel_flag = -1;
    h->sei.unregistered.x264_build = -1;

    h->next_outputed_poc = INT_MIN;
    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
        h->last_pocs[i] = INT_MIN;

    ff_h264_sei_uninit(&h->sei);

    avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;

    /* One slice context per thread when slice threading, else one. */
    h->nb_slice_ctx = (avctx->active_thread_type & FF_THREAD_SLICE) ? avctx->thread_count : 1;
    h->slice_ctx = av_mallocz_array(h->nb_slice_ctx, sizeof(*h->slice_ctx));
    if (!h->slice_ctx) {
        h->nb_slice_ctx = 0;
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
        h->DPB[i].f = av_frame_alloc();
        if (!h->DPB[i].f)
            return AVERROR(ENOMEM);
    }

    h->cur_pic.f = av_frame_alloc();
    if (!h->cur_pic.f)
        return AVERROR(ENOMEM);

    h->last_pic_for_ec.f = av_frame_alloc();
    if (!h->last_pic_for_ec.f)
        return AVERROR(ENOMEM);

    for (i = 0; i < h->nb_slice_ctx; i++)
        h->slice_ctx[i].h264 = h;

    return 0;
}

360 361 362 363 364
/**
 * Free everything owned by the decoder: reference lists, tables, DPB
 * pictures, slice contexts, SEI/parameter-set state and the NAL packet
 * buffers. Also used as the cleanup path of ff_h264_decode_init().
 */
static av_cold int h264_decode_end(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;
    int i;

    ff_h264_remove_all_refs(h);
    ff_h264_free_tables(h);

    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
        ff_h264_unref_picture(h, &h->DPB[i]);
        av_frame_free(&h->DPB[i].f);
    }
    /* delayed_pic[] only holds pointers into the DPB freed above. */
    memset(h->delayed_pic, 0, sizeof(h->delayed_pic));

    h->cur_pic_ptr = NULL;

    av_freep(&h->slice_ctx);
    h->nb_slice_ctx = 0;

    ff_h264_sei_uninit(&h->sei);
    ff_h264_ps_uninit(&h->ps);

    ff_h2645_packet_uninit(&h->pkt);

    ff_h264_unref_picture(h, &h->cur_pic);
    av_frame_free(&h->cur_pic.f);
    ff_h264_unref_picture(h, &h->last_pic_for_ec);
    av_frame_free(&h->last_pic_for_ec.f);

    return 0;
}

static AVOnce h264_vlc_init = AV_ONCE_INIT;

394 395 396 397 398 399 400 401 402
/**
 * Decoder init entry point: set up the context, the (process-wide,
 * once-only) VLC tables, parse extradata if present and emit warnings
 * for unsafe option combinations.
 *
 * @return 0 on success, a negative AVERROR on failure
 */
av_cold int ff_h264_decode_init(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;
    int ret;

    ret = h264_init_context(avctx, h);
    if (ret < 0)
        return ret;

    /* VLC tables are global; build them exactly once per process. */
    ret = ff_thread_once(&h264_vlc_init, ff_h264_decode_init_vlc);
    if (ret != 0) {
        av_log(avctx, AV_LOG_ERROR, "pthread_once has failed.");
        return AVERROR_UNKNOWN;
    }

    /* H.264 uses two "ticks" per frame (field-based timing); rescale the
     * time base accordingly, guarding against overflow of .den. */
    if (avctx->ticks_per_frame == 1) {
        if(h->avctx->time_base.den < INT_MAX/2) {
            h->avctx->time_base.den *= 2;
        } else
            h->avctx->time_base.num /= 2;
    }
    avctx->ticks_per_frame = 2;

    if (avctx->extradata_size > 0 && avctx->extradata) {
        ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
                                       &h->ps, &h->is_avc, &h->nal_length_size,
                                       avctx->err_recognition, avctx);
        if (ret < 0) {
            h264_decode_end(avctx);
            return ret;
        }
    }

    if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
        h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
        h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
    }

    avctx->internal->allocate_progress = 1;

    ff_h264_flush_change(h);

    /* "auto" (-1) error resilience is disabled under slice threading. */
    if (h->enable_er < 0 && (avctx->active_thread_type & FF_THREAD_SLICE))
        h->enable_er = 0;

    if (h->enable_er && (avctx->active_thread_type & FF_THREAD_SLICE)) {
        av_log(avctx, AV_LOG_WARNING,
               "Error resilience with slice threads is enabled. It is unsafe and unsupported and may crash. "
               "Use it at your own risk\n");
    }

    return 0;
}

448
#if HAVE_THREADS
449 450 451
/**
 * Frame-threading init for a worker-thread copy of the context: wipe the
 * copied H264Context and rebuild it with h264_init_context(). The main
 * context (is_copy == 0) was already initialized by ff_h264_decode_init()
 * and is left untouched.
 */
static int decode_init_thread_copy(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;
    int ret;

    if (!avctx->internal->is_copy)
        return 0;

    /* The shallow copy shares pointers with the source thread; discard it
     * entirely and reinitialize from scratch. */
    memset(h, 0, sizeof(*h));

    ret = h264_init_context(avctx, h);
    if (ret < 0)
        return ret;

    h->context_initialized = 0;

    return 0;
}
467
#endif
468 469

/**
 * Run setup operations that must be run after slice header decoding.
 * This includes finding the next displayed frame.
 *
 * Maintains the delayed-picture (reorder) buffer, grows has_b_frames when
 * out-of-order POCs are observed, and selects h->next_output_pic.
 *
 * @param h h264 master context
 * @param setup_finished enough NALs have been read that we can call
 * ff_thread_finish_setup()
 */
static void decode_postinit(H264Context *h, int setup_finished)
{
    const SPS *sps = h->ps.sps;
    H264Picture *out = h->cur_pic_ptr;
    H264Picture *cur = h->cur_pic_ptr;
    int i, pics, out_of_order, out_idx;

    /* An output picture was already chosen for this packet. */
    if (h->next_output_pic)
        return;

    if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
        /* FIXME: if we have two PAFF fields in one packet, we can't start
         * the next thread here. If we have one field per packet, we can.
         * The check in decode_nal_units() is not good enough to find this
         * yet, so we assume the worst for now. */
        // if (setup_finished)
        //    ff_thread_finish_setup(h->avctx);
        if (cur->field_poc[0] == INT_MAX && cur->field_poc[1] == INT_MAX)
            return;
        if (h->avctx->hwaccel || h->missing_fields <=1)
            return;
    }

    cur->mmco_reset = h->mmco_reset;
    h->mmco_reset = 0;

    // FIXME do something with unavailable reference frames

    /* Sort B-frames into display order */
    if (sps->bitstream_restriction_flag ||
        h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
        h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
    }

    /* Insert cur->poc into the sorted last_pocs[] window; i ends up as the
     * insertion position, so MAX_DELAYED_PIC_COUNT - i estimates how far
     * out of order this picture is. */
    for (i = 0; 1; i++) {
        if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
            if(i)
                h->last_pocs[i-1] = cur->poc;
            break;
        } else if(i) {
            h->last_pocs[i-1]= h->last_pocs[i];
        }
    }
    out_of_order = MAX_DELAYED_PIC_COUNT - i;
    if(   cur->f->pict_type == AV_PICTURE_TYPE_B
       || (h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > INT_MIN && h->last_pocs[MAX_DELAYED_PIC_COUNT-1] - h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > 2))
        out_of_order = FFMAX(out_of_order, 1);
    if (out_of_order == MAX_DELAYED_PIC_COUNT) {
        av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
        for (i = 1; i < MAX_DELAYED_PIC_COUNT; i++)
            h->last_pocs[i] = INT_MIN;
        h->last_pocs[0] = cur->poc;
        cur->mmco_reset = 1;
    } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
        int loglevel = h->avctx->frame_number > 1 ? AV_LOG_WARNING : AV_LOG_VERBOSE;
        av_log(h->avctx, loglevel, "Increasing reorder buffer to %d\n", out_of_order);
        h->avctx->has_b_frames = out_of_order;
    }

    pics = 0;
    while (h->delayed_pic[pics])
        pics++;

    av_assert0(pics <= MAX_DELAYED_PIC_COUNT);

    h->delayed_pic[pics++] = cur;
    /* Keep the picture alive while it sits in the reorder buffer. */
    if (cur->reference == 0)
        cur->reference = DELAYED_PIC_REF;

    /* Pick the lowest-POC delayed picture, stopping at key frames and
     * MMCO resets (which restart output ordering). */
    out     = h->delayed_pic[0];
    out_idx = 0;
    for (i = 1; h->delayed_pic[i] &&
                !h->delayed_pic[i]->f->key_frame &&
                !h->delayed_pic[i]->mmco_reset;
         i++)
        if (h->delayed_pic[i]->poc < out->poc) {
            out     = h->delayed_pic[i];
            out_idx = i;
        }
    if (h->avctx->has_b_frames == 0 &&
        (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset))
        h->next_outputed_poc = INT_MIN;
    out_of_order = out->poc < h->next_outputed_poc;

    if (out_of_order || pics > h->avctx->has_b_frames) {
        out->reference &= ~DELAYED_PIC_REF;
        /* Remove the chosen picture from the delayed list. */
        for (i = out_idx; h->delayed_pic[i]; i++)
            h->delayed_pic[i] = h->delayed_pic[i + 1];
    }
    if (!out_of_order && pics > h->avctx->has_b_frames) {
        h->next_output_pic = out;
        if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset)) {
            h->next_outputed_poc = INT_MIN;
        } else
            h->next_outputed_poc = out->poc;
    } else {
        av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
    }

    if (h->next_output_pic) {
        if (h->next_output_pic->recovered) {
            // We have reached an recovery point and all frames after it in
            // display order are "recovered".
            h->frame_recovered |= FRAME_RECOVERED_SEI;
        }
        h->next_output_pic->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);
    }

    if (setup_finished && !h->avctx->hwaccel) {
        ff_thread_finish_setup(h->avctx);

        if (h->avctx->active_thread_type & FF_THREAD_FRAME)
            h->setup_finished = 1;
    }
}

593
/**
Loic Le Loarer's avatar
Loic Le Loarer committed
594
 * instantaneous decoder refresh.
595
 */
596 597
static void idr(H264Context *h)
{
598
    int i;
599
    ff_h264_remove_all_refs(h);
600
    h->poc.prev_frame_num        =
601 602
    h->poc.prev_frame_num_offset = 0;
    h->poc.prev_poc_msb          = 1<<16;
603
    h->poc.prev_poc_lsb          = 0;
604 605
    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
        h->last_pocs[i] = INT_MIN;
606 607
}

608
/* forget old pics after a seek: reset output/POC state, drop references
 * and SEI state, and purge the current picture from the reorder buffer.
 * Does not touch the DPB frames themselves (see flush_dpb()). */
void ff_h264_flush_change(H264Context *h)
{
    int i, j;

    h->next_outputed_poc = INT_MIN;
    h->prev_interlaced_frame = 1;
    idr(h);

    h->poc.prev_frame_num = -1;
    if (h->cur_pic_ptr) {
        h->cur_pic_ptr->reference = 0;
        /* Compact delayed_pic[], removing the current picture. */
        for (j=i=0; h->delayed_pic[i]; i++)
            if (h->delayed_pic[i] != h->cur_pic_ptr)
                h->delayed_pic[j++] = h->delayed_pic[i];
        h->delayed_pic[j] = NULL;
    }
    ff_h264_unref_picture(h, &h->last_pic_for_ec);

    h->first_field = 0;
    ff_h264_sei_uninit(&h->sei);
    h->recovery_frame = -1;
    h->frame_recovered = 0;
    h->current_slice = 0;
    h->mmco_reset = 1;
}

/* forget old pics after a seek: full flush callback — on top of
 * ff_h264_flush_change() it also unrefs every DPB picture, frees the
 * decoding tables and marks the context uninitialized. */
static void flush_dpb(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;
    int i;

    memset(h->delayed_pic, 0, sizeof(h->delayed_pic));

    ff_h264_flush_change(h);

    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
        ff_h264_unref_picture(h, &h->DPB[i]);
    h->cur_pic_ptr = NULL;
    ff_h264_unref_picture(h, &h->cur_pic);

    h->mb_y = 0;

    /* Tables will be reallocated when the next sequence is set up. */
    ff_h264_free_tables(h);
    h->context_initialized = 0;
}

656
#if FF_API_CAP_VDPAU
657
static const uint8_t start_code[] = { 0x00, 0x00, 0x01 };
658
#endif
659

660
/**
 * Return the index of the last NAL unit in h->pkt that must be decoded
 * before ff_thread_finish_setup() may be called when frame threading,
 * or a negative error code on a bitstream-reader failure.
 */
static int get_last_needed_nal(H264Context *h)
{
    int nals_needed = 0;
    int first_slice = 0;
    int i;
    int ret;

    for (i = 0; i < h->pkt.nb_nals; i++) {
        H2645NAL *nal = &h->pkt.nals[i];
        GetBitContext gb;

        /* packets can sometimes contain multiple PPS/SPS,
         * e.g. two PAFF field pictures in one packet, or a demuxer
         * which splits NALs strangely if so, when frame threading we
         * can't start the next thread until we've read all of them */
        switch (nal->type) {
        case H264_NAL_SPS:
        case H264_NAL_PPS:
            nals_needed = i;
            break;
        case H264_NAL_DPA:
        case H264_NAL_IDR_SLICE:
        case H264_NAL_SLICE:
            /* Skip the NAL header byte, then read first_mb_in_slice. */
            ret = init_get_bits8(&gb, nal->data + 1, (nal->size - 1));
            if (ret < 0)
                return ret;
            /* A slice starting a new picture (first_mb_in_slice == 0, or a
             * different slice NAL type than the first one seen) pushes the
             * limit forward. */
            if (!get_ue_golomb_long(&gb) ||  // first_mb_in_slice
                !first_slice ||
                first_slice != nal->type)
                nals_needed = i;
            if (!first_slice)
                first_slice = nal->type;
        }
    }

    return nals_needed;
}

698
/* Dump the fields of a decoded Green Metadata SEI message at debug log
 * level; fixed-point metrics are printed scaled to [0,1] floats. */
static void debug_green_metadata(const H264SEIGreenMetaData *gm, void *logctx)
{
    av_log(logctx, AV_LOG_DEBUG, "Green Metadata Info SEI message\n");
    av_log(logctx, AV_LOG_DEBUG, "  green_metadata_type: %d\n", gm->green_metadata_type);

    if (gm->green_metadata_type == 0) {
        /* type 0: complexity metrics */
        av_log(logctx, AV_LOG_DEBUG, "  green_metadata_period_type: %d\n", gm->period_type);

        if (gm->period_type == 2)
            av_log(logctx, AV_LOG_DEBUG, "  green_metadata_num_seconds: %d\n", gm->num_seconds);
        else if (gm->period_type == 3)
            av_log(logctx, AV_LOG_DEBUG, "  green_metadata_num_pictures: %d\n", gm->num_pictures);

        av_log(logctx, AV_LOG_DEBUG, "  SEI GREEN Complexity Metrics: %f %f %f %f\n",
               (float)gm->percent_non_zero_macroblocks/255,
               (float)gm->percent_intra_coded_macroblocks/255,
               (float)gm->percent_six_tap_filtering/255,
               (float)gm->percent_alpha_point_deblocking_instance/255);

    } else if (gm->green_metadata_type == 1) {
        /* type 1: objective quality (xsd) metric */
        av_log(logctx, AV_LOG_DEBUG, "  xsd_metric_type: %d\n", gm->xsd_metric_type);

        if (gm->xsd_metric_type == 0)
            av_log(logctx, AV_LOG_DEBUG, "  xsd_metric_value: %f\n",
                   (float)gm->xsd_metric_value/100);
    }
}

726
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
727
{
728
    AVCodecContext *const avctx = h->avctx;
729
    unsigned context_count = 0;
730
    int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
731
    int idr_cleared=0;
732
    int i, ret = 0;
733

734 735
    h->nal_unit_type= 0;

736
    h->max_contexts = h->nb_slice_ctx;
737
    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
738
        h->current_slice = 0;
739 740
        if (!h->first_field)
            h->cur_pic_ptr = NULL;
741
        ff_h264_sei_uninit(&h->sei);
742 743
    }

744 745 746 747 748 749 750
    if (h->nal_length_size == 4) {
        if (buf_size > 8 && AV_RB32(buf) == 1 && AV_RB32(buf+5) > (unsigned)buf_size) {
            h->is_avc = 0;
        }else if(buf_size > 3 && AV_RB32(buf) > 1 && AV_RB32(buf) <= (unsigned)buf_size)
            h->is_avc = 1;
    }

751
    ret = ff_h2645_packet_split(&h->pkt, buf, buf_size, avctx, h->is_avc,
752
                                h->nal_length_size, avctx->codec_id, avctx->flags2 & AV_CODEC_FLAG2_FAST);
753 754 755 756 757
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "Error splitting the input into NAL units.\n");
        return ret;
    }
758

759
    if (avctx->active_thread_type & FF_THREAD_FRAME)
760
        nals_needed = get_last_needed_nal(h);
761 762
    if (nals_needed < 0)
        return nals_needed;
763

764 765 766 767
    for (i = 0; i < h->pkt.nb_nals; i++) {
        H2645NAL *nal = &h->pkt.nals[i];
        H264SliceContext *sl = &h->slice_ctx[context_count];
        int err;
768

769
        if (avctx->skip_frame >= AVDISCARD_NONREF &&
770
            nal->ref_idc == 0 && nal->type != H264_NAL_SEI)
771
            continue;
772

773
again:
774 775 776
        // FIXME these should stop being context-global variables
        h->nal_ref_idc   = nal->ref_idc;
        h->nal_unit_type = nal->type;
777

778 779
        err = 0;
        switch (nal->type) {
780
        case H264_NAL_IDR_SLICE:
781 782 783 784 785 786 787 788 789 790
            if ((nal->data[1] & 0xFC) == 0x98) {
                av_log(h->avctx, AV_LOG_ERROR, "Invalid inter IDR frame\n");
                h->next_outputed_poc = INT_MIN;
                ret = -1;
                goto end;
            }
            if(!idr_cleared) {
                if (h->current_slice && (avctx->active_thread_type & FF_THREAD_SLICE)) {
                    av_log(h, AV_LOG_ERROR, "invalid mixed IDR / non IDR frames cannot be decoded in slice multithreading mode\n");
                    ret = AVERROR_INVALIDDATA;
791
                    goto end;
792
                }
793 794 795 796
                idr(h); // FIXME ensure we don't lose some frames if there is reordering
            }
            idr_cleared = 1;
            h->has_recovery_point = 1;
797
        case H264_NAL_SLICE:
798
            sl->gb = nal->gb;
799

800
            if ((err = ff_h264_decode_slice_header(h, sl, nal)))
801
                break;
802

803 804 805
            if (sl->redundant_pic_count > 0)
                break;

806 807 808
            if (h->current_slice == 1) {
                if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS))
                    decode_postinit(h, i >= nals_needed);
809

810
                if (h->avctx->hwaccel &&
811 812
                    (ret = h->avctx->hwaccel->start_frame(h->avctx, buf, buf_size)) < 0)
                    goto end;
813
#if FF_API_CAP_VDPAU
814 815 816
                if (CONFIG_H264_VDPAU_DECODER &&
                    h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
                    ff_vdpau_h264_picture_start(h);
817
#endif
818
            }
819

Timothy Gu's avatar
Timothy Gu committed
820 821 822 823 824 825
            if (avctx->hwaccel) {
                ret = avctx->hwaccel->decode_slice(avctx,
                                                   nal->raw_data,
                                                   nal->raw_size);
                if (ret < 0)
                    goto end;
826
#if FF_API_CAP_VDPAU
Timothy Gu's avatar
Timothy Gu committed
827 828 829 830 831 832 833 834
            } else if (CONFIG_H264_VDPAU_DECODER &&
                       h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU) {
                ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
                                        start_code,
                                        sizeof(start_code));
                ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
                                        nal->raw_data,
                                        nal->raw_size);
835
#endif
Timothy Gu's avatar
Timothy Gu committed
836 837
            } else
                context_count++;
838
            break;
839 840 841
        case H264_NAL_DPA:
        case H264_NAL_DPB:
        case H264_NAL_DPC:
842 843
            avpriv_request_sample(avctx, "data partitioning");
            break;
844
        case H264_NAL_SEI:
845
            ret = ff_h264_sei_decode(&h->sei, &nal->gb, &h->ps, avctx);
846
            h->has_recovery_point = h->has_recovery_point || h->sei.recovery_point.recovery_frame_cnt != -1;
847
            if (avctx->debug & FF_DEBUG_GREEN_MD)
848 849 850 851 852 853
                debug_green_metadata(&h->sei.green_metadata, h->avctx);
#if FF_API_AFD
FF_DISABLE_DEPRECATION_WARNINGS
            h->avctx->dtg_active_format = h->sei.afd.active_format_description;
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_AFD */
854 855 856
            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                goto end;
            break;
James Almer's avatar
James Almer committed
857
        case H264_NAL_SPS: {
858 859
            GetBitContext tmp_gb = nal->gb;
            if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
860
                break;
861 862
            av_log(h->avctx, AV_LOG_DEBUG,
                   "SPS decoding failure, trying again with the complete NAL\n");
863 864
            init_get_bits8(&tmp_gb, nal->raw_data + 1, nal->raw_size - 1);
            if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
865
                break;
866
            ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps, 1);
867
            break;
868
        }
869
        case H264_NAL_PPS:
870 871
            ret = ff_h264_decode_picture_parameter_set(&nal->gb, avctx, &h->ps,
                                                       nal->size_bits);
872 873 874
            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                goto end;
            break;
875 876 877 878 879 880
        case H264_NAL_AUD:
        case H264_NAL_END_SEQUENCE:
        case H264_NAL_END_STREAM:
        case H264_NAL_FILLER_DATA:
        case H264_NAL_SPS_EXT:
        case H264_NAL_AUXILIARY_SLICE:
881 882 883 884 885
            break;
        default:
            av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
                   nal->type, nal->size_bits);
        }
886

887 888 889 890 891 892
        if (context_count == h->max_contexts) {
            ret = ff_h264_execute_decode_slices(h, context_count);
            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                goto end;
            context_count = 0;
        }
893

894 895 896
        if (err < 0 || err == SLICE_SKIPED) {
            if (err < 0)
                av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
897
            sl->ref_count[0] = sl->ref_count[1] = sl->list_count = 0;
898
        } else if (err == SLICE_SINGLETHREAD) {
899 900 901 902 903 904
            if (context_count > 0) {
                ret = ff_h264_execute_decode_slices(h, context_count);
                if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                    goto end;
                context_count = 0;
            }
905
            /* Slice could not be decoded in parallel mode, restart. */
906 907
            sl               = &h->slice_ctx[0];
            goto again;
908 909
        }
    }
910 911 912 913 914
    if (context_count) {
        ret = ff_h264_execute_decode_slices(h, context_count);
        if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
            goto end;
    }
915

916
    ret = 0;
917
end:
918 919 920 921 922 923 924 925 926 927 928 929 930 931

#if CONFIG_ERROR_RESILIENCE
    /*
     * FIXME: Error handling code does not seem to support interlaced
     * when slices span multiple rows
     * The ff_er_add_slice calls don't work right for bottom
     * fields; they cause massive erroneous error concealing
     * Error marking covers both fields (top and bottom).
     * This causes a mismatched s->error_count
     * and a bad error table. Further, the error count goes to
     * INT_MAX when called for bottom field, because mb_y is
     * past end by one (callers fault) and resync_mb_y != 0
     * causes problems for the first MB line, too.
     */
932 933 934 935
    if (!FIELD_PICTURE(h) && h->current_slice &&
        h->ps.sps == (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data &&
        h->enable_er) {

936
        H264SliceContext *sl = h->slice_ctx;
937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961
        int use_last_pic = h->last_pic_for_ec.f->buf[0] && !sl->ref_count[0];

        ff_h264_set_erpic(&sl->er.cur_pic, h->cur_pic_ptr);

        if (use_last_pic) {
            ff_h264_set_erpic(&sl->er.last_pic, &h->last_pic_for_ec);
            sl->ref_list[0][0].parent = &h->last_pic_for_ec;
            memcpy(sl->ref_list[0][0].data, h->last_pic_for_ec.f->data, sizeof(sl->ref_list[0][0].data));
            memcpy(sl->ref_list[0][0].linesize, h->last_pic_for_ec.f->linesize, sizeof(sl->ref_list[0][0].linesize));
            sl->ref_list[0][0].reference = h->last_pic_for_ec.reference;
        } else if (sl->ref_count[0]) {
            ff_h264_set_erpic(&sl->er.last_pic, sl->ref_list[0][0].parent);
        } else
            ff_h264_set_erpic(&sl->er.last_pic, NULL);

        if (sl->ref_count[1])
            ff_h264_set_erpic(&sl->er.next_pic, sl->ref_list[1][0].parent);

        sl->er.ref_count = sl->ref_count[0];

        ff_er_frame_end(&sl->er);
        if (use_last_pic)
            memset(&sl->ref_list[0][0], 0, sizeof(sl->ref_list[0][0]));
    }
#endif /* CONFIG_ERROR_RESILIENCE */
962
    /* clean up */
963 964
    if (h->cur_pic_ptr && !h->droppable) {
        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
965
                                  h->picture_structure == PICT_BOTTOM_FIELD);
966 967
    }

968
    return (ret < 0) ? ret : buf_size;
969 970 971
}

/**
 * Return the number of bytes consumed for building the current frame.
 */
static int get_consumed_bytes(int pos, int buf_size)
{
    int consumed = pos;

    /* Never report zero progress, which could stall the caller's loop. */
    if (consumed == 0)
        consumed = 1;

    /* A position within 10 bytes of the end counts as the whole buffer. */
    if (buf_size < consumed + 10)
        consumed = buf_size;

    return consumed;
}

984
/**
 * Hand srcp's frame to the caller: reference it into dst, attach the
 * stereo-mode metadata, publish the output geometry on the codec context
 * (saving the previous values for the next decode call to restore), and
 * shift the data pointers by the crop offsets when cropping is requested.
 *
 * @return 0 on success, a negative error code from av_frame_ref() on failure
 */
static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
{
    AVFrame *src = srcp->f;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(src->format);
    int ret, i;

    ret = av_frame_ref(dst, src);
    if (ret < 0)
        return ret;

    av_dict_set(&dst->metadata, "stereo_mode",
                ff_h264_sei_stereo_mode(&h->sei.frame_packing), 0);

    /* Remember the current codec-context geometry; h264_decode_frame()
     * restores it from these backup fields on the next call. */
    h->backup_width   = h->avctx->width;
    h->backup_height  = h->avctx->height;
    h->backup_pix_fmt = h->avctx->pix_fmt;

    h->avctx->width   = dst->width;
    h->avctx->height  = dst->height;
    h->avctx->pix_fmt = dst->format;

    /* A recovery count of 0 means this frame is an exact recovery point. */
    if (srcp->sei_recovery_frame_cnt == 0)
        dst->key_frame = 1;

    if (srcp->crop) {
        /* Advance each plane pointer past the cropped top/left region. */
        for (i = 0; i < desc->nb_components; i++) {
            int hshift = i ? desc->log2_chroma_w : 0;
            int vshift = i ? desc->log2_chroma_h : 0;
            int off    = ((srcp->crop_left >> hshift) << h->pixel_shift) +
                          (srcp->crop_top  >> vshift) * dst->linesize[i];

            dst->data[i] += off;
        }
    }

    return 0;
}

1018 1019 1020 1021 1022 1023
/**
 * Check whether the buffer looks like standalone avcC-style extradata,
 * i.e. an AVCDecoderConfigurationRecord containing only SPS (NAL type 7)
 * and PPS (NAL type 8) units with 2-byte big-endian length prefixes.
 *
 * Fix: the previous version read buf[5], AV_RB16(p) and p[2] before any
 * bounds check, which could read past the end of the buffer (one caller
 * passes packet side data of arbitrary size).
 *
 * @param buf      candidate extradata
 * @param buf_size number of valid bytes in buf
 * @return 1 if the buffer parses as pure SPS+PPS extradata, 0 otherwise
 */
static int is_extra(const uint8_t *buf, int buf_size)
{
    int cnt;
    const uint8_t *p;

    /* Need the full 6-byte avcC header before reading the SPS count. */
    if (buf_size < 7)
        return 0;

    cnt = buf[5] & 0x1f; // numOfSequenceParameterSets
    p   = buf + 6;
    while (cnt--) {
        int nalsize;

        /* 2-byte length field plus at least the 1-byte NAL header. */
        if (buf_size - (p - buf) < 3)
            return 0;
        nalsize = AV_RB16(p) + 2;
        if (nalsize > buf_size - (p - buf) || (p[2] & 0x9F) != 7)
            return 0;
        p += nalsize;
    }
    /* numOfPictureParameterSets byte must still lie inside the buffer. */
    if (buf_size - (p - buf) < 1)
        return 0;
    cnt = *(p++);
    if (!cnt)
        return 0;
    while (cnt--) {
        int nalsize;

        if (buf_size - (p - buf) < 3)
            return 0;
        nalsize = AV_RB16(p) + 2;
        if (nalsize > buf_size - (p - buf) || (p[2] & 0x9F) != 8)
            return 0;
        p += nalsize;
    }
    return 1;
}

1040 1041
/**
 * Decode one packet worth of H.264 data and, when a picture becomes
 * available for presentation, return it through *data.
 *
 * @param avctx     codec context; priv_data is the H264Context
 * @param data      output AVFrame
 * @param got_frame set to 1 when *data holds a valid frame, 0 otherwise
 * @param avpkt     input packet; an empty packet (size 0) drains the
 *                  delayed-picture buffer at end of stream
 * @return number of bytes consumed from the packet, or a negative error code
 */
static int h264_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    H264Context *h     = avctx->priv_data;
    AVFrame *pict      = data;
    int buf_index      = 0;
    H264Picture *out;
    int i, out_idx;
    int ret;

    h->flags = avctx->flags;
    h->setup_finished = 0;

    /* Undo the temporary geometry override that output_frame() installed
     * on the previous call when it exposed a cropped frame. */
    if (h->backup_width != -1) {
        avctx->width    = h->backup_width;
        h->backup_width = -1;
    }
    if (h->backup_height != -1) {
        avctx->height    = h->backup_height;
        h->backup_height = -1;
    }
    if (h->backup_pix_fmt != AV_PIX_FMT_NONE) {
        avctx->pix_fmt    = h->backup_pix_fmt;
        h->backup_pix_fmt = AV_PIX_FMT_NONE;
    }

    ff_h264_unref_picture(h, &h->last_pic_for_ec);

    /* end of stream, output what is still in the buffers */
    if (buf_size == 0) {
 out:

        h->cur_pic_ptr = NULL;
        h->first_field = 0;

        // FIXME factorize this with the output code below
        /* Pick the lowest-POC delayed picture, stopping the scan at the
         * first key frame or MMCO reset (those must not be reordered past). */
        out     = h->delayed_pic[0];
        out_idx = 0;
        for (i = 1;
             h->delayed_pic[i] &&
             !h->delayed_pic[i]->f->key_frame &&
             !h->delayed_pic[i]->mmco_reset;
             i++)
            if (h->delayed_pic[i]->poc < out->poc) {
                out     = h->delayed_pic[i];
                out_idx = i;
            }

        /* Compact the delayed-picture list over the slot being output. */
        for (i = out_idx; h->delayed_pic[i]; i++)
            h->delayed_pic[i] = h->delayed_pic[i + 1];

        if (out) {
            out->reference &= ~DELAYED_PIC_REF;
            ret = output_frame(h, pict, out);
            if (ret < 0)
                return ret;
            *got_frame = 1;
        }

        return buf_index;
    }
    /* New extradata may arrive mid-stream as packet side data. */
    if (h->is_avc && av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, NULL)) {
        int side_size;
        uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
        if (is_extra(side, side_size))
            ff_h264_decode_extradata(side, side_size,
                                     &h->ps, &h->is_avc, &h->nal_length_size,
                                     avctx->err_recognition, avctx);
    }
    /* Heuristic: an avcC-like packet starting with version 1 and an SPS
     * (0x67) is treated as in-band extradata rather than picture data. */
    if(h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC && (buf[5]&0x1F) && buf[8]==0x67){
        if (is_extra(buf, buf_size))
            return ff_h264_decode_extradata(buf, buf_size,
                                            &h->ps, &h->is_avc, &h->nal_length_size,
                                            avctx->err_recognition, avctx);
    }

    buf_index = decode_nal_units(h, buf, buf_size);
    if (buf_index < 0)
        return AVERROR_INVALIDDATA;

    /* End-of-sequence with no current picture: flush via the drain path. */
    if (!h->cur_pic_ptr && h->nal_unit_type == H264_NAL_END_SEQUENCE) {
        av_assert0(buf_index <= buf_size);
        goto out;
    }

    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
        /* "Q264" packets are ignored without raising an error. */
        if (avctx->skip_frame >= AVDISCARD_NONREF ||
            buf_size >= 4 && !memcmp("Q264", buf, 4))
            return buf_size;
        av_log(avctx, AV_LOG_ERROR, "no frame!\n");
        return AVERROR_INVALIDDATA;
    }

    /* Finish the field/frame either at packet granularity or, in CHUNKS
     * mode, once all macroblock rows have been decoded. */
    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
        (h->mb_y >= h->mb_height && h->mb_height)) {
        if (avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)
            decode_postinit(h, 1);

        if ((ret = ff_h264_field_end(h, &h->slice_ctx[0], 0)) < 0)
            return ret;

        /* Wait for second field. */
        *got_frame = 0;
        if (h->next_output_pic && ((avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) ||
                                   (avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL) ||
                                   h->next_output_pic->recovered)) {
            if (!h->next_output_pic->recovered)
                h->next_output_pic->f->flags |= AV_FRAME_FLAG_CORRUPT;

            /* An INT_MAX field POC marks a missing field: duplicate the
             * decoded field into the missing one (software decode only). */
            if (!h->avctx->hwaccel &&
                 (h->next_output_pic->field_poc[0] == INT_MAX ||
                  h->next_output_pic->field_poc[1] == INT_MAX)
            ) {
                int p;
                AVFrame *f = h->next_output_pic->f;
                int field = h->next_output_pic->field_poc[0] == INT_MAX;
                uint8_t *dst_data[4];
                int linesizes[4];
                const uint8_t *src_data[4];

                av_log(h->avctx, AV_LOG_DEBUG, "Duplicating field %d to fill missing\n", field);

                /* Double the stride so each copy touches only one field. */
                for (p = 0; p<4; p++) {
                    dst_data[p] = f->data[p] + (field^1)*f->linesize[p];
                    src_data[p] = f->data[p] +  field   *f->linesize[p];
                    linesizes[p] = 2*f->linesize[p];
                }

                av_image_copy(dst_data, linesizes, src_data, linesizes,
                              f->format, f->width, f->height>>1);
            }

            ret = output_frame(h, pict, h->next_output_pic);
            if (ret < 0)
                return ret;
            *got_frame = 1;
            if (CONFIG_MPEGVIDEO) {
                ff_print_debug_info2(h->avctx, pict, NULL,
                                    h->next_output_pic->mb_type,
                                    h->next_output_pic->qscale_table,
                                    h->next_output_pic->motion_val,
                                    NULL,
                                    h->mb_width, h->mb_height, h->mb_stride, 1);
            }
        }
    }

    av_assert0(pict->buf[0] || !*got_frame);

    ff_h264_unref_picture(h, &h->last_pic_for_ec);

    return get_consumed_bytes(buf_index, buf_size);
}

1196 1197 1198
#define OFFSET(x) offsetof(H264Context, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption h264_options[] = {
1199
    {"is_avc", "is avc", offsetof(H264Context, is_avc), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, 0},
1200
    {"nal_length_size", "nal_length_size", offsetof(H264Context, nal_length_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 4, 0},
1201
    { "enable_er", "Enable error resilience on damaged frames (unsafe)", OFFSET(enable_er), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VD },
1202 1203 1204 1205
    { NULL },
};

/* AVClass wiring h264_options into the software decoder's priv_class. */
static const AVClass h264_class = {
    .class_name = "H264 Decoder",
    .item_name  = av_default_item_name,
    .option     = h264_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

1212
/* Registration of the software H.264 decoder. */
AVCodec ff_h264_decoder = {
    .name                  = "h264",
    .long_name             = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = AV_CODEC_ID_H264,
    .priv_data_size        = sizeof(H264Context),
    .init                  = ff_h264_decode_init,
    .close                 = h264_decode_end,
    .decode                = h264_decode_frame,
    .capabilities          = /*AV_CODEC_CAP_DRAW_HORIZ_BAND |*/ AV_CODEC_CAP_DR1 |
                             AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS |
                             AV_CODEC_CAP_FRAME_THREADS,
    .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE,
    .flush                 = flush_dpb,
    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
    .profiles              = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
    .priv_class            = &h264_class,
};

1232
#if CONFIG_H264_VDPAU_DECODER && FF_API_VDPAU
1233 1234 1235 1236 1237 1238 1239
/* AVClass for the deprecated VDPAU decoder variant (same option table). */
static const AVClass h264_vdpau_class = {
    .class_name = "H264 VDPAU Decoder",
    .item_name  = av_default_item_name,
    .option     = h264_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

1240 1241
/* Registration of the legacy VDPAU-accelerated decoder; compiled only
 * under CONFIG_H264_VDPAU_DECODER && FF_API_VDPAU (see enclosing #if). */
AVCodec ff_h264_vdpau_decoder = {
    .name           = "h264_vdpau",
    .long_name      = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .priv_data_size = sizeof(H264Context),
    .init           = ff_h264_decode_init,
    .close          = h264_decode_end,
    .decode         = h264_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HWACCEL_VDPAU,
    .flush          = flush_dpb,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264,
                                                     AV_PIX_FMT_NONE},
    .profiles       = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
    .priv_class     = &h264_vdpau_class,
};
#endif