vp56.c
/*
 * Copyright (C) 2006  Aurelien Jacobs <aurel@gnuage.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * VP5 and VP6 compatible video decoder (common features)
 */

#include "avcodec.h"
#include "bytestream.h"

#include "vp56.h"
#include "vp56data.h"


void ff_vp56_init_dequant(VP56Context *s, int quantizer)
{
    s->quantizer = quantizer;
    s->dequant_dc = vp56_dc_dequant[quantizer] << 2;
    s->dequant_ac = vp56_ac_dequant[quantizer] << 2;
    memset(s->qscale_table, quantizer, s->mb_width);
}

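/* Scan the 12 candidate positions around the macroblock at (row, col) and
 * collect up to two distinct non-zero motion vectors from neighbouring
 * macroblocks that reference ref_frame. The candidates are stored in
 * s->vector_candidate[]; the return value serves as the context for
 * macroblock type parsing. */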
static int vp56_get_vectors_predictors(VP56Context *s, int row, int col,
                                       VP56Frame ref_frame)
{
    int nb_pred = 0;
    VP56mv vect[2] = {{0,0}, {0,0}};
    int pos, offset;
    VP56mv mvp;

    for (pos=0; pos<12; pos++) {
        mvp.x = col + vp56_candidate_predictor_pos[pos][0];
        mvp.y = row + vp56_candidate_predictor_pos[pos][1];
        if (mvp.x < 0 || mvp.x >= s->mb_width ||
            mvp.y < 0 || mvp.y >= s->mb_height)
            continue;
        offset = mvp.x + s->mb_width*mvp.y;

        if (vp56_reference_frame[s->macroblocks[offset].type] != ref_frame)
            continue;
        if ((s->macroblocks[offset].mv.x == vect[0].x &&
             s->macroblocks[offset].mv.y == vect[0].y) ||
            (s->macroblocks[offset].mv.x == 0 &&
             s->macroblocks[offset].mv.y == 0))
            continue;

        vect[nb_pred++] = s->macroblocks[offset].mv;
        if (nb_pred > 1) {
            nb_pred = -1;
            break;
        }
        s->vector_candidate_pos = pos;
    }

    s->vector_candidate[0] = vect[0];
    s->vector_candidate[1] = vect[1];

    return nb_pred+1;
}

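/* Read the macroblock type statistics updates from the bitstream, then
 * rebuild the probability tables used to parse the macroblock type as a
 * function of the previous macroblock type. */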
static void vp56_parse_mb_type_models(VP56Context *s)
{
    VP56RangeCoder *c = &s->c;
    VP56Model *model = s->modelp;
    int i, ctx, type;

    for (ctx=0; ctx<3; ctx++) {
        if (vp56_rac_get_prob(c, 174)) {
            int idx = vp56_rac_gets(c, 4);
            memcpy(model->mb_types_stats[ctx],
                   vp56_pre_def_mb_type_stats[idx][ctx],
                   sizeof(model->mb_types_stats[ctx]));
        }
        if (vp56_rac_get_prob(c, 254)) {
            for (type=0; type<10; type++) {
                for(i=0; i<2; i++) {
                    if (vp56_rac_get_prob(c, 205)) {
                        int delta, sign = vp56_rac_get(c);

                        delta = vp56_rac_get_tree(c, vp56_pmbtm_tree,
                                                  vp56_mb_type_model_model);
                        if (!delta)
                            delta = 4 * vp56_rac_gets(c, 7);
                        model->mb_types_stats[ctx][type][i] += (delta ^ -sign) + sign;
                    }
                }
            }
        }
    }

    /* compute MB type probability tables based on previous MB type */
    for (ctx=0; ctx<3; ctx++) {
        int p[10];

        for (type=0; type<10; type++)
            p[type] = 100 * model->mb_types_stats[ctx][type][1];

        for (type=0; type<10; type++) {
            int p02, p34, p0234, p17, p56, p89, p5689, p156789;

            /* conservative MB type probability */
            model->mb_type[ctx][type][0] = 255 - (255 * model->mb_types_stats[ctx][type][0]) / (1 + model->mb_types_stats[ctx][type][0] + model->mb_types_stats[ctx][type][1]);

            p[type] = 0;    /* same MB type => weight is null */

            /* binary tree parsing probabilities */
            p02 = p[0] + p[2];
            p34 = p[3] + p[4];
            p0234 = p02 + p34;
            p17 = p[1] + p[7];
            p56 = p[5] + p[6];
            p89 = p[8] + p[9];
            p5689 = p56 + p89;
            p156789 = p17 + p5689;

            model->mb_type[ctx][type][1] = 1 + 255 * p0234/(1+p0234+p156789);
            model->mb_type[ctx][type][2] = 1 + 255 * p02  / (1+p0234);
            model->mb_type[ctx][type][3] = 1 + 255 * p17  / (1+p156789);
            model->mb_type[ctx][type][4] = 1 + 255 * p[0] / (1+p02);
            model->mb_type[ctx][type][5] = 1 + 255 * p[3] / (1+p34);
            model->mb_type[ctx][type][6] = 1 + 255 * p[1] / (1+p17);
            model->mb_type[ctx][type][7] = 1 + 255 * p56  / (1+p5689);
            model->mb_type[ctx][type][8] = 1 + 255 * p[5] / (1+p56);
            model->mb_type[ctx][type][9] = 1 + 255 * p[8] / (1+p89);

            /* restore initial value */
            p[type] = 100 * model->mb_types_stats[ctx][type][1];
        }
    }
}

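/* Parse one macroblock type: either keep prev_type or walk the binary tree
 * using the probabilities computed in vp56_parse_mb_type_models(). */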
static VP56mb vp56_parse_mb_type(VP56Context *s,
                                 VP56mb prev_type, int ctx)
{
    uint8_t *mb_type_model = s->modelp->mb_type[ctx][prev_type];
    VP56RangeCoder *c = &s->c;

    if (vp56_rac_get_prob(c, mb_type_model[0]))
        return prev_type;
    else
        return vp56_rac_get_tree(c, vp56_pmbt_tree, mb_type_model);
}

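/* Decode the four luma block vectors of a VP56_MB_INTER_4V macroblock;
 * the chroma vectors are derived from the average of the luma vectors. */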
static void vp56_decode_4mv(VP56Context *s, int row, int col)
{
    VP56mv mv = {0,0};
    int type[4];
    int b;

    /* parse each block type */
    for (b=0; b<4; b++) {
        type[b] = vp56_rac_gets(&s->c, 2);
        if (type[b])
            type[b]++;  /* only returns 0, 2, 3 or 4 (all INTER_PF) */
    }

    /* get vectors */
    for (b=0; b<4; b++) {
        switch (type[b]) {
            case VP56_MB_INTER_NOVEC_PF:
                s->mv[b] = (VP56mv) {0,0};
                break;
            case VP56_MB_INTER_DELTA_PF:
                s->parse_vector_adjustment(s, &s->mv[b]);
                break;
            case VP56_MB_INTER_V1_PF:
                s->mv[b] = s->vector_candidate[0];
                break;
            case VP56_MB_INTER_V2_PF:
                s->mv[b] = s->vector_candidate[1];
                break;
        }
        mv.x += s->mv[b].x;
        mv.y += s->mv[b].y;
    }

    /* this is the one selected for the whole MB for prediction */
    s->macroblocks[row * s->mb_width + col].mv = s->mv[3];

    /* chroma vectors are average luma vectors */
    if (s->avctx->codec->id == AV_CODEC_ID_VP5) {
        s->mv[4].x = s->mv[5].x = RSHIFT(mv.x,2);
        s->mv[4].y = s->mv[5].y = RSHIFT(mv.y,2);
    } else {
        s->mv[4] = s->mv[5] = (VP56mv) {mv.x/4, mv.y/4};
    }
}

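/* Decode the type and motion vector(s) of the macroblock at (row, col) and
 * assign the selected vector to all six blocks (the 4V case is handled by
 * vp56_decode_4mv()). */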
static VP56mb vp56_decode_mv(VP56Context *s, int row, int col)
{
    VP56mv *mv, vect = {0,0};
    int ctx, b;

    ctx = vp56_get_vectors_predictors(s, row, col, VP56_FRAME_PREVIOUS);
    s->mb_type = vp56_parse_mb_type(s, s->mb_type, ctx);
    s->macroblocks[row * s->mb_width + col].type = s->mb_type;

    switch (s->mb_type) {
        case VP56_MB_INTER_V1_PF:
            mv = &s->vector_candidate[0];
            break;

        case VP56_MB_INTER_V2_PF:
            mv = &s->vector_candidate[1];
            break;

        case VP56_MB_INTER_V1_GF:
            vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
            mv = &s->vector_candidate[0];
            break;

        case VP56_MB_INTER_V2_GF:
            vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
            mv = &s->vector_candidate[1];
            break;

        case VP56_MB_INTER_DELTA_PF:
            s->parse_vector_adjustment(s, &vect);
            mv = &vect;
            break;

        case VP56_MB_INTER_DELTA_GF:
            vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
            s->parse_vector_adjustment(s, &vect);
            mv = &vect;
            break;

        case VP56_MB_INTER_4V:
            vp56_decode_4mv(s, row, col);
            return s->mb_type;

        default:
            mv = &vect;
            break;
    }

    s->macroblocks[row*s->mb_width + col].mv = *mv;

    /* same vector for all blocks */
    for (b=0; b<6; b++)
        s->mv[b] = *mv;

    return s->mb_type;
}

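/* Predict the DC coefficient of each block from the left and above
 * neighbours using the same reference frame, add the prediction to the
 * decoded DC, then dequantize it. */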
static void vp56_add_predictors_dc(VP56Context *s, VP56Frame ref_frame)
{
    int idx = s->scantable.permutated[0];
    int b;

    for (b=0; b<6; b++) {
        VP56RefDc *ab = &s->above_blocks[s->above_block_idx[b]];
        VP56RefDc *lb = &s->left_block[ff_vp56_b6to4[b]];
        int count = 0;
        int dc = 0;
        int i;

        if (ref_frame == lb->ref_frame) {
            dc += lb->dc_coeff;
            count++;
        }
        if (ref_frame == ab->ref_frame) {
            dc += ab->dc_coeff;
            count++;
        }
        if (s->avctx->codec->id == AV_CODEC_ID_VP5)
            for (i=0; i<2; i++)
                if (count < 2 && ref_frame == ab[-1+2*i].ref_frame) {
                    dc += ab[-1+2*i].dc_coeff;
                    count++;
                }
        if (count == 0)
            dc = s->prev_dc[ff_vp56_b2p[b]][ref_frame];
        else if (count == 2)
            dc /= 2;

        s->block_coeff[b][idx] += dc;
        s->prev_dc[ff_vp56_b2p[b]][ref_frame] = s->block_coeff[b][idx];
        ab->dc_coeff = s->block_coeff[b][idx];
        ab->ref_frame = ref_frame;
        lb->dc_coeff = s->block_coeff[b][idx];
        lb->ref_frame = ref_frame;
        s->block_coeff[b][idx] *= s->dequant_dc;
    }
}

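/* Apply the edge filters to the temporary source block used for motion
 * compensation, at positions derived from the block offsets dx and dy. */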
static void vp56_deblock_filter(VP56Context *s, uint8_t *yuv,
                                int stride, int dx, int dy)
{
    int t = vp56_filter_threshold[s->quantizer];
    if (dx)  s->vp56dsp.edge_filter_hor(yuv +         10-dx , stride, t);
    if (dy)  s->vp56dsp.edge_filter_ver(yuv + stride*(10-dy), stride, t);
}

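/* Motion compensation of one 8x8 block: fetch the source area (through edge
 * emulation and/or a temporary copy when deblocking is needed) and, when the
 * vector has a fractional part, blend two source positions with the overlap
 * filter. */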
static void vp56_mc(VP56Context *s, int b, int plane, uint8_t *src,
                    int stride, int x, int y)
{
    uint8_t *dst=s->framep[VP56_FRAME_CURRENT]->data[plane]+s->block_offset[b];
    uint8_t *src_block;
    int src_offset;
    int overlap_offset = 0;
    int mask = s->vp56_coord_div[b] - 1;
    int deblock_filtering = s->deblock_filtering;
    int dx;
    int dy;

    if (s->avctx->skip_loop_filter >= AVDISCARD_ALL ||
        (s->avctx->skip_loop_filter >= AVDISCARD_NONKEY
         && !s->framep[VP56_FRAME_CURRENT]->key_frame))
        deblock_filtering = 0;

    dx = s->mv[b].x / s->vp56_coord_div[b];
    dy = s->mv[b].y / s->vp56_coord_div[b];

    if (b >= 4) {
        x /= 2;
        y /= 2;
    }
    x += dx - 2;
    y += dy - 2;

    if (x<0 || x+12>=s->plane_width[plane] ||
        y<0 || y+12>=s->plane_height[plane]) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer,
                            src + s->block_offset[b] + (dy-2)*stride + (dx-2),
                            stride, 12, 12, x, y,
                            s->plane_width[plane],
                            s->plane_height[plane]);
        src_block = s->edge_emu_buffer;
        src_offset = 2 + 2*stride;
    } else if (deblock_filtering) {
        /* only need a 12x12 block, but there is no such dsp function, */
        /* so copy a 16x12 block */
        s->dsp.put_pixels_tab[0][0](s->edge_emu_buffer,
                                    src + s->block_offset[b] + (dy-2)*stride + (dx-2),
                                    stride, 12);
        src_block = s->edge_emu_buffer;
        src_offset = 2 + 2*stride;
    } else {
        src_block = src;
        src_offset = s->block_offset[b] + dy*stride + dx;
    }

    if (deblock_filtering)
        vp56_deblock_filter(s, src_block, stride, dx&7, dy&7);

    if (s->mv[b].x & mask)
        overlap_offset += (s->mv[b].x > 0) ? 1 : -1;
    if (s->mv[b].y & mask)
        overlap_offset += (s->mv[b].y > 0) ? stride : -stride;

    if (overlap_offset) {
        if (s->filter)
            s->filter(s, dst, src_block, src_offset, src_offset+overlap_offset,
                      stride, s->mv[b], mask, s->filter_selection, b<4);
        else
            s->dsp.put_no_rnd_pixels_l2[1](dst, src_block+src_offset,
                                           src_block+src_offset+overlap_offset,
                                           stride, 8);
    } else {
        s->dsp.put_pixels_tab[1][0](dst, src_block+src_offset, stride, 8);
    }
}

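/* Decode one macroblock: parse its type and coefficients, then reconstruct
 * its blocks (IDCT for intra, copy + IDCT for no-vector inter, motion
 * compensation + IDCT for the remaining inter types). */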
static void vp56_decode_mb(VP56Context *s, int row, int col, int is_alpha)
{
    AVFrame *frame_current, *frame_ref;
    VP56mb mb_type;
    VP56Frame ref_frame;
    int b, ab, b_max, plane, off;

    if (s->framep[VP56_FRAME_CURRENT]->key_frame)
        mb_type = VP56_MB_INTRA;
    else
        mb_type = vp56_decode_mv(s, row, col);
    ref_frame = vp56_reference_frame[mb_type];

    s->dsp.clear_blocks(*s->block_coeff);

    s->parse_coeff(s);

    vp56_add_predictors_dc(s, ref_frame);

    frame_current = s->framep[VP56_FRAME_CURRENT];
    frame_ref = s->framep[ref_frame];
    if (mb_type != VP56_MB_INTRA && !frame_ref->data[0])
        return;

    ab = 6*is_alpha;
    b_max = 6 - 2*is_alpha;

    switch (mb_type) {
        case VP56_MB_INTRA:
            for (b=0; b<b_max; b++) {
                plane = ff_vp56_b2p[b+ab];
                s->vp3dsp.idct_put(frame_current->data[plane] + s->block_offset[b],
                                s->stride[plane], s->block_coeff[b]);
            }
            break;

        case VP56_MB_INTER_NOVEC_PF:
        case VP56_MB_INTER_NOVEC_GF:
            for (b=0; b<b_max; b++) {
                plane = ff_vp56_b2p[b+ab];
                off = s->block_offset[b];
                s->dsp.put_pixels_tab[1][0](frame_current->data[plane] + off,
                                            frame_ref->data[plane] + off,
                                            s->stride[plane], 8);
                s->vp3dsp.idct_add(frame_current->data[plane] + off,
                                s->stride[plane], s->block_coeff[b]);
            }
            break;

        case VP56_MB_INTER_DELTA_PF:
        case VP56_MB_INTER_V1_PF:
        case VP56_MB_INTER_V2_PF:
        case VP56_MB_INTER_DELTA_GF:
        case VP56_MB_INTER_4V:
        case VP56_MB_INTER_V1_GF:
        case VP56_MB_INTER_V2_GF:
            for (b=0; b<b_max; b++) {
                int x_off = b==1 || b==3 ? 8 : 0;
                int y_off = b==2 || b==3 ? 8 : 0;
                plane = ff_vp56_b2p[b+ab];
                vp56_mc(s, b, plane, frame_ref->data[plane], s->stride[plane],
                        16*col+x_off, 16*row+y_off);
                s->vp3dsp.idct_add(frame_current->data[plane] + s->block_offset[b],
                                s->stride[plane], s->block_coeff[b]);
            }
            break;
    }
}

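/* Recompute plane dimensions, strides and the context buffers after the
 * coded picture size has changed. */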
static int vp56_size_changed(VP56Context *s)
{
    AVCodecContext *avctx = s->avctx;
    int stride = s->framep[VP56_FRAME_CURRENT]->linesize[0];
    int i;

    s->plane_width[0]  = s->plane_width[3]  = avctx->coded_width;
    s->plane_width[1]  = s->plane_width[2]  = avctx->coded_width/2;
    s->plane_height[0] = s->plane_height[3] = avctx->coded_height;
    s->plane_height[1] = s->plane_height[2] = avctx->coded_height/2;

    for (i=0; i<4; i++)
        s->stride[i] = s->flip * s->framep[VP56_FRAME_CURRENT]->linesize[i];

    s->mb_width  = (avctx->coded_width +15) / 16;
    s->mb_height = (avctx->coded_height+15) / 16;

    if (s->mb_width > 1000 || s->mb_height > 1000) {
        avcodec_set_dimensions(avctx, 0, 0);
        av_log(avctx, AV_LOG_ERROR, "picture too big\n");
        return -1;
    }

    s->qscale_table = av_realloc(s->qscale_table, s->mb_width);
    s->above_blocks = av_realloc(s->above_blocks,
                                 (4*s->mb_width+6) * sizeof(*s->above_blocks));
    s->macroblocks = av_realloc(s->macroblocks,
                                s->mb_width*s->mb_height*sizeof(*s->macroblocks));
    av_free(s->edge_emu_buffer_alloc);
    s->edge_emu_buffer_alloc = av_malloc(16*stride);
    s->edge_emu_buffer = s->edge_emu_buffer_alloc;
    if (s->flip < 0)
        s->edge_emu_buffer += 15 * stride;

    if (s->alpha_context)
        return vp56_size_changed(s->alpha_context);

    return 0;
}

static int ff_vp56_decode_mbs(AVCodecContext *avctx, void *, int, int);

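/* Decode one frame (plus its alpha plane when present): pick an unused frame
 * buffer, parse the headers, decode the macroblocks through execute2(), and
 * release the frames that are no longer referenced. */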
int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                         AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    VP56Context *s = avctx->priv_data;
    AVFrame *p = 0;
    int remaining_buf_size = avpkt->size;
    int av_uninit(alpha_offset);
    int i, res;

    /* select a current frame from the unused frames */
    for (i = 0; i < 4; ++i) {
        if (!s->frames[i].data[0]) {
            p = &s->frames[i];
            break;
        }
    }
    av_assert0(p != 0);
    s->framep[VP56_FRAME_CURRENT] = p;
    if (s->alpha_context)
        s->alpha_context->framep[VP56_FRAME_CURRENT] = p;

    if (s->has_alpha) {
        if (remaining_buf_size < 3)
            return -1;
        alpha_offset = bytestream_get_be24(&buf);
        remaining_buf_size -= 3;
        if (remaining_buf_size < alpha_offset)
            return -1;
    }

    res = s->parse_header(s, buf, remaining_buf_size);
    if (!res)
        return -1;

    if (res == 2) {
        for (i = 0; i < 4; i++) {
            if (s->frames[i].data[0])
                avctx->release_buffer(avctx, &s->frames[i]);
        }
    }

    p->reference = 3;
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if (res == 2) {
        if (vp56_size_changed(s)) {
            avctx->release_buffer(avctx, p);
            return -1;
        }
    }

    if (s->has_alpha) {
        buf += alpha_offset;
        remaining_buf_size -= alpha_offset;

        res = s->alpha_context->parse_header(s->alpha_context, buf, remaining_buf_size);
        if (res != 1) {
            if(res==2) {
                av_log(avctx, AV_LOG_ERROR, "Alpha reconfiguration\n");
                for (i = 0; i < 4; i++) {
                    if (s->frames[i].data[0])
                        avctx->release_buffer(avctx, &s->frames[i]);
                    av_assert0(!s->alpha_context->frames[i].data[0]);
                }
                vp56_size_changed(s);
            }else
                avctx->release_buffer(avctx, p);
            return -1;
        }
    }

    avctx->execute2(avctx, ff_vp56_decode_mbs, 0, 0, s->has_alpha + 1);

    /* release frames that aren't in use */
    for (i = 0; i < 4; ++i) {
        AVFrame *victim = &s->frames[i];
        if (!victim->data[0])
            continue;
        if (victim != s->framep[VP56_FRAME_PREVIOUS] &&
            victim != s->framep[VP56_FRAME_GOLDEN] &&
            (!s->has_alpha || victim != s->alpha_context->framep[VP56_FRAME_GOLDEN]))
            avctx->release_buffer(avctx, victim);
    }

    p->qstride = 0;
    p->qscale_table = s->qscale_table;
    p->qscale_type = FF_QSCALE_TYPE_VP56;
    *(AVFrame*)data = *p;
    *data_size = sizeof(AVFrame);

    return avpkt->size;
}

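/* Decode all macroblocks of the frame for one context; job number 1
 * processes the alpha plane context. */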
static int ff_vp56_decode_mbs(AVCodecContext *avctx, void *data,
                              int jobnr, int threadnr)
{
    VP56Context *s0 = avctx->priv_data;
    int is_alpha = (jobnr == 1);
    VP56Context *s = is_alpha ? s0->alpha_context : s0;
    AVFrame *const p = s->framep[VP56_FRAME_CURRENT];
    int mb_row, mb_col, mb_row_flip, mb_offset = 0;
    int block, y, uv, stride_y, stride_uv;

    if (p->key_frame) {
        p->pict_type = AV_PICTURE_TYPE_I;
        s->default_models_init(s);
        for (block=0; block<s->mb_height*s->mb_width; block++)
            s->macroblocks[block].type = VP56_MB_INTRA;
    } else {
        p->pict_type = AV_PICTURE_TYPE_P;
        vp56_parse_mb_type_models(s);
        s->parse_vector_models(s);
        s->mb_type = VP56_MB_INTER_NOVEC_PF;
    }

    if (s->parse_coeff_models(s))
        goto next;

    memset(s->prev_dc, 0, sizeof(s->prev_dc));
    s->prev_dc[1][VP56_FRAME_CURRENT] = 128;
    s->prev_dc[2][VP56_FRAME_CURRENT] = 128;

    for (block=0; block < 4*s->mb_width+6; block++) {
        s->above_blocks[block].ref_frame = VP56_FRAME_NONE;
        s->above_blocks[block].dc_coeff = 0;
        s->above_blocks[block].not_null_dc = 0;
    }
    s->above_blocks[2*s->mb_width + 2].ref_frame = VP56_FRAME_CURRENT;
    s->above_blocks[3*s->mb_width + 4].ref_frame = VP56_FRAME_CURRENT;

    stride_y  = p->linesize[0];
    stride_uv = p->linesize[1];

    if (s->flip < 0)
        mb_offset = 7;

    /* main macroblocks loop */
    for (mb_row=0; mb_row<s->mb_height; mb_row++) {
        if (s->flip < 0)
            mb_row_flip = s->mb_height - mb_row - 1;
        else
            mb_row_flip = mb_row;

        for (block=0; block<4; block++) {
            s->left_block[block].ref_frame = VP56_FRAME_NONE;
            s->left_block[block].dc_coeff = 0;
            s->left_block[block].not_null_dc = 0;
        }
        memset(s->coeff_ctx, 0, sizeof(s->coeff_ctx));
        memset(s->coeff_ctx_last, 24, sizeof(s->coeff_ctx_last));

        s->above_block_idx[0] = 1;
        s->above_block_idx[1] = 2;
        s->above_block_idx[2] = 1;
        s->above_block_idx[3] = 2;
        s->above_block_idx[4] = 2*s->mb_width + 2 + 1;
        s->above_block_idx[5] = 3*s->mb_width + 4 + 1;

        s->block_offset[s->frbi] = (mb_row_flip*16 + mb_offset) * stride_y;
        s->block_offset[s->srbi] = s->block_offset[s->frbi] + 8*stride_y;
        s->block_offset[1] = s->block_offset[0] + 8;
        s->block_offset[3] = s->block_offset[2] + 8;
        s->block_offset[4] = (mb_row_flip*8 + mb_offset) * stride_uv;
        s->block_offset[5] = s->block_offset[4];

        for (mb_col=0; mb_col<s->mb_width; mb_col++) {
            vp56_decode_mb(s, mb_row, mb_col, is_alpha);

            for (y=0; y<4; y++) {
                s->above_block_idx[y] += 2;
                s->block_offset[y] += 16;
            }

            for (uv=4; uv<6; uv++) {
                s->above_block_idx[uv] += 1;
                s->block_offset[uv] += 8;
            }
        }
    }

next:
    if (p->key_frame || s->golden_frame) {
        s->framep[VP56_FRAME_GOLDEN] = p;
    }

    FFSWAP(AVFrame *, s->framep[VP56_FRAME_CURRENT],
                      s->framep[VP56_FRAME_PREVIOUS]);
    return 0;
}

av_cold void ff_vp56_init(AVCodecContext *avctx, int flip, int has_alpha)
{
    VP56Context *s = avctx->priv_data;
    ff_vp56_init_context(avctx, s, flip, has_alpha);
}

av_cold void ff_vp56_init_context(AVCodecContext *avctx, VP56Context *s,
                                  int flip, int has_alpha)
{
    int i;

    s->avctx = avctx;
    avctx->pix_fmt = has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P;

    ff_dsputil_init(&s->dsp, avctx);
    ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
    ff_vp56dsp_init(&s->vp56dsp, avctx->codec->id);
    ff_init_scantable_permutation(s->dsp.idct_permutation, s->vp3dsp.idct_perm);
    ff_init_scantable(s->dsp.idct_permutation, &s->scantable,ff_zigzag_direct);

    for (i=0; i<4; i++) {
        s->framep[i] = &s->frames[i];
        avcodec_get_frame_defaults(&s->frames[i]);
    }
    s->framep[VP56_FRAME_UNUSED] = s->framep[VP56_FRAME_GOLDEN];
    s->framep[VP56_FRAME_UNUSED2] = s->framep[VP56_FRAME_GOLDEN2];
    s->edge_emu_buffer_alloc = NULL;

    s->above_blocks = NULL;
    s->macroblocks = NULL;
    s->quantizer = -1;
    s->deblock_filtering = 1;
    s->golden_frame = 0;

    s->filter = NULL;

    s->has_alpha = has_alpha;

    s->modelp = &s->model;

    if (flip) {
        s->flip = -1;
        s->frbi = 2;
        s->srbi = 0;
    } else {
        s->flip = 1;
        s->frbi = 0;
        s->srbi = 2;
    }
}

av_cold int ff_vp56_free(AVCodecContext *avctx)
{
    VP56Context *s = avctx->priv_data;
    return ff_vp56_free_context(s);
}

av_cold int ff_vp56_free_context(VP56Context *s)
{
    AVCodecContext *avctx = s->avctx;
    int i;

    av_freep(&s->qscale_table);
    av_freep(&s->above_blocks);
    av_freep(&s->macroblocks);
    av_freep(&s->edge_emu_buffer_alloc);
    for (i = 0; i < 4; ++i) {
        if (s->frames[i].data[0])
            avctx->release_buffer(avctx, &s->frames[i]);
    }
    return 0;
}