vp3.c 76.5 KB
Newer Older
1
/*
2
 * Copyright (C) 2003-2004 the ffmpeg project
3
 *
4 5 6
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
7 8
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
9
 * version 2.1 of the License, or (at your option) any later version.
10
 *
11
 * FFmpeg is distributed in the hope that it will be useful,
12 13 14 15 16
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with FFmpeg; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 20 21
 */

/**
22
 * @file libavcodec/vp3.c
23
 * On2 VP3 Video Decoder
24 25 26
 *
 * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
 * For more information about the VP3 coding process, visit:
27
 *   http://wiki.multimedia.cx/index.php?title=On2_VP3
28 29
 *
 * Theora decoder by Alex Beregszaszi
30 31 32 33 34 35 36 37
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "avcodec.h"
#include "dsputil.h"
38
#include "get_bits.h"
39 40

#include "vp3data.h"
41
#include "xiph.h"
42 43 44

#define FRAGMENT_PIXELS 8

45 46
static av_cold int vp3_decode_end(AVCodecContext *avctx);

47 48 49 50 51 52 53
typedef struct Coeff {
    struct Coeff *next;
    DCTELEM coeff;
    uint8_t index;
} Coeff;

//FIXME split things out into their own arrays
54
typedef struct Vp3Fragment {
55
    Coeff *next_coeff;
56 57 58
    uint8_t coding_method;
    int8_t motion_x;
    int8_t motion_y;
59
    uint8_t qpi;
60 61 62 63 64 65
} Vp3Fragment;

#define SB_NOT_CODED        0
#define SB_PARTIALLY_CODED  1
#define SB_FULLY_CODED      2

66 67 68 69 70
// This is the maximum length of a single long bit run that can be encoded
// for superblock coding or block qps. Theora special-cases this to read a
// bit instead of flipping the current bit to allow for runs longer than 4129.
#define MAXIMUM_LONG_BIT_RUN 4129

71 72 73 74 75 76 77 78 79 80 81 82 83 84
#define MODE_INTER_NO_MV      0
#define MODE_INTRA            1
#define MODE_INTER_PLUS_MV    2
#define MODE_INTER_LAST_MV    3
#define MODE_INTER_PRIOR_LAST 4
#define MODE_USING_GOLDEN     5
#define MODE_GOLDEN_MV        6
#define MODE_INTER_FOURMV     7
#define CODING_MODE_COUNT     8

/* special internal mode */
#define MODE_COPY             8

/* There are 6 preset schemes, plus a free-form scheme */
85
static const int ModeAlphabet[6][CODING_MODE_COUNT] =
86 87
{
    /* scheme 1: Last motion vector dominates */
88
    {    MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
89
         MODE_INTER_PLUS_MV,    MODE_INTER_NO_MV,
90
         MODE_INTRA,            MODE_USING_GOLDEN,
91 92 93
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 2 */
94
    {    MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
95
         MODE_INTER_NO_MV,      MODE_INTER_PLUS_MV,
96
         MODE_INTRA,            MODE_USING_GOLDEN,
97 98 99
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 3 */
100
    {    MODE_INTER_LAST_MV,    MODE_INTER_PLUS_MV,
101
         MODE_INTER_PRIOR_LAST, MODE_INTER_NO_MV,
102
         MODE_INTRA,            MODE_USING_GOLDEN,
103 104 105
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 4 */
106
    {    MODE_INTER_LAST_MV,    MODE_INTER_PLUS_MV,
107
         MODE_INTER_NO_MV,      MODE_INTER_PRIOR_LAST,
108
         MODE_INTRA,            MODE_USING_GOLDEN,
109 110 111
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 5: No motion vector dominates */
112
    {    MODE_INTER_NO_MV,      MODE_INTER_LAST_MV,
113
         MODE_INTER_PRIOR_LAST, MODE_INTER_PLUS_MV,
114
         MODE_INTRA,            MODE_USING_GOLDEN,
115 116 117
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 6 */
118
    {    MODE_INTER_NO_MV,      MODE_USING_GOLDEN,
119
         MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
120
         MODE_INTER_PLUS_MV,    MODE_INTRA,
121 122 123 124 125 126 127 128
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

};

#define MIN_DEQUANT_VAL 2

typedef struct Vp3DecodeContext {
    AVCodecContext *avctx;
129
    int theora, theora_tables;
130
    int version;
131 132 133 134 135 136
    int width, height;
    AVFrame golden_frame;
    AVFrame last_frame;
    AVFrame current_frame;
    int keyframe;
    DSPContext dsp;
137
    int flipped_image;
138
    int last_slice_end;
139

140 141 142
    int qps[3];
    int nqps;
    int last_qps[3];
143 144

    int superblock_count;
145 146 147 148
    int y_superblock_width;
    int y_superblock_height;
    int c_superblock_width;
    int c_superblock_height;
149 150 151 152 153 154 155 156 157 158 159 160 161
    int u_superblock_start;
    int v_superblock_start;
    unsigned char *superblock_coding;

    int macroblock_count;
    int macroblock_width;
    int macroblock_height;

    int fragment_count;
    int fragment_width;
    int fragment_height;

    Vp3Fragment *all_fragments;
162
    uint8_t *coeff_counts;
163 164
    Coeff *coeffs;
    Coeff *next_coeff;
Michael Niedermayer's avatar
Michael Niedermayer committed
165
    int fragment_start[3];
166
    int data_offset[3];
167

168
    ScanTable scantable;
169

170 171
    /* tables */
    uint16_t coded_dc_scale_factor[64];
172
    uint32_t coded_ac_scale_factor[64];
173 174 175 176
    uint8_t base_matrix[384][64];
    uint8_t qr_count[2][3];
    uint8_t qr_size [2][3][64];
    uint16_t qr_base[2][3][64];
177

178
    /* this is a list of indexes into the all_fragments array indicating
179 180 181 182
     * which of the fragments are coded */
    int *coded_fragment_list;
    int coded_fragment_list_index;

183 184 185 186 187 188 189 190
    /* track which fragments have already been decoded; called 'fast'
     * because this data structure avoids having to iterate through every
     * fragment in coded_fragment_list; once a fragment has been fully
     * decoded, it is removed from this list */
    int *fast_fragment_list;
    int fragment_list_y_head;
    int fragment_list_c_head;

191 192 193 194 195 196
    VLC dc_vlc[16];
    VLC ac_vlc_1[16];
    VLC ac_vlc_2[16];
    VLC ac_vlc_3[16];
    VLC ac_vlc_4[16];

197 198 199 200 201
    VLC superblock_run_length_vlc;
    VLC fragment_run_length_vlc;
    VLC mode_code_vlc;
    VLC motion_vector_vlc;

202 203
    /* these arrays need to be on 16-byte boundaries since SSE2 operations
     * index into them */
204
    DECLARE_ALIGNED_16(int16_t, qmat)[3][2][3][64];     //<qmat[qpi][is_inter][plane]
205 206

    /* This table contains superblock_count * 16 entries. Each set of 16
207
     * numbers corresponds to the fragment indexes 0..15 of the superblock.
208 209 210 211
     * An entry will be -1 to indicate that no entry corresponds to that
     * index. */
    int *superblock_fragments;

212
    /* This is an array that indicates how a particular macroblock
213
     * is coded. */
214
    unsigned char *macroblock_coding;
215

216 217 218 219 220
    int first_coded_y_fragment;
    int first_coded_c_fragment;
    int last_coded_y_fragment;
    int last_coded_c_fragment;

Michael Niedermayer's avatar
Michael Niedermayer committed
221
    uint8_t edge_emu_buffer[9*2048]; //FIXME dynamic alloc
222
    int8_t qscale_table[2048]; //FIXME dynamic alloc (width+15)/16
223

224 225 226 227 228 229 230
    /* Huffman decode */
    int hti;
    unsigned int hbits;
    int entries;
    int huff_code_size;
    uint16_t huffman_table[80][32][2];

231
    uint8_t filter_limit_values[64];
232
    DECLARE_ALIGNED_8(int, bounding_values_array)[256+2];
233 234 235 236 237 238 239 240 241 242
} Vp3DecodeContext;

/************************************************************************
 * VP3 specific functions
 ************************************************************************/

/*
 * This function sets up all of the various blocks mappings:
 * superblocks <-> fragments, macroblocks <-> fragments,
 * superblocks <-> macroblocks
243 244
 *
 * Returns 0 is successful; returns 1 if *anything* went wrong.
245
 */
246
static int init_block_mapping(Vp3DecodeContext *s)
247 248 249 250 251 252 253 254 255 256 257 258 259 260 261
{
    int i, j;
    signed int hilbert_walk_mb[4];

    int current_fragment = 0;
    int current_width = 0;
    int current_height = 0;
    int right_edge = 0;
    int bottom_edge = 0;
    int superblock_row_inc = 0;
    int mapping_index = 0;

    int current_macroblock;
    int c_fragment;

262
    static const signed char travel_width[16] = {
263
         1,  1,  0, -1,
264 265 266 267 268
         0,  0,  1,  0,
         1,  0,  1,  0,
         0, -1,  0,  1
    };

269
    static const signed char travel_height[16] = {
270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288
         0,  0,  1,  0,
         1,  1,  0, -1,
         0,  1,  0, -1,
        -1,  0, -1,  0
    };

    hilbert_walk_mb[0] = 1;
    hilbert_walk_mb[1] = s->macroblock_width;
    hilbert_walk_mb[2] = 1;
    hilbert_walk_mb[3] = -s->macroblock_width;

    /* iterate through each superblock (all planes) and map the fragments */
    for (i = 0; i < s->superblock_count; i++) {
        /* time to re-assign the limits? */
        if (i == 0) {

            /* start of Y superblocks */
            right_edge = s->fragment_width;
            bottom_edge = s->fragment_height;
289
            current_width = -1;
290
            current_height = 0;
291
            superblock_row_inc = 3 * s->fragment_width -
292
                (s->y_superblock_width * 4 - s->fragment_width);
293 294 295 296 297 298 299 300 301

            /* the first operation for this variable is to advance by 1 */
            current_fragment = -1;

        } else if (i == s->u_superblock_start) {

            /* start of U superblocks */
            right_edge = s->fragment_width / 2;
            bottom_edge = s->fragment_height / 2;
302
            current_width = -1;
303
            current_height = 0;
304
            superblock_row_inc = 3 * (s->fragment_width / 2) -
305
                (s->c_superblock_width * 4 - s->fragment_width / 2);
306 307

            /* the first operation for this variable is to advance by 1 */
Michael Niedermayer's avatar
Michael Niedermayer committed
308
            current_fragment = s->fragment_start[1] - 1;
309 310 311 312 313 314

        } else if (i == s->v_superblock_start) {

            /* start of V superblocks */
            right_edge = s->fragment_width / 2;
            bottom_edge = s->fragment_height / 2;
315
            current_width = -1;
316
            current_height = 0;
317
            superblock_row_inc = 3 * (s->fragment_width / 2) -
318
                (s->c_superblock_width * 4 - s->fragment_width / 2);
319 320

            /* the first operation for this variable is to advance by 1 */
Michael Niedermayer's avatar
Michael Niedermayer committed
321
            current_fragment = s->fragment_start[2] - 1;
322 323 324

        }

325
        if (current_width >= right_edge - 1) {
326
            /* reset width and move to next superblock row */
327
            current_width = -1;
328 329 330 331 332 333 334 335
            current_height += 4;

            /* fragment is now at the start of a new superblock row */
            current_fragment += superblock_row_inc;
        }

        /* iterate through all 16 fragments in a superblock */
        for (j = 0; j < 16; j++) {
Michael Niedermayer's avatar
Michael Niedermayer committed
336
            current_fragment += travel_width[j] + right_edge * travel_height[j];
337
            current_width += travel_width[j];
338 339 340
            current_height += travel_height[j];

            /* check if the fragment is in bounds */
341
            if ((current_width < right_edge) &&
342 343 344 345 346 347 348 349 350 351
                (current_height < bottom_edge)) {
                s->superblock_fragments[mapping_index] = current_fragment;
            } else {
                s->superblock_fragments[mapping_index] = -1;
            }

            mapping_index++;
        }
    }

352
    return 0;  /* successful path out */
353 354 355 356 357 358 359 360 361 362 363 364
}

/*
 * This function wipes out all of the fragment data.
 */
static void init_frame(Vp3DecodeContext *s, GetBitContext *gb)
{
    int i;

    /* zero out all of the fragment information */
    s->coded_fragment_list_index = 0;
    for (i = 0; i < s->fragment_count; i++) {
365
        s->coeff_counts[i] = 0;
366 367 368
        s->all_fragments[i].motion_x = 127;
        s->all_fragments[i].motion_y = 127;
        s->all_fragments[i].next_coeff= NULL;
369
        s->all_fragments[i].qpi = 0;
370 371 372
        s->coeffs[i].index=
        s->coeffs[i].coeff=0;
        s->coeffs[i].next= NULL;
373 374 375 376
    }
}

/*
377
 * This function sets up the dequantization tables used for a particular
378 379
 * frame.
 */
380
static void init_dequantizer(Vp3DecodeContext *s, int qpi)
381
{
382 383
    int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];
    int dc_scale_factor = s->coded_dc_scale_factor[s->qps[qpi]];
384
    int i, plane, inter, qri, bmi, bmj, qistart;
385

386 387 388 389 390
    for(inter=0; inter<2; inter++){
        for(plane=0; plane<3; plane++){
            int sum=0;
            for(qri=0; qri<s->qr_count[inter][plane]; qri++){
                sum+= s->qr_size[inter][plane][qri];
391
                if(s->qps[qpi] <= sum)
392 393 394 395 396 397
                    break;
            }
            qistart= sum - s->qr_size[inter][plane][qri];
            bmi= s->qr_base[inter][plane][qri  ];
            bmj= s->qr_base[inter][plane][qri+1];
            for(i=0; i<64; i++){
398 399
                int coeff= (  2*(sum    -s->qps[qpi])*s->base_matrix[bmi][i]
                            - 2*(qistart-s->qps[qpi])*s->base_matrix[bmj][i]
400 401 402
                            + s->qr_size[inter][plane][qri])
                           / (2*s->qr_size[inter][plane][qri]);

Michael Niedermayer's avatar
Michael Niedermayer committed
403
                int qmin= 8<<(inter + !i);
404 405
                int qscale= i ? ac_scale_factor : dc_scale_factor;

406
                s->qmat[qpi][inter][plane][s->dsp.idct_permutation[i]]= av_clip((qscale * coeff)/100 * 4, qmin, 4096);
407
            }
408 409
            // all DC coefficients use the same quant so as not to interfere with DC prediction
            s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
410
        }
411
    }
412

413
    memset(s->qscale_table, (FFMAX(s->qmat[0][0][0][1], s->qmat[0][0][1][1])+8)/16, 512); //FIXME finetune
414 415
}

416 417 418
/*
 * This function initializes the loop filter boundary limits if the frame's
 * quality index is different from the previous frame's.
419 420
 *
 * The filter_limit_values may not be larger than 127.
421 422 423 424 425 426
 */
static void init_loop_filter(Vp3DecodeContext *s)
{
    int *bounding_values= s->bounding_values_array+127;
    int filter_limit;
    int x;
427
    int value;
428

429
    filter_limit = s->filter_limit_values[s->qps[0]];
430 431 432 433 434 435 436

    /* set up the bounding values */
    memset(s->bounding_values_array, 0, 256 * sizeof(int));
    for (x = 0; x < filter_limit; x++) {
        bounding_values[-x] = -x;
        bounding_values[x] = x;
    }
437 438 439 440 441 442
    for (x = value = filter_limit; x < 128 && value; x++, value--) {
        bounding_values[ x] =  value;
        bounding_values[-x] = -value;
    }
    if (value)
        bounding_values[128] = value;
David Conrad's avatar
David Conrad committed
443
    bounding_values[129] = bounding_values[130] = filter_limit * 0x02020202;
444 445
}

446
/*
447
 * This function unpacks all of the superblock/macroblock/fragment coding
448 449
 * information from the bitstream.
 */
450
static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
451 452 453 454
{
    int bit = 0;
    int current_superblock = 0;
    int current_run = 0;
455
    int num_partial_superblocks = 0;
456
    int first_c_fragment_seen;
457 458 459 460 461 462 463 464 465 466

    int i, j;
    int current_fragment;

    if (s->keyframe) {
        memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);

    } else {

        /* unpack the list of partially-coded superblocks */
467
        bit = get_bits1(gb);
468
        while (current_superblock < s->superblock_count) {
469
                current_run = get_vlc2(gb,
470 471
                    s->superblock_run_length_vlc.table, 6, 2) + 1;
                if (current_run == 34)
472
                    current_run += get_bits(gb, 12);
473

474 475 476 477 478 479 480 481
            if (current_superblock + current_run > s->superblock_count) {
                av_log(s->avctx, AV_LOG_ERROR, "Invalid partially coded superblock run length\n");
                return -1;
            }

            memset(s->superblock_coding + current_superblock, bit, current_run);

            current_superblock += current_run;
482 483
            if (bit)
                num_partial_superblocks += current_run;
484

485 486 487
            if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
                bit = get_bits1(gb);
            else
488
                bit ^= 1;
489 490 491 492
        }

        /* unpack the list of fully coded superblocks if any of the blocks were
         * not marked as partially coded in the previous step */
493 494
        if (num_partial_superblocks < s->superblock_count) {
            int superblocks_decoded = 0;
495 496

            current_superblock = 0;
497
            bit = get_bits1(gb);
498
            while (superblocks_decoded < s->superblock_count - num_partial_superblocks) {
499
                        current_run = get_vlc2(gb,
500 501
                            s->superblock_run_length_vlc.table, 6, 2) + 1;
                        if (current_run == 34)
502
                            current_run += get_bits(gb, 12);
503 504 505 506 507

                for (j = 0; j < current_run; current_superblock++) {
                    if (current_superblock >= s->superblock_count) {
                        av_log(s->avctx, AV_LOG_ERROR, "Invalid fully coded superblock run length\n");
                        return -1;
508
                    }
509 510 511

                /* skip any superblocks already marked as partially coded */
                if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
512
                    s->superblock_coding[current_superblock] = 2*bit;
513 514
                    j++;
                }
515
                }
516 517
                superblocks_decoded += current_run;

518 519 520
                if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
                    bit = get_bits1(gb);
                else
521
                    bit ^= 1;
522 523 524 525 526
            }
        }

        /* if there were partial blocks, initialize bitstream for
         * unpacking fragment codings */
527
        if (num_partial_superblocks) {
528 529

            current_run = 0;
530
            bit = get_bits1(gb);
531
            /* toggle the bit because as soon as the first run length is
532 533 534 535 536 537 538 539
             * fetched the bit will be toggled again */
            bit ^= 1;
        }
    }

    /* figure out which fragments are coded; iterate through each
     * superblock (all planes) */
    s->coded_fragment_list_index = 0;
540
    s->next_coeff= s->coeffs + s->fragment_count;
541 542
    s->first_coded_y_fragment = s->first_coded_c_fragment = 0;
    s->last_coded_y_fragment = s->last_coded_c_fragment = -1;
543
    first_c_fragment_seen = 0;
544
    memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
545 546 547 548 549 550 551
    for (i = 0; i < s->superblock_count; i++) {

        /* iterate through all 16 fragments in a superblock */
        for (j = 0; j < 16; j++) {

            /* if the fragment is in bounds, check its coding status */
            current_fragment = s->superblock_fragments[i * 16 + j];
552
            if (current_fragment >= s->fragment_count) {
553
                av_log(s->avctx, AV_LOG_ERROR, "  vp3:unpack_superblocks(): bad fragment number (%d >= %d)\n",
554 555 556
                    current_fragment, s->fragment_count);
                return 1;
            }
557
            if (current_fragment != -1) {
558
                int coded = s->superblock_coding[i];
559

560
                if (s->superblock_coding[i] == SB_PARTIALLY_CODED) {
561 562 563

                    /* fragment may or may not be coded; this is the case
                     * that cares about the fragment coding runs */
564
                    if (current_run-- == 0) {
565
                        bit ^= 1;
566
                        current_run = get_vlc2(gb,
567
                            s->fragment_run_length_vlc.table, 5, 2);
568
                    }
569 570
                    coded = bit;
                }
571

572
                    if (coded) {
573
                        /* default mode; actual mode will be decoded in
574
                         * the next phase */
575
                        s->all_fragments[current_fragment].coding_method =
576
                            MODE_INTER_NO_MV;
577
                        s->all_fragments[current_fragment].next_coeff= s->coeffs + current_fragment;
578
                        s->coded_fragment_list[s->coded_fragment_list_index] =
579
                            current_fragment;
Michael Niedermayer's avatar
Michael Niedermayer committed
580
                        if ((current_fragment >= s->fragment_start[1]) &&
581 582
                            (s->last_coded_y_fragment == -1) &&
                            (!first_c_fragment_seen)) {
583 584
                            s->first_coded_c_fragment = s->coded_fragment_list_index;
                            s->last_coded_y_fragment = s->first_coded_c_fragment - 1;
585
                            first_c_fragment_seen = 1;
586 587
                        }
                        s->coded_fragment_list_index++;
588 589 590 591 592 593 594 595
                    } else {
                        /* not coded; copy this fragment from the prior frame */
                        s->all_fragments[current_fragment].coding_method =
                            MODE_COPY;
                    }
            }
        }
    }
596

597 598
    if (!first_c_fragment_seen)
        /* only Y fragments coded in this frame */
599
        s->last_coded_y_fragment = s->coded_fragment_list_index - 1;
600
    else
601
        /* end the list of coded C fragments */
602
        s->last_coded_c_fragment = s->coded_fragment_list_index - 1;
603

604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622
    for (i = 0; i < s->fragment_count - 1; i++) {
        s->fast_fragment_list[i] = i + 1;
    }
    s->fast_fragment_list[s->fragment_count - 1] = -1;

    if (s->last_coded_y_fragment == -1)
        s->fragment_list_y_head = -1;
    else {
        s->fragment_list_y_head = s->first_coded_y_fragment;
        s->fast_fragment_list[s->last_coded_y_fragment] = -1;
    }

    if (s->last_coded_c_fragment == -1)
        s->fragment_list_c_head = -1;
    else {
        s->fragment_list_c_head = s->first_coded_c_fragment;
        s->fast_fragment_list[s->last_coded_c_fragment] = -1;
    }

623
    return 0;
624 625 626 627 628 629
}

/*
 * This function unpacks all the coding mode data for individual macroblocks
 * from the bitstream.
 */
630
static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
631
{
632
    int i, j, k, sb_x, sb_y;
633 634 635 636
    int scheme;
    int current_macroblock;
    int current_fragment;
    int coding_mode;
637
    int custom_mode_alphabet[CODING_MODE_COUNT];
638
    const int *alphabet;
639 640 641 642 643 644 645 646 647 648 649 650

    if (s->keyframe) {
        for (i = 0; i < s->fragment_count; i++)
            s->all_fragments[i].coding_method = MODE_INTRA;

    } else {

        /* fetch the mode coding scheme for this frame */
        scheme = get_bits(gb, 3);

        /* is it a custom coding scheme? */
        if (scheme == 0) {
651 652
            for (i = 0; i < 8; i++)
                custom_mode_alphabet[i] = MODE_INTER_NO_MV;
653
            for (i = 0; i < 8; i++)
654
                custom_mode_alphabet[get_bits(gb, 3)] = i;
655 656 657
            alphabet = custom_mode_alphabet;
        } else
            alphabet = ModeAlphabet[scheme-1];
658 659 660

        /* iterate through all of the macroblocks that contain 1 or more
         * coded fragments */
661 662
        for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
            for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
663 664

            for (j = 0; j < 4; j++) {
665 666
                int mb_x = 2*sb_x +   (j>>1);
                int mb_y = 2*sb_y + (((j>>1)+j)&1);
667
                int frags_coded = 0;
668 669
                current_macroblock = mb_y * s->macroblock_width + mb_x;

670
                if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height)
671 672
                    continue;

673 674
#define BLOCK_X (2*mb_x + (k&1))
#define BLOCK_Y (2*mb_y + (k>>1))
675 676 677 678 679 680 681 682 683 684 685
                /* coding modes are only stored if the macroblock has at least one
                 * luma block coded, otherwise it must be INTER_NO_MV */
                for (k = 0; k < 4; k++) {
                    current_fragment = BLOCK_Y*s->fragment_width + BLOCK_X;
                    if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
                        break;
                }
                if (k == 4) {
                    s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV;
                    continue;
                }
686

687 688 689 690
                /* mode 7 means get 3 bits for each coding mode */
                if (scheme == 7)
                    coding_mode = get_bits(gb, 3);
                else
691
                    coding_mode = alphabet
692
                        [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];
693

694
                s->macroblock_coding[current_macroblock] = coding_mode;
695
                for (k = 0; k < 4; k++) {
696
                    current_fragment =
697 698 699 700 701 702 703 704 705
                        BLOCK_Y*s->fragment_width + BLOCK_X;
                    if (s->all_fragments[current_fragment].coding_method !=
                        MODE_COPY)
                        s->all_fragments[current_fragment].coding_method =
                            coding_mode;
                }
                for (k = 0; k < 2; k++) {
                    current_fragment = s->fragment_start[k+1] +
                        mb_y*(s->fragment_width>>1) + mb_x;
706
                    if (s->all_fragments[current_fragment].coding_method !=
707 708 709 710 711
                        MODE_COPY)
                        s->all_fragments[current_fragment].coding_method =
                            coding_mode;
                }
            }
712
            }
713 714
        }
    }
715 716

    return 0;
717 718
}

719 720 721 722
/*
 * This function unpacks all the motion vectors for the individual
 * macroblocks from the bitstream.
 */
723
static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
724
{
725
    int j, k, sb_x, sb_y;
726 727 728 729 730 731 732 733 734 735
    int coding_mode;
    int motion_x[6];
    int motion_y[6];
    int last_motion_x = 0;
    int last_motion_y = 0;
    int prior_last_motion_x = 0;
    int prior_last_motion_y = 0;
    int current_macroblock;
    int current_fragment;

David Conrad's avatar
David Conrad committed
736
    if (s->keyframe)
737
        return 0;
David Conrad's avatar
David Conrad committed
738

David Conrad's avatar
David Conrad committed
739 740
    memset(motion_x, 0, 6 * sizeof(int));
    memset(motion_y, 0, 6 * sizeof(int));
741

David Conrad's avatar
David Conrad committed
742 743
    /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */
    coding_mode = get_bits1(gb);
744

David Conrad's avatar
David Conrad committed
745 746
    /* iterate through all of the macroblocks that contain 1 or more
     * coded fragments */
747 748
    for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
        for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
749

David Conrad's avatar
David Conrad committed
750
        for (j = 0; j < 4; j++) {
751 752 753 754 755
            int mb_x = 2*sb_x +   (j>>1);
            int mb_y = 2*sb_y + (((j>>1)+j)&1);
            current_macroblock = mb_y * s->macroblock_width + mb_x;

            if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height ||
David Conrad's avatar
David Conrad committed
756 757
                (s->macroblock_coding[current_macroblock] == MODE_COPY))
                continue;
758

David Conrad's avatar
David Conrad committed
759 760 761 762 763 764 765 766 767 768 769
            switch (s->macroblock_coding[current_macroblock]) {

            case MODE_INTER_PLUS_MV:
            case MODE_GOLDEN_MV:
                /* all 6 fragments use the same motion vector */
                if (coding_mode == 0) {
                    motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                    motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                } else {
                    motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
                    motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
770
                }
771

David Conrad's avatar
David Conrad committed
772 773 774
                /* vector maintenance, only on MODE_INTER_PLUS_MV */
                if (s->macroblock_coding[current_macroblock] ==
                    MODE_INTER_PLUS_MV) {
775 776
                    prior_last_motion_x = last_motion_x;
                    prior_last_motion_y = last_motion_y;
David Conrad's avatar
David Conrad committed
777 778 779 780 781 782 783 784 785 786 787 788 789 790
                    last_motion_x = motion_x[0];
                    last_motion_y = motion_y[0];
                }
                break;

            case MODE_INTER_FOURMV:
                /* vector maintenance */
                prior_last_motion_x = last_motion_x;
                prior_last_motion_y = last_motion_y;

                /* fetch 4 vectors from the bitstream, one for each
                 * Y fragment, then average for the C fragment vectors */
                motion_x[4] = motion_y[4] = 0;
                for (k = 0; k < 4; k++) {
791
                    current_fragment = BLOCK_Y*s->fragment_width + BLOCK_X;
792
                    if (s->all_fragments[current_fragment].coding_method != MODE_COPY) {
David Conrad's avatar
David Conrad committed
793 794 795
                        if (coding_mode == 0) {
                            motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                            motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
796
                        } else {
David Conrad's avatar
David Conrad committed
797 798
                            motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
                            motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
799
                        }
David Conrad's avatar
David Conrad committed
800 801 802 803 804
                        last_motion_x = motion_x[k];
                        last_motion_y = motion_y[k];
                    } else {
                        motion_x[k] = 0;
                        motion_y[k] = 0;
805
                    }
David Conrad's avatar
David Conrad committed
806 807 808
                    motion_x[4] += motion_x[k];
                    motion_y[4] += motion_y[k];
                }
809

David Conrad's avatar
David Conrad committed
810 811 812 813 814 815 816 817 818 819
                motion_x[5]=
                motion_x[4]= RSHIFT(motion_x[4], 2);
                motion_y[5]=
                motion_y[4]= RSHIFT(motion_y[4], 2);
                break;

            case MODE_INTER_LAST_MV:
                /* all 6 fragments use the last motion vector */
                motion_x[0] = last_motion_x;
                motion_y[0] = last_motion_y;
820

David Conrad's avatar
David Conrad committed
821 822 823 824 825 826 827 828 829
                /* no vector maintenance (last vector remains the
                 * last vector) */
                break;

            case MODE_INTER_PRIOR_LAST:
                /* all 6 fragments use the motion vector prior to the
                 * last motion vector */
                motion_x[0] = prior_last_motion_x;
                motion_y[0] = prior_last_motion_y;
830

David Conrad's avatar
David Conrad committed
831 832 833 834 835 836
                /* vector maintenance */
                prior_last_motion_x = last_motion_x;
                prior_last_motion_y = last_motion_y;
                last_motion_x = motion_x[0];
                last_motion_y = motion_y[0];
                break;
837

David Conrad's avatar
David Conrad committed
838 839
            default:
                /* covers intra, inter without MV, golden without MV */
840 841
                motion_x[0] = 0;
                motion_y[0] = 0;
842

David Conrad's avatar
David Conrad committed
843 844 845
                /* no vector maintenance */
                break;
            }
846

David Conrad's avatar
David Conrad committed
847
            /* assign the motion vectors to the correct fragments */
848
            for (k = 0; k < 4; k++) {
David Conrad's avatar
David Conrad committed
849
                current_fragment =
850
                    BLOCK_Y*s->fragment_width + BLOCK_X;
851
                if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
852 853
                    s->all_fragments[current_fragment].motion_x = motion_x[k];
                    s->all_fragments[current_fragment].motion_y = motion_y[k];
854 855 856 857
                } else {
                    s->all_fragments[current_fragment].motion_x = motion_x[0];
                    s->all_fragments[current_fragment].motion_y = motion_y[0];
                }
858
            }
859 860 861 862 863 864 865 866 867 868 869
            for (k = 0; k < 2; k++) {
                current_fragment = s->fragment_start[k+1] +
                    mb_y*(s->fragment_width>>1) + mb_x;
                if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
                    s->all_fragments[current_fragment].motion_x = motion_x[k+4];
                    s->all_fragments[current_fragment].motion_y = motion_y[k+4];
                } else {
                    s->all_fragments[current_fragment].motion_x = motion_x[0];
                    s->all_fragments[current_fragment].motion_y = motion_y[0];
                }
            }
870
        }
871
        }
David Conrad's avatar
David Conrad committed
872
    }
873 874

    return 0;
875 876
}

877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896
static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
{
    int qpi, i, j, bit, run_length, blocks_decoded, num_blocks_at_qpi;
    int num_blocks = s->coded_fragment_list_index;

    for (qpi = 0; qpi < s->nqps-1 && num_blocks > 0; qpi++) {
        i = blocks_decoded = num_blocks_at_qpi = 0;

        bit = get_bits1(gb);

        do {
            run_length = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1;
            if (run_length == 34)
                run_length += get_bits(gb, 12);
            blocks_decoded += run_length;

            if (!bit)
                num_blocks_at_qpi += run_length;

            for (j = 0; j < run_length; i++) {
897
                if (i >= s->coded_fragment_list_index)
898 899 900 901 902 903 904 905
                    return -1;

                if (s->all_fragments[s->coded_fragment_list[i]].qpi == qpi) {
                    s->all_fragments[s->coded_fragment_list[i]].qpi += bit;
                    j++;
                }
            }

906
            if (run_length == MAXIMUM_LONG_BIT_RUN)
907 908 909 910 911 912 913 914 915 916 917
                bit = get_bits1(gb);
            else
                bit ^= 1;
        } while (blocks_decoded < num_blocks);

        num_blocks -= num_blocks_at_qpi;
    }

    return 0;
}

918
/*
919 920 921 922 923 924 925 926 927 928 929 930 931
 * This function is called by unpack_dct_coeffs() to extract the VLCs from
 * the bitstream. The VLCs encode tokens which are used to unpack DCT
 * data. This function unpacks all the VLCs for either the Y plane or both
 * C planes, and is called for DC coefficients or different AC coefficient
 * levels (since different coefficient types require different VLC tables.
 *
 * This function returns a residual eob run. E.g, if a particular token gave
 * instructions to EOB the next 5 fragments and there were only 2 fragments
 * left in the current fragment range, 3 would be returned so that it could
 * be passed into the next call to this same function.
 */
static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
                        VLC *table, int coeff_index,
932
                        int y_plane,
933 934 935 936
                        int eob_run)
{
    int i;
    int token;
937 938
    int zero_run = 0;
    DCTELEM coeff = 0;
939
    Vp3Fragment *fragment;
940
    int bits_to_get;
941 942 943 944
    int next_fragment;
    int previous_fragment;
    int fragment_num;
    int *list_head;
945

946 947 948 949 950 951
    /* local references to structure members to avoid repeated deferences */
    uint8_t *perm= s->scantable.permutated;
    int *coded_fragment_list = s->coded_fragment_list;
    Vp3Fragment *all_fragments = s->all_fragments;
    uint8_t *coeff_counts = s->coeff_counts;
    VLC_TYPE (*vlc_table)[2] = table->table;
952
    int *fast_fragment_list = s->fast_fragment_list;
953

954 955 956 957 958 959
    if (y_plane) {
        next_fragment = s->fragment_list_y_head;
        list_head = &s->fragment_list_y_head;
    } else {
        next_fragment = s->fragment_list_c_head;
        list_head = &s->fragment_list_c_head;
960 961
    }

962 963 964 965
    i = next_fragment;
    previous_fragment = -1;  /* this indicates that the previous fragment is actually the list head */
    while (i != -1) {
        fragment_num = coded_fragment_list[i];
966

967 968 969
        if (coeff_counts[fragment_num] > coeff_index) {
            previous_fragment = i;
            i = fast_fragment_list[i];
970
            continue;
971
        }
972
        fragment = &all_fragments[fragment_num];
973 974 975

        if (!eob_run) {
            /* decode a VLC into a token */
976
            token = get_vlc2(gb, vlc_table, 5, 3);
977
            /* use the token to get a zero run, a coefficient, and an eob run */
978 979 980 981 982 983 984
            if (token <= 6) {
                eob_run = eob_run_base[token];
                if (eob_run_get_bits[token])
                    eob_run += get_bits(gb, eob_run_get_bits[token]);
                coeff = zero_run = 0;
            } else {
                bits_to_get = coeff_get_bits[token];
985 986 987
                if (bits_to_get)
                    bits_to_get = get_bits(gb, bits_to_get);
                coeff = coeff_tables[token][bits_to_get];
988 989 990 991 992

                zero_run = zero_run_base[token];
                if (zero_run_get_bits[token])
                    zero_run += get_bits(gb, zero_run_get_bits[token]);
            }
993 994 995
        }

        if (!eob_run) {
996 997
            coeff_counts[fragment_num] += zero_run;
            if (coeff_counts[fragment_num] < 64){
998
                fragment->next_coeff->coeff= coeff;
999
                fragment->next_coeff->index= perm[coeff_counts[fragment_num]++]; //FIXME perm here already?
1000 1001 1002 1003
                fragment->next_coeff->next= s->next_coeff;
                s->next_coeff->next=NULL;
                fragment->next_coeff= s->next_coeff++;
            }
1004 1005
            /* previous fragment is now this fragment */
            previous_fragment = i;
1006
        } else {
1007
            coeff_counts[fragment_num] |= 128;
1008
            eob_run--;
1009 1010 1011 1012 1013 1014
            /* remove this fragment from the list */
            if (previous_fragment != -1)
                fast_fragment_list[previous_fragment] = fast_fragment_list[i];
            else
                *list_head = fast_fragment_list[i];
            /* previous fragment remains unchanged */
1015
        }
1016 1017

        i = fast_fragment_list[i];
1018 1019 1020 1021 1022
    }

    return eob_run;
}

1023 1024 1025 1026
static void reverse_dc_prediction(Vp3DecodeContext *s,
                                  int first_fragment,
                                  int fragment_width,
                                  int fragment_height);
1027 1028 1029 1030
/*
 * This function unpacks all of the DCT coefficient data from the
 * bitstream.
 */
1031
static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
1032 1033 1034 1035 1036 1037 1038
{
    int i;
    int dc_y_table;
    int dc_c_table;
    int ac_y_table;
    int ac_c_table;
    int residual_eob_run = 0;
1039 1040
    VLC *y_tables[64];
    VLC *c_tables[64];
1041

1042
    /* fetch the DC table indexes */
1043 1044 1045 1046
    dc_y_table = get_bits(gb, 4);
    dc_c_table = get_bits(gb, 4);

    /* unpack the Y plane DC coefficients */
1047
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0,
1048
        1, residual_eob_run);
1049

1050 1051 1052
    /* reverse prediction of the Y-plane DC coefficients */
    reverse_dc_prediction(s, 0, s->fragment_width, s->fragment_height);

1053 1054
    /* unpack the C plane DC coefficients */
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
1055
        0, residual_eob_run);
1056

1057 1058 1059 1060 1061 1062 1063 1064 1065
    /* reverse prediction of the C-plane DC coefficients */
    if (!(s->avctx->flags & CODEC_FLAG_GRAY))
    {
        reverse_dc_prediction(s, s->fragment_start[1],
            s->fragment_width / 2, s->fragment_height / 2);
        reverse_dc_prediction(s, s->fragment_start[2],
            s->fragment_width / 2, s->fragment_height / 2);
    }

1066
    /* fetch the AC table indexes */
1067 1068 1069
    ac_y_table = get_bits(gb, 4);
    ac_c_table = get_bits(gb, 4);

1070
    /* build tables of AC VLC tables */
1071
    for (i = 1; i <= 5; i++) {
1072 1073
        y_tables[i] = &s->ac_vlc_1[ac_y_table];
        c_tables[i] = &s->ac_vlc_1[ac_c_table];
1074 1075
    }
    for (i = 6; i <= 14; i++) {
1076 1077
        y_tables[i] = &s->ac_vlc_2[ac_y_table];
        c_tables[i] = &s->ac_vlc_2[ac_c_table];
1078 1079
    }
    for (i = 15; i <= 27; i++) {
1080 1081
        y_tables[i] = &s->ac_vlc_3[ac_y_table];
        c_tables[i] = &s->ac_vlc_3[ac_c_table];
1082 1083
    }
    for (i = 28; i <= 63; i++) {
1084 1085 1086 1087 1088 1089 1090 1091 1092
        y_tables[i] = &s->ac_vlc_4[ac_y_table];
        c_tables[i] = &s->ac_vlc_4[ac_c_table];
    }

    /* decode all AC coefficents */
    for (i = 1; i <= 63; i++) {
        if (s->fragment_list_y_head != -1)
            residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
                1, residual_eob_run);
1093

1094 1095 1096
        if (s->fragment_list_c_head != -1)
            residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
                0, residual_eob_run);
1097
    }
1098 1099

    return 0;
1100 1101 1102 1103
}

/*
 * This function reverses the DC prediction for each coded fragment in
1104
 * the frame. Much of this function is adapted directly from the original
1105 1106 1107 1108
 * VP3 source code.
 */
#define COMPATIBLE_FRAME(x) \
  (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
1109
#define DC_COEFF(u) (s->coeffs[u].index ? 0 : s->coeffs[u].coeff) //FIXME do somethin to simplify this
1110 1111 1112 1113

static void reverse_dc_prediction(Vp3DecodeContext *s,
                                  int first_fragment,
                                  int fragment_width,
1114
                                  int fragment_height)
1115 1116 1117 1118 1119 1120 1121 1122 1123 1124
{

#define PUL 8
#define PU 4
#define PUR 2
#define PL 1

    int x, y;
    int i = first_fragment;

1125
    int predicted_dc;
1126 1127 1128 1129

    /* DC values for the left, up-left, up, and up-right fragments */
    int vl, vul, vu, vur;

1130
    /* indexes for the left, up-left, up, and up-right fragments */
1131 1132
    int l, ul, u, ur;

1133
    /*
1134 1135 1136 1137 1138 1139
     * The 6 fields mean:
     *   0: up-left multiplier
     *   1: up multiplier
     *   2: up-right multiplier
     *   3: left multiplier
     */
1140
    static const int predictor_transform[16][4] = {
1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156
        {  0,  0,  0,  0},
        {  0,  0,  0,128},        // PL
        {  0,  0,128,  0},        // PUR
        {  0,  0, 53, 75},        // PUR|PL
        {  0,128,  0,  0},        // PU
        {  0, 64,  0, 64},        // PU|PL
        {  0,128,  0,  0},        // PU|PUR
        {  0,  0, 53, 75},        // PU|PUR|PL
        {128,  0,  0,  0},        // PUL
        {  0,  0,  0,128},        // PUL|PL
        { 64,  0, 64,  0},        // PUL|PUR
        {  0,  0, 53, 75},        // PUL|PUR|PL
        {  0,128,  0,  0},        // PUL|PU
       {-104,116,  0,116},        // PUL|PU|PL
        { 24, 80, 24,  0},        // PUL|PU|PUR
       {-104,116,  0,116}         // PUL|PU|PUR|PL
1157 1158 1159 1160 1161
    };

    /* This table shows which types of blocks can use other blocks for
     * prediction. For example, INTRA is the only mode in this table to
     * have a frame number of 0. That means INTRA blocks can only predict
1162
     * from other INTRA blocks. There are 2 golden frame coding types;
1163 1164
     * blocks encoding in these modes can only predict from other blocks
     * that were encoded with these 1 of these 2 modes. */
1165
    static const unsigned char compatible_frame[9] = {
1166 1167 1168 1169 1170 1171 1172
        1,    /* MODE_INTER_NO_MV */
        0,    /* MODE_INTRA */
        1,    /* MODE_INTER_PLUS_MV */
        1,    /* MODE_INTER_LAST_MV */
        1,    /* MODE_INTER_PRIOR_MV */
        2,    /* MODE_USING_GOLDEN */
        2,    /* MODE_GOLDEN_MV */
1173 1174
        1,    /* MODE_INTER_FOUR_MV */
        3     /* MODE_COPY */
1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194
    };
    int current_frame_type;

    /* there is a last DC predictor for each of the 3 frame types */
    short last_dc[3];

    int transform = 0;

    vul = vu = vur = vl = 0;
    last_dc[0] = last_dc[1] = last_dc[2] = 0;

    /* for each fragment row... */
    for (y = 0; y < fragment_height; y++) {

        /* for each fragment in a row... */
        for (x = 0; x < fragment_width; x++, i++) {

            /* reverse prediction if this block was coded */
            if (s->all_fragments[i].coding_method != MODE_COPY) {

1195
                current_frame_type =
1196 1197
                    compatible_frame[s->all_fragments[i].coding_method];

Michael Niedermayer's avatar
Michael Niedermayer committed
1198 1199 1200
                transform= 0;
                if(x){
                    l= i-1;
1201
                    vl = DC_COEFF(l);
1202
                    if(COMPATIBLE_FRAME(l))
1203
                        transform |= PL;
Michael Niedermayer's avatar
Michael Niedermayer committed
1204 1205 1206
                }
                if(y){
                    u= i-fragment_width;
1207
                    vu = DC_COEFF(u);
1208
                    if(COMPATIBLE_FRAME(u))
1209
                        transform |= PU;
Michael Niedermayer's avatar
Michael Niedermayer committed
1210 1211 1212
                    if(x){
                        ul= i-fragment_width-1;
                        vul = DC_COEFF(ul);
1213
                        if(COMPATIBLE_FRAME(ul))
1214
                            transform |= PUL;
Michael Niedermayer's avatar
Michael Niedermayer committed
1215 1216 1217 1218
                    }
                    if(x + 1 < fragment_width){
                        ur= i-fragment_width+1;
                        vur = DC_COEFF(ur);
1219
                        if(COMPATIBLE_FRAME(ur))
1220
                            transform |= PUR;
Michael Niedermayer's avatar
Michael Niedermayer committed
1221
                    }
1222 1223 1224 1225 1226 1227
                }

                if (transform == 0) {

                    /* if there were no fragments to predict from, use last
                     * DC saved */
1228
                    predicted_dc = last_dc[current_frame_type];
1229 1230 1231 1232 1233 1234 1235 1236 1237
                } else {

                    /* apply the appropriate predictor transform */
                    predicted_dc =
                        (predictor_transform[transform][0] * vul) +
                        (predictor_transform[transform][1] * vu) +
                        (predictor_transform[transform][2] * vur) +
                        (predictor_transform[transform][3] * vl);

Michael Niedermayer's avatar
Michael Niedermayer committed
1238
                    predicted_dc /= 128;
1239 1240 1241

                    /* check for outranging on the [ul u l] and
                     * [ul u ur l] predictors */
1242
                    if ((transform == 15) || (transform == 13)) {
1243
                        if (FFABS(predicted_dc - vu) > 128)
1244
                            predicted_dc = vu;
1245
                        else if (FFABS(predicted_dc - vl) > 128)
1246
                            predicted_dc = vl;
1247
                        else if (FFABS(predicted_dc - vul) > 128)
1248 1249 1250 1251
                            predicted_dc = vul;
                    }
                }

1252 1253 1254 1255 1256 1257 1258 1259
                /* at long last, apply the predictor */
                if(s->coeffs[i].index){
                    *s->next_coeff= s->coeffs[i];
                    s->coeffs[i].index=0;
                    s->coeffs[i].coeff=0;
                    s->coeffs[i].next= s->next_coeff++;
                }
                s->coeffs[i].coeff += predicted_dc;
1260
                /* save the DC */
1261
                last_dc[current_frame_type] = DC_COEFF(i);
1262 1263
                if(DC_COEFF(i) && !(s->coeff_counts[i]&127)){
                    s->coeff_counts[i]= 129;
1264 1265 1266 1267
//                    s->all_fragments[i].next_coeff= s->next_coeff;
                    s->coeffs[i].next= s->next_coeff;
                    (s->next_coeff++)->next=NULL;
                }
1268 1269 1270 1271 1272
            }
        }
    }
}

1273
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
1274 1275 1276 1277
{
    int x, y;
    int *bounding_values= s->bounding_values_array+127;

David Conrad's avatar
David Conrad committed
1278 1279 1280 1281 1282 1283
    int width           = s->fragment_width  >> !!plane;
    int height          = s->fragment_height >> !!plane;
    int fragment        = s->fragment_start        [plane] + ystart * width;
    int stride          = s->current_frame.linesize[plane];
    uint8_t *plane_data = s->current_frame.data    [plane];
    if (!s->flipped_image) stride = -stride;
1284
    plane_data += s->data_offset[plane] + 8*ystart*stride;
David Conrad's avatar
David Conrad committed
1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297

    for (y = ystart; y < yend; y++) {

        for (x = 0; x < width; x++) {
            /* This code basically just deblocks on the edges of coded blocks.
             * However, it has to be much more complicated because of the
             * braindamaged deblock ordering used in VP3/Theora. Order matters
             * because some pixels get filtered twice. */
            if( s->all_fragments[fragment].coding_method != MODE_COPY )
            {
                /* do not perform left edge filter for left columns frags */
                if (x > 0) {
                    s->dsp.vp3_h_loop_filter(
1298
                        plane_data + 8*x,
David Conrad's avatar
David Conrad committed
1299 1300
                        stride, bounding_values);
                }
1301

David Conrad's avatar
David Conrad committed
1302 1303 1304
                /* do not perform top edge filter for top row fragments */
                if (y > 0) {
                    s->dsp.vp3_v_loop_filter(
1305
                        plane_data + 8*x,
David Conrad's avatar
David Conrad committed
1306 1307
                        stride, bounding_values);
                }
1308

David Conrad's avatar
David Conrad committed
1309 1310 1311 1312 1313 1314
                /* do not perform right edge filter for right column
                 * fragments or if right fragment neighbor is also coded
                 * in this frame (it will be filtered in next iteration) */
                if ((x < width - 1) &&
                    (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
                    s->dsp.vp3_h_loop_filter(
1315
                        plane_data + 8*x + 8,
David Conrad's avatar
David Conrad committed
1316
                        stride, bounding_values);
1317 1318
                }

David Conrad's avatar
David Conrad committed
1319 1320 1321 1322 1323 1324
                /* do not perform bottom edge filter for bottom row
                 * fragments or if bottom fragment neighbor is also coded
                 * in this frame (it will be filtered in the next row) */
                if ((y < height - 1) &&
                    (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
                    s->dsp.vp3_v_loop_filter(
1325
                        plane_data + 8*x + 8*stride,
David Conrad's avatar
David Conrad committed
1326 1327
                        stride, bounding_values);
                }
1328
            }
David Conrad's avatar
David Conrad committed
1329 1330

            fragment++;
1331
        }
1332
        plane_data += 8*stride;
David Conrad's avatar
David Conrad committed
1333
    }
1334 1335
}

1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366
/**
 * called when all pixels up to row y are complete
 */
static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
{
    int h, cy;
    int offset[4];

    if(s->avctx->draw_horiz_band==NULL)
        return;

    h= y - s->last_slice_end;
    y -= h;

    if (!s->flipped_image) {
        if (y == 0)
            h -= s->height - s->avctx->height;  // account for non-mod16
        y = s->height - y - h;
    }

    cy = y >> 1;
    offset[0] = s->current_frame.linesize[0]*y;
    offset[1] = s->current_frame.linesize[1]*cy;
    offset[2] = s->current_frame.linesize[2]*cy;
    offset[3] = 0;

    emms_c();
    s->avctx->draw_horiz_band(s->avctx, &s->current_frame, offset, y, 3, h);
    s->last_slice_end= y + h;
}

1367 1368 1369 1370 1371 1372
/*
 * Perform the final rendering for a particular slice of data.
 * The slice number ranges from 0..(macroblock_height - 1).
 */
static void render_slice(Vp3DecodeContext *s, int slice)
{
Michael Niedermayer's avatar
Michael Niedermayer committed
1373
    int x;
1374
    int16_t *dequantizer;
1375
    LOCAL_ALIGNED_16(DCTELEM, block, [64]);
1376 1377 1378 1379 1380 1381 1382 1383 1384
    int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
    int motion_halfpel_index;
    uint8_t *motion_source;
    int plane;

    if (slice >= s->macroblock_height)
        return;

    for (plane = 0; plane < 3; plane++) {
1385 1386 1387
        uint8_t *output_plane = s->current_frame.data    [plane] + s->data_offset[plane];
        uint8_t *  last_plane = s->   last_frame.data    [plane] + s->data_offset[plane];
        uint8_t *golden_plane = s-> golden_frame.data    [plane] + s->data_offset[plane];
Michael Niedermayer's avatar
Michael Niedermayer committed
1388 1389 1390 1391 1392
        int stride            = s->current_frame.linesize[plane];
        int plane_width       = s->width  >> !!plane;
        int plane_height      = s->height >> !!plane;
        int y =        slice *  FRAGMENT_PIXELS << !plane ;
        int slice_height = y + (FRAGMENT_PIXELS << !plane);
1393
        int i = s->fragment_start[plane] + (y>>3)*(s->fragment_width>>!!plane);
Michael Niedermayer's avatar
Michael Niedermayer committed
1394 1395

        if (!s->flipped_image) stride = -stride;
1396 1397
        if (CONFIG_GRAY && plane && (s->avctx->flags & CODEC_FLAG_GRAY))
            continue;
1398

1399

1400
        if(FFABS(stride) > 2048)
1401 1402 1403 1404 1405 1406 1407
            return; //various tables are fixed size

        /* for each fragment row in the slice (both of them)... */
        for (; y < slice_height; y += 8) {

            /* for each fragment in a row... */
            for (x = 0; x < plane_width; x += 8, i++) {
1408
                int first_pixel = y*stride + x;
1409 1410 1411 1412 1413 1414 1415

                if ((i < 0) || (i >= s->fragment_count)) {
                    av_log(s->avctx, AV_LOG_ERROR, "  vp3:render_slice(): bad fragment number (%d)\n", i);
                    return;
                }

                /* transform if this block was coded */
1416
                if (s->all_fragments[i].coding_method != MODE_COPY) {
1417 1418 1419 1420

                    if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
                        (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
                        motion_source= golden_plane;
1421
                    else
1422 1423
                        motion_source= last_plane;

1424
                    motion_source += first_pixel;
1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458
                    motion_halfpel_index = 0;

                    /* sort out the motion vector if this fragment is coded
                     * using a motion vector method */
                    if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
                        (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
                        int src_x, src_y;
                        motion_x = s->all_fragments[i].motion_x;
                        motion_y = s->all_fragments[i].motion_y;
                        if(plane){
                            motion_x= (motion_x>>1) | (motion_x&1);
                            motion_y= (motion_y>>1) | (motion_y&1);
                        }

                        src_x= (motion_x>>1) + x;
                        src_y= (motion_y>>1) + y;
                        if ((motion_x == 127) || (motion_y == 127))
                            av_log(s->avctx, AV_LOG_ERROR, " help! got invalid motion vector! (%X, %X)\n", motion_x, motion_y);

                        motion_halfpel_index = motion_x & 0x01;
                        motion_source += (motion_x >> 1);

                        motion_halfpel_index |= (motion_y & 0x01) << 1;
                        motion_source += ((motion_y >> 1) * stride);

                        if(src_x<0 || src_y<0 || src_x + 9 >= plane_width || src_y + 9 >= plane_height){
                            uint8_t *temp= s->edge_emu_buffer;
                            if(stride<0) temp -= 9*stride;
                            else temp += 9*stride;

                            ff_emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height);
                            motion_source= temp;
                        }
                    }
1459

1460 1461 1462 1463

                    /* first, take care of copying a block from either the
                     * previous or the golden frame */
                    if (s->all_fragments[i].coding_method != MODE_INTRA) {
1464 1465 1466
                        /* Note, it is possible to implement all MC cases with
                           put_no_rnd_pixels_l2 which would look more like the
                           VP3 source but this would be slower as
1467 1468 1469
                           put_no_rnd_pixels_tab is better optimzed */
                        if(motion_halfpel_index != 3){
                            s->dsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
1470
                                output_plane + first_pixel,
1471 1472 1473 1474
                                motion_source, stride, 8);
                        }else{
                            int d= (motion_x ^ motion_y)>>31; // d is 0 if motion_x and _y have the same sign, else -1
                            s->dsp.put_no_rnd_pixels_l2[1](
1475
                                output_plane + first_pixel,
1476 1477
                                motion_source - d,
                                motion_source + stride + 1 + d,
1478 1479
                                stride, 8);
                        }
1480
                        dequantizer = s->qmat[s->all_fragments[i].qpi][1][plane];
1481
                    }else{
1482
                        dequantizer = s->qmat[s->all_fragments[i].qpi][0][plane];
1483 1484 1485 1486 1487
                    }

                    /* dequantize the DCT coefficients */
                    if(s->avctx->idct_algo==FF_IDCT_VP3){
                        Coeff *coeff= s->coeffs + i;
Loren Merritt's avatar
Loren Merritt committed
1488
                        s->dsp.clear_block(block);
1489 1490 1491 1492 1493 1494
                        while(coeff->next){
                            block[coeff->index]= coeff->coeff * dequantizer[coeff->index];
                            coeff= coeff->next;
                        }
                    }else{
                        Coeff *coeff= s->coeffs + i;
Loren Merritt's avatar
Loren Merritt committed
1495
                        s->dsp.clear_block(block);
1496 1497 1498 1499 1500 1501 1502
                        while(coeff->next){
                            block[coeff->index]= (coeff->coeff * dequantizer[coeff->index] + 2)>>2;
                            coeff= coeff->next;
                        }
                    }

                    /* invert DCT and place (or add) in final output */
1503

1504 1505 1506 1507
                    if (s->all_fragments[i].coding_method == MODE_INTRA) {
                        if(s->avctx->idct_algo!=FF_IDCT_VP3)
                            block[0] += 128<<3;
                        s->dsp.idct_put(
1508
                            output_plane + first_pixel,
1509 1510 1511 1512
                            stride,
                            block);
                    } else {
                        s->dsp.idct_add(
1513
                            output_plane + first_pixel,
1514 1515 1516 1517 1518 1519 1520
                            stride,
                            block);
                    }
                } else {

                    /* copy directly from the previous frame */
                    s->dsp.put_pixels_tab[1][0](
1521 1522
                        output_plane + first_pixel,
                        last_plane + first_pixel,
1523 1524 1525 1526
                        stride, 8);

                }
            }
1527 1528 1529 1530
            // Filter the previous block row. We can't filter the current row yet
            // since it needs pixels from the next row
            if (y > 0)
                apply_loop_filter(s, plane, (y>>3)-1, (y>>3));
1531 1532 1533 1534 1535 1536
        }
    }

     /* this looks like a good place for slice dispatch... */
     /* algorithm:
      *   if (slice == s->macroblock_height - 1)
1537 1538 1539
      *     dispatch (both last slice & 2nd-to-last slice);
      *   else if (slice > 0)
      *     dispatch (slice - 1);
1540 1541
      */

1542 1543 1544
    // now that we've filtered the last rows, they're safe to display
    if (slice)
        vp3_draw_horiz_band(s, 16*slice);
1545 1546
}

1547 1548 1549
/*
 * This is the ffmpeg/libavcodec API init function.
 */
1550
static av_cold int vp3_decode_init(AVCodecContext *avctx)
1551 1552
{
    Vp3DecodeContext *s = avctx->priv_data;
1553
    int i, inter, plane;
1554 1555 1556 1557
    int c_width;
    int c_height;
    int y_superblock_count;
    int c_superblock_count;
1558

1559
    if (avctx->codec_tag == MKTAG('V','P','3','0'))
1560
        s->version = 0;
1561
    else
1562
        s->version = 1;
1563

1564
    s->avctx = avctx;
1565 1566
    s->width = FFALIGN(avctx->width, 16);
    s->height = FFALIGN(avctx->height, 16);
1567
    avctx->pix_fmt = PIX_FMT_YUV420P;
1568
    avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
1569 1570
    if(avctx->idct_algo==FF_IDCT_AUTO)
        avctx->idct_algo=FF_IDCT_VP3;
1571
    dsputil_init(&s->dsp, avctx);
1572

1573
    ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
1574 1575 1576

    /* initialize to an impossible value which will force a recalculation
     * in the first frame decode */
1577 1578
    for (i = 0; i < 3; i++)
        s->qps[i] = -1;
1579

1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593
    s->y_superblock_width = (s->width + 31) / 32;
    s->y_superblock_height = (s->height + 31) / 32;
    y_superblock_count = s->y_superblock_width * s->y_superblock_height;

    /* work out the dimensions for the C planes */
    c_width = s->width / 2;
    c_height = s->height / 2;
    s->c_superblock_width = (c_width + 31) / 32;
    s->c_superblock_height = (c_height + 31) / 32;
    c_superblock_count = s->c_superblock_width * s->c_superblock_height;

    s->superblock_count = y_superblock_count + (c_superblock_count * 2);
    s->u_superblock_start = y_superblock_count;
    s->v_superblock_start = s->u_superblock_start + c_superblock_count;
1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604
    s->superblock_coding = av_malloc(s->superblock_count);

    s->macroblock_width = (s->width + 15) / 16;
    s->macroblock_height = (s->height + 15) / 16;
    s->macroblock_count = s->macroblock_width * s->macroblock_height;

    s->fragment_width = s->width / FRAGMENT_PIXELS;
    s->fragment_height = s->height / FRAGMENT_PIXELS;

    /* fragment count covers all 8x8 blocks for all 3 planes */
    s->fragment_count = s->fragment_width * s->fragment_height * 3 / 2;
Michael Niedermayer's avatar
Michael Niedermayer committed
1605 1606
    s->fragment_start[1] = s->fragment_width * s->fragment_height;
    s->fragment_start[2] = s->fragment_width * s->fragment_height * 5 / 4;
1607 1608

    s->all_fragments = av_malloc(s->fragment_count * sizeof(Vp3Fragment));
1609
    s->coeff_counts = av_malloc(s->fragment_count * sizeof(*s->coeff_counts));
1610
    s->coeffs = av_malloc(s->fragment_count * sizeof(Coeff) * 65);
1611
    s->coded_fragment_list = av_malloc(s->fragment_count * sizeof(int));
1612
    s->fast_fragment_list = av_malloc(s->fragment_count * sizeof(int));
1613
    if (!s->superblock_coding || !s->all_fragments || !s->coeff_counts ||
1614
        !s->coeffs || !s->coded_fragment_list || !s->fast_fragment_list) {
1615 1616 1617
        vp3_decode_end(avctx);
        return -1;
    }
1618

1619 1620
    if (!s->theora_tables)
    {
Michael Niedermayer's avatar
Michael Niedermayer committed
1621
        for (i = 0; i < 64; i++) {
1622 1623
            s->coded_dc_scale_factor[i] = vp31_dc_scale_factor[i];
            s->coded_ac_scale_factor[i] = vp31_ac_scale_factor[i];
1624 1625 1626
            s->base_matrix[0][i] = vp31_intra_y_dequant[i];
            s->base_matrix[1][i] = vp31_intra_c_dequant[i];
            s->base_matrix[2][i] = vp31_inter_dequant[i];
1627
            s->filter_limit_values[i] = vp31_filter_limit_values[i];
Michael Niedermayer's avatar
Michael Niedermayer committed
1628
        }
1629

1630 1631 1632 1633 1634 1635 1636 1637 1638
        for(inter=0; inter<2; inter++){
            for(plane=0; plane<3; plane++){
                s->qr_count[inter][plane]= 1;
                s->qr_size [inter][plane][0]= 63;
                s->qr_base [inter][plane][0]=
                s->qr_base [inter][plane][1]= 2*inter + (!!plane)*!inter;
            }
        }

1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670
        /* init VLC tables */
        for (i = 0; i < 16; i++) {

            /* DC histograms */
            init_vlc(&s->dc_vlc[i], 5, 32,
                &dc_bias[i][0][1], 4, 2,
                &dc_bias[i][0][0], 4, 2, 0);

            /* group 1 AC histograms */
            init_vlc(&s->ac_vlc_1[i], 5, 32,
                &ac_bias_0[i][0][1], 4, 2,
                &ac_bias_0[i][0][0], 4, 2, 0);

            /* group 2 AC histograms */
            init_vlc(&s->ac_vlc_2[i], 5, 32,
                &ac_bias_1[i][0][1], 4, 2,
                &ac_bias_1[i][0][0], 4, 2, 0);

            /* group 3 AC histograms */
            init_vlc(&s->ac_vlc_3[i], 5, 32,
                &ac_bias_2[i][0][1], 4, 2,
                &ac_bias_2[i][0][0], 4, 2, 0);

            /* group 4 AC histograms */
            init_vlc(&s->ac_vlc_4[i], 5, 32,
                &ac_bias_3[i][0][1], 4, 2,
                &ac_bias_3[i][0][0], 4, 2, 0);
        }
    } else {
        for (i = 0; i < 16; i++) {

            /* DC histograms */
1671
            if (init_vlc(&s->dc_vlc[i], 5, 32,
1672
                &s->huffman_table[i][0][1], 4, 2,
1673 1674
                &s->huffman_table[i][0][0], 4, 2, 0) < 0)
                goto vlc_fail;
1675 1676

            /* group 1 AC histograms */
1677
            if (init_vlc(&s->ac_vlc_1[i], 5, 32,
1678
                &s->huffman_table[i+16][0][1], 4, 2,
1679 1680
                &s->huffman_table[i+16][0][0], 4, 2, 0) < 0)
                goto vlc_fail;
1681 1682

            /* group 2 AC histograms */
1683
            if (init_vlc(&s->ac_vlc_2[i], 5, 32,
1684
                &s->huffman_table[i+16*2][0][1], 4, 2,
1685 1686
                &s->huffman_table[i+16*2][0][0], 4, 2, 0) < 0)
                goto vlc_fail;
1687 1688

            /* group 3 AC histograms */
1689
            if (init_vlc(&s->ac_vlc_3[i], 5, 32,
1690
                &s->huffman_table[i+16*3][0][1], 4, 2,
1691 1692
                &s->huffman_table[i+16*3][0][0], 4, 2, 0) < 0)
                goto vlc_fail;
1693 1694

            /* group 4 AC histograms */
1695
            if (init_vlc(&s->ac_vlc_4[i], 5, 32,
1696
                &s->huffman_table[i+16*4][0][1], 4, 2,
1697 1698
                &s->huffman_table[i+16*4][0][0], 4, 2, 0) < 0)
                goto vlc_fail;
1699
        }
1700 1701
    }

1702 1703 1704 1705
    init_vlc(&s->superblock_run_length_vlc, 6, 34,
        &superblock_run_length_vlc_table[0][1], 4, 2,
        &superblock_run_length_vlc_table[0][0], 4, 2, 0);

1706
    init_vlc(&s->fragment_run_length_vlc, 5, 30,
1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717
        &fragment_run_length_vlc_table[0][1], 4, 2,
        &fragment_run_length_vlc_table[0][0], 4, 2, 0);

    init_vlc(&s->mode_code_vlc, 3, 8,
        &mode_code_vlc_table[0][1], 2, 1,
        &mode_code_vlc_table[0][0], 2, 1, 0);

    init_vlc(&s->motion_vector_vlc, 6, 63,
        &motion_vector_vlc_table[0][1], 2, 1,
        &motion_vector_vlc_table[0][0], 2, 1, 0);

1718 1719
    /* work out the block mapping tables */
    s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int));
1720
    s->macroblock_coding = av_malloc(s->macroblock_count + 1);
1721
    if (!s->superblock_fragments || !s->macroblock_coding) {
1722 1723 1724
        vp3_decode_end(avctx);
        return -1;
    }
1725 1726
    init_block_mapping(s);

1727 1728 1729 1730
    for (i = 0; i < 3; i++) {
        s->current_frame.data[i] = NULL;
        s->last_frame.data[i] = NULL;
        s->golden_frame.data[i] = NULL;
1731 1732
    }

1733
    return 0;
1734 1735 1736 1737

vlc_fail:
    av_log(avctx, AV_LOG_FATAL, "Invalid huffman table\n");
    return -1;
1738 1739 1740 1741 1742
}

/*
 * This is the ffmpeg/libavcodec API frame decode function.
 */
1743
static int vp3_decode_frame(AVCodecContext *avctx,
1744
                            void *data, int *data_size,
1745
                            AVPacket *avpkt)
1746
{
1747 1748
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
1749 1750 1751
    Vp3DecodeContext *s = avctx->priv_data;
    GetBitContext gb;
    static int counter = 0;
1752
    int i;
1753 1754

    init_get_bits(&gb, buf, buf_size * 8);
1755

1756 1757
    if (s->theora && get_bits1(&gb))
    {
1758 1759
        av_log(avctx, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n");
        return -1;
1760
    }
1761 1762 1763

    s->keyframe = !get_bits1(&gb);
    if (!s->theora)
1764
        skip_bits(&gb, 1);
1765 1766
    for (i = 0; i < 3; i++)
        s->last_qps[i] = s->qps[i];
1767

1768
    s->nqps=0;
1769
    do{
1770 1771 1772 1773
        s->qps[s->nqps++]= get_bits(&gb, 6);
    } while(s->theora >= 0x030200 && s->nqps<3 && get_bits1(&gb));
    for (i = s->nqps; i < 3; i++)
        s->qps[i] = -1;
1774

1775
    if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1776
        av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
1777
            s->keyframe?"key":"", counter, s->qps[0]);
1778 1779
    counter++;

1780
    if (s->qps[0] != s->last_qps[0])
1781
        init_loop_filter(s);
1782 1783 1784 1785 1786 1787

    for (i = 0; i < s->nqps; i++)
        // reinit all dequantizers if the first one changed, because
        // the DC of the first quantizer must be used for all matrices
        if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
            init_dequantizer(s, i);
1788

1789 1790 1791
    if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
        return buf_size;

1792
    if (s->keyframe) {
1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809
        if (!s->theora)
        {
            skip_bits(&gb, 4); /* width code */
            skip_bits(&gb, 4); /* height code */
            if (s->version)
            {
                s->version = get_bits(&gb, 5);
                if (counter == 1)
                    av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
            }
        }
        if (s->version || s->theora)
        {
                if (get_bits1(&gb))
                    av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
            skip_bits(&gb, 2); /* reserved? */
        }
1810

1811 1812 1813
        if (s->last_frame.data[0] == s->golden_frame.data[0]) {
            if (s->golden_frame.data[0])
                avctx->release_buffer(avctx, &s->golden_frame);
1814
            s->last_frame= s->golden_frame; /* ensure that we catch any access to this released frame */
1815 1816 1817 1818 1819 1820
        } else {
            if (s->golden_frame.data[0])
                avctx->release_buffer(avctx, &s->golden_frame);
            if (s->last_frame.data[0])
                avctx->release_buffer(avctx, &s->last_frame);
        }
1821

1822
        s->golden_frame.reference = 3;
1823
        if(avctx->get_buffer(avctx, &s->golden_frame) < 0) {
1824
            av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
1825 1826 1827 1828
            return -1;
        }

        /* golden frame is also the current frame */
Michael Niedermayer's avatar
Michael Niedermayer committed
1829
        s->current_frame= s->golden_frame;
1830 1831
    } else {
        /* allocate a new current frame */
1832
        s->current_frame.reference = 3;
1833
        if (!s->golden_frame.data[0]) {
1834 1835 1836
            av_log(s->avctx, AV_LOG_ERROR, "vp3: first frame not a keyframe\n");
            return -1;
        }
1837
        if(avctx->get_buffer(avctx, &s->current_frame) < 0) {
1838
            av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
1839 1840 1841 1842
            return -1;
        }
    }

Michael Niedermayer's avatar
Michael Niedermayer committed
1843 1844 1845
    s->current_frame.qscale_table= s->qscale_table; //FIXME allocate individual tables per AVFrame
    s->current_frame.qstride= 0;

1846 1847
    init_frame(s, &gb);

1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859
    if (unpack_superblocks(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
        return -1;
    }
    if (unpack_modes(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
        return -1;
    }
    if (unpack_vectors(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
        return -1;
    }
1860 1861 1862 1863
    if (unpack_block_qpis(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
        return -1;
    }
1864 1865
    if (unpack_dct_coeffs(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
1866 1867
        return -1;
    }
1868 1869 1870 1871 1872 1873 1874

    for (i = 0; i < 3; i++) {
        if (s->flipped_image)
            s->data_offset[i] = 0;
        else
            s->data_offset[i] = ((s->height>>!!i)-1) * s->current_frame.linesize[i];
    }
1875

1876
    s->last_slice_end = 0;
1877 1878
    for (i = 0; i < s->macroblock_height; i++)
        render_slice(s, i);
1879

1880 1881 1882 1883 1884
    // filter the last row
    for (i = 0; i < 3; i++) {
        int row = (s->height >> (3+!!i)) - 1;
        apply_loop_filter(s, i, row, row+1);
    }
1885
    vp3_draw_horiz_band(s, s->height);
1886

1887 1888 1889
    *data_size=sizeof(AVFrame);
    *(AVFrame*)data= s->current_frame;

1890 1891 1892 1893 1894
    /* release the last frame, if it is allocated and if it is not the
     * golden frame */
    if ((s->last_frame.data[0]) &&
        (s->last_frame.data[0] != s->golden_frame.data[0]))
        avctx->release_buffer(avctx, &s->last_frame);
1895

1896
    /* shuffle frames (last = current) */
Michael Niedermayer's avatar
Michael Niedermayer committed
1897
    s->last_frame= s->current_frame;
1898
    s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */
1899 1900 1901 1902 1903 1904 1905

    return buf_size;
}

/*
 * This is the ffmpeg/libavcodec API module cleanup function.
 */
1906
static av_cold int vp3_decode_end(AVCodecContext *avctx)
1907 1908
{
    Vp3DecodeContext *s = avctx->priv_data;
1909
    int i;
1910

1911
    av_free(s->superblock_coding);
1912
    av_free(s->all_fragments);
1913
    av_free(s->coeff_counts);
1914
    av_free(s->coeffs);
1915
    av_free(s->coded_fragment_list);
1916
    av_free(s->fast_fragment_list);
1917
    av_free(s->superblock_fragments);
1918
    av_free(s->macroblock_coding);
1919

1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932
    for (i = 0; i < 16; i++) {
        free_vlc(&s->dc_vlc[i]);
        free_vlc(&s->ac_vlc_1[i]);
        free_vlc(&s->ac_vlc_2[i]);
        free_vlc(&s->ac_vlc_3[i]);
        free_vlc(&s->ac_vlc_4[i]);
    }

    free_vlc(&s->superblock_run_length_vlc);
    free_vlc(&s->fragment_run_length_vlc);
    free_vlc(&s->mode_code_vlc);
    free_vlc(&s->motion_vector_vlc);

1933
    /* release all frames */
1934
    if (s->golden_frame.data[0] && s->golden_frame.data[0] != s->last_frame.data[0])
1935 1936 1937 1938 1939
        avctx->release_buffer(avctx, &s->golden_frame);
    if (s->last_frame.data[0])
        avctx->release_buffer(avctx, &s->last_frame);
    /* no need to release the current_frame since it will always be pointing
     * to the same frame as either the golden or last frame */
1940 1941 1942 1943

    return 0;
}

1944 1945 1946 1947
static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;

1948
    if (get_bits1(gb)) {
1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966
        int token;
        if (s->entries >= 32) { /* overflow */
            av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
            return -1;
        }
        token = get_bits(gb, 5);
        //av_log(avctx, AV_LOG_DEBUG, "hti %d hbits %x token %d entry : %d size %d\n", s->hti, s->hbits, token, s->entries, s->huff_code_size);
        s->huffman_table[s->hti][token][0] = s->hbits;
        s->huffman_table[s->hti][token][1] = s->huff_code_size;
        s->entries++;
    }
    else {
        if (s->huff_code_size >= 32) {/* overflow */
            av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
            return -1;
        }
        s->huff_code_size++;
        s->hbits <<= 1;
1967 1968
        if (read_huffman_tree(avctx, gb))
            return -1;
1969
        s->hbits |= 1;
1970 1971
        if (read_huffman_tree(avctx, gb))
            return -1;
1972 1973 1974 1975 1976 1977
        s->hbits >>= 1;
        s->huff_code_size--;
    }
    return 0;
}

1978
#if CONFIG_THEORA_DECODER
1979
static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
1980 1981
{
    Vp3DecodeContext *s = avctx->priv_data;
1982
    int visible_width, visible_height, colorspace;
1983

1984
    s->theora = get_bits_long(gb, 24);
1985
    av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);
1986

1987
    /* 3.2.0 aka alpha3 has the same frame orientation as original vp3 */
1988
    /* but previous versions have the image flipped relative to vp3 */
1989
    if (s->theora < 0x030200)
1990
    {
1991
        s->flipped_image = 1;
1992 1993
        av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n");
    }
1994

1995 1996
    visible_width  = s->width  = get_bits(gb, 16) << 4;
    visible_height = s->height = get_bits(gb, 16) << 4;
1997

1998
    if(avcodec_check_dimensions(avctx, s->width, s->height)){
1999
        av_log(avctx, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n", s->width, s->height);
2000 2001 2002
        s->width= s->height= 0;
        return -1;
    }
2003

2004
    if (s->theora >= 0x030200) {
David Conrad's avatar
David Conrad committed
2005 2006
        visible_width  = get_bits_long(gb, 24);
        visible_height = get_bits_long(gb, 24);
2007

2008 2009 2010
        skip_bits(gb, 8); /* offset x */
        skip_bits(gb, 8); /* offset y */
    }
2011

2012 2013 2014 2015
    skip_bits(gb, 32); /* fps numerator */
    skip_bits(gb, 32); /* fps denumerator */
    skip_bits(gb, 24); /* aspect numerator */
    skip_bits(gb, 24); /* aspect denumerator */
2016

2017
    if (s->theora < 0x030200)
2018
        skip_bits(gb, 5); /* keyframe frequency force */
2019
    colorspace = get_bits(gb, 8);
2020
    skip_bits(gb, 24); /* bitrate */
2021

2022
    skip_bits(gb, 6); /* quality hint */
2023

2024
    if (s->theora >= 0x030200)
2025
    {
2026
        skip_bits(gb, 5); /* keyframe frequency force */
2027 2028
        skip_bits(gb, 2); /* pixel format: 420,res,422,444 */
        skip_bits(gb, 3); /* reserved */
2029
    }
2030

2031
//    align_get_bits(gb);
2032

2033 2034 2035 2036 2037
    if (   visible_width  <= s->width  && visible_width  > s->width-16
        && visible_height <= s->height && visible_height > s->height-16)
        avcodec_set_dimensions(avctx, visible_width, visible_height);
    else
        avcodec_set_dimensions(avctx, s->width, s->height);
2038

2039 2040 2041 2042 2043 2044 2045 2046 2047 2048
    if (colorspace == 1) {
        avctx->color_primaries = AVCOL_PRI_BT470M;
    } else if (colorspace == 2) {
        avctx->color_primaries = AVCOL_PRI_BT470BG;
    }
    if (colorspace == 1 || colorspace == 2) {
        avctx->colorspace = AVCOL_SPC_BT470BG;
        avctx->color_trc  = AVCOL_TRC_BT709;
    }

2049 2050 2051
    return 0;
}

2052
static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
2053 2054
{
    Vp3DecodeContext *s = avctx->priv_data;
2055
    int i, n, matrices, inter, plane;
2056 2057

    if (s->theora >= 0x030200) {
2058
        n = get_bits(gb, 3);
2059
        /* loop filter limit values table */
2060
        for (i = 0; i < 64; i++) {
2061
            s->filter_limit_values[i] = get_bits(gb, n);
2062 2063 2064 2065 2066
            if (s->filter_limit_values[i] > 127) {
                av_log(avctx, AV_LOG_ERROR, "filter limit value too large (%i > 127), clamping\n", s->filter_limit_values[i]);
                s->filter_limit_values[i] = 127;
            }
        }
2067
    }
2068

2069
    if (s->theora >= 0x030200)
2070
        n = get_bits(gb, 4) + 1;
2071 2072
    else
        n = 16;
2073 2074
    /* quality threshold table */
    for (i = 0; i < 64; i++)
2075
        s->coded_ac_scale_factor[i] = get_bits(gb, n);
2076

2077
    if (s->theora >= 0x030200)
2078
        n = get_bits(gb, 4) + 1;
2079 2080
    else
        n = 16;
2081 2082
    /* dc scale factor table */
    for (i = 0; i < 64; i++)
2083
        s->coded_dc_scale_factor[i] = get_bits(gb, n);
2084

2085
    if (s->theora >= 0x030200)
2086
        matrices = get_bits(gb, 9) + 1;
2087
    else
2088
        matrices = 3;
2089

2090 2091 2092 2093
    if(matrices > 384){
        av_log(avctx, AV_LOG_ERROR, "invalid number of base matrixes\n");
        return -1;
    }
2094

2095
    for(n=0; n<matrices; n++){
2096
        for (i = 0; i < 64; i++)
2097 2098
            s->base_matrix[n][i]= get_bits(gb, 8);
    }
2099

2100 2101 2102 2103
    for (inter = 0; inter <= 1; inter++) {
        for (plane = 0; plane <= 2; plane++) {
            int newqr= 1;
            if (inter || plane > 0)
2104
                newqr = get_bits1(gb);
2105
            if (!newqr) {
2106
                int qtj, plj;
2107
                if(inter && get_bits1(gb)){
2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118
                    qtj = 0;
                    plj = plane;
                }else{
                    qtj= (3*inter + plane - 1) / 3;
                    plj= (plane + 2) % 3;
                }
                s->qr_count[inter][plane]= s->qr_count[qtj][plj];
                memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj], sizeof(s->qr_size[0][0]));
                memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj], sizeof(s->qr_base[0][0]));
            } else {
                int qri= 0;
2119
                int qi = 0;
2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132

                for(;;){
                    i= get_bits(gb, av_log2(matrices-1)+1);
                    if(i>= matrices){
                        av_log(avctx, AV_LOG_ERROR, "invalid base matrix index\n");
                        return -1;
                    }
                    s->qr_base[inter][plane][qri]= i;
                    if(qi >= 63)
                        break;
                    i = get_bits(gb, av_log2(63-qi)+1) + 1;
                    s->qr_size[inter][plane][qri++]= i;
                    qi += i;
2133
                }
2134

2135
                if (qi > 63) {
2136
                    av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
2137 2138
                    return -1;
                }
2139
                s->qr_count[inter][plane]= qri;
2140 2141 2142 2143
            }
        }
    }

2144
    /* Huffman tables */
2145 2146 2147
    for (s->hti = 0; s->hti < 80; s->hti++) {
        s->entries = 0;
        s->huff_code_size = 1;
2148
        if (!get_bits1(gb)) {
2149
            s->hbits = 0;
2150 2151
            if(read_huffman_tree(avctx, gb))
                return -1;
2152
            s->hbits = 1;
2153 2154
            if(read_huffman_tree(avctx, gb))
                return -1;
2155 2156
        }
    }
2157

2158
    s->theora_tables = 1;
2159

2160 2161 2162
    return 0;
}

2163
static av_cold int theora_decode_init(AVCodecContext *avctx)
2164 2165 2166 2167
{
    Vp3DecodeContext *s = avctx->priv_data;
    GetBitContext gb;
    int ptype;
2168 2169 2170
    uint8_t *header_start[3];
    int header_len[3];
    int i;
2171

2172 2173 2174
    s->theora = 1;

    if (!avctx->extradata_size)
2175 2176
    {
        av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
2177
        return -1;
2178
    }
2179

2180 2181 2182 2183 2184
    if (ff_split_xiph_headers(avctx->extradata, avctx->extradata_size,
                              42, header_start, header_len) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
        return -1;
    }
2185

2186
  for(i=0;i<3;i++) {
2187
    init_get_bits(&gb, header_start[i], header_len[i] * 8);
2188 2189

    ptype = get_bits(&gb, 8);
2190

2191 2192 2193
     if (!(ptype & 0x80))
     {
        av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
2194
//        return -1;
2195
     }
2196

2197
    // FIXME: Check for this as well.
2198
    skip_bits_long(&gb, 6*8); /* "theora" */
2199

2200 2201 2202
    switch(ptype)
    {
        case 0x80:
2203
            theora_decode_header(avctx, &gb);
2204 2205
                break;
        case 0x81:
2206
// FIXME: is this needed? it breaks sometimes
2207 2208 2209
//            theora_decode_comments(avctx, gb);
            break;
        case 0x82:
2210 2211
            if (theora_decode_tables(avctx, &gb))
                return -1;
2212 2213 2214 2215
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype&~0x80);
            break;
2216
    }
2217 2218
    if(ptype != 0x81 && 8*header_len[i] != get_bits_count(&gb))
        av_log(avctx, AV_LOG_WARNING, "%d bits left in packet %X\n", 8*header_len[i] - get_bits_count(&gb), ptype);
2219 2220
    if (s->theora < 0x030200)
        break;
2221
  }
2222

2223
    return vp3_decode_init(avctx);
2224 2225
}

2226 2227
AVCodec theora_decoder = {
    "theora",
2228
    CODEC_TYPE_VIDEO,
2229
    CODEC_ID_THEORA,
2230
    sizeof(Vp3DecodeContext),
2231
    theora_decode_init,
2232 2233 2234
    NULL,
    vp3_decode_end,
    vp3_decode_frame,
2235
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
2236
    NULL,
2237
    .long_name = NULL_IF_CONFIG_SMALL("Theora"),
2238
};
2239
#endif
2240

2241 2242
AVCodec vp3_decoder = {
    "vp3",
2243
    CODEC_TYPE_VIDEO,
2244
    CODEC_ID_VP3,
2245
    sizeof(Vp3DecodeContext),
2246
    vp3_decode_init,
2247 2248 2249
    NULL,
    vp3_decode_end,
    vp3_decode_frame,
2250
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
2251
    NULL,
2252
    .long_name = NULL_IF_CONFIG_SMALL("On2 VP3"),
2253
};