/*
 * Copyright (C) 2003-2004 the ffmpeg project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * On2 VP3 Video Decoder
 *
 * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
 * For more information about the VP3 coding process, visit:
 *   http://wiki.multimedia.cx/index.php?title=On2_VP3
 *
 * Theora decoder by Alex Beregszaszi
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "internal.h"
#include "dsputil.h"
#include "get_bits.h"

#include "vp3data.h"
#include "xiph.h"
#include "thread.h"

#define FRAGMENT_PIXELS 8

//FIXME split things out into their own arrays
typedef struct Vp3Fragment {
    int16_t dc;
    uint8_t coding_method;
    uint8_t qpi;
} Vp3Fragment;

#define SB_NOT_CODED        0
#define SB_PARTIALLY_CODED  1
#define SB_FULLY_CODED      2

// This is the maximum length of a single long bit run that can be encoded
// for superblock coding or block qps. Theora special-cases this to read a
// bit instead of flipping the current bit to allow for runs longer than 4129.
#define MAXIMUM_LONG_BIT_RUN 4129

#define MODE_INTER_NO_MV      0
#define MODE_INTRA            1
#define MODE_INTER_PLUS_MV    2
#define MODE_INTER_LAST_MV    3
#define MODE_INTER_PRIOR_LAST 4
#define MODE_USING_GOLDEN     5
#define MODE_GOLDEN_MV        6
#define MODE_INTER_FOURMV     7
#define CODING_MODE_COUNT     8

/* special internal mode */
#define MODE_COPY             8

/* There are 6 preset schemes, plus a free-form scheme */
static const int ModeAlphabet[6][CODING_MODE_COUNT] =
{
    /* scheme 1: Last motion vector dominates */
    {    MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
         MODE_INTER_PLUS_MV,    MODE_INTER_NO_MV,
         MODE_INTRA,            MODE_USING_GOLDEN,
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 2 */
    {    MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
         MODE_INTER_NO_MV,      MODE_INTER_PLUS_MV,
         MODE_INTRA,            MODE_USING_GOLDEN,
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 3 */
    {    MODE_INTER_LAST_MV,    MODE_INTER_PLUS_MV,
         MODE_INTER_PRIOR_LAST, MODE_INTER_NO_MV,
         MODE_INTRA,            MODE_USING_GOLDEN,
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 4 */
    {    MODE_INTER_LAST_MV,    MODE_INTER_PLUS_MV,
         MODE_INTER_NO_MV,      MODE_INTER_PRIOR_LAST,
         MODE_INTRA,            MODE_USING_GOLDEN,
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 5: No motion vector dominates */
    {    MODE_INTER_NO_MV,      MODE_INTER_LAST_MV,
         MODE_INTER_PRIOR_LAST, MODE_INTER_PLUS_MV,
         MODE_INTRA,            MODE_USING_GOLDEN,
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 6 */
    {    MODE_INTER_NO_MV,      MODE_USING_GOLDEN,
         MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
         MODE_INTER_PLUS_MV,    MODE_INTRA,
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

};

static const uint8_t hilbert_offset[16][2] = {
    {0,0}, {1,0}, {1,1}, {0,1},
    {0,2}, {0,3}, {1,3}, {1,2},
    {2,2}, {2,3}, {3,3}, {3,2},
    {3,1}, {2,1}, {2,0}, {3,0}
};
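
/* Illustration (added note, not from the original source): the table above
 * walks the 4x4 block of fragments inside a superblock in a Hilbert-like
 * order. Writing the index i at its {x,y} offset gives:
 *
 *      x:  0   1   2   3
 *   y=0:   0   1  14  15
 *   y=1:   3   2  13  12
 *   y=2:   4   7   8  11
 *   y=3:   5   6   9  10
 */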

#define MIN_DEQUANT_VAL 2

typedef struct Vp3DecodeContext {
    AVCodecContext *avctx;
    int theora, theora_tables;
    int version;
    int width, height;
    int chroma_x_shift, chroma_y_shift;
    AVFrame golden_frame;
    AVFrame last_frame;
    AVFrame current_frame;
    int keyframe;
    DSPContext dsp;
    int flipped_image;
    int last_slice_end;
    int skip_loop_filter;

    int qps[3];
    int nqps;
    int last_qps[3];

    int superblock_count;
    int y_superblock_width;
    int y_superblock_height;
    int y_superblock_count;
    int c_superblock_width;
    int c_superblock_height;
    int c_superblock_count;
    int u_superblock_start;
    int v_superblock_start;
    unsigned char *superblock_coding;

    int macroblock_count;
    int macroblock_width;
    int macroblock_height;

    int fragment_count;
    int fragment_width[2];
    int fragment_height[2];

    Vp3Fragment *all_fragments;
    int fragment_start[3];
    int data_offset[3];

    int8_t (*motion_val[2])[2];

    ScanTable scantable;

    /* tables */
    uint16_t coded_dc_scale_factor[64];
    uint32_t coded_ac_scale_factor[64];
    uint8_t base_matrix[384][64];
    uint8_t qr_count[2][3];
    uint8_t qr_size [2][3][64];
    uint16_t qr_base[2][3][64];

    /**
     * This is a list of all tokens in bitstream order. Reordering takes place
     * by pulling from each level during IDCT. As a consequence, IDCT must be
     * in Hilbert order, making the minimum slice height 64 for 4:2:0 and 32
     * otherwise. The 32 different tokens with up to 12 bits of extradata are
     * collapsed into 3 types, packed as follows:
     *   (from the low to high bits)
     *
     * 2 bits: type (0,1,2)
     *   0: EOB run, 14 bits for run length (12 needed)
     *   1: zero run, 7 bits for run length
     *                7 bits for the next coefficient (3 needed)
     *   2: coefficient, 14 bits (11 needed)
     *
     * Coefficients are signed, so are packed in the highest bits for automatic
     * sign extension.
     */
    int16_t *dct_tokens[3][64];
    int16_t *dct_tokens_base;
#define TOKEN_EOB(eob_run)              ((eob_run) << 2)
#define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) << 9) + ((zero_run) << 2) + 1)
#define TOKEN_COEFF(coeff)              (((coeff) << 2) + 2)
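
/* Worked example (added for illustration, not part of the original code):
 * TOKEN_ZERO_RUN(5, 3) == (5 << 9) + (3 << 2) + 1 == 2573.
 * Decoding reverses the packing: 2573 & 3 == 1 selects the zero-run type,
 * (2573 >> 2) & 0x7f == 3 recovers the run length, and 2573 >> 9 == 5 the
 * coefficient; negative coefficients rely on the arithmetic right shift of
 * the int16_t token for sign extension. */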

    /**
     * number of blocks that contain DCT coefficients at the given level or higher
     */
    int num_coded_frags[3][64];
    int total_num_coded_frags;

    /* this is a list of indexes into the all_fragments array indicating
     * which of the fragments are coded */
    int *coded_fragment_list[3];

    VLC dc_vlc[16];
    VLC ac_vlc_1[16];
    VLC ac_vlc_2[16];
    VLC ac_vlc_3[16];
    VLC ac_vlc_4[16];

    VLC superblock_run_length_vlc;
    VLC fragment_run_length_vlc;
    VLC mode_code_vlc;
    VLC motion_vector_vlc;

    /* these arrays need to be on 16-byte boundaries since SSE2 operations
     * index into them */
    DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64];     ///< qmat[qpi][is_inter][plane]

    /* This table contains superblock_count * 16 entries. Each set of 16
     * numbers corresponds to the fragment indexes 0..15 of the superblock.
     * An entry will be -1 to indicate that no fragment corresponds to that
     * index. */
    int *superblock_fragments;

    /* This is an array that indicates how a particular macroblock
     * is coded. */
    unsigned char *macroblock_coding;

    uint8_t *edge_emu_buffer;

    /* Huffman decode */
    int hti;
    unsigned int hbits;
    int entries;
    int huff_code_size;
    uint32_t huffman_table[80][32][2];

    uint8_t filter_limit_values[64];
    DECLARE_ALIGNED(8, int, bounding_values_array)[256+2];
} Vp3DecodeContext;

/************************************************************************
 * VP3 specific functions
 ************************************************************************/

static void vp3_decode_flush(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;

    if (s->golden_frame.data[0]) {
        if (s->golden_frame.data[0] == s->last_frame.data[0])
            memset(&s->last_frame, 0, sizeof(AVFrame));
        if (s->current_frame.data[0] == s->golden_frame.data[0])
            memset(&s->current_frame, 0, sizeof(AVFrame));
        ff_thread_release_buffer(avctx, &s->golden_frame);
    }
    if (s->last_frame.data[0]) {
        if (s->current_frame.data[0] == s->last_frame.data[0])
            memset(&s->current_frame, 0, sizeof(AVFrame));
        ff_thread_release_buffer(avctx, &s->last_frame);
    }
    if (s->current_frame.data[0])
        ff_thread_release_buffer(avctx, &s->current_frame);
}

static av_cold int vp3_decode_end(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int i;

    av_free(s->superblock_coding);
    av_free(s->all_fragments);
    av_free(s->coded_fragment_list[0]);
    av_free(s->dct_tokens_base);
    av_free(s->superblock_fragments);
    av_free(s->macroblock_coding);
    av_free(s->motion_val[0]);
    av_free(s->motion_val[1]);
    av_free(s->edge_emu_buffer);

    if (avctx->internal->is_copy)
        return 0;

    for (i = 0; i < 16; i++) {
        ff_free_vlc(&s->dc_vlc[i]);
        ff_free_vlc(&s->ac_vlc_1[i]);
        ff_free_vlc(&s->ac_vlc_2[i]);
        ff_free_vlc(&s->ac_vlc_3[i]);
        ff_free_vlc(&s->ac_vlc_4[i]);
    }

    ff_free_vlc(&s->superblock_run_length_vlc);
    ff_free_vlc(&s->fragment_run_length_vlc);
    ff_free_vlc(&s->mode_code_vlc);
    ff_free_vlc(&s->motion_vector_vlc);

    /* release all frames */
    vp3_decode_flush(avctx);

    return 0;
}

/*
 * This function sets up all of the various blocks mappings:
 * superblocks <-> fragments, macroblocks <-> fragments,
 * superblocks <-> macroblocks
 *
 * @return 0 if successful; 1 if *anything* went wrong.
 */
static int init_block_mapping(Vp3DecodeContext *s)
{
    int sb_x, sb_y, plane;
    int x, y, i, j = 0;

    for (plane = 0; plane < 3; plane++) {
        int sb_width    = plane ? s->c_superblock_width  : s->y_superblock_width;
        int sb_height   = plane ? s->c_superblock_height : s->y_superblock_height;
        int frag_width  = s->fragment_width[!!plane];
        int frag_height = s->fragment_height[!!plane];

        for (sb_y = 0; sb_y < sb_height; sb_y++)
            for (sb_x = 0; sb_x < sb_width; sb_x++)
                for (i = 0; i < 16; i++) {
                    x = 4*sb_x + hilbert_offset[i][0];
                    y = 4*sb_y + hilbert_offset[i][1];

                    if (x < frag_width && y < frag_height)
                        s->superblock_fragments[j++] = s->fragment_start[plane] + y*frag_width + x;
                    else
                        s->superblock_fragments[j++] = -1;
                }
    }

    return 0;  /* successful path out */
}

/*
 * This function sets up the dequantization tables used for a particular
 * frame.
 */
static void init_dequantizer(Vp3DecodeContext *s, int qpi)
{
    int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];
    int dc_scale_factor = s->coded_dc_scale_factor[s->qps[qpi]];
    int i, plane, inter, qri, bmi, bmj, qistart;

    for(inter=0; inter<2; inter++){
        for(plane=0; plane<3; plane++){
            int sum=0;
            for(qri=0; qri<s->qr_count[inter][plane]; qri++){
                sum+= s->qr_size[inter][plane][qri];
                if(s->qps[qpi] <= sum)
                    break;
            }
            qistart= sum - s->qr_size[inter][plane][qri];
            bmi= s->qr_base[inter][plane][qri  ];
            bmj= s->qr_base[inter][plane][qri+1];
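            /* Note (added for clarity): the expression below is a linear
             * interpolation, rounded to nearest, between the two bracketing
             * base matrices:
             *   coeff ~ ((sum - qps)*base[bmi][i] + (qps - qistart)*base[bmj][i]) / qr_size
             * where qr_size == sum - qistart. */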
            for(i=0; i<64; i++){
                int coeff= (  2*(sum    -s->qps[qpi])*s->base_matrix[bmi][i]
                            - 2*(qistart-s->qps[qpi])*s->base_matrix[bmj][i]
                            + s->qr_size[inter][plane][qri])
                           / (2*s->qr_size[inter][plane][qri]);

                int qmin= 8<<(inter + !i);
                int qscale= i ? ac_scale_factor : dc_scale_factor;

                s->qmat[qpi][inter][plane][s->dsp.idct_permutation[i]]= av_clip((qscale * coeff)/100 * 4, qmin, 4096);
            }
            // all DC coefficients use the same quant so as not to interfere with DC prediction
            s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
        }
    }
}

/*
 * This function initializes the loop filter boundary limits if the frame's
 * quality index is different from the previous frame's.
 *
 * The filter_limit_values may not be larger than 127.
 */
static void init_loop_filter(Vp3DecodeContext *s)
{
    int *bounding_values= s->bounding_values_array+127;
    int filter_limit;
    int x;
    int value;

    filter_limit = s->filter_limit_values[s->qps[0]];
    av_assert0(filter_limit < 128U);

    /* set up the bounding values */
    memset(s->bounding_values_array, 0, 256 * sizeof(int));
    for (x = 0; x < filter_limit; x++) {
        bounding_values[-x] = -x;
        bounding_values[x] = x;
    }
    for (x = value = filter_limit; x < 128 && value; x++, value--) {
        bounding_values[ x] =  value;
        bounding_values[-x] = -value;
    }
    if (value)
        bounding_values[128] = value;
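
    /* Example (added for clarity): with filter_limit == 4 the table maps
     *   x              : 0  1  2  3  4  5  6  7  8 ...
     *   bounding_values: 0  1  2  3  4  3  2  1  0 ...
     * i.e. the identity up to the limit, then a ramp back down to zero at
     * 2*filter_limit, and zero beyond that (negative x mirrors this). */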
    bounding_values[129] = bounding_values[130] = filter_limit * 0x02020202;
}

/*
 * This function unpacks all of the superblock/macroblock/fragment coding
 * information from the bitstream.
 */
static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
{
    int superblock_starts[3] = { 0, s->u_superblock_start, s->v_superblock_start };
    int bit = 0;
    int current_superblock = 0;
    int current_run = 0;
    int num_partial_superblocks = 0;

    int i, j;
    int current_fragment;
    int plane;

    if (s->keyframe) {
        memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);

    } else {

        /* unpack the list of partially-coded superblocks */
        bit = get_bits1(gb) ^ 1;
        current_run = 0;

        while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
            if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
                bit = get_bits1(gb);
            else
                bit ^= 1;
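
            /* Note (added for clarity): a run-length VLC value of 34 escapes
             * to a 12-bit extension, so a single run can be at most
             * 34 + 4095 == 4129 (MAXIMUM_LONG_BIT_RUN). */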

            current_run = get_vlc2(gb,
                s->superblock_run_length_vlc.table, 6, 2) + 1;
            if (current_run == 34)
                current_run += get_bits(gb, 12);

            if (current_superblock + current_run > s->superblock_count) {
                av_log(s->avctx, AV_LOG_ERROR, "Invalid partially coded superblock run length\n");
                return -1;
            }

            memset(s->superblock_coding + current_superblock, bit, current_run);

            current_superblock += current_run;
            if (bit)
                num_partial_superblocks += current_run;
        }

        /* unpack the list of fully coded superblocks if any of the blocks were
         * not marked as partially coded in the previous step */
        if (num_partial_superblocks < s->superblock_count) {
            int superblocks_decoded = 0;

            current_superblock = 0;
            bit = get_bits1(gb) ^ 1;
            current_run = 0;

            while (superblocks_decoded < s->superblock_count - num_partial_superblocks
                   && get_bits_left(gb) > 0) {

                if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
                    bit = get_bits1(gb);
                else
                    bit ^= 1;

                current_run = get_vlc2(gb,
                    s->superblock_run_length_vlc.table, 6, 2) + 1;
                if (current_run == 34)
                    current_run += get_bits(gb, 12);

                for (j = 0; j < current_run; current_superblock++) {
                    if (current_superblock >= s->superblock_count) {
                        av_log(s->avctx, AV_LOG_ERROR, "Invalid fully coded superblock run length\n");
                        return -1;
                    }

                    /* skip any superblocks already marked as partially coded */
                    if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
                        s->superblock_coding[current_superblock] = 2*bit;
                        j++;
                    }
                }
                superblocks_decoded += current_run;
            }
        }

        /* if there were partial blocks, initialize bitstream for
         * unpacking fragment codings */
        if (num_partial_superblocks) {

            current_run = 0;
            bit = get_bits1(gb);
            /* toggle the bit because as soon as the first run length is
             * fetched the bit will be toggled again */
            bit ^= 1;
        }
    }

    /* figure out which fragments are coded; iterate through each
     * superblock (all planes) */
    s->total_num_coded_frags = 0;
    memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);

    for (plane = 0; plane < 3; plane++) {
        int sb_start = superblock_starts[plane];
        int sb_end = sb_start + (plane ? s->c_superblock_count : s->y_superblock_count);
        int num_coded_frags = 0;

    for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {

        /* iterate through all 16 fragments in a superblock */
        for (j = 0; j < 16; j++) {

            /* if the fragment is in bounds, check its coding status */
            current_fragment = s->superblock_fragments[i * 16 + j];
            if (current_fragment != -1) {
                int coded = s->superblock_coding[i];

                if (s->superblock_coding[i] == SB_PARTIALLY_CODED) {

                    /* fragment may or may not be coded; this is the case
                     * that cares about the fragment coding runs */
                    if (current_run-- == 0) {
                        bit ^= 1;
                        current_run = get_vlc2(gb,
                            s->fragment_run_length_vlc.table, 5, 2);
                    }
                    coded = bit;
                }

                    if (coded) {
                        /* default mode; actual mode will be decoded in
                         * the next phase */
                        s->all_fragments[current_fragment].coding_method =
                            MODE_INTER_NO_MV;
                        s->coded_fragment_list[plane][num_coded_frags++] =
                            current_fragment;
                    } else {
                        /* not coded; copy this fragment from the prior frame */
                        s->all_fragments[current_fragment].coding_method =
                            MODE_COPY;
                    }
            }
        }
    }
        s->total_num_coded_frags += num_coded_frags;
        for (i = 0; i < 64; i++)
            s->num_coded_frags[plane][i] = num_coded_frags;
        if (plane < 2)
            s->coded_fragment_list[plane+1] = s->coded_fragment_list[plane] + num_coded_frags;
    }
    return 0;
}

/*
 * This function unpacks all the coding mode data for individual macroblocks
 * from the bitstream.
 */
static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
{
    int i, j, k, sb_x, sb_y;
    int scheme;
    int current_macroblock;
    int current_fragment;
    int coding_mode;
    int custom_mode_alphabet[CODING_MODE_COUNT];
    const int *alphabet;
    Vp3Fragment *frag;

    if (s->keyframe) {
        for (i = 0; i < s->fragment_count; i++)
            s->all_fragments[i].coding_method = MODE_INTRA;

    } else {

        /* fetch the mode coding scheme for this frame */
        scheme = get_bits(gb, 3);

        /* is it a custom coding scheme? */
        if (scheme == 0) {
            for (i = 0; i < 8; i++)
                custom_mode_alphabet[i] = MODE_INTER_NO_MV;
            for (i = 0; i < 8; i++)
                custom_mode_alphabet[get_bits(gb, 3)] = i;
            alphabet = custom_mode_alphabet;
        } else
            alphabet = ModeAlphabet[scheme-1];

        /* iterate through all of the macroblocks that contain 1 or more
         * coded fragments */
        for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
            for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
                if (get_bits_left(gb) <= 0)
                    return -1;

            for (j = 0; j < 4; j++) {
                int mb_x = 2*sb_x +   (j>>1);
                int mb_y = 2*sb_y + (((j>>1)+j)&1);
                current_macroblock = mb_y * s->macroblock_width + mb_x;

                if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height)
                    continue;

#define BLOCK_X (2*mb_x + (k&1))
#define BLOCK_Y (2*mb_y + (k>>1))
                /* coding modes are only stored if the macroblock has at least one
                 * luma block coded, otherwise it must be INTER_NO_MV */
                for (k = 0; k < 4; k++) {
                    current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X;
                    if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
                        break;
                }
                if (k == 4) {
                    s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV;
                    continue;
                }

                /* mode 7 means get 3 bits for each coding mode */
                if (scheme == 7)
                    coding_mode = get_bits(gb, 3);
                else
                    coding_mode = alphabet
                        [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];

                s->macroblock_coding[current_macroblock] = coding_mode;
                for (k = 0; k < 4; k++) {
                    frag = s->all_fragments + BLOCK_Y*s->fragment_width[0] + BLOCK_X;
                    if (frag->coding_method != MODE_COPY)
                        frag->coding_method = coding_mode;
                }

#define SET_CHROMA_MODES \
    if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
        frag[s->fragment_start[1]].coding_method = coding_mode;\
    if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
        frag[s->fragment_start[2]].coding_method = coding_mode;

                if (s->chroma_y_shift) {
                    frag = s->all_fragments + mb_y*s->fragment_width[1] + mb_x;
                    SET_CHROMA_MODES
                } else if (s->chroma_x_shift) {
                    frag = s->all_fragments + 2*mb_y*s->fragment_width[1] + mb_x;
                    for (k = 0; k < 2; k++) {
                        SET_CHROMA_MODES
                        frag += s->fragment_width[1];
                    }
                } else {
                    for (k = 0; k < 4; k++) {
                        frag = s->all_fragments + BLOCK_Y*s->fragment_width[1] + BLOCK_X;
                        SET_CHROMA_MODES
                    }
                }
            }
            }
        }
    }

    return 0;
}

/*
 * This function unpacks all the motion vectors for the individual
 * macroblocks from the bitstream.
 */
static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
{
    int j, k, sb_x, sb_y;
    int coding_mode;
    int motion_x[4];
    int motion_y[4];
    int last_motion_x = 0;
    int last_motion_y = 0;
    int prior_last_motion_x = 0;
    int prior_last_motion_y = 0;
    int current_macroblock;
    int current_fragment;
    int frag;

    if (s->keyframe)
        return 0;

    /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */
    coding_mode = get_bits1(gb);

    /* iterate through all of the macroblocks that contain 1 or more
     * coded fragments */
    for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
        for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
            if (get_bits_left(gb) <= 0)
                return -1;

        for (j = 0; j < 4; j++) {
            int mb_x = 2*sb_x +   (j>>1);
            int mb_y = 2*sb_y + (((j>>1)+j)&1);
            current_macroblock = mb_y * s->macroblock_width + mb_x;

            if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height ||
                (s->macroblock_coding[current_macroblock] == MODE_COPY))
                continue;

            switch (s->macroblock_coding[current_macroblock]) {

            case MODE_INTER_PLUS_MV:
            case MODE_GOLDEN_MV:
                /* all 6 fragments use the same motion vector */
                if (coding_mode == 0) {
                    motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                    motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                } else {
                    motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
                    motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
                }

                /* vector maintenance, only on MODE_INTER_PLUS_MV */
                if (s->macroblock_coding[current_macroblock] ==
                    MODE_INTER_PLUS_MV) {
                    prior_last_motion_x = last_motion_x;
                    prior_last_motion_y = last_motion_y;
                    last_motion_x = motion_x[0];
                    last_motion_y = motion_y[0];
                }
                break;

            case MODE_INTER_FOURMV:
                /* vector maintenance */
                prior_last_motion_x = last_motion_x;
                prior_last_motion_y = last_motion_y;

                /* fetch 4 vectors from the bitstream, one for each
                 * Y fragment, then average for the C fragment vectors */
                for (k = 0; k < 4; k++) {
                    current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X;
                    if (s->all_fragments[current_fragment].coding_method != MODE_COPY) {
                        if (coding_mode == 0) {
                            motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                            motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                        } else {
                            motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
                            motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
                        }
                        last_motion_x = motion_x[k];
                        last_motion_y = motion_y[k];
                    } else {
                        motion_x[k] = 0;
                        motion_y[k] = 0;
                    }
                }
                break;

            case MODE_INTER_LAST_MV:
                /* all 6 fragments use the last motion vector */
                motion_x[0] = last_motion_x;
                motion_y[0] = last_motion_y;

                /* no vector maintenance (last vector remains the
                 * last vector) */
                break;

            case MODE_INTER_PRIOR_LAST:
                /* all 6 fragments use the motion vector prior to the
                 * last motion vector */
                motion_x[0] = prior_last_motion_x;
                motion_y[0] = prior_last_motion_y;

                /* vector maintenance */
                prior_last_motion_x = last_motion_x;
                prior_last_motion_y = last_motion_y;
                last_motion_x = motion_x[0];
                last_motion_y = motion_y[0];
                break;

            default:
                /* covers intra, inter without MV, golden without MV */
                motion_x[0] = 0;
                motion_y[0] = 0;

                /* no vector maintenance */
                break;
            }

            /* assign the motion vectors to the correct fragments */
            for (k = 0; k < 4; k++) {
                current_fragment =
                    BLOCK_Y*s->fragment_width[0] + BLOCK_X;
                if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
                    s->motion_val[0][current_fragment][0] = motion_x[k];
                    s->motion_val[0][current_fragment][1] = motion_y[k];
                } else {
                    s->motion_val[0][current_fragment][0] = motion_x[0];
                    s->motion_val[0][current_fragment][1] = motion_y[0];
                }
            }

            if (s->chroma_y_shift) {
                if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
                    motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] + motion_x[2] + motion_x[3], 2);
                    motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] + motion_y[2] + motion_y[3], 2);
                }
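                /* Derive the chroma vector (added note): roughly halve the
                 * luma vector, keeping the result odd (i.e. keeping a
                 * half-pel component) whenever the luma vector was odd. */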
                motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1);
                motion_y[0] = (motion_y[0]>>1) | (motion_y[0]&1);
                frag = mb_y*s->fragment_width[1] + mb_x;
                s->motion_val[1][frag][0] = motion_x[0];
                s->motion_val[1][frag][1] = motion_y[0];
            } else if (s->chroma_x_shift) {
                if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
                    motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
                    motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
                    motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
                    motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
                } else {
                    motion_x[1] = motion_x[0];
                    motion_y[1] = motion_y[0];
                }
                motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1);
                motion_x[1] = (motion_x[1]>>1) | (motion_x[1]&1);

                frag = 2*mb_y*s->fragment_width[1] + mb_x;
                for (k = 0; k < 2; k++) {
                    s->motion_val[1][frag][0] = motion_x[k];
                    s->motion_val[1][frag][1] = motion_y[k];
                    frag += s->fragment_width[1];
                }
            } else {
                for (k = 0; k < 4; k++) {
                    frag = BLOCK_Y*s->fragment_width[1] + BLOCK_X;
                    if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
                        s->motion_val[1][frag][0] = motion_x[k];
                        s->motion_val[1][frag][1] = motion_y[k];
                    } else {
                        s->motion_val[1][frag][0] = motion_x[0];
                        s->motion_val[1][frag][1] = motion_y[0];
                    }
                }
            }
        }
        }
    }

    return 0;
}

static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
{
    int qpi, i, j, bit, run_length, blocks_decoded, num_blocks_at_qpi;
    int num_blocks = s->total_num_coded_frags;

    for (qpi = 0; qpi < s->nqps-1 && num_blocks > 0; qpi++) {
        i = blocks_decoded = num_blocks_at_qpi = 0;

        bit = get_bits1(gb) ^ 1;
        run_length = 0;

        do {
            if (run_length == MAXIMUM_LONG_BIT_RUN)
                bit = get_bits1(gb);
            else
                bit ^= 1;

            run_length = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1;
            if (run_length == 34)
                run_length += get_bits(gb, 12);
            blocks_decoded += run_length;

            if (!bit)
                num_blocks_at_qpi += run_length;

            for (j = 0; j < run_length; i++) {
                if (i >= s->total_num_coded_frags)
                    return -1;

                if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) {
                    s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit;
                    j++;
                }
            }
        } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);

        num_blocks -= num_blocks_at_qpi;
    }

    return 0;
}

/*
 * This function is called by unpack_dct_coeffs() to extract the VLCs from
 * the bitstream. The VLCs encode tokens which are used to unpack DCT
 * data. This function unpacks all the VLCs for either the Y plane or both
 * C planes, and is called for DC coefficients or different AC coefficient
 * levels (since different coefficient types require different VLC tables).
 *
 * This function returns a residual eob run. E.g., if a particular token gave
 * instructions to EOB the next 5 fragments and there were only 2 fragments
 * left in the current fragment range, 3 would be returned so that it could
 * be passed into the next call to this same function.
 */
static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
                        VLC *table, int coeff_index,
                        int plane,
                        int eob_run)
{
    int i, j = 0;
    int token;
    int zero_run = 0;
    DCTELEM coeff = 0;
    int bits_to_get;
    int blocks_ended;
    int coeff_i = 0;
    int num_coeffs = s->num_coded_frags[plane][coeff_index];
    int16_t *dct_tokens = s->dct_tokens[plane][coeff_index];

    /* local references to structure members to avoid repeated dereferences */
    int *coded_fragment_list = s->coded_fragment_list[plane];
    Vp3Fragment *all_fragments = s->all_fragments;
    VLC_TYPE (*vlc_table)[2] = table->table;

    if (num_coeffs < 0)
        av_log(s->avctx, AV_LOG_ERROR, "Invalid number of coefficients at level %d\n", coeff_index);

    if (eob_run > num_coeffs) {
        coeff_i = blocks_ended = num_coeffs;
        eob_run -= num_coeffs;
    } else {
        coeff_i = blocks_ended = eob_run;
        eob_run = 0;
    }

    // insert fake EOB token to cover the split between planes or zzi
    if (blocks_ended)
        dct_tokens[j++] = blocks_ended << 2;

    while (coeff_i < num_coeffs && get_bits_left(gb) > 0) {
            /* decode a VLC into a token */
            token = get_vlc2(gb, vlc_table, 11, 3);
            /* use the token to get a zero run, a coefficient, and an eob run */
            if ((unsigned) token <= 6U) {
                eob_run = eob_run_base[token];
                if (eob_run_get_bits[token])
                    eob_run += get_bits(gb, eob_run_get_bits[token]);

                // record only the number of blocks ended in this plane,
                // any spill will be recorded in the next plane.
                if (eob_run > num_coeffs - coeff_i) {
                    dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i);
                    blocks_ended   += num_coeffs - coeff_i;
                    eob_run        -= num_coeffs - coeff_i;
                    coeff_i         = num_coeffs;
                } else {
                    dct_tokens[j++] = TOKEN_EOB(eob_run);
                    blocks_ended   += eob_run;
                    coeff_i        += eob_run;
                    eob_run = 0;
                }
            } else if (token >= 0) {
                bits_to_get = coeff_get_bits[token];
                if (bits_to_get)
                    bits_to_get = get_bits(gb, bits_to_get);
                coeff = coeff_tables[token][bits_to_get];

                zero_run = zero_run_base[token];
                if (zero_run_get_bits[token])
                    zero_run += get_bits(gb, zero_run_get_bits[token]);

                if (zero_run) {
                    dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run);
                } else {
                    // Save DC into the fragment structure. DC prediction is
                    // done in raster order, so the actual DC can't be in with
                    // other tokens. We still need the token in dct_tokens[]
                    // however, or else the structure collapses on itself.
                    if (!coeff_index)
                        all_fragments[coded_fragment_list[coeff_i]].dc = coeff;

                    dct_tokens[j++] = TOKEN_COEFF(coeff);
                }

                if (coeff_index + zero_run > 64) {
                    av_log(s->avctx, AV_LOG_DEBUG, "Invalid zero run of %d with"
                           " %d coeffs left\n", zero_run, 64-coeff_index);
                    zero_run = 64 - coeff_index;
                }

                // zero runs code multiple coefficients,
                // so don't try to decode coeffs for those higher levels
                for (i = coeff_index+1; i <= coeff_index+zero_run; i++)
                    s->num_coded_frags[plane][i]--;
                coeff_i++;
            } else {
                av_log(s->avctx, AV_LOG_ERROR,
                       "Invalid token %d\n", token);
                return -1;
            }
    }

    if (blocks_ended > s->num_coded_frags[plane][coeff_index])
        av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n");

    // decrement the number of blocks that have higher coefficients for each
    // EOB run at this level
    if (blocks_ended)
        for (i = coeff_index+1; i < 64; i++)
            s->num_coded_frags[plane][i] -= blocks_ended;

    // setup the next buffer
    if (plane < 2)
        s->dct_tokens[plane+1][coeff_index] = dct_tokens + j;
    else if (coeff_index < 63)
        s->dct_tokens[0][coeff_index+1] = dct_tokens + j;

    return eob_run;
}

static void reverse_dc_prediction(Vp3DecodeContext *s,
                                  int first_fragment,
                                  int fragment_width,
                                  int fragment_height);
/*
 * This function unpacks all of the DCT coefficient data from the
 * bitstream.
 */
static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
{
    int i;
    int dc_y_table;
    int dc_c_table;
    int ac_y_table;
    int ac_c_table;
    int residual_eob_run = 0;
    VLC *y_tables[64];
    VLC *c_tables[64];

    s->dct_tokens[0][0] = s->dct_tokens_base;

    /* fetch the DC table indexes */
    dc_y_table = get_bits(gb, 4);
    dc_c_table = get_bits(gb, 4);

    /* unpack the Y plane DC coefficients */
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0,
        0, residual_eob_run);
    if (residual_eob_run < 0)
        return residual_eob_run;

    /* reverse prediction of the Y-plane DC coefficients */
    reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);

    /* unpack the C plane DC coefficients */
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
        1, residual_eob_run);
    if (residual_eob_run < 0)
        return residual_eob_run;
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
        2, residual_eob_run);
    if (residual_eob_run < 0)
        return residual_eob_run;

    /* reverse prediction of the C-plane DC coefficients */
    if (!(s->avctx->flags & CODEC_FLAG_GRAY))
    {
        reverse_dc_prediction(s, s->fragment_start[1],
            s->fragment_width[1], s->fragment_height[1]);
        reverse_dc_prediction(s, s->fragment_start[2],
            s->fragment_width[1], s->fragment_height[1]);
    }

    /* fetch the AC table indexes */
    ac_y_table = get_bits(gb, 4);
    ac_c_table = get_bits(gb, 4);

    /* build tables of AC VLC tables */
    for (i = 1; i <= 5; i++) {
        y_tables[i] = &s->ac_vlc_1[ac_y_table];
        c_tables[i] = &s->ac_vlc_1[ac_c_table];
    }
    for (i = 6; i <= 14; i++) {
        y_tables[i] = &s->ac_vlc_2[ac_y_table];
        c_tables[i] = &s->ac_vlc_2[ac_c_table];
    }
    for (i = 15; i <= 27; i++) {
        y_tables[i] = &s->ac_vlc_3[ac_y_table];
        c_tables[i] = &s->ac_vlc_3[ac_c_table];
    }
    for (i = 28; i <= 63; i++) {
        y_tables[i] = &s->ac_vlc_4[ac_y_table];
        c_tables[i] = &s->ac_vlc_4[ac_c_table];
    }

    /* decode all AC coefficients */
    for (i = 1; i <= 63; i++) {
            residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
                0, residual_eob_run);
            if (residual_eob_run < 0)
                return residual_eob_run;

            residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
                1, residual_eob_run);
            if (residual_eob_run < 0)
                return residual_eob_run;
            residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
                2, residual_eob_run);
            if (residual_eob_run < 0)
                return residual_eob_run;
    }

    return 0;
}

/*
 * This function reverses the DC prediction for each coded fragment in
 * the frame. Much of this function is adapted directly from the original
 * VP3 source code.
 */
#define COMPATIBLE_FRAME(x) \
  (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
#define DC_COEFF(u) s->all_fragments[u].dc

static void reverse_dc_prediction(Vp3DecodeContext *s,
                                  int first_fragment,
                                  int fragment_width,
                                  int fragment_height)
{

#define PUL 8
#define PU 4
#define PUR 2
#define PL 1

    int x, y;
    int i = first_fragment;

1145
    int predicted_dc;
1146 1147 1148 1149

    /* DC values for the left, up-left, up, and up-right fragments */
    int vl, vul, vu, vur;

1150
    /* indexes for the left, up-left, up, and up-right fragments */
1151 1152
    int l, ul, u, ur;

1153
    /*
1154 1155 1156 1157 1158 1159
     * The 6 fields mean:
     *   0: up-left multiplier
     *   1: up multiplier
     *   2: up-right multiplier
     *   3: left multiplier
     */
1160
    static const int predictor_transform[16][4] = {
1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176
        {  0,  0,  0,  0},
        {  0,  0,  0,128},        // PL
        {  0,  0,128,  0},        // PUR
        {  0,  0, 53, 75},        // PUR|PL
        {  0,128,  0,  0},        // PU
        {  0, 64,  0, 64},        // PU|PL
        {  0,128,  0,  0},        // PU|PUR
        {  0,  0, 53, 75},        // PU|PUR|PL
        {128,  0,  0,  0},        // PUL
        {  0,  0,  0,128},        // PUL|PL
        { 64,  0, 64,  0},        // PUL|PUR
        {  0,  0, 53, 75},        // PUL|PUR|PL
        {  0,128,  0,  0},        // PUL|PU
       {-104,116,  0,116},        // PUL|PU|PL
        { 24, 80, 24,  0},        // PUL|PU|PUR
       {-104,116,  0,116}         // PUL|PU|PUR|PL
1177 1178 1179 1180 1181
    };

    /* This table shows which types of blocks can use other blocks for
     * prediction. For example, INTRA is the only mode in this table to
     * have a frame number of 0. That means INTRA blocks can only predict
1182
     * from other INTRA blocks. There are 2 golden frame coding types;
1183 1184
     * blocks encoding in these modes can only predict from other blocks
     * that were encoded with these 1 of these 2 modes. */
1185
    static const unsigned char compatible_frame[9] = {
1186 1187 1188 1189 1190 1191 1192
        1,    /* MODE_INTER_NO_MV */
        0,    /* MODE_INTRA */
        1,    /* MODE_INTER_PLUS_MV */
        1,    /* MODE_INTER_LAST_MV */
        1,    /* MODE_INTER_PRIOR_MV */
        2,    /* MODE_USING_GOLDEN */
        2,    /* MODE_GOLDEN_MV */
1193 1194
        1,    /* MODE_INTER_FOUR_MV */
        3     /* MODE_COPY */
1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214
    };
    int current_frame_type;

    /* there is a last DC predictor for each of the 3 frame types */
    short last_dc[3];

    int transform = 0;

    vul = vu = vur = vl = 0;
    last_dc[0] = last_dc[1] = last_dc[2] = 0;

    /* for each fragment row... */
    for (y = 0; y < fragment_height; y++) {

        /* for each fragment in a row... */
        for (x = 0; x < fragment_width; x++, i++) {

            /* reverse prediction if this block was coded */
            if (s->all_fragments[i].coding_method != MODE_COPY) {

1215
                current_frame_type =
1216 1217
                    compatible_frame[s->all_fragments[i].coding_method];

Michael Niedermayer's avatar
                if(x){
                    l= i-1;
1221
                    vl = DC_COEFF(l);
1222
                    if(COMPATIBLE_FRAME(l))
1223
                        transform |= PL;
Michael Niedermayer's avatar
                if(y){
                    u= i-fragment_width;
1227
                    vu = DC_COEFF(u);
1228
                    if(COMPATIBLE_FRAME(u))
1229
                        transform |= PU;
Michael Niedermayer's avatar
                        ul= i-fragment_width-1;
                        vul = DC_COEFF(ul);
1233
                        if(COMPATIBLE_FRAME(ul))
1234
                            transform |= PUL;
Michael Niedermayer's avatar
                    if(x + 1 < fragment_width){
                        ur= i-fragment_width+1;
                        vur = DC_COEFF(ur);
1239
                        if(COMPATIBLE_FRAME(ur))
1240
                            transform |= PUR;
Michael Niedermayer's avatar
1242 1243 1244 1245 1246 1247
                }

                if (transform == 0) {

                    /* if there were no fragments to predict from, use last
                     * DC saved */
1248
                    predicted_dc = last_dc[current_frame_type];
1249 1250 1251 1252 1253 1254 1255 1256 1257
                } else {

                    /* apply the appropriate predictor transform */
                    predicted_dc =
                        (predictor_transform[transform][0] * vul) +
                        (predictor_transform[transform][1] * vu) +
                        (predictor_transform[transform][2] * vur) +
                        (predictor_transform[transform][3] * vl);

Michael Niedermayer's avatar
1259 1260 1261

                    /* check for outranging on the [ul u l] and
                     * [ul u ur l] predictors */
1262
                    if ((transform == 15) || (transform == 13)) {
1263
                        if (FFABS(predicted_dc - vu) > 128)
1264
                            predicted_dc = vu;
1265
                        else if (FFABS(predicted_dc - vl) > 128)
1266
                            predicted_dc = vl;
1267
                        else if (FFABS(predicted_dc - vul) > 128)
1268 1269 1270 1271
                            predicted_dc = vul;
                    }
                }

1272
                /* at long last, apply the predictor */
1273
                DC_COEFF(i) += predicted_dc;
1274
                /* save the DC */
1275
                last_dc[current_frame_type] = DC_COEFF(i);
1276 1277 1278 1279 1280
            }
        }
    }
}

1281
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
1282 1283 1284 1285
{
    int x, y;
    int *bounding_values= s->bounding_values_array+127;

1286 1287
    int width           = s->fragment_width[!!plane];
    int height          = s->fragment_height[!!plane];
David Conrad's avatar
    int stride          = s->current_frame.linesize[plane];
    uint8_t *plane_data = s->current_frame.data    [plane];
    if (!s->flipped_image) stride = -stride;
1292
    plane_data += s->data_offset[plane] + 8*ystart*stride;
David Conrad's avatar
    for (y = ystart; y < yend; y++) {

        for (x = 0; x < width; x++) {
            /* This code basically just deblocks on the edges of coded blocks.
             * However, it has to be much more complicated because of the
             * braindamaged deblock ordering used in VP3/Theora. Order matters
             * because some pixels get filtered twice. */
            if( s->all_fragments[fragment].coding_method != MODE_COPY )
            {
                /* do not perform left edge filter for left columns frags */
                if (x > 0) {
                    s->dsp.vp3_h_loop_filter(
1306
                        plane_data + 8*x,
David Conrad's avatar
                }
1309

David Conrad's avatar
                if (y > 0) {
                    s->dsp.vp3_v_loop_filter(
1313
                        plane_data + 8*x,
David Conrad's avatar
                }
1316

David Conrad's avatar
                 * fragments or if right fragment neighbor is also coded
                 * in this frame (it will be filtered in next iteration) */
                if ((x < width - 1) &&
                    (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
                    s->dsp.vp3_h_loop_filter(
1323
                        plane_data + 8*x + 8,
David Conrad's avatar
1325 1326
                }

David Conrad's avatar
                 * fragments or if bottom fragment neighbor is also coded
                 * in this frame (it will be filtered in the next row) */
                if ((y < height - 1) &&
                    (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
                    s->dsp.vp3_v_loop_filter(
1333
                        plane_data + 8*x + 8*stride,
David Conrad's avatar
                }
1336
            }
David Conrad's avatar
            fragment++;
1339
        }
1340
        plane_data += 8*stride;
David Conrad's avatar
1342 1343
}

1344
/**
1345
 * Pull DCT tokens from the 64 levels to decode and dequant the coefficients
1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366
 * for the next block in coding order
 */
static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag,
                              int plane, int inter, DCTELEM block[64])
{
    int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
    uint8_t *perm = s->scantable.permutated;
    int i = 0;

    do {
        int token = *s->dct_tokens[plane][i];
        switch (token & 3) {
        case 0: // EOB
            if (--token < 4) // 0-3 are token types, so the EOB run must now be 0
                s->dct_tokens[plane][i]++;
            else
                *s->dct_tokens[plane][i] = token & ~3;
            goto end;
        case 1: // zero run
            s->dct_tokens[plane][i]++;
            i += (token >> 2) & 0x7f;
1367
            if (i > 63) {
1368
                av_log(s->avctx, AV_LOG_ERROR, "Coefficient index overflow\n");
1369
                return i;
1370
            }
1371 1372 1373 1374 1375 1376 1377
            block[perm[i]] = (token >> 9) * dequantizer[perm[i]];
            i++;
            break;
        case 2: // coeff
            block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
            s->dct_tokens[plane][i++]++;
            break;
1378
        default: // shouldn't happen
1379 1380 1381
            return i;
        }
    } while (i < 64);
1382 1383
    // return value is expected to be a valid level
    i--;
1384 1385 1386 1387 1388 1389
end:
    // the actual DC+prediction is in the fragment structure
    block[0] = frag->dc * s->qmat[0][inter][plane][0];
    return i;
}

/**
 * called when all pixels up to row y are complete
 */
static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
{
    int h, cy, i;
    int offset[AV_NUM_DATA_POINTERS];

    if (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
        int y_flipped = s->flipped_image ? s->avctx->height-y : y;

        // At the end of the frame, report INT_MAX instead of the height of the frame.
        // This makes the other threads' ff_thread_await_progress() calls cheaper, because
        // they don't have to clip their values.
        ff_thread_report_progress(&s->current_frame, y_flipped==s->avctx->height ? INT_MAX : y_flipped-1, 0);
    }

    if(s->avctx->draw_horiz_band==NULL)
        return;

    h= y - s->last_slice_end;
    s->last_slice_end= y;
    y -= h;

    if (!s->flipped_image) {
        y = s->avctx->height - y - h;
    }

    cy = y >> s->chroma_y_shift;
    offset[0] = s->current_frame.linesize[0]*y;
    offset[1] = s->current_frame.linesize[1]*cy;
    offset[2] = s->current_frame.linesize[2]*cy;
    for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
        offset[i] = 0;

    emms_c();
    s->avctx->draw_horiz_band(s->avctx, &s->current_frame, offset, y, 3, h);
}

/**
 * Wait for the reference frame of the current fragment.
 * The progress value is in luma pixel rows.
 */
static void await_reference_row(Vp3DecodeContext *s, Vp3Fragment *fragment, int motion_y, int y)
{
    AVFrame *ref_frame;
    int ref_row;
    int border = motion_y&1;

    if (fragment->coding_method == MODE_USING_GOLDEN ||
        fragment->coding_method == MODE_GOLDEN_MV)
        ref_frame = &s->golden_frame;
    else
        ref_frame = &s->last_frame;

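    /* the farthest reference row this block can touch: 8 rows for the block
     * itself plus one more when half-pel interpolation is used; FFABS()
     * keeps the value sane for motion vectors pointing above the frame */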
    ref_row = y + (motion_y>>1);
    ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border);

    ff_thread_await_progress(ref_frame, ref_row, 0);
}

/*
 * Perform the final rendering for a particular slice of data.
 * The slice number ranges from 0..(c_superblock_height - 1).
 */
static void render_slice(Vp3DecodeContext *s, int slice)
{
    int x, y, i, j, fragment;
    LOCAL_ALIGNED_16(DCTELEM, block, [64]);
    int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
    int motion_halfpel_index;
    uint8_t *motion_source;
    int plane, first_pixel;

    if (slice >= s->c_superblock_height)
        return;

    for (plane = 0; plane < 3; plane++) {
        uint8_t *output_plane = s->current_frame.data    [plane] + s->data_offset[plane];
        uint8_t *  last_plane = s->   last_frame.data    [plane] + s->data_offset[plane];
        uint8_t *golden_plane = s-> golden_frame.data    [plane] + s->data_offset[plane];
        int stride            = s->current_frame.linesize[plane];
        int plane_width       = s->width  >> (plane && s->chroma_x_shift);
        int plane_height      = s->height >> (plane && s->chroma_y_shift);
        int8_t (*motion_val)[2] = s->motion_val[!!plane];

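        /* one render slice spans a single chroma superblock row; for the
         * luma plane in 4:2:0 material that corresponds to two luma
         * superblock rows */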
        int sb_x, sb_y        = slice << (!plane && s->chroma_y_shift);
        int slice_height      = sb_y + 1 + (!plane && s->chroma_y_shift);
        int slice_width       = plane ? s->c_superblock_width : s->y_superblock_width;

        int fragment_width    = s->fragment_width[!!plane];
        int fragment_height   = s->fragment_height[!!plane];
        int fragment_start    = s->fragment_start[plane];
        int do_await          = !plane && HAVE_THREADS && (s->avctx->active_thread_type&FF_THREAD_FRAME);

        if (!s->flipped_image) stride = -stride;
        if (CONFIG_GRAY && plane && (s->avctx->flags & CODEC_FLAG_GRAY))
            continue;

        /* for each superblock row in the slice (both of them)... */
        for (; sb_y < slice_height; sb_y++) {

            /* for each superblock in a row... */
            for (sb_x = 0; sb_x < slice_width; sb_x++) {

                /* for each block in a superblock... */
                for (j = 0; j < 16; j++) {
                    x = 4*sb_x + hilbert_offset[j][0];
                    y = 4*sb_y + hilbert_offset[j][1];
                    fragment = y*fragment_width + x;

                    i = fragment_start + fragment;

                    // bounds check
                    if (x >= fragment_width || y >= fragment_height)
                        continue;

                first_pixel = 8*y*stride + 8*x;

                if (do_await && s->all_fragments[i].coding_method != MODE_INTRA)
                    await_reference_row(s, &s->all_fragments[i], motion_val[fragment][1], (16*y) >> s->chroma_y_shift);

                /* transform if this block was coded */
                if (s->all_fragments[i].coding_method != MODE_COPY) {
                    if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
                        (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
                        motion_source= golden_plane;
                    else
                        motion_source= last_plane;

                    motion_source += first_pixel;
                    motion_halfpel_index = 0;

                    /* sort out the motion vector if this fragment is coded
                     * using a motion vector method */
                    if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
                        (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
                        int src_x, src_y;
                        motion_x = motion_val[fragment][0];
                        motion_y = motion_val[fragment][1];

                        src_x= (motion_x>>1) + 8*x;
                        src_y= (motion_y>>1) + 8*y;

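                        /* bit 0 of motion_halfpel_index selects horizontal
                         * half-pel interpolation, bit 1 vertical; index 3
                         * (diagonal) is handled by averaging two sources below */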
                        motion_halfpel_index = motion_x & 0x01;
                        motion_source += (motion_x >> 1);

                        motion_halfpel_index |= (motion_y & 0x01) << 1;
                        motion_source += ((motion_y >> 1) * stride);

                        if(src_x<0 || src_y<0 || src_x + 9 >= plane_width || src_y + 9 >= plane_height){
                            uint8_t *temp= s->edge_emu_buffer;
                            if(stride<0) temp -= 8*stride;

                            s->dsp.emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height);
                            motion_source= temp;
                        }
                    }


                    /* first, take care of copying a block from either the
                     * previous or the golden frame */
                    if (s->all_fragments[i].coding_method != MODE_INTRA) {
                        /* Note, it is possible to implement all MC cases with
                           put_no_rnd_pixels_l2 which would look more like the
                           VP3 source but this would be slower as
                           put_no_rnd_pixels_tab is better optimized */
                        if(motion_halfpel_index != 3){
                            s->dsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
                                output_plane + first_pixel,
                                motion_source, stride, 8);
                        }else{
                            int d= (motion_x ^ motion_y)>>31; // d is 0 if motion_x and _y have the same sign, else -1
                            s->dsp.put_no_rnd_pixels_l2[1](
                                output_plane + first_pixel,
                                motion_source - d,
                                motion_source + stride + 1 + d,
                                stride, 8);
                        }
                    }

                    s->dsp.clear_block(block);

                    /* invert DCT and place (or add) in final output */

                    if (s->all_fragments[i].coding_method == MODE_INTRA) {
                        vp3_dequant(s, s->all_fragments + i, plane, 0, block);
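                        /* the VP3 IDCT includes the +128 intra bias itself;
                         * other IDCTs need it folded into the DC term here
                         * (scaled by 8 to match the coefficient range) */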
                        if(s->avctx->idct_algo!=FF_IDCT_VP3)
                            block[0] += 128<<3;
                        s->dsp.idct_put(
                            output_plane + first_pixel,
                            stride,
                            block);
                    } else {
                        if (vp3_dequant(s, s->all_fragments + i, plane, 1, block)) {
                            s->dsp.idct_add(
                                output_plane + first_pixel,
                                stride,
                                block);
                        } else {
                            s->dsp.vp3_idct_dc_add(output_plane + first_pixel, stride, block);
                        }
                    }
                } else {

                    /* copy directly from the previous frame */
                    s->dsp.put_pixels_tab[1][0](
                        output_plane + first_pixel,
                        last_plane + first_pixel,
                        stride, 8);

                }
                }
            }

            // Filter up to the last row in the superblock row
            if (!s->skip_loop_filter)
                apply_loop_filter(s, plane, 4*sb_y - !!sb_y, FFMIN(4*sb_y+3, fragment_height-1));
        }
    }

     /* this looks like a good place for slice dispatch... */
     /* algorithm:
      *   if (slice == s->macroblock_height - 1)
      *     dispatch (both last slice & 2nd-to-last slice);
      *   else if (slice > 0)
      *     dispatch (slice - 1);
      */

    vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) -16, s->height-16));
}

/// Allocate tables for per-frame data in Vp3DecodeContext
static av_cold int allocate_tables(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int y_fragment_count, c_fragment_count;

    y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
    c_fragment_count = s->fragment_width[1] * s->fragment_height[1];

    s->superblock_coding = av_malloc(s->superblock_count);
    s->all_fragments = av_malloc(s->fragment_count * sizeof(Vp3Fragment));
    s->coded_fragment_list[0] = av_malloc(s->fragment_count * sizeof(int));
    s->dct_tokens_base = av_malloc(64*s->fragment_count * sizeof(*s->dct_tokens_base));
    s->motion_val[0] = av_malloc(y_fragment_count * sizeof(*s->motion_val[0]));
    s->motion_val[1] = av_malloc(c_fragment_count * sizeof(*s->motion_val[1]));

    /* work out the block mapping tables */
    s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int));
    s->macroblock_coding = av_malloc(s->macroblock_count + 1);

    if (!s->superblock_coding || !s->all_fragments || !s->dct_tokens_base ||
        !s->coded_fragment_list[0] || !s->superblock_fragments || !s->macroblock_coding ||
        !s->motion_val[0] || !s->motion_val[1]) {
        vp3_decode_end(avctx);
        return -1;
    }

    init_block_mapping(s);

    return 0;
}

static av_cold int vp3_decode_init(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int i, inter, plane;
    int c_width;
    int c_height;
    int y_fragment_count, c_fragment_count;

    if (avctx->codec_tag == MKTAG('V','P','3','0'))
        s->version = 0;
    else
        s->version = 1;

    s->avctx = avctx;
    s->width = FFALIGN(avctx->width, 16);
    s->height = FFALIGN(avctx->height, 16);
    if (avctx->codec_id != CODEC_ID_THEORA)
        avctx->pix_fmt = PIX_FMT_YUV420P;
    avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
    if(avctx->idct_algo==FF_IDCT_AUTO)
        avctx->idct_algo=FF_IDCT_VP3;
    ff_dsputil_init(&s->dsp, avctx);

    ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);

    /* initialize to an impossible value which will force a recalculation
     * in the first frame decode */
    for (i = 0; i < 3; i++)
        s->qps[i] = -1;

    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);

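    /* superblocks are 32x32 pixels, macroblocks 16x16 and fragments 8x8;
     * the counts below are computed per plane, rounded up */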
    s->y_superblock_width = (s->width + 31) / 32;
    s->y_superblock_height = (s->height + 31) / 32;
    s->y_superblock_count = s->y_superblock_width * s->y_superblock_height;

    /* work out the dimensions for the C planes */
    c_width = s->width >> s->chroma_x_shift;
    c_height = s->height >> s->chroma_y_shift;
    s->c_superblock_width = (c_width + 31) / 32;
    s->c_superblock_height = (c_height + 31) / 32;
    s->c_superblock_count = s->c_superblock_width * s->c_superblock_height;

    s->superblock_count = s->y_superblock_count + (s->c_superblock_count * 2);
    s->u_superblock_start = s->y_superblock_count;
    s->v_superblock_start = s->u_superblock_start + s->c_superblock_count;

    s->macroblock_width = (s->width + 15) / 16;
    s->macroblock_height = (s->height + 15) / 16;
    s->macroblock_count = s->macroblock_width * s->macroblock_height;

    s->fragment_width[0] = s->width / FRAGMENT_PIXELS;
    s->fragment_height[0] = s->height / FRAGMENT_PIXELS;
    s->fragment_width[1]  = s->fragment_width[0]  >> s->chroma_x_shift;
    s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift;

    /* fragment count covers all 8x8 blocks for all 3 planes */
    y_fragment_count     = s->fragment_width[0] * s->fragment_height[0];
    c_fragment_count     = s->fragment_width[1] * s->fragment_height[1];
    s->fragment_count    = y_fragment_count + 2*c_fragment_count;
    s->fragment_start[1] = y_fragment_count;
    s->fragment_start[2] = y_fragment_count + c_fragment_count;

    if (!s->theora_tables)
    {
        for (i = 0; i < 64; i++) {
            s->coded_dc_scale_factor[i] = vp31_dc_scale_factor[i];
            s->coded_ac_scale_factor[i] = vp31_ac_scale_factor[i];
            s->base_matrix[0][i] = vp31_intra_y_dequant[i];
            s->base_matrix[1][i] = vp31_intra_c_dequant[i];
            s->base_matrix[2][i] = vp31_inter_dequant[i];
            s->filter_limit_values[i] = vp31_filter_limit_values[i];
        }

        for(inter=0; inter<2; inter++){
            for(plane=0; plane<3; plane++){
                s->qr_count[inter][plane]= 1;
                s->qr_size [inter][plane][0]= 63;
                s->qr_base [inter][plane][0]=
                s->qr_base [inter][plane][1]= 2*inter + (!!plane)*!inter;
            }
        }

        /* init VLC tables */
        for (i = 0; i < 16; i++) {

            /* DC histograms */
            init_vlc(&s->dc_vlc[i], 11, 32,
                &dc_bias[i][0][1], 4, 2,
                &dc_bias[i][0][0], 4, 2, 0);

            /* group 1 AC histograms */
            init_vlc(&s->ac_vlc_1[i], 11, 32,
                &ac_bias_0[i][0][1], 4, 2,
                &ac_bias_0[i][0][0], 4, 2, 0);

            /* group 2 AC histograms */
            init_vlc(&s->ac_vlc_2[i], 11, 32,
                &ac_bias_1[i][0][1], 4, 2,
                &ac_bias_1[i][0][0], 4, 2, 0);

            /* group 3 AC histograms */
            init_vlc(&s->ac_vlc_3[i], 11, 32,
                &ac_bias_2[i][0][1], 4, 2,
                &ac_bias_2[i][0][0], 4, 2, 0);

            /* group 4 AC histograms */
            init_vlc(&s->ac_vlc_4[i], 11, 32,
                &ac_bias_3[i][0][1], 4, 2,
                &ac_bias_3[i][0][0], 4, 2, 0);
        }
    } else {

        for (i = 0; i < 16; i++) {
            /* DC histograms */
            if (init_vlc(&s->dc_vlc[i], 11, 32,
                &s->huffman_table[i][0][1], 8, 4,
                &s->huffman_table[i][0][0], 8, 4, 0) < 0)
                goto vlc_fail;

            /* group 1 AC histograms */
            if (init_vlc(&s->ac_vlc_1[i], 11, 32,
                &s->huffman_table[i+16][0][1], 8, 4,
                &s->huffman_table[i+16][0][0], 8, 4, 0) < 0)
                goto vlc_fail;

            /* group 2 AC histograms */
            if (init_vlc(&s->ac_vlc_2[i], 11, 32,
                &s->huffman_table[i+16*2][0][1], 8, 4,
                &s->huffman_table[i+16*2][0][0], 8, 4, 0) < 0)
                goto vlc_fail;

            /* group 3 AC histograms */
            if (init_vlc(&s->ac_vlc_3[i], 11, 32,
                &s->huffman_table[i+16*3][0][1], 8, 4,
                &s->huffman_table[i+16*3][0][0], 8, 4, 0) < 0)
                goto vlc_fail;

            /* group 4 AC histograms */
            if (init_vlc(&s->ac_vlc_4[i], 11, 32,
                &s->huffman_table[i+16*4][0][1], 8, 4,
                &s->huffman_table[i+16*4][0][0], 8, 4, 0) < 0)
                goto vlc_fail;
        }
    }

    init_vlc(&s->superblock_run_length_vlc, 6, 34,
        &superblock_run_length_vlc_table[0][1], 4, 2,
        &superblock_run_length_vlc_table[0][0], 4, 2, 0);

    init_vlc(&s->fragment_run_length_vlc, 5, 30,
        &fragment_run_length_vlc_table[0][1], 4, 2,
        &fragment_run_length_vlc_table[0][0], 4, 2, 0);

    init_vlc(&s->mode_code_vlc, 3, 8,
        &mode_code_vlc_table[0][1], 2, 1,
        &mode_code_vlc_table[0][0], 2, 1, 0);

    init_vlc(&s->motion_vector_vlc, 6, 63,
        &motion_vector_vlc_table[0][1], 2, 1,
        &motion_vector_vlc_table[0][0], 2, 1, 0);

    for (i = 0; i < 3; i++) {
        s->current_frame.data[i] = NULL;
        s->last_frame.data[i] = NULL;
        s->golden_frame.data[i] = NULL;
    }

    return allocate_tables(avctx);

vlc_fail:
    av_log(avctx, AV_LOG_FATAL, "Invalid huffman table\n");
    return -1;
}

/// Release and shuffle frames after decode finishes
static void update_frames(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;

    /* release the last frame, if it is allocated and if it is not the
     * golden frame */
    if (s->last_frame.data[0] && s->last_frame.type != FF_BUFFER_TYPE_COPY)
        ff_thread_release_buffer(avctx, &s->last_frame);

    /* shuffle frames (last = current) */
    s->last_frame= s->current_frame;

    if (s->keyframe) {
        if (s->golden_frame.data[0])
            ff_thread_release_buffer(avctx, &s->golden_frame);
        s->golden_frame = s->current_frame;
        s->last_frame.type = FF_BUFFER_TYPE_COPY;
    }

    s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */
}

static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    Vp3DecodeContext *s = dst->priv_data, *s1 = src->priv_data;
    int qps_changed = 0, i, err;

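/* copies the contiguous range of context fields from start_field up to
 * (but not including) end_field between two decoding contexts */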
#define copy_fields(to, from, start_field, end_field) memcpy(&to->start_field, &from->start_field, (char*)&to->end_field - (char*)&to->start_field)

    if (!s1->current_frame.data[0]
        ||s->width != s1->width
        ||s->height!= s1->height) {
        if (s != s1)
            copy_fields(s, s1, golden_frame, keyframe);
        return -1;
    }

    if (s != s1) {
        // init tables if the first frame hasn't been decoded
        if (!s->current_frame.data[0]) {
            int y_fragment_count, c_fragment_count;
            s->avctx = dst;
            err = allocate_tables(dst);
            if (err)
                return err;
            y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
            c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
            memcpy(s->motion_val[0], s1->motion_val[0], y_fragment_count * sizeof(*s->motion_val[0]));
            memcpy(s->motion_val[1], s1->motion_val[1], c_fragment_count * sizeof(*s->motion_val[1]));
        }

        // copy previous frame data
        copy_fields(s, s1, golden_frame, dsp);

        // copy qscale data if necessary
        for (i = 0; i < 3; i++) {
            if (s->qps[i] != s1->qps[i]) {
                qps_changed = 1;
                memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i]));
            }
        }

        if (s->qps[0] != s1->qps[0])
            memcpy(&s->bounding_values_array, &s1->bounding_values_array, sizeof(s->bounding_values_array));

        if (qps_changed)
            copy_fields(s, s1, qps, superblock_count);
#undef copy_fields
    }

    update_frames(dst);

    return 0;
}

static int vp3_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size,
                            AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    Vp3DecodeContext *s = avctx->priv_data;
    GetBitContext gb;
    int i;

    init_get_bits(&gb, buf, buf_size * 8);

    if (s->theora && get_bits1(&gb))
    {
        av_log(avctx, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n");
        return -1;
    }

    s->keyframe = !get_bits1(&gb);
    if (!s->theora)
        skip_bits(&gb, 1);
    for (i = 0; i < 3; i++)
        s->last_qps[i] = s->qps[i];

    s->nqps=0;
    do{
        s->qps[s->nqps++]= get_bits(&gb, 6);
    } while(s->theora >= 0x030200 && s->nqps<3 && get_bits1(&gb));
    for (i = s->nqps; i < 3; i++)
        s->qps[i] = -1;

    if (s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
            s->keyframe?"key":"", avctx->frame_number+1, s->qps[0]);

    s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
        avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL : AVDISCARD_NONKEY);

    if (s->qps[0] != s->last_qps[0])
        init_loop_filter(s);

    for (i = 0; i < s->nqps; i++)
        // reinit all dequantizers if the first one changed, because
        // the DC of the first quantizer must be used for all matrices
        if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
            init_dequantizer(s, i);

    if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
        return buf_size;

    s->current_frame.reference = 3;
    s->current_frame.pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
    s->current_frame.key_frame = s->keyframe;
    if (ff_thread_get_buffer(avctx, &s->current_frame) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        goto error;
    }

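    /* motion compensation reads at most a 9x9 source block (8x8 plus a
     * half-pel border), so 9 luma lines of scratch space are sufficient */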
    if (!s->edge_emu_buffer)
        s->edge_emu_buffer = av_malloc(9*FFABS(s->current_frame.linesize[0]));

    if (s->keyframe) {
        if (!s->theora)
        {
            skip_bits(&gb, 4); /* width code */
            skip_bits(&gb, 4); /* height code */
            if (s->version)
            {
                s->version = get_bits(&gb, 5);
                if (avctx->frame_number == 0)
                    av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
            }
        }
        if (s->version || s->theora)
        {
            if (get_bits1(&gb))
                av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
            skip_bits(&gb, 2); /* reserved? */
        }
    } else {
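        /* no keyframe has been seen yet: allocate placeholder golden and
         * last frames so that inter prediction has valid buffers to read */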
        if (!s->golden_frame.data[0]) {
            av_log(s->avctx, AV_LOG_WARNING, "vp3: first frame not a keyframe\n");

            s->golden_frame.reference = 3;
            s->golden_frame.pict_type = AV_PICTURE_TYPE_I;
            if (ff_thread_get_buffer(avctx, &s->golden_frame) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                goto error;
            }
            s->last_frame = s->golden_frame;
            s->last_frame.type = FF_BUFFER_TYPE_COPY;
            ff_thread_report_progress(&s->last_frame, INT_MAX, 0);
        }
    }

    memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));
    ff_thread_finish_setup(avctx);

    if (unpack_superblocks(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
        goto error;
    }
    if (unpack_modes(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
        goto error;
    }
    if (unpack_vectors(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
        goto error;
    }
    if (unpack_block_qpis(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
        goto error;
    }
    if (unpack_dct_coeffs(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
        goto error;
    }

    for (i = 0; i < 3; i++) {
        int height = s->height >> (i && s->chroma_y_shift);
        if (s->flipped_image)
            s->data_offset[i] = 0;
        else
            s->data_offset[i] = (height-1) * s->current_frame.linesize[i];
    }

    s->last_slice_end = 0;
    for (i = 0; i < s->c_superblock_height; i++)
        render_slice(s, i);

    // filter the last row
    for (i = 0; i < 3; i++) {
        int row = (s->height >> (3+(i && s->chroma_y_shift))) - 1;
        apply_loop_filter(s, i, row, row+1);
    }
    vp3_draw_horiz_band(s, s->avctx->height);

    *data_size=sizeof(AVFrame);
    *(AVFrame*)data= s->current_frame;

    if (!HAVE_THREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME))
        update_frames(avctx);

    return buf_size;

error:
    ff_thread_report_progress(&s->current_frame, INT_MAX, 0);

    if (!HAVE_THREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME))
        avctx->release_buffer(avctx, &s->current_frame);

    return -1;
}

static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;

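    /* a '1' bit denotes a leaf carrying a 5-bit token; a '0' bit descends
     * into both subtrees, extending the current code by one bit */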
    if (get_bits1(gb)) {
        int token;
        if (s->entries >= 32) { /* overflow */
            av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
            return -1;
        }
        token = get_bits(gb, 5);
        //av_log(avctx, AV_LOG_DEBUG, "hti %d hbits %x token %d entry : %d size %d\n", s->hti, s->hbits, token, s->entries, s->huff_code_size);
        s->huffman_table[s->hti][token][0] = s->hbits;
        s->huffman_table[s->hti][token][1] = s->huff_code_size;
        s->entries++;
    }
    else {
        if (s->huff_code_size >= 32) {/* overflow */
            av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
            return -1;
        }
        s->huff_code_size++;
        s->hbits <<= 1;
        if (read_huffman_tree(avctx, gb))
            return -1;
        s->hbits |= 1;
        if (read_huffman_tree(avctx, gb))
            return -1;
        s->hbits >>= 1;
        s->huff_code_size--;
    }
    return 0;
}

static int vp3_init_thread_copy(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;

    s->superblock_coding      = NULL;
    s->all_fragments          = NULL;
    s->coded_fragment_list[0] = NULL;
    s->dct_tokens_base        = NULL;
    s->superblock_fragments   = NULL;
    s->macroblock_coding      = NULL;
    s->motion_val[0]          = NULL;
    s->motion_val[1]          = NULL;
    s->edge_emu_buffer        = NULL;

    return 0;
}

#if CONFIG_THEORA_DECODER
static const enum PixelFormat theora_pix_fmts[4] = {
    PIX_FMT_YUV420P, PIX_FMT_NONE, PIX_FMT_YUV422P, PIX_FMT_YUV444P
};

2116
static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int visible_width, visible_height, colorspace;
    int offset_x = 0, offset_y = 0;
    AVRational fps, aspect;

    s->theora = get_bits_long(gb, 24);
    av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);

    /* 3.2.0 aka alpha3 has the same frame orientation as original vp3 */
    /* but previous versions have the image flipped relative to vp3 */
    if (s->theora < 0x030200)
    {
        s->flipped_image = 1;
        av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n");
    }

    visible_width  = s->width  = get_bits(gb, 16) << 4;
    visible_height = s->height = get_bits(gb, 16) << 4;

    if(av_image_check_size(s->width, s->height, 0, avctx)){
        av_log(avctx, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n", s->width, s->height);
        s->width= s->height= 0;
        return -1;
    }

    if (s->theora >= 0x030200) {
        visible_width  = get_bits_long(gb, 24);
        visible_height = get_bits_long(gb, 24);

        offset_x = get_bits(gb, 8); /* offset x */
        offset_y = get_bits(gb, 8); /* offset y, from bottom */
    }

    fps.num = get_bits_long(gb, 32);
    fps.den = get_bits_long(gb, 32);
    if (fps.num && fps.den) {
        av_reduce(&avctx->time_base.num, &avctx->time_base.den,
                  fps.den, fps.num, 1<<30);
    }

    aspect.num = get_bits_long(gb, 24);
    aspect.den = get_bits_long(gb, 24);
    if (aspect.num && aspect.den) {
        av_reduce(&avctx->sample_aspect_ratio.num,
                  &avctx->sample_aspect_ratio.den,
                  aspect.num, aspect.den, 1<<30);
    }

    if (s->theora < 0x030200)
        skip_bits(gb, 5); /* keyframe frequency force */
    colorspace = get_bits(gb, 8);
    skip_bits(gb, 24); /* bitrate */

    skip_bits(gb, 6); /* quality hint */

    if (s->theora >= 0x030200)
    {
        skip_bits(gb, 5); /* keyframe frequency force */
        avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
        skip_bits(gb, 3); /* reserved */
    }

//    align_get_bits(gb);

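    /* only expose the visible dimensions when they crop less than one
     * macroblock in each direction and keep the image anchored at the
     * top-left corner of the coded frame */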
    if (   visible_width  <= s->width  && visible_width  > s->width-16
        && visible_height <= s->height && visible_height > s->height-16
        && !offset_x && (offset_y == s->height - visible_height))
        avcodec_set_dimensions(avctx, visible_width, visible_height);
    else
        avcodec_set_dimensions(avctx, s->width, s->height);

    if (colorspace == 1) {
        avctx->color_primaries = AVCOL_PRI_BT470M;
    } else if (colorspace == 2) {
        avctx->color_primaries = AVCOL_PRI_BT470BG;
    }
    if (colorspace == 1 || colorspace == 2) {
        avctx->colorspace = AVCOL_SPC_BT470BG;
        avctx->color_trc  = AVCOL_TRC_BT709;
    }

    return 0;
}

static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int i, n, matrices, inter, plane;

    if (s->theora >= 0x030200) {
        n = get_bits(gb, 3);
        /* loop filter limit values table */
        if (n)
            for (i = 0; i < 64; i++)
                s->filter_limit_values[i] = get_bits(gb, n);
    }

    if (s->theora >= 0x030200)
        n = get_bits(gb, 4) + 1;
    else
        n = 16;
    /* quality threshold table */
    for (i = 0; i < 64; i++)
        s->coded_ac_scale_factor[i] = get_bits(gb, n);

    if (s->theora >= 0x030200)
        n = get_bits(gb, 4) + 1;
    else
        n = 16;
    /* dc scale factor table */
    for (i = 0; i < 64; i++)
        s->coded_dc_scale_factor[i] = get_bits(gb, n);

    if (s->theora >= 0x030200)
        matrices = get_bits(gb, 9) + 1;
    else
        matrices = 3;

    if(matrices > 384){
        av_log(avctx, AV_LOG_ERROR, "invalid number of base matrices\n");
        return -1;
    }

    for(n=0; n<matrices; n++){
        for (i = 0; i < 64; i++)
            s->base_matrix[n][i]= get_bits(gb, 8);
    }

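    /* for each (inter, plane) pair the stream either reuses an earlier set
     * of quant ranges or codes a new one: qr_size[] holds run lengths over
     * qi = 0..63 and qr_base[] the base-matrix index used for each run */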
    for (inter = 0; inter <= 1; inter++) {
        for (plane = 0; plane <= 2; plane++) {
            int newqr= 1;
            if (inter || plane > 0)
                newqr = get_bits1(gb);
            if (!newqr) {
                int qtj, plj;
                if(inter && get_bits1(gb)){
                    qtj = 0;
                    plj = plane;
                }else{
                    qtj= (3*inter + plane - 1) / 3;
                    plj= (plane + 2) % 3;
                }
                s->qr_count[inter][plane]= s->qr_count[qtj][plj];
                memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj], sizeof(s->qr_size[0][0]));
                memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj], sizeof(s->qr_base[0][0]));
            } else {
                int qri= 0;
                int qi = 0;

                for(;;){
                    i= get_bits(gb, av_log2(matrices-1)+1);
                    if(i>= matrices){
                        av_log(avctx, AV_LOG_ERROR, "invalid base matrix index\n");
                        return -1;
                    }
                    s->qr_base[inter][plane][qri]= i;
                    if(qi >= 63)
                        break;
                    i = get_bits(gb, av_log2(63-qi)+1) + 1;
                    s->qr_size[inter][plane][qri++]= i;
                    qi += i;
                }

                if (qi > 63) {
                    av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
                    return -1;
                }
                s->qr_count[inter][plane]= qri;
            }
        }
    }

    /* Huffman tables */
    for (s->hti = 0; s->hti < 80; s->hti++) {
        s->entries = 0;
        s->huff_code_size = 1;
        if (!get_bits1(gb)) {
            s->hbits = 0;
            if(read_huffman_tree(avctx, gb))
                return -1;
            s->hbits = 1;
            if(read_huffman_tree(avctx, gb))
                return -1;
        }
    }

    s->theora_tables = 1;

    return 0;
}

static av_cold int theora_decode_init(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    GetBitContext gb;
    int ptype;
    uint8_t *header_start[3];
    int header_len[3];
    int i;

    avctx->pix_fmt = PIX_FMT_YUV420P;

    s->theora = 1;

    if (!avctx->extradata_size)
    {
        av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
        return -1;
    }

    if (avpriv_split_xiph_headers(avctx->extradata, avctx->extradata_size,
                              42, header_start, header_len) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
        return -1;
    }

    for (i = 0; i < 3; i++) {
        init_get_bits(&gb, header_start[i], header_len[i] * 8);

        ptype = get_bits(&gb, 8);

        if (!(ptype & 0x80))
        {
            av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
//            return -1;
        }

        // FIXME: Check for this as well.
        skip_bits_long(&gb, 6*8); /* "theora" */

        switch(ptype)
        {
            case 0x80:
                theora_decode_header(avctx, &gb);
                break;
            case 0x81:
// FIXME: is this needed? it breaks sometimes
//                theora_decode_comments(avctx, gb);
                break;
            case 0x82:
                if (theora_decode_tables(avctx, &gb))
                    return -1;
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype&~0x80);
                break;
        }
        if(ptype != 0x81 && 8*header_len[i] != get_bits_count(&gb))
            av_log(avctx, AV_LOG_WARNING, "%d bits left in packet %X\n", 8*header_len[i] - get_bits_count(&gb), ptype);
        if (s->theora < 0x030200)
            break;
    }

    return vp3_decode_init(avctx);
}

AVCodec ff_theora_decoder = {
    .name                  = "theora",
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = CODEC_ID_THEORA,
    .priv_data_size        = sizeof(Vp3DecodeContext),
    .init                  = theora_decode_init,
    .close                 = vp3_decode_end,
    .decode                = vp3_decode_frame,
    .capabilities          = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
                             CODEC_CAP_FRAME_THREADS,
    .flush                 = vp3_decode_flush,
    .long_name             = NULL_IF_CONFIG_SMALL("Theora"),
    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context)
};
#endif

AVCodec ff_vp3_decoder = {
    .name                  = "vp3",
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = CODEC_ID_VP3,
    .priv_data_size        = sizeof(Vp3DecodeContext),
    .init                  = vp3_decode_init,
    .close                 = vp3_decode_end,
    .decode                = vp3_decode_frame,
    .capabilities          = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
                             CODEC_CAP_FRAME_THREADS,
    .flush                 = vp3_decode_flush,
    .long_name             = NULL_IF_CONFIG_SMALL("On2 VP3"),
    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context),
};