/*
 * H.26L/H.264/AVC/JVT/14496-10/... direct mb/block decoding
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG4 part10 direct mb/block decoding.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include "internal.h"
#include "dsputil.h"
#include "avcodec.h"
#include "mpegvideo.h"
#include "h264.h"
#include "rectangle.h"
#include "thread.h"

//#undef NDEBUG
#include <assert.h>


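/*
 * Temporal-direct distance scale factor for one list-0 reference, using the
 * tb/td/tx formulation of H.264 temporal direct prediction:
 *   td = clip(poc1 - poc0, -128, 127)
 *   tb = clip(poc  - poc0, -128, 127)
 *   tx = (16384 + |td|/2) / td
 *   DistScaleFactor = clip((tb*tx + 32) >> 6, -1024, 1023)
 * td == 0 and long-term references fall back to 256 (unit scale, i.e. the
 * colocated motion vector is used unscaled).
 *
 * Illustrative example (numbers chosen here, not from the spec text):
 * poc0 = 0, poc = 2, poc1 = 4 gives tb = 2, td = 4, tx = 4096 and a scale
 * of 128, i.e. half of 256, so the colocated MV is halved for list 0.
 */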
static int get_scale_factor(H264Context * const h, int poc, int poc1, int i){
    int poc0 = h->ref_list[0][i].poc;
    int td = av_clip(poc1 - poc0, -128, 127);
    if(td == 0 || h->ref_list[0][i].long_ref){
        return 256;
    }else{
        int tb = av_clip(poc - poc0, -128, 127);
        int tx = (16384 + (FFABS(td) >> 1)) / td;
        return av_clip((tb*tx + 32) >> 6, -1024, 1023);
    }
}

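/*
 * Fill h->dist_scale_factor[] for every list-0 reference of the current
 * slice (and, for MBAFF frames, the per-field tables in
 * h->dist_scale_factor_field[][]), so temporal direct prediction can scale
 * colocated motion vectors with a simple table lookup.
 */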
void ff_h264_direct_dist_scale_factor(H264Context * const h){
    MpegEncContext * const s = &h->s;
    const int poc = h->s.current_picture_ptr->field_poc[ s->picture_structure == PICT_BOTTOM_FIELD ];
    const int poc1 = h->ref_list[1][0].poc;
    int i, field;

    if(FRAME_MBAFF)
        for(field=0; field<2; field++){
            const int poc  = h->s.current_picture_ptr->field_poc[field];
            const int poc1 = h->ref_list[1][0].field_poc[field];
            for(i=0; i < 2*h->ref_count[0]; i++)
                h->dist_scale_factor_field[field][i^field] = get_scale_factor(h, poc, poc1, i+16);
        }

    for(i=0; i<h->ref_count[0]; i++){
        h->dist_scale_factor[i] = get_scale_factor(h, poc, poc1, i);
    }
}

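/*
 * Build a map from the colocated (list-1) picture's reference indices to the
 * current slice's list-0 indices by matching the 4*frame_num + parity keys
 * stored in ref_poc[][][]. With mbafi set, the field-pair entries (offset by
 * 16) are filled for MBAFF use. References that cannot be matched stay
 * mapped to 0 (see the memset below).
 */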
static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field, int colfield, int mbafi){
    MpegEncContext * const s = &h->s;
    Picture * const ref1 = &h->ref_list[1][0];
    int j, old_ref, rfield;
    int start= mbafi ? 16                      : 0;
    int end  = mbafi ? 16+2*h->ref_count[0]    : h->ref_count[0];
    int interl= mbafi || s->picture_structure != PICT_FRAME;

    /* bogus; fills in for missing frames */
    memset(map[list], 0, sizeof(map[list]));

    for(rfield=0; rfield<2; rfield++){
        for(old_ref=0; old_ref<ref1->ref_count[colfield][list]; old_ref++){
            int poc = ref1->ref_poc[colfield][list][old_ref];

            if     (!interl)
                poc |= 3;
            else if( interl && (poc&3) == 3) //FIXME store all MBAFF references so this isnt needed
                poc= (poc&~3) + rfield + 1;

            for(j=start; j<end; j++){
                if (4 * h->ref_list[0][j].frame_num + (h->ref_list[0][j].f.reference & 3) == poc) {
                    int cur_ref= mbafi ? (j-16)^field : j;
                    if (ref1->mbaff)
                        map[list][2 * old_ref + (rfield^field) + 16] = cur_ref;
                    if(rfield == field || !interl)
                        map[list][old_ref] = cur_ref;
                    break;
                }
            }
        }
    }
}

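/*
 * Per-slice setup for B-frame direct prediction: record the current
 * picture's reference counts and 4*frame_num + parity keys, pick the
 * colocated field parity and field offset for field/MBAFF cases, and, for
 * temporal direct only, precompute the map_col_to_list0 tables via
 * fill_colmap().
 */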
void ff_h264_direct_ref_list_init(H264Context * const h){
    MpegEncContext * const s = &h->s;
    Picture * const ref1 = &h->ref_list[1][0];
    Picture * const cur = s->current_picture_ptr;
    int list, j, field;
    int sidx= (s->picture_structure&1)^1;
    int ref1sidx = (ref1->f.reference&1)^1;

    for(list=0; list<2; list++){
        cur->ref_count[sidx][list] = h->ref_count[list];
        for(j=0; j<h->ref_count[list]; j++)
            cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num + (h->ref_list[list][j].f.reference & 3);
    }

    if(s->picture_structure == PICT_FRAME){
        memcpy(cur->ref_count[1], cur->ref_count[0], sizeof(cur->ref_count[0]));
        memcpy(cur->ref_poc  [1], cur->ref_poc  [0], sizeof(cur->ref_poc  [0]));
    }

    cur->mbaff= FRAME_MBAFF;

    h->col_fieldoff= 0;
    if(s->picture_structure == PICT_FRAME){
        int cur_poc = s->current_picture_ptr->poc;
        int *col_poc = h->ref_list[1]->field_poc;
        h->col_parity= (FFABS(col_poc[0] - cur_poc) >= FFABS(col_poc[1] - cur_poc));
        ref1sidx=sidx= h->col_parity;
    } else if (!(s->picture_structure & h->ref_list[1][0].f.reference) && !h->ref_list[1][0].mbaff) { // FL -> FL & differ parity
        h->col_fieldoff = 2 * h->ref_list[1][0].f.reference - 3;
    }

    if (cur->f.pict_type != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
        return;

    for(list=0; list<2; list++){
        fill_colmap(h, h->map_col_to_list0, list, sidx, ref1sidx, 0);
        if(FRAME_MBAFF)
        for(field=0; field<2; field++)
            fill_colmap(h, h->map_col_to_list0_field[field], list, field, field, 1);
    }
}

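/*
 * With frame-threaded decoding, block until the colocated reference picture
 * has decoded at least up to macroblock row mb_y (scaled down for field
 * pictures), since the direct predictors below read its mb_type, motion
 * vectors and reference indices.
 */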
static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y)
{
    int ref_field = ref->f.reference - 1;
    int ref_field_picture = ref->field_picture;
    int ref_height = 16*h->s.mb_height >> ref_field_picture;

    if(!HAVE_THREADS || !(h->s.avctx->active_thread_type&FF_THREAD_FRAME))
        return;

    //FIXME it can be safe to access mb stuff
    //even if pixels aren't deblocked yet

    ff_thread_await_progress(&ref->f,
                             FFMIN(16 * mb_y >> ref_field_picture, ref_height - 1),
                             ref_field_picture && ref_field);
}

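/*
 * Spatial direct prediction: the L0/L1 reference indices are the minimum of
 * the neighbouring reference indices (left, top, top-right, falling back to
 * top-left), and the motion vectors come from the usual median predictor
 * (or are copied when only one neighbour uses that reference). Blocks whose
 * colocated partition is nearly static (|mv| <= 1) relative to a short-term
 * reference 0 are forced to zero motion (col_zero_flag). The AFL/AFR/FR/FL
 * cases below handle interlaced/progressive mismatches with the colocated
 * picture.
 */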
static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
    MpegEncContext * const s = &h->s;
    int b8_stride = 2;
    int b4_stride = h->b_stride;
    int mb_xy = h->mb_xy, mb_y = s->mb_y;
    int mb_type_col[2];
    const int16_t (*l1mv0)[2], (*l1mv1)[2];
    const int8_t *l1ref0, *l1ref1;
    const int is_b8x8 = IS_8X8(*mb_type);
    unsigned int sub_mb_type= MB_TYPE_L0L1;
    int i8, i4;
    int ref[2];
    int mv[2];
    int list;

    assert(h->ref_list[1][0].f.reference & 3);

    await_reference_mb_row(h, &h->ref_list[1][0], s->mb_y + !!IS_INTERLACED(*mb_type));

#define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16|MB_TYPE_INTRA4x4|MB_TYPE_INTRA16x16|MB_TYPE_INTRA_PCM)


    /* ref = min(neighbors) */
    for(list=0; list<2; list++){
        int left_ref = h->ref_cache[list][scan8[0] - 1];
        int top_ref  = h->ref_cache[list][scan8[0] - 8];
        int refc = h->ref_cache[list][scan8[0] - 8 + 4];
        const int16_t *C= h->mv_cache[list][ scan8[0] - 8 + 4];
        if(refc == PART_NOT_AVAILABLE){
            refc = h->ref_cache[list][scan8[0] - 8 - 1];
            C    = h-> mv_cache[list][scan8[0] - 8 - 1];
        }
        ref[list] = FFMIN3((unsigned)left_ref, (unsigned)top_ref, (unsigned)refc);
        if(ref[list] >= 0){
            //this is just pred_motion() but with the cases removed that cannot happen for direct blocks
            const int16_t * const A= h->mv_cache[list][ scan8[0] - 1 ];
            const int16_t * const B= h->mv_cache[list][ scan8[0] - 8 ];

            int match_count= (left_ref==ref[list]) + (top_ref==ref[list]) + (refc==ref[list]);
            if(match_count > 1){ //most common
                mv[list]= pack16to32(mid_pred(A[0], B[0], C[0]),
                                     mid_pred(A[1], B[1], C[1]) );
            }else {
                assert(match_count==1);
                if(left_ref==ref[list]){
                    mv[list]= AV_RN32A(A);
                }else if(top_ref==ref[list]){
                    mv[list]= AV_RN32A(B);
                }else{
                    mv[list]= AV_RN32A(C);
                }
            }
        }else{
            int mask= ~(MB_TYPE_L0 << (2*list));
            mv[list] = 0;
            ref[list] = -1;
            if(!is_b8x8)
                *mb_type &= mask;
            sub_mb_type &= mask;
        }
    }
    if(ref[0] < 0 && ref[1] < 0){
        ref[0] = ref[1] = 0;
        if(!is_b8x8)
            *mb_type |= MB_TYPE_L0L1;
        sub_mb_type |= MB_TYPE_L0L1;
    }

    if(!(is_b8x8|mv[0]|mv[1])){
        fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
        fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
        fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
        fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, 0, 4);
        *mb_type= (*mb_type & ~(MB_TYPE_8x8|MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_P1L0|MB_TYPE_P1L1))|MB_TYPE_16x16|MB_TYPE_DIRECT2;
        return;
    }

    if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
        if (!IS_INTERLACED(*mb_type)) {                          //     AFR/FR    -> AFL/FL
            mb_y = (s->mb_y&~1) + h->col_parity;
            mb_xy= s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride;
            b8_stride = 0;
        }else{
            mb_y  += h->col_fieldoff;
            mb_xy += s->mb_stride*h->col_fieldoff; // non zero for FL -> FL & differ parity
        }
        goto single_col;
    }else{                                               // AFL/AFR/FR/FL -> AFR/FR
        if(IS_INTERLACED(*mb_type)){                     // AFL       /FL -> AFR/FR
            mb_y = s->mb_y&~1;
            mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride;
            mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
            mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride];
            b8_stride = 2+4*s->mb_stride;
            b4_stride *= 6;
            if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
                mb_type_col[0] &= ~MB_TYPE_INTERLACED;
                mb_type_col[1] &= ~MB_TYPE_INTERLACED;
            }

            sub_mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if(    (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)
                && (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA)
                && !is_b8x8){
                *mb_type   |= MB_TYPE_16x8 |MB_TYPE_DIRECT2; /* B_16x8 */
            }else{
                *mb_type   |= MB_TYPE_8x8;
            }
        }else{                                           //     AFR/FR    -> AFR/FR
single_col:
            mb_type_col[0] =
            mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];

            sub_mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
                *mb_type   |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_16x16 */
            }else if(!is_b8x8 && (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16))){
                *mb_type   |= MB_TYPE_DIRECT2 | (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16));
            }else{
                if(!h->sps.direct_8x8_inference_flag){
                    /* FIXME save sub mb types from previous frames (or derive from MVs)
                    * so we know exactly what block size to use */
                    sub_mb_type += (MB_TYPE_8x8-MB_TYPE_16x16); /* B_SUB_4x4 */
                }
                *mb_type   |= MB_TYPE_8x8;
            }
        }
    }

    await_reference_mb_row(h, &h->ref_list[1][0], mb_y);

    l1mv0  = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]];
    l1mv1  = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]];
    l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
    l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
    if(!b8_stride){
        if(s->mb_y&1){
            l1ref0 += 2;
            l1ref1 += 2;
            l1mv0  +=  2*b4_stride;
            l1mv1  +=  2*b4_stride;
        }
    }


        if(IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])){
            int n=0;
            for(i8=0; i8<4; i8++){
                int x8 = i8&1;
                int y8 = i8>>1;
                int xy8 = x8+y8*b8_stride;
                int xy4 = 3*x8+y8*b4_stride;
                int a,b;

                if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                    continue;
                h->sub_mb_type[i8] = sub_mb_type;

                fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[0], 1);
                fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[1], 1);
                if(!IS_INTRA(mb_type_col[y8]) && !h->ref_list[1][0].long_ref
                   && (   (l1ref0[xy8] == 0 && FFABS(l1mv0[xy4][0]) <= 1 && FFABS(l1mv0[xy4][1]) <= 1)
                       || (l1ref0[xy8]  < 0 && l1ref1[xy8] == 0 && FFABS(l1mv1[xy4][0]) <= 1 && FFABS(l1mv1[xy4][1]) <= 1))){
                    a=b=0;
                    if(ref[0] > 0)
                        a= mv[0];
                    if(ref[1] > 0)
                        b= mv[1];
                    n++;
                }else{
                    a= mv[0];
                    b= mv[1];
                }
                fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, a, 4);
                fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, b, 4);
            }
            if(!is_b8x8 && !(n&3))
                *mb_type= (*mb_type & ~(MB_TYPE_8x8|MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_P1L0|MB_TYPE_P1L1))|MB_TYPE_16x16|MB_TYPE_DIRECT2;
        }else if(IS_16X16(*mb_type)){
            int a,b;

            fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
            fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
            if(!IS_INTRA(mb_type_col[0]) && !h->ref_list[1][0].long_ref
               && (   (l1ref0[0] == 0 && FFABS(l1mv0[0][0]) <= 1 && FFABS(l1mv0[0][1]) <= 1)
                   || (l1ref0[0]  < 0 && l1ref1[0] == 0 && FFABS(l1mv1[0][0]) <= 1 && FFABS(l1mv1[0][1]) <= 1
                       && h->x264_build>33U))){
                a=b=0;
                if(ref[0] > 0)
                    a= mv[0];
                if(ref[1] > 0)
                    b= mv[1];
            }else{
                a= mv[0];
                b= mv[1];
            }
            fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, a, 4);
            fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, b, 4);
        }else{
            int n=0;
            for(i8=0; i8<4; i8++){
                const int x8 = i8&1;
                const int y8 = i8>>1;

                if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                    continue;
                h->sub_mb_type[i8] = sub_mb_type;

                fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, mv[0], 4);
                fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, mv[1], 4);
                fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[0], 1);
                fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[1], 1);

                assert(b8_stride==2);
                /* col_zero_flag */
                if(!IS_INTRA(mb_type_col[0]) && !h->ref_list[1][0].long_ref && (   l1ref0[i8] == 0
                                              || (l1ref0[i8] < 0 && l1ref1[i8] == 0
                                                  && h->x264_build>33U))){
                    const int16_t (*l1mv)[2]= l1ref0[i8] == 0 ? l1mv0 : l1mv1;
                    if(IS_SUB_8X8(sub_mb_type)){
                        const int16_t *mv_col = l1mv[x8*3 + y8*3*b4_stride];
                        if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
                            if(ref[0] == 0)
                                fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
                            if(ref[1] == 0)
                                fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
                            n+=4;
                        }
                    }else{
                        int m=0;
                    for(i4=0; i4<4; i4++){
                        const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*b4_stride];
                        if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
                            if(ref[0] == 0)
                                AV_ZERO32(h->mv_cache[0][scan8[i8*4+i4]]);
                            if(ref[1] == 0)
                                AV_ZERO32(h->mv_cache[1][scan8[i8*4+i4]]);
                            m++;
                        }
                    }
                    if(!(m&3))
                        h->sub_mb_type[i8]+= MB_TYPE_16x16 - MB_TYPE_8x8;
                    n+=m;
                    }
                }
            }
            if(!is_b8x8 && !(n&15))
                *mb_type= (*mb_type & ~(MB_TYPE_8x8|MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_P1L0|MB_TYPE_P1L1))|MB_TYPE_16x16|MB_TYPE_DIRECT2;
        }
}

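/*
 * Temporal direct prediction: reference indices are taken from the colocated
 * list-1 picture (remapped through map_col_to_list0) and its motion vector
 * mvCol is scaled by the precomputed distance scale factor:
 *     mvL0 = (DistScaleFactor * mvCol + 128) >> 8
 *     mvL1 = mvL0 - mvCol
 * For example (illustrative numbers), DistScaleFactor = 128 and
 * mvCol = (8, -4) give mvL0 = (4, -2) and mvL1 = (-4, 2).
 */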
static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
    MpegEncContext * const s = &h->s;
    int b8_stride = 2;
    int b4_stride = h->b_stride;
    int mb_xy = h->mb_xy, mb_y = s->mb_y;
    int mb_type_col[2];
    const int16_t (*l1mv0)[2], (*l1mv1)[2];
    const int8_t *l1ref0, *l1ref1;
    const int is_b8x8 = IS_8X8(*mb_type);
    unsigned int sub_mb_type;
    int i8, i4;

    assert(h->ref_list[1][0].f.reference & 3);

    await_reference_mb_row(h, &h->ref_list[1][0], s->mb_y + !!IS_INTERLACED(*mb_type));

    if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
        if (!IS_INTERLACED(*mb_type)) {                          //     AFR/FR    -> AFL/FL
            mb_y = (s->mb_y&~1) + h->col_parity;
            mb_xy= s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride;
            b8_stride = 0;
        }else{
            mb_y  += h->col_fieldoff;
            mb_xy += s->mb_stride*h->col_fieldoff; // non zero for FL -> FL & differ parity
        }
        goto single_col;
    }else{                                               // AFL/AFR/FR/FL -> AFR/FR
        if(IS_INTERLACED(*mb_type)){                     // AFL       /FL -> AFR/FR
            mb_y = s->mb_y&~1;
            mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride;
            mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
            mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride];
            b8_stride = 2+4*s->mb_stride;
            b4_stride *= 6;
            if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
                mb_type_col[0] &= ~MB_TYPE_INTERLACED;
                mb_type_col[1] &= ~MB_TYPE_INTERLACED;
            }

            sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */

            if(    (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)
                && (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA)
                && !is_b8x8){
                *mb_type   |= MB_TYPE_16x8 |MB_TYPE_L0L1|MB_TYPE_DIRECT2; /* B_16x8 */
            }else{
                *mb_type   |= MB_TYPE_8x8|MB_TYPE_L0L1;
            }
        }else{                                           //     AFR/FR    -> AFR/FR
single_col:
            mb_type_col[0] =
            mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];

            sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
                *mb_type   |= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_16x16 */
            }else if(!is_b8x8 && (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16))){
                *mb_type   |= MB_TYPE_L0L1|MB_TYPE_DIRECT2 | (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16));
            }else{
                if(!h->sps.direct_8x8_inference_flag){
                    /* FIXME save sub mb types from previous frames (or derive from MVs)
                    * so we know exactly what block size to use */
                    sub_mb_type = MB_TYPE_8x8|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_4x4 */
                }
                *mb_type   |= MB_TYPE_8x8|MB_TYPE_L0L1;
            }
        }
    }

    await_reference_mb_row(h, &h->ref_list[1][0], mb_y);

    l1mv0  = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]];
    l1mv1  = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]];
    l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
    l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
    if(!b8_stride){
        if(s->mb_y&1){
            l1ref0 += 2;
            l1ref1 += 2;
            l1mv0  +=  2*b4_stride;
            l1mv1  +=  2*b4_stride;
        }
    }

    {
        const int *map_col_to_list0[2] = {h->map_col_to_list0[0], h->map_col_to_list0[1]};
        const int *dist_scale_factor = h->dist_scale_factor;
        int ref_offset;

        if(FRAME_MBAFF && IS_INTERLACED(*mb_type)){
            map_col_to_list0[0] = h->map_col_to_list0_field[s->mb_y&1][0];
            map_col_to_list0[1] = h->map_col_to_list0_field[s->mb_y&1][1];
            dist_scale_factor   =h->dist_scale_factor_field[s->mb_y&1];
        }
        ref_offset = (h->ref_list[1][0].mbaff<<4) & (mb_type_col[0]>>3); //if(h->ref_list[1][0].mbaff && IS_INTERLACED(mb_type_col[0])) ref_offset=16 else 0

        if(IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])){
            int y_shift  = 2*!IS_INTERLACED(*mb_type);
            assert(h->sps.direct_8x8_inference_flag);

            for(i8=0; i8<4; i8++){
                const int x8 = i8&1;
                const int y8 = i8>>1;
                int ref0, scale;
                const int16_t (*l1mv)[2]= l1mv0;

                if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                    continue;
                h->sub_mb_type[i8] = sub_mb_type;

                fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
                if(IS_INTRA(mb_type_col[y8])){
                    fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
                    fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
                    fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
                    continue;
                }

                ref0 = l1ref0[x8 + y8*b8_stride];
                if(ref0 >= 0)
                    ref0 = map_col_to_list0[0][ref0 + ref_offset];
                else{
                    ref0 = map_col_to_list0[1][l1ref1[x8 + y8*b8_stride] + ref_offset];
                    l1mv= l1mv1;
                }
                scale = dist_scale_factor[ref0];
                fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);

                {
                    const int16_t *mv_col = l1mv[x8*3 + y8*b4_stride];
                    int my_col = (mv_col[1]<<y_shift)/2;
                    int mx = (scale * mv_col[0] + 128) >> 8;
                    int my = (scale * my_col + 128) >> 8;
                    fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4);
                    fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-my_col), 4);
                }
            }
            return;
        }

        /* one-to-one mv scaling */

        if(IS_16X16(*mb_type)){
            int ref, mv0, mv1;

            fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1);
            if(IS_INTRA(mb_type_col[0])){
                ref=mv0=mv1=0;
            }else{
                const int ref0 = l1ref0[0] >= 0 ? map_col_to_list0[0][l1ref0[0] + ref_offset]
                                                : map_col_to_list0[1][l1ref1[0] + ref_offset];
                const int scale = dist_scale_factor[ref0];
                const int16_t *mv_col = l1ref0[0] >= 0 ? l1mv0[0] : l1mv1[0];
                int mv_l0[2];
                mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
                mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                ref= ref0;
                mv0= pack16to32(mv_l0[0],mv_l0[1]);
                mv1= pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]);
            }
            fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
            fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, mv0, 4);
            fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, mv1, 4);
        }else{
            for(i8=0; i8<4; i8++){
                const int x8 = i8&1;
                const int y8 = i8>>1;
                int ref0, scale;
                const int16_t (*l1mv)[2]= l1mv0;

                if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                    continue;
                h->sub_mb_type[i8] = sub_mb_type;
                fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
                if(IS_INTRA(mb_type_col[0])){
                    fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
                    fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
                    fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
                    continue;
                }

                assert(b8_stride == 2);
                ref0 = l1ref0[i8];
                if(ref0 >= 0)
                    ref0 = map_col_to_list0[0][ref0 + ref_offset];
                else{
                    ref0 = map_col_to_list0[1][l1ref1[i8] + ref_offset];
                    l1mv= l1mv1;
                }
                scale = dist_scale_factor[ref0];

                fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);
                if(IS_SUB_8X8(sub_mb_type)){
                    const int16_t *mv_col = l1mv[x8*3 + y8*3*b4_stride];
                    int mx = (scale * mv_col[0] + 128) >> 8;
                    int my = (scale * mv_col[1] + 128) >> 8;
                    fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4);
                    fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-mv_col[1]), 4);
                }else
                for(i4=0; i4<4; i4++){
                    const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*b4_stride];
                    int16_t *mv_l0 = h->mv_cache[0][scan8[i8*4+i4]];
                    mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
                    mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                    AV_WN32A(h->mv_cache[1][scan8[i8*4+i4]],
                        pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]));
                }
            }
        }
    }
}

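/*
 * Dispatcher for direct-mode motion prediction: callers resolve B_DIRECT /
 * B-skip macroblocks (and DIRECT 8x8 partitions) through this, and the
 * slice-level direct_spatial_mv_pred flag selects the spatial or temporal
 * derivation above.
 */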
void ff_h264_pred_direct_motion(H264Context * const h, int *mb_type){
    if(h->direct_spatial_mv_pred){
        pred_spatial_direct_motion(h, mb_type);
    }else{
        pred_temp_direct_motion(h, mb_type);
    }
}