/*
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>

#include "libavutil/avassert.h"
#include "libavutil/internal.h"
#include "avcodec.h"
#include "h261.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include "qpeldsp.h"
#include "wmv2.h"
#include <limits.h>

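/* MPEG-4 global motion compensation, fast path for a single warping point:
 * each plane is translated by its sprite offset and interpolated with gmc1(). */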
static void gmc1_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        uint8_t **ref_picture)
{
    uint8_t *ptr;
    int src_x, src_y, motion_x, motion_y;
    ptrdiff_t offset, linesize, uvlinesize;
    int emu = 0;

    motion_x   = s->sprite_offset[0][0];
    motion_y   = s->sprite_offset[0][1];
    src_x      = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy + 1));
    src_y      = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy + 1));
    motion_x *= 1 << (3 - s->sprite_warping_accuracy);
    motion_y *= 1 << (3 - s->sprite_warping_accuracy);
    src_x      = av_clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x = 0;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y == s->height)
        motion_y = 0;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + src_y * linesize + src_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0) ||
        (unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 linesize, linesize,
                                 17, 17,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
    }

    if ((motion_x | motion_y) & 7) {
        s->mdsp.gmc1(dest_y, ptr, linesize, 16,
                     motion_x & 15, motion_y & 15, 128 - s->no_rounding);
        s->mdsp.gmc1(dest_y + 8, ptr + 8, linesize, 16,
                     motion_x & 15, motion_y & 15, 128 - s->no_rounding);
    } else {
        int dxy;

        dxy = ((motion_x >> 3) & 1) | ((motion_y >> 2) & 2);
        if (s->no_rounding) {
            s->hdsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        } else {
            s->hdsp.put_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        }
    }

    if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
        return;

    motion_x   = s->sprite_offset[1][0];
    motion_y   = s->sprite_offset[1][1];
    src_x      = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy + 1));
    src_y      = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy + 1));
    motion_x  *= 1 << (3 - s->sprite_warping_accuracy);
    motion_y  *= 1 << (3 - s->sprite_warping_accuracy);
    src_x      = av_clip(src_x, -8, s->width >> 1);
    if (src_x == s->width >> 1)
        motion_x = 0;
    src_y = av_clip(src_y, -8, s->height >> 1);
    if (src_y == s->height >> 1)
        motion_y = 0;

    offset = (src_y * uvlinesize) + src_x;
    ptr    = ref_picture[1] + offset;
    if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - 9, 0) ||
        (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - 9, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;
    }
    s->mdsp.gmc1(dest_cb, ptr, uvlinesize, 8,
                 motion_x & 15, motion_y & 15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->sc.edge_emu_buffer;
    }
    s->mdsp.gmc1(dest_cr, ptr, uvlinesize, 8,
                 motion_x & 15, motion_y & 15, 128 - s->no_rounding);
}

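/* General global motion compensation (sprite warping): each plane is warped
 * using the affine sprite_offset/sprite_delta parameters. */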
static void gmc_motion(MpegEncContext *s,
                       uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                       uint8_t **ref_picture)
{
    uint8_t *ptr;
    int linesize, uvlinesize;
    const int a = s->sprite_warping_accuracy;
    int ox, oy;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0];

    ox = s->sprite_offset[0][0] + s->sprite_delta[0][0] * s->mb_x * 16 +
         s->sprite_delta[0][1] * s->mb_y * 16;
    oy = s->sprite_offset[0][1] + s->sprite_delta[1][0] * s->mb_x * 16 +
         s->sprite_delta[1][1] * s->mb_y * 16;

    s->mdsp.gmc(dest_y, ptr, linesize, 16,
                ox, oy,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                s->h_edge_pos, s->v_edge_pos);
    s->mdsp.gmc(dest_y + 8, ptr, linesize, 16,
                ox + s->sprite_delta[0][0] * 8,
                oy + s->sprite_delta[1][0] * 8,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                s->h_edge_pos, s->v_edge_pos);

    if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
        return;

    ox = s->sprite_offset[1][0] + s->sprite_delta[0][0] * s->mb_x * 8 +
         s->sprite_delta[0][1] * s->mb_y * 8;
    oy = s->sprite_offset[1][1] + s->sprite_delta[1][0] * s->mb_x * 8 +
         s->sprite_delta[1][1] * s->mb_y * 8;

    ptr = ref_picture[1];
    s->mdsp.gmc(dest_cb, ptr, uvlinesize, 8,
                ox, oy,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                (s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);

    ptr = ref_picture[2];
    s->mdsp.gmc(dest_cr, ptr, uvlinesize, 8,
                ox, oy,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                (s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);
}

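/* Half-pel motion compensation of one 8x8 luma block.
 * Returns 1 if the edge emulation buffer had to be used, 0 otherwise. */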
static inline int hpel_motion(MpegEncContext *s,
                              uint8_t *dest, uint8_t *src,
                              int src_x, int src_y,
                              op_pixels_func *pix_op,
                              int motion_x, int motion_y)
{
    int dxy = 0;
    int emu = 0;

    src_x += motion_x >> 1;
    src_y += motion_y >> 1;

    /* WARNING: do not forget half pels */
    src_x = av_clip(src_x, -16, s->width); // FIXME unneeded for emu?
    if (src_x != s->width)
        dxy |= motion_x & 1;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y != s->height)
        dxy |= (motion_y & 1) << 1;
    src += src_y * s->linesize + src_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 7, 0) ||
        (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 1) - 7, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
                                 s->linesize, s->linesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        src = s->sc.edge_emu_buffer;
        emu = 1;
    }
    pix_op[dxy](dest, src, s->linesize, 8);
    return emu;
}

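/* Core half-pel motion compensation of one macroblock (luma and chroma),
 * shared by the frame, field and 16x8 prediction paths. */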
static av_always_inline
void mpeg_motion_internal(MpegEncContext *s,
                          uint8_t *dest_y,
                          uint8_t *dest_cb,
                          uint8_t *dest_cr,
                          int field_based,
                          int bottom_field,
                          int field_select,
                          uint8_t **ref_picture,
                          op_pixels_func (*pix_op)[4],
                          int motion_x,
                          int motion_y,
                          int h,
                          int is_mpeg12,
                          int is_16x8,
                          int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y,
        uvsrc_x, uvsrc_y, v_edge_pos, block_y_half;
    ptrdiff_t uvlinesize, linesize;

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->current_picture.f->linesize[0] << field_based;
    uvlinesize = s->current_picture.f->linesize[1] << field_based;
    block_y_half = (field_based | is_16x8);

    dxy   = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x = s->mb_x * 16 + (motion_x >> 1);
    src_y = (mb_y << (4 - block_y_half)) + (motion_y >> 1);

    if (!is_mpeg12 && s->out_format == FMT_H263) {
        if ((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based) {
            mx      = (motion_x >> 1) | (motion_x & 1);
            my      = motion_y >> 1;
            uvdxy   = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x * 8 + (mx >> 1);
            uvsrc_y = (mb_y << (3 - block_y_half)) + (my >> 1);
        } else {
            uvdxy   = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
            uvsrc_x = src_x >> 1;
            uvsrc_y = src_y >> 1;
        }
    // Even chroma MVs are full-pel in H.261
    } else if (!is_mpeg12 && s->out_format == FMT_H261) {
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvdxy   = 0;
        uvsrc_x = s->mb_x * 8 + mx;
        uvsrc_y = mb_y * 8 + my;
    } else {
        if (s->chroma_y_shift) {
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvdxy   = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x * 8 + (mx >> 1);
            uvsrc_y = (mb_y << (3 - block_y_half)) + (my >> 1);
        } else {
            if (s->chroma_x_shift) {
                // Chroma422
                mx      = motion_x / 2;
                uvdxy   = ((motion_y & 1) << 1) | (mx & 1);
                uvsrc_x = s->mb_x * 8 + (mx >> 1);
                uvsrc_y = src_y;
            } else {
                // Chroma444
                uvdxy   = dxy;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 15   , 0) ||
        (unsigned)src_y >= FFMAX(   v_edge_pos - (motion_y & 1) - h + 1, 0)) {
        if (is_mpeg12 ||
            s->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
            s->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            av_log(s->avctx, AV_LOG_DEBUG,
                   "MPEG motion vector out of boundary (%d %d)\n", src_x,
                   src_y);
            return;
        }
        src_y = (unsigned)src_y << field_based;
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
            if (s->workaround_bugs & FF_BUG_IEDGE)
                vbuf -= s->uvlinesize;
            uvsrc_y = (unsigned)uvsrc_y << field_based;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    /* FIXME use this for field pix too instead of the obnoxious hack which
     * changes picture.data */
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    pix_op[0][dxy](dest_y, ptr_y, linesize, h);

    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
    }
    if (!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
        s->out_format == FMT_H261) {
        ff_h261_loop_filter(s);
    }
}
/* apply one MPEG motion vector to the three components */
static void mpeg_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        int field_select, uint8_t **ref_picture,
                        op_pixels_func (*pix_op)[4],
                        int motion_x, int motion_y, int h, int is_16x8, int mb_y)
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, is_16x8, mb_y);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, is_16x8, mb_y);
}

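/* As mpeg_motion(), but compensates one field (top or bottom) of the block. */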
static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              int bottom_field, int field_select,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4],
                              int motion_x, int motion_y, int h, int mb_y)
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, 0, mb_y);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, 0, mb_y);
}

// FIXME: SIMDify, avg variant, 16x16 version
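/* Blend one 8x8 block from the centre prediction and its four neighbours;
 * the per-pixel OBMC weights below sum to 8 (hence the >>3 in OBMC_FILTER). */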
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride)
{
    int x;
    uint8_t *const top    = src[1];
    uint8_t *const left   = src[2];
    uint8_t *const mid    = src[0];
    uint8_t *const right  = src[3];
    uint8_t *const bottom = src[4];
#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x         , t, l, m, r, b);\
    OBMC_FILTER(x+1       , t, l, m, r, b);\
    OBMC_FILTER(x  +stride, t, l, m, r, b);\
    OBMC_FILTER(x+1+stride, t, l, m, r, b);

    x = 0;
    OBMC_FILTER (x    , 2, 2, 4, 0, 0);
    OBMC_FILTER (x + 1, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 2, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 4, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 6, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 7, 2, 0, 4, 2, 0);
    x += stride;
    OBMC_FILTER (x    , 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 1, 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 6, 1, 0, 5, 2, 0);
    OBMC_FILTER (x + 7, 1, 0, 5, 2, 0);
    x += stride;
    OBMC_FILTER4(x    , 1, 2, 5, 0, 0);
    OBMC_FILTER4(x + 2, 1, 1, 6, 0, 0);
    OBMC_FILTER4(x + 4, 1, 0, 6, 1, 0);
    OBMC_FILTER4(x + 6, 1, 0, 5, 2, 0);
    x += 2 * stride;
    OBMC_FILTER4(x    , 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 6, 0, 1);
    OBMC_FILTER4(x + 4, 0, 0, 6, 1, 1);
    OBMC_FILTER4(x + 6, 0, 0, 5, 2, 1);
    x += 2*stride;
    OBMC_FILTER (x    , 0, 2, 5, 0, 1);
    OBMC_FILTER (x + 1, 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 5, 0, 2);
    OBMC_FILTER4(x + 4, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 2, 1);
    OBMC_FILTER (x + 7, 0, 0, 5, 2, 1);
    x += stride;
    OBMC_FILTER (x    , 0, 2, 4, 0, 2);
    OBMC_FILTER (x + 1, 0, 1, 5, 0, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 7, 0, 0, 4, 2, 2);
}

/* obmc for 1 8x8 luma block */
static inline void obmc_motion(MpegEncContext *s,
                               uint8_t *dest, uint8_t *src,
                               int src_x, int src_y,
                               op_pixels_func *pix_op,
466
                               int16_t mv[5][2] /* mid top left right bottom */)
#define MID    0
{
    int i;
    uint8_t *ptr[5];

    av_assert2(s->quarter_sample == 0);

    for (i = 0; i < 5; i++) {
        if (i && mv[i][0] == mv[MID][0] && mv[i][1] == mv[MID][1]) {
            ptr[i] = ptr[MID];
        } else {
            ptr[i] = s->sc.obmc_scratchpad + 8 * (i & 1) +
                     s->linesize * 8 * (i >> 1);
            hpel_motion(s, ptr[i], src, src_x, src_y, pix_op,
                        mv[i][0], mv[i][1]);
        }
    }

    put_obmc(dest, ptr, s->linesize);
}

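/* Quarter-pel motion compensation of the luma of one macroblock (or one field
 * of it); chroma is compensated at half-pel precision with a derived vector. */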
static inline void qpel_motion(MpegEncContext *s,
                               uint8_t *dest_y,
                               uint8_t *dest_cb,
                               uint8_t *dest_cr,
                               int field_based, int bottom_field,
                               int field_select, uint8_t **ref_picture,
                               op_pixels_func (*pix_op)[4],
                               qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos;
    ptrdiff_t linesize, uvlinesize;

    dxy   = ((motion_y & 3) << 2) | (motion_x & 3);

    src_x = s->mb_x *  16                 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->linesize   << field_based;
    uvlinesize = s->uvlinesize << field_based;

    if (field_based) {
        mx = motion_x / 2;
        my = motion_y >> 1;
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA2) {
        static const int rtab[8] = { 0, 0, 1, 1, 0, 0, 0, 1 };
        mx = (motion_x >> 1) + rtab[motion_x & 7];
        my = (motion_y >> 1) + rtab[motion_y & 7];
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA) {
        mx = (motion_x >> 1) | (motion_x & 1);
        my = (motion_y >> 1) | (motion_y & 1);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
    }
    mx = (mx >> 1) | (mx & 1);
    my = (my >> 1) | (my & 1);

    uvdxy = (mx & 1) | ((my & 1) << 1);
    mx  >>= 1;
    my  >>= 1;

    uvsrc_x = s->mb_x *  8                 + mx;
    uvsrc_y = s->mb_y * (8 >> field_based) + my;

    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 15   , 0) ||
        (unsigned)src_y >= FFMAX(   v_edge_pos - (motion_y & 3) - h + 1, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y * (1 << field_based),
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
            if (s->workaround_bugs & FF_BUG_IEDGE)
                vbuf -= s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    if (!field_based)
        qpix_op[0][dxy](dest_y, ptr_y, linesize);
    else {
        if (bottom_field) {
            dest_y  += s->linesize;
            dest_cb += s->uvlinesize;
            dest_cr += s->uvlinesize;
        }

        if (field_select) {
            ptr_y  += s->linesize;
            ptr_cb += s->uvlinesize;
            ptr_cr += s->uvlinesize;
        }
        // damn interlaced mode
        // FIXME boundary mirroring is not exactly correct here
        qpix_op[1][dxy](dest_y, ptr_y, linesize);
        qpix_op[1][dxy](dest_y + 8, ptr_y + 8, linesize);
    }
    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
        pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
    }
}

/**
 * H.263 chroma 4mv motion compensation.
 */
static void chroma_4mv_motion(MpegEncContext *s,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              uint8_t **ref_picture,
                              op_pixels_func *pix_op,
                              int mx, int my)
{
    uint8_t *ptr;
    int src_x, src_y, dxy, emu = 0;
    ptrdiff_t offset;

    /* In case of 8X8, we construct a single chroma motion vector
     * with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    dxy  = ((my & 1) << 1) | (mx & 1);
    mx >>= 1;
    my >>= 1;

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = av_clip(src_x, -8, (s->width >> 1));
    if (src_x == (s->width >> 1))
        dxy &= ~1;
    src_y = av_clip(src_y, -8, (s->height >> 1));
    if (src_y == (s->height >> 1))
        dxy &= ~2;

    offset = src_y * s->uvlinesize + src_x;
    ptr    = ref_picture[1] + offset;
    if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - (dxy  & 1) - 7, 0) ||
        (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - (dxy >> 1) - 7, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9, src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;
    }
    pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9, src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->sc.edge_emu_buffer;
    }
    pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
}

static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir)
{
    /* fetch pixels for estimated mv 4 macroblocks ahead
     * optimized for 64-byte cache lines */
    const int shift = s->quarter_sample ? 2 : 1;
    const int mx    = (s->mv[dir][0][0] >> shift) + 16 * s->mb_x + 8;
    const int my    = (s->mv[dir][0][1] >> shift) + 16 * s->mb_y;
    int off         = mx + (my + (s->mb_x & 3) * 4) * s->linesize + 64;

    s->vdsp.prefetch(pix[0] + off, s->linesize, 4);
    off = (mx >> 1) + ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize + 64;
    s->vdsp.prefetch(pix[1] + off, pix[2] - pix[1], 2);
659 660
}

661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676
static inline void apply_obmc(MpegEncContext *s,
                              uint8_t *dest_y,
                              uint8_t *dest_cb,
                              uint8_t *dest_cr,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4])
{
    LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
    Picture *cur_frame   = &s->current_picture;
    int mb_x = s->mb_x;
    int mb_y = s->mb_y;
    const int xy         = mb_x + mb_y * s->mb_stride;
    const int mot_stride = s->b8_stride;
    const int mot_xy     = mb_x * 2 + mb_y * 2 * mot_stride;
    int mx, my, i;

    av_assert2(!s->mb_skipped);

    AV_COPY32(mv_cache[1][1], cur_frame->motion_val[0][mot_xy]);
    AV_COPY32(mv_cache[1][2], cur_frame->motion_val[0][mot_xy + 1]);

    AV_COPY32(mv_cache[2][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[2][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    AV_COPY32(mv_cache[3][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[3][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    if (mb_y == 0 || IS_INTRA(cur_frame->mb_type[xy - s->mb_stride])) {
        AV_COPY32(mv_cache[0][1], mv_cache[1][1]);
        AV_COPY32(mv_cache[0][2], mv_cache[1][2]);
    } else {
        AV_COPY32(mv_cache[0][1],
                  cur_frame->motion_val[0][mot_xy - mot_stride]);
        AV_COPY32(mv_cache[0][2],
                  cur_frame->motion_val[0][mot_xy - mot_stride + 1]);
    }

    if (mb_x == 0 || IS_INTRA(cur_frame->mb_type[xy - 1])) {
        AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
        AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
    } else {
        AV_COPY32(mv_cache[1][0], cur_frame->motion_val[0][mot_xy - 1]);
        AV_COPY32(mv_cache[2][0],
                  cur_frame->motion_val[0][mot_xy - 1 + mot_stride]);
    }

    if (mb_x + 1 >= s->mb_width || IS_INTRA(cur_frame->mb_type[xy + 1])) {
        AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
        AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
    } else {
        AV_COPY32(mv_cache[1][3], cur_frame->motion_val[0][mot_xy + 2]);
        AV_COPY32(mv_cache[2][3],
                  cur_frame->motion_val[0][mot_xy + 2 + mot_stride]);
    }

    mx = 0;
    my = 0;
    for (i = 0; i < 4; i++) {
        const int x      = (i & 1) + 1;
        const int y      = (i >> 1) + 1;
        int16_t mv[5][2] = {
            { mv_cache[y][x][0],     mv_cache[y][x][1]         },
            { mv_cache[y - 1][x][0], mv_cache[y - 1][x][1]     },
            { mv_cache[y][x - 1][0], mv_cache[y][x - 1][1]     },
            { mv_cache[y][x + 1][0], mv_cache[y][x + 1][1]     },
            { mv_cache[y + 1][x][0], mv_cache[y + 1][x][1]     }
        };
        // FIXME cleanup
        obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                    ref_picture[0],
                    mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >> 1) * 8,
                    pix_op[1],
                    mv);

        mx += mv[0][0];
        my += mv[0][1];
    }
    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1],
                          mx, my);
}

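/* 4MV prediction: compensate the four 8x8 luma blocks separately (quarter-pel
 * or half-pel) and derive a single chroma vector from the sum of the four
 * luma vectors. */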
static inline void apply_8x8(MpegEncContext *s,
                             uint8_t *dest_y,
                             uint8_t *dest_cb,
                             uint8_t *dest_cr,
                             int dir,
                             uint8_t **ref_picture,
                             qpel_mc_func (*qpix_op)[16],
                             op_pixels_func (*pix_op)[4])
{
    int dxy, mx, my, src_x, src_y;
    int i;
    int mb_x = s->mb_x;
    int mb_y = s->mb_y;
    uint8_t *ptr, *dest;

    mx = 0;
    my = 0;
    if (s->quarter_sample) {
        for (i = 0; i < 4; i++) {
            int motion_x = s->mv[dir][i][0];
            int motion_y = s->mv[dir][i][1];

            dxy   = ((motion_y & 3) << 2) | (motion_x & 3);
            src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
            src_y = mb_y * 16 + (motion_y >> 2) + (i >> 1) * 8;

            /* WARNING: do not forget half pels */
            src_x = av_clip(src_x, -16, s->width);
            if (src_x == s->width)
                dxy &= ~3;
            src_y = av_clip(src_y, -16, s->height);
            if (src_y == s->height)
                dxy &= ~12;

            ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
            if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 7, 0) ||
                (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 3) - 7, 0)) {
                s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                         s->linesize, s->linesize,
                                         9, 9,
                                         src_x, src_y,
                                         s->h_edge_pos,
                                         s->v_edge_pos);
                ptr = s->sc.edge_emu_buffer;
            }
            dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
            qpix_op[1][dxy](dest, ptr, s->linesize);

            mx += s->mv[dir][i][0] / 2;
            my += s->mv[dir][i][1] / 2;
        }
    } else {
        for (i = 0; i < 4; i++) {
            hpel_motion(s,
                        dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                        ref_picture[0],
                        mb_x * 16 + (i & 1) * 8,
                        mb_y * 16 + (i >> 1) * 8,
                        pix_op[1],
                        s->mv[dir][i][0],
                        s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }
    }

    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1], mx, my);
}

/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
828 829
 * @param pix_op halfpel motion compensation function (average or put normally)
 * @param qpix_op qpel motion compensation function (average or put normally)
830 831
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
832
static av_always_inline void mpv_motion_internal(MpegEncContext *s,
833 834 835 836 837 838 839 840
                                                 uint8_t *dest_y,
                                                 uint8_t *dest_cb,
                                                 uint8_t *dest_cr,
                                                 int dir,
                                                 uint8_t **ref_picture,
                                                 op_pixels_func (*pix_op)[4],
                                                 qpel_mc_func (*qpix_op)[16],
                                                 int is_mpeg12)
841
{
842 843
    int i;
    int mb_y = s->mb_y;
844 845 846

    prefetch_motion(s, ref_picture, dir);

847
    if (!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B) {
848
        apply_obmc(s, dest_y, dest_cb, dest_cr, ref_picture, pix_op);
849 850 851
        return;
    }

852
    switch (s->mv_type) {
853
    case MV_TYPE_16X16:
854 855
        if (s->mcsel) {
            if (s->real_sprite_warping_points == 1) {
856 857
                gmc1_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture);
858
            } else {
859
                gmc_motion(s, dest_y, dest_cb, dest_cr,
860
                           ref_picture);
861
            }
862
        } else if (!is_mpeg12 && s->quarter_sample) {
863 864 865 866
            qpel_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, 0,
                        ref_picture, pix_op, qpix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
867
        } else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
868
                   s->mspel && s->codec_id == AV_CODEC_ID_WMV2) {
869
            ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
870 871 872
                            ref_picture, pix_op,
                            s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else {
873
            mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
874
                        ref_picture, pix_op,
875
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, 0, mb_y);
876 877 878
        }
        break;
    case MV_TYPE_8X8:
879 880 881
        if (!is_mpeg12)
            apply_8x8(s, dest_y, dest_cb, dest_cr,
                      dir, ref_picture, qpix_op, pix_op);
882 883 884
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
885 886
            if (!is_mpeg12 && s->quarter_sample) {
                for (i = 0; i < 2; i++)
887 888 889 890
                    qpel_motion(s, dest_y, dest_cb, dest_cr,
                                1, i, s->field_select[dir][i],
                                ref_picture, pix_op, qpix_op,
                                s->mv[dir][i][0], s->mv[dir][i][1], 8);
891
            } else {
892
                /* top field */
893 894 895 896
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  0, s->field_select[dir][0],
                                  ref_picture, pix_op,
                                  s->mv[dir][0][0], s->mv[dir][0][1], 8, mb_y);
897
                /* bottom field */
898 899 900 901
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  1, s->field_select[dir][1],
                                  ref_picture, pix_op,
                                  s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
902 903
            }
        } else {
904 905
            if (   s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field
                || !ref_picture[0]) {
906
                ref_picture = s->current_picture_ptr->f->data;
907 908 909
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
910
                        s->field_select[dir][0],
911
                        ref_picture, pix_op,
912
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, 0, mb_y >> 1);
913 914 915
        }
        break;
    case MV_TYPE_16X8:
916 917
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;
918

919 920
            if ((s->picture_structure == s->field_select[dir][i] + 1
                || s->pict_type == AV_PICTURE_TYPE_B || s->first_field) && ref_picture[0]) {
921 922
                ref2picture = ref_picture;
            } else {
923
                ref2picture = s->current_picture_ptr->f->data;
924 925 926
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
927
                        s->field_select[dir][i],
928
                        ref2picture, pix_op,
929 930
                        s->mv[dir][i][0], s->mv[dir][i][1],
                        8, 1, (mb_y & ~1) + i);
931

932 933 934
            dest_y  += 16 * s->linesize;
            dest_cb += (16 >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (16 >> s->chroma_y_shift) * s->uvlinesize;
935 936 937
        }
        break;
    case MV_TYPE_DMV:
938 939
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
940
                int j;
941
                for (j = 0; j < 2; j++)
942
                    mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
943 944 945
                                      j, j ^ i, ref_picture, pix_op,
                                      s->mv[dir][2 * i + j][0],
                                      s->mv[dir][2 * i + j][1], 8, mb_y);
946
                pix_op = s->hdsp.avg_pixels_tab;
947
            }
948
        } else {
949
            if (!ref_picture[0]) {
950
                ref_picture = s->current_picture_ptr->f->data;
951
            }
952
            for (i = 0; i < 2; i++) {
953
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
954
                            s->picture_structure != i + 1,
955
                            ref_picture, pix_op,
956
                            s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
957
                            16, 0, mb_y >> 1);
958 959

                // after put we make avg of the same block
960
                pix_op = s->hdsp.avg_pixels_tab;
961

962 963 964
                /* opposite parity is always in the same frame if this is
                 * second field */
                if (!s->first_field) {
965
                    ref_picture = s->current_picture_ptr->f->data;
966 967 968
                }
            }
        }
969
        break;
970
    default: av_assert2(0);
971 972 973
    }
}

974
void ff_mpv_motion(MpegEncContext *s,
975 976 977 978 979
                   uint8_t *dest_y, uint8_t *dest_cb,
                   uint8_t *dest_cr, int dir,
                   uint8_t **ref_picture,
                   op_pixels_func (*pix_op)[4],
                   qpel_mc_func (*qpix_op)[16])
Keiji Costantini's avatar
981
#if !CONFIG_SMALL
982
    if (s->out_format == FMT_MPEG1)
983
        mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
Keiji Costantini's avatar
    else
#endif
987
        mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
Keiji Costantini's avatar
}