/*
 * Feeble Files/ScummVM DXA decoder
 * Copyright (c) 2007 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DXA Video decoder
 */

#include <stdio.h>
#include <stdlib.h>

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "bytestream.h"
#include "avcodec.h"
#include "internal.h"

#include <zlib.h>

/*
 * Decoder context
 */
typedef struct DxaDecContext {
    AVFrame *prev;

    int dsize;
#define DECOMP_BUF_PADDING 16
    uint8_t *decomp_buf;
    uint32_t pal[256];
} DxaDecContext;

static const int shift1[6] = { 0, 8, 8, 8, 4, 4 };
static const int shift2[6] = { 0, 0, 8, 4, 0, 4 };

static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst,
                     int stride, uint8_t *src, int srcsize, uint8_t *ref)
{
    uint8_t *code, *data, *mv, *msk, *tmp, *tmp2;
    uint8_t *src_end = src + srcsize;
    int i, j, k;
    int type, x, y, d, d2;
    uint32_t mask;

    if (12ULL + ((avctx->width * avctx->height) >> 4) + AV_RB32(src + 0) + AV_RB32(src + 4) > srcsize)
        return AVERROR_INVALIDDATA;

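    /* A method 12/13 chunk starts with a 12-byte header: its first two
     * big-endian 32-bit words give the sizes of the block data and motion
     * vector streams (the remaining header bytes are not read here).  The
     * header is followed by one opcode byte per 4x4 block, then the data,
     * motion vector and mask streams. */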
    code = src  + 12;
    data = code + ((avctx->width * avctx->height) >> 4);
    mv   = data + AV_RB32(src + 0);
    msk  = mv   + AV_RB32(src + 4);

    for(j = 0; j < avctx->height; j += 4){
        for(i = 0; i < avctx->width; i += 4){
            if (data > src_end || mv > src_end || msk > src_end)
                return AVERROR_INVALIDDATA;
            tmp  = dst + i;
            tmp2 = ref + i;
            type = *code++;
            switch(type){
            case 4: // motion compensation
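                /* each motion vector is one byte: x in the high nibble, y in
                 * the low nibble; bit 3 of a nibble marks a negative value
                 * (8 + m decodes to -m), giving a range of -7..7 */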
                x = (*mv) >> 4;    if(x & 8) x = 8 - x;
                y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
                if (i < -x || avctx->width  - i - 4 < x ||
                    j < -y || avctx->height - j - 4 < y) {
                    av_log(avctx, AV_LOG_ERROR, "MV %d %d out of bounds\n", x,y);
                    return AVERROR_INVALIDDATA;
                }
                tmp2 += x + y*stride;
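                /* fall through: the copy below now reads from the
                 * motion-compensated position in the reference frame */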
            case 0: // skip
            case 5: // skip in method 12
                for(y = 0; y < 4; y++){
                    memcpy(tmp, tmp2, 4);
                    tmp  += stride;
                    tmp2 += stride;
                }
                break;
            case 1:  // masked change
            case 10: // masked change with only half of pixels changed
            case 11: // cases 10-15 are for method 12 only
            case 12:
            case 13:
            case 14:
            case 15:
                if(type == 1){
                    mask = AV_RB16(msk);
                    msk += 2;
                }else{
                    type -= 10;
                    mask = ((msk[0] & 0xF0) << shift1[type]) | ((msk[0] & 0xF) << shift2[type]);
                    msk++;
                }
                for(y = 0; y < 4; y++){
                    for(x = 0; x < 4; x++){
                        tmp[x] = (mask & 0x8000) ? *data++ : tmp2[x];
                        mask <<= 1;
                    }
                    tmp  += stride;
                    tmp2 += stride;
                }
                break;
            case 2: // fill block
                for(y = 0; y < 4; y++){
                    memset(tmp, data[0], 4);
                    tmp += stride;
                }
                data++;
                break;
            case 3: // raw block
                for(y = 0; y < 4; y++){
                    memcpy(tmp, data, 4);
                    data += 4;
                    tmp  += stride;
                }
                break;
            case 8: // subblocks - method 13 only
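                /* one mask byte holds a 2-bit mode for each of the four 2x2 subblocks */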
                mask = *msk++;
                for(k = 0; k < 4; k++){
                    d  = ((k & 1) << 1) + ((k & 2) * stride);
                    d2 = ((k & 1) << 1) + ((k & 2) * stride);
                    tmp2 = ref + i + d2;
                    switch(mask & 0xC0){
                    case 0x80: // motion compensation
                        x = (*mv) >> 4;    if(x & 8) x = 8 - x;
                        y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
                        if (i + 2*(k & 1) < -x || avctx->width  - i - 2*(k & 1) - 2 < x ||
                            j +   (k & 2) < -y || avctx->height - j -   (k & 2) - 2 < y) {
                            av_log(avctx, AV_LOG_ERROR, "MV %d %d out of bounds\n", x,y);
                            return AVERROR_INVALIDDATA;
                        }
                        tmp2 += x + y*stride;
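                        /* fall through: copy the 2x2 subblock from the shifted reference */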
                    case 0x00: // skip
                        tmp[d + 0         ] = tmp2[0];
                        tmp[d + 1         ] = tmp2[1];
                        tmp[d + 0 + stride] = tmp2[0 + stride];
                        tmp[d + 1 + stride] = tmp2[1 + stride];
                        break;
                    case 0x40: // fill
                        tmp[d + 0         ] = data[0];
                        tmp[d + 1         ] = data[0];
                        tmp[d + 0 + stride] = data[0];
                        tmp[d + 1 + stride] = data[0];
                        data++;
                        break;
                    case 0xC0: // raw
                        tmp[d + 0         ] = *data++;
                        tmp[d + 1         ] = *data++;
                        tmp[d + 0 + stride] = *data++;
                        tmp[d + 1 + stride] = *data++;
                        break;
                    }
                    mask <<= 2;
                }
                break;
            case 32: // vector quantization - 2 colors
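                /* data[] holds the color table; the mask supplies one index
                 * bit per pixel (two bits for types 33/34), LSB first */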
                mask = AV_RB16(msk);
                msk += 2;
                for(y = 0; y < 4; y++){
                    for(x = 0; x < 4; x++){
                        tmp[x] = data[mask & 1];
                        mask >>= 1;
                    }
                    tmp  += stride;
                    tmp2 += stride;
                }
                data += 2;
                break;
            case 33: // vector quantization - 3 or 4 colors
            case 34:
                mask = AV_RB32(msk);
                msk += 4;
                for(y = 0; y < 4; y++){
                    for(x = 0; x < 4; x++){
                        tmp[x] = data[mask & 3];
                        mask >>= 2;
                    }
                    tmp  += stride;
                    tmp2 += stride;
                }
                data += type - 30;
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "Unknown opcode %d\n", type);
                return AVERROR_INVALIDDATA;
            }
        }
        dst += stride * 4;
        ref += stride * 4;
    }
    return 0;
}

static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    AVFrame *frame = data;
    DxaDecContext * const c = avctx->priv_data;
    uint8_t *outptr, *srcptr, *tmpptr;
    unsigned long dsize;
    int i, j, compr, ret;
    int stride;
    int pc = 0;
    GetByteContext gb;

    bytestream2_init(&gb, avpkt->data, avpkt->size);

    /* make the palette available on the way out */
    if (bytestream2_peek_le32(&gb) == MKTAG('C','M','A','P')) {
        bytestream2_skip(&gb, 4);
        for(i = 0; i < 256; i++){
            c->pal[i] = 0xFFU << 24 | bytestream2_get_be24(&gb);
        }
        pc = 1;
    }

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;
    memcpy(frame->data[1], c->pal, AVPALETTE_SIZE);
    frame->palette_has_changed = pc;

    outptr = frame->data[0];
    srcptr = c->decomp_buf;
    tmpptr = c->prev->data[0];
    stride = frame->linesize[0];

    if (bytestream2_get_le32(&gb) == MKTAG('N','U','L','L'))
        compr = -1;
    else
        compr = bytestream2_get_byte(&gb);

    dsize = c->dsize;
    if (compr != 4 && compr != -1) {
        bytestream2_skip(&gb, 4);
        if (uncompress(c->decomp_buf, &dsize, avpkt->data + bytestream2_tell(&gb),
                       bytestream2_get_bytes_left(&gb)) != Z_OK) {
            av_log(avctx, AV_LOG_ERROR, "Uncompress failed!\n");
            return AVERROR_UNKNOWN;
        }
        memset(c->decomp_buf + dsize, 0, DECOMP_BUF_PADDING);
    }

    if (avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(avctx, AV_LOG_DEBUG, "compr:%2d, dsize:%d\n", compr, (int)dsize);

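    /* compression types handled below: -1 ('NULL' chunk, repeat the previous
     * frame), 2/4 intra frame copied from the buffer, 3/5 frame XORed against
     * the previous one, 12/13 ScummVM block coding handled by decode_13() */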
    switch(compr){
    case -1:
        frame->key_frame = 0;
        frame->pict_type = AV_PICTURE_TYPE_P;
        if (c->prev->data[0])
            memcpy(frame->data[0], c->prev->data[0], frame->linesize[0] * avctx->height);
        else{ // Should happen only when the first frame is 'NULL'
            memset(frame->data[0], 0, frame->linesize[0] * avctx->height);
            frame->key_frame = 1;
            frame->pict_type = AV_PICTURE_TYPE_I;
        }
        break;
    case 2:
    case 4:
        frame->key_frame = 1;
        frame->pict_type = AV_PICTURE_TYPE_I;
        for (j = 0; j < avctx->height; j++) {
            memcpy(outptr, srcptr, avctx->width);
            outptr += stride;
            srcptr += avctx->width;
        }
        break;
    case 3:
    case 5:
        if (!tmpptr) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
            if (!(avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL))
                return AVERROR_INVALIDDATA;
        }
        frame->key_frame = 0;
        frame->pict_type = AV_PICTURE_TYPE_P;
        for (j = 0; j < avctx->height; j++) {
            if(tmpptr){
                for(i = 0; i < avctx->width; i++)
                    outptr[i] = srcptr[i] ^ tmpptr[i];
                tmpptr += stride;
            }else
                memcpy(outptr, srcptr, avctx->width);
            outptr += stride;
            srcptr += avctx->width;
        }
        break;
    case 12: // ScummVM coding
    case 13:
        frame->key_frame = 0;
        frame->pict_type = AV_PICTURE_TYPE_P;
        if (!c->prev->data[0]) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame\n");
            return AVERROR_INVALIDDATA;
        }
        ret = decode_13(avctx, c, frame->data[0], frame->linesize[0], srcptr, dsize, c->prev->data[0]);
        if (ret < 0)
            return ret;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown/unsupported compression type %d\n", compr);
        return AVERROR_INVALIDDATA;
    }

    av_frame_unref(c->prev);
    if ((ret = av_frame_ref(c->prev, frame)) < 0)
        return ret;

    *got_frame = 1;

    /* always report that the buffer was completely consumed */
    return avpkt->size;
}

static av_cold int decode_init(AVCodecContext *avctx)
{
    DxaDecContext * const c = avctx->priv_data;

    if (avctx->width%4 || avctx->height%4) {
        avpriv_request_sample(avctx, "dimensions are not a multiple of 4");
        return AVERROR_INVALIDDATA;
    }

    c->prev = av_frame_alloc();
    if (!c->prev)
        return AVERROR(ENOMEM);

    avctx->pix_fmt = AV_PIX_FMT_PAL8;

    c->dsize = avctx->width * avctx->height * 2;
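    /* decompression buffer for the zlib-coded chunks; DECOMP_BUF_PADDING
     * bytes of extra space are zeroed after each uncompress() call so that
     * the block decoder in decode_13() can read slightly past the reported
     * size without touching uninitialized memory */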
    c->decomp_buf = av_malloc(c->dsize + DECOMP_BUF_PADDING);
    if (!c->decomp_buf) {
        av_frame_free(&c->prev);
        av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
        return AVERROR(ENOMEM);
    }

    return 0;
}

static av_cold int decode_end(AVCodecContext *avctx)
{
    DxaDecContext * const c = avctx->priv_data;

    av_freep(&c->decomp_buf);
    av_frame_free(&c->prev);

    return 0;
}

AVCodec ff_dxa_decoder = {
    .name           = "dxa",
    .long_name      = NULL_IF_CONFIG_SMALL("Feeble Files/ScummVM DXA"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_DXA,
    .priv_data_size = sizeof(DxaDecContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
};