/*
 * PGS subtitle decoder
 * Copyright (c) 2009 Stephen Backway
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * PGS subtitle decoder
 */

#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"
#include "mathops.h"

#include "libavutil/colorspace.h"
#include "libavutil/imgutils.h"

#define RGBA(r,g,b,a) (((a) << 24) | ((r) << 16) | ((g) << 8) | (b))

#define MAX_EPOCH_PALETTES 8   // Max 8 allowed per PGS epoch
#define MAX_EPOCH_OBJECTS  64  // Max 64 allowed per PGS epoch
#define MAX_OBJECT_REFS    2   // Max objects per display set

/* Segment type codes as they appear in the PGS bitstream. */
enum SegmentType {
    PALETTE_SEGMENT      = 0x14,
    OBJECT_SEGMENT       = 0x15,
    PRESENTATION_SEGMENT = 0x16,
    WINDOW_SEGMENT       = 0x17,
    DISPLAY_SEGMENT      = 0x80,
};

/* One object reference inside a presentation segment: which cached object
 * to show, in which window, at what position, with optional cropping. */
typedef struct PGSSubObjectRef {
    int     id;               // object id to look up in the epoch cache
    int     window_id;
    uint8_t composition_flag; // 0x80 = cropped, 0x40 = forced
    int     x;                // placement on the video frame
    int     y;
    int     crop_x;           // crop rectangle, valid only when 0x80 is set
    int     crop_y;
    int     crop_w;
    int     crop_h;
} PGSSubObjectRef;

60 61
typedef struct PGSSubPresentation {
    int id_number;
62 63 64
    int palette_id;
    int object_count;
    PGSSubObjectRef objects[MAX_OBJECT_REFS];
65
    int64_t pts;
66 67
} PGSSubPresentation;

/* One cached composition object: its dimensions plus the (possibly still
 * incomplete) RLE bitmap data accumulated across object segments. */
typedef struct PGSSubObject {
    int          id;                 // object id from the stream
    int          w;                  // bitmap width in pixels
    int          h;                  // bitmap height in pixels
    uint8_t      *rle;               // RLE payload buffer (av_fast_malloc'd)
    unsigned int rle_buffer_size, rle_data_len;
    unsigned int rle_remaining_len;  // bytes still expected from follow-up segments
} PGSSubObject;

/* Epoch-scoped cache of decoded composition objects. */
typedef struct PGSSubObjects {
    int          count;  // number of valid entries in object[]
    PGSSubObject object[MAX_EPOCH_OBJECTS];
} PGSSubObjects;

/* One palette: id plus a 256-entry RGBA color lookup table. */
typedef struct PGSSubPalette {
    int         id;         // palette id from the stream
    uint32_t    clut[256];  // RGBA entries built by parse_palette_segment()
} PGSSubPalette;

/* Epoch-scoped cache of decoded palettes. */
typedef struct PGSSubPalettes {
    int           count;  // number of valid entries in palette[]
    PGSSubPalette palette[MAX_EPOCH_PALETTES];
} PGSSubPalettes;

typedef struct PGSSubContext {
    PGSSubPresentation presentation;
94 95
    PGSSubPalettes     palettes;
    PGSSubObjects      objects;
96 97
} PGSSubContext;

98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133
/**
 * Release all epoch state: free every cached object's RLE buffer and
 * empty both the object and palette caches.
 */
static void flush_cache(AVCodecContext *avctx)
{
    PGSSubContext *ctx = avctx->priv_data;
    int n;

    for (n = 0; n < ctx->objects.count; n++) {
        PGSSubObject *obj = &ctx->objects.object[n];

        av_freep(&obj->rle);
        obj->rle_buffer_size   = 0;
        obj->rle_remaining_len = 0;
    }
    ctx->objects.count  = 0;
    ctx->palettes.count = 0;
}

/* Look up a cached object by id; returns NULL if it is not in the epoch. */
static PGSSubObject * find_object(int id, PGSSubObjects *objects)
{
    PGSSubObject *obj = objects->object;
    PGSSubObject *end = obj + objects->count;

    for (; obj < end; obj++)
        if (obj->id == id)
            return obj;
    return NULL;
}

/* Look up a cached palette by id; returns NULL if it is not in the epoch. */
static PGSSubPalette * find_palette(int id, PGSSubPalettes *palettes)
{
    PGSSubPalette *pal = palettes->palette;
    PGSSubPalette *end = pal + palettes->count;

    for (; pal < end; pal++)
        if (pal->id == id)
            return pal;
    return NULL;
}

134 135
static av_cold int init_decoder(AVCodecContext *avctx)
{
136
    avctx->pix_fmt = AV_PIX_FMT_PAL8;
137 138 139 140 141 142

    return 0;
}

/* Decoder close: drop all cached epoch state. */
static av_cold int close_decoder(AVCodecContext *avctx)
{
    flush_cache(avctx);

    return 0;
}

/**
149
 * Decode the RLE data.
150 151 152 153 154 155 156 157
 *
 * The subtitle is stored as an Run Length Encoded image.
 *
 * @param avctx contains the current codec context
 * @param sub pointer to the processed subtitle data
 * @param buf pointer to the RLE data to process
 * @param buf_size size of the RLE data to process
 */
158
static int decode_rle(AVCodecContext *avctx, AVSubtitleRect *rect,
159 160 161 162 163 164 165
                      const uint8_t *buf, unsigned int buf_size)
{
    const uint8_t *rle_bitmap_end;
    int pixel_count, line_count;

    rle_bitmap_end = buf + buf_size;

166
    rect->pict.data[0] = av_malloc(rect->w * rect->h);
167

168 169
    if (!rect->pict.data[0])
        return AVERROR(ENOMEM);
170 171 172 173

    pixel_count = 0;
    line_count  = 0;

174
    while (buf < rle_bitmap_end && line_count < rect->h) {
175 176 177 178 179 180 181 182 183 184 185 186 187 188
        uint8_t flags, color;
        int run;

        color = bytestream_get_byte(&buf);
        run   = 1;

        if (color == 0x00) {
            flags = bytestream_get_byte(&buf);
            run   = flags & 0x3f;
            if (flags & 0x40)
                run = (run << 8) + bytestream_get_byte(&buf);
            color = flags & 0x80 ? bytestream_get_byte(&buf) : 0;
        }

189 190
        if (run > 0 && pixel_count + run <= rect->w * rect->h) {
            memset(rect->pict.data[0] + pixel_count, color, run);
191 192 193 194 195 196
            pixel_count += run;
        } else if (!run) {
            /*
             * New Line. Check if correct pixels decoded, if not display warning
             * and adjust bitmap pointer to correct new line position.
             */
197
            if (pixel_count % rect->w > 0) {
198
                av_log(avctx, AV_LOG_ERROR, "Decoded %d pixels, when line should be %d pixels\n",
199 200 201 202 203
                       pixel_count % rect->w, rect->w);
                if (avctx->err_recognition & AV_EF_EXPLODE) {
                    return AVERROR_INVALIDDATA;
                }
            }
204 205 206 207
            line_count++;
        }
    }

208
    if (pixel_count < rect->w * rect->h) {
209
        av_log(avctx, AV_LOG_ERROR, "Insufficient RLE data for subtitle\n");
210
        return AVERROR_INVALIDDATA;
211 212
    }

213
    ff_dlog(avctx, "Pixel Count = %d, Area = %d\n", pixel_count, rect->w * rect->h);
214 215 216 217 218

    return 0;
}

/**
219
 * Parse the picture segment packet.
220 221 222 223 224 225 226 227
 *
 * The picture segment contains details on the sequence id,
 * width, height and Run Length Encoded (RLE) bitmap data.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 */
228
static int parse_object_segment(AVCodecContext *avctx,
229 230 231
                                  const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;
232
    PGSSubObject *object;
233 234 235

    uint8_t sequence_desc;
    unsigned int rle_bitmap_len, width, height;
236
    int id;
237

238
    if (buf_size <= 4)
239
        return AVERROR_INVALIDDATA;
240 241
    buf_size -= 4;

242 243 244 245 246 247 248 249 250 251 252 253 254
    id = bytestream_get_be16(&buf);
    object = find_object(id, &ctx->objects);
    if (!object) {
        if (ctx->objects.count >= MAX_EPOCH_OBJECTS) {
            av_log(avctx, AV_LOG_ERROR, "Too many objects in epoch\n");
            return AVERROR_INVALIDDATA;
        }
        object = &ctx->objects.object[ctx->objects.count++];
        object->id = id;
    }

    /* skip object version number */
    buf += 1;
255 256 257 258 259

    /* Read the Sequence Description to determine if start of RLE data or appended to previous RLE */
    sequence_desc = bytestream_get_byte(&buf);

    if (!(sequence_desc & 0x80)) {
260
        /* Additional RLE data */
261 262
        if (buf_size > object->rle_remaining_len)
            return AVERROR_INVALIDDATA;
263

264 265 266
        memcpy(object->rle + object->rle_data_len, buf, buf_size);
        object->rle_data_len += buf_size;
        object->rle_remaining_len -= buf_size;
267

268
        return 0;
269 270
    }

271
    if (buf_size <= 7)
272
        return AVERROR_INVALIDDATA;
273 274 275 276
    buf_size -= 7;

    /* Decode rle bitmap length, stored size includes width/height data */
    rle_bitmap_len = bytestream_get_be24(&buf) - 2*2;
277

278 279 280 281 282 283 284
    if (buf_size > rle_bitmap_len) {
        av_log(avctx, AV_LOG_ERROR,
               "Buffer dimension %d larger than the expected RLE data %d\n",
               buf_size, rle_bitmap_len);
        return AVERROR_INVALIDDATA;
    }

285 286 287 288 289
    /* Get bitmap dimensions from data */
    width  = bytestream_get_be16(&buf);
    height = bytestream_get_be16(&buf);

    /* Make sure the bitmap is not too large */
290
    if (avctx->width < width || avctx->height < height) {
291
        av_log(avctx, AV_LOG_ERROR, "Bitmap dimensions larger than video.\n");
292
        return AVERROR_INVALIDDATA;
293 294
    }

295 296
    object->w = width;
    object->h = height;
297

298
    av_fast_malloc(&object->rle, &object->rle_buffer_size, rle_bitmap_len);
299

300 301
    if (!object->rle)
        return AVERROR(ENOMEM);
302

303 304 305
    memcpy(object->rle, buf, buf_size);
    object->rle_data_len = buf_size;
    object->rle_remaining_len = rle_bitmap_len - buf_size;
306 307 308 309 310

    return 0;
}

/**
311
 * Parse the palette segment packet.
312 313 314 315 316 317 318 319
 *
 * The palette segment contains details of the palette,
 * a maximum of 256 colors can be defined.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 */
320
static int parse_palette_segment(AVCodecContext *avctx,
321 322 323
                                  const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;
324
    PGSSubPalette *palette;
325 326

    const uint8_t *buf_end = buf + buf_size;
327
    const uint8_t *cm      = ff_crop_tab + MAX_NEG_CROP;
328 329 330
    int color_id;
    int y, cb, cr, alpha;
    int r, g, b, r_add, g_add, b_add;
331 332 333 334 335 336 337 338 339 340 341 342
    int id;

    id  = bytestream_get_byte(&buf);
    palette = find_palette(id, &ctx->palettes);
    if (!palette) {
        if (ctx->palettes.count >= MAX_EPOCH_PALETTES) {
            av_log(avctx, AV_LOG_ERROR, "Too many palettes in epoch\n");
            return AVERROR_INVALIDDATA;
        }
        palette = &ctx->palettes.palette[ctx->palettes.count++];
        palette->id  = id;
    }
343

344 345
    /* Skip palette version */
    buf += 1;
346 347 348 349 350

    while (buf < buf_end) {
        color_id  = bytestream_get_byte(&buf);
        y         = bytestream_get_byte(&buf);
        cr        = bytestream_get_byte(&buf);
351
        cb        = bytestream_get_byte(&buf);
352 353 354 355 356
        alpha     = bytestream_get_byte(&buf);

        YUV_TO_RGB1(cb, cr);
        YUV_TO_RGB2(r, g, b, y);

357
        ff_dlog(avctx, "Color %d := (%d,%d,%d,%d)\n", color_id, r, g, b, alpha);
358 359

        /* Store color in palette */
360
        palette->clut[color_id] = RGBA(r,g,b,alpha);
361
    }
362
    return 0;
363 364 365
}

/**
366
 * Parse the presentation segment packet.
367 368 369 370 371 372 373 374 375
 *
 * The presentation segment contains details on the video
 * width, video height, x & y subtitle position.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @todo TODO: Implement cropping
 */
376 377 378
static int parse_presentation_segment(AVCodecContext *avctx,
                                      const uint8_t *buf, int buf_size,
                                      int64_t pts)
379 380 381
{
    PGSSubContext *ctx = avctx->priv_data;

382
    int i, state, ret;
383

384
    // Video descriptor
385 386
    int w = bytestream_get_be16(&buf);
    int h = bytestream_get_be16(&buf);
387

388 389
    ctx->presentation.pts = pts;

390
    ff_dlog(avctx, "Video Dimensions %dx%d\n",
391
            w, h);
392 393 394
    ret = ff_set_dimensions(avctx, w, h);
    if (ret < 0)
        return ret;
395

396
    /* Skip 1 bytes of unknown, frame rate */
397 398
    buf++;

399
    // Composition descriptor
400
    ctx->presentation.id_number = bytestream_get_be16(&buf);
Reimar Döffinger's avatar
Reimar Döffinger committed
401
    /*
402 403 404 405 406 407 408
     * state is a 2 bit field that defines pgs epoch boundaries
     * 00 - Normal, previously defined objects and palettes are still valid
     * 01 - Acquisition point, previous objects and palettes can be released
     * 10 - Epoch start, previous objects and palettes can be released
     * 11 - Epoch continue, previous objects and palettes can be released
     *
     * reserved 6 bits discarded
Reimar Döffinger's avatar
Reimar Döffinger committed
409
     */
410 411 412 413
    state = bytestream_get_byte(&buf) >> 6;
    if (state != 0) {
        flush_cache(avctx);
    }
Reimar Döffinger's avatar
Reimar Döffinger committed
414 415

    /*
416
     * skip palette_update_flag (0x80),
Reimar Döffinger's avatar
Reimar Döffinger committed
417
     */
418 419 420 421 422 423 424 425 426 427 428 429
    buf += 1;
    ctx->presentation.palette_id = bytestream_get_byte(&buf);
    ctx->presentation.object_count = bytestream_get_byte(&buf);
    if (ctx->presentation.object_count > MAX_OBJECT_REFS) {
        av_log(avctx, AV_LOG_ERROR,
               "Invalid number of presentation objects %d\n",
               ctx->presentation.object_count);
        ctx->presentation.object_count = 2;
        if (avctx->err_recognition & AV_EF_EXPLODE) {
            return AVERROR_INVALIDDATA;
        }
    }
Reimar Döffinger's avatar
Reimar Döffinger committed
430

431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446
    for (i = 0; i < ctx->presentation.object_count; i++)
    {
        ctx->presentation.objects[i].id = bytestream_get_be16(&buf);
        ctx->presentation.objects[i].window_id = bytestream_get_byte(&buf);
        ctx->presentation.objects[i].composition_flag = bytestream_get_byte(&buf);

        ctx->presentation.objects[i].x = bytestream_get_be16(&buf);
        ctx->presentation.objects[i].y = bytestream_get_be16(&buf);

        // If cropping
        if (ctx->presentation.objects[i].composition_flag & 0x80) {
            ctx->presentation.objects[i].crop_x = bytestream_get_be16(&buf);
            ctx->presentation.objects[i].crop_y = bytestream_get_be16(&buf);
            ctx->presentation.objects[i].crop_w = bytestream_get_be16(&buf);
            ctx->presentation.objects[i].crop_h = bytestream_get_be16(&buf);
        }
Reimar Döffinger's avatar
Reimar Döffinger committed
447

448
        ff_dlog(avctx, "Subtitle Placement x=%d, y=%d\n",
449 450 451 452 453 454 455 456 457 458 459 460 461 462
                ctx->presentation.objects[i].x, ctx->presentation.objects[i].y);

        if (ctx->presentation.objects[i].x > avctx->width ||
            ctx->presentation.objects[i].y > avctx->height) {
            av_log(avctx, AV_LOG_ERROR, "Subtitle out of video bounds. x = %d, y = %d, video width = %d, video height = %d.\n",
                   ctx->presentation.objects[i].x,
                   ctx->presentation.objects[i].y,
                    avctx->width, avctx->height);
            ctx->presentation.objects[i].x = 0;
            ctx->presentation.objects[i].y = 0;
            if (avctx->err_recognition & AV_EF_EXPLODE) {
                return AVERROR_INVALIDDATA;
            }
        }
Reimar Döffinger's avatar
Reimar Döffinger committed
463
    }
464

465
    return 0;
466 467 468
}

/**
 * Parse the display segment packet.
 *
 * The display segment controls the updating of the display.
 *
 * @param avctx contains the current codec context
 * @param data pointer to the data pertaining the subtitle to display
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @return 1 when a subtitle (possibly empty) was emitted, negative AVERROR
 *         on failure
 */
static int display_end_segment(AVCodecContext *avctx, void *data,
                               const uint8_t *buf, int buf_size)
{
    AVSubtitle    *sub = data;
    PGSSubContext *ctx = avctx->priv_data;
    PGSSubPalette *palette;
    int i, ret;

    memset(sub, 0, sizeof(*sub));
    sub->pts                = ctx->presentation.pts;
    sub->start_display_time = 0;
    // There is no explicit end time for PGS subtitles.  The end time
    // is defined by the start of the next sub which may contain no
    // objects (i.e. clears the previous sub)
    sub->end_display_time   = UINT32_MAX;
    sub->format             = 0;

    // Blank if last object_count was 0.
    if (!ctx->presentation.object_count)
        return 1;

    sub->rects = av_mallocz(sizeof(*sub->rects) * ctx->presentation.object_count);
    if (!sub->rects)
        return AVERROR(ENOMEM);

    palette = find_palette(ctx->presentation.palette_id, &ctx->palettes);
    if (!palette) {
        // Missing palette.  Should only happen with damaged streams.
        av_log(avctx, AV_LOG_ERROR, "Invalid palette id %d\n",
               ctx->presentation.palette_id);
        avsubtitle_free(sub);
        return AVERROR_INVALIDDATA;
    }

    for (i = 0; i < ctx->presentation.object_count; i++) {
        PGSSubObjectRef *ref = &ctx->presentation.objects[i];
        AVSubtitleRect  *rect;
        PGSSubObject    *object;

        rect = av_mallocz(sizeof(*rect));
        if (!rect) {
            avsubtitle_free(sub);
            return AVERROR(ENOMEM);
        }
        sub->rects[i] = rect;
        sub->num_rects++;
        rect->type = SUBTITLE_BITMAP;

        /* Process bitmap */
        object = find_object(ref->id, &ctx->objects);
        if (!object) {
            // Missing object.  Should only happen with damaged streams.
            av_log(avctx, AV_LOG_ERROR, "Invalid object id %d\n", ref->id);
            if (avctx->err_recognition & AV_EF_EXPLODE) {
                avsubtitle_free(sub);
                return AVERROR_INVALIDDATA;
            }
            // Leaves rect empty with 0 width and height.
            continue;
        }

        if (ref->composition_flag & 0x40)
            rect->flags |= AV_SUBTITLE_FLAG_FORCED;

        rect->x = ref->x;
        rect->y = ref->y;
        rect->w = object->w;
        rect->h = object->h;

        rect->pict.linesize[0] = object->w;

        if (object->rle) {
            if (object->rle_remaining_len) {
                av_log(avctx, AV_LOG_ERROR, "RLE data length %u is %u bytes shorter than expected\n",
                       object->rle_data_len, object->rle_remaining_len);
                if (avctx->err_recognition & AV_EF_EXPLODE) {
                    avsubtitle_free(sub);
                    return AVERROR_INVALIDDATA;
                }
            }
            ret = decode_rle(avctx, rect, object->rle, object->rle_data_len);
            if (ret < 0) {
                if ((avctx->err_recognition & AV_EF_EXPLODE) ||
                    ret == AVERROR(ENOMEM)) {
                    avsubtitle_free(sub);
                    return ret;
                }
                // Undecodable bitmap: keep the rect but make it empty.
                rect->w = 0;
                rect->h = 0;
                continue;
            }
        }

        /* Allocate memory for colors */
        rect->nb_colors    = 256;
        rect->pict.data[1] = av_mallocz(AVPALETTE_SIZE);
        if (!rect->pict.data[1]) {
            avsubtitle_free(sub);
            return AVERROR(ENOMEM);
        }

        memcpy(rect->pict.data[1], palette->clut, rect->nb_colors * sizeof(uint32_t));
    }
    return 1;
}

/**
 * Decode one PGS packet: walk its segments and dispatch each to the
 * appropriate parser; a display segment produces the output subtitle.
 *
 * @param avctx contains the current codec context
 * @param data output AVSubtitle
 * @param data_size set to 1 when a subtitle was produced, else 0
 * @param avpkt packet to decode
 * @return number of bytes consumed, or a negative AVERROR value
 */
static int decode(AVCodecContext *avctx, void *data, int *data_size,
                  AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;

    const uint8_t *buf_end;
    uint8_t       segment_type;
    int           segment_length;
    int i, ret;

    ff_dlog(avctx, "PGS sub packet:\n");

    for (i = 0; i < buf_size; i++) {
        ff_dlog(avctx, "%02x ", buf[i]);
        if (i % 16 == 15)
            ff_dlog(avctx, "\n");
    }

    if (i & 15)
        ff_dlog(avctx, "\n");

    *data_size = 0;

    /* Ensure that we have received at a least a segment code and segment length */
    if (buf_size < 3)
        return -1;

    buf_end = buf + buf_size;

    /* Step through buffer to identify segments.
     * Fix: require a full 3-byte segment header to remain; the previous
     * `buf < buf_end` condition could read the type/length fields past the
     * end of the packet. */
    while (buf_end - buf >= 3) {
        segment_type   = bytestream_get_byte(&buf);
        segment_length = bytestream_get_be16(&buf);

        ff_dlog(avctx, "Segment Length %d, Segment Type %x\n", segment_length, segment_type);

        if (segment_type != DISPLAY_SEGMENT && segment_length > buf_end - buf)
            break;

        ret = 0;
        switch (segment_type) {
        case PALETTE_SEGMENT:
            ret = parse_palette_segment(avctx, buf, segment_length);
            break;
        case OBJECT_SEGMENT:
            ret = parse_object_segment(avctx, buf, segment_length);
            break;
        case PRESENTATION_SEGMENT:
            ret = parse_presentation_segment(avctx, buf, segment_length, avpkt->pts);
            break;
        case WINDOW_SEGMENT:
            /*
             * Window Segment Structure (No new information provided):
             *     2 bytes: Unknown,
             *     2 bytes: X position of subtitle,
             *     2 bytes: Y position of subtitle,
             *     2 bytes: Width of subtitle,
             *     2 bytes: Height of subtitle.
             */
            break;
        case DISPLAY_SEGMENT:
            ret = display_end_segment(avctx, data, buf, segment_length);
            if (ret >= 0)
                *data_size = ret;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unknown subtitle segment type 0x%x, length %d\n",
                   segment_type, segment_length);
            ret = AVERROR_INVALIDDATA;
            break;
        }
        if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE))
            return ret;

        buf += segment_length;
    }

    return buf_size;
}

660
AVCodec ff_pgssub_decoder = {
661
    .name           = "pgssub",
662
    .long_name      = NULL_IF_CONFIG_SMALL("HDMV Presentation Graphic Stream subtitles"),
663
    .type           = AVMEDIA_TYPE_SUBTITLE,
664
    .id             = AV_CODEC_ID_HDMV_PGS_SUBTITLE,
665 666 667 668
    .priv_data_size = sizeof(PGSSubContext),
    .init           = init_decoder,
    .close          = close_decoder,
    .decode         = decode,
669
};