Commit 61904467 authored by Michael Niedermayer

Merge commit 'e83c1e2d'

* commit 'e83c1e2d':
  avs: return meaningful error codes.
  aura: return meaningful error codes.
  asvdec: return meaningful error codes.
  ansi: return a meaningful error code
  anm: return meaningful error codes
  aasc: return meaningful error codes.
  8bps: return meaningful error codes.
  4xm: operate with pointers to AVFrames instead of whole structs.
  4xm: eliminate a pointless indirection

Conflicts:
	libavcodec/4xm.c
	libavcodec/aasc.c
	libavcodec/anm.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents a2aeaff4 e83c1e2d
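
All of the per-codec changes below follow the same pattern: bare "return -1" statements are replaced with specific AVERROR values, and the return value of a failing call is stored and propagated to the caller. The following is a minimal illustrative sketch of that pattern only, not code from this merge; it assumes the libavcodec-internal headers of this era (avcodec.h, internal.h), and the function name example_decode and the width check are hypothetical.

/* Illustrative sketch (not part of the merge): the error-return pattern
 * applied across the codecs in the diff below. */
#include "avcodec.h"
#include "internal.h"

static int example_decode(AVCodecContext *avctx, AVFrame *frame)
{
    int ret;

    /* reject clearly invalid input with a meaningful code instead of -1 */
    if (avctx->width & 3)
        return AVERROR(EINVAL);

    /* keep the value returned by the failing call and pass it on */
    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    return 0;
}

The 4xm change additionally turns the two embedded AVFrame members into heap-allocated pointers, which is why decode_init() gains avcodec_alloc_frame() calls and decode_end() frees the frames with avcodec_free_frame().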
--- a/libavcodec/4xm.c
+++ b/libavcodec/4xm.c
@@ -130,7 +130,7 @@ typedef struct CFrameBuffer {
 typedef struct FourXContext {
     AVCodecContext *avctx;
     DSPContext dsp;
-    AVFrame current_picture, last_picture;
+    AVFrame *current_picture, *last_picture;
     GetBitContext pre_gb;          ///< ac/dc prefix
     GetBitContext gb;
     GetByteContext g;
@@ -261,9 +261,9 @@ static void init_mv(FourXContext *f)
     for (i = 0; i < 256; i++) {
         if (f->version > 1)
-            f->mv[i] = mv[i][0] + mv[i][1] * f->current_picture.linesize[0] / 2;
+            f->mv[i] = mv[i][0] + mv[i][1] * f->current_picture->linesize[0] / 2;
         else
-            f->mv[i] = (i & 15) - 8 + ((i >> 4) - 8) * f->current_picture.linesize[0] / 2;
+            f->mv[i] = (i & 15) - 8 + ((i >> 4) - 8) * f->current_picture->linesize[0] / 2;
     }
 }
@@ -340,7 +340,7 @@ static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
     int code = get_vlc2(&f->gb,
                         block_type_vlc[1 - (f->version > 1)][index].table,
                         BLOCK_TYPE_VLC_BITS, 1);
-    uint16_t *start = (uint16_t *)f->last_picture.data[0];
+    uint16_t *start = (uint16_t *)f->last_picture->data[0];
     uint16_t *end   = start + stride * (f->avctx->height - h + 1) - (1 << log2w);

     av_assert2(code >= 0 && code <= 6);
@@ -409,9 +409,9 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
     int x, y;
     const int width  = f->avctx->width;
     const int height = f->avctx->height;
-    uint16_t *src    = (uint16_t *)f->last_picture.data[0];
-    uint16_t *dst    = (uint16_t *)f->current_picture.data[0];
-    const int stride = f->current_picture.linesize[0] >> 1;
+    uint16_t *src    = (uint16_t *)f->last_picture->data[0];
+    uint16_t *dst    = (uint16_t *)f->current_picture->data[0];
+    const int stride = f->current_picture->linesize[0] >> 1;
     unsigned int bitstream_size, bytestream_size, wordstream_size, extra,
                  bytestream_offset, wordstream_offset;
@@ -522,9 +522,9 @@ static int decode_i_block(FourXContext *f, DCTELEM *block)
 static inline void idct_put(FourXContext *f, int x, int y)
 {
     DCTELEM (*block)[64] = f->block;
-    int stride = f->current_picture.linesize[0] >> 1;
+    int stride = f->current_picture->linesize[0] >> 1;
     int i;
-    uint16_t *dst = ((uint16_t*)f->current_picture.data[0]) + y * stride + x;
+    uint16_t *dst = ((uint16_t*)f->current_picture->data[0]) + y * stride + x;

     for (i = 0; i < 4; i++) {
         block[i][0] += 0x80 * 8 * 8;
@@ -681,8 +681,8 @@ static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length)
     const int width  = f->avctx->width;
     const int height = f->avctx->height;
     const int mbs    = (FFALIGN(width, 16) >> 4) * (FFALIGN(height, 16) >> 4);
-    uint16_t *dst    = (uint16_t*)f->current_picture.data[0];
-    const int stride =            f->current_picture.linesize[0]>>1;
+    uint16_t *dst    = (uint16_t*)f->current_picture->data[0];
+    const int stride =            f->current_picture->linesize[0]>>1;
     const uint8_t *buf_end = buf + length;
     GetByteContext g3;
@@ -867,9 +867,9 @@ static int decode_frame(AVCodecContext *avctx, void *data,
         frame_size = buf_size - 12;
     }

-    FFSWAP(AVFrame, f->current_picture, f->last_picture);
+    FFSWAP(AVFrame*, f->current_picture, f->last_picture);

-    p                  = &f->current_picture;
+    p                  = f->current_picture;
     avctx->coded_frame = p;

     // alternatively we would have to use our own buffer management
@@ -894,14 +894,14 @@ static int decode_frame(AVCodecContext *avctx, void *data,
             return ret;
         }
     } else if (frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")) {
-        if (!f->last_picture.data[0]) {
-            f->last_picture.reference = 3;
-            if ((ret = ff_get_buffer(avctx, &f->last_picture)) < 0) {
+        if (!f->last_picture->data[0]) {
+            f->last_picture->reference = 3;
+            if ((ret = ff_get_buffer(avctx, f->last_picture)) < 0) {
                 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                 return ret;
             }
             for (i=0; i<avctx->height; i++)
-                memset(f->last_picture.data[0] + i*f->last_picture.linesize[0], 0, 2*avctx->width);
+                memset(f->last_picture->data[0] + i*f->last_picture->linesize[0], 0, 2*avctx->width);
         }

         p->pict_type = AV_PICTURE_TYPE_P;
@@ -927,16 +927,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
     return buf_size;
 }

-static av_cold void common_init(AVCodecContext *avctx)
-{
-    FourXContext * const f = avctx->priv_data;
-
-    ff_dsputil_init(&f->dsp, avctx);
-
-    f->avctx = avctx;
-}
-
 static av_cold int decode_init(AVCodecContext *avctx)
 {
     FourXContext * const f = avctx->priv_data;
@@ -953,7 +943,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
     avcodec_get_frame_defaults(&f->current_picture);
     avcodec_get_frame_defaults(&f->last_picture);
     f->version = AV_RL32(avctx->extradata) >> 16;
-    common_init(avctx);
+    ff_dsputil_init(&f->dsp, avctx);
+    f->avctx = avctx;
     init_vlcs(f);

     if (f->version > 2)
@@ -961,6 +952,14 @@ static av_cold int decode_init(AVCodecContext *avctx)
     else
         avctx->pix_fmt = AV_PIX_FMT_BGR555;

+    f->current_picture = avcodec_alloc_frame();
+    f->last_picture    = avcodec_alloc_frame();
+    if (!f->current_picture || !f->last_picture) {
+        avcodec_free_frame(&f->current_picture);
+        avcodec_free_frame(&f->last_picture);
+        return AVERROR(ENOMEM);
+    }
+
     return 0;
 }
@@ -977,10 +976,12 @@ static av_cold int decode_end(AVCodecContext *avctx)
         f->cfrm[i].allocated_size = 0;
     }
     ff_free_vlc(&f->pre_vlc);
-    if (f->current_picture.data[0])
-        avctx->release_buffer(avctx, &f->current_picture);
-    if (f->last_picture.data[0])
-        avctx->release_buffer(avctx, &f->last_picture);
+    if (f->current_picture->data[0])
+        avctx->release_buffer(avctx, f->current_picture);
+    if (f->last_picture->data[0])
+        avctx->release_buffer(avctx, f->last_picture);
+    avcodec_free_frame(&f->current_picture);
+    avcodec_free_frame(&f->last_picture);

     return 0;
 }
--- a/libavcodec/8bps.c
+++ b/libavcodec/8bps.c
@@ -68,15 +68,16 @@ static int decode_frame(AVCodecContext *avctx, void *data,
     unsigned char count;
     unsigned int planes     = c->planes;
     unsigned char *planemap = c->planemap;
+    int ret;

     if (c->pic.data[0])
         avctx->release_buffer(avctx, &c->pic);

     c->pic.reference    = 0;
     c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
-    if (ff_get_buffer(avctx, &c->pic) < 0){
+    if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-        return -1;
+        return ret;
     }

     /* Set data pointer after line lengths */
@@ -96,14 +97,14 @@ static int decode_frame(AVCodecContext *avctx, void *data,
             /* Decode a row of this plane */
             while (dlen > 0) {
                 if (dp + 1 >= buf + buf_size)
-                    return -1;
+                    return AVERROR_INVALIDDATA;
                 if ((count = *dp++) <= 127) {
                     count++;
                     dlen -= count + 1;
                     if (pixptr + count * planes > pixptr_end)
                         break;
                     if (dp + count > buf + buf_size)
-                        return -1;
+                        return AVERROR_INVALIDDATA;
                     while (count--) {
                         *pixptr = *dp++;
                         pixptr += planes;
@@ -181,7 +182,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
     default:
         av_log(avctx, AV_LOG_ERROR, "Error: Unsupported color depth: %u.\n",
                avctx->bits_per_coded_sample);
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     return 0;
--- a/libavcodec/aasc.c
+++ b/libavcodec/aasc.c
@@ -81,7 +81,7 @@ static int aasc_decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
     AascContext *s     = avctx->priv_data;
-    int compr, i, stride, psize;
+    int compr, i, stride, psize, ret;

     if (buf_size < 4) {
         av_log(avctx, AV_LOG_ERROR, "frame too short\n");
@@ -90,9 +90,9 @@ static int aasc_decode_frame(AVCodecContext *avctx,
     s->frame.reference    = 3;
     s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
-    if (avctx->reget_buffer(avctx, &s->frame)) {
+    if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
-        return -1;
+        return ret;
     }

     compr = AV_RL32(buf);
@@ -124,7 +124,7 @@ static int aasc_decode_frame(AVCodecContext *avctx,
             break;
         default:
             av_log(avctx, AV_LOG_ERROR, "Unknown compression type %d\n", compr);
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
         break;
     default:
--- a/libavcodec/anm.c
+++ b/libavcodec/anm.c
@@ -114,7 +114,7 @@ static int decode_frame(AVCodecContext *avctx,
     uint8_t *dst, *dst_end;
     int count, ret;

-    if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
+    if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0){
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
--- a/libavcodec/ansi.c
+++ b/libavcodec/ansi.c
@@ -434,8 +434,8 @@ static int decode_frame(AVCodecContext *avctx,
                 av_log(avctx, AV_LOG_WARNING, "args overflow (%i)\n", s->nb_args);
             if (s->nb_args < MAX_NB_ARGS && s->args[s->nb_args] >= 0)
                 s->nb_args++;
-            if (execute_code(avctx, buf[0]) < 0)
-                return -1;
+            if ((ret = execute_code(avctx, buf[0])) < 0)
+                return ret;
             s->state = STATE_NORMAL;
         }
         break;
--- a/libavcodec/asvdec.c
+++ b/libavcodec/asvdec.c
@@ -108,7 +108,7 @@ static inline int asv1_decode_block(ASV1Context *a, DCTELEM block[64])
             break;
         if (ccp < 0 || i >= 10) {
             av_log(a->avctx, AV_LOG_ERROR, "coded coeff pattern damaged\n");
-            return -1;
+            return AVERROR_INVALIDDATA;
         }

         if (ccp & 8)
@@ -210,15 +210,15 @@ static int decode_frame(AVCodecContext *avctx,
     int buf_size     = avpkt->size;
     AVFrame *picture = data;
     AVFrame * const p = &a->picture;
-    int mb_x, mb_y;
+    int mb_x, mb_y, ret;

     if (p->data[0])
         avctx->release_buffer(avctx, p);

     p->reference = 0;
-    if (ff_get_buffer(avctx, p) < 0) {
+    if ((ret = ff_get_buffer(avctx, p)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-        return -1;
+        return ret;
     }
     p->pict_type = AV_PICTURE_TYPE_I;
     p->key_frame = 1;
@@ -240,8 +240,8 @@ static int decode_frame(AVCodecContext *avctx,
     for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
         for (mb_x = 0; mb_x < a->mb_width2; mb_x++) {
-            if (decode_mb(a, a->block) < 0)
-                return -1;
+            if ((ret = decode_mb(a, a->block)) < 0)
+                return ret;

             idct_put(a, mb_x, mb_y);
         }
@@ -250,8 +250,8 @@ static int decode_frame(AVCodecContext *avctx,
     if (a->mb_width2 != a->mb_width) {
         mb_x = a->mb_width2;
         for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
-            if (decode_mb(a, a->block) < 0)
-                return -1;
+            if ((ret = decode_mb(a, a->block)) < 0)
+                return ret;

             idct_put(a, mb_x, mb_y);
         }
@@ -260,8 +260,8 @@ static int decode_frame(AVCodecContext *avctx,
     if (a->mb_height2 != a->mb_height) {
         mb_y = a->mb_height2;
         for (mb_x = 0; mb_x < a->mb_width; mb_x++) {
-            if (decode_mb(a, a->block) < 0)
-                return -1;
+            if ((ret = decode_mb(a, a->block)) < 0)
+                return ret;

             idct_put(a, mb_x, mb_y);
         }
--- a/libavcodec/aura.c
+++ b/libavcodec/aura.c
@@ -39,7 +39,7 @@ static av_cold int aura_decode_init(AVCodecContext *avctx)
     s->avctx = avctx;
     /* width needs to be divisible by 4 for this codec to work */
     if (avctx->width & 0x3)
-        return -1;
+        return AVERROR(EINVAL);
     avctx->pix_fmt = AV_PIX_FMT_YUV422P;

     avcodec_get_frame_defaults(&s->frame);
@@ -53,7 +53,7 @@ static int aura_decode_frame(AVCodecContext *avctx,
     AuraDecodeContext *s = avctx->priv_data;
     uint8_t *Y, *U, *V;
     uint8_t val;
-    int x, y;
+    int x, y, ret;
     const uint8_t *buf = pkt->data;

     /* prediction error tables (make it clear that they are signed values) */
@@ -62,7 +62,7 @@ static int aura_decode_frame(AVCodecContext *avctx,
     if (pkt->size != 48 + avctx->height * avctx->width) {
         av_log(avctx, AV_LOG_ERROR, "got a buffer with %d bytes when %d were expected\n",
                pkt->size, 48 + avctx->height * avctx->width);
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     /* pixel data starts 48 bytes in, after 3x16-byte tables */
@@ -73,9 +73,9 @@ static int aura_decode_frame(AVCodecContext *avctx,
     s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
     s->frame.reference    = 0;
-    if (ff_get_buffer(avctx, &s->frame) < 0) {
+    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-        return -1;
+        return ret;
     }

     Y = s->frame.data[0];
--- a/libavcodec/avs.c
+++ b/libavcodec/avs.c
@@ -54,14 +54,14 @@ avs_decode_frame(AVCodecContext * avctx,
     AVFrame *const p = &avs->picture;
     const uint8_t *table, *vect;
     uint8_t *out;
-    int i, j, x, y, stride, vect_w = 3, vect_h = 3;
+    int i, j, x, y, stride, ret, vect_w = 3, vect_h = 3;
     AvsVideoSubType sub_type;
     AvsBlockType type;
     GetBitContext change_map = {0}; //init to silence warning

-    if (avctx->reget_buffer(avctx, p)) {
+    if ((ret = avctx->reget_buffer(avctx, p)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
-        return -1;
+        return ret;
     }
     p->reference = 3;
     p->pict_type = AV_PICTURE_TYPE_P;
@@ -96,7 +96,7 @@ avs_decode_frame(AVCodecContext * avctx,
     }

     if (type != AVS_VIDEO)
-        return -1;
+        return AVERROR_INVALIDDATA;

     switch (sub_type) {
     case AVS_I_FRAME:
@@ -118,7 +118,7 @@ avs_decode_frame(AVCodecContext * avctx,
         break;

     default:
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     if (buf_end - buf < 256 * vect_w * vect_h)