Commit 9ad477d9 authored by Michael Niedermayer

Merge commit 'e4155f15'

* commit 'e4155f15':
  eamad: use the AVFrame API properly.
  dpxenc: use the AVFrame API properly.
  bmpenc: use the AVFrame API properly.
  sgienc: use the AVFrame API properly.

Conflicts:
	libavcodec/bmpenc.c
	libavcodec/dpxenc.c
	libavcodec/eamad.c
	libavcodec/sgienc.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents 8d193a24 e4155f15
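
The three encoder patches (bmpenc, dpxenc, sgienc) apply the same change: stop casting away const on the input frame to set pict_type/key_frame on it, and instead give the codec its own avctx->coded_frame, allocated in init and released in a new close callback. A minimal sketch of that lifecycle, assuming a hypothetical codec (the toy_* names and the omitted packet writing are illustrative, not part of the patch; the libavutil/libavcodec calls shown are the real ones used below):

#include "libavutil/attributes.h"
#include "libavutil/error.h"
#include "libavutil/frame.h"
#include "avcodec.h"

static av_cold int toy_encode_init(AVCodecContext *avctx)
{
    avctx->coded_frame = av_frame_alloc();   /* owned by the codec from now on */
    if (!avctx->coded_frame)
        return AVERROR(ENOMEM);
    return 0;
}

static int toy_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *pict, int *got_packet)
{
    const AVFrame * const p = pict;          /* input stays const, no cast needed */

    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;

    /* ... allocate pkt and write the image data from p ... */
    *got_packet = 1;
    return 0;
}

static av_cold int toy_encode_close(AVCodecContext *avctx)
{
    av_frame_free(&avctx->coded_frame);      /* paired with init, hooked up via .close */
    return 0;
}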
libavcodec/bmpenc.c

@@ -60,22 +60,26 @@ static av_cold int bmp_encode_init(AVCodecContext *avctx){
         return AVERROR(EINVAL);
     }
 
+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
+
     return 0;
 }
 
 static int bmp_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pict, int *got_packet)
 {
+    const AVFrame * const p = pict;
     int n_bytes_image, n_bytes_per_row, n_bytes, i, n, hsize, ret;
     const uint32_t *pal = NULL;
     uint32_t palette256[256];
     int pad_bytes_per_row, pal_entries = 0, compression = BMP_RGB;
     int bit_count = avctx->bits_per_coded_sample;
     uint8_t *ptr, *buf;
-    AVFrame * const p = (AVFrame *)pict;
 
-    p->pict_type= AV_PICTURE_TYPE_I;
-    p->key_frame= 1;
+    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+    avctx->coded_frame->key_frame = 1;
     switch (avctx->pix_fmt) {
     case AV_PIX_FMT_RGB444:
         compression = BMP_BITFIELDS;
@@ -159,6 +163,12 @@ static int bmp_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     return 0;
 }
 
+static av_cold int bmp_encode_close(AVCodecContext *avctx)
+{
+    av_frame_free(&avctx->coded_frame);
+    return 0;
+}
+
 AVCodec ff_bmp_encoder = {
     .name           = "bmp",
     .long_name      = NULL_IF_CONFIG_SMALL("BMP (Windows and OS/2 bitmap)"),
@@ -166,6 +176,7 @@ AVCodec ff_bmp_encoder = {
     .id             = AV_CODEC_ID_BMP,
     .init           = bmp_encode_init,
     .encode2        = bmp_encode_frame,
+    .close          = bmp_encode_close,
     .pix_fmts       = (const enum AVPixelFormat[]){
         AV_PIX_FMT_BGRA, AV_PIX_FMT_BGR24,
         AV_PIX_FMT_RGB565, AV_PIX_FMT_RGB555, AV_PIX_FMT_RGB444,
...@@ -45,7 +45,7 @@ ...@@ -45,7 +45,7 @@
typedef struct MadContext { typedef struct MadContext {
AVCodecContext *avctx; AVCodecContext *avctx;
DSPContext dsp; DSPContext dsp;
AVFrame last_frame; AVFrame *last_frame;
GetBitContext gb; GetBitContext gb;
void *bitstream_buf; void *bitstream_buf;
unsigned int bitstream_buf_size; unsigned int bitstream_buf_size;
@@ -65,6 +65,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
     ff_init_scantable_permutation(s->dsp.idct_permutation, FF_NO_IDCT_PERM);
     ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
     ff_mpeg12_init_vlcs();
 
+    s->last_frame = av_frame_alloc();
+    if (!s->last_frame)
+        return AVERROR(ENOMEM);
+
     return 0;
 }
@@ -82,22 +87,22 @@ static inline void comp_block(MadContext *t, AVFrame *frame,
                               int j, int mv_x, int mv_y, int add)
 {
     if (j < 4) {
-        unsigned offset = (mb_y*16 + ((j&2)<<2) + mv_y)*t->last_frame.linesize[0] + mb_x*16 + ((j&1)<<3) + mv_x;
-        if (offset >= (t->avctx->height - 7) * t->last_frame.linesize[0] - 7)
+        unsigned offset = (mb_y*16 + ((j&2)<<2) + mv_y)*t->last_frame->linesize[0] + mb_x*16 + ((j&1)<<3) + mv_x;
+        if (offset >= (t->avctx->height - 7) * t->last_frame->linesize[0] - 7)
             return;
         comp(frame->data[0] + (mb_y*16 + ((j&2)<<2))*frame->linesize[0] + mb_x*16 + ((j&1)<<3),
              frame->linesize[0],
-             t->last_frame.data[0] + offset,
-             t->last_frame.linesize[0], add);
+             t->last_frame->data[0] + offset,
+             t->last_frame->linesize[0], add);
     } else if (!(t->avctx->flags & CODEC_FLAG_GRAY)) {
         int index = j - 3;
-        unsigned offset = (mb_y * 8 + (mv_y/2))*t->last_frame.linesize[index] + mb_x * 8 + (mv_x/2);
-        if (offset >= (t->avctx->height/2 - 7) * t->last_frame.linesize[index] - 7)
+        unsigned offset = (mb_y * 8 + (mv_y/2))*t->last_frame->linesize[index] + mb_x * 8 + (mv_x/2);
+        if (offset >= (t->avctx->height/2 - 7) * t->last_frame->linesize[index] - 7)
             return;
         comp(frame->data[index] + (mb_y*8)*frame->linesize[index] + mb_x * 8,
              frame->linesize[index],
-             t->last_frame.data[index] + offset,
-             t->last_frame.linesize[index], add);
+             t->last_frame->data[index] + offset,
+             t->last_frame->linesize[index], add);
     }
 }
@@ -205,7 +210,7 @@ static int decode_mb(MadContext *s, AVFrame *frame, int inter)
         for (j=0; j<6; j++) {
             if (mv_map & (1<<j)) {  // mv_x and mv_y are guarded by mv_map
                 int add = 2*decode_motion(&s->gb);
-                if (s->last_frame.data[0])
+                if (s->last_frame->data[0])
                     comp_block(s, frame, s->mb_x, s->mb_y, j, mv_x, mv_y, add);
             } else {
                 s->dsp.clear_block(s->block);
@@ -263,7 +268,7 @@ static int decode_frame(AVCodecContext *avctx,
     }
 
     if (avctx->width != width || avctx->height != height) {
-        av_frame_unref(&s->last_frame);
+        av_frame_unref(s->last_frame);
         if((width * height)/2048*7 > buf_end-buf)
             return AVERROR_INVALIDDATA;
         if ((ret = ff_set_dimensions(avctx, width, height)) < 0)
@@ -273,17 +278,17 @@ static int decode_frame(AVCodecContext *avctx,
     if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
         return ret;
 
-    if (inter && !s->last_frame.data[0]) {
+    if (inter && !s->last_frame->data[0]) {
         av_log(avctx, AV_LOG_WARNING, "Missing reference frame.\n");
-        ret = ff_get_buffer(avctx, &s->last_frame, AV_GET_BUFFER_FLAG_REF);
+        ret = ff_get_buffer(avctx, s->last_frame, AV_GET_BUFFER_FLAG_REF);
         if (ret < 0)
             return ret;
-        memset(s->last_frame.data[0], 0, s->last_frame.height *
-               s->last_frame.linesize[0]);
-        memset(s->last_frame.data[1], 0x80, s->last_frame.height / 2 *
-               s->last_frame.linesize[1]);
-        memset(s->last_frame.data[2], 0x80, s->last_frame.height / 2 *
-               s->last_frame.linesize[2]);
+        memset(s->last_frame->data[0], 0, s->last_frame->height *
+               s->last_frame->linesize[0]);
+        memset(s->last_frame->data[1], 0x80, s->last_frame->height / 2 *
+               s->last_frame->linesize[1]);
+        memset(s->last_frame->data[2], 0x80, s->last_frame->height / 2 *
+               s->last_frame->linesize[2]);
     }
 
     av_fast_padded_malloc(&s->bitstream_buf, &s->bitstream_buf_size,
@@ -302,8 +307,8 @@ static int decode_frame(AVCodecContext *avctx,
     *got_frame = 1;
 
     if (chunk_type != MADe_TAG) {
-        av_frame_unref(&s->last_frame);
-        if ((ret = av_frame_ref(&s->last_frame, frame)) < 0)
+        av_frame_unref(s->last_frame);
+        if ((ret = av_frame_ref(s->last_frame, frame)) < 0)
             return ret;
     }
@@ -313,7 +318,7 @@ static int decode_frame(AVCodecContext *avctx,
 static av_cold int decode_end(AVCodecContext *avctx)
 {
     MadContext *t = avctx->priv_data;
-    av_frame_unref(&t->last_frame);
+    av_frame_free(&t->last_frame);
     av_free(t->bitstream_buf);
     return 0;
 }
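
The eamad change above is the decoder-side version of the same cleanup: MadContext keeps its reference frame as a heap-allocated AVFrame* instead of an embedded struct, so it can use av_frame_ref()/av_frame_unref() to manage the reference and av_frame_free() at close. A reduced sketch of that ownership pattern (ToyContext and the toy_* helpers are illustrative, not FFmpeg API; the av_frame_* calls are the real ones):

#include "libavutil/error.h"
#include "libavutil/frame.h"

typedef struct ToyContext {
    AVFrame *last_frame;                 /* was: AVFrame last_frame; */
} ToyContext;

static int toy_init(ToyContext *s)
{
    s->last_frame = av_frame_alloc();    /* allocates the struct only, no image data yet */
    return s->last_frame ? 0 : AVERROR(ENOMEM);
}

static int toy_keep_reference(ToyContext *s, AVFrame *decoded)
{
    av_frame_unref(s->last_frame);                 /* drop the previous reference */
    return av_frame_ref(s->last_frame, decoded);   /* share the new frame's buffers */
}

static void toy_close(ToyContext *s)
{
    av_frame_free(&s->last_frame);       /* unrefs, frees the struct, NULLs the pointer */
}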
libavcodec/sgienc.c

@@ -34,6 +34,9 @@ static av_cold int encode_init(AVCodecContext *avctx)
         av_log(avctx, AV_LOG_ERROR, "SGI does not support resolutions above 65535x65535\n");
         return -1;
     }
 
+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
     return 0;
 }
@@ -41,14 +44,14 @@ static av_cold int encode_init(AVCodecContext *avctx)
 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                         const AVFrame *frame, int *got_packet)
 {
-    AVFrame * const p = (AVFrame *)frame;
+    const AVFrame * const p = frame;
     uint8_t *offsettab, *lengthtab, *in_buf, *encode_buf, *buf;
     int x, y, z, length, tablesize, ret;
     unsigned int width, height, depth, dimension, bytes_per_channel, pixmax, put_be;
     unsigned char *end_buf;
 
-    p->pict_type = AV_PICTURE_TYPE_I;
-    p->key_frame = 1;
+    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+    avctx->coded_frame->key_frame = 1;
 
     width  = avctx->width;
     height = avctx->height;
@@ -199,6 +202,12 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     return 0;
 }
 
+static av_cold int encode_close(AVCodecContext *avctx)
+{
+    av_frame_free(&avctx->coded_frame);
+    return 0;
+}
+
 AVCodec ff_sgi_encoder = {
     .name      = "sgi",
     .long_name = NULL_IF_CONFIG_SMALL("SGI image"),
@@ -206,6 +215,7 @@ AVCodec ff_sgi_encoder = {
     .id        = AV_CODEC_ID_SGI,
     .init      = encode_init,
     .encode2   = encode_frame,
+    .close     = encode_close,
     .pix_fmts  = (const enum AVPixelFormat[]){
         AV_PIX_FMT_RGB24, AV_PIX_FMT_RGBA,
         AV_PIX_FMT_RGB48LE, AV_PIX_FMT_RGB48BE,