Commit 5b0c70c2 authored by Michael Niedermayer

Merge commit '57e7b3a8'

* commit '57e7b3a8':
  dnxhdenc: use the AVFrame API properly.
  libx264: use the AVFrame API properly.
  svq1enc: use the AVFrame API properly.
  gif: use the AVFrame API properly.

Conflicts:
	libavcodec/gif.c
	libavcodec/svq1enc.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents 3ea168ed 57e7b3a8
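
All four encoders get the same conversion: the AVFrame that used to be embedded in the codec's private context is dropped, avctx->coded_frame is heap-allocated in init, the per-packet metadata (pict_type, key_frame, quality) is written to it during encoding, and it is released in close. A minimal sketch of that lifecycle follows; the "example_*" names are illustrative only and do not appear in the diff below.

/* Sketch of the coded_frame lifecycle adopted by these commits.
 * The "example_*" identifiers are placeholders, not code from the diff. */
#include "libavutil/attributes.h"
#include "libavutil/frame.h"
#include "avcodec.h"

static av_cold int example_encode_init(AVCodecContext *avctx)
{
    /* heap-allocated frame replaces the AVFrame embedded in the private context */
    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame)
        return AVERROR(ENOMEM);

    avctx->coded_frame->key_frame = 1;
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    return 0;
}

static av_cold int example_encode_close(AVCodecContext *avctx)
{
    /* av_frame_free() unreferences the frame, frees it and NULLs the pointer */
    av_frame_free(&avctx->coded_frame);
    return 0;
}

In svq1enc the reference frames additionally become AVFrame pointers, so the three av_frame_move_ref() calls that rotated them are replaced by a single FFSWAP() of the two pointers.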
@@ -329,9 +329,12 @@ static av_cold int dnxhd_encode_init(AVCodecContext *avctx)
     FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_bits,   ctx->m.mb_num * sizeof(uint16_t), fail);
     FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_qscale, ctx->m.mb_num * sizeof(uint8_t),  fail);
-    ctx->frame.key_frame = 1;
-    ctx->frame.pict_type = AV_PICTURE_TYPE_I;
-    ctx->m.avctx->coded_frame = &ctx->frame;
+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
+    avctx->coded_frame->key_frame = 1;
+    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
     if (avctx->thread_count > MAX_THREADS) {
         av_log(avctx, AV_LOG_ERROR, "too many threads\n");
@@ -922,19 +925,14 @@ static void dnxhd_load_picture(DNXHDEncContext *ctx, const AVFrame *frame)
 {
     int i;
-    for (i = 0; i < 3; i++) {
-        ctx->frame.data[i]     = frame->data[i];
-        ctx->frame.linesize[i] = frame->linesize[i];
-    }
     for (i = 0; i < ctx->m.avctx->thread_count; i++) {
-        ctx->thread[i]->m.linesize   = ctx->frame.linesize[0] << ctx->interlaced;
-        ctx->thread[i]->m.uvlinesize = ctx->frame.linesize[1] << ctx->interlaced;
+        ctx->thread[i]->m.linesize   = frame->linesize[0] << ctx->interlaced;
+        ctx->thread[i]->m.uvlinesize = frame->linesize[1] << ctx->interlaced;
         ctx->thread[i]->dct_y_offset  = ctx->m.linesize   * 8;
         ctx->thread[i]->dct_uv_offset = ctx->m.uvlinesize * 8;
     }
-    ctx->frame.interlaced_frame = frame->interlaced_frame;
+    ctx->m.avctx->coded_frame->interlaced_frame = frame->interlaced_frame;
     ctx->cur_field = frame->interlaced_frame && !frame->top_field_first;
 }
@@ -954,9 +952,9 @@ static int dnxhd_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
 encode_coding_unit:
     for (i = 0; i < 3; i++) {
-        ctx->src[i] = ctx->frame.data[i];
+        ctx->src[i] = frame->data[i];
         if (ctx->interlaced && ctx->cur_field)
-            ctx->src[i] += ctx->frame.linesize[i];
+            ctx->src[i] += frame->linesize[i];
     }
     dnxhd_write_header(avctx, buf);
@@ -994,7 +992,7 @@ static int dnxhd_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
         goto encode_coding_unit;
     }
-    ctx->frame.quality = ctx->qscale * FF_QP2LAMBDA;
+    avctx->coded_frame->quality = ctx->qscale * FF_QP2LAMBDA;
     pkt->flags |= AV_PKT_FLAG_KEY;
     *got_packet = 1;
@@ -1027,6 +1025,8 @@ static av_cold int dnxhd_encode_end(AVCodecContext *avctx)
     for (i = 1; i < avctx->thread_count; i++)
         av_freep(&ctx->thread[i]);
+    av_frame_free(&avctx->coded_frame);
     return 0;
 }
......
@@ -43,7 +43,6 @@ typedef struct DNXHDEncContext {
     AVClass *class;
     MpegEncContext m; ///< Used for quantization dsp functions
-    AVFrame frame;
     int cid;
     const CIDEntry *cid_table;
     uint8_t *msip; ///< Macroblock Scan Indexes Payload
......
@@ -216,6 +216,13 @@ static av_cold int gif_encode_init(AVCodecContext *avctx)
         return AVERROR(EINVAL);
     }
+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
+    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+    avctx->coded_frame->key_frame = 1;
     s->lzw  = av_mallocz(ff_lzw_encode_state_size);
     s->buf  = av_malloc(avctx->width * avctx->height * 2);
     s->tmpl = av_malloc(avctx->width);
@@ -232,7 +239,6 @@ static int gif_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pict, int *got_packet)
 {
     GIFContext *s = avctx->priv_data;
-    AVFrame *const p = (AVFrame *)pict;
     uint8_t *outbuf_ptr, *end;
     const uint32_t *palette = NULL;
     int ret;
@@ -242,15 +248,12 @@ static int gif_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     outbuf_ptr = pkt->data;
     end        = pkt->data + pkt->size;
-    p->pict_type = AV_PICTURE_TYPE_I;
-    p->key_frame = 1;
     if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
         uint8_t *pal_exdata = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
         if (!pal_exdata)
             return AVERROR(ENOMEM);
-        memcpy(pal_exdata, p->data[1], AVPALETTE_SIZE);
-        palette = (uint32_t*)p->data[1];
+        memcpy(pal_exdata, pict->data[1], AVPALETTE_SIZE);
+        palette = (uint32_t*)pict->data[1];
     }
     gif_image_write_image(avctx, &outbuf_ptr, end, palette,
@@ -276,6 +279,8 @@ static int gif_encode_close(AVCodecContext *avctx)
 {
     GIFContext *s = avctx->priv_data;
+    av_frame_free(&avctx->coded_frame);
     av_freep(&s->lzw);
     av_freep(&s->buf);
     av_frame_free(&s->last_frame);
......
@@ -44,7 +44,6 @@ typedef struct X264Context {
     x264_picture_t pic;
     uint8_t *sei;
     int sei_size;
-    AVFrame out_pic;
     char *preset;
     char *tune;
     char *profile;
@@ -208,20 +207,20 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame,
     switch (pic_out.i_type) {
     case X264_TYPE_IDR:
     case X264_TYPE_I:
-        x4->out_pic.pict_type = AV_PICTURE_TYPE_I;
+        ctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
         break;
     case X264_TYPE_P:
-        x4->out_pic.pict_type = AV_PICTURE_TYPE_P;
+        ctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
         break;
     case X264_TYPE_B:
     case X264_TYPE_BREF:
-        x4->out_pic.pict_type = AV_PICTURE_TYPE_B;
+        ctx->coded_frame->pict_type = AV_PICTURE_TYPE_B;
         break;
     }
     pkt->flags |= AV_PKT_FLAG_KEY*pic_out.b_keyframe;
     if (ret)
-        x4->out_pic.quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA;
+        ctx->coded_frame->quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA;
     *got_packet = ret;
     return 0;
@@ -237,6 +236,8 @@ static av_cold int X264_close(AVCodecContext *avctx)
     if (x4->enc)
         x264_encoder_close(x4->enc);
+    av_frame_free(&avctx->coded_frame);
     return 0;
 }
@@ -570,7 +571,9 @@ static av_cold int X264_init(AVCodecContext *avctx)
     if (!x4->enc)
         return -1;
-    avctx->coded_frame = &x4->out_pic;
+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
     if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
         x264_nal_t *nal;
......
@@ -45,9 +45,8 @@ typedef struct SVQ1Context {
     AVCodecContext *avctx;
     DSPContext dsp;
    HpelDSPContext hdsp;
-    AVFrame picture;
-    AVFrame current_picture;
-    AVFrame last_picture;
+    AVFrame *current_picture;
+    AVFrame *last_picture;
     PutBitContext pb;
     GetBitContext gb;
@@ -264,13 +263,14 @@ static int svq1_encode_plane(SVQ1Context *s, int plane,
                              unsigned char *decoded_plane,
                              int width, int height, int src_stride, int stride)
 {
+    const AVFrame *f = s->avctx->coded_frame;
     int x, y;
     int i;
     int block_width, block_height;
     int level;
     int threshold[6];
     uint8_t *src = s->scratchbuf + stride * 16;
-    const int lambda = (s->picture.quality * s->picture.quality) >>
+    const int lambda = (f->quality * f->quality) >>
                        (2 * FF_LAMBDA_SHIFT);
     /* figure out the acceptable level thresholds in advance */
@@ -281,7 +281,7 @@ static int svq1_encode_plane(SVQ1Context *s, int plane,
     block_width  = (width  + 15) / 16;
     block_height = (height + 15) / 16;
-    if (s->picture.pict_type == AV_PICTURE_TYPE_P) {
+    if (f->pict_type == AV_PICTURE_TYPE_P) {
         s->m.avctx = s->avctx;
         s->m.current_picture_ptr = &s->m.current_picture;
         s->m.last_picture_ptr = &s->m.last_picture;
@@ -297,13 +297,13 @@ static int svq1_encode_plane(SVQ1Context *s, int plane,
         s->m.mb_stride = s->m.mb_width + 1;
         s->m.b8_stride = 2 * s->m.mb_width + 1;
         s->m.f_code = 1;
-        s->m.pict_type = s->picture.pict_type;
+        s->m.pict_type = f->pict_type;
         s->m.me_method = s->avctx->me_method;
         s->m.me.scene_change_score = 0;
         s->m.flags = s->avctx->flags;
         // s->m.out_format = FMT_H263;
         // s->m.unrestricted_mv = 1;
-        s->m.lambda = s->picture.quality;
+        s->m.lambda = f->quality;
         s->m.qscale = s->m.lambda * 139 +
                       FF_LAMBDA_SCALE * 64 >>
                       FF_LAMBDA_SHIFT + 7;
@@ -396,13 +396,13 @@ static int svq1_encode_plane(SVQ1Context *s, int plane,
             ff_init_block_index(&s->m);
             ff_update_block_index(&s->m);
-            if (s->picture.pict_type == AV_PICTURE_TYPE_I ||
+            if (f->pict_type == AV_PICTURE_TYPE_I ||
                 (s->m.mb_type[x + y * s->m.mb_stride] &
                  CANDIDATE_MB_TYPE_INTRA)) {
                 for (i = 0; i < 6; i++)
                     init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i],
                                   7 * 32);
-                if (s->picture.pict_type == AV_PICTURE_TYPE_P) {
+                if (f->pict_type == AV_PICTURE_TYPE_P) {
                     const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTRA];
                     put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
                     score[0] = vlc[1] * lambda;
@@ -418,7 +418,7 @@ static int svq1_encode_plane(SVQ1Context *s, int plane,
             best = 0;
-            if (s->picture.pict_type == AV_PICTURE_TYPE_P) {
+            if (f->pict_type == AV_PICTURE_TYPE_P) {
                 const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTER];
                 int mx, my, pred_x, pred_y, dxy;
                 int16_t *motion_ptr;
@@ -498,13 +498,48 @@ static int svq1_encode_plane(SVQ1Context *s, int plane,
     return 0;
 }
+static av_cold int svq1_encode_end(AVCodecContext *avctx)
+{
+    SVQ1Context *const s = avctx->priv_data;
+    int i;
+
+    av_log(avctx, AV_LOG_DEBUG, "RD: %f\n",
+           s->rd_total / (double)(avctx->width * avctx->height *
+                                  avctx->frame_number));
+
+    av_freep(&s->m.me.scratchpad);
+    av_freep(&s->m.me.map);
+    av_freep(&s->m.me.score_map);
+    av_freep(&s->mb_type);
+    av_freep(&s->dummy);
+    av_freep(&s->scratchbuf);
+
+    for (i = 0; i < 3; i++) {
+        av_freep(&s->motion_val8[i]);
+        av_freep(&s->motion_val16[i]);
+    }
+
+    av_frame_free(&s->current_picture);
+    av_frame_free(&s->last_picture);
+    av_frame_free(&avctx->coded_frame);
+
+    return 0;
+}
 static av_cold int svq1_encode_init(AVCodecContext *avctx)
 {
     SVQ1Context *const s = avctx->priv_data;
     ff_dsputil_init(&s->dsp, avctx);
     ff_hpeldsp_init(&s->hdsp, avctx->flags);
-    avctx->coded_frame = &s->picture;
+    avctx->coded_frame = av_frame_alloc();
+    s->current_picture = av_frame_alloc();
+    s->last_picture    = av_frame_alloc();
+    if (!avctx->coded_frame || !s->current_picture || !s->last_picture) {
+        svq1_encode_end(avctx);
+        return AVERROR(ENOMEM);
+    }
     s->frame_width  = avctx->width;
     s->frame_height = avctx->height;
@@ -536,8 +571,7 @@ static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *pict, int *got_packet)
 {
     SVQ1Context *const s = avctx->priv_data;
-    AVFrame *const p = &s->picture;
-    AVFrame temp;
+    AVFrame *const p = avctx->coded_frame;
     int i, ret;
     if ((ret = ff_alloc_packet2(avctx, pkt, s->y_block_width * s->y_block_height *
@@ -549,35 +583,33 @@ static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
         return -1;
     }
-    if (!s->current_picture.data[0]) {
-        if ((ret = ff_get_buffer(avctx, &s->current_picture, 0)) < 0 ||
-            (ret = ff_get_buffer(avctx, &s->last_picture, 0))    < 0) {
+    if (!s->current_picture->data[0]) {
+        if ((ret = ff_get_buffer(avctx, s->current_picture, 0)) < 0 ||
+            (ret = ff_get_buffer(avctx, s->last_picture, 0))    < 0) {
             return ret;
         }
-        s->scratchbuf = av_malloc(s->current_picture.linesize[0] * 16 * 2);
+        s->scratchbuf = av_malloc(s->current_picture->linesize[0] * 16 * 2);
     }
-    av_frame_move_ref(&temp, &s->current_picture);
-    av_frame_move_ref(&s->current_picture, &s->last_picture);
-    av_frame_move_ref(&s->last_picture, &temp);
+    FFSWAP(AVFrame*, s->current_picture, s->last_picture);
     init_put_bits(&s->pb, pkt->data, pkt->size);
-    *p = *pict;
     p->pict_type = avctx->gop_size && avctx->frame_number % avctx->gop_size ?
                    AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
     p->key_frame = p->pict_type == AV_PICTURE_TYPE_I;
+    p->quality   = pict->quality;
     svq1_write_header(s, p->pict_type);
     for (i = 0; i < 3; i++)
         if (svq1_encode_plane(s, i,
-                              s->picture.data[i],
-                              s->last_picture.data[i],
-                              s->current_picture.data[i],
+                              pict->data[i],
+                              s->last_picture->data[i],
+                              s->current_picture->data[i],
                               s->frame_width / (i ? 4 : 1),
                               s->frame_height / (i ? 4 : 1),
-                              s->picture.linesize[i],
-                              s->current_picture.linesize[i]) < 0)
+                              pict->linesize[i],
+                              s->current_picture->linesize[i]) < 0)
             return -1;
     // avpriv_align_put_bits(&s->pb);
@@ -594,33 +626,6 @@ static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     return 0;
 }
-static av_cold int svq1_encode_end(AVCodecContext *avctx)
-{
-    SVQ1Context *const s = avctx->priv_data;
-    int i;
-
-    av_log(avctx, AV_LOG_DEBUG, "RD: %f\n",
-           s->rd_total / (double)(avctx->width * avctx->height *
-                                  avctx->frame_number));
-
-    av_freep(&s->m.me.scratchpad);
-    av_freep(&s->m.me.map);
-    av_freep(&s->m.me.score_map);
-    av_freep(&s->mb_type);
-    av_freep(&s->dummy);
-    av_freep(&s->scratchbuf);
-
-    for (i = 0; i < 3; i++) {
-        av_freep(&s->motion_val8[i]);
-        av_freep(&s->motion_val16[i]);
-    }
-
-    av_frame_unref(&s->current_picture);
-    av_frame_unref(&s->last_picture);
-
-    return 0;
-}
 AVCodec ff_svq1_encoder = {
     .name           = "svq1",
     .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
......