Commit 39885a4b authored by Anton Khirnov

avconv: reindent

parent 74b961db
@@ -1635,81 +1635,81 @@ static void do_video_out(AVFormatContext *s,
    if (!ost->frame_number)
        ost->first_pts = in_picture->pts;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (!check_recording_time(ost) ||
        ost->frame_number >= ost->max_frames)
        return;

    if (s->oformat->flags & AVFMT_RAWPICTURE &&
        enc->codec->id == CODEC_ID_RAWVIDEO) {
        /* raw pictures are written as AVPicture structure to
           avoid any copies. We support temporarily the older
           method. */
        enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
        enc->coded_frame->top_field_first  = in_picture->top_field_first;
        pkt.data   = (uint8_t *)in_picture;
        pkt.size   = sizeof(AVPicture);
        pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
        pkt.flags |= AV_PKT_FLAG_KEY;

        write_frame(s, &pkt, ost);
    } else {
        int got_packet;
        AVFrame big_picture;

        big_picture = *in_picture;
        /* better than nothing: use input picture interlaced
           settings */
        big_picture.interlaced_frame = in_picture->interlaced_frame;
        if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
            if (ost->top_field_first == -1)
                big_picture.top_field_first = in_picture->top_field_first;
            else
                big_picture.top_field_first = !!ost->top_field_first;
        }

        /* handles same_quant here. This is not correct because it may
           not be a global option */
        big_picture.quality = quality;
        if (!enc->me_threshold)
            big_picture.pict_type = 0;
        if (ost->forced_kf_index < ost->forced_kf_count &&
            big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
            big_picture.pict_type = AV_PICTURE_TYPE_I;
            ost->forced_kf_index++;
        }
        ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
            exit_program(1);
        }

        if (got_packet) {
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
            if (pkt.dts != AV_NOPTS_VALUE)
                pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);

            write_frame(s, &pkt, ost);
            *frame_size = pkt.size;
            video_size += pkt.size;

            /* if two pass, output log */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
        }
    }
    ost->sync_opts++;
    /*
     * For video, number of frames in == number of packets out.
     * But there may be reordering, so we can't throw away frames on encoder
     * flush, we need to limit them here, before they go into encoder.
     */
    ost->frame_number++;
}

static double psnr(double d)
...
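Note on the timestamp handling in the hunk above: both the raw-picture branch and the encoder branch rescale packet timestamps from the encoder time base into the output stream time base with av_rescale_q(). The following is a minimal, self-contained sketch of that conversion, not part of this commit; the concrete time-base values are made up for illustration.

    /* Sketch only: rescaling a timestamp between two time bases with
     * libavutil's av_rescale_q(), as do_video_out() does for pkt.pts/pkt.dts.
     * The time bases below are hypothetical examples, not from this commit. */
    #include <stdio.h>
    #include <inttypes.h>
    #include <libavutil/rational.h>
    #include <libavutil/mathematics.h>

    int main(void)
    {
        AVRational enc_tb = { 1, 25 };    /* e.g. a 25 fps encoder time base   */
        AVRational st_tb  = { 1, 90000 }; /* e.g. a 90 kHz stream time base    */
        int64_t pts = 100;                /* 100 ticks of enc_tb = 4 seconds   */

        /* av_rescale_q(a, bq, cq) computes a * bq / cq without overflowing
         * intermediate 64-bit arithmetic */
        int64_t out = av_rescale_q(pts, enc_tb, st_tb);

        printf("%" PRId64 " -> %" PRId64 "\n", pts, out); /* prints 100 -> 360000 */
        return 0;
    }

With libavutil installed, this should build with something like cc example.c -lavutil; the point is simply that a PTS counted in encoder ticks has to be re-expressed in stream ticks before the packet is handed to write_frame().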