Commit 7432bcfe authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master:
  vsrc_buffer: fix check from 7ae7c414.
  libxvid: Reorder functions to avoid forward declarations; make functions static.
  libxvid: drop some pointless dead code
  wmal: vertical alignment cosmetics
  wmal: Warn about missing bitstream splicing feature and ask for sample.
  wmal: Skip seekable_frame_in_packet.
  wmal: Drop unused variable num_possible_block_size.
  avfiltergraph: make the AVFilterInOut alloc/free API public
  graphparser: allow specifying sws flags in the graph description.
  graphparser: fix the order of connecting unlabeled links.
  graphparser: add avfilter_graph_parse2().
  vsrc_buffer: allow using a NULL buffer to signal EOF.
  swscale: handle last pixel if lines have an odd width.
  qdm2: fix a dubious pointer cast
  WMAL: Do not try to read rawpcm coefficients if bits is invalid
  mov: Fix detecting there is no sync sample.
  tiffdec: K&R cosmetics
  avf: has_duration does not check the global one
  dsputil: fix optimized emu_edge function on Win64.

Conflicts:
	doc/APIchanges
	libavcodec/libxvid_rc.c
	libavcodec/libxvidff.c
	libavcodec/tiff.c
	libavcodec/wmalosslessdec.c
	libavfilter/avfiltergraph.h
	libavfilter/graphparser.c
	libavfilter/version.h
	libavfilter/vsrc_buffer.c
	libswscale/output.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents 01bf2ad7 7bf9e339
......@@ -19,6 +19,9 @@ API changes, most recent first:
2012-03-26 - a67d9cf - lavfi 2.66.100
Add avfilter_fill_frame_from_{audio_,}buffer_ref() functions.
2012-xx-xx - xxxxxxx - lavfi 2.16.0 - avfiltergraph.h
Add avfilter_graph_parse2()
2012-xx-xx - xxxxxxx - lavu 51.27.0 - samplefmt.h
Add av_get_packed_sample_fmt() and av_get_planar_sample_fmt()
......
......@@ -76,6 +76,12 @@ In a complete filterchain all the unlabelled filter input and output
pads must be connected. A filtergraph is considered valid if all the
filter input and output pads of all the filterchains are connected.
Libavfilter will automatically insert scale filters where format
conversion is required. It is possible to specify swscale flags
for those automatically inserted scalers by prepending
@code{sws_flags=@var{flags};}
to the filtergraph description.
A BNF description of the filtergraph syntax follows:
@example
@var{NAME} ::= sequence of alphanumeric characters and '_'
......@@ -84,7 +90,7 @@ Follows a BNF description for the filtergraph syntax:
@var{FILTER_ARGUMENTS} ::= sequence of chars (possibly quoted)
@var{FILTER} ::= [@var{LINKNAMES}] @var{NAME} ["=" @var{ARGUMENTS}] [@var{LINKNAMES}]
@var{FILTERCHAIN} ::= @var{FILTER} [,@var{FILTERCHAIN}]
@var{FILTERGRAPH} ::= @var{FILTERCHAIN} [;@var{FILTERGRAPH}]
@var{FILTERGRAPH} ::= [sws_flags=@var{flags};] @var{FILTERCHAIN} [;@var{FILTERGRAPH}]
@end example
@c man end FILTERGRAPH DESCRIPTION
......
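For illustration only (not part of the diff above), a graph description using this prefix could look like the following; the numeric flag value is an assumed example of a libswscale scaler flag:

@example
sws_flags=2;[in]scale=320:240[out]
@end example

Per the documentation text above, the prefix applies to every scale filter that libavfilter inserts automatically while configuring the graph.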
......@@ -31,16 +31,12 @@
#undef NDEBUG
#include <assert.h>
extern unsigned int xvid_debug;
int ff_xvid_rate_control_init(MpegEncContext *s){
char *tmp_name;
int fd, i;
xvid_plg_create_t xvid_plg_create = { 0 };
xvid_plugin_2pass2_t xvid_2pass2 = { 0 };
//xvid_debug=-1;
fd=av_tempfile("xvidrc.", &tmp_name, 0, s->avctx);
if (fd == -1) {
av_log(NULL, AV_LOG_ERROR, "Can't create temporary pass2 file.\n");
......@@ -58,7 +54,6 @@ int ff_xvid_rate_control_init(MpegEncContext *s){
frame_types[rce->pict_type], (int)lrintf(rce->qscale / FF_QP2LAMBDA), rce->i_count, s->mb_num - rce->i_count - rce->skip_count,
rce->skip_count, (rce->i_tex_bits + rce->p_tex_bits + rce->misc_bits+7)/8, (rce->header_bits+rce->mv_bits+7)/8);
//av_log(NULL, AV_LOG_ERROR, "%s\n", tmp);
if (write(fd, tmp, strlen(tmp)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error %s writing 2pass logfile\n", strerror(errno));
return AVERROR(errno);
......
This diff is collapsed.
......@@ -140,7 +140,6 @@ typedef struct {
/// Parameters built from header parameters, do not change during playback
int group_order; ///< order of frame group
int fft_order; ///< order of FFT (actually fftorder+1)
int fft_frame_size; ///< size of fft frame, in components (1 complex = re + im)
int frame_size; ///< size of data frame
int frequency_range;
int sub_sampling; ///< subsampling: 0=25%, 1=50%, 2=100% */
......@@ -1607,13 +1606,17 @@ static void qdm2_fft_tone_synthesizer (QDM2Context *q, int sub_packet)
static void qdm2_calculate_fft (QDM2Context *q, int channel, int sub_packet)
{
const float gain = (q->channels == 1 && q->nb_channels == 2) ? 0.5f : 1.0f;
float *out = q->output_buffer + channel;
int i;
q->fft.complex[channel][0].re *= 2.0f;
q->fft.complex[channel][0].im = 0.0f;
q->rdft_ctx.rdft_calc(&q->rdft_ctx, (FFTSample *)q->fft.complex[channel]);
/* add samples to output buffer */
for (i = 0; i < ((q->fft_frame_size + 15) & ~15); i++)
q->output_buffer[q->channels * i + channel] += ((float *) q->fft.complex[channel])[i] * gain;
for (i = 0; i < FFALIGN(q->fft_size, 8); i++) {
out[0] += q->fft.complex[channel][i].re * gain;
out[q->channels] += q->fft.complex[channel][i].im * gain;
out += 2 * q->channels;
}
}
......@@ -1688,7 +1691,6 @@ static void dump_context(QDM2Context *q)
PRINT("checksum_size",q->checksum_size);
PRINT("channels",q->channels);
PRINT("nb_channels",q->nb_channels);
PRINT("fft_frame_size",q->fft_frame_size);
PRINT("fft_size",q->fft_size);
PRINT("sub_sampling",q->sub_sampling);
PRINT("fft_order",q->fft_order);
......@@ -1843,7 +1845,6 @@ static av_cold int qdm2_decode_init(AVCodecContext *avctx)
}
s->fft_order = av_log2(s->fft_size) + 1;
s->fft_frame_size = 2 * s->fft_size; // complex has two floats
// something like max decodable tones
s->group_order = av_log2(s->group_size) + 1;
......
This diff is collapsed.
......@@ -229,7 +229,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
s->max_subframe_len_bit = 0;
s->subframe_len_bits = av_log2(log2_max_num_subframes) + 1;
num_possible_block_sizes = log2_max_num_subframes + 1;
s->min_samples_per_subframe = s->samples_per_frame / s->max_num_subframes;
s->dynamic_range_compression = s->decode_flags & 0x80;
s->bV3RTM = s->decode_flags & 0x100;
......@@ -940,9 +939,10 @@ static int decode_subframe(WmallDecodeCtx *s)
if (rawpcm_tile) {
int bits = s->bits_per_sample - padding_zeroes;
if (bits <= 0 ) {
av_log(s->avctx, AV_LOG_ERROR, "rawpcm_tile bits invalid\n");
return -1;
if (bits <= 0) {
av_log(s->avctx, AV_LOG_ERROR,
"Invalid number of padding bits in raw PCM tile\n");
return AVERROR_INVALIDDATA;
}
av_dlog(s->avctx, "RAWPCM %d bits per sample. "
"total %d bits, remain=%d\n", bits,
......@@ -1169,8 +1169,7 @@ static int decode_packet(AVCodecContext *avctx, void *data, int *got_frame_ptr,
GetBitContext* gb = &s->pgb;
const uint8_t* buf = avpkt->data;
int buf_size = avpkt->size;
int num_bits_prev_frame, packet_sequence_number,
seekable_frame_in_packet, spliced_packet;
int num_bits_prev_frame, packet_sequence_number, spliced_packet;
if (s->packet_done || s->packet_loss) {
s->packet_done = 0;
......@@ -1185,9 +1184,11 @@ static int decode_packet(AVCodecContext *avctx, void *data, int *got_frame_ptr,
/* parse packet header */
init_get_bits(gb, buf, s->buf_bit_size);
packet_sequence_number = get_bits(gb, 4);
seekable_frame_in_packet = get_bits1(gb);
spliced_packet = get_bits1(gb);
packet_sequence_number = get_bits(gb, 4);
skip_bits(gb, 1); // Skip seekable_frame_in_packet, currently unused
spliced_packet = get_bits1(gb);
if (spliced_packet)
av_log_missing_feature(avctx, "Bitstream splicing", 1);
/* get number of bits that need to be added to the previous frame */
num_bits_prev_frame = get_bits(gb, s->log2_frame_size);
......
......@@ -594,7 +594,7 @@ cglobal emu_edge_core, 2, 7, 0
%define valw2 r7w
%define valw3 r3w
%if WIN64
%define valw4 r4w
%define valw4 r7w
%else ; unix64
%define valw4 r3w
%endif
......
......@@ -91,11 +91,11 @@ void avfilter_graph_free(AVFilterGraph **graph);
/**
* A linked-list of the inputs/outputs of the filter chain.
*
* This is mainly useful for avfilter_graph_parse(), since this
* function may accept a description of a graph with not connected
* input/output pads. This struct specifies, per each not connected
* pad contained in the graph, the filter context and the pad index
* required for establishing a link.
* This is mainly useful for avfilter_graph_parse() / avfilter_graph_parse2(),
* where it is used to communicate open (unlinked) inputs and outputs from and
* to the caller.
 * For each unconnected pad in the graph, this struct specifies the filter
 * context and the pad index required for establishing a link.
*/
typedef struct AVFilterInOut {
/** unique name for this input/output in the list */
......@@ -112,13 +112,14 @@ typedef struct AVFilterInOut {
} AVFilterInOut;
/**
* Create an AVFilterInOut.
* Must be free with avfilter_inout_free().
* Allocate a single AVFilterInOut entry.
* Must be freed with avfilter_inout_free().
* @return allocated AVFilterInOut on success, NULL on failure.
*/
AVFilterInOut *avfilter_inout_alloc(void);
/**
* Free the AVFilterInOut in *inout, and set its pointer to NULL.
* Free the supplied list of AVFilterInOut and set *inout to NULL.
* If *inout is NULL, do nothing.
*/
void avfilter_inout_free(AVFilterInOut **inout);
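As a hedged aside (not from the patch), a minimal sketch of how these two helpers pair up might look as follows; src_ctx stands for an already-created filter context and is an assumption of this sketch:

#include <libavfilter/avfiltergraph.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>

static int make_open_output(AVFilterContext *src_ctx, AVFilterInOut **out)
{
    AVFilterInOut *io = avfilter_inout_alloc();   /* allocate a single entry */
    if (!io)
        return AVERROR(ENOMEM);
    io->name       = av_strdup("in");  /* pad label; freed by avfilter_inout_free() */
    io->filter_ctx = src_ctx;          /* filter owning the open pad (assumption)   */
    io->pad_idx    = 0;                /* index of that pad on the filter           */
    io->next       = NULL;             /* end of the list                           */
    *out = io;
    return 0;
}
/* ... the whole list is later released with avfilter_inout_free(out), which
 * also resets *out to NULL. */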
......@@ -140,6 +141,41 @@ int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
AVFilterInOut **inputs, AVFilterInOut **outputs,
void *log_ctx);
/**
* Add a graph described by a string to a graph.
*
* @param[in] graph the filter graph where to link the parsed graph context
* @param[in] filters string to be parsed
* @param[out] inputs a linked list of all free (unlinked) inputs of the
* parsed graph will be returned here. It is to be freed
* by the caller using avfilter_inout_free().
* @param[out] outputs a linked list of all free (unlinked) outputs of the
* parsed graph will be returned here. It is to be freed by the
* caller using avfilter_inout_free().
* @return zero on success, a negative AVERROR code on error
*
* @note the difference between avfilter_graph_parse2() and
* avfilter_graph_parse() is that in avfilter_graph_parse(), the caller provides
* the lists of inputs and outputs, which therefore must be known before calling
* the function. On the other hand, avfilter_graph_parse2() \em returns the
* inputs and outputs that are left unlinked after parsing the graph and the
* caller then deals with them. Another difference is that in
* avfilter_graph_parse(), the inputs parameter describes inputs of the
* <em>already existing</em> part of the graph; i.e. from the point of view of
* the newly created part, they are outputs. Similarly the outputs parameter
* describes outputs of the already existing filters, which are provided as
* inputs to the parsed filters.
* avfilter_graph_parse2() takes the opposite approach -- it makes no reference
* whatsoever to already existing parts of the graph and the inputs parameter
* will on return contain inputs of the newly parsed part of the graph.
* Analogously the outputs parameter will contain outputs of the newly created
* filters.
*/
int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
AVFilterInOut **inputs,
AVFilterInOut **outputs);
/**
* Send a command to one or more filter instances.
*
......
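To make the contract above concrete, here is a hedged usage sketch; the graph string "scale=320:240,vflip" and the surrounding function are illustrative assumptions, not part of the commit:

#include <libavfilter/avfiltergraph.h>

static int parse_chain(AVFilterGraph *graph)
{
    AVFilterInOut *inputs = NULL, *outputs = NULL;
    int ret = avfilter_graph_parse2(graph, "scale=320:240,vflip",
                                    &inputs, &outputs);
    if (ret < 0)
        return ret;
    /* inputs  -> the one unlinked input pad of the parsed chain (scale's input)
     * outputs -> the one unlinked output pad (vflip's output)
     * The caller would avfilter_link() these to its own source/sink filters. */
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return 0;
}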
......@@ -32,6 +32,7 @@
*
* @param buf buffer containing frame data to be passed down the filtergraph.
* This function will take ownership of buf, the user must not free it.
* A NULL buf signals EOF -- i.e. no more frames will be sent to this filter.
*/
int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf);
......
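A minimal sketch of the feeding pattern this enables is shown below; next_buffer_ref() is a hypothetical application helper and buffersrc_ctx an assumed, previously configured buffer source:

#include <libavfilter/avfilter.h>
#include <libavfilter/buffersrc.h>

AVFilterBufferRef *next_buffer_ref(void);            /* hypothetical app helper */

static int feed_source(AVFilterContext *buffersrc_ctx)
{
    AVFilterBufferRef *buf;
    int ret;

    while ((buf = next_buffer_ref())) {               /* frames from the application */
        if ((ret = av_buffersrc_buffer(buffersrc_ctx, buf)) < 0)
            return ret;                               /* error while queuing */
    }
    return av_buffersrc_buffer(buffersrc_ctx, NULL);  /* NULL signals end of stream */
}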
......@@ -189,13 +189,15 @@ static AVFilterInOut *extract_inout(const char *label, AVFilterInOut **links)
{
AVFilterInOut *ret;
while (*links && strcmp((*links)->name, label))
while (*links && (!(*links)->name || strcmp((*links)->name, label)))
links = &((*links)->next);
ret = *links;
if (ret)
if (ret) {
*links = ret->next;
ret->next = NULL;
}
return ret;
}
......@@ -206,22 +208,31 @@ static void insert_inout(AVFilterInOut **inouts, AVFilterInOut *element)
*inouts = element;
}
static void append_inout(AVFilterInOut **inouts, AVFilterInOut **element)
{
while (*inouts && (*inouts)->next)
inouts = &((*inouts)->next);
if (!*inouts)
*inouts = *element;
else
(*inouts)->next = *element;
*element = NULL;
}
static int link_filter_inouts(AVFilterContext *filt_ctx,
AVFilterInOut **curr_inputs,
AVFilterInOut **open_inputs, void *log_ctx)
{
int pad = filt_ctx->input_count, ret;
int pad, ret;
while (pad--) {
for (pad = 0; pad < filt_ctx->input_count; pad++) {
AVFilterInOut *p = *curr_inputs;
if (!p) {
av_log(log_ctx, AV_LOG_ERROR,
"Not enough inputs specified for the \"%s\" filter.\n",
filt_ctx->filter->name);
return AVERROR(EINVAL);
}
*curr_inputs = (*curr_inputs)->next;
if (p)
*curr_inputs = (*curr_inputs)->next;
else if (!(p = av_mallocz(sizeof(*p))))
return AVERROR(ENOMEM);
if (p->filter_ctx) {
if ((ret = link_filter(p->filter_ctx, p->pad_idx, filt_ctx, pad, log_ctx)) < 0)
......@@ -258,6 +269,7 @@ static int link_filter_inouts(AVFilterContext *filt_ctx,
static int parse_inputs(const char **buf, AVFilterInOut **curr_inputs,
AVFilterInOut **open_outputs, void *log_ctx)
{
AVFilterInOut *parsed_inputs = NULL;
int pad = 0;
while (**buf == '[') {
......@@ -280,12 +292,15 @@ static int parse_inputs(const char **buf, AVFilterInOut **curr_inputs,
match->pad_idx = pad;
}
insert_inout(curr_inputs, match);
append_inout(&parsed_inputs, &match);
*buf += strspn(*buf, WHITESPACES);
pad++;
}
append_inout(&parsed_inputs, curr_inputs);
*curr_inputs = parsed_inputs;
return pad;
}
......@@ -334,10 +349,173 @@ static int parse_outputs(const char **buf, AVFilterInOut **curr_inputs,
return pad;
}
#if FF_API_GRAPH_AVCLASS
#define log_ctx graph
#else
#define log_ctx NULL
#endif
static int parse_sws_flags(const char **buf, AVFilterGraph *graph)
{
char *p = strchr(*buf, ';');
if (strncmp(*buf, "sws_flags=", 10))
return 0;
if (!p) {
av_log(log_ctx, AV_LOG_ERROR, "sws_flags not terminated with ';'.\n");
return AVERROR(EINVAL);
}
*buf += 4; // keep the 'flags=' part
av_freep(&graph->scale_sws_opts);
if (!(graph->scale_sws_opts = av_mallocz(p - *buf + 1)))
return AVERROR(ENOMEM);
av_strlcpy(graph->scale_sws_opts, *buf, p - *buf + 1);
*buf = p + 1;
return 0;
}
int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
AVFilterInOut **inputs,
AVFilterInOut **outputs)
{
int index = 0, ret = 0;
char chr = 0;
AVFilterInOut *curr_inputs = NULL, *open_inputs = NULL, *open_outputs = NULL;
filters += strspn(filters, WHITESPACES);
if ((ret = parse_sws_flags(&filters, graph)) < 0)
goto fail;
do {
AVFilterContext *filter;
filters += strspn(filters, WHITESPACES);
if ((ret = parse_inputs(&filters, &curr_inputs, &open_outputs, log_ctx)) < 0)
goto end;
if ((ret = parse_filter(&filter, &filters, graph, index, log_ctx)) < 0)
goto end;
if ((ret = link_filter_inouts(filter, &curr_inputs, &open_inputs, log_ctx)) < 0)
goto end;
if ((ret = parse_outputs(&filters, &curr_inputs, &open_inputs, &open_outputs,
log_ctx)) < 0)
goto end;
filters += strspn(filters, WHITESPACES);
chr = *filters++;
if (chr == ';' && curr_inputs)
append_inout(&open_outputs, &curr_inputs);
index++;
} while (chr == ',' || chr == ';');
if (chr) {
av_log(log_ctx, AV_LOG_ERROR,
"Unable to parse graph description substring: \"%s\"\n",
filters - 1);
ret = AVERROR(EINVAL);
goto end;
}
append_inout(&open_outputs, &curr_inputs);
*inputs = open_inputs;
*outputs = open_outputs;
return 0;
fail:
end:
for (; graph->filter_count > 0; graph->filter_count--)
avfilter_free(graph->filters[graph->filter_count - 1]);
av_freep(&graph->filters);
avfilter_inout_free(&open_inputs);
avfilter_inout_free(&open_outputs);
avfilter_inout_free(&curr_inputs);
*inputs = NULL;
*outputs = NULL;
return ret;
}
#undef log_ctx
int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
AVFilterInOut **open_inputs_ptr, AVFilterInOut **open_outputs_ptr,
void *log_ctx)
{
#if 0
int ret;
AVFilterInOut *open_inputs = open_inputs_ptr ? *open_inputs_ptr : NULL;
AVFilterInOut *open_outputs = open_outputs_ptr ? *open_outputs_ptr : NULL;
AVFilterInOut *cur, *match, *inputs = NULL, *outputs = NULL;
if ((ret = avfilter_graph_parse2(graph, filters, &inputs, &outputs)) < 0)
goto fail;
/* First input can be omitted if it is "[in]" */
if (inputs && !inputs->name)
inputs->name = av_strdup("in");
for (cur = inputs; cur; cur = cur->next) {
if (!cur->name) {
av_log(log_ctx, AV_LOG_ERROR,
"Not enough inputs specified for the \"%s\" filter.\n",
cur->filter_ctx->filter->name);
ret = AVERROR(EINVAL);
goto fail;
}
if (!(match = extract_inout(cur->name, &open_outputs)))
continue;
ret = avfilter_link(match->filter_ctx, match->pad_idx,
cur->filter_ctx, cur->pad_idx);
avfilter_inout_free(&match);
if (ret < 0)
goto fail;
}
/* Last output can be omitted if it is "[out]" */
if (outputs && !outputs->name)
outputs->name = av_strdup("out");
for (cur = outputs; cur; cur = cur->next) {
if (!cur->name) {
av_log(log_ctx, AV_LOG_ERROR,
"Invalid filterchain containing an unlabelled output pad: \"%s\"\n",
filters);
ret = AVERROR(EINVAL);
goto fail;
}
if (!(match = extract_inout(cur->name, &open_inputs)))
continue;
ret = avfilter_link(cur->filter_ctx, cur->pad_idx,
match->filter_ctx, match->pad_idx);
avfilter_inout_free(&match);
if (ret < 0)
goto fail;
}
fail:
if (ret < 0) {
for (; graph->filter_count > 0; graph->filter_count--)
avfilter_free(graph->filters[graph->filter_count - 1]);
av_freep(&graph->filters);
}
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
/* clear open_in/outputs only if not passed as parameters */
if (open_inputs_ptr) *open_inputs_ptr = open_inputs;
else avfilter_inout_free(&open_inputs);
if (open_outputs_ptr) *open_outputs_ptr = open_outputs;
else avfilter_inout_free(&open_outputs);
return ret;
}
#else
int index = 0, ret = 0;
char chr = 0;
......@@ -414,3 +592,5 @@ end:
}
return ret;
}
#endif
......@@ -29,8 +29,8 @@
#include "libavutil/avutil.h"
#define LIBAVFILTER_VERSION_MAJOR 2
#define LIBAVFILTER_VERSION_MINOR 69
#define LIBAVFILTER_VERSION_MICRO 101
#define LIBAVFILTER_VERSION_MINOR 70
#define LIBAVFILTER_VERSION_MICRO 100
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
LIBAVFILTER_VERSION_MINOR, \
......
......@@ -39,6 +39,7 @@ typedef struct {
AVRational time_base; ///< time_base to set in the output link
AVRational sample_aspect_ratio;
char sws_param[256];
int eof;
} BufferSourceContext;
#define CHECK_PARAM_CHANGE(s, c, width, height, format)\
......@@ -55,6 +56,12 @@ int av_vsrc_buffer_add_video_buffer_ref(AVFilterContext *buffer_filter,
AVFilterBufferRef *buf;
int ret;
if (!picref) {
c->eof = 1;
return 0;
} else if (c->eof)
return AVERROR(EINVAL);
if (!av_fifo_space(c->fifo) &&
(ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
sizeof(buf))) < 0)
......@@ -125,6 +132,12 @@ int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
BufferSourceContext *c = s->priv;
int ret;
if (!buf) {
c->eof = 1;
return 0;
} else if (c->eof)
return AVERROR(EINVAL);
if (!av_fifo_space(c->fifo) &&
(ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
sizeof(buf))) < 0)
......@@ -144,9 +157,17 @@ int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
int av_vsrc_buffer_add_frame(AVFilterContext *buffer_src,
const AVFrame *frame, int flags)
{
BufferSourceContext *c = buffer_src->priv;
AVFilterBufferRef *picref;
int ret;
AVFilterBufferRef *picref =
avfilter_get_video_buffer_ref_from_frame(frame, AV_PERM_WRITE);
if (!frame) {
c->eof = 1;
return 0;
} else if (c->eof)
return AVERROR(EINVAL);
picref = avfilter_get_video_buffer_ref_from_frame(frame, AV_PERM_WRITE);
if (!picref)
return AVERROR(ENOMEM);
ret = av_vsrc_buffer_add_video_buffer_ref(buffer_src, picref, flags);
......@@ -226,6 +247,8 @@ static int request_frame(AVFilterLink *link)
AVFilterBufferRef *buf;
if (!av_fifo_size(c->fifo)) {
if (c->eof)
return AVERROR_EOF;
av_log(link->src, AV_LOG_WARNING,
"request_frame() called with no available frame!\n");
return AVERROR(EINVAL);
......@@ -243,7 +266,10 @@ static int request_frame(AVFilterLink *link)
static int poll_frame(AVFilterLink *link)
{
BufferSourceContext *c = link->src->priv;
return !!av_fifo_size(c->fifo);
int size = av_fifo_size(c->fifo);
if (!size && c->eof)
return AVERROR_EOF;
return size/sizeof(AVFilterBufferRef*);
}
AVFilter avfilter_vsrc_buffer = {
......
......@@ -107,6 +107,7 @@ typedef struct MOVStreamContext {
unsigned int alt_sample_size; ///< always contains sample size from stsz atom
unsigned int sample_count;
int *sample_sizes;
int keyframe_absent;
unsigned int keyframe_count;
int *keyframes;
int time_scale;
......
......@@ -1618,7 +1618,10 @@ static int mov_read_stss(MOVContext *c, AVIOContext *pb, MOVAtom atom)
av_dlog(c->fc, "keyframe_count = %d\n", entries);
if (!entries)
{
sc->keyframe_absent = 1;
return 0;
}
if (entries >= UINT_MAX / sizeof(int))
return AVERROR_INVALIDDATA;
sc->keyframes = av_malloc(entries * sizeof(int));
......@@ -1873,7 +1876,7 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
return;
}
if (!sc->keyframe_count || current_sample+key_off == sc->keyframes[stss_index]) {
if (!sc->keyframe_absent && (!sc->keyframe_count || current_sample+key_off == sc->keyframes[stss_index])) {
keyframe = 1;
if (stss_index + 1 < sc->keyframe_count)
stss_index++;
......
......@@ -1975,6 +1975,8 @@ static int has_duration(AVFormatContext *ic)
if (st->duration != AV_NOPTS_VALUE)
return 1;
}
if (ic->duration)
return 1;
return 0;
}
......
......@@ -469,7 +469,7 @@ yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter,
{
int i;
for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int j;
int Y1 = 1 << 18;
int Y2 = 1 << 18;
......@@ -512,7 +512,7 @@ yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2],
int uvalpha1 = 4095 - uvalpha;
int i;
for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
......@@ -539,7 +539,7 @@ yuv2422_1_c_template(SwsContext *c, const int16_t *buf0,
int i;
if (uvalpha < 2048) {
for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int Y1 = (buf0[i * 2 ]+64) >> 7;
int Y2 = (buf0[i * 2 + 1]+64) >> 7;
int U = (ubuf0[i] +64) >> 7;
......@@ -561,7 +561,7 @@ yuv2422_1_c_template(SwsContext *c, const int16_t *buf0,
}
} else {
const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int Y1 = (buf0[i * 2 ] + 64) >> 7;
int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
int U = (ubuf0[i] + ubuf1[i]+128) >> 8;
......@@ -608,7 +608,7 @@ yuv2rgb48_X_c_template(SwsContext *c, const int16_t *lumFilter,
{
int i;
for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int j;
int Y1 = -0x40000000;
int Y2 = -0x40000000;
......@@ -671,7 +671,7 @@ yuv2rgb48_2_c_template(SwsContext *c, const int32_t *buf[2],
int uvalpha1 = 4095 - uvalpha;
int i;
for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14;
int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14;
int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha + (-128 << 23)) >> 14;
......@@ -709,7 +709,7 @@ yuv2rgb48_1_c_template(SwsContext *c, const int32_t *buf0,
int i;
if (uvalpha < 2048) {
for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int Y1 = (buf0[i * 2] ) >> 2;
int Y2 = (buf0[i * 2 + 1]) >> 2;
int U = (ubuf0[i] + (-128 << 11)) >> 2;
......@@ -737,7 +737,7 @@ yuv2rgb48_1_c_template(SwsContext *c, const int32_t *buf0,
}
} else {
const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int Y1 = (buf0[i * 2] ) >> 2;
int Y2 = (buf0[i * 2 + 1]) >> 2;
int U = (ubuf0[i] + ubuf1[i] + (-128 << 12)) >> 3;
......@@ -952,7 +952,7 @@ yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter,
{
int i;
for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int j, A1, A2;
int Y1 = 1 << 18;
int Y2 = 1 << 18;
......@@ -1012,7 +1012,7 @@ yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2],
int uvalpha1 = 4095 - uvalpha;
int i;
for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
......@@ -1050,7 +1050,7 @@ yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0,
int i;
if (uvalpha < 2048) {
for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int Y1 = (buf0[i * 2 ] + 64) >> 7;
int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
int U = (ubuf0[i] + 64) >> 7;
......@@ -1077,7 +1077,7 @@ yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0,
}
} else {
const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int Y1 = (buf0[i * 2 ] + 64) >> 7;
int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
int U = (ubuf0[i] + ubuf1[i] + 128) >> 8;
......
......@@ -40,7 +40,7 @@ do_lavfi "crop_vflip" "crop=iw-100:ih-100:100:100,vflip"
do_lavfi "drawbox" "drawbox=224:24:88:72:#FF8010@0.5"
do_lavfi "fade" "fade=in:5:15,fade=out:30:15"
do_lavfi "null" "null"
do_lavfi "overlay" "split[m],scale=88:72,pad=96:80:4:4[o2];[m]fifo,[o2]overlay=240:16"
do_lavfi "overlay" "split[m],scale=88:72,pad=96:80:4:4[o2];[m]fifo[o1],[o1][o2]overlay=240:16"
do_lavfi "pad" "pad=iw*1.5:ih*1.5:iw*0.3:ih*0.2"
do_lavfi "pp" "mp=pp=be/de/tn/l5/al"
do_lavfi "pp2" "mp=pp=be/fq:16/fa/lb"
......