Commit d27edc03 authored by Michael Niedermayer

Merge commit '511cf612'

* commit '511cf612':
  miscellaneous typo fixes

Conflicts:
	libavcodec/4xm.c
	libavcodec/lagarith.c
	libavcodec/parser.c
	libavcodec/ratecontrol.c
	libavcodec/shorten.c
	libavcodec/vda_h264.c
	libavformat/dvenc.c
	libavformat/wtv.c
	tools/patcheck
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents a41bf09d 511cf612
@@ -1460,7 +1460,7 @@ HAVE_LIST="
xmm_clobbers
"
-# options emitted with CONFIG_ prefix but not available on command line
+# options emitted with CONFIG_ prefix but not available on the command line
CONFIG_EXTRA="
aandcttables
ac3dsp
...
@@ -288,7 +288,7 @@ TYPEDEF_HIDES_STRUCT = NO
# causing a significant performance penality.
# If the system has enough physical memory increasing the cache will improve the
# performance by keeping more symbols in memory. Note that the value works on
-# a logarithmic scale so increasing the size by one will rougly double the
+# a logarithmic scale so increasing the size by one will roughly double the
# memory usage. The cache size is given by this formula:
# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
# corresponding to a cache size of 2^16 = 65536 symbols
...
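(Aside, not part of the commit: plugging the two ends of the documented 0..9 range into that formula gives 2^(16+0) = 65536 cached symbols for the default setting and 2^(16+9) = 2^25 = 33554432 symbols at the maximum, which is where the "roughly doubles per step" behaviour comes from.)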
@@ -170,7 +170,7 @@ For exported names, each library has its own prefixes. Just check the existing
code and name accordingly.
@end itemize
-@subsection Miscellanous conventions
+@subsection Miscellaneous conventions
@itemize @bullet
@item
fprintf and printf are forbidden in libavformat and libavcodec,
...
@@ -23,7 +23,7 @@ Let's consider the problem of minimizing:
rate is the filesize
distortion is the quality
-lambda is a fixed value choosen as a tradeoff between quality and filesize
+lambda is a fixed value chosen as a tradeoff between quality and filesize
Is this equivalent to finding the best quality for a given max
filesize? The answer is yes. For each filesize limit there is some lambda
factor for which minimizing above will get you the best quality (using your
...
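(Aside, not part of the commit: the expression being minimized is elided by the hunk context above; in the standard rate-distortion formulation this kind of text refers to, the Lagrangian cost is usually written as

    J = distortion + lambda * rate

where rate is the filesize, distortion measures the quality loss, and lambda sets the tradeoff. The equivalence the text asserts is that for every filesize limit there exists some lambda whose unconstrained minimizer of J is also the best-quality solution under that limit.)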
@@ -85,8 +85,8 @@ here are some edges we could choose from:
/ \
O-----2--4--O
-Finding the new best pathes and scores for each point of our new column is
-trivial given we know the previous column best pathes and scores:
+Finding the new best paths and scores for each point of our new column is
+trivial given we know the previous column best paths and scores:
O-----0-----8
\
...
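(Aside, not part of the commit: a minimal C sketch of the per-column update the viterbi.txt excerpt describes; all names here, NODES, prev_score, edge_cost and so on, are invented for illustration and do not come from the FFmpeg sources.)

#include <limits.h>

#define NODES 4 /* points per column in this toy example */

/* For every node j of the new column, the best score is the minimum over
 * all predecessors i of (best score of i in the previous column + cost of
 * the edge i->j). Remembering the winning predecessor allows the best path
 * to be backtracked later. Missing edges are marked with a negative cost. */
static void viterbi_column(const int prev_score[NODES],
                           const int edge_cost[NODES][NODES],
                           int new_score[NODES], int came_from[NODES])
{
    for (int j = 0; j < NODES; j++) {
        new_score[j] = INT_MAX;
        came_from[j] = -1;
        for (int i = 0; i < NODES; i++) {
            if (edge_cost[i][j] < 0 || prev_score[i] == INT_MAX)
                continue; /* no edge, or predecessor unreachable */
            if (prev_score[i] + edge_cost[i][j] < new_score[j]) {
                new_score[j] = prev_score[i] + edge_cost[i][j];
                came_from[j] = i;
            }
        }
    }
}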
@@ -842,7 +842,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
cfrm->size + data_size + FF_INPUT_BUFFER_PADDING_SIZE);
// explicit check needed as memcpy below might not catch a NULL
if (!cfrm->data) {
-av_log(f->avctx, AV_LOG_ERROR, "realloc falure\n");
+av_log(f->avctx, AV_LOG_ERROR, "realloc failure\n");
return -1;
}
...
@@ -597,7 +597,7 @@ static void psy_3gpp_analyze_channel(FFPsyContext *ctx, int channel,
for (w = 0; w < wi->num_windows*16; w += 16) {
AacPsyBand *bands = &pch->band[w];
-//5.4.2.3 "Spreading" & 5.4.3 "Spreaded Energy Calculation"
+/* 5.4.2.3 "Spreading" & 5.4.3 "Spread Energy Calculation" */
spread_en[0] = bands[0].energy;
for (g = 1; g < num_bands; g++) {
bands[g].thr = FFMAX(bands[g].thr, bands[g-1].thr * coeffs[g].spread_hi[0]);
@@ -617,7 +617,7 @@ static void psy_3gpp_analyze_channel(FFPsyContext *ctx, int channel,
band->thr = FFMAX(PSY_3GPP_RPEMIN*band->thr, FFMIN(band->thr,
PSY_3GPP_RPELEV*pch->prev_band[w+g].thr_quiet));
-/* 5.6.1.3.1 "Prepatory steps of the perceptual entropy calculation" */
+/* 5.6.1.3.1 "Preparatory steps of the perceptual entropy calculation" */
pe += calc_pe_3gpp(band);
a += band->pe_const;
active_lines += band->active_lines;
...
@@ -546,7 +546,7 @@ static void decode_transform_coeffs(AC3DecodeContext *s, int blk)
for (ch = 1; ch <= s->channels; ch++) {
/* transform coefficients for full-bandwidth channel */
decode_transform_coeffs_ch(s, blk, ch, &m);
-/* tranform coefficients for coupling channel come right after the
+/* transform coefficients for coupling channel come right after the
coefficients for the first coupled channel*/
if (s->channel_in_cpl[ch]) {
if (!got_cplchan) {
...
@@ -659,7 +659,7 @@ static void count_frame_bits_fixed(AC3EncodeContext *s)
* bit allocation parameters do not change between blocks
* no delta bit allocation
* no skipped data
-* no auxilliary data
+* no auxiliary data
* no E-AC-3 metadata
*/
...
@@ -65,7 +65,7 @@ void ff_acelp_filter_init_mips(ACELPFContext *c);
* the coefficients are scaled by 2^15.
* This array only contains the right half of the filter.
* This filter is likely identical to the one used in G.729, though this
-* could not be determined from the original comments with certainity.
+* could not be determined from the original comments with certainty.
*/
extern const int16_t ff_acelp_interp_filter[61];
...
@@ -172,7 +172,7 @@ static int build_table(VLC *vlc, int table_nb_bits, int nb_codes,
table[i][0] = -1; //codes
}
-/* first pass: map codes and compute auxillary table sizes */
+/* first pass: map codes and compute auxiliary table sizes */
for (i = 0; i < nb_codes; i++) {
n = codes[i].bits;
code = codes[i].code;
...
@@ -757,7 +757,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
} else {
if (!f->key_frame_ok) {
av_log(avctx, AV_LOG_ERROR,
-"Cant decode non keyframe without valid keyframe\n");
+"Cannot decode non-keyframe without valid keyframe\n");
return AVERROR_INVALIDDATA;
}
p->key_frame = 0;
...
@@ -644,7 +644,7 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
}
/* Now FLX is strange, in that it is "byte" as opposed to "pixel" run length compressed.
-* This does not give us any good oportunity to perform word endian conversion
+* This does not give us any good opportunity to perform word endian conversion
* during decompression. So if it is required (i.e., this is not a LE target, we do
* a second pass over the line here, swapping the bytes.
*/
...
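(Aside, not part of the commit: a minimal sketch of the "second pass" that comment refers to; this is not the loop from flicvideo.c, and the helper name and signature are invented for illustration.)

#include <stdint.h>

/* After byte-oriented RLE decompression, each 15/16-bit pixel sits in the
 * buffer in the file's byte order; on a target with the opposite endianness
 * a second pass over the decoded line swaps the two bytes of every pixel. */
static void swap_pixels_to_host(uint16_t *line, int width)
{
    for (int x = 0; x < width; x++)
        line[x] = (uint16_t)((line[x] >> 8) | (line[x] << 8));
}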
@@ -86,7 +86,7 @@ static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field,
if (!interl)
poc |= 3;
-else if( interl && (poc&3) == 3) //FIXME store all MBAFF references so this isnt needed
+else if( interl && (poc&3) == 3) // FIXME: store all MBAFF references so this is not needed
poc= (poc&~3) + rfield + 1;
for(j=start; j<end; j++){
...
@@ -235,7 +235,7 @@
/**
* Pack two delta values (a,b) into one 16bit word
-* according with endianess of the host machine.
+* according with endianness of the host machine.
*/
#if HAVE_BIGENDIAN
#define PD(a,b) (((a) << 8) + (b))
@@ -282,7 +282,7 @@ static const int16_t delta_tab_3_5[79] = { TAB_3_5 };
/**
* Pack four delta values (a,a,b,b) into one 32bit word
-* according with endianess of the host machine.
+* according with endianness of the host machine.
*/
#if HAVE_BIGENDIAN
#define PD(a,b) (((a) << 24) + ((a) << 16) + ((b) << 8) + (b))
...
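(Aside, not part of the commit: to see why PD() depends on host endianness, packing the deltas so that they land in a fixed byte order in memory requires mirroring the shifts on little-endian hosts. Only the HAVE_BIGENDIAN branch below is taken from the hunk above; the #else branch is shown as the assumed mirror image, not quoted from indeo3data.h.)

/* Hedged illustration only; the #else branch is an assumption. */
#if HAVE_BIGENDIAN
#define PD(a, b) (((a) << 8) + (b))   /* 'a' lands in the high byte of the word */
#else
#define PD(a, b) (((b) << 8) + (a))   /* 'a' lands in the low byte of the word  */
#endif
/* Either way, the bytes of the packed 16-bit word appear in memory as a, b,
 * so the resulting table can be read bytewise regardless of the host. */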
@@ -198,7 +198,7 @@ static int lag_read_prob_header(lag_rac *rac, GetBitContext *gb)
}
/* Comment from reference source:
* if (b & 0x80 == 0) { // order of operations is 'wrong'; it has been left this way
-* // since the compression change is negligable and fixing it
+* // since the compression change is negligible and fixing it
* // breaks backwards compatibility
* b =- (signed int)b;
* b &= 0xFF;
...
@@ -257,7 +257,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
}
if ((err = aacEncoder_SetParam(s->handle, AACENC_BANDWIDTH,
avctx->cutoff)) != AACENC_OK) {
-av_log(avctx, AV_LOG_ERROR, "Unable to set the encoder bandwith to %d: %s\n",
+av_log(avctx, AV_LOG_ERROR, "Unable to set the encoder bandwidth to %d: %s\n",
avctx->cutoff, aac_get_error(err));
goto error;
}
...
@@ -341,7 +341,7 @@ static int encode_frame(AVCodecContext* avc_context, AVPacket *pkt,
memcpy(pkt->data, o_packet.packet, o_packet.bytes);
// HACK: assumes no encoder delay, this is true until libtheora becomes
-// multithreaded (which will be disabled unless explictly requested)
+// multithreaded (which will be disabled unless explicitly requested)
pkt->pts = pkt->dts = frame->pts;
avc_context->coded_frame->key_frame = !(o_packet.granulepos & h->keyframe_mask);
if (avc_context->coded_frame->key_frame)
...
@@ -95,7 +95,7 @@ void ff_fetch_timestamp(AVCodecParserContext *s, int off, int remove){
if ( s->cur_offset + off >= s->cur_frame_offset[i]
&& (s->frame_offset < s->cur_frame_offset[i] ||
(!s->frame_offset && !s->next_frame_offset)) // first field/frame
-//check is disabled because mpeg-ts doesn't send complete PES packets
+// check disabled since MPEG-TS does not send complete PES packets
&& /*s->next_frame_offset + off <*/ s->cur_frame_end[i]){
s->dts= s->cur_frame_dts[i];
s->pts= s->cur_frame_pts[i];
...
@@ -372,7 +372,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
int pass;
for(pass = 0; pass < NB_PASSES; pass++) {
-/* NOTE: a pass is completely omited if no pixels would be
+/* NOTE: a pass is completely omitted if no pixels would be
output */
pass_row_size = ff_png_pass_row_size(pass, bits_per_pixel, avctx->width);
if (pass_row_size > 0) {
...
@@ -816,7 +816,7 @@ static int init_pass2(MpegEncContext *s)
AVCodecContext *a= s->avctx;
int i, toobig;
double fps= get_fps(s->avctx);
-double complexity[5]={0,0,0,0,0}; // aproximate bits at quant=1
+double complexity[5]={0,0,0,0,0}; // approximate bits at quant=1
uint64_t const_bits[5]={0,0,0,0,0}; // quantizer independent bits
uint64_t all_const_bits;
uint64_t all_available_bits= (uint64_t)(s->bit_rate*(double)rcc->num_entries/fps);
...
@@ -406,7 +406,7 @@ int audio_resample(ReSampleContext *s, short *output, short *input, int nb_sampl
if (av_audio_convert(s->convert_ctx[1], obuf, ostride,
ibuf, istride, nb_samples1 * s->output_channels) < 0) {
av_log(s->resample_context, AV_LOG_ERROR,
-"Audio sample format convertion failed\n");
+"Audio sample format conversion failed\n");
return 0;
}
}
...
@@ -740,7 +740,7 @@ static int rv10_decode_frame(AVCodecContext *avctx,
*got_frame = 1;
ff_print_debug_info(s, pict);
}
-s->current_picture_ptr= NULL; //so we can detect if frame_end wasnt called (find some nicer solution...)
+s->current_picture_ptr= NULL; // so we can detect if frame_end was not called (find some nicer solution...)
}
return avpkt->size;
...
@@ -526,7 +526,8 @@ static int shorten_decode_frame(AVCodecContext *avctx, void *data,
/* get Rice code for residual decoding */
if (cmd != FN_ZERO) {
residual_size = get_ur_golomb_shorten(&s->gb, ENERGYSIZE);
-/* this is a hack as version 0 differed in definition of get_sr_golomb_shorten */
+/* This is a hack as version 0 differed in the definition
+ * of get_sr_golomb_shorten(). */
if (s->version == 0)
residual_size--;
}
...
@@ -1235,7 +1235,7 @@ static int vorbis_floor1_decode(vorbis_context *vc,
if (highroom < lowroom) {
room = highroom * 2;
} else {
-room = lowroom * 2; // SPEC mispelling
+room = lowroom * 2; // SPEC misspelling
}
if (val) {
floor1_flag[low_neigh_offs] = 1;
...
@@ -1099,7 +1099,7 @@ static int decode_subframe(WMAProDecodeCtx *s)
s->channels_for_cur_subframe = 0;
for (i = 0; i < s->avctx->channels; i++) {
const int cur_subframe = s->channel[i].cur_subframe;
-/** substract already processed samples */
+/** subtract already processed samples */
total_samples -= s->channel[i].decoded_samples;
/** and count if there are multiple subframes that match our profile */
...
@@ -51,9 +51,9 @@ struct DVMuxContext {
AVFifoBuffer *audio_data[2]; /* FIFO for storing excessive amounts of PCM */
int frames; /* current frame number */
int64_t start_time; /* recording start time */
-int has_audio; /* frame under contruction has audio */
-int has_video; /* frame under contruction has video */
-uint8_t frame_buf[DV_MAX_FRAME_SIZE]; /* frame under contruction */
+int has_audio; /* frame under construction has audio */
+int has_video; /* frame under construction has video */
+uint8_t frame_buf[DV_MAX_FRAME_SIZE]; /* frame under construction */
AVTimecode tc; /* timecode context */
};
...
@@ -370,7 +370,7 @@ static int jpeg_parse_packet(AVFormatContext *ctx, PayloadContext *jpeg,
/* Prepare the JPEG packet. */
if ((ret = ff_rtp_finalize_packet(pkt, &jpeg->frame, st->index)) < 0) {
av_log(ctx, AV_LOG_ERROR,
-"Error occured when getting frame buffer.\n");
+"Error occurred when getting frame buffer.\n");
return ret;
}
...
@@ -51,7 +51,7 @@ typedef struct {
char dirname[1024];
uint8_t iobuf[32768];
URLContext *out; // Current output stream where all output is written
-URLContext *out2; // Auxillary output stream where all output also is written
+URLContext *out2; // Auxiliary output stream where all output is also written
URLContext *tail_out; // The actual main output stream, if we're currently seeked back to write elsewhere
int64_t tail_pos, cur_pos, cur_start_pos;
int packets_written;
...
@@ -339,7 +339,7 @@ static int spdif_header_mpeg(AVFormatContext *s, AVPacket *pkt)
ctx->data_type = mpeg_data_type [version & 1][layer];
ctx->pkt_offset = spdif_mpeg_pkt_offset[version & 1][layer];
}
-// TODO Data type dependant info (normal/karaoke, dynamic range control)
+// TODO Data type dependent info (normal/karaoke, dynamic range control)
return 0;
}
...
@@ -100,7 +100,7 @@ static void audiogen(AVLFG *rnd, void **data, enum AVSampleFormat sample_fmt,
a += M_PI * 1000.0 * 2.0 / sample_rate;
}
-/* 1 second of varing frequency between 100 and 10000 Hz */
+/* 1 second of varying frequency between 100 and 10000 Hz */
a = 0;
for (i = 0; i < 1 * sample_rate && k < nb_samples; i++, k++) {
v = sin(a) * 0.30;
...
/*
-* AltiVec-enhanced yuv-to-yuv convertion routines.
+* AltiVec-enhanced yuv-to-yuv conversion routines.
*
* Copyright (C) 2004 Romain Dolbeau <romain@dolbeau.org>
* based on the equivalent C code in swscale.c
...
@@ -148,7 +148,7 @@ static void hScale8To19_c(SwsContext *c, int16_t *_dst, int dstW,
}
}
-// FIXME all pal and rgb srcFormats could do this convertion as well
+// FIXME all pal and rgb srcFormats could do this conversion as well
// FIXME all scalers more complex than bilinear could do half of this transform
static void chrRangeToJpeg_c(int16_t *dstU, int16_t *dstV, int width)
{
...
@@ -189,7 +189,7 @@ int main(int argc, char **argv)
a += (1000 * FRAC_ONE) / sample_rate;
}
-/* 1 second of varing frequency between 100 and 10000 Hz */
+/* 1 second of varying frequency between 100 and 10000 Hz */
a = 0;
for (i = 0; i < 1 * sample_rate; i++) {
v = (int_cos(a) * 10000) >> FRAC_BITS;
...
@@ -158,7 +158,7 @@ cat $* | tr '\n' '@' | $EGREP --color=always -o '[^a-zA-Z0-9_]([a-zA-Z0-9_]*) *=
cat $TMP | tr '@' '\n'
-# doesnt work
+# does not work
#cat $* | tr '\n' '@' | $EGREP -o '[^a-zA-Z_0-9]([a-zA-Z][a-zA-Z_0-9]*) *=[^=].*\1' | $EGREP -o '[^a-zA-Z_0-9]([a-zA-Z][a-zA-Z_0-9]*) *=[^=].*\1 *=[^=]' >$TMP && printf "\nPossibly written 2x before read\n"
#cat $TMP | tr '@' '\n'
...