Commit 8f483108 authored by James Almer

avcodec: Drop deprecated audio resample API

Deprecated in 03/2013.
parent 24a8603a
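The header block removed below marks this API as deprecated in favour of libswresample, so a minimal migration sketch may help readers still carrying calls to the old functions. It uses only public libswresample calls (swr_alloc_set_opts(), swr_init(), swr_get_delay(), swr_convert(), swr_free()); the 44.1 kHz to 48 kHz stereo S16 conversion and the buffer sizing are illustrative assumptions, not taken from this commit.

#include <stdint.h>
#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>
#include <libavutil/error.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>

/* Sketch: convert interleaved S16 stereo from 44100 Hz to 48000 Hz.
 * On success returns the number of output frames and stores a buffer the
 * caller must release with av_freep(&out[0]). */
static int resample_sketch(uint8_t *out[1], const uint8_t *in, int in_samples)
{
    struct SwrContext *swr = swr_alloc_set_opts(NULL,
                                 AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 48000, /* output */
                                 AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100, /* input  */
                                 0, NULL);
    int ret = AVERROR(ENOMEM);
    int max_out;

    if (!swr)
        return ret;
    if ((ret = swr_init(swr)) < 0)
        goto end;

    /* Upper bound on the output frame count: rescale the input count and
     * add whatever the resampler is still holding internally. */
    max_out = (int)av_rescale_rnd(swr_get_delay(swr, 44100) + in_samples,
                                  48000, 44100, AV_ROUND_UP);
    if ((ret = av_samples_alloc(out, NULL, 2, max_out, AV_SAMPLE_FMT_S16, 0)) < 0)
        goto end;

    ret = swr_convert(swr, out, max_out, &in, in_samples); /* frames written */
end:
    swr_free(&swr);
    return ret;
}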
...@@ -45,8 +45,6 @@ OBJS = allcodecs.o \
profiles.o \
qsv_api.o \
raw.o \
resample.o \
resample2.o \
utils.o \
vorbis_parser.o \
xiph.o \
......
...@@ -5516,103 +5516,6 @@ int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
 * @}
 */
#if FF_API_AVCODEC_RESAMPLE
/**
* @defgroup lavc_resample Audio resampling
* @ingroup libavc
* @deprecated use libswresample instead
*
* @{
*/
struct ReSampleContext;
struct AVResampleContext;
typedef struct ReSampleContext ReSampleContext;
/**
* Initialize audio resampling context.
*
* @param output_channels number of output channels
* @param input_channels number of input channels
* @param output_rate output sample rate
* @param input_rate input sample rate
* @param sample_fmt_out requested output sample format
* @param sample_fmt_in input sample format
* @param filter_length length of each FIR filter in the filterbank relative to the cutoff frequency
* @param log2_phase_count log2 of the number of entries in the polyphase filterbank
* @param linear if 1 then the used FIR filter will be linearly interpolated
between the 2 closest, if 0 the closest will be used
* @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate
* @return allocated ReSampleContext, NULL if error occurred
*/
attribute_deprecated
ReSampleContext *av_audio_resample_init(int output_channels, int input_channels,
int output_rate, int input_rate,
enum AVSampleFormat sample_fmt_out,
enum AVSampleFormat sample_fmt_in,
int filter_length, int log2_phase_count,
int linear, double cutoff);
attribute_deprecated
int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples);
/**
* Free resample context.
*
* @param s a non-NULL pointer to a resample context previously
* created with av_audio_resample_init()
*/
attribute_deprecated
void audio_resample_close(ReSampleContext *s);
/**
* Initialize an audio resampler.
* Note, if either rate is not an integer then simply scale both rates up so they are.
* @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq
* @param log2_phase_count log2 of the number of entries in the polyphase filterbank
* @param linear If 1 then the used FIR filter will be linearly interpolated
between the 2 closest, if 0 the closest will be used
* @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate
*/
attribute_deprecated
struct AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_length, int log2_phase_count, int linear, double cutoff);
/**
* Resample an array of samples using a previously configured context.
* @param src an array of unconsumed samples
* @param consumed the number of samples of src which have been consumed are returned here
* @param src_size the number of unconsumed samples available
* @param dst_size the amount of space in samples available in dst
* @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context.
* @return the number of samples written in dst or -1 if an error occurred
*/
attribute_deprecated
int av_resample(struct AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx);
/**
* Compensate samplerate/timestamp drift. The compensation is done by changing
* the resampler parameters, so no audible clicks or similar distortions occur
* @param compensation_distance distance in output samples over which the compensation should be performed
* @param sample_delta number of output samples which should be output less
*
* example: av_resample_compensate(c, 10, 500)
* here instead of 510 samples only 500 samples would be output
*
* note, due to rounding the actual compensation might be slightly different,
* especially if the compensation_distance is large and the in_rate used during init is small
*/
attribute_deprecated
void av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance);
attribute_deprecated
void av_resample_close(struct AVResampleContext *c);
/**
* @}
*/
#endif
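/* For orientation, a minimal sketch of how the high-level trio documented
 * above was typically driven (interleaved S16, 44.1 kHz -> 48 kHz stereo).
 * The rates, the filter_length/log2_phase_count values (16, 10), linear=0,
 * cutoff=0.8 and the output sizing margin are illustrative assumptions, not
 * values taken from this commit. */
#include "libavcodec/avcodec.h"
#include "libavutil/mem.h"

static void old_api_sketch(short *in, int in_samples)
{
    ReSampleContext *rs = av_audio_resample_init(2, 2, 48000, 44100,
                                                 AV_SAMPLE_FMT_S16,
                                                 AV_SAMPLE_FMT_S16,
                                                 16, 10, 0, 0.8);
    if (rs) {
        /* Over-allocate: roughly in_samples * 48000/44100 frames plus slack. */
        short *out = av_malloc((in_samples * 48000LL / 44100 + 32) *
                               2 /* channels */ * sizeof(*out));
        if (out) {
            int out_samples = audio_resample(rs, out, in, in_samples);
            /* ... consume out_samples interleaved frames from out ... */
            (void)out_samples;
            av_free(out);
        }
        audio_resample_close(rs);
    }
}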
#if FF_API_AVPICTURE
/**
 * @addtogroup lavc_picture
......
/*
* samplerate conversion for both audio and video
* Copyright (c) 2000 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* samplerate conversion for both audio and video
*/
#include <string.h>
#include "avcodec.h"
#include "audioconvert.h"
#include "libavutil/opt.h"
#include "libavutil/mem.h"
#include "libavutil/samplefmt.h"
#if FF_API_AVCODEC_RESAMPLE
FF_DISABLE_DEPRECATION_WARNINGS
#define MAX_CHANNELS 8
struct AVResampleContext;
static const char *context_to_name(void *ptr)
{
return "audioresample";
}
static const AVOption options[] = {{NULL}};
static const AVClass audioresample_context_class = {
"ReSampleContext", context_to_name, options, LIBAVUTIL_VERSION_INT
};
struct ReSampleContext {
struct AVResampleContext *resample_context;
short *temp[MAX_CHANNELS];
int temp_len;
float ratio;
/* channel convert */
int input_channels, output_channels, filter_channels;
AVAudioConvert *convert_ctx[2];
enum AVSampleFormat sample_fmt[2]; ///< input and output sample format
unsigned sample_size[2]; ///< size of one sample in sample_fmt
short *buffer[2]; ///< buffers used for conversion to S16
unsigned buffer_size[2]; ///< sizes of allocated buffers
};
/* n1: number of samples */
static void stereo_to_mono(short *output, short *input, int n1)
{
short *p, *q;
int n = n1;
p = input;
q = output;
while (n >= 4) {
q[0] = (p[0] + p[1]) >> 1;
q[1] = (p[2] + p[3]) >> 1;
q[2] = (p[4] + p[5]) >> 1;
q[3] = (p[6] + p[7]) >> 1;
q += 4;
p += 8;
n -= 4;
}
while (n > 0) {
q[0] = (p[0] + p[1]) >> 1;
q++;
p += 2;
n--;
}
}
/* n1: number of samples */
static void mono_to_stereo(short *output, short *input, int n1)
{
short *p, *q;
int n = n1;
int v;
p = input;
q = output;
while (n >= 4) {
v = p[0]; q[0] = v; q[1] = v;
v = p[1]; q[2] = v; q[3] = v;
v = p[2]; q[4] = v; q[5] = v;
v = p[3]; q[6] = v; q[7] = v;
q += 8;
p += 4;
n -= 4;
}
while (n > 0) {
v = p[0]; q[0] = v; q[1] = v;
q += 2;
p += 1;
n--;
}
}
/*
5.1 to stereo input: [fl, fr, c, lfe, rl, rr]
- Left = front_left + rear_gain * rear_left + center_gain * center
- Right = front_right + rear_gain * rear_right + center_gain * center
Where rear_gain is usually around 0.5-1.0 and
center_gain is almost always 0.7 (-3 dB)
*/
static void surround_to_stereo(short **output, short *input, int channels, int samples)
{
int i;
short l, r;
for (i = 0; i < samples; i++) {
int fl,fr,c,rl,rr;
fl = input[0];
fr = input[1];
c = input[2];
// lfe = input[3];
rl = input[4];
rr = input[5];
l = av_clip_int16(fl + (0.5 * rl) + (0.7 * c));
r = av_clip_int16(fr + (0.5 * rr) + (0.7 * c));
/* output l & r. */
*output[0]++ = l;
*output[1]++ = r;
/* increment input. */
input += channels;
}
}
static void deinterleave(short **output, short *input, int channels, int samples)
{
int i, j;
for (i = 0; i < samples; i++) {
for (j = 0; j < channels; j++) {
*output[j]++ = *input++;
}
}
}
static void interleave(short *output, short **input, int channels, int samples)
{
int i, j;
for (i = 0; i < samples; i++) {
for (j = 0; j < channels; j++) {
*output++ = *input[j]++;
}
}
}
static void ac3_5p1_mux(short *output, short *input1, short *input2, int n)
{
int i;
short l, r;
for (i = 0; i < n; i++) {
l = *input1++;
r = *input2++;
*output++ = l; /* left */
*output++ = (l / 2) + (r / 2); /* center */
*output++ = r; /* right */
*output++ = 0; /* left surround */
*output++ = 0; /* right surround */
*output++ = 0; /* low freq */
}
}
#define SUPPORT_RESAMPLE(ch1, ch2, ch3, ch4, ch5, ch6, ch7, ch8) \
ch8<<7 | ch7<<6 | ch6<<5 | ch5<<4 | ch4<<3 | ch3<<2 | ch2<<1 | ch1<<0
static const uint8_t supported_resampling[MAX_CHANNELS] = {
// output ch: 1 2 3 4 5 6 7 8
SUPPORT_RESAMPLE(1, 1, 0, 0, 0, 0, 0, 0), // 1 input channel
SUPPORT_RESAMPLE(1, 1, 0, 0, 0, 1, 0, 0), // 2 input channels
SUPPORT_RESAMPLE(0, 0, 1, 0, 0, 0, 0, 0), // 3 input channels
SUPPORT_RESAMPLE(0, 0, 0, 1, 0, 0, 0, 0), // 4 input channels
SUPPORT_RESAMPLE(0, 0, 0, 0, 1, 0, 0, 0), // 5 input channels
SUPPORT_RESAMPLE(0, 1, 0, 0, 0, 1, 0, 0), // 6 input channels
SUPPORT_RESAMPLE(0, 0, 0, 0, 0, 0, 1, 0), // 7 input channels
SUPPORT_RESAMPLE(0, 0, 0, 0, 0, 0, 0, 1), // 8 input channels
};
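/* Reading the table above: each row is a bitmask of allowed output channel
 * counts for the given input channel count, with bit (out-1) set when the
 * conversion is supported. For example the 2-input-channel row permits 1, 2
 * or 6 output channels, and av_audio_resample_init() below tests
 * supported_resampling[input_channels-1] & (1 << (output_channels-1)). */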
ReSampleContext *av_audio_resample_init(int output_channels, int input_channels,
int output_rate, int input_rate,
enum AVSampleFormat sample_fmt_out,
enum AVSampleFormat sample_fmt_in,
int filter_length, int log2_phase_count,
int linear, double cutoff)
{
ReSampleContext *s;
if (input_channels > MAX_CHANNELS) {
av_log(NULL, AV_LOG_ERROR,
"Resampling with input channels greater than %d is unsupported.\n",
MAX_CHANNELS);
return NULL;
}
if (!(supported_resampling[input_channels-1] & (1<<(output_channels-1)))) {
int i;
av_log(NULL, AV_LOG_ERROR, "Unsupported audio resampling. Allowed "
"output channels for %d input channel%s", input_channels,
input_channels > 1 ? "s:" : ":");
for (i = 0; i < MAX_CHANNELS; i++)
if (supported_resampling[input_channels-1] & (1<<i))
av_log(NULL, AV_LOG_ERROR, " %d", i + 1);
av_log(NULL, AV_LOG_ERROR, "\n");
return NULL;
}
s = av_mallocz(sizeof(ReSampleContext));
if (!s) {
av_log(NULL, AV_LOG_ERROR, "Can't allocate memory for resample context.\n");
return NULL;
}
s->ratio = (float)output_rate / (float)input_rate;
s->input_channels = input_channels;
s->output_channels = output_channels;
s->filter_channels = s->input_channels;
if (s->output_channels < s->filter_channels)
s->filter_channels = s->output_channels;
s->sample_fmt[0] = sample_fmt_in;
s->sample_fmt[1] = sample_fmt_out;
s->sample_size[0] = av_get_bytes_per_sample(s->sample_fmt[0]);
s->sample_size[1] = av_get_bytes_per_sample(s->sample_fmt[1]);
if (s->sample_fmt[0] != AV_SAMPLE_FMT_S16) {
if (!(s->convert_ctx[0] = av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
s->sample_fmt[0], 1, NULL, 0))) {
av_log(s, AV_LOG_ERROR,
"Cannot convert %s sample format to s16 sample format\n",
av_get_sample_fmt_name(s->sample_fmt[0]));
av_free(s);
return NULL;
}
}
if (s->sample_fmt[1] != AV_SAMPLE_FMT_S16) {
if (!(s->convert_ctx[1] = av_audio_convert_alloc(s->sample_fmt[1], 1,
AV_SAMPLE_FMT_S16, 1, NULL, 0))) {
av_log(s, AV_LOG_ERROR,
"Cannot convert s16 sample format to %s sample format\n",
av_get_sample_fmt_name(s->sample_fmt[1]));
av_audio_convert_free(s->convert_ctx[0]);
av_free(s);
return NULL;
}
}
s->resample_context = av_resample_init(output_rate, input_rate,
filter_length, log2_phase_count,
linear, cutoff);
*(const AVClass**)s->resample_context = &audioresample_context_class;
return s;
}
/* resample audio. 'nb_samples' is the number of input samples */
/* XXX: optimize it ! */
int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples)
{
int i, nb_samples1;
short *bufin[MAX_CHANNELS];
short *bufout[MAX_CHANNELS];
short *buftmp2[MAX_CHANNELS], *buftmp3[MAX_CHANNELS];
short *output_bak = NULL;
int lenout;
if (s->sample_fmt[0] != AV_SAMPLE_FMT_S16) {
int istride[1] = { s->sample_size[0] };
int ostride[1] = { 2 };
const void *ibuf[1] = { input };
void *obuf[1];
unsigned input_size = nb_samples * s->input_channels * 2;
if (!s->buffer_size[0] || s->buffer_size[0] < input_size) {
av_free(s->buffer[0]);
s->buffer_size[0] = input_size;
s->buffer[0] = av_malloc(s->buffer_size[0]);
if (!s->buffer[0]) {
av_log(s->resample_context, AV_LOG_ERROR, "Could not allocate buffer\n");
return 0;
}
}
obuf[0] = s->buffer[0];
if (av_audio_convert(s->convert_ctx[0], obuf, ostride,
ibuf, istride, nb_samples * s->input_channels) < 0) {
av_log(s->resample_context, AV_LOG_ERROR,
"Audio sample format conversion failed\n");
return 0;
}
input = s->buffer[0];
}
lenout= 2*s->output_channels*nb_samples * s->ratio + 16;
if (s->sample_fmt[1] != AV_SAMPLE_FMT_S16) {
int out_size = lenout * av_get_bytes_per_sample(s->sample_fmt[1]) *
s->output_channels;
output_bak = output;
if (!s->buffer_size[1] || s->buffer_size[1] < out_size) {
av_free(s->buffer[1]);
s->buffer_size[1] = out_size;
s->buffer[1] = av_malloc(s->buffer_size[1]);
if (!s->buffer[1]) {
av_log(s->resample_context, AV_LOG_ERROR, "Could not allocate buffer\n");
return 0;
}
}
output = s->buffer[1];
}
/* XXX: move those malloc to resample init code */
for (i = 0; i < s->filter_channels; i++) {
bufin[i] = av_malloc_array((nb_samples + s->temp_len), sizeof(short));
bufout[i] = av_malloc_array(lenout, sizeof(short));
if (!bufin[i] || !bufout[i]) {
av_log(s->resample_context, AV_LOG_ERROR, "Could not allocate buffer\n");
nb_samples1 = 0;
goto fail;
}
memcpy(bufin[i], s->temp[i], s->temp_len * sizeof(short));
buftmp2[i] = bufin[i] + s->temp_len;
}
if (s->input_channels == 2 && s->output_channels == 1) {
buftmp3[0] = output;
stereo_to_mono(buftmp2[0], input, nb_samples);
} else if (s->output_channels >= 2 && s->input_channels == 1) {
buftmp3[0] = bufout[0];
memcpy(buftmp2[0], input, nb_samples * sizeof(short));
} else if (s->input_channels == 6 && s->output_channels ==2) {
buftmp3[0] = bufout[0];
buftmp3[1] = bufout[1];
surround_to_stereo(buftmp2, input, s->input_channels, nb_samples);
} else if (s->output_channels >= s->input_channels && s->input_channels >= 2) {
for (i = 0; i < s->input_channels; i++) {
buftmp3[i] = bufout[i];
}
deinterleave(buftmp2, input, s->input_channels, nb_samples);
} else {
buftmp3[0] = output;
memcpy(buftmp2[0], input, nb_samples * sizeof(short));
}
nb_samples += s->temp_len;
/* resample each channel */
nb_samples1 = 0; /* avoid warning */
for (i = 0; i < s->filter_channels; i++) {
int consumed;
int is_last = i + 1 == s->filter_channels;
nb_samples1 = av_resample(s->resample_context, buftmp3[i], bufin[i],
&consumed, nb_samples, lenout, is_last);
s->temp_len = nb_samples - consumed;
s->temp[i] = av_realloc_array(s->temp[i], s->temp_len, sizeof(short));
memcpy(s->temp[i], bufin[i] + consumed, s->temp_len * sizeof(short));
}
if (s->output_channels == 2 && s->input_channels == 1) {
mono_to_stereo(output, buftmp3[0], nb_samples1);
} else if (s->output_channels == 6 && s->input_channels == 2) {
ac3_5p1_mux(output, buftmp3[0], buftmp3[1], nb_samples1);
} else if ((s->output_channels == s->input_channels && s->input_channels >= 2) ||
(s->output_channels == 2 && s->input_channels == 6)) {
interleave(output, buftmp3, s->output_channels, nb_samples1);
}
if (s->sample_fmt[1] != AV_SAMPLE_FMT_S16) {
int istride[1] = { 2 };
int ostride[1] = { s->sample_size[1] };
const void *ibuf[1] = { output };
void *obuf[1] = { output_bak };
if (av_audio_convert(s->convert_ctx[1], obuf, ostride,
ibuf, istride, nb_samples1 * s->output_channels) < 0) {
av_log(s->resample_context, AV_LOG_ERROR,
"Audio sample format conversion failed\n");
return 0;
}
}
fail:
for (i = 0; i < s->filter_channels; i++) {
av_free(bufin[i]);
av_free(bufout[i]);
}
return nb_samples1;
}
void audio_resample_close(ReSampleContext *s)
{
int i;
av_resample_close(s->resample_context);
for (i = 0; i < s->filter_channels; i++)
av_freep(&s->temp[i]);
av_freep(&s->buffer[0]);
av_freep(&s->buffer[1]);
av_audio_convert_free(s->convert_ctx[0]);
av_audio_convert_free(s->convert_ctx[1]);
av_free(s);
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif
/*
* audio resampling
* Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* audio resampling
* @author Michael Niedermayer <michaelni@gmx.at>
*/
#include "libavutil/avassert.h"
#include "avcodec.h"
#include "libavutil/common.h"
#if FF_API_AVCODEC_RESAMPLE
#ifndef CONFIG_RESAMPLE_HP
#define FILTER_SHIFT 15
typedef int16_t FELEM;
typedef int32_t FELEM2;
typedef int64_t FELEML;
#define FELEM_MAX INT16_MAX
#define FELEM_MIN INT16_MIN
#define WINDOW_TYPE 9
#elif !defined(CONFIG_RESAMPLE_AUDIOPHILE_KIDDY_MODE)
#define FILTER_SHIFT 30
#define FELEM int32_t
#define FELEM2 int64_t
#define FELEML int64_t
#define FELEM_MAX INT32_MAX
#define FELEM_MIN INT32_MIN
#define WINDOW_TYPE 12
#else
#define FILTER_SHIFT 0
typedef double FELEM;
typedef double FELEM2;
typedef double FELEML;
#define WINDOW_TYPE 24
#endif
typedef struct AVResampleContext{
const AVClass *av_class;
FELEM *filter_bank;
int filter_length;
int ideal_dst_incr;
int dst_incr;
int index;
int frac;
int src_incr;
int compensation_distance;
int phase_shift;
int phase_mask;
int linear;
}AVResampleContext;
/**
* 0th order modified bessel function of the first kind.
*/
static double bessel(double x){
double v=1;
double lastv=0;
double t=1;
int i;
x= x*x/4;
for(i=1; v != lastv; i++){
lastv=v;
t *= x/(i*i);
v += t;
}
return v;
}
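/* What the loop above evaluates, for orientation: the power series
 *   I0(x) = sum_{k>=0} (x^2/4)^k / (k!)^2.
 * After the substitution x = x*x/4, each iteration does t *= x/(i*i), so t
 * holds (x^2/4)^i / (i!)^2, and the sum v grows until adding t no longer
 * changes it in double precision. */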
/**
* Build a polyphase filterbank.
* @param factor resampling factor
* @param scale wanted sum of coefficients for each filter
* @param type 0->cubic, 1->blackman nuttall windowed sinc, 2..16->kaiser windowed sinc beta=2..16
* @return 0 on success, negative on error
*/
static int build_filter(FELEM *filter, double factor, int tap_count, int phase_count, int scale, int type){
int ph, i;
double x, y, w;
double *tab = av_malloc_array(tap_count, sizeof(*tab));
const int center= (tap_count-1)/2;
if (!tab)
return AVERROR(ENOMEM);
/* if upsampling, only need to interpolate, no filter */
if (factor > 1.0)
factor = 1.0;
for(ph=0;ph<phase_count;ph++) {
double norm = 0;
for(i=0;i<tap_count;i++) {
x = M_PI * ((double)(i - center) - (double)ph / phase_count) * factor;
if (x == 0) y = 1.0;
else y = sin(x) / x;
switch(type){
case 0:{
const float d= -0.5; //first order derivative = -0.5
x = fabs(((double)(i - center) - (double)ph / phase_count) * factor);
if(x<1.0) y= 1 - 3*x*x + 2*x*x*x + d*( -x*x + x*x*x);
else y= d*(-4 + 8*x - 5*x*x + x*x*x);
break;}
case 1:
w = 2.0*x / (factor*tap_count) + M_PI;
y *= 0.3635819 - 0.4891775 * cos(w) + 0.1365995 * cos(2*w) - 0.0106411 * cos(3*w);
break;
default:
w = 2.0*x / (factor*tap_count*M_PI);
y *= bessel(type*sqrt(FFMAX(1-w*w, 0)));
break;
}
tab[i] = y;
norm += y;
}
/* normalize so that a uniform color remains the same */
for(i=0;i<tap_count;i++) {
#ifdef CONFIG_RESAMPLE_AUDIOPHILE_KIDDY_MODE
filter[ph * tap_count + i] = tab[i] / norm;
#else
filter[ph * tap_count + i] = av_clip(lrintf(tab[i] * scale / norm), FELEM_MIN, FELEM_MAX);
#endif
}
}
#if 0
{
#define LEN 1024
int j,k;
double sine[LEN + tap_count];
double filtered[LEN];
double maxff=-2, minff=2, maxsf=-2, minsf=2;
for(i=0; i<LEN; i++){
double ss=0, sf=0, ff=0;
for(j=0; j<LEN+tap_count; j++)
sine[j]= cos(i*j*M_PI/LEN);
for(j=0; j<LEN; j++){
double sum=0;
ph=0;
for(k=0; k<tap_count; k++)
sum += filter[ph * tap_count + k] * sine[k+j];
filtered[j]= sum / (1<<FILTER_SHIFT);
ss+= sine[j + center] * sine[j + center];
ff+= filtered[j] * filtered[j];
sf+= sine[j + center] * filtered[j];
}
ss= sqrt(2*ss/LEN);
ff= sqrt(2*ff/LEN);
sf= 2*sf/LEN;
maxff= FFMAX(maxff, ff);
minff= FFMIN(minff, ff);
maxsf= FFMAX(maxsf, sf);
minsf= FFMIN(minsf, sf);
if(i%11==0){
av_log(NULL, AV_LOG_ERROR, "i:%4d ss:%f ff:%13.6e-%13.6e sf:%13.6e-%13.6e\n", i, ss, maxff, minff, maxsf, minsf);
minff=minsf= 2;
maxff=maxsf= -2;
}
}
}
#endif
av_free(tab);
return 0;
}
AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_size, int phase_shift, int linear, double cutoff){
AVResampleContext *c= av_mallocz(sizeof(AVResampleContext));
double factor= FFMIN(out_rate * cutoff / in_rate, 1.0);
int phase_count= 1<<phase_shift;
if (!c)
return NULL;
c->phase_shift= phase_shift;
c->phase_mask= phase_count-1;
c->linear= linear;
c->filter_length= FFMAX((int)ceil(filter_size/factor), 1);
c->filter_bank= av_mallocz_array(c->filter_length, (phase_count+1)*sizeof(FELEM));
if (!c->filter_bank)
goto error;
if (build_filter(c->filter_bank, factor, c->filter_length, phase_count, 1<<FILTER_SHIFT, WINDOW_TYPE))
goto error;
memcpy(&c->filter_bank[c->filter_length*phase_count+1], c->filter_bank, (c->filter_length-1)*sizeof(FELEM));
c->filter_bank[c->filter_length*phase_count]= c->filter_bank[c->filter_length - 1];
if(!av_reduce(&c->src_incr, &c->dst_incr, out_rate, in_rate * (int64_t)phase_count, INT32_MAX/2))
goto error;
c->ideal_dst_incr= c->dst_incr;
c->index= -phase_count*((c->filter_length-1)/2);
return c;
error:
av_free(c->filter_bank);
av_free(c);
return NULL;
}
void av_resample_close(AVResampleContext *c){
av_freep(&c->filter_bank);
av_freep(&c);
}
void av_resample_compensate(AVResampleContext *c, int sample_delta, int compensation_distance){
// sample_delta += (c->ideal_dst_incr - c->dst_incr)*(int64_t)c->compensation_distance / c->ideal_dst_incr;
c->compensation_distance= compensation_distance;
c->dst_incr = c->ideal_dst_incr - c->ideal_dst_incr * (int64_t)sample_delta / compensation_distance;
}
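/* Worked numbers for the formula above, using the example from the removed
 * header documentation (av_resample_compensate(c, 10, 500), i.e. 500 output
 * samples written where 510 would otherwise appear): dst_incr becomes
 * ideal_dst_incr - ideal_dst_incr * 10 / 500, a 2% change, and av_resample()
 * switches back to ideal_dst_incr once compensation_distance output samples
 * have been produced. */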
int av_resample(AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx){
int dst_index, i;
int index= c->index;
int frac= c->frac;
int dst_incr_frac= c->dst_incr % c->src_incr;
int dst_incr= c->dst_incr / c->src_incr;
int compensation_distance= c->compensation_distance;
if(compensation_distance == 0 && c->filter_length == 1 && c->phase_shift==0){
int64_t index2= ((int64_t)index)<<32;
int64_t incr= (1LL<<32) * c->dst_incr / c->src_incr;
dst_size= FFMIN(dst_size, (src_size-1-index) * (int64_t)c->src_incr / c->dst_incr);
for(dst_index=0; dst_index < dst_size; dst_index++){
dst[dst_index] = src[index2>>32];
index2 += incr;
}
index += dst_index * dst_incr;
index += (frac + dst_index * (int64_t)dst_incr_frac) / c->src_incr;
frac = (frac + dst_index * (int64_t)dst_incr_frac) % c->src_incr;
}else{
for(dst_index=0; dst_index < dst_size; dst_index++){
FELEM *filter= c->filter_bank + c->filter_length*(index & c->phase_mask);
int sample_index= index >> c->phase_shift;
FELEM2 val=0;
if(sample_index < 0){
for(i=0; i<c->filter_length; i++)
val += src[FFABS(sample_index + i) % src_size] * filter[i];
}else if(sample_index + c->filter_length > src_size){
break;
}else if(c->linear){
FELEM2 v2=0;
for(i=0; i<c->filter_length; i++){
val += src[sample_index + i] * (FELEM2)filter[i];
v2 += src[sample_index + i] * (FELEM2)filter[i + c->filter_length];
}
val+=(v2-val)*(FELEML)frac / c->src_incr;
}else{
for(i=0; i<c->filter_length; i++){
val += src[sample_index + i] * (FELEM2)filter[i];
}
}
#ifdef CONFIG_RESAMPLE_AUDIOPHILE_KIDDY_MODE
dst[dst_index] = av_clip_int16(lrintf(val));
#else
val = (val + (1<<(FILTER_SHIFT-1)))>>FILTER_SHIFT;
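/* Branchless clip of val to the int16 range: when val overflows 16 bits,
 * (val>>31) ^ 32767 maps it to the corresponding saturation bound
 * (32767 for positive overflow, -32768 for negative). */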
dst[dst_index] = (unsigned)(val + 32768) > 65535 ? (val>>31) ^ 32767 : val;
#endif
frac += dst_incr_frac;
index += dst_incr;
if(frac >= c->src_incr){
frac -= c->src_incr;
index++;
}
if(dst_index + 1 == compensation_distance){
compensation_distance= 0;
dst_incr_frac= c->ideal_dst_incr % c->src_incr;
dst_incr= c->ideal_dst_incr / c->src_incr;
}
}
}
*consumed= FFMAX(index, 0) >> c->phase_shift;
if(index>=0) index &= c->phase_mask;
if(compensation_distance){
compensation_distance -= dst_index;
av_assert2(compensation_distance > 0);
}
if(update_ctx){
c->frac= frac;
c->index= index;
c->dst_incr= dst_incr_frac + c->src_incr*dst_incr;
c->compensation_distance= compensation_distance;
}
return dst_index;
}
#endif
...@@ -57,9 +57,6 @@
#ifndef FF_API_AUDIO_CONVERT
#define FF_API_AUDIO_CONVERT (LIBAVCODEC_VERSION_MAJOR < 58)
#endif
#ifndef FF_API_AVCODEC_RESAMPLE
#define FF_API_AVCODEC_RESAMPLE FF_API_AUDIO_CONVERT
#endif
#ifndef FF_API_LOWRES
#define FF_API_LOWRES (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
......