/*
 * Windows Media Audio Voice decoder.
 * Copyright (c) 2009 Ronald S. Bultje
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * @brief Windows Media Audio Voice compatible decoder
 * @author Ronald S. Bultje <rsbultje@gmail.com>
 */

#define UNCHECKED_BITSTREAM_READER 1

#include <math.h>

#include "libavutil/channel_layout.h"
#include "libavutil/float_dsp.h"
#include "libavutil/mem.h"
#include "avcodec.h"
#include "internal.h"
#include "get_bits.h"
#include "put_bits.h"
#include "wmavoice_data.h"
#include "celp_filters.h"
#include "acelp_vectors.h"
#include "acelp_filters.h"
#include "lsp.h"
#include "dct.h"
#include "rdft.h"
#include "sinewin.h"

#define MAX_BLOCKS           8   ///< maximum number of blocks per frame
#define MAX_LSPS             16  ///< maximum filter order
#define MAX_LSPS_ALIGN16     16  ///< same as #MAX_LSPS; needs to be multiple
                                 ///< of 16 for ASM input buffer alignment
#define MAX_FRAMES           3   ///< maximum number of frames per superframe
#define MAX_FRAMESIZE        160 ///< maximum number of samples per frame
#define MAX_SIGNAL_HISTORY   416 ///< maximum excitation signal history
#define MAX_SFRAMESIZE       (MAX_FRAMESIZE * MAX_FRAMES)
                                 ///< maximum number of samples per superframe
#define SFRAME_CACHE_MAXSIZE 256 ///< maximum cache size for frame data that
                                 ///< was split over two packets
#define VLC_NBITS            6   ///< number of bits to read per VLC iteration

/**
 * Frame type VLC coding.
 */
static VLC frame_type_vlc;

/**
 * Adaptive codebook types.
 */
enum {
    ACB_TYPE_NONE       = 0, ///< no adaptive codebook (only hardcoded fixed)
    ACB_TYPE_ASYMMETRIC = 1, ///< adaptive codebook with per-frame pitch, which
                             ///< we interpolate to get a per-sample pitch.
                             ///< Signal is generated using an asymmetric sinc
                             ///< window function
                             ///< @note see #wmavoice_ipol1_coeffs
    ACB_TYPE_HAMMING    = 2  ///< Per-block pitch with signal generation using
                             ///< a Hamming sinc window function
                             ///< @note see #wmavoice_ipol2_coeffs
};

/**
 * Fixed codebook types.
 */
enum {
    FCB_TYPE_SILENCE    = 0, ///< comfort noise during silence
                             ///< generated from a hardcoded (fixed) codebook
                             ///< with per-frame (low) gain values
    FCB_TYPE_HARDCODED  = 1, ///< hardcoded (fixed) codebook with per-block
                             ///< gain values
    FCB_TYPE_AW_PULSES  = 2, ///< Pitch-adaptive window (AW) pulse signals,
                             ///< used in particular for low-bitrate streams
    FCB_TYPE_EXC_PULSES = 3, ///< Innovation (fixed) codebook pulse sets in
                             ///< combinations of either single pulses or
                             ///< pulse pairs
};

/**
 * Description of frame types.
 */
static const struct frame_type_desc {
    uint8_t n_blocks;     ///< number of blocks per frame (each block
                          ///< contains 160/#n_blocks samples)
    uint8_t log_n_blocks; ///< log2(#n_blocks)
    uint8_t acb_type;     ///< Adaptive codebook type (ACB_TYPE_*)
    uint8_t fcb_type;     ///< Fixed codebook type (FCB_TYPE_*)
    uint8_t dbl_pulses;   ///< how many pulse vectors have pulse pairs
                          ///< (rather than just one single pulse)
                          ///< only if #fcb_type == #FCB_TYPE_EXC_PULSES
    uint16_t frame_size;  ///< the amount of bits that make up the block
                          ///< data (per frame)
} frame_descs[17] = {
    { 1, 0, ACB_TYPE_NONE,       FCB_TYPE_SILENCE,    0,   0 },
    { 2, 1, ACB_TYPE_NONE,       FCB_TYPE_HARDCODED,  0,  28 },
    { 2, 1, ACB_TYPE_ASYMMETRIC, FCB_TYPE_AW_PULSES,  0,  46 },
    { 2, 1, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 2,  80 },
    { 2, 1, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 5, 104 },
    { 4, 2, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 0, 108 },
    { 4, 2, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 2, 132 },
    { 4, 2, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 5, 168 },
    { 2, 1, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 0,  64 },
    { 2, 1, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 2,  80 },
    { 2, 1, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 5, 104 },
    { 4, 2, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 0, 108 },
    { 4, 2, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 2, 132 },
    { 4, 2, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 5, 168 },
    { 8, 3, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 0, 176 },
    { 8, 3, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 2, 208 },
    { 8, 3, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 5, 256 }
};
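/* Which entry of this table applies to a frame is determined by decoding the
 * frame-type VLC (#frame_type_vlc) and mapping the result through #vbm_tree[]. */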

/**
 * WMA Voice decoding context.
 */
typedef struct {
    /**
     * @name Global values specified in the stream header / extradata or used all over.
     * @{
     */
    GetBitContext gb;             ///< packet bitreader. During decoder init,
                                  ///< it contains the extradata from the
                                  ///< demuxer. During decoding, it contains
                                  ///< packet data.
    int8_t vbm_tree[25];          ///< converts VLC codes to frame type

    int spillover_bitsize;        ///< number of bits used to specify
                                  ///< #spillover_nbits in the packet header
                                  ///< = ceil(log2(ctx->block_align << 3))
    int history_nsamples;         ///< number of samples in history for signal
                                  ///< prediction (through ACB)

    /* postfilter specific values */
    int do_apf;                   ///< whether to apply the averaged
                                  ///< projection filter (APF)
    int denoise_strength;         ///< strength of denoising in Wiener filter
                                  ///< [0-11]
    int denoise_tilt_corr;        ///< Whether to apply tilt correction to the
                                  ///< Wiener filter coefficients (postfilter)
    int dc_level;                 ///< Predicted amount of DC noise, based
                                  ///< on which a DC removal filter is used

    int lsps;                     ///< number of LSPs per frame [10 or 16]
    int lsp_q_mode;               ///< defines quantizer defaults [0, 1]
    int lsp_def_mode;             ///< defines different sets of LSP defaults
                                  ///< [0, 1]
    int frame_lsp_bitsize;        ///< size (in bits) of LSPs, when encoded
                                  ///< per-frame (independent coding)
    int sframe_lsp_bitsize;       ///< size (in bits) of LSPs, when encoded
                                  ///< per superframe (residual coding)

    int min_pitch_val;            ///< base value for pitch parsing code
    int max_pitch_val;            ///< max value + 1 for pitch parsing
    int pitch_nbits;              ///< number of bits used to specify the
                                  ///< pitch value in the frame header
    int block_pitch_nbits;        ///< number of bits used to specify the
                                  ///< first block's pitch value
    int block_pitch_range;        ///< range of the block pitch
    int block_delta_pitch_nbits;  ///< number of bits used to specify the
                                  ///< delta pitch between this and the last
                                  ///< block's pitch value, used in all but
                                  ///< first block
    int block_delta_pitch_hrange; ///< 1/2 range of the delta (full range is
                                  ///< from -this to +this-1)
    uint16_t block_conv_table[4]; ///< boundaries for block pitch unit/scale
                                  ///< conversion

    /**
     * @}
     *
     * @name Packet values specified in the packet header or related to a packet.
     *
     * A packet is considered to be a single unit of data provided to this
     * decoder by the demuxer.
     * @{
     */
    int spillover_nbits;          ///< number of bits of the previous packet's
                                  ///< last superframe preceding this
                                  ///< packet's first full superframe (useful
                                  ///< for re-synchronization also)
    int has_residual_lsps;        ///< if set, superframes contain one set of
                                  ///< LSPs that cover all frames, encoded as
                                  ///< independent and residual LSPs; if not
                                  ///< set, each frame contains its own, fully
                                  ///< independent, LSPs
    int skip_bits_next;           ///< number of bits to skip at the next call
                                  ///< to #wmavoice_decode_packet() (since
                                  ///< they're part of the previous superframe)

    uint8_t sframe_cache[SFRAME_CACHE_MAXSIZE + FF_INPUT_BUFFER_PADDING_SIZE];
                                  ///< cache for superframe data split over
                                  ///< multiple packets
    int sframe_cache_size;        ///< set to >0 if we have data from an
                                  ///< (incomplete) superframe from a previous
                                  ///< packet that spilled over in the current
                                  ///< packet; specifies the amount of bits in
                                  ///< #sframe_cache
    PutBitContext pb;             ///< bitstream writer for #sframe_cache

    /**
     * @}
     *
     * @name Frame and superframe values
     * Superframe and frame data - these can change from frame to frame,
     * although some of them also serve as a cache / history for the next
     * frame or superframe.
     * @{
     */
    double prev_lsps[MAX_LSPS];   ///< LSPs of the last frame of the previous
                                  ///< superframe
    int last_pitch_val;           ///< pitch value of the previous frame
    int last_acb_type;            ///< frame type [0-2] of the previous frame
    int pitch_diff_sh16;          ///< ((cur_pitch_val - #last_pitch_val)
                                  ///< << 16) / #MAX_FRAMESIZE
    float silence_gain;           ///< set for use in blocks if #ACB_TYPE_NONE

    int aw_idx_is_ext;            ///< whether the AW index was encoded in
                                  ///< 8 bits (instead of 6)
    int aw_pulse_range;           ///< the range over which #aw_pulse_set1()
                                  ///< can apply the pulse, relative to the
                                  ///< value in aw_first_pulse_off. The exact
                                  ///< position of the first AW-pulse is within
                                  ///< [pulse_off, pulse_off + this], and
                                  ///< depends on bitstream values; [16 or 24]
    int aw_n_pulses[2];           ///< number of AW-pulses in each block; note
                                  ///< that this number can be negative (in
                                  ///< which case it basically means "zero")
    int aw_first_pulse_off[2];    ///< index of first sample to which to
                                  ///< apply AW-pulses, or -0xff if unset
    int aw_next_pulse_off_cache;  ///< the position (relative to start of the
                                  ///< second block) at which pulses should
                                  ///< start to be positioned, serves as a
                                  ///< cache for pitch-adaptive window pulses
                                  ///< between blocks

    int frame_cntr;               ///< current frame index [0 - 0xFFFE]; is
                                  ///< only used for comfort noise in #pRNG()
    float gain_pred_err[6];       ///< cache for gain prediction
    float excitation_history[MAX_SIGNAL_HISTORY];
                                  ///< cache of the signal of previous
                                  ///< superframes, used as a history for
                                  ///< signal generation
    float synth_history[MAX_LSPS]; ///< see #excitation_history
    /**
     * @}
     *
     * @name Postfilter values
     *
     * Variables used for postfilter implementation, mostly history for
     * smoothing and so on, and context variables for FFT/iFFT.
     * @{
     */
    RDFTContext rdft, irdft;      ///< contexts for FFT-calculation in the
                                  ///< postfilter (for denoise filter)
    DCTContext dct, dst;          ///< contexts for phase shift (in Hilbert
                                  ///< transform, part of postfilter)
    float sin[511], cos[511];     ///< 8-bit cosine/sine windows over [-pi,pi]
                                  ///< range
    float postfilter_agc;         ///< gain control memory, used in
                                  ///< #adaptive_gain_control()
    float dcf_mem[2];             ///< DC filter history
    float zero_exc_pf[MAX_SIGNAL_HISTORY + MAX_SFRAMESIZE];
                                  ///< zero filter output (i.e. excitation)
                                  ///< by postfilter
    float denoise_filter_cache[MAX_FRAMESIZE];
    int   denoise_filter_cache_size; ///< samples in #denoise_filter_cache
    DECLARE_ALIGNED(32, float, tilted_lpcs_pf)[0x80];
                                  ///< aligned buffer for LPC tilting
    DECLARE_ALIGNED(32, float, denoise_coeffs_pf)[0x80];
                                  ///< aligned buffer for denoise coefficients
    DECLARE_ALIGNED(32, float, synth_filter_out_buf)[0x80 + MAX_LSPS_ALIGN16];
                                  ///< aligned buffer for postfilter speech
                                  ///< synthesis
    /**
     * @}
     */
} WMAVoiceContext;

/**
 * Set up the variable bit mode (VBM) tree from container extradata.
 * @param gb bit I/O context.
 *           The bit context (s->gb) should be loaded with byte 23-46 of the
 *           container extradata (i.e. the ones containing the VBM tree).
 * @param vbm_tree pointer to array to which the decoded VBM tree will be
 *                 written.
 * @return 0 on success, <0 on error.
 */
static av_cold int decode_vbmtree(GetBitContext *gb, int8_t vbm_tree[25])
{
    static const uint8_t bits[] = {
         2,  2,  2,  4,  4,  4,
         6,  6,  6,  8,  8,  8,
        10, 10, 10, 12, 12, 12,
        14, 14, 14, 14
    };
    static const uint16_t codes[] = {
          0x0000, 0x0001, 0x0002,        //              00/01/10
          0x000c, 0x000d, 0x000e,        //           11+00/01/10
          0x003c, 0x003d, 0x003e,        //         1111+00/01/10
          0x00fc, 0x00fd, 0x00fe,        //       111111+00/01/10
          0x03fc, 0x03fd, 0x03fe,        //     11111111+00/01/10
          0x0ffc, 0x0ffd, 0x0ffe,        //   1111111111+00/01/10
          0x3ffc, 0x3ffd, 0x3ffe, 0x3fff // 111111111111+xx
    };
    int cntr[8] = { 0 }, n, res;

    memset(vbm_tree, 0xff, sizeof(vbm_tree[0]) * 25);
    for (n = 0; n < 17; n++) {
        res = get_bits(gb, 3);
        if (cntr[res] > 3) // should be >= 3 + (res == 7))
            return -1;
        vbm_tree[res * 3 + cntr[res]++] = n;
    }
    INIT_VLC_STATIC(&frame_type_vlc, VLC_NBITS, sizeof(bits),
                    bits, 1, 1, codes, 2, 2, 132);
    return 0;
}

/**
 * Set up decoder with parameters from demuxer (extradata etc.).
 */
static av_cold int wmavoice_decode_init(AVCodecContext *ctx)
{
    int n, flags, pitch_range, lsp16_flag;
    WMAVoiceContext *s = ctx->priv_data;

    /**
     * Extradata layout:
     * - byte  0-18: WMAPro-in-WMAVoice extradata (see wmaprodec.c),
     * - byte 19-22: flags field (annoyingly in LE; see below for known
     *               values),
     * - byte 23-46: variable bitmode tree (really just 17 * 3 bits,
     *               rest is 0).
     */
    if (ctx->extradata_size != 46) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid extradata size %d (should be 46)\n",
               ctx->extradata_size);
        return -1;
    }
    flags                = AV_RL32(ctx->extradata + 18);
    s->spillover_bitsize = 3 + av_ceil_log2(ctx->block_align);
    s->do_apf            =    flags & 0x1;
    if (s->do_apf) {
        ff_rdft_init(&s->rdft,  7, DFT_R2C);
        ff_rdft_init(&s->irdft, 7, IDFT_C2R);
        ff_dct_init(&s->dct,  6, DCT_I);
        ff_dct_init(&s->dst,  6, DST_I);

        ff_sine_window_init(s->cos, 256);
        memcpy(&s->sin[255], s->cos, 256 * sizeof(s->cos[0]));
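        /* Mirror the 256-entry sine window into the full 511-entry tables:
         * cos[] becomes symmetric and sin[] antisymmetric around index 255,
         * covering the [-pi, pi] range documented in WMAVoiceContext. */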
        for (n = 0; n < 255; n++) {
            s->sin[n]       = -s->sin[510 - n];
            s->cos[510 - n] =  s->cos[n];
        }
    }
    s->denoise_strength  =   (flags >> 2) & 0xF;
    if (s->denoise_strength >= 12) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid denoise filter strength %d (max=11)\n",
               s->denoise_strength);
        return -1;
    }
    s->denoise_tilt_corr = !!(flags & 0x40);
    s->dc_level          =   (flags >> 7) & 0xF;
    s->lsp_q_mode        = !!(flags & 0x2000);
    s->lsp_def_mode      = !!(flags & 0x4000);
    lsp16_flag           =    flags & 0x1000;
    if (lsp16_flag) {
        s->lsps               = 16;
        s->frame_lsp_bitsize  = 34;
        s->sframe_lsp_bitsize = 60;
    } else {
        s->lsps               = 10;
        s->frame_lsp_bitsize  = 24;
        s->sframe_lsp_bitsize = 48;
    }
    for (n = 0; n < s->lsps; n++)
        s->prev_lsps[n] = M_PI * (n + 1.0) / (s->lsps + 1.0);

    init_get_bits(&s->gb, ctx->extradata + 22, (ctx->extradata_size - 22) << 3);
    if (decode_vbmtree(&s->gb, s->vbm_tree) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid VBM tree; broken extradata?\n");
        return -1;
    }

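    /* Pitch period bounds derived from the sample rate in 8-bit fixed point:
     * min_pitch_val is roughly sample_rate / 400 (a 2.5 ms period) and
     * max_pitch_val is roughly sample_rate * 37 / 2000 (an 18.5 ms period). */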
    s->min_pitch_val    = ((ctx->sample_rate << 8)      /  400 + 50) >> 8;
    s->max_pitch_val    = ((ctx->sample_rate << 8) * 37 / 2000 + 50) >> 8;
    pitch_range         = s->max_pitch_val - s->min_pitch_val;
    if (pitch_range <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid pitch range; broken extradata?\n");
        return -1;
    }
    s->pitch_nbits      = av_ceil_log2(pitch_range);
    s->last_pitch_val   = 40;
    s->last_acb_type    = ACB_TYPE_NONE;
    s->history_nsamples = s->max_pitch_val + 8;

    if (s->min_pitch_val < 1 || s->history_nsamples > MAX_SIGNAL_HISTORY) {
        int min_sr = ((((1 << 8) - 50) * 400) + 0xFF) >> 8,
            max_sr = ((((MAX_SIGNAL_HISTORY - 8) << 8) + 205) * 2000 / 37) >> 8;

        av_log(ctx, AV_LOG_ERROR,
               "Unsupported samplerate %d (min=%d, max=%d)\n",
               ctx->sample_rate, min_sr, max_sr); // 322-22097 Hz

        return -1;
    }

    s->block_conv_table[0]      = s->min_pitch_val;
    s->block_conv_table[1]      = (pitch_range * 25) >> 6;
    s->block_conv_table[2]      = (pitch_range * 44) >> 6;
    s->block_conv_table[3]      = s->max_pitch_val - 1;
    s->block_delta_pitch_hrange = (pitch_range >> 3) & ~0xF;
    if (s->block_delta_pitch_hrange <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid delta pitch hrange; broken extradata?\n");
        return -1;
    }
    s->block_delta_pitch_nbits  = 1 + av_ceil_log2(s->block_delta_pitch_hrange);
    s->block_pitch_range        = s->block_conv_table[2] +
                                  s->block_conv_table[3] + 1 +
                                  2 * (s->block_conv_table[1] - 2 * s->min_pitch_val);
    s->block_pitch_nbits        = av_ceil_log2(s->block_pitch_range);

    ctx->channels               = 1;
    ctx->channel_layout         = AV_CH_LAYOUT_MONO;
    ctx->sample_fmt             = AV_SAMPLE_FMT_FLT;

    return 0;
}

/**
 * @name Postfilter functions
 * Postfilter functions (gain control, Wiener denoise filter, DC filter,
 * Kalman smoothing, plus surrounding code to wrap it)
 * @{
 */
/**
 * Adaptive gain control (as used in postfilter).
 *
 * Identical to #ff_adaptive_gain_control() in acelp_vectors.c, except
 * that the energy here is calculated using sum(abs(...)), whereas the
 * other codecs (e.g. AMR-NB, SIPRO) use sqrt(dotproduct(...)).
 *
 * @param out output buffer for filtered samples
 * @param in input buffer containing the samples as they are after the
 *           postfilter steps so far
 * @param speech_synth input buffer containing speech synth before postfilter
 * @param size input buffer size
 * @param alpha exponential filter factor
 * @param gain_mem pointer to filter memory (single float)
 */
static void adaptive_gain_control(float *out, const float *in,
                                  const float *speech_synth,
                                  int size, float alpha, float *gain_mem)
{
    int i;
    float speech_energy = 0.0, postfilter_energy = 0.0, gain_scale_factor;
    float mem = *gain_mem;

    for (i = 0; i < size; i++) {
        speech_energy     += fabsf(speech_synth[i]);
        postfilter_energy += fabsf(in[i]);
    }
    gain_scale_factor = (1.0 - alpha) * speech_energy / postfilter_energy;

    for (i = 0; i < size; i++) {
        mem = alpha * mem + gain_scale_factor;
        out[i] = in[i] * mem;
    }

    *gain_mem = mem;
}

/**
 * Kalman smoothing function.
 *
 * This function looks pitch +/- 3 samples back into history to find
 * the best-fitting curve (the one giving the optimal gain of the two
 * signals, i.e. the highest dot product between the two), and then
 * uses that signal history to smoothen the output of the speech synthesis
 * filter.
 *
 * @param s WMA Voice decoding context
 * @param pitch pitch of the speech signal
 * @param in input speech signal
 * @param out output pointer for smoothened signal
 * @param size input/output buffer size
 *
 * @returns -1 if no smoothening took place, e.g. because no optimal
 *          fit could be found, or 0 on success.
 */
static int kalman_smoothen(WMAVoiceContext *s, int pitch,
                           const float *in, float *out, int size)
{
    int n;
    float optimal_gain = 0, dot;
    const float *ptr = &in[-FFMAX(s->min_pitch_val, pitch - 3)],
                *end = &in[-FFMIN(s->max_pitch_val, pitch + 3)],
                *best_hist_ptr;

    /* find best fitting point in history */
    do {
        dot = avpriv_scalarproduct_float_c(in, ptr, size);
        if (dot > optimal_gain) {
            optimal_gain  = dot;
            best_hist_ptr = ptr;
        }
    } while (--ptr >= end);

    if (optimal_gain <= 0)
        return -1;
    dot = avpriv_scalarproduct_float_c(best_hist_ptr, best_hist_ptr, size);
    if (dot <= 0) // would be 1.0
        return -1;

    if (optimal_gain <= dot) {
        dot = dot / (dot + 0.6 * optimal_gain); // 0.625-1.000
    } else
        dot = 0.625;

    /* actual smoothing */
    for (n = 0; n < size; n++)
        out[n] = best_hist_ptr[n] + dot * (in[n] - best_hist_ptr[n]);

    return 0;
}

/**
 * Get the tilt factor of a formant filter from its transfer function
 * @see #tilt_factor() in amrnbdec.c, which does essentially the same,
 *      but somehow (??) it does a speech synthesis filter in the
 *      middle, which is missing here
 *
 * @param lpcs LPC coefficients
 * @param n_lpcs Size of LPC buffer
 * @returns the tilt factor
 */
static float tilt_factor(const float *lpcs, int n_lpcs)
{
    float rh0, rh1;

    rh0 = 1.0     + avpriv_scalarproduct_float_c(lpcs,  lpcs,    n_lpcs);
    rh1 = lpcs[0] + avpriv_scalarproduct_float_c(lpcs, &lpcs[1], n_lpcs - 1);

    return rh1 / rh0;
}

/**
 * Derive denoise filter coefficients (in real domain) from the LPCs.
 */
static void calc_input_response(WMAVoiceContext *s, float *lpcs,
                                int fcb_type, float *coeffs, int remainder)
{
    float last_coeff, min = 15.0, max = -15.0;
    float irange, angle_mul, gain_mul, range, sq;
    int n, idx;

    /* Create frequency power spectrum of speech input (i.e. RDFT of LPCs) */
    s->rdft.rdft_calc(&s->rdft, lpcs);
#define log_range(var, assign) do { \
        float tmp = log10f(assign);  var = tmp; \
        max       = FFMAX(max, tmp); min = FFMIN(min, tmp); \
    } while (0)
    log_range(last_coeff,  lpcs[1]         * lpcs[1]);
    for (n = 1; n < 64; n++)
        log_range(lpcs[n], lpcs[n * 2]     * lpcs[n * 2] +
                           lpcs[n * 2 + 1] * lpcs[n * 2 + 1]);
    log_range(lpcs[0],     lpcs[0]         * lpcs[0]);
#undef log_range
    range    = max - min;
    lpcs[64] = last_coeff;

    /* Now, use this spectrum to pick out these frequencies with higher
     * (relative) power/energy (which we then take to be "not noise"),
     * and set up a table (still in lpc[]) of (relative) gains per frequency.
     * These frequencies will be maintained, while others ("noise") will be
     * decreased in the filter output. */
    irange    = 64.0 / range; // so irange*(max-value) is in the range [0, 63]
    gain_mul  = range * (fcb_type == FCB_TYPE_HARDCODED ? (5.0 / 13.0) :
                                                          (5.0 / 14.7));
    angle_mul = gain_mul * (8.0 * M_LN10 / M_PI);
    for (n = 0; n <= 64; n++) {
        float pwr;

        idx = FFMAX(0, lrint((max - lpcs[n]) * irange) - 1);
        pwr = wmavoice_denoise_power_table[s->denoise_strength][idx];
        lpcs[n] = angle_mul * pwr;

        /* 70.57 =~ 1/log10(1.0331663) */
        idx = (pwr * gain_mul - 0.0295) * 70.570526123;
        if (idx > 127) { // fallback if index falls outside table range
            coeffs[n] = wmavoice_energy_table[127] *
                        powf(1.0331663, idx - 127);
        } else
            coeffs[n] = wmavoice_energy_table[FFMAX(0, idx)];
    }

    /* calculate the Hilbert transform of the gains, which we do (since this
     * is a sine input) by doing a phase shift (in theory, H(sin())=cos()).
     * Hilbert_Transform(RDFT(x)) = Laplace_Transform(x), which calculates the
     * "moment" of the LPCs in this filter. */
    s->dct.dct_calc(&s->dct, lpcs);
    s->dst.dct_calc(&s->dst, lpcs);

    /* Split out the coefficient indexes into phase/magnitude pairs */
    idx = 255 + av_clip(lpcs[64],               -255, 255);
    coeffs[0]  = coeffs[0]  * s->cos[idx];
    idx = 255 + av_clip(lpcs[64] - 2 * lpcs[63], -255, 255);
    last_coeff = coeffs[64] * s->cos[idx];
    for (n = 63;; n--) {
        idx = 255 + av_clip(-lpcs[64] - 2 * lpcs[n - 1], -255, 255);
        coeffs[n * 2 + 1] = coeffs[n] * s->sin[idx];
        coeffs[n * 2]     = coeffs[n] * s->cos[idx];

        if (!--n) break;

        idx = 255 + av_clip( lpcs[64] - 2 * lpcs[n - 1], -255, 255);
        coeffs[n * 2 + 1] = coeffs[n] * s->sin[idx];
        coeffs[n * 2]     = coeffs[n] * s->cos[idx];
    }
    coeffs[1] = last_coeff;

    /* move into real domain */
    s->irdft.rdft_calc(&s->irdft, coeffs);

    /* tilt correction and normalize scale */
    memset(&coeffs[remainder], 0, sizeof(coeffs[0]) * (128 - remainder));
    if (s->denoise_tilt_corr) {
        float tilt_mem = 0;

        coeffs[remainder - 1] = 0;
        ff_tilt_compensation(&tilt_mem,
                             -1.8 * tilt_factor(coeffs, remainder - 1),
                             coeffs, remainder);
    }
    sq = (1.0 / 64.0) * sqrtf(1 / avpriv_scalarproduct_float_c(coeffs, coeffs,
                                                               remainder));
    for (n = 0; n < remainder; n++)
        coeffs[n] *= sq;
}

/**
 * This function applies a Wiener filter on the (noisy) speech signal as
 * a means to denoise it.
 *
 * - take RDFT of LPCs to get the power spectrum of the noise + speech;
 * - using this power spectrum, calculate (for each frequency) the Wiener
 *    filter gain, which depends on the frequency power and desired level
 *    of noise subtraction (when set too high, this leads to artifacts)
 *    We can do this symmetrically over the X-axis (so 0-4kHz is the inverse
 *    of 4-8kHz);
 * - by doing a phase shift, calculate the Hilbert transform of this array
 *    of per-frequency filter-gains to get the filtering coefficients;
 * - smoothen/normalize/de-tilt these filter coefficients as desired;
 * - take RDFT of noisy sound, apply the coefficients and take its IRDFT
 *    to get the denoised speech signal;
 * - the leftovers (i.e. output of the IRDFT on denoised speech data beyond
 *    the frame boundary) are saved and applied to subsequent frames by an
 *    overlap-add method (otherwise you get clicking artifacts).
 *
 * @param s WMA Voice decoding context
 * @param fcb_type Frame (codebook) type
 * @param synth_pf input: the noisy speech signal, output: denoised speech
 *                 data; should be 16-byte aligned (for ASM purposes)
 * @param size size of the speech data
 * @param lpcs LPCs used to synthesize this frame's speech data
 */
static void wiener_denoise(WMAVoiceContext *s, int fcb_type,
                           float *synth_pf, int size,
                           const float *lpcs)
{
    int remainder, lim, n;

    if (fcb_type != FCB_TYPE_SILENCE) {
        float *tilted_lpcs = s->tilted_lpcs_pf,
              *coeffs = s->denoise_coeffs_pf, tilt_mem = 0;

        tilted_lpcs[0]           = 1.0;
        memcpy(&tilted_lpcs[1], lpcs, sizeof(lpcs[0]) * s->lsps);
        memset(&tilted_lpcs[s->lsps + 1], 0,
               sizeof(tilted_lpcs[0]) * (128 - s->lsps - 1));
        ff_tilt_compensation(&tilt_mem, 0.7 * tilt_factor(lpcs, s->lsps),
                             tilted_lpcs, s->lsps + 2);

        /* The IRDFT output (127 samples for 7-bit filter) beyond the frame
         * size is applied to the next frame. All input beyond this is zero,
         * and thus all output beyond this will go towards zero, hence we can
         * limit to min(size-1, 127-size) as a performance consideration. */
        remainder = FFMIN(127 - size, size - 1);
        calc_input_response(s, tilted_lpcs, fcb_type, coeffs, remainder);

        /* apply coefficients (in frequency spectrum domain), i.e. complex
         * number multiplication */
        memset(&synth_pf[size], 0, sizeof(synth_pf[0]) * (128 - size));
        s->rdft.rdft_calc(&s->rdft, synth_pf);
        s->rdft.rdft_calc(&s->rdft, coeffs);
        synth_pf[0] *= coeffs[0];
        synth_pf[1] *= coeffs[1];
        for (n = 1; n < 64; n++) {
            float v1 = synth_pf[n * 2], v2 = synth_pf[n * 2 + 1];
            synth_pf[n * 2]     = v1 * coeffs[n * 2] - v2 * coeffs[n * 2 + 1];
            synth_pf[n * 2 + 1] = v2 * coeffs[n * 2] + v1 * coeffs[n * 2 + 1];
        }
        s->irdft.rdft_calc(&s->irdft, synth_pf);
    }

    /* merge filter output with the history of previous runs */
    if (s->denoise_filter_cache_size) {
        lim = FFMIN(s->denoise_filter_cache_size, size);
        for (n = 0; n < lim; n++)
            synth_pf[n] += s->denoise_filter_cache[n];
        s->denoise_filter_cache_size -= lim;
        memmove(s->denoise_filter_cache, &s->denoise_filter_cache[size],
                sizeof(s->denoise_filter_cache[0]) * s->denoise_filter_cache_size);
    }

    /* move remainder of filter output into a cache for future runs */
    if (fcb_type != FCB_TYPE_SILENCE) {
        lim = FFMIN(remainder, s->denoise_filter_cache_size);
        for (n = 0; n < lim; n++)
            s->denoise_filter_cache[n] += synth_pf[size + n];
        if (lim < remainder) {
            memcpy(&s->denoise_filter_cache[lim], &synth_pf[size + lim],
                   sizeof(s->denoise_filter_cache[0]) * (remainder - lim));
            s->denoise_filter_cache_size = remainder;
        }
    }
}

/**
 * Averaging projection filter, the postfilter used in WMAVoice.
 *
 * This uses the following steps:
 * - A zero-synthesis filter (generate excitation from synth signal)
 * - Kalman smoothing on excitation, based on pitch
 * - Re-synthesized smoothened output
 * - Iterative Wiener denoise filter
 * - Adaptive gain filter
 * - DC filter
 *
 * @param s WMAVoice decoding context
 * @param synth Speech synthesis output (before postfilter)
 * @param samples Output buffer for filtered samples
 * @param size Buffer size of synth & samples
 * @param lpcs Generated LPCs used for speech synthesis
 * @param zero_exc_pf destination for zero synthesis filter (16-byte aligned)
 * @param fcb_type Frame type (silence, hardcoded, AW-pulses or FCB-pulses)
 * @param pitch Pitch of the input signal
 */
static void postfilter(WMAVoiceContext *s, const float *synth,
                       float *samples,    int size,
                       const float *lpcs, float *zero_exc_pf,
                       int fcb_type,      int pitch)
{
    float synth_filter_in_buf[MAX_FRAMESIZE / 2],
          *synth_pf = &s->synth_filter_out_buf[MAX_LSPS_ALIGN16],
          *synth_filter_in = zero_exc_pf;

    assert(size <= MAX_FRAMESIZE / 2);

    /* generate excitation from input signal */
    ff_celp_lp_zero_synthesis_filterf(zero_exc_pf, lpcs, synth, size, s->lsps);

    if (fcb_type >= FCB_TYPE_AW_PULSES &&
        !kalman_smoothen(s, pitch, zero_exc_pf, synth_filter_in_buf, size))
        synth_filter_in = synth_filter_in_buf;

    /* re-synthesize speech after smoothening, and keep history */
    ff_celp_lp_synthesis_filterf(synth_pf, lpcs,
                                 synth_filter_in, size, s->lsps);
    memcpy(&synth_pf[-s->lsps], &synth_pf[size - s->lsps],
           sizeof(synth_pf[0]) * s->lsps);

    wiener_denoise(s, fcb_type, synth_pf, size, lpcs);

    adaptive_gain_control(samples, synth_pf, synth, size, 0.99,
                          &s->postfilter_agc);

    if (s->dc_level > 8) {
        /* remove ultra-low frequency DC noise / highpass filter;
         * coefficients are identical to those used in SIPR decoding,
         * and very closely resemble those used in AMR-NB decoding. */
        ff_acelp_apply_order_2_transfer_function(samples, samples,
            (const float[2]) { -1.99997,      1.0 },
            (const float[2]) { -1.9330735188, 0.93589198496 },
            0.93980580475, s->dcf_mem, size);
    }
}
/**
 * @}
 */

/**
 * Dequantize LSPs
 * @param lsps output pointer to the array that will hold the LSPs
 * @param num number of LSPs to be dequantized
 * @param values quantized values, contains n_stages values
 * @param sizes range (i.e. max value) of each quantized value
 * @param n_stages number of dequantization runs
 * @param table dequantization table to be used
 * @param mul_q LSF multiplier
 * @param base_q base (lowest) LSF values
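 * @note each output value is the sum over all stages n of
 *       base_q[n] + mul_q[n] * (entry m of the table row selected by values[n])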
 */
static void dequant_lsps(double *lsps, int num,
                         const uint16_t *values,
                         const uint16_t *sizes,
                         int n_stages, const uint8_t *table,
                         const double *mul_q,
                         const double *base_q)
{
    int n, m;

    memset(lsps, 0, num * sizeof(*lsps));
    for (n = 0; n < n_stages; n++) {
        const uint8_t *t_off = &table[values[n] * num];
        double base = base_q[n], mul = mul_q[n];

        for (m = 0; m < num; m++)
            lsps[m] += base + mul * t_off[m];

        table += sizes[n] * num;
    }
}

/**
 * @name LSP dequantization routines
 * LSP dequantization routines, for 10/16LSPs and independent/residual coding.
 * @note we assume enough bits are available, caller should check.
 * lsp10i() consumes 24 bits; lsp10r() consumes an additional 24 bits;
 * lsp16i() consumes 34 bits; lsp16r() consumes an additional 26 bits.
 * @{
 */
/**
 * Parse 10 independently-coded LSPs.
 */
static void dequant_lsp10i(GetBitContext *gb, double *lsps)
{
    static const uint16_t vec_sizes[4] = { 256, 64, 32, 32 };
    static const double mul_lsf[4] = {
        5.2187144800e-3,    1.4626986422e-3,
        9.6179549166e-4,    1.1325736225e-3
    };
    static const double base_lsf[4] = {
        M_PI * -2.15522e-1, M_PI * -6.1646e-2,
        M_PI * -3.3486e-2,  M_PI * -5.7408e-2
    };
    uint16_t v[4];

    v[0] = get_bits(gb, 8);
    v[1] = get_bits(gb, 6);
    v[2] = get_bits(gb, 5);
    v[3] = get_bits(gb, 5);

    dequant_lsps(lsps, 10, v, vec_sizes, 4, wmavoice_dq_lsp10i,
                 mul_lsf, base_lsf);
}

/**
 * Parse 10 independently-coded LSPs, and then derive the tables to
 * generate LSPs for the other frames from them (residual coding).
 */
static void dequant_lsp10r(GetBitContext *gb,
                           double *i_lsps, const double *old,
                           double *a1, double *a2, int q_mode)
{
    static const uint16_t vec_sizes[3] = { 128, 64, 64 };
    static const double mul_lsf[3] = {
        2.5807601174e-3,    1.2354460219e-3,   1.1763821673e-3
    };
    static const double base_lsf[3] = {
        M_PI * -1.07448e-1, M_PI * -5.2706e-2, M_PI * -5.1634e-2
    };
    const float (*ipol_tab)[2][10] = q_mode ?
        wmavoice_lsp10_intercoeff_b : wmavoice_lsp10_intercoeff_a;
    uint16_t interpol, v[3];
    int n;

    dequant_lsp10i(gb, i_lsps);

    interpol = get_bits(gb, 5);
    v[0]     = get_bits(gb, 7);
    v[1]     = get_bits(gb, 6);
    v[2]     = get_bits(gb, 6);

    for (n = 0; n < 10; n++) {
        double delta = old[n] - i_lsps[n];
        a1[n]        = ipol_tab[interpol][0][n] * delta + i_lsps[n];
        a1[10 + n]   = ipol_tab[interpol][1][n] * delta + i_lsps[n];
    }

    dequant_lsps(a2, 20, v, vec_sizes, 3, wmavoice_dq_lsp10r,
                 mul_lsf, base_lsf);
}

/**
 * Parse 16 independently-coded LSPs.
 */
static void dequant_lsp16i(GetBitContext *gb, double *lsps)
{
    static const uint16_t vec_sizes[5] = { 256, 64, 128, 64, 128 };
    static const double mul_lsf[5] = {
        3.3439586280e-3,    6.9908173703e-4,
        3.3216608306e-3,    1.0334960326e-3,
        3.1899104283e-3
    };
    static const double base_lsf[5] = {
        M_PI * -1.27576e-1, M_PI * -2.4292e-2,
        M_PI * -1.28094e-1, M_PI * -3.2128e-2,
        M_PI * -1.29816e-1
    };
    uint16_t v[5];

    v[0] = get_bits(gb, 8);
    v[1] = get_bits(gb, 6);
    v[2] = get_bits(gb, 7);
    v[3] = get_bits(gb, 6);
    v[4] = get_bits(gb, 7);

    dequant_lsps( lsps,     5,  v,     vec_sizes,    2,
                 wmavoice_dq_lsp16i1,  mul_lsf,     base_lsf);
    dequant_lsps(&lsps[5],  5, &v[2], &vec_sizes[2], 2,
                 wmavoice_dq_lsp16i2, &mul_lsf[2], &base_lsf[2]);
    dequant_lsps(&lsps[10], 6, &v[4], &vec_sizes[4], 1,
                 wmavoice_dq_lsp16i3, &mul_lsf[4], &base_lsf[4]);
}

/**
 * Parse 16 independently-coded LSPs, and then derive the tables to
 * generate LSPs for the other frames from them (residual coding).
 */
static void dequant_lsp16r(GetBitContext *gb,
                           double *i_lsps, const double *old,
                           double *a1, double *a2, int q_mode)
{
    static const uint16_t vec_sizes[3] = { 128, 128, 128 };
    static const double mul_lsf[3] = {
        1.2232979501e-3,   1.4062241527e-3,   1.6114744851e-3
    };
    static const double base_lsf[3] = {
        M_PI * -5.5830e-2, M_PI * -5.2908e-2, M_PI * -5.4776e-2
    };
    const float (*ipol_tab)[2][16] = q_mode ?
        wmavoice_lsp16_intercoeff_b : wmavoice_lsp16_intercoeff_a;
    uint16_t interpol, v[3];
    int n;

    dequant_lsp16i(gb, i_lsps);

    interpol = get_bits(gb, 5);
    v[0]     = get_bits(gb, 7);
    v[1]     = get_bits(gb, 7);
    v[2]     = get_bits(gb, 7);

    for (n = 0; n < 16; n++) {
        double delta = old[n] - i_lsps[n];
        a1[n]        = ipol_tab[interpol][0][n] * delta + i_lsps[n];
        a1[16 + n]   = ipol_tab[interpol][1][n] * delta + i_lsps[n];
    }

    dequant_lsps( a2,     10,  v,     vec_sizes,    1,
                 wmavoice_dq_lsp16r1,  mul_lsf,     base_lsf);
    dequant_lsps(&a2[10], 10, &v[1], &vec_sizes[1], 1,
                 wmavoice_dq_lsp16r2, &mul_lsf[1], &base_lsf[1]);
    dequant_lsps(&a2[20], 12, &v[2], &vec_sizes[2], 1,
                 wmavoice_dq_lsp16r3, &mul_lsf[2], &base_lsf[2]);
}

/**
 * @}
 * @name Pitch-adaptive window coding functions
 * The next few functions are for pitch-adaptive window coding.
 * @{
 */
/**
 * Parse the offset of the first pitch-adaptive window pulses, and
 * the distribution of pulses between the two blocks in this frame.
 * @param s WMA Voice decoding context private data
 * @param gb bit I/O context
 * @param pitch pitch for each block in this frame
 */
static void aw_parse_coords(WMAVoiceContext *s, GetBitContext *gb,
                            const int *pitch)
{
    static const int16_t start_offset[94] = {
        -11,  -9,  -7,  -5,  -3,  -1,   1,   3,   5,   7,   9,  11,
         13,  15,  18,  17,  19,  20,  21,  22,  23,  24,  25,  26,
         27,  28,  29,  30,  31,  32,  33,  35,  37,  39,  41,  43,
         45,  47,  49,  51,  53,  55,  57,  59,  61,  63,  65,  67,
         69,  71,  73,  75,  77,  79,  81,  83,  85,  87,  89,  91,
         93,  95,  97,  99, 101, 103, 105, 107, 109, 111, 113, 115,
        117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 137, 139,
        141, 143, 145, 147, 149, 151, 153, 155, 157, 159
    };
    int bits, offset;

    /* position of pulse */
    s->aw_idx_is_ext = 0;
    if ((bits = get_bits(gb, 6)) >= 54) {
        s->aw_idx_is_ext = 1;
        bits += (bits - 54) * 3 + get_bits(gb, 2);
    }

    /* for a repeated pulse at pulse_off with a pitch_lag of pitch[], count
     * the distribution of the pulses in each block contained in this frame. */
    s->aw_pulse_range        = FFMIN(pitch[0], pitch[1]) > 32 ? 24 : 16;
    for (offset = start_offset[bits]; offset < 0; offset += pitch[0]) ;
    s->aw_n_pulses[0]        = (pitch[0] - 1 + MAX_FRAMESIZE / 2 - offset) / pitch[0];
    s->aw_first_pulse_off[0] = offset - s->aw_pulse_range / 2;
    offset                  += s->aw_n_pulses[0] * pitch[0];
    s->aw_n_pulses[1]        = (pitch[1] - 1 + MAX_FRAMESIZE - offset) / pitch[1];
    s->aw_first_pulse_off[1] = offset - (MAX_FRAMESIZE + s->aw_pulse_range) / 2;

    /* if continuing from a position before the block, reset position to
     * start of block (when corrected for the range over which it can be
     * spread in aw_pulse_set1()). */
    if (start_offset[bits] < MAX_FRAMESIZE / 2) {
        while (s->aw_first_pulse_off[1] - pitch[1] + s->aw_pulse_range > 0)
            s->aw_first_pulse_off[1] -= pitch[1];
        if (start_offset[bits] < 0)
            while (s->aw_first_pulse_off[0] - pitch[0] + s->aw_pulse_range > 0)
                s->aw_first_pulse_off[0] -= pitch[0];
    }
}

/**
 * Apply second set of pitch-adaptive window pulses.
 * @param s WMA Voice decoding context private data
 * @param gb bit I/O context
 * @param block_idx block index in frame [0, 1]
 * @param fcb structure containing fixed codebook vector info
 */
static void aw_pulse_set2(WMAVoiceContext *s, GetBitContext *gb,
                          int block_idx, AMRFixed *fcb)
{
    uint16_t use_mask_mem[9]; // only 5 are used, rest is padding
    uint16_t *use_mask = use_mask_mem + 2;
    /* in this function, idx is the index in the 80-bit (+ padding) use_mask
     * bit-array. Since use_mask consists of 16-bit values, the lower 4 bits
     * of idx are the position of the bit within a particular item in the
     * array (0 being the most significant bit, and 15 being the least
     * significant bit), and the remainder (>> 4) is the index in the
     * use_mask[]-array. This is faster and uses less memory than using a
     * 80-byte/80-int array. */
    int pulse_off = s->aw_first_pulse_off[block_idx],
        pulse_start, n, idx, range, aidx, start_off = 0;

    /* set offset of first pulse to within this block */
    if (s->aw_n_pulses[block_idx] > 0)
        while (pulse_off + s->aw_pulse_range < 1)
            pulse_off += fcb->pitch_lag;

    /* find range per pulse */
    if (s->aw_n_pulses[0] > 0) {
        if (block_idx == 0) {
            range = 32;
        } else /* block_idx = 1 */ {
            range = 8;
            if (s->aw_n_pulses[block_idx] > 0)
                pulse_off = s->aw_next_pulse_off_cache;
        }
    } else
        range = 16;
    pulse_start = s->aw_n_pulses[block_idx] > 0 ? pulse_off - range / 2 : 0;

    /* aw_pulse_set1() already applies pulses around pulse_off (to be exact,
     * in the range [pulse_off, pulse_off + s->aw_pulse_range]), and thus
     * we exclude that range from being pulsed again in this function. */
    memset(&use_mask[-2], 0, 2 * sizeof(use_mask[0]));
    memset( use_mask,   -1, 5 * sizeof(use_mask[0]));
    memset(&use_mask[5], 0, 2 * sizeof(use_mask[0]));
    if (s->aw_n_pulses[block_idx] > 0)
        for (idx = pulse_off; idx < MAX_FRAMESIZE / 2; idx += fcb->pitch_lag) {
            int excl_range         = s->aw_pulse_range; // always 16 or 24
            uint16_t *use_mask_ptr = &use_mask[idx >> 4];
            int first_sh           = 16 - (idx & 15);
            *use_mask_ptr++       &= 0xFFFFu << first_sh;
            excl_range            -= first_sh;
            if (excl_range >= 16) {
                *use_mask_ptr++    = 0;
                *use_mask_ptr     &= 0xFFFF >> (excl_range - 16);
            } else
                *use_mask_ptr     &= 0xFFFF >> excl_range;
        }

    /* find the 'aidx'th offset that is not excluded */
    aidx = get_bits(gb, s->aw_n_pulses[0] > 0 ? 5 - 2 * block_idx : 4);
    for (n = 0; n <= aidx; pulse_start++) {
        for (idx = pulse_start; idx < 0; idx += fcb->pitch_lag) ;
        if (idx >= MAX_FRAMESIZE / 2) { // find from zero
            if (use_mask[0])      idx = 0x0F;
            else if (use_mask[1]) idx = 0x1F;
            else if (use_mask[2]) idx = 0x2F;
            else if (use_mask[3]) idx = 0x3F;
            else if (use_mask[4]) idx = 0x4F;
            else                  return;
            idx -= av_log2_16bit(use_mask[idx >> 4]);
        }
        if (use_mask[idx >> 4] & (0x8000 >> (idx & 15))) {
            use_mask[idx >> 4] &= ~(0x8000 >> (idx & 15));
            n++;
            start_off = idx;
        }
    }

    fcb->x[fcb->n] = start_off;
    fcb->y[fcb->n] = get_bits1(gb) ? -1.0 : 1.0;
    fcb->n++;

    /* set offset for next block, relative to start of that block */
    n = (MAX_FRAMESIZE / 2 - start_off) % fcb->pitch_lag;
    s->aw_next_pulse_off_cache = n ? fcb->pitch_lag - n : 0;
}

/**
 * Apply first set of pitch-adaptive window pulses.
 * @param s WMA Voice decoding context private data
 * @param gb bit I/O context
 * @param block_idx block index in frame [0, 1]
 * @param fcb storage location for fixed codebook pulse info
 */
static void aw_pulse_set1(WMAVoiceContext *s, GetBitContext *gb,
                          int block_idx, AMRFixed *fcb)
{
    int val = get_bits(gb, 12 - 2 * (s->aw_idx_is_ext && !block_idx));
    float v;

    if (s->aw_n_pulses[block_idx] > 0) {
        int n, v_mask, i_mask, sh, n_pulses;

        if (s->aw_pulse_range == 24) { // 3 pulses, 1:sign + 3:index each
            n_pulses = 3;
            v_mask   = 8;
            i_mask   = 7;
            sh       = 4;
        } else { // 4 pulses, 1:sign + 2:index each
            n_pulses = 4;
            v_mask   = 4;
            i_mask   = 3;
            sh       = 3;
        }

        for (n = n_pulses - 1; n >= 0; n--, val >>= sh) {
            fcb->y[fcb->n] = (val & v_mask) ? -1.0 : 1.0;
            fcb->x[fcb->n] = (val & i_mask) * n_pulses + n +
                                 s->aw_first_pulse_off[block_idx];
            while (fcb->x[fcb->n] < 0)
                fcb->x[fcb->n] += fcb->pitch_lag;
            if (fcb->x[fcb->n] < MAX_FRAMESIZE / 2)
                fcb->n++;
        }
    } else {
        int num2 = (val & 0x1FF) >> 1, delta, idx;

        if (num2 < 1 * 79)      { delta = 1; idx = num2 + 1; }
        else if (num2 < 2 * 78) { delta = 3; idx = num2 + 1 - 1 * 77; }
        else if (num2 < 3 * 77) { delta = 5; idx = num2 + 1 - 2 * 76; }
        else                    { delta = 7; idx = num2 + 1 - 3 * 75; }
        v = (val & 0x200) ? -1.0 : 1.0;

        fcb->no_repeat_mask |= 3 << fcb->n;
        fcb->x[fcb->n]       = idx - delta;
        fcb->y[fcb->n]       = v;
        fcb->x[fcb->n + 1]   = idx;
        fcb->y[fcb->n + 1]   = (val & 1) ? -v : v;
        fcb->n              += 2;
    }
}

/**
 * @}
 *
 * Generate a random number from frame_cntr and block_num, which will lie
 * in the range [0, 1000 - block_size] (so it can be used as an index into a
 * table of 1000 entries, from which you want to read block_size entries).
 *
 * @param frame_cntr current frame number
 * @param block_num current block index
 * @param block_size amount of entries we want to read from a table
 *                   that has 1000 entries
 * @return a (non-)random number in the [0, 1000 - block_size] range.
 */
static int pRNG(int frame_cntr, int block_num, int block_size)
{
    /* array to simplify the calculation of z:
     * y = (x % 9) * 5 + 6;
     * z = (49995 * x) / y;
     * Since y only has 9 possible values, we can remove the division by
     * using a LUT and FASTDIV-style divisions. For each of the 9 values
     * of y, we can rewrite z as:
     * z = x * (49995 / y) + x * ((49995 % y) / y)
     * In this table, each col represents one possible value of y, the
     * first number is 49995 / y, and the second is the FASTDIV variant
     * of 49995 % y / y. */
    static const unsigned int div_tbl[9][2] = {
        { 8332,  3 * 715827883U }, // y =  6
        { 4545,  0 * 390451573U }, // y = 11
        { 3124, 11 * 268435456U }, // y = 16
        { 2380, 15 * 204522253U }, // y = 21
        { 1922, 23 * 165191050U }, // y = 26
        { 1612, 23 * 138547333U }, // y = 31
        { 1388, 27 * 119304648U }, // y = 36
        { 1219, 16 * 104755300U }, // y = 41
        { 1086, 39 *  93368855U }  // y = 46
    };
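    /* Worked example for the first row (y = 6): 49995 / 6 = 8332 with
     * remainder 3, and 715827883 is roughly 2^32 / 6, so
     * UMULH(x, 3 * 715827883U) approximates x * 3 / 6, giving
     * z close to x * 49995 / 6. */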
    unsigned int z, y, x = MUL16(block_num, 1877) + frame_cntr;
    if (x >= 0xFFFF) x -= 0xFFFF;   // max value of x is 8*1877+0xFFFE=0x13AA6,
                                    // so this is effectively a modulo (%)
    y = x - 9 * MULH(477218589, x); // x % 9
    z = (uint16_t) (x * div_tbl[y][0] + UMULH(x, div_tbl[y][1]));
                                    // z = x * 49995 / (y * 5 + 6)
    return z % (1000 - block_size);
}

/**
 * Parse hardcoded signal for a single block.
 * @note see #synth_block().
 */
static void synth_block_hardcoded(WMAVoiceContext *s, GetBitContext *gb,
                                 int block_idx, int size,
                                 const struct frame_type_desc *frame_desc,
                                 float *excitation)
{
    float gain;
    int n, r_idx;

    assert(size <= MAX_FRAMESIZE);

    /* Set the offset from which we start reading wmavoice_std_codebook */
    if (frame_desc->fcb_type == FCB_TYPE_SILENCE) {
        r_idx = pRNG(s->frame_cntr, block_idx, size);
        gain  = s->silence_gain;
    } else /* FCB_TYPE_HARDCODED */ {
        r_idx = get_bits(gb, 8);
        gain  = wmavoice_gain_universal[get_bits(gb, 6)];
    }

    /* Clear gain prediction parameters */
    memset(s->gain_pred_err, 0, sizeof(s->gain_pred_err));

    /* Apply gain to hardcoded codebook and use that as excitation signal */
    for (n = 0; n < size; n++)
        excitation[n] = wmavoice_std_codebook[r_idx + n] * gain;
}

/**
 * Parse FCB/ACB signal for a single block.
 * @note see #synth_block().
 */
static void synth_block_fcb_acb(WMAVoiceContext *s, GetBitContext *gb,
                                int block_idx, int size,
                                int block_pitch_sh2,
                                const struct frame_type_desc *frame_desc,
                                float *excitation)
{
    static const float gain_coeff[6] = {
        0.8169, -0.06545, 0.1726, 0.0185, -0.0359, 0.0458
    };
    float pulses[MAX_FRAMESIZE / 2], pred_err, acb_gain, fcb_gain;
    int n, idx, gain_weight;
    AMRFixed fcb;

    assert(size <= MAX_FRAMESIZE / 2);
    memset(pulses, 0, sizeof(*pulses) * size);

    fcb.pitch_lag      = block_pitch_sh2 >> 2;
    fcb.pitch_fac      = 1.0;
    fcb.no_repeat_mask = 0;
    fcb.n              = 0;

    /* For the other frame types, this is where we apply the innovation
     * (fixed) codebook pulses of the speech signal. */
    if (frame_desc->fcb_type == FCB_TYPE_AW_PULSES) {
        aw_pulse_set1(s, gb, block_idx, &fcb);
        aw_pulse_set2(s, gb, block_idx, &fcb);
    } else /* FCB_TYPE_EXC_PULSES */ {
        int offset_nbits = 5 - frame_desc->log_n_blocks;

        fcb.no_repeat_mask = -1;
        /* similar to ff_decode_10_pulses_35bits(), but with single pulses
         * (instead of double) for a subset of pulses */
        for (n = 0; n < 5; n++) {
            float sign;
            int pos1, pos2;

            sign           = get_bits1(gb) ? 1.0 : -1.0;
            pos1           = get_bits(gb, offset_nbits);
            fcb.x[fcb.n]   = n + 5 * pos1;
            fcb.y[fcb.n++] = sign;
            if (n < frame_desc->dbl_pulses) {
                pos2           = get_bits(gb, offset_nbits);
                fcb.x[fcb.n]   = n + 5 * pos2;
                fcb.y[fcb.n++] = (pos1 < pos2) ? -sign : sign;
            }
        }
    }
    ff_set_fixed_vector(pulses, &fcb, 1.0, size);

    /* Calculate gain for adaptive & fixed codebook signal.
     * see ff_amr_set_fixed_gain(). */
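    /* The fixed-codebook gain is derived in the log domain from the six
     * most recent prediction errors (effectively a moving-average
     * prediction over gain_pred_err), corrected by the value read from
     * the gain codebook below. */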
    idx = get_bits(gb, 7);
    fcb_gain = expf(avpriv_scalarproduct_float_c(s->gain_pred_err,
                                                 gain_coeff, 6) -
                    5.2409161640 + wmavoice_gain_codebook_fcb[idx]);
    acb_gain = wmavoice_gain_codebook_acb[idx];
    pred_err = av_clipf(wmavoice_gain_codebook_fcb[idx],
                        -2.9957322736 /* log(0.05) */,
                         1.6094379124 /* log(5.0)  */);

    gain_weight = 8 >> frame_desc->log_n_blocks;
    memmove(&s->gain_pred_err[gain_weight], s->gain_pred_err,
            sizeof(*s->gain_pred_err) * (6 - gain_weight));
    for (n = 0; n < gain_weight; n++)
        s->gain_pred_err[n] = pred_err;

    /* Calculation of adaptive codebook */
    if (frame_desc->acb_type == ACB_TYPE_ASYMMETRIC) {
        int len;
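        /* The per-sample pitch is tracked in 16.16 fixed point (pitch_sh16);
         * each iteration covers the run of samples ('len') over which the
         * fractional-pitch interpolation index (idx) stays constant. */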
        for (n = 0; n < size; n += len) {
            int next_idx_sh16;
            int abs_idx    = block_idx * size + n;
            int pitch_sh16 = (s->last_pitch_val << 16) +
                             s->pitch_diff_sh16 * abs_idx;
            int pitch      = (pitch_sh16 + 0x6FFF) >> 16;
            int idx_sh16   = ((pitch << 16) - pitch_sh16) * 8 + 0x58000;
            idx            = idx_sh16 >> 16;
            if (s->pitch_diff_sh16) {
                if (s->pitch_diff_sh16 > 0) {
                    next_idx_sh16 = (idx_sh16) &~ 0xFFFF;
                } else
                    next_idx_sh16 = (idx_sh16 + 0x10000) &~ 0xFFFF;
                len = av_clip((idx_sh16 - next_idx_sh16) / s->pitch_diff_sh16 / 8,
                              1, size - n);
            } else
                len = size;

            ff_acelp_interpolatef(&excitation[n], &excitation[n - pitch],
                                  wmavoice_ipol1_coeffs, 17,
                                  idx, 9, len);
        }
    } else /* ACB_TYPE_HAMMING */ {
        int block_pitch = block_pitch_sh2 >> 2;
        idx             = block_pitch_sh2 & 3;
        if (idx) {
            ff_acelp_interpolatef(excitation, &excitation[-block_pitch],
                                  wmavoice_ipol2_coeffs, 4,
                                  idx, 8, size);
        } else
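            /* Zero fractional pitch: the excitation is a plain copy of the
             * signal block_pitch samples back; av_memcpy_backptr() handles
             * the possibly overlapping source and destination. */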
            av_memcpy_backptr((uint8_t *) excitation, sizeof(float) * block_pitch,
                              sizeof(float) * size);
    }

    /* Interpolate ACB/FCB and use as excitation signal */
    ff_weighted_vector_sumf(excitation, excitation, pulses,
                            acb_gain, fcb_gain, size);
}

/**
 * Parse data in a single block.
 * @note we assume enough bits are available; the caller should check.
 *
 * @param s WMA Voice decoding context private data
 * @param gb bit I/O context
 * @param block_idx index of the to-be-read block
 * @param size amount of samples to be read in this block
 * @param block_pitch_sh2 pitch for this block << 2
 * @param lsps LSPs for (the end of) this frame
 * @param prev_lsps LSPs for the last frame
 * @param frame_desc frame type descriptor
 * @param excitation target memory for the ACB+FCB interpolated signal
 * @param synth target memory for the speech synthesis filter output
 */
static void synth_block(WMAVoiceContext *s, GetBitContext *gb,
                        int block_idx, int size,
                        int block_pitch_sh2,
                        const double *lsps, const double *prev_lsps,
                        const struct frame_type_desc *frame_desc,
                        float *excitation, float *synth)
{
    double i_lsps[MAX_LSPS];
    float lpcs[MAX_LSPS];
    float fac;
    int n;

    if (frame_desc->acb_type == ACB_TYPE_NONE)
        synth_block_hardcoded(s, gb, block_idx, size, frame_desc, excitation);
    else
        synth_block_fcb_acb(s, gb, block_idx, size, block_pitch_sh2,
                            frame_desc, excitation);

    /* convert interpolated LSPs to LPCs */
    fac = (block_idx + 0.5) / frame_desc->n_blocks;
    for (n = 0; n < s->lsps; n++) // LSF -> LSP
        i_lsps[n] = cos(prev_lsps[n] + fac * (lsps[n] - prev_lsps[n]));
    ff_acelp_lspd2lpc(i_lsps, lpcs, s->lsps >> 1);

    /* Speech synthesis */
    ff_celp_lp_synthesis_filterf(synth, lpcs, excitation, size, s->lsps);
}

/**
 * Synthesize output samples for a single frame.
 * @note we assume enough bits are available; the caller should check.
 *
 * @param ctx WMA Voice decoder context
 * @param gb bit I/O context (s->gb or one for cross-packet superframes)
 * @param frame_idx Frame number within superframe [0-2]
 * @param samples pointer to output sample buffer, has space for at least 160
 *                samples
 * @param lsps LSP array
 * @param prev_lsps array of previous frame's LSPs
 * @param excitation target buffer for excitation signal
 * @param synth target buffer for synthesized speech data
 * @return 0 on success, <0 on error.
 */
static int synth_frame(AVCodecContext *ctx, GetBitContext *gb, int frame_idx,
                       float *samples,
                       const double *lsps, const double *prev_lsps,
                       float *excitation, float *synth)
{
    WMAVoiceContext *s = ctx->priv_data;
    int n, n_blocks_x2, log_n_blocks_x2, cur_pitch_val;
    int pitch[MAX_BLOCKS], last_block_pitch;

    /* Parse frame type ("frame header"), see frame_descs */
    int bd_idx = s->vbm_tree[get_vlc2(gb, frame_type_vlc.table, 6, 3)], block_nsamples;

    if (bd_idx < 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid frame type VLC code, skipping\n");
        return -1;
    }

    block_nsamples = MAX_FRAMESIZE / frame_descs[bd_idx].n_blocks;

    /* Pitch calculation for ACB_TYPE_ASYMMETRIC ("pitch-per-frame") */
    if (frame_descs[bd_idx].acb_type == ACB_TYPE_ASYMMETRIC) {
        /* Pitch is provided per frame, which is interpreted as the pitch of
         * the last sample of the last block of this frame. We can interpolate
         * the pitch of other blocks (and even pitch-per-sample) by gradually
         * incrementing/decrementing prev_frame_pitch to cur_pitch_val. */
        n_blocks_x2      = frame_descs[bd_idx].n_blocks << 1;
        log_n_blocks_x2  = frame_descs[bd_idx].log_n_blocks + 1;
        cur_pitch_val    = s->min_pitch_val + get_bits(gb, s->pitch_nbits);
        cur_pitch_val    = FFMIN(cur_pitch_val, s->max_pitch_val - 1);
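        /* If the previous frame had no pitch, or the pitch jumped by more
         * than roughly 10% of the average of the two values, do not
         * interpolate from the old pitch; keep it constant over this frame
         * instead. */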
        if (s->last_acb_type == ACB_TYPE_NONE ||
            20 * abs(cur_pitch_val - s->last_pitch_val) >
                (cur_pitch_val + s->last_pitch_val))
            s->last_pitch_val = cur_pitch_val;

        /* pitch per block */
        for (n = 0; n < frame_descs[bd_idx].n_blocks; n++) {
            int fac = n * 2 + 1;
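            /* Linearly interpolate between the previous frame's pitch and
             * cur_pitch_val, evaluated at the centre of block n (the added
             * n_blocks term rounds to nearest). */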

            pitch[n] = (MUL16(fac,                 cur_pitch_val) +
                        MUL16((n_blocks_x2 - fac), s->last_pitch_val) +
                        frame_descs[bd_idx].n_blocks) >> log_n_blocks_x2;
        }

        /* "pitch-diff-per-sample" for calculation of pitch per sample */
        s->pitch_diff_sh16 =
            ((cur_pitch_val - s->last_pitch_val) << 16) / MAX_FRAMESIZE;
    }

    /* Global gain (if silence) and pitch-adaptive window coordinates */
    switch (frame_descs[bd_idx].fcb_type) {
    case FCB_TYPE_SILENCE:
        s->silence_gain = wmavoice_gain_silence[get_bits(gb, 8)];
        break;
    case FCB_TYPE_AW_PULSES:
        aw_parse_coords(s, gb, pitch);
        break;
    }

    for (n = 0; n < frame_descs[bd_idx].n_blocks; n++) {
        int bl_pitch_sh2;

        /* Pitch calculation for ACB_TYPE_HAMMING ("pitch-per-block") */
        switch (frame_descs[bd_idx].acb_type) {
        case ACB_TYPE_HAMMING: {
            /* Pitch is given per block. Per-block pitches are encoded as an
             * absolute value for the first block, and then as delta values
             * relative to this value for all subsequent blocks. The scale of
             * this pitch value is semi-logarithmic compared to its use in the
             * decoder, so we convert it back to normal scale here. */
            int block_pitch,
                t1 = (s->block_conv_table[1] - s->block_conv_table[0]) << 2,
                t2 = (s->block_conv_table[2] - s->block_conv_table[1]) << 1,
                t3 =  s->block_conv_table[3] - s->block_conv_table[2] + 1;
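            /* t1, t2 and t3 are the sizes (in codewords) of the three
             * segments of the semi-logarithmic scale, which use quarter-,
             * half- and whole-sample pitch steps respectively
             * (bl_pitch_sh2 is the pitch << 2). */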

            if (n == 0) {
                block_pitch = get_bits(gb, s->block_pitch_nbits);
            } else
                block_pitch = last_block_pitch - s->block_delta_pitch_hrange +
                                 get_bits(gb, s->block_delta_pitch_nbits);
            /* Convert last_ so that any next delta is within _range */
            last_block_pitch = av_clip(block_pitch,
                                       s->block_delta_pitch_hrange,
                                       s->block_pitch_range -
                                           s->block_delta_pitch_hrange);

            /* Convert semi-log-style scale back to normal scale */
            if (block_pitch < t1) {
                bl_pitch_sh2 = (s->block_conv_table[0] << 2) + block_pitch;
            } else {
                block_pitch -= t1;
                if (block_pitch < t2) {
                    bl_pitch_sh2 =
                        (s->block_conv_table[1] << 2) + (block_pitch << 1);
                } else {
                    block_pitch -= t2;
                    if (block_pitch < t3) {
                        bl_pitch_sh2 =
                            (s->block_conv_table[2] + block_pitch) << 2;
                    } else
                        bl_pitch_sh2 = s->block_conv_table[3] << 2;
                }
            }
            pitch[n] = bl_pitch_sh2 >> 2;
            break;
        }

        case ACB_TYPE_ASYMMETRIC: {
            bl_pitch_sh2 = pitch[n] << 2;
            break;
        }

        default: // ACB_TYPE_NONE has no pitch
            bl_pitch_sh2 = 0;
            break;
        }

        synth_block(s, gb, n, block_nsamples, bl_pitch_sh2,
                    lsps, prev_lsps, &frame_descs[bd_idx],
                    &excitation[n * block_nsamples],
                    &synth[n * block_nsamples]);
    }

    /* Averaging projection filter, if applicable. Else, just copy samples
     * from synthesis buffer */
    if (s->do_apf) {
        double i_lsps[MAX_LSPS];
        float lpcs[MAX_LSPS];

        for (n = 0; n < s->lsps; n++) // LSF -> LSP
            i_lsps[n] = cos(0.5 * (prev_lsps[n] + lsps[n]));
        ff_acelp_lspd2lpc(i_lsps, lpcs, s->lsps >> 1);
        postfilter(s, synth, samples, 80, lpcs,
                   &s->zero_exc_pf[s->history_nsamples + MAX_FRAMESIZE * frame_idx],
                   frame_descs[bd_idx].fcb_type, pitch[0]);

        for (n = 0; n < s->lsps; n++) // LSF -> LSP
            i_lsps[n] = cos(lsps[n]);
        ff_acelp_lspd2lpc(i_lsps, lpcs, s->lsps >> 1);
        postfilter(s, &synth[80], &samples[80], 80, lpcs,
                   &s->zero_exc_pf[s->history_nsamples + MAX_FRAMESIZE * frame_idx + 80],
                   frame_descs[bd_idx].fcb_type, pitch[0]);
    } else
        memcpy(samples, synth, 160 * sizeof(synth[0]));

    /* Cache values for next frame */
    s->frame_cntr++;
    if (s->frame_cntr >= 0xFFFF) s->frame_cntr -= 0xFFFF; // i.e. modulo (%)
    s->last_acb_type = frame_descs[bd_idx].acb_type;
    switch (frame_descs[bd_idx].acb_type) {
    case ACB_TYPE_NONE:
        s->last_pitch_val = 0;
        break;
    case ACB_TYPE_ASYMMETRIC:
        s->last_pitch_val = cur_pitch_val;
        break;
    case ACB_TYPE_HAMMING:
        s->last_pitch_val = pitch[frame_descs[bd_idx].n_blocks - 1];
        break;
    }

    return 0;
}

/**
 * Ensure a minimum value for the first item, a maximum value for the last
 * item, proper spacing between values and proper ordering.
 *
 * @param lsps array of LSPs
 * @param num size of LSP array
 *
 * @note basically a double version of #ff_acelp_reorder_lsf(); it might be
 *       useful to put this in a generic location later on. Parts are also
 *       present in #ff_set_min_dist_lsf() + #ff_sort_nearly_sorted_floats(),
 *       which operate on floats.
 */
static void stabilize_lsps(double *lsps, int num)
{
    int n, m, l;

    /* set minimum value for first, maximum value for last and minimum
     * spacing between LSF values.
     * Very similar to ff_set_min_dist_lsf(), but in double. */
    lsps[0]       = FFMAX(lsps[0],       0.0015 * M_PI);
    for (n = 1; n < num; n++)
        lsps[n]   = FFMAX(lsps[n],       lsps[n - 1] + 0.0125 * M_PI);
    lsps[num - 1] = FFMIN(lsps[num - 1], 0.9985 * M_PI);

    /* reorder (a full insertion sort, run only when an out-of-order pair
     * is found). Very similar to ff_sort_nearly_sorted_floats(), but in
     * double. */
    for (n = 1; n < num; n++) {
        if (lsps[n] < lsps[n - 1]) {
            for (m = 1; m < num; m++) {
                double tmp = lsps[m];
                for (l = m - 1; l >= 0; l--) {
                    if (lsps[l] <= tmp) break;
                    lsps[l + 1] = lsps[l];
                }
                lsps[l + 1] = tmp;
            }
            break;
        }
    }
}

/**
 * Test if there's enough bits to read 1 superframe.
 *
 * @param orig_gb bit I/O context used for reading. This function
 *                does not modify the state of the bitreader; it
 *                only uses it to copy the current stream position
 * @param s WMA Voice decoding context private data
 * @return -1 if unsupported, 1 on not enough bits or 0 if OK.
 */
static int check_bits_for_superframe(GetBitContext *orig_gb,
                                     WMAVoiceContext *s)
{
    GetBitContext s_gb, *gb = &s_gb;
    int n, need_bits, bd_idx;
    const struct frame_type_desc *frame_desc;

    /* initialize a copy */
    init_get_bits(gb, orig_gb->buffer, orig_gb->size_in_bits);
    skip_bits_long(gb, get_bits_count(orig_gb));
    assert(get_bits_left(gb) == get_bits_left(orig_gb));

    /* superframe header */
    if (get_bits_left(gb) < 14)
        return 1;
    if (!get_bits1(gb))
        return -1;                        // WMAPro-in-WMAVoice superframe
    if (get_bits1(gb)) skip_bits(gb, 12); // number of  samples in superframe
    if (s->has_residual_lsps) {           // residual LSPs (for all frames)
        if (get_bits_left(gb) < s->sframe_lsp_bitsize)
            return 1;
        skip_bits_long(gb, s->sframe_lsp_bitsize);
    }

    /* frames */
    for (n = 0; n < MAX_FRAMES; n++) {
        int aw_idx_is_ext = 0;

        if (!s->has_residual_lsps) {     // independent LSPs (per-frame)
           if (get_bits_left(gb) < s->frame_lsp_bitsize) return 1;
           skip_bits_long(gb, s->frame_lsp_bitsize);
        }
        bd_idx = s->vbm_tree[get_vlc2(gb, frame_type_vlc.table, 6, 3)];
        if (bd_idx < 0)
            return -1;                   // invalid frame type VLC code
        frame_desc = &frame_descs[bd_idx];
        if (frame_desc->acb_type == ACB_TYPE_ASYMMETRIC) {
            if (get_bits_left(gb) < s->pitch_nbits)
                return 1;
            skip_bits_long(gb, s->pitch_nbits);
        }
        if (frame_desc->fcb_type == FCB_TYPE_SILENCE) {
            skip_bits(gb, 8);
        } else if (frame_desc->fcb_type == FCB_TYPE_AW_PULSES) {
            int tmp = get_bits(gb, 6);
            if (tmp >= 0x36) {
                skip_bits(gb, 2);
                aw_idx_is_ext = 1;
            }
        }

        /* blocks */
        if (frame_desc->acb_type == ACB_TYPE_HAMMING) {
            need_bits = s->block_pitch_nbits +
                (frame_desc->n_blocks - 1) * s->block_delta_pitch_nbits;
        } else if (frame_desc->fcb_type == FCB_TYPE_AW_PULSES) {
            need_bits = 2 * !aw_idx_is_ext;
        } else
            need_bits = 0;
        need_bits += frame_desc->frame_size;
        if (get_bits_left(gb) < need_bits)
            return 1;
        skip_bits_long(gb, need_bits);
    }

    return 0;
}

/**
 * Synthesize output samples for a single superframe. If we have any data
 * cached in s->sframe_cache, that will be used instead of whatever is loaded
 * in s->gb.
 *
 * WMA Voice superframes contain 3 frames, each containing 160 audio samples,
 * to give a total of 480 samples per superframe. See #synth_frame() for
 * frame parsing. In addition to the 3 frames, a superframe can also contain
 * the LSPs, if these are specified globally (residually) for all frames
 * rather than individually per frame (see the s->has_residual_lsps option),
 * and it can specify the number of samples encoded in this superframe (if
 * less than 480), which is usually used to prevent blanks at track
 * boundaries.
 *
 * @param ctx WMA Voice decoder context
 * @return 0 on success, <0 on error or 1 if there was not enough data to
 *         fully parse the superframe
 */
static int synth_superframe(AVCodecContext *ctx, AVFrame *frame,
                            int *got_frame_ptr)
{
    WMAVoiceContext *s = ctx->priv_data;
    GetBitContext *gb = &s->gb, s_gb;
    int n, res, n_samples = 480;
    double lsps[MAX_FRAMES][MAX_LSPS];
    const double *mean_lsf = s->lsps == 16 ?
        wmavoice_mean_lsf16[s->lsp_def_mode] : wmavoice_mean_lsf10[s->lsp_def_mode];
    float excitation[MAX_SIGNAL_HISTORY + MAX_SFRAMESIZE + 12];
    float synth[MAX_LSPS + MAX_SFRAMESIZE];
    float *samples;

    memcpy(synth,      s->synth_history,
           s->lsps             * sizeof(*synth));
    memcpy(excitation, s->excitation_history,
           s->history_nsamples * sizeof(*excitation));

    if (s->sframe_cache_size > 0) {
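        /* Bits of a partially received superframe (plus the spillover
         * appended in wmavoice_decode_packet()) were cached from the
         * previous packet; parse from that cache instead of s->gb. */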
        gb = &s_gb;
        init_get_bits(gb, s->sframe_cache, s->sframe_cache_size);
        s->sframe_cache_size = 0;
    }

    if ((res = check_bits_for_superframe(gb, s)) == 1) {
        *got_frame_ptr = 0;
        return 1;
    }

    /* First bit is speech/music bit, it differentiates between WMAVoice
     * speech samples (the actual codec) and WMAVoice music samples, which
     * are really WMAPro-in-WMAVoice-superframes. I've never seen those in
     * the wild yet. */
    if (!get_bits1(gb)) {
        avpriv_request_sample(ctx, "WMAPro-in-WMAVoice");
        return AVERROR_PATCHWELCOME;
    }

    /* (optional) nr. of samples in superframe; always <= 480 and >= 0 */
    if (get_bits1(gb)) {
        if ((n_samples = get_bits(gb, 12)) > 480) {
            av_log(ctx, AV_LOG_ERROR,
                   "Superframe encodes >480 samples (%d), not allowed\n",
                   n_samples);
            return -1;
        }
    }
    /* Parse LSPs, if global for the superframe (can also be per-frame). */
    if (s->has_residual_lsps) {
        double prev_lsps[MAX_LSPS], a1[MAX_LSPS * 2], a2[MAX_LSPS * 2];

        for (n = 0; n < s->lsps; n++)
            prev_lsps[n] = s->prev_lsps[n] - mean_lsf[n];

        if (s->lsps == 10) {
            dequant_lsp10r(gb, lsps[2], prev_lsps, a1, a2, s->lsp_q_mode);
        } else /* s->lsps == 16 */
            dequant_lsp16r(gb, lsps[2], prev_lsps, a1, a2, s->lsp_q_mode);

        for (n = 0; n < s->lsps; n++) {
            lsps[0][n]  = mean_lsf[n] + (a1[n]           - a2[n * 2]);
            lsps[1][n]  = mean_lsf[n] + (a1[s->lsps + n] - a2[n * 2 + 1]);
            lsps[2][n] += mean_lsf[n];
        }
        for (n = 0; n < 3; n++)
            stabilize_lsps(lsps[n], s->lsps);
    }

    /* get output buffer */
    frame->nb_samples = 480;
    if ((res = ff_get_buffer(ctx, frame, 0)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return res;
    }
    frame->nb_samples = n_samples;
    samples = (float *)frame->data[0];

    /* Parse frames, optionally preceded by per-frame (independent) LSPs. */
    for (n = 0; n < 3; n++) {
        if (!s->has_residual_lsps) {
            int m;

            if (s->lsps == 10) {
                dequant_lsp10i(gb, lsps[n]);
            } else /* s->lsps == 16 */
                dequant_lsp16i(gb, lsps[n]);

            for (m = 0; m < s->lsps; m++)
                lsps[n][m] += mean_lsf[m];
            stabilize_lsps(lsps[n], s->lsps);
        }

        if ((res = synth_frame(ctx, gb, n,
                               &samples[n * MAX_FRAMESIZE],
                               lsps[n], n == 0 ? s->prev_lsps : lsps[n - 1],
                               &excitation[s->history_nsamples + n * MAX_FRAMESIZE],
                               &synth[s->lsps + n * MAX_FRAMESIZE]))) {
            *got_frame_ptr = 0;
            return res;
        }
    }

    /* Statistics? FIXME - we don't check for length, a slight overrun
     * will be caught by internal buffer padding, and anything else
     * will be skipped, not read. */
    if (get_bits1(gb)) {
        res = get_bits(gb, 4);
        skip_bits(gb, 10 * (res + 1));
    }

    *got_frame_ptr = 1;

    /* Update history */
    memcpy(s->prev_lsps,           lsps[2],
           s->lsps             * sizeof(*s->prev_lsps));
    memcpy(s->synth_history,      &synth[MAX_SFRAMESIZE],
           s->lsps             * sizeof(*synth));
    memcpy(s->excitation_history, &excitation[MAX_SFRAMESIZE],
           s->history_nsamples * sizeof(*excitation));
    if (s->do_apf)
        memmove(s->zero_exc_pf,       &s->zero_exc_pf[MAX_SFRAMESIZE],
                s->history_nsamples * sizeof(*s->zero_exc_pf));

    return 0;
}

/**
 * Parse the packet header at the start of each packet (input data to this
 * decoder).
 *
 * @param s WMA Voice decoding context private data
 * @return 1 if not enough bits were available, or 0 on success.
 */
static int parse_packet_header(WMAVoiceContext *s)
{
    GetBitContext *gb = &s->gb;
    unsigned int res;

    if (get_bits_left(gb) < 11)
        return 1;
    skip_bits(gb, 4);          // packet sequence number
    s->has_residual_lsps = get_bits1(gb);
    do {
        res = get_bits(gb, 6); // number of superframes per packet
                               // (minus first one if there is spillover)
        if (get_bits_left(gb) < 6 * (res == 0x3F) + s->spillover_bitsize)
            return 1;
    } while (res == 0x3F);
    s->spillover_nbits   = get_bits(gb, s->spillover_bitsize);

    return 0;
}

/**
 * Copy (unaligned) bits from gb/data/size to pb.
 *
 * @param pb target buffer to copy bits into
 * @param data source buffer to copy bits from
 * @param size size of the source data, in bytes
 * @param gb bit I/O context specifying the current position in the source
 *           data. This function might use this to align the bit position to
 *           a whole-byte boundary before calling #avpriv_copy_bits() on aligned
 *           source data
 * @param nbits the amount of bits to copy from source to target
 *
 * @note after calling this function, the current position in the input bit
 *       I/O context is undefined.
 */
static void copy_bits(PutBitContext *pb,
                      const uint8_t *data, int size,
                      GetBitContext *gb, int nbits)
{
    int rmn_bytes, rmn_bits;

    rmn_bits = rmn_bytes = get_bits_left(gb);
    if (rmn_bits < nbits)
        return;
    if (nbits > pb->size_in_bits - put_bits_count(pb))
        return;
    rmn_bits &= 7; rmn_bytes >>= 3;
    if ((rmn_bits = FFMIN(rmn_bits, nbits)) > 0)
        put_bits(pb, rmn_bits, get_bits(gb, rmn_bits));
    avpriv_copy_bits(pb, data + size - rmn_bytes,
                 FFMIN(nbits - rmn_bits, rmn_bytes << 3));
}

/**
 * Packet decoding: a packet is anything that the (ASF) demuxer contains,
 * and we expect that the demuxer / application provides it to us as such
 * (else you'll probably get garbage as output). Every packet has a size of
 * ctx->block_align bytes, starts with a packet header (see
 * #parse_packet_header()), and then a series of superframes. Superframes
 * may cross packet boundaries, i.e. a superframe's data can be split over
 * multiple (at most two) packets.
 *
 * For more information about frames, see #synth_superframe().
 */
static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
                                  int *got_frame_ptr, AVPacket *avpkt)
{
    WMAVoiceContext *s = ctx->priv_data;
    GetBitContext *gb = &s->gb;
    int size, res, pos;

    /* Packets are sometimes a multiple of ctx->block_align, with a packet
     * header at each ctx->block_align bytes. However, Libav's ASF demuxer
     * feeds us ASF packets, which may concatenate multiple "codec" packets
     * in a single "muxer" packet, so we artificially emulate that by
     * capping the packet size at ctx->block_align. */
    for (size = avpkt->size; size > ctx->block_align; size -= ctx->block_align);
    if (!size) {
        *got_frame_ptr = 0;
        return 0;
    }
    init_get_bits(&s->gb, avpkt->data, size << 3);

    /* size == ctx->block_align is used to indicate whether we are dealing with
     * a new packet or a packet of which we already read the packet header
     * previously. */
    if (size == ctx->block_align) { // new packet header
        if ((res = parse_packet_header(s)) < 0)
            return res;

        /* If the packet header specifies a s->spillover_nbits, then we want
         * to push out all data of the previous packet (+ spillover) before
         * continuing to parse new superframes in the current packet. */
        if (s->spillover_nbits > 0) {
            if (s->sframe_cache_size > 0) {
                int cnt = get_bits_count(gb);
                copy_bits(&s->pb, avpkt->data, size, gb, s->spillover_nbits);
                flush_put_bits(&s->pb);
                s->sframe_cache_size += s->spillover_nbits;
                if ((res = synth_superframe(ctx, data, got_frame_ptr)) == 0 &&
                    *got_frame_ptr) {
                    cnt += s->spillover_nbits;
                    s->skip_bits_next = cnt & 7;
                    return cnt >> 3;
                } else
                    skip_bits_long (gb, s->spillover_nbits - cnt +
                                    get_bits_count(gb)); // resync
            } else
                skip_bits_long(gb, s->spillover_nbits);  // resync
        }
    } else if (s->skip_bits_next)
        skip_bits(gb, s->skip_bits_next);

    /* Try parsing superframes in current packet */
    s->sframe_cache_size = 0;
    s->skip_bits_next = 0;
    pos = get_bits_left(gb);
    if ((res = synth_superframe(ctx, data, got_frame_ptr)) < 0) {
        return res;
    } else if (*got_frame_ptr) {
        int cnt = get_bits_count(gb);
        s->skip_bits_next = cnt & 7;
        return cnt >> 3;
    } else if ((s->sframe_cache_size = pos) > 0) {
        /* rewind bit reader to start of last (incomplete) superframe... */
        init_get_bits(gb, avpkt->data, size << 3);
        skip_bits_long(gb, (size << 3) - pos);
        assert(get_bits_left(gb) == pos);

        /* ...and cache it for spillover in next packet */
        init_put_bits(&s->pb, s->sframe_cache, SFRAME_CACHE_MAXSIZE);
        copy_bits(&s->pb, avpkt->data, size, gb, s->sframe_cache_size);
        // FIXME bad - just copy whole bytes and use the
        // skip_bits_next field
    }

    return size;
}

static av_cold int wmavoice_decode_end(AVCodecContext *ctx)
{
    WMAVoiceContext *s = ctx->priv_data;

    if (s->do_apf) {
        ff_rdft_end(&s->rdft);
        ff_rdft_end(&s->irdft);
        ff_dct_end(&s->dct);
        ff_dct_end(&s->dst);
    }

    return 0;
}

static av_cold void wmavoice_flush(AVCodecContext *ctx)
{
    WMAVoiceContext *s = ctx->priv_data;
    int n;

    s->postfilter_agc    = 0;
    s->sframe_cache_size = 0;
    s->skip_bits_next    = 0;
    for (n = 0; n < s->lsps; n++)
        s->prev_lsps[n] = M_PI * (n + 1.0) / (s->lsps + 1.0);
    memset(s->excitation_history, 0,
           sizeof(*s->excitation_history) * MAX_SIGNAL_HISTORY);
    memset(s->synth_history,      0,
           sizeof(*s->synth_history)      * MAX_LSPS);
    memset(s->gain_pred_err,      0,
           sizeof(s->gain_pred_err));

    if (s->do_apf) {
        memset(&s->synth_filter_out_buf[MAX_LSPS_ALIGN16 - s->lsps], 0,
               sizeof(*s->synth_filter_out_buf) * s->lsps);
        memset(s->dcf_mem,              0,
               sizeof(*s->dcf_mem)              * 2);
        memset(s->zero_exc_pf,          0,
               sizeof(*s->zero_exc_pf)          * s->history_nsamples);
        memset(s->denoise_filter_cache, 0, sizeof(s->denoise_filter_cache));
    }
}

AVCodec ff_wmavoice_decoder = {
    .name           = "wmavoice",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_WMAVOICE,
    .priv_data_size = sizeof(WMAVoiceContext),
    .init           = wmavoice_decode_init,
    .close          = wmavoice_decode_end,
    .decode         = wmavoice_decode_packet,
    .capabilities   = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
    .flush          = wmavoice_flush,
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Audio Voice"),
};