/*
 * AMR narrowband decoder
 * Copyright (c) 2006-2007 Robert Swain
 * Copyright (c) 2009 Colin McQuillan
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
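
/**
 * @file
 * AMR narrowband decoder
 */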

#include <string.h>
#include <math.h>

#include "libavutil/channel_layout.h"
#include "libavutil/float_dsp.h"
#include "avcodec.h"
#include "libavutil/common.h"
#include "celp_filters.h"
#include "acelp_filters.h"
#include "acelp_vectors.h"
#include "acelp_pitch_delay.h"
#include "lsp.h"
#include "amr.h"
#include "internal.h"

#include "amrnbdata.h"

#define AMR_BLOCK_SIZE    160
#define AMR_SAMPLE_BOUND  32768.0

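/**
 * Scale from constructed speech (nominally 16-bit PCM range) to [-1,1] float
 * output; the factor of 2 matches the output upscaling of the reference decoder.
 */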
#define AMR_SAMPLE_SCALE  (2.0 / 32768.0)

#define PRED_FAC_MODE_12k2  0.65

#define LSF_R_FAC               (8000.0 / 32768.0)
#define MIN_LSF_SPACING         (50.0488 / 8000.0)
#define PITCH_LAG_MIN_MODE_12k2 18

#define MIN_ENERGY -14.0

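/**
 * Maximum sharpening factor
 *
 * The specification gives 0.8, but the reference C code uses the slightly
 * smaller fixed-point value 13017/16384 = 0.79449462890625.
 */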
#define SHARP_MAX 0.79449462890625

#define AMR_TILT_RESPONSE 22

#define AMR_TILT_GAMMA_T  0.8

#define AMR_AGC_ALPHA     0.9

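/** AMR-NB decoder private context */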
typedef struct AMRContext {
    AMRNBFrame                        frame; ///< decoded AMR parameters (lsf coefficients, codebook indexes, etc)
    uint8_t             bad_frame_indicator; ///< bad frame ? 1 : 0
    enum Mode                cur_frame_mode;

    int16_t     prev_lsf_r[LP_FILTER_ORDER]; ///< residual LSF vector from previous subframe
    double          lsp[4][LP_FILTER_ORDER]; ///< lsp vectors from current frame
    double   prev_lsp_sub4[LP_FILTER_ORDER]; ///< lsp vector for the 4th subframe of the previous frame

    float         lsf_q[4][LP_FILTER_ORDER]; ///< interpolated LSF vector for fixed gain smoothing
    float          lsf_avg[LP_FILTER_ORDER]; ///< averaged LSF vector

    float           lpc[4][LP_FILTER_ORDER]; ///< lpc coefficient vectors for 4 subframes

    uint8_t                   pitch_lag_int; ///< integer part of pitch lag from current subframe

    float excitation_buf[PITCH_DELAY_MAX + LP_FILTER_ORDER + 1 + AMR_SUBFRAME_SIZE]; ///< excitation history and current excitation
    float                       *excitation; ///< pointer to the current excitation vector in excitation_buf

    float   pitch_vector[AMR_SUBFRAME_SIZE]; ///< adaptive codebook (pitch) vector
    float   fixed_vector[AMR_SUBFRAME_SIZE]; ///< algebraic codebook (fixed) vector

    float               prediction_error[4]; ///< quantified prediction errors for the previous four subframes
    float                     pitch_gain[5]; ///< quantified pitch gains for the current and previous four subframes
    float                     fixed_gain[5]; ///< quantified fixed gains for the current and previous four subframes

    float                              beta; ///< previous pitch_gain, bounded by [0.0,SHARP_MAX]
    uint8_t                      diff_count; ///< number of subframes for which diff has been above 0.65
    uint8_t                      hang_count; ///< number of subframes since a hangover period started

    float            prev_sparse_fixed_gain; ///< previous fixed gain; used by anti-sparseness to detect "onset"
    uint8_t               prev_ir_filter_nr; ///< previous impulse response filter strength
    uint8_t                 ir_filter_onset; ///< flag for impulse response filter strength

    float                postfilter_mem[10]; ///< previous intermediate values in the formant filter
    float                          tilt_mem; ///< previous input to the tilt compensation filter
    float                    postfilter_agc; ///< previous factor used for adaptive gain control
    float                  high_pass_mem[2]; ///< previous intermediate values in the high-pass filter

    float samples_in[LP_FILTER_ORDER + AMR_SUBFRAME_SIZE]; ///< floating point samples

} AMRContext;

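/**
 * Double-precision version of ff_weighted_vector_sumf():
 * out[i] = weight_coeff_a * in_a[i] + weight_coeff_b * in_b[i]
 */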
static void weighted_vector_sumd(double *out, const double *in_a,
                                 const double *in_b, double weight_coeff_a,
                                 double weight_coeff_b, int length)
{
    int i;

    for (i = 0; i < length; i++)
        out[i] = weight_coeff_a * in_a[i]
               + weight_coeff_b * in_b[i];
}

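/**
 * Initialize the decoder: set the output sample format and rate and seed the
 * LSP/LSF history and prediction error with their initialization values.
 */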
static av_cold int amrnb_decode_init(AVCodecContext *avctx)
{
    AMRContext *p = avctx->priv_data;
    int i;

    if (avctx->channels > 1) {
        avpriv_report_missing_feature(avctx, "multi-channel AMR");
        return AVERROR_PATCHWELCOME;
    }

    avctx->channels       = 1;
    avctx->channel_layout = AV_CH_LAYOUT_MONO;
    avctx->sample_rate    = 8000;
    avctx->sample_fmt     = AV_SAMPLE_FMT_FLT;

    // p->excitation always points to the same position in p->excitation_buf
    p->excitation = &p->excitation_buf[PITCH_DELAY_MAX + LP_FILTER_ORDER + 1];

    for (i = 0; i < LP_FILTER_ORDER; i++) {
        p->prev_lsp_sub4[i] = lsp_sub4_init[i] * 1000 / (float)(1 << 15);
        p->lsf_avg[i] = p->lsf_q[3][i] = lsp_avg_init[i] / (float)(1 << 15);
    }

    for (i = 0; i < 4; i++)
        p->prediction_error[i] = MIN_ENERGY;

    return 0;
}

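/**
 * Unpack an RFC4867 speech frame into the AMR frame mode and parameters.
 *
 * @param p        the context
 * @param buf      pointer to the input buffer
 * @param buf_size size of the input buffer
 *
 * @return the frame mode
 */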
static enum Mode unpack_bitstream(AMRContext *p, const uint8_t *buf,
                                  int buf_size)
{
    enum Mode mode;

    // Decode the first octet.
    mode = buf[0] >> 3 & 0x0F;                      // frame type
    p->bad_frame_indicator = (buf[0] & 0x4) != 0x4; // quality bit

    if (mode >= N_MODES || buf_size < frame_sizes_nb[mode] + 1) {
        return NO_DATA;
    }

    if (mode < MODE_DTX)
        ff_amr_bit_reorder((uint16_t *) &p->frame, sizeof(AMRNBFrame), buf + 1,
                           amr_unpacking_bitmaps_per_mode[mode]);

    return mode;
}

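/**
 * Interpolate the quantized LSF vector to get LSF vectors for all four
 * subframes.
 *
 * @param lsf_q   LSFs in [0,1] for each subframe
 * @param lsf_new new LSFs in [0,1] for subframe 4
 */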
static void interpolate_lsf(float lsf_q[4][LP_FILTER_ORDER], float *lsf_new)
{
    int i;

    for (i = 0; i < 4; i++)
        ff_weighted_vector_sumf(lsf_q[i], lsf_q[3], lsf_new,
                                0.25 * (3 - i), 0.25 * (i + 1),
                                LP_FILTER_ORDER);
}

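/**
 * Convert a set of split-matrix quantized LSF indexes into an LSP vector
 * (12.2 kbit/s mode).
 *
 * @param p                 the context
 * @param lsp               output LSP vector
 * @param lsf_no_r          LSF vector without the residual vector added
 * @param lsf_quantizer     pointers into the LSF dictionary tables
 * @param quantizer_offset  offset within the tables
 * @param sign              if non-zero, negate the residual pair from the 3rd table
 * @param update            store data for computing the next frame's LSFs
 */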
static void lsf2lsp_for_mode12k2(AMRContext *p, double lsp[LP_FILTER_ORDER],
                                 const float lsf_no_r[LP_FILTER_ORDER],
                                 const int16_t *lsf_quantizer[5],
                                 const int quantizer_offset,
                                 const int sign, const int update)
{
    int16_t lsf_r[LP_FILTER_ORDER]; // residual LSF vector
    float lsf_q[LP_FILTER_ORDER];   // quantified LSF vector
    int i;

    for (i = 0; i < LP_FILTER_ORDER >> 1; i++)
        memcpy(&lsf_r[i << 1], &lsf_quantizer[i][quantizer_offset],
               2 * sizeof(*lsf_r));

    if (sign) {
        lsf_r[4] *= -1;
        lsf_r[5] *= -1;
    }

    if (update)
        memcpy(p->prev_lsf_r, lsf_r, LP_FILTER_ORDER * sizeof(*lsf_r));

    for (i = 0; i < LP_FILTER_ORDER; i++)
        lsf_q[i] = lsf_r[i] * (LSF_R_FAC / 8000.0) + lsf_no_r[i] * (1.0 / 8000.0);

    ff_set_min_dist_lsf(lsf_q, MIN_LSF_SPACING, LP_FILTER_ORDER);

    if (update)
        interpolate_lsf(p->lsf_q, lsf_q);

    ff_acelp_lsf2lspd(lsp, lsf_q, LP_FILTER_ORDER);
}

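/**
 * Decode the 5 split-matrix quantized LSF indexes of the 12.2 kbit/s mode and
 * produce the LSP vectors for all four subframes.
 *
 * @param p the context
 */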
static void lsf2lsp_5(AMRContext *p)
{
    const uint16_t *lsf_param = p->frame.lsf;
    float lsf_no_r[LP_FILTER_ORDER]; // LSFs without the residual vector
    const int16_t *lsf_quantizer[5];
    int i;

    lsf_quantizer[0] = lsf_5_1[lsf_param[0]];
    lsf_quantizer[1] = lsf_5_2[lsf_param[1]];
    lsf_quantizer[2] = lsf_5_3[lsf_param[2] >> 1];
    lsf_quantizer[3] = lsf_5_4[lsf_param[3]];
    lsf_quantizer[4] = lsf_5_5[lsf_param[4]];

    for (i = 0; i < LP_FILTER_ORDER; i++)
        lsf_no_r[i] = p->prev_lsf_r[i] * LSF_R_FAC * PRED_FAC_MODE_12k2 + lsf_5_mean[i];

    lsf2lsp_for_mode12k2(p, p->lsp[1], lsf_no_r, lsf_quantizer, 0, lsf_param[2] & 1, 0);
    lsf2lsp_for_mode12k2(p, p->lsp[3], lsf_no_r, lsf_quantizer, 2, lsf_param[2] & 1, 1);

    // interpolate LSP vectors at subframes 1 and 3
    weighted_vector_sumd(p->lsp[0], p->prev_lsp_sub4, p->lsp[1], 0.5, 0.5, LP_FILTER_ORDER);
    weighted_vector_sumd(p->lsp[2], p->lsp[1]       , p->lsp[3], 0.5, 0.5, LP_FILTER_ORDER);
}

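/**
 * Decode the 3 split-vector quantized LSF indexes of the other modes and
 * produce the LSP vectors for all four subframes.
 *
 * @param p the context
 */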
static void lsf2lsp_3(AMRContext *p)
{
    const uint16_t *lsf_param = p->frame.lsf;
    int16_t lsf_r[LP_FILTER_ORDER]; // residual LSF vector
    float lsf_q[LP_FILTER_ORDER];   // quantified LSF vector
    const int16_t *lsf_quantizer;
    int i, j;

    lsf_quantizer = (p->cur_frame_mode == MODE_7k95 ? lsf_3_1_MODE_7k95 : lsf_3_1)[lsf_param[0]];
    memcpy(lsf_r, lsf_quantizer, 3 * sizeof(*lsf_r));

    lsf_quantizer = lsf_3_2[lsf_param[1] << (p->cur_frame_mode <= MODE_5k15)];
    memcpy(lsf_r + 3, lsf_quantizer, 3 * sizeof(*lsf_r));

    lsf_quantizer = (p->cur_frame_mode <= MODE_5k15 ? lsf_3_3_MODE_5k15 : lsf_3_3)[lsf_param[2]];
    memcpy(lsf_r + 6, lsf_quantizer, 4 * sizeof(*lsf_r));

    // calculate mean-removed LSF vector and add mean
    for (i = 0; i < LP_FILTER_ORDER; i++)
        lsf_q[i] = (lsf_r[i] + p->prev_lsf_r[i] * pred_fac[i]) * (LSF_R_FAC / 8000.0) + lsf_3_mean[i] * (1.0 / 8000.0);

    ff_set_min_dist_lsf(lsf_q, MIN_LSF_SPACING, LP_FILTER_ORDER);

    // store data for computing the next frame's LSFs
    interpolate_lsf(p->lsf_q, lsf_q);
    memcpy(p->prev_lsf_r, lsf_r, LP_FILTER_ORDER * sizeof(*lsf_r));

    ff_acelp_lsf2lspd(p->lsp[3], lsf_q, LP_FILTER_ORDER);

    // interpolate LSP vectors at subframes 1, 2 and 3
    for (i = 1; i <= 3; i++)
        for (j = 0; j < LP_FILTER_ORDER; j++)
            p->lsp[i - 1][j] = p->prev_lsp_sub4[j] +
                (p->lsp[3][j] - p->prev_lsp_sub4[j]) * 0.25 * i;
}

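/**
 * Like ff_decode_pitch_lag(), but with 1/6 resolution (12.2 kbit/s mode).
 */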
static void decode_pitch_lag_1_6(int *lag_int, int *lag_frac, int pitch_index,
                                 const int prev_lag_int, const int subframe)
{
    if (subframe == 0 || subframe == 2) {
        if (pitch_index < 463) {
            *lag_int  = (pitch_index + 107) * 10923 >> 16;
            *lag_frac = pitch_index - *lag_int * 6 + 105;
        } else {
            *lag_int  = pitch_index - 368;
            *lag_frac = 0;
        }
    } else {
        *lag_int  = ((pitch_index + 5) * 10923 >> 16) - 1;
        *lag_frac = pitch_index - *lag_int * 6 - 3;
        *lag_int += av_clip(prev_lag_int - 5, PITCH_LAG_MIN_MODE_12k2,
                            PITCH_DELAY_MAX - 9);
    }
}

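/**
 * Decode the adaptive codebook (pitch) vector for one subframe by
 * interpolating the past excitation at the decoded pitch lag.
 */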
static void decode_pitch_vector(AMRContext *p,
                                const AMRNBSubframe *amr_subframe,
                                const int subframe)
{
    int pitch_lag_int, pitch_lag_frac;
    enum Mode mode = p->cur_frame_mode;

    if (p->cur_frame_mode == MODE_12k2) {
        decode_pitch_lag_1_6(&pitch_lag_int, &pitch_lag_frac,
                             amr_subframe->p_lag, p->pitch_lag_int,
                             subframe);
    } else
        ff_decode_pitch_lag(&pitch_lag_int, &pitch_lag_frac,
                            amr_subframe->p_lag,
                            p->pitch_lag_int, subframe,
                            mode != MODE_4k75 && mode != MODE_5k15,
                            mode <= MODE_6k7 ? 4 : (mode == MODE_7k95 ? 5 : 6));

    p->pitch_lag_int = pitch_lag_int; // store previous lag in a uint8_t

    pitch_lag_frac <<= (p->cur_frame_mode != MODE_12k2);

    pitch_lag_int += pitch_lag_frac > 0;

    /* Calculate the pitch vector by interpolating the past excitation at the
       pitch lag using a b60 hamming windowed sinc function. */
    ff_acelp_interpolatef(p->excitation, p->excitation + 1 - pitch_lag_int,
                          ff_b60_sinc, 6,
                          pitch_lag_frac + 6 - 6 * (pitch_lag_frac > 0),
                          10, AMR_SUBFRAME_SIZE);

    memcpy(p->pitch_vector, p->excitation, AMR_SUBFRAME_SIZE * sizeof(float));
}

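/**
 * Decode a 10-bit algebraic codebook index into three pulse positions
 * (10.2 kbit/s mode only).
 */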
static void decode_10bit_pulse(int code, int pulse_position[8],
                               int i1, int i2, int i3)
{
    // coded using 7+3 bits with the 3 LSBs being, individually, the LSB of 1 of
    // the 3 pulses and the upper 7 bits being coded in base 5
    const uint8_t *positions = base_five_table[code >> 3];
    pulse_position[i1] = (positions[2] << 1) + ( code       & 1);
    pulse_position[i2] = (positions[1] << 1) + ((code >> 1) & 1);
    pulse_position[i3] = (positions[0] << 1) + ((code >> 2) & 1);
}

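/**
 * Decode the fixed vector indexes of a 10.2 kbit/s frame: 8 pulses coded with
 * 31 bits are converted to pulse positions and signs in the sparse
 * representation.
 */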
static void decode_8_pulses_31bits(const int16_t *fixed_index,
                                   AMRFixed *fixed_sparse)
{
    int pulse_position[8];
    int i, temp;

    decode_10bit_pulse(fixed_index[4], pulse_position, 0, 4, 1);
    decode_10bit_pulse(fixed_index[5], pulse_position, 2, 6, 5);

    // coded using 5+2 bits with the 2 LSBs being, individually, the LSB of 1 of
    // the 2 pulses and the upper 5 bits being coded in base 5
    temp = ((fixed_index[6] >> 2) * 25 + 12) >> 5;
    pulse_position[3] = temp % 5;
    pulse_position[7] = temp / 5;
    if (pulse_position[7] & 1)
        pulse_position[3] = 4 - pulse_position[3];
    pulse_position[3] = (pulse_position[3] << 1) + ( fixed_index[6]       & 1);
    pulse_position[7] = (pulse_position[7] << 1) + ((fixed_index[6] >> 1) & 1);

    fixed_sparse->n = 8;
    for (i = 0; i < 4; i++) {
        const int pos1   = (pulse_position[i]     << 2) + i;
        const int pos2   = (pulse_position[i + 4] << 2) + i;
        const float sign = fixed_index[i] ? -1.0 : 1.0;
        fixed_sparse->x[i    ] = pos1;
        fixed_sparse->x[i + 4] = pos2;
        fixed_sparse->y[i    ] = sign;
        fixed_sparse->y[i + 4] = pos2 < pos1 ? -sign : sign;
    }
}

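/**
 * Decode the algebraic codebook index to pulse positions and signs and
 * construct the sparse fixed (algebraic codebook) vector.
 *
 * @param fixed_sparse sparse representation of the fixed vector
 * @param pulses       algebraic codebook indexes
 * @param mode         mode of the current frame
 * @param subframe     current subframe number
 */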
static void decode_fixed_sparse(AMRFixed *fixed_sparse, const uint16_t *pulses,
                                const enum Mode mode, const int subframe)
{
    assert(MODE_4k75 <= mode && mode <= MODE_12k2);

    if (mode == MODE_12k2) {
        ff_decode_10_pulses_35bits(pulses, fixed_sparse, gray_decode, 5, 3);
    } else if (mode == MODE_10k2) {
        decode_8_pulses_31bits(pulses, fixed_sparse);
    } else {
        int *pulse_position = fixed_sparse->x;
        int i, pulse_subset;
        const int fixed_index = pulses[0];

        if (mode <= MODE_5k15) {
            pulse_subset      = ((fixed_index >> 3) & 8) + (subframe << 1);
            pulse_position[0] = ( fixed_index       & 7) * 5 + track_position[pulse_subset];
            pulse_position[1] = ((fixed_index >> 3) & 7) * 5 + track_position[pulse_subset + 1];
            fixed_sparse->n = 2;
        } else if (mode == MODE_5k9) {
            pulse_subset      = ((fixed_index & 1) << 1) + 1;
            pulse_position[0] = ((fixed_index >> 1) & 7) * 5 + pulse_subset;
            pulse_subset      = (fixed_index  >> 4) & 3;
            pulse_position[1] = ((fixed_index >> 6) & 7) * 5 + pulse_subset + (pulse_subset == 3 ? 1 : 0);
            fixed_sparse->n = pulse_position[0] == pulse_position[1] ? 1 : 2;
        } else if (mode == MODE_6k7) {
            pulse_position[0] = (fixed_index & 7) * 5;
            pulse_subset      = (fixed_index >> 2) & 2;
            pulse_position[1] = ((fixed_index >> 4) & 7) * 5 + pulse_subset + 1;
            pulse_subset      = (fixed_index >> 6) & 2;
            pulse_position[2] = ((fixed_index >> 8) & 7) * 5 + pulse_subset + 2;
            fixed_sparse->n = 3;
        } else { // mode <= MODE_7k95
            pulse_position[0] = gray_decode[ fixed_index        & 7];
            pulse_position[1] = gray_decode[(fixed_index >> 3)  & 7] + 1;
            pulse_position[2] = gray_decode[(fixed_index >> 6)  & 7] + 2;
            pulse_subset      = (fixed_index >> 9) & 1;
            pulse_position[3] = gray_decode[(fixed_index >> 10) & 7] + pulse_subset + 3;
            fixed_sparse->n = 4;
        }
        for (i = 0; i < fixed_sparse->n; i++)
            fixed_sparse->y[i] = (pulses[1] >> i) & 1 ? 1.0 : -1.0;
    }
}

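/**
 * Apply pitch lag to obtain the sharpened fixed vector (section 6.1.2).
 *
 * @param p            the context
 * @param subframe     current subframe number
 * @param mode         mode of the current frame
 * @param fixed_sparse sparse representation of the fixed vector
 */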
static void pitch_sharpening(AMRContext *p, int subframe, enum Mode mode,
                             AMRFixed *fixed_sparse)
{
    // The spec suggests the current pitch gain is always used, but in other
    // modes the pitch and codebook gains are jointly quantized (sec 5.8.2)
    // so the codebook gain cannot depend on the quantized pitch gain.
    if (mode == MODE_12k2)
        p->beta = FFMIN(p->pitch_gain[4], 1.0);

    fixed_sparse->pitch_lag = p->pitch_lag_int;
    fixed_sparse->pitch_fac = p->beta;

    // Save the pitch sharpening factor for the next subframe.
    // MODE_4k75 only updates on the 2nd and 4th subframes - this follows from
    // the fact that the gains for two subframes are jointly quantized.
    if (mode != MODE_4k75 || subframe & 1)
        p->beta = av_clipf(p->pitch_gain[4], 0.0, SHARP_MAX);
}

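/**
 * Smooth the fixed gain by averaging it with the recent fixed gains when the
 * spectral envelope is stationary (section 6.1.4).
 *
 * @param p       the context
 * @param lsf     LSFs for the current subframe, in the range [0,1]
 * @param lsf_avg averaged LSFs
 * @param mode    mode of the current frame
 *
 * @return smoothed fixed gain
 */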
static float fixed_gain_smooth(AMRContext *p, const float *lsf,
                               const float *lsf_avg, const enum Mode mode)
{
    float diff = 0.0;
    int i;

    for (i = 0; i < LP_FILTER_ORDER; i++)
        diff += fabs(lsf_avg[i] - lsf[i]) / lsf_avg[i];

    // If diff is large for ten subframes, disable smoothing for a 40-subframe
    // hangover period.
    p->diff_count++;
    if (diff <= 0.65)
        p->diff_count = 0;

    if (p->diff_count > 10) {
        p->hang_count = 0;
        p->diff_count--; // don't let diff_count overflow
    }

    if (p->hang_count < 40) {
        p->hang_count++;
    } else if (mode < MODE_7k4 || mode == MODE_10k2) {
        const float smoothing_factor = av_clipf(4.0 * diff - 1.6, 0.0, 1.0);
        const float fixed_gain_mean  = (p->fixed_gain[0] + p->fixed_gain[1] +
                                        p->fixed_gain[2] + p->fixed_gain[3] +
                                        p->fixed_gain[4]) * 0.2;
        return smoothing_factor * p->fixed_gain[4] +
               (1.0 - smoothing_factor) * fixed_gain_mean;
    }
    return p->fixed_gain[4];
}

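/**
 * Decode the pitch gain and the fixed gain correction factor (section 6.1.3).
 *
 * @param p                 the context
 * @param amr_subframe      unpacked AMR subframe
 * @param mode              mode of the current frame
 * @param subframe          current subframe number
 * @param fixed_gain_factor decoded gain correction factor
 */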
static void decode_gains(AMRContext *p, const AMRNBSubframe *amr_subframe,
                         const enum Mode mode, const int subframe,
                         float *fixed_gain_factor)
{
    if (mode == MODE_12k2 || mode == MODE_7k95) {
        p->pitch_gain[4]   = qua_gain_pit [amr_subframe->p_gain    ]
            * (1.0 / 16384.0);
        *fixed_gain_factor = qua_gain_code[amr_subframe->fixed_gain]
            * (1.0 /  2048.0);
    } else {
        const uint16_t *gains;

        if (mode >= MODE_6k7) {
            gains = gains_high[amr_subframe->p_gain];
        } else if (mode >= MODE_5k15) {
            gains = gains_low [amr_subframe->p_gain];
        } else {
            // gain index is only coded in subframes 0,2 for MODE_4k75
            gains = gains_MODE_4k75[(p->frame.subframe[subframe & 2].p_gain << 1) + (subframe & 1)];
        }

        p->pitch_gain[4]   = gains[0] * (1.0 / 16384.0);
        *fixed_gain_factor = gains[1] * (1.0 /  4096.0);
    }
}

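/**
 * Circularly convolve the sparse fixed vector with a phase dispersion
 * impulse response filter.
 *
 * @param out    vector with the filter applied
 * @param in     source sparse vector
 * @param filter phase filter coefficients
 */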
static void apply_ir_filter(float *out, const AMRFixed *in,
                            const float *filter)
{
    float filter1[AMR_SUBFRAME_SIZE],
          filter2[AMR_SUBFRAME_SIZE];
    int lag   = in->pitch_lag;
    float fac = in->pitch_fac;
    int i;

    if (lag < AMR_SUBFRAME_SIZE) {
        ff_celp_circ_addf(filter1, filter, filter, lag, fac,
                          AMR_SUBFRAME_SIZE);

        if (lag < AMR_SUBFRAME_SIZE >> 1)
            ff_celp_circ_addf(filter2, filter, filter1, lag, fac,
                              AMR_SUBFRAME_SIZE);
    }

    memset(out, 0, sizeof(float) * AMR_SUBFRAME_SIZE);
    for (i = 0; i < in->n; i++) {
        int   x = in->x[i];
        float y = in->y[i];
        const float *filterp;

        if (x >= AMR_SUBFRAME_SIZE - lag) {
            filterp = filter;
        } else if (x >= AMR_SUBFRAME_SIZE - (lag << 1)) {
            filterp = filter1;
        } else
            filterp = filter2;

        ff_celp_circ_addf(out, out, filterp, x, y, AMR_SUBFRAME_SIZE);
    }
}

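/**
 * Reduce fixed vector sparseness by smoothing with one of three IR filters,
 * also known as "adaptive phase dispersion".
 *
 * @param p            the context
 * @param fixed_sparse sparse fixed vector
 * @param fixed_vector unfiltered fixed vector
 * @param fixed_gain   smoothed fixed gain
 * @param out          space for the modified vector if necessary
 *
 * @return the fixed vector to use for synthesis
 */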
static const float *anti_sparseness(AMRContext *p, AMRFixed *fixed_sparse,
                                    const float *fixed_vector,
                                    float fixed_gain, float *out)
{
    int ir_filter_nr;

    if (p->pitch_gain[4] < 0.6) {
        ir_filter_nr = 0;      // strong filtering
    } else if (p->pitch_gain[4] < 0.9) {
        ir_filter_nr = 1;      // medium filtering
    } else
        ir_filter_nr = 2;      // no filtering

    // detect 'onset'
    if (fixed_gain > 2.0 * p->prev_sparse_fixed_gain) {
        p->ir_filter_onset = 2;
    } else if (p->ir_filter_onset)
        p->ir_filter_onset--;

    if (!p->ir_filter_onset) {
        int i, count = 0;

        for (i = 0; i < 5; i++)
            if (p->pitch_gain[i] < 0.6)
                count++;
        if (count > 2)
            ir_filter_nr = 0;

        if (ir_filter_nr > p->prev_ir_filter_nr + 1)
            ir_filter_nr--;
    } else if (ir_filter_nr < 2)
        ir_filter_nr++;

    // Disable filtering for a very low level of fixed_gain.
    // Note this step is not specified in the technical description but is in
    // the reference source in the function Ph_disp.
    if (fixed_gain < 5.0)
        ir_filter_nr = 2;

    if (p->cur_frame_mode != MODE_7k4 && p->cur_frame_mode < MODE_10k2
        && ir_filter_nr < 2) {
        apply_ir_filter(out, fixed_sparse,
                        (p->cur_frame_mode == MODE_7k95 ?
                             ir_filters_lookup_MODE_7k95 :
                             ir_filters_lookup)[ir_filter_nr]);
        fixed_vector = out;
    }

    // update ir filter strength history
    p->prev_ir_filter_nr      = ir_filter_nr;
    p->prev_sparse_fixed_gain = fixed_gain;

    return fixed_vector;
}

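/**
 * Conduct 10th order linear predictive coding synthesis for one subframe.
 *
 * @param p            the context
 * @param lpc          LP coefficients for this subframe
 * @param fixed_gain   fixed codebook gain for synthesis
 * @param fixed_vector algebraic codebook vector
 * @param samples      pointer to the output speech samples
 * @param overflow     16-bit overflow flag
 *
 * @return 1 if an overflow was detected, 0 otherwise
 */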
static int synthesis(AMRContext *p, float *lpc,
                     float fixed_gain, const float *fixed_vector,
                     float *samples, uint8_t overflow)
{
    int i;
    float excitation[AMR_SUBFRAME_SIZE];

    // if an overflow has been detected, the pitch vector is scaled down by a
    // factor of 4
    if (overflow)
        for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
            p->pitch_vector[i] *= 0.25;

    ff_weighted_vector_sumf(excitation, p->pitch_vector, fixed_vector,
                            p->pitch_gain[4], fixed_gain, AMR_SUBFRAME_SIZE);

    // emphasize pitch vector contribution
    if (p->pitch_gain[4] > 0.5 && !overflow) {
        float energy = avpriv_scalarproduct_float_c(excitation, excitation,
                                                    AMR_SUBFRAME_SIZE);
        float pitch_factor =
            p->pitch_gain[4] *
            (p->cur_frame_mode == MODE_12k2 ?
                0.25 * FFMIN(p->pitch_gain[4], 1.0) :
                0.5  * FFMIN(p->pitch_gain[4], SHARP_MAX));

        for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
            excitation[i] += pitch_factor * p->pitch_vector[i];

        ff_scale_vector_to_given_sum_of_squares(excitation, excitation, energy,
                                                AMR_SUBFRAME_SIZE);
    }

    ff_celp_lp_synthesis_filterf(samples, lpc, excitation, AMR_SUBFRAME_SIZE,
                                 LP_FILTER_ORDER);

    // detect overflow
    for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
        if (fabsf(samples[i]) > AMR_SAMPLE_BOUND) {
            return 1;
        }

    return 0;
}

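/**
 * Update buffers and history at the end of decoding a subframe.
 *
 * @param p the context
 */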
static void update_state(AMRContext *p)
{
    memcpy(p->prev_lsp_sub4, p->lsp[3], LP_FILTER_ORDER * sizeof(p->lsp[3][0]));

    memmove(&p->excitation_buf[0], &p->excitation_buf[AMR_SUBFRAME_SIZE],
            (PITCH_DELAY_MAX + LP_FILTER_ORDER + 1) * sizeof(float));

    memmove(&p->pitch_gain[0], &p->pitch_gain[1], 4 * sizeof(float));
    memmove(&p->fixed_gain[0], &p->fixed_gain[1], 4 * sizeof(float));

    memmove(&p->samples_in[0], &p->samples_in[AMR_SUBFRAME_SIZE],
            LP_FILTER_ORDER * sizeof(float));
}

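/**
 * Get the tilt factor of a formant filter from its transfer function.
 *
 * @param lpc_n LP_FILTER_ORDER coefficients of the numerator
 * @param lpc_d LP_FILTER_ORDER coefficients of the denominator
 */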
static float tilt_factor(float *lpc_n, float *lpc_d)
{
    float rh0, rh1; // autocorrelation at lag 0 and 1

    // LP_FILTER_ORDER prior zeros are needed for ff_celp_lp_synthesis_filterf
    float impulse_buffer[LP_FILTER_ORDER + AMR_TILT_RESPONSE] = { 0 };
    float *hf = impulse_buffer + LP_FILTER_ORDER; // start of impulse response

    hf[0] = 1.0;
    memcpy(hf + 1, lpc_n, sizeof(float) * LP_FILTER_ORDER);
    ff_celp_lp_synthesis_filterf(hf, lpc_d, hf, AMR_TILT_RESPONSE,
                                 LP_FILTER_ORDER);

    rh0 = avpriv_scalarproduct_float_c(hf, hf,     AMR_TILT_RESPONSE);
    rh1 = avpriv_scalarproduct_float_c(hf, hf + 1, AMR_TILT_RESPONSE - 1);

    // The spec only specifies this check for 12.2 and 10.2 kbit/s
    // modes. But in the ref source the tilt is always non-negative.
    return rh1 >= 0.0 ? rh1 / rh0 * AMR_TILT_GAMMA_T : 0.0;
}

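/**
 * Perform adaptive post-filtering to enhance the quality of the speech
 * (section 6.2.1).
 *
 * @param p       the context
 * @param lpc     interpolated LP coefficients for this subframe
 * @param buf_out output of the filter
 */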
static void postfilter(AMRContext *p, float *lpc, float *buf_out)
{
    int i;
    float *samples    = p->samples_in + LP_FILTER_ORDER; // Start of input

    float speech_gain = avpriv_scalarproduct_float_c(samples, samples,
                                                     AMR_SUBFRAME_SIZE);

    float pole_out[AMR_SUBFRAME_SIZE + LP_FILTER_ORDER];  // Output of pole filter
    const float *gamma_n, *gamma_d;                       // Formant filter factor table
    float lpc_n[LP_FILTER_ORDER], lpc_d[LP_FILTER_ORDER]; // Transfer function coefficients

    if (p->cur_frame_mode == MODE_12k2 || p->cur_frame_mode == MODE_10k2) {
        gamma_n = ff_pow_0_7;
        gamma_d = ff_pow_0_75;
    } else {
        gamma_n = ff_pow_0_55;
        gamma_d = ff_pow_0_7;
    }

    for (i = 0; i < LP_FILTER_ORDER; i++) {
        lpc_n[i] = lpc[i] * gamma_n[i];
        lpc_d[i] = lpc[i] * gamma_d[i];
    }

    memcpy(pole_out, p->postfilter_mem, sizeof(float) * LP_FILTER_ORDER);
    ff_celp_lp_synthesis_filterf(pole_out + LP_FILTER_ORDER, lpc_d, samples,
                                 AMR_SUBFRAME_SIZE, LP_FILTER_ORDER);
    memcpy(p->postfilter_mem, pole_out + AMR_SUBFRAME_SIZE,
           sizeof(float) * LP_FILTER_ORDER);

    ff_celp_lp_zero_synthesis_filterf(buf_out, lpc_n,
                                      pole_out + LP_FILTER_ORDER,
                                      AMR_SUBFRAME_SIZE, LP_FILTER_ORDER);

    ff_tilt_compensation(&p->tilt_mem, tilt_factor(lpc_n, lpc_d), buf_out,
                         AMR_SUBFRAME_SIZE);

    ff_adaptive_gain_control(buf_out, buf_out, speech_gain, AMR_SUBFRAME_SIZE,
                             AMR_AGC_ALPHA, &p->postfilter_agc);
}

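/**
 * Decode one packed AMR-NB frame into AMR_BLOCK_SIZE float samples.
 */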
static int amrnb_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
{

    AMRContext *p = avctx->priv_data;        // pointer to private data
    AVFrame *frame     = data;
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    float *buf_out;                          // pointer to the output data buffer
    int i, subframe, ret;
    float fixed_gain_factor;
    AMRFixed fixed_sparse = {0};             // fixed vector up to anti-sparseness processing
    float spare_vector[AMR_SUBFRAME_SIZE];   // extra stack space to hold result from anti-sparseness processing
    float synth_fixed_gain;                  // the fixed gain that synthesis should use
    const float *synth_fixed_vector;         // pointer to the fixed vector that synthesis should use

    /* get output buffer */
    frame->nb_samples = AMR_BLOCK_SIZE;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    buf_out = (float *)frame->data[0];

    p->cur_frame_mode = unpack_bitstream(p, buf, buf_size);
    if (p->cur_frame_mode == NO_DATA) {
        av_log(avctx, AV_LOG_ERROR, "Corrupt bitstream\n");
        return AVERROR_INVALIDDATA;
    }
    if (p->cur_frame_mode == MODE_DTX) {
        avpriv_request_sample(avctx, "dtx mode");
        return AVERROR_PATCHWELCOME;
    }

    if (p->cur_frame_mode == MODE_12k2) {
        lsf2lsp_5(p);
    } else
        lsf2lsp_3(p);

    for (i = 0; i < 4; i++)
        ff_acelp_lspd2lpc(p->lsp[i], p->lpc[i], 5);

    for (subframe = 0; subframe < 4; subframe++) {
        const AMRNBSubframe *amr_subframe = &p->frame.subframe[subframe];

        decode_pitch_vector(p, amr_subframe, subframe);

        decode_fixed_sparse(&fixed_sparse, amr_subframe->pulses,
                            p->cur_frame_mode, subframe);

        // The fixed gain (section 6.1.3) depends on the fixed vector
        // (section 6.1.2), but the fixed vector calculation uses
        // pitch sharpening based on the pitch gain (section 6.1.3).
        // So the correct order is: pitch gain, pitch sharpening, fixed gain.
        decode_gains(p, amr_subframe, p->cur_frame_mode, subframe,
                     &fixed_gain_factor);

        pitch_sharpening(p, subframe, p->cur_frame_mode, &fixed_sparse);

        if (fixed_sparse.pitch_lag == 0) {
            av_log(avctx, AV_LOG_ERROR, "The file is corrupted, pitch_lag = 0 is not allowed\n");
            return AVERROR_INVALIDDATA;
        }
        ff_set_fixed_vector(p->fixed_vector, &fixed_sparse, 1.0,
                            AMR_SUBFRAME_SIZE);

        p->fixed_gain[4] =
            ff_amr_set_fixed_gain(fixed_gain_factor,
                                  avpriv_scalarproduct_float_c(p->fixed_vector,
                                                               p->fixed_vector,
                                                               AMR_SUBFRAME_SIZE) /
                                  AMR_SUBFRAME_SIZE,
                                  p->prediction_error,
                                  energy_mean[p->cur_frame_mode], energy_pred_fac);

        // The excitation feedback is calculated without any processing such
        // as fixed gain smoothing. This isn't mentioned in the specification.
        for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
            p->excitation[i] *= p->pitch_gain[4];
        ff_set_fixed_vector(p->excitation, &fixed_sparse, p->fixed_gain[4],
                            AMR_SUBFRAME_SIZE);

        // In the ref decoder, excitation is stored with no fractional bits.
        // This step prevents buzz in silent periods. The ref encoder can
        // emit long sequences with pitch factor greater than one. This
        // creates unwanted feedback if the excitation vector is nonzero.
        // (e.g. test sequence T19_795.COD in 3GPP TS 26.074)
        for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
            p->excitation[i] = truncf(p->excitation[i]);

        // Smooth fixed gain.
        // The specification is ambiguous, but in the reference source, the
        // smoothed value is NOT fed back into later fixed gain smoothing.
        synth_fixed_gain = fixed_gain_smooth(p, p->lsf_q[subframe],
                                             p->lsf_avg, p->cur_frame_mode);

        synth_fixed_vector = anti_sparseness(p, &fixed_sparse, p->fixed_vector,
                                             synth_fixed_gain, spare_vector);

        if (synthesis(p, p->lpc[subframe], synth_fixed_gain,
                      synth_fixed_vector, &p->samples_in[LP_FILTER_ORDER], 0))
            // overflow detected -> rerun synthesis scaling pitch vector down
            // by a factor of 4, skipping pitch vector contribution emphasis
            // and adaptive gain control
            synthesis(p, p->lpc[subframe], synth_fixed_gain,
                      synth_fixed_vector, &p->samples_in[LP_FILTER_ORDER], 1);

        postfilter(p, p->lpc[subframe], buf_out + subframe * AMR_SUBFRAME_SIZE);

        // update buffers and history
        ff_clear_fixed_vector(p->fixed_vector, &fixed_sparse, AMR_SUBFRAME_SIZE);
        update_state(p);
    }

    ff_acelp_apply_order_2_transfer_function(buf_out, buf_out, highpass_zeros,
                                             highpass_poles,
                                             highpass_gain * AMR_SAMPLE_SCALE,
                                             p->high_pass_mem, AMR_BLOCK_SIZE);

    /* Update averaged lsf vector (used for fixed gain smoothing).
     *
     * Note that lsf_avg should not incorporate the current frame's LSFs
     * for fixed_gain_smooth.
     * The specification has an incorrect formula: the reference decoder uses
     * qbar(n-1) rather than qbar(n) in section 6.1(4) equation 71. */
    ff_weighted_vector_sumf(p->lsf_avg, p->lsf_avg, p->lsf_q[3],
                            0.84, 0.16, LP_FILTER_ORDER);

    *got_frame_ptr = 1;

    /* return the number of bytes consumed if everything was OK */
    return frame_sizes_nb[p->cur_frame_mode] + 1; // +7 for rounding and +8 for TOC
}

AVCodec ff_amrnb_decoder = {
    .name           = "amrnb",
    .long_name      = NULL_IF_CONFIG_SMALL("AMR-NB (Adaptive Multi-Rate NarrowBand)"),
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_AMR_NB,
    .priv_data_size = sizeof(AMRContext),
    .init           = amrnb_decode_init,
    .decode         = amrnb_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .sample_fmts    = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
                                                     AV_SAMPLE_FMT_NONE },
};