/// <summary>
/// Finds long-term prediction (LTP) coefficients and quantization weights.
/// Per subframe: builds the correlation matrix/vector around the pitch lag,
/// regularizes, solves for the LTP taps (fit to Q14), computes the weighted
/// residual energy, then scales the weight matrix. Afterwards it optionally
/// computes the LTP coding gain and applies a smoothing step to the taps.
/// All quantities are fixed-point; the Q-domain of each intermediate is noted
/// on the line that produces it.
/// </summary>
/// <param name="b_Q14">O: LTP coefficients, Q14 [MAX_NB_SUBFR * LTP_ORDER]</param>
/// <param name="WLTP">O: weight matrices for LTP quantization [MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER]</param>
/// <param name="LTPredCodGain_Q7">O: LTP coding gain, Q7; may be null, in which case the gain is not computed</param>
/// <param name="r_lpc">I: residual signal after LPC, plus state for the first 10 ms</param>
/// <param name="lag">I: LTP lags per subframe [MAX_NB_SUBFR]</param>
/// <param name="Wght_Q15">I: per-subframe weights, Q15 [MAX_NB_SUBFR]</param>
/// <param name="subfr_length">I: subframe length in samples</param>
/// <param name="nb_subfr">I: number of subframes</param>
/// <param name="mem_offset">I: number of samples in LTP memory (start offset of first subframe in r_lpc)</param>
/// <param name="corr_rshifts">O: right shifts applied to correlations per subframe [MAX_NB_SUBFR]</param>
internal static void silk_find_LTP(
    short[] b_Q14,                   /* O    LTP coefs [SilkConstants.MAX_NB_SUBFR * SilkConstants.LTP_ORDER] */
    int[] WLTP,                      /* O    Weight for LTP quantization [SilkConstants.MAX_NB_SUBFR * SilkConstants.LTP_ORDER * SilkConstants.LTP_ORDER] */
    BoxedValueInt LTPredCodGain_Q7,  /* O    LTP coding gain */
    short[] r_lpc,                   /* I    residual signal after LPC signal + state for first 10 ms */
    int[] lag,                       /* I    LTP lags [SilkConstants.MAX_NB_SUBFR] */
    int[] Wght_Q15,                  /* I    weights [SilkConstants.MAX_NB_SUBFR] */
    int subfr_length,                /* I    subframe length */
    int nb_subfr,                    /* I    number of subframes */
    int mem_offset,                  /* I    number of samples in LTP memory */
    int[] corr_rshifts               /* O    right shifts applied to correlations [SilkConstants.MAX_NB_SUBFR] */
    )
{
    int i, k, lshift;
    int r_ptr;      // index into r_lpc at the current subframe
    int lag_ptr;    // index into r_lpc at the lagged (pitch-shifted) position
    int b_Q14_ptr;  // index into b_Q14 for the current subframe's LTP_ORDER taps
    int regu;
    int WLTP_ptr;   // index into WLTP for the current subframe's LTP_ORDER^2 matrix
    int[] b_Q16 = new int[SilkConstants.LTP_ORDER];
    int[] delta_b_Q14 = new int[SilkConstants.LTP_ORDER];
    int[] d_Q14 = new int[SilkConstants.MAX_NB_SUBFR];
    int[] nrg = new int[SilkConstants.MAX_NB_SUBFR];
    int g_Q26;
    int[] w = new int[SilkConstants.MAX_NB_SUBFR];
    int WLTP_max, max_abs_d_Q14, max_w_bits;
    int temp32, denom32;
    int extra_shifts;
    int rr_shifts, maxRshifts, maxRshifts_wxtra, LZs;
    int LPC_res_nrg, LPC_LTP_res_nrg, div_Q16;
    int[] Rr = new int[SilkConstants.LTP_ORDER];
    int[] rr = new int[SilkConstants.MAX_NB_SUBFR];
    int wd, m_Q12;

    b_Q14_ptr = 0;
    WLTP_ptr = 0;
    r_ptr = mem_offset;
    for (k = 0; k < nb_subfr; k++)
    {
        // Lagged position: pitch lag plus half the LTP filter order of look-back.
        lag_ptr = r_ptr - (lag[k] + SilkConstants.LTP_ORDER / 2);

        SumSqrShift.silk_sum_sqr_shift(out rr[k], out rr_shifts, r_lpc, r_ptr, subfr_length); /* rr[ k ] in Q( -rr_shifts ) */

        /* Assure headroom */
        LZs = Inlines.silk_CLZ32(rr[k]);
        if (LZs < LTP_CORRS_HEAD_ROOM)
        {
            rr[k] = Inlines.silk_RSHIFT_ROUND(rr[k], LTP_CORRS_HEAD_ROOM - LZs);
            rr_shifts += (LTP_CORRS_HEAD_ROOM - LZs);
        }
        corr_rshifts[k] = rr_shifts;
        // silk_corrMatrix may increase the shift count; round-trip it through a boxed value.
        BoxedValueInt boxed_shifts = new BoxedValueInt(corr_rshifts[k]);
        CorrelateMatrix.silk_corrMatrix(r_lpc, lag_ptr, subfr_length, SilkConstants.LTP_ORDER, LTP_CORRS_HEAD_ROOM, WLTP, WLTP_ptr, boxed_shifts); /* WLTP_ptr in Q( -corr_rshifts[ k ] ) */
        corr_rshifts[k] = boxed_shifts.Val;

        /* The correlation vector always has lower max abs value than rr and/or RR so head room is assured */
        CorrelateMatrix.silk_corrVector(r_lpc, lag_ptr, r_lpc, r_ptr, subfr_length, SilkConstants.LTP_ORDER, Rr, corr_rshifts[k]); /* Rr_ptr in Q( -corr_rshifts[ k ] ) */
        // Bring rr[k] into the same Q-domain as the correlations if the matrix step shifted further.
        if (corr_rshifts[k] > rr_shifts)
        {
            rr[k] = Inlines.silk_RSHIFT(rr[k], corr_rshifts[k] - rr_shifts); /* rr[ k ] in Q( -corr_rshifts[ k ] ) */
        }
        Inlines.OpusAssert(rr[k] >= 0);

        // Regularization term: damped combination of the energy and the first/last diagonal entries of WLTP.
        regu = 1;
        regu = Inlines.silk_SMLAWB(regu, rr[k], ((int)((TuningParameters.LTP_DAMPING / 3) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.LTP_DAMPING / 3, 16)*/);
        regu = Inlines.silk_SMLAWB(regu, Inlines.MatrixGet(WLTP, WLTP_ptr, 0, 0, SilkConstants.LTP_ORDER), ((int)((TuningParameters.LTP_DAMPING / 3) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.LTP_DAMPING / 3, 16)*/);
        regu = Inlines.silk_SMLAWB(regu, Inlines.MatrixGet(WLTP, WLTP_ptr, SilkConstants.LTP_ORDER - 1, SilkConstants.LTP_ORDER - 1, SilkConstants.LTP_ORDER), ((int)((TuningParameters.LTP_DAMPING / 3) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.LTP_DAMPING / 3, 16)*/);
        RegularizeCorrelations.silk_regularize_correlations(WLTP, WLTP_ptr, rr, k, regu, SilkConstants.LTP_ORDER);

        // Solve WLTP * b = Rr for the LTP taps (b_Q16).
        LinearAlgebra.silk_solve_LDL(WLTP, WLTP_ptr, SilkConstants.LTP_ORDER, Rr, b_Q16); /* WLTP_ptr and Rr_ptr both in Q(-corr_rshifts[k]) */

        /* Limit and store in Q14 */
        silk_fit_LTP(b_Q16, b_Q14, b_Q14_ptr);

        /* Calculate residual energy */
        nrg[k] = ResidualEnergy.silk_residual_energy16_covar(b_Q14, b_Q14_ptr, WLTP, WLTP_ptr, Rr, rr[k], SilkConstants.LTP_ORDER, 14); /* nrg in Q( -corr_rshifts[ k ] ) */

        /* temp = Wght[ k ] / ( nrg[ k ] * Wght[ k ] + 0.01f * subfr_length ); */
        extra_shifts = Inlines.silk_min_int(corr_rshifts[k], LTP_CORRS_HEAD_ROOM);
        denom32 = Inlines.silk_LSHIFT_SAT32(Inlines.silk_SMULWB(nrg[k], Wght_Q15[k]), 1 + extra_shifts) +            /* Q( -corr_rshifts[ k ] + extra_shifts ) */
            Inlines.silk_RSHIFT(Inlines.silk_SMULWB((int)subfr_length, 655), corr_rshifts[k] - extra_shifts);        /* Q( -corr_rshifts[ k ] + extra_shifts ) */
        denom32 = Inlines.silk_max(denom32, 1);
        Inlines.OpusAssert(((long)Wght_Q15[k] << 16) < int.MaxValue); /* Wght always < 0.5 in Q0 */
        temp32 = Inlines.silk_DIV32(Inlines.silk_LSHIFT((int)Wght_Q15[k], 16), denom32);   /* Q( 15 + 16 + corr_rshifts[k] - extra_shifts ) */
        temp32 = Inlines.silk_RSHIFT(temp32, 31 + corr_rshifts[k] - extra_shifts - 26);    /* Q26 */

        /* Limit temp such that the below scaling never wraps around */
        WLTP_max = 0;
        for (i = WLTP_ptr; i < WLTP_ptr + (SilkConstants.LTP_ORDER * SilkConstants.LTP_ORDER); i++)
        {
            WLTP_max = Inlines.silk_max(WLTP[i], WLTP_max);
        }
        lshift = Inlines.silk_CLZ32(WLTP_max) - 1 - 3; /* keep 3 bits free for vq_nearest_neighbor */
        Inlines.OpusAssert(26 - 18 + lshift >= 0);
        if (26 - 18 + lshift < 31)
        {
            temp32 = Inlines.silk_min_32(temp32, Inlines.silk_LSHIFT((int)1, 26 - 18 + lshift));
        }

        Inlines.silk_scale_vector32_Q26_lshift_18(WLTP, WLTP_ptr, temp32, SilkConstants.LTP_ORDER * SilkConstants.LTP_ORDER); /* WLTP_ptr in Q( 18 - corr_rshifts[ k ] ) */

        // Center element of the scaled weight matrix serves as the subframe weight.
        w[k] = Inlines.MatrixGet(WLTP, WLTP_ptr, SilkConstants.LTP_ORDER / 2, SilkConstants.LTP_ORDER / 2, SilkConstants.LTP_ORDER); /* w in Q( 18 - corr_rshifts[ k ] ) */
        Inlines.OpusAssert(w[k] >= 0);

        // Advance per-subframe indices.
        r_ptr += subfr_length;
        b_Q14_ptr += SilkConstants.LTP_ORDER;
        WLTP_ptr += (SilkConstants.LTP_ORDER * SilkConstants.LTP_ORDER);
    }

    maxRshifts = 0;
    for (k = 0; k < nb_subfr; k++)
    {
        maxRshifts = Inlines.silk_max_int(corr_rshifts[k], maxRshifts);
    }

    /* Compute LTP coding gain */
    if (LTPredCodGain_Q7 != null)
    {
        LPC_LTP_res_nrg = 0;
        LPC_res_nrg = 0;
        Inlines.OpusAssert(LTP_CORRS_HEAD_ROOM >= 2); /* Check that no overflow will happen when adding */
        for (k = 0; k < nb_subfr; k++)
        {
            // Accumulate weighted energies before (rr) and after (nrg) LTP, normalized to a common Q-domain.
            LPC_res_nrg = Inlines.silk_ADD32(LPC_res_nrg, Inlines.silk_RSHIFT(Inlines.silk_ADD32(Inlines.silk_SMULWB(rr[k], Wght_Q15[k]), 1), 1 + (maxRshifts - corr_rshifts[k])));          /* Q( -maxRshifts ) */
            LPC_LTP_res_nrg = Inlines.silk_ADD32(LPC_LTP_res_nrg, Inlines.silk_RSHIFT(Inlines.silk_ADD32(Inlines.silk_SMULWB(nrg[k], Wght_Q15[k]), 1), 1 + (maxRshifts - corr_rshifts[k]))); /* Q( -maxRshifts ) */
        }
        LPC_LTP_res_nrg = Inlines.silk_max(LPC_LTP_res_nrg, 1); /* avoid division by zero */

        div_Q16 = Inlines.silk_DIV32_varQ(LPC_res_nrg, LPC_LTP_res_nrg, 16);
        // Coding gain in Q7: 3 * (log2 of the energy ratio, minus the Q16 bias).
        LTPredCodGain_Q7.Val = (int)Inlines.silk_SMULBB(3, Inlines.silk_lin2log(div_Q16) - (16 << 7));

        Inlines.OpusAssert(LTPredCodGain_Q7.Val == (int)Inlines.silk_SAT16(Inlines.silk_MUL(3, Inlines.silk_lin2log(div_Q16) - (16 << 7))));
    }

    /* smoothing */
    /* d = sum( B, 1 ); */
    b_Q14_ptr = 0;
    for (k = 0; k < nb_subfr; k++)
    {
        d_Q14[k] = 0;
        for (i = b_Q14_ptr; i < b_Q14_ptr + SilkConstants.LTP_ORDER; i++)
        {
            d_Q14[k] += b_Q14[i];
        }
        b_Q14_ptr += SilkConstants.LTP_ORDER;
    }

    /* m = ( w * d' ) / ( sum( w ) + 1e-3 ); */

    /* Find maximum absolute value of d_Q14 and the bits used by w in Q0 */
    max_abs_d_Q14 = 0;
    max_w_bits = 0;
    for (k = 0; k < nb_subfr; k++)
    {
        max_abs_d_Q14 = Inlines.silk_max_32(max_abs_d_Q14, Inlines.silk_abs(d_Q14[k]));
        /* w[ k ] is in Q( 18 - corr_rshifts[ k ] ) */
        /* Find bits needed in Q( 18 - maxRshifts ) */
        max_w_bits = Inlines.silk_max_32(max_w_bits, 32 - Inlines.silk_CLZ32(w[k]) + corr_rshifts[k] - maxRshifts);
    }

    /* max_abs_d_Q14 = (5 << 15); worst case, i.e. SilkConstants.LTP_ORDER * -silk_int16_MIN */
    Inlines.OpusAssert(max_abs_d_Q14 <= (5 << 15));

    /* How many bits is needed for w*d' in Q( 18 - maxRshifts ) in the worst case, of all d_Q14's being equal to max_abs_d_Q14 */
    extra_shifts = max_w_bits + 32 - Inlines.silk_CLZ32(max_abs_d_Q14) - 14;

    /* Subtract what we got available; bits in output var plus maxRshifts */
    extra_shifts -= (32 - 1 - 2 + maxRshifts); /* Keep sign bit free as well as 2 bits for accumulation */
    extra_shifts = Inlines.silk_max_int(extra_shifts, 0);

    maxRshifts_wxtra = maxRshifts + extra_shifts;

    temp32 = Inlines.silk_RSHIFT(262, maxRshifts + extra_shifts) + 1; /* 1e-3f in Q( 18 - (maxRshifts + extra_shifts) ) */
    wd = 0;
    for (k = 0; k < nb_subfr; k++)
    {
        /* w has at least 2 bits of headroom so no overflow should happen */
        temp32 = Inlines.silk_ADD32(temp32, Inlines.silk_RSHIFT(w[k], maxRshifts_wxtra - corr_rshifts[k]));                                                      /* Q( 18 - maxRshifts_wxtra ) */
        wd = Inlines.silk_ADD32(wd, Inlines.silk_LSHIFT(Inlines.silk_SMULWW(Inlines.silk_RSHIFT(w[k], maxRshifts_wxtra - corr_rshifts[k]), d_Q14[k]), 2));      /* Q( 18 - maxRshifts_wxtra ) */
    }
    m_Q12 = Inlines.silk_DIV32_varQ(wd, temp32, 12);

    b_Q14_ptr = 0;
    for (k = 0; k < nb_subfr; k++)
    {
        /* w[ k ] from Q( 18 - corr_rshifts[ k ] ) to Q( 16 ) */
        if (2 - corr_rshifts[k] > 0)
        {
            temp32 = Inlines.silk_RSHIFT(w[k], 2 - corr_rshifts[k]);
        }
        else
        {
            temp32 = Inlines.silk_LSHIFT_SAT32(w[k], corr_rshifts[k] - 2);
        }

        // Smoothing gain: LTP_SMOOTHING / (LTP_SMOOTHING + w) * (m - d/4), in Q26.
        g_Q26 = Inlines.silk_MUL(
            Inlines.silk_DIV32(
                ((int)((TuningParameters.LTP_SMOOTHING) * ((long)1 << (26)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.LTP_SMOOTHING, 26)*/,
                Inlines.silk_RSHIFT(((int)((TuningParameters.LTP_SMOOTHING) * ((long)1 << (26)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.LTP_SMOOTHING, 26)*/, 10) + temp32), /* Q10 */
            Inlines.silk_LSHIFT_SAT32(Inlines.silk_SUB_SAT32((int)m_Q12, Inlines.silk_RSHIFT(d_Q14[k], 2)), 4)); /* Q16 */

        // Distribute the smoothing gain over the taps, proportionally to max(b, 0.1).
        temp32 = 0;
        for (i = 0; i < SilkConstants.LTP_ORDER; i++)
        {
            delta_b_Q14[i] = Inlines.silk_max_16(b_Q14[b_Q14_ptr + i], 1638); /* 1638_Q14 = 0.1_Q0 */
            temp32 += delta_b_Q14[i]; /* Q14 */
        }
        temp32 = Inlines.silk_DIV32(g_Q26, temp32); /* Q14 . Q12 */
        for (i = 0; i < SilkConstants.LTP_ORDER; i++)
        {
            // Adjust each tap and clamp to the allowed Q14 range.
            b_Q14[b_Q14_ptr + i] = (short)(Inlines.silk_LIMIT_32((int)b_Q14[b_Q14_ptr + i] + Inlines.silk_SMULWB(Inlines.silk_LSHIFT_SAT32(temp32, 4), delta_b_Q14[i]), -16000, 28000));
        }
        b_Q14_ptr += SilkConstants.LTP_ORDER;
    }
}
/// <summary>
/// Finds the prediction coefficients for one frame: derives per-subframe
/// weights from the quantized gains, runs LTP analysis and quantization for
/// voiced frames (or zeroes the LTP coefficients for unvoiced), limits the
/// total predictive coding gain, finds and quantizes the LPC/NLSF
/// coefficients, and computes the residual energies.
/// </summary>
/// <param name="psEnc">I/O: encoder state</param>
/// <param name="psEncCtrl">I/O: encoder control</param>
/// <param name="res_pitch">I: residual from pitch analysis</param>
/// <param name="x">I: speech signal</param>
/// <param name="x_ptr">I: offset of the current frame within x</param>
/// <param name="condCoding">I: the type of conditional coding to use</param>
internal static void silk_find_pred_coefs(
    SilkChannelEncoder psEnc,       /* I/O  encoder state */
    SilkEncoderControl psEncCtrl,   /* I/O  encoder control */
    short[] res_pitch,              /* I    Residual from pitch analysis */
    short[] x,                      /* I    Speech signal */
    int x_ptr,
    int condCoding                  /* I    The type of conditional coding to use */
    )
{
    int i;
    int[] invGains_Q16 = new int[SilkConstants.MAX_NB_SUBFR];
    int[] local_gains = new int[SilkConstants.MAX_NB_SUBFR];
    int[] Wght_Q15 = new int[SilkConstants.MAX_NB_SUBFR];
    short[] NLSF_Q15 = new short[SilkConstants.MAX_LPC_ORDER];
    int x_ptr2;
    int x_pre_ptr;
    short[] LPC_in_pre;  // prepended-subframe signal: LTP-filtered (voiced) or gain-scaled input (unvoiced)
    int tmp, min_gain_Q16, minInvGain_Q30;
    int[] LTP_corrs_rshift = new int[SilkConstants.MAX_NB_SUBFR];

    /* weighting for weighted least squares */
    min_gain_Q16 = int.MaxValue >> 6;
    for (i = 0; i < psEnc.nb_subfr; i++)
    {
        min_gain_Q16 = Inlines.silk_min(min_gain_Q16, psEncCtrl.Gains_Q16[i]);
    }

    for (i = 0; i < psEnc.nb_subfr; i++)
    {
        /* Divide to Q16 */
        Inlines.OpusAssert(psEncCtrl.Gains_Q16[i] > 0);
        /* Invert and normalize gains, and ensure that maximum invGains_Q16 is within range of a 16 bit int */
        invGains_Q16[i] = Inlines.silk_DIV32_varQ(min_gain_Q16, psEncCtrl.Gains_Q16[i], 16 - 2);

        /* Ensure Wght_Q15 a minimum value 1 */
        invGains_Q16[i] = Inlines.silk_max(invGains_Q16[i], 363);

        /* Square the inverted gains */
        Inlines.OpusAssert(invGains_Q16[i] == Inlines.silk_SAT16(invGains_Q16[i]));
        tmp = Inlines.silk_SMULWB(invGains_Q16[i], invGains_Q16[i]);
        Wght_Q15[i] = Inlines.silk_RSHIFT(tmp, 1);

        /* Invert the inverted and normalized gains */
        local_gains[i] = Inlines.silk_DIV32(((int)1 << 16), invGains_Q16[i]);
    }

    // Room for predictLPCOrder prepended samples per subframe plus the frame itself.
    LPC_in_pre = new short[psEnc.nb_subfr * psEnc.predictLPCOrder + psEnc.frame_length];
    if (psEnc.indices.signalType == SilkConstants.TYPE_VOICED)
    {
        int[] WLTP;

        /**********/
        /* VOICED */
        /**********/
        Inlines.OpusAssert(psEnc.ltp_mem_length - psEnc.predictLPCOrder >= psEncCtrl.pitchL[0] + SilkConstants.LTP_ORDER / 2);

        WLTP = new int[psEnc.nb_subfr * SilkConstants.LTP_ORDER * SilkConstants.LTP_ORDER];

        /* LTP analysis */
        // LTPredCodGain_Q7 is an int field on psEncCtrl, so it is round-tripped through a boxed value.
        BoxedValueInt boxed_codgain = new BoxedValueInt(psEncCtrl.LTPredCodGain_Q7);
        FindLTP.silk_find_LTP(psEncCtrl.LTPCoef_Q14, WLTP, boxed_codgain, res_pitch,
            psEncCtrl.pitchL, Wght_Q15, psEnc.subfr_length, psEnc.nb_subfr, psEnc.ltp_mem_length, LTP_corrs_rshift);
        psEncCtrl.LTPredCodGain_Q7 = boxed_codgain.Val;

        /* Quantize LTP gain parameters */
        BoxedValueSbyte boxed_periodicity = new BoxedValueSbyte(psEnc.indices.PERIndex);
        BoxedValueInt boxed_gain = new BoxedValueInt(psEnc.sum_log_gain_Q7);
        QuantizeLTPGains.silk_quant_LTP_gains(psEncCtrl.LTPCoef_Q14, psEnc.indices.LTPIndex, boxed_periodicity, boxed_gain,
            WLTP, psEnc.mu_LTP_Q9, psEnc.LTPQuantLowComplexity, psEnc.nb_subfr);
        psEnc.indices.PERIndex = boxed_periodicity.Val;
        psEnc.sum_log_gain_Q7 = boxed_gain.Val;

        /* Control LTP scaling */
        LTPScaleControl.silk_LTP_scale_ctrl(psEnc, psEncCtrl, condCoding);

        /* Create LTP residual */
        LTPAnalysisFilter.silk_LTP_analysis_filter(LPC_in_pre, x, x_ptr - psEnc.predictLPCOrder, psEncCtrl.LTPCoef_Q14,
            psEncCtrl.pitchL, invGains_Q16, psEnc.subfr_length, psEnc.nb_subfr, psEnc.predictLPCOrder);
    }
    else
    {
        /************/
        /* UNVOICED */
        /************/
        /* Create signal with prepended subframes, scaled by inverse gains */
        x_ptr2 = x_ptr - psEnc.predictLPCOrder;
        x_pre_ptr = 0;
        for (i = 0; i < psEnc.nb_subfr; i++)
        {
            Inlines.silk_scale_copy_vector16(LPC_in_pre, x_pre_ptr, x, x_ptr2,
                invGains_Q16[i], psEnc.subfr_length + psEnc.predictLPCOrder);
            x_pre_ptr += psEnc.subfr_length + psEnc.predictLPCOrder;
            x_ptr2 += psEnc.subfr_length;
        }

        // No LTP for unvoiced frames: clear coefficients and accumulated gains.
        Arrays.MemSetShort(psEncCtrl.LTPCoef_Q14, 0, psEnc.nb_subfr * SilkConstants.LTP_ORDER);
        psEncCtrl.LTPredCodGain_Q7 = 0;
        psEnc.sum_log_gain_Q7 = 0;
    }

    /* Limit on total predictive coding gain */
    if (psEnc.first_frame_after_reset != 0)
    {
        minInvGain_Q30 = ((int)((1.0f / SilkConstants.MAX_PREDICTION_POWER_GAIN_AFTER_RESET) * ((long)1 << (30)) + 0.5)) /*Inlines.SILK_CONST(1.0f / SilkConstants.MAX_PREDICTION_POWER_GAIN_AFTER_RESET, 30)*/;
    }
    else
    {
        // Derive the minimum inverse prediction gain from the LTP coding gain and coding quality.
        minInvGain_Q30 = Inlines.silk_log2lin(Inlines.silk_SMLAWB(16 << 7, (int)psEncCtrl.LTPredCodGain_Q7, ((int)((1.0f / 3f) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(1.0f / 3f, 16)*/)); /* Q16 */
        minInvGain_Q30 = Inlines.silk_DIV32_varQ(minInvGain_Q30,
            Inlines.silk_SMULWW(((int)((SilkConstants.MAX_PREDICTION_POWER_GAIN) * ((long)1 << (0)) + 0.5)) /*Inlines.SILK_CONST(SilkConstants.MAX_PREDICTION_POWER_GAIN, 0)*/,
                Inlines.silk_SMLAWB(((int)((0.25f) * ((long)1 << (18)) + 0.5)) /*Inlines.SILK_CONST(0.25f, 18)*/, ((int)((0.75f) * ((long)1 << (18)) + 0.5)) /*Inlines.SILK_CONST(0.75f, 18)*/, psEncCtrl.coding_quality_Q14)), 14);
    }

    /* LPC_in_pre contains the LTP-filtered input for voiced, and the unfiltered input for unvoiced */
    FindLPC.silk_find_LPC(psEnc, NLSF_Q15, LPC_in_pre, minInvGain_Q30);

    /* Quantize LSFs */
    NLSF.silk_process_NLSFs(psEnc, psEncCtrl.PredCoef_Q12, NLSF_Q15, psEnc.prev_NLSFq_Q15);

    /* Calculate residual energy using quantized LPC coefficients */
    ResidualEnergy.silk_residual_energy(psEncCtrl.ResNrg, psEncCtrl.ResNrgQ, LPC_in_pre, psEncCtrl.PredCoef_Q12, local_gains,
        psEnc.subfr_length, psEnc.nb_subfr, psEnc.predictLPCOrder);

    /* Copy to prediction struct for use in next frame for interpolation */
    Array.Copy(NLSF_Q15, psEnc.prev_NLSFq_Q15, SilkConstants.MAX_LPC_ORDER);
}