/// <summary>
/// Returns the symbolic autocorrelation for every cyclic shift of the data
/// (step 1 by default, or every shiftBy-th shift when shiftBy is greater than zero).
/// </summary>
/// <returns>Dictionary whose key is the shift and whose value is the symbolic autocorrelation</returns>
public Dictionary<int, double> InternalGetAutoCorrelationsForAllShifts<T>(
    IEnumerable<T> data,
    int shiftBy = 0)
{
    var result = new Dictionary<int, double>();
    var autoCor = new Autocorrelation<T>(data.ToArray());

    // use only half of the shifts, as the function is symmetric
    int max = GetCeiledHalf(data.Count());

    if (shiftBy == 0)
    {
        // perform every cyclic shift; this can take a long time
        // shift 0 is included and always yields 1 (the full cyclic loop)
        for (int i = 0; i < max; i++)
        {
            double value = autoCor.Compute(i);
            result.Add(i, value);
        }
    }
    else
    {
        // compute only every shiftBy-th shift
        int i = 0;
        while (i <= max)
        {
            double value = autoCor.Compute(i);
            result.Add(i, value);
            i += shiftBy;
        }
    }

    return result;
}
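For context, a minimal usage sketch of the method above. The enclosing class is not shown in this snippet, so the instance name "analysis" and its type are assumptions; only Autocorrelation<T> and InternalGetAutoCorrelationsForAllShifts come from the source.

// Usage sketch; "analysis" is an instance of the (unnamed) enclosing class.
var series = "0001010212".ToCharArray();
Dictionary<int, double> correlations =
    analysis.InternalGetAutoCorrelationsForAllShifts(series);

// Each entry maps a cyclic shift to its symbolic autocorrelation; shift 0 is always 1.
foreach (var pair in correlations)
{
    Console.WriteLine($"shift {pair.Key}: {pair.Value:0.000}");
}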
/// <summary>
/// Returns the symbolic autocorrelation for every cyclic shift of the data
/// (or every shiftBy-th shift when shiftBy is greater than zero), computed in parallel
/// with the given concurrency level.
/// </summary>
/// <returns>Dictionary whose key is the shift and whose value is the symbolic autocorrelation</returns>
protected ConcurrentDictionary<int, double> InternalConcurrentGetAutoCorrelationsForAllShifts<T>(
    IEnumerable<T> data,
    int shiftBy = 0,
    int concurrencyLevel = 1)
{
    // use only half of the shifts, as the function is symmetric
    int max = GetCeiledHalf(data.Count());
    var result = new ConcurrentDictionary<int, double>(concurrencyLevel, max);
    var autoCor = new Autocorrelation<T>(data.ToArray());

    if (shiftBy == 0)
    {
        // perform every cyclic shift; this can take a long time
        var options = new ParallelOptions { MaxDegreeOfParallelism = concurrencyLevel };
        Parallel.For(0, max, options, i =>
        {
            double value = autoCor.Compute(i);
            bool successful = result.TryAdd(i, value);
            if (!successful)
            {
                throw new InvalidOperationException(
                    "Failed to add an item to concurrent dictionary. " +
                    "This index is already taken: " + i);
            }
        });
    }
    else
    {
        // compute only the specified shift indices
        var options = new ParallelOptions { MaxDegreeOfParallelism = concurrencyLevel };
        Parallel.ForEach(
            Enumerable.Range(0, max)                /* include the 0 shift */
                      .Where(i => i % shiftBy == 0) /* select the shifted points */,
            options,
            (i, loopState) =>
        {
            double value = autoCor.Compute(i);
            bool successful = result.TryAdd(i, value);
            if (!successful)
            {
                throw new InvalidOperationException(
                    "Failed to add an item to concurrent dictionary. " +
                    "This index is already taken: " + i);
            }
        });
    }

    return result;
}
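The concurrent variant is protected, so callers would typically go through a derived type. A minimal sketch under that assumption; DerivedAnalysis and its base-class name are hypothetical, while the method, its parameters, and the ConcurrentDictionary result come from the code above.

// Hypothetical subclass; the real base-class name is not shown in this snippet.
class DerivedAnalysis : SymbolicAnalysisBase
{
    public ConcurrentDictionary<int, double> ComputeAll(byte[] data)
    {
        // Each shift is independent, so one worker per core is a reasonable default.
        return InternalConcurrentGetAutoCorrelationsForAllShifts(
            data, shiftBy: 0, concurrencyLevel: Environment.ProcessorCount);
    }
}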
public void ComputeTestStringData(int shiftIdx, double expResult)
{
    log.Debug("Shift: " + shiftIdx);
    const string data = "0001010212";
    var autoCor = new Autocorrelation<char>(data.ToCharArray());

    double actual = autoCor.Compute(shiftIdx);

    Assert.AreEqual(expResult, actual);
}
public void ComputeTestByteData(int shiftIdx, double expResult)
{
    log.Debug("Shift: " + shiftIdx);
    var data = new byte[] { 0, 0, 0, 1, 0, 1, 0, 2, 1, 2 };
    var autoCor = new Autocorrelation<byte>(data);

    double actual = autoCor.Compute(shiftIdx);

    Assert.AreEqual(expResult, actual);
}
public void ComputeTestByteDataParallel(int parallelTasksToCreate)
{
    // must be a multiple of 10 so the expected result can be derived below
    if (parallelTasksToCreate % 10 != 0)
    {
        throw new ArgumentException(
            "must be a multiple of 10 for the test to calculate the expected result");
    }

    int expArraySize = parallelTasksToCreate / 10;

    // hardcoded data
    var data = new byte[] { 0, 0, 0, 1, 0, 1, 0, 2, 1, 2 };

    // corresponding autocorrelation values for one full rotation
    var singleExpectedUnorderedResult = new[] { 1, 0.2, 0.5, 0.2, 0.3, 0.4, 0.3, 0.2, 0.5, 0.2 };

    // for each full rotation, append the single expected array once
    var expectedResult = new List<double>();
    for (int i = 0; i < expArraySize; i++)
    {
        expectedResult.AddRange(singleExpectedUnorderedResult);
    }

    int shiftBy = -1;
    var results = new ConcurrentBag<Task<double>>();

    // create parallel tasks; each atomically claims the next shift index
    for (int i = 0; i < parallelTasksToCreate; i++)
    {
        Task<double> result = Task<double>.Factory.StartNew(() =>
        {
            var autoCor = new Autocorrelation<byte>(data);
            Interlocked.Increment(ref shiftBy);
            return autoCor.Compute(shiftBy);
        });
        results.Add(result);
    }

    // wait for all tasks to complete
    Task.WaitAll(results.ToArray());

    // assert
    var actual = (from r in results select r.Result).ToArray();
    Assert.That(actual, Is.EquivalentTo(expectedResult));
}
/// <summary>
/// Returns a single averaged autocorrelation value for randomly shuffled data,
/// which approximates a flat line; the precision parameter bounds how many shifts are averaged.
/// </summary>
/// <returns>Averaged autocorrelation value</returns>
public double GetAutoCorrelationsForRandomlyShiftedData<T>(
    IEnumerable<T> data,
    int precision = 1000)
{
    data = data.GetFisherYatesShuffle();

    var result = new List<double>();
    var autoCor = new Autocorrelation<T>(data.ToArray());

    int max = Math.Min(precision, data.Count());
    for (int i = 1; i <= max; i++)
    {
        double value = autoCor.Compute(i);
        result.Add(value);
    }

    return result.Average();
}
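One plausible use of this method is as a noise baseline: the Fisher-Yates shuffle destroys any ordering, so the averaged value approximates what an unstructured series of the same symbols would produce. A sketch under that assumption; the "analysis" instance and the comparison itself are illustrative, not taken from the source.

// Illustrative only; "analysis" is an instance of the (unnamed) enclosing class.
var series = new byte[] { 0, 0, 0, 1, 0, 1, 0, 2, 1, 2 };

// Baseline from shuffled data versus per-shift values of the original ordering.
double baseline = analysis.GetAutoCorrelationsForRandomlyShiftedData(series, precision: 100);
var perShift    = analysis.InternalGetAutoCorrelationsForAllShifts(series);

// Shifts whose correlation clearly exceeds the shuffled baseline hint at periodic structure.
var interesting = perShift.Where(p => p.Value > baseline).Select(p => p.Key).ToList();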
public void DoRandomWalkThread(int start, int end, Landscape landscape, ResearchParameters parameters,
                               IOperator op, StringBuilder dataBuilder, Action<string, float> callback,
                               string connectionId, float step)
{
    for (int j = start; j < end; ++j)
    {
        var rwResult = landscape.RandomWalk(parameters.RandomWalkSteps, op);

        float ac  = Autocorrelation.Run(rwResult);
        float ic  = InformationContent.Run(rwResult, parameters.Sensitivity);
        float pic = PartialInformationContent.Run(rwResult, parameters.Sensitivity);
        float dbi = DensityBasinInformation.Run(rwResult, parameters.Sensitivity);

        string line =
            (float.IsNaN(ac)  ? FLOAT_PATTERN : ac.ToString(FLOAT_PATTERN))  + SEPARATOR +
            (float.IsNaN(ic)  ? FLOAT_PATTERN : ic.ToString(FLOAT_PATTERN))  + SEPARATOR +
            (float.IsNaN(pic) ? FLOAT_PATTERN : pic.ToString(FLOAT_PATTERN)) + SEPARATOR +
            (float.IsNaN(dbi) ? FLOAT_PATTERN : dbi.ToString(FLOAT_PATTERN));

        dataBuilder.AppendLine(line);
        callback(connectionId, step);
    }
}
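A hedged sketch of how this worker might be driven: the range of walks is split across tasks, and because StringBuilder is not thread-safe each task gets its own builder whose contents are concatenated afterwards. Everything here except the DoRandomWalkThread signature (the "runner" instance, the in-scope landscape, parameters, op, callback, connectionId, step, and the partitioning itself) is an assumption rather than code from the source.

// Hypothetical driver; all captured variables are assumed to be in scope.
int walks = 2000, workers = 4, chunk = walks / workers;
var builders = Enumerable.Range(0, workers).Select(_ => new StringBuilder()).ToArray();

Task.WaitAll(Enumerable.Range(0, workers)
    .Select(w => Task.Run(() =>
        runner.DoRandomWalkThread(w * chunk, (w + 1) * chunk, landscape, parameters, op,
                                  builders[w], callback, connectionId, step)))
    .ToArray());

// Merge the per-task CSV fragments once every walk has finished.
string merged = string.Concat(builders.Select(b => b.ToString()));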
internal void celt_decode_lost(int N, int LM) { int c; int i; int C = this.channels; int[][] out_syn = new int[2][]; int[] out_syn_ptrs = new int[2]; CeltMode mode; int nbEBands; int overlap; int noise_based; short[] eBands; mode = this.mode; nbEBands = mode.nbEBands; overlap = mode.overlap; eBands = mode.eBands; c = 0; do { out_syn[c] = this.decode_mem[c]; out_syn_ptrs[c] = CeltConstants.DECODE_BUFFER_SIZE - N; } while (++c < C); noise_based = (loss_count >= 5 || start != 0) ? 1 : 0; if (noise_based != 0) { /* Noise-based PLC/CNG */ int[][] X; uint seed; int end; int effEnd; int decay; end = this.end; effEnd = Inlines.IMAX(start, Inlines.IMIN(end, mode.effEBands)); X = Arrays.InitTwoDimensionalArray <int>(C, N); /**< Interleaved normalised MDCTs */ /* Energy decay */ decay = loss_count == 0 ? ((short)(0.5 + (1.5f) * (((int)1) << (CeltConstants.DB_SHIFT)))) /*Inlines.QCONST16(1.5f, CeltConstants.DB_SHIFT)*/ : ((short)(0.5 + (0.5f) * (((int)1) << (CeltConstants.DB_SHIFT)))) /*Inlines.QCONST16(0.5f, CeltConstants.DB_SHIFT)*/; c = 0; do { for (i = start; i < end; i++) { this.oldEBands[c * nbEBands + i] = Inlines.MAX16(backgroundLogE[c * nbEBands + i], this.oldEBands[c * nbEBands + i] - decay); } } while (++c < C); seed = this.rng; for (c = 0; c < C; c++) { for (i = start; i < effEnd; i++) { int j; int boffs; int blen; boffs = (eBands[i] << LM); blen = (eBands[i + 1] - eBands[i]) << LM; for (j = 0; j < blen; j++) { seed = Bands.celt_lcg_rand(seed); X[c][boffs + j] = (unchecked ((int)seed) >> 20); } VQ.renormalise_vector(X[c], 0, blen, CeltConstants.Q15ONE); } } this.rng = seed; c = 0; do { Arrays.MemMoveInt(this.decode_mem[c], N, 0, CeltConstants.DECODE_BUFFER_SIZE - N + (overlap >> 1)); } while (++c < C); CeltCommon.celt_synthesis(mode, X, out_syn, out_syn_ptrs, this.oldEBands, start, effEnd, C, C, 0, LM, this.downsample, 0); } else { /* Pitch-based PLC */ int[] window; int fade = CeltConstants.Q15ONE; int pitch_index; int[] etmp; int[] exc; if (loss_count == 0) { this.last_pitch_index = pitch_index = CeltCommon.celt_plc_pitch_search(this.decode_mem, C); } else { pitch_index = this.last_pitch_index; fade = ((short)(0.5 + (.8f) * (((int)1) << (15)))) /*Inlines.QCONST16(.8f, 15)*/; } etmp = new int[overlap]; exc = new int[CeltConstants.MAX_PERIOD]; window = mode.window; c = 0; do { int decay; int attenuation; int S1 = 0; int[] buf; int extrapolation_offset; int extrapolation_len; int exc_length; int j; buf = this.decode_mem[c]; for (i = 0; i < CeltConstants.MAX_PERIOD; i++) { exc[i] = Inlines.ROUND16(buf[CeltConstants.DECODE_BUFFER_SIZE - CeltConstants.MAX_PERIOD + i], CeltConstants.SIG_SHIFT); } if (loss_count == 0) { int[] ac = new int[CeltConstants.LPC_ORDER + 1]; /* Compute LPC coefficients for the last MAX_PERIOD samples before * the first loss so we can work in the excitation-filter domain. */ Autocorrelation._celt_autocorr(exc, ac, window, overlap, CeltConstants.LPC_ORDER, CeltConstants.MAX_PERIOD); /* Add a noise floor of -40 dB. */ ac[0] += Inlines.SHR32(ac[0], 13); /* Use lag windowing to stabilize the Levinson-Durbin recursion. */ for (i = 1; i <= CeltConstants.LPC_ORDER; i++) { /*ac[i] *= exp(-.5*(2*M_PI*.002*i)*(2*M_PI*.002*i));*/ ac[i] -= Inlines.MULT16_32_Q15(2 * i * i, ac[i]); } CeltLPC.celt_lpc(this.lpc[c], ac, CeltConstants.LPC_ORDER); } /* We want the excitation for 2 pitch periods in order to look for a * decaying signal, but we can't get more than MAX_PERIOD. 
*/ exc_length = Inlines.IMIN(2 * pitch_index, CeltConstants.MAX_PERIOD); /* Initialize the LPC history with the samples just before the start * of the region for which we're computing the excitation. */ { int[] lpc_mem = new int[CeltConstants.LPC_ORDER]; for (i = 0; i < CeltConstants.LPC_ORDER; i++) { lpc_mem[i] = Inlines.ROUND16(buf[CeltConstants.DECODE_BUFFER_SIZE - exc_length - 1 - i], CeltConstants.SIG_SHIFT); } /* Compute the excitation for exc_length samples before the loss. */ Kernels.celt_fir(exc, (CeltConstants.MAX_PERIOD - exc_length), this.lpc[c], 0, exc, (CeltConstants.MAX_PERIOD - exc_length), exc_length, CeltConstants.LPC_ORDER, lpc_mem); } /* Check if the waveform is decaying, and if so how fast. * We do this to avoid adding energy when concealing in a segment * with decaying energy. */ { int E1 = 1, E2 = 1; int decay_length; int shift = Inlines.IMAX(0, 2 * Inlines.celt_zlog2(Inlines.celt_maxabs16(exc, (CeltConstants.MAX_PERIOD - exc_length), exc_length)) - 20); decay_length = exc_length >> 1; for (i = 0; i < decay_length; i++) { int e; e = exc[CeltConstants.MAX_PERIOD - decay_length + i]; E1 += Inlines.SHR32(Inlines.MULT16_16(e, e), shift); e = exc[CeltConstants.MAX_PERIOD - 2 * decay_length + i]; E2 += Inlines.SHR32(Inlines.MULT16_16(e, e), shift); } E1 = Inlines.MIN32(E1, E2); decay = Inlines.celt_sqrt(Inlines.frac_div32(Inlines.SHR32(E1, 1), E2)); } /* Move the decoder memory one frame to the left to give us room to * add the data for the new frame. We ignore the overlap that extends * past the end of the buffer, because we aren't going to use it. */ Arrays.MemMoveInt(buf, N, 0, CeltConstants.DECODE_BUFFER_SIZE - N); /* Extrapolate from the end of the excitation with a period of * "pitch_index", scaling down each period by an additional factor of * "decay". */ extrapolation_offset = CeltConstants.MAX_PERIOD - pitch_index; /* We need to extrapolate enough samples to cover a complete MDCT * window (including overlap/2 samples on both sides). */ extrapolation_len = N + overlap; /* We also apply fading if this is not the first loss. */ attenuation = Inlines.MULT16_16_Q15(fade, decay); for (i = j = 0; i < extrapolation_len; i++, j++) { int tmp; if (j >= pitch_index) { j -= pitch_index; attenuation = Inlines.MULT16_16_Q15(attenuation, decay); } buf[CeltConstants.DECODE_BUFFER_SIZE - N + i] = Inlines.SHL32((Inlines.MULT16_16_Q15(attenuation, exc[extrapolation_offset + j])), CeltConstants.SIG_SHIFT); /* Compute the energy of the previously decoded signal whose * excitation we're copying. */ tmp = Inlines.ROUND16( buf[CeltConstants.DECODE_BUFFER_SIZE - CeltConstants.MAX_PERIOD - N + extrapolation_offset + j], CeltConstants.SIG_SHIFT); S1 += Inlines.SHR32(Inlines.MULT16_16(tmp, tmp), 8); } { int[] lpc_mem = new int[CeltConstants.LPC_ORDER]; /* Copy the last decoded samples (prior to the overlap region) to * synthesis filter memory so we can have a continuous signal. */ for (i = 0; i < CeltConstants.LPC_ORDER; i++) { lpc_mem[i] = Inlines.ROUND16(buf[CeltConstants.DECODE_BUFFER_SIZE - N - 1 - i], CeltConstants.SIG_SHIFT); } /* Apply the synthesis filter to convert the excitation back into * the signal domain. */ CeltLPC.celt_iir(buf, CeltConstants.DECODE_BUFFER_SIZE - N, this.lpc[c], buf, CeltConstants.DECODE_BUFFER_SIZE - N, extrapolation_len, CeltConstants.LPC_ORDER, lpc_mem); } /* Check if the synthesis energy is higher than expected, which can * happen with the signal changes during our window. If so, * attenuate. 
*/ { int S2 = 0; for (i = 0; i < extrapolation_len; i++) { int tmp = Inlines.ROUND16(buf[CeltConstants.DECODE_BUFFER_SIZE - N + i], CeltConstants.SIG_SHIFT); S2 += Inlines.SHR32(Inlines.MULT16_16(tmp, tmp), 8); } /* This checks for an "explosion" in the synthesis. */ if (!(S1 > Inlines.SHR32(S2, 2))) { for (i = 0; i < extrapolation_len; i++) { buf[CeltConstants.DECODE_BUFFER_SIZE - N + i] = 0; } } else if (S1 < S2) { int ratio = Inlines.celt_sqrt(Inlines.frac_div32(Inlines.SHR32(S1, 1) + 1, S2 + 1)); for (i = 0; i < overlap; i++) { int tmp_g = CeltConstants.Q15ONE - Inlines.MULT16_16_Q15(window[i], CeltConstants.Q15ONE - ratio); buf[CeltConstants.DECODE_BUFFER_SIZE - N + i] = Inlines.MULT16_32_Q15(tmp_g, buf[CeltConstants.DECODE_BUFFER_SIZE - N + i]); } for (i = overlap; i < extrapolation_len; i++) { buf[CeltConstants.DECODE_BUFFER_SIZE - N + i] = Inlines.MULT16_32_Q15(ratio, buf[CeltConstants.DECODE_BUFFER_SIZE - N + i]); } } } /* Apply the pre-filter to the MDCT overlap for the next frame because * the post-filter will be re-applied in the decoder after the MDCT * overlap. */ CeltCommon.comb_filter(etmp, 0, buf, CeltConstants.DECODE_BUFFER_SIZE, this.postfilter_period, this.postfilter_period, overlap, -this.postfilter_gain, -this.postfilter_gain, this.postfilter_tapset, this.postfilter_tapset, null, 0); /* Simulate TDAC on the concealed audio so that it blends with the * MDCT of the next frame. */ for (i = 0; i < overlap / 2; i++) { buf[CeltConstants.DECODE_BUFFER_SIZE + i] = Inlines.MULT16_32_Q15(window[i], etmp[overlap - 1 - i]) + Inlines.MULT16_32_Q15(window[overlap - i - 1], etmp[i]); } } while (++c < C); } this.loss_count = loss_count + 1; }
internal static void pitch_downsample(int[][] x, int[] x_lp, int len, int C) { int i; int[] ac = new int[5]; int tmp = CeltConstants.Q15ONE; int[] lpc = new int[4]; int[] mem = new int[] { 0, 0, 0, 0, 0 }; int[] lpc2 = new int[5]; int c1 = ((short)(0.5 + (0.8f) * (((int)1) << (15)))) /*Inlines.QCONST16(0.8f, 15)*/; int shift; int maxabs = Inlines.celt_maxabs32(x[0], 0, len); if (C == 2) { int maxabs_1 = Inlines.celt_maxabs32(x[1], 0, len); maxabs = Inlines.MAX32(maxabs, maxabs_1); } if (maxabs < 1) { maxabs = 1; } shift = Inlines.celt_ilog2(maxabs) - 10; if (shift < 0) { shift = 0; } if (C == 2) { shift++; } int halflen = len >> 1; // cached for performance for (i = 1; i < halflen; i++) { x_lp[i] = (Inlines.SHR32(Inlines.HALF32(Inlines.HALF32(x[0][(2 * i - 1)] + x[0][(2 * i + 1)]) + x[0][2 * i]), shift)); } x_lp[0] = (Inlines.SHR32(Inlines.HALF32(Inlines.HALF32(x[0][1]) + x[0][0]), shift)); if (C == 2) { for (i = 1; i < halflen; i++) { x_lp[i] += (Inlines.SHR32(Inlines.HALF32(Inlines.HALF32(x[1][(2 * i - 1)] + x[1][(2 * i + 1)]) + x[1][2 * i]), shift)); } x_lp[0] += (Inlines.SHR32(Inlines.HALF32(Inlines.HALF32(x[1][1]) + x[1][0]), shift)); } Autocorrelation._celt_autocorr(x_lp, ac, null, 0, 4, halflen); /* Noise floor -40 dB */ ac[0] += Inlines.SHR32(ac[0], 13); /* Lag windowing */ for (i = 1; i <= 4; i++) { /*ac[i] *= exp(-.5*(2*M_PI*.002*i)*(2*M_PI*.002*i));*/ ac[i] -= Inlines.MULT16_32_Q15((2 * i * i), ac[i]); } CeltLPC.celt_lpc(lpc, ac, 4); for (i = 0; i < 4; i++) { tmp = Inlines.MULT16_16_Q15(((short)(0.5 + (.9f) * (((int)1) << (15)))) /*Inlines.QCONST16(.9f, 15)*/, tmp); lpc[i] = Inlines.MULT16_16_Q15(lpc[i], tmp); } /* Add a zero */ lpc2[0] = (lpc[0] + ((short)(0.5 + (0.8f) * (((int)1) << (CeltConstants.SIG_SHIFT)))) /*Inlines.QCONST16(0.8f, CeltConstants.SIG_SHIFT)*/); lpc2[1] = (lpc[1] + Inlines.MULT16_16_Q15(c1, lpc[0])); lpc2[2] = (lpc[2] + Inlines.MULT16_16_Q15(c1, lpc[1])); lpc2[3] = (lpc[3] + Inlines.MULT16_16_Q15(c1, lpc[2])); lpc2[4] = Inlines.MULT16_16_Q15(c1, lpc[3]); celt_fir5(x_lp, lpc2, x_lp, halflen, mem); }
/**************************************************************/ /* Compute noise shaping coefficients and initial gain values */ /**************************************************************/ internal static void silk_noise_shape_analysis( SilkChannelEncoder psEnc, /* I/O Encoder state FIX */ SilkEncoderControl psEncCtrl, /* I/O Encoder control FIX */ short[] pitch_res, /* I LPC residual from pitch analysis */ int pitch_res_ptr, short[] x, /* I Input signal [ frame_length + la_shape ] */ int x_ptr ) { SilkShapeState psShapeSt = psEnc.sShape; int k, i, nSamples, Qnrg, b_Q14, warping_Q16, scale = 0; int SNR_adj_dB_Q7, HarmBoost_Q16, HarmShapeGain_Q16, Tilt_Q16, tmp32; int nrg, pre_nrg_Q30, log_energy_Q7, log_energy_prev_Q7, energy_variation_Q7; int delta_Q16, BWExp1_Q16, BWExp2_Q16, gain_mult_Q16, gain_add_Q16, strength_Q16, b_Q8; int[] auto_corr = new int[SilkConstants.MAX_SHAPE_LPC_ORDER + 1]; int[] refl_coef_Q16 = new int[SilkConstants.MAX_SHAPE_LPC_ORDER]; int[] AR1_Q24 = new int[SilkConstants.MAX_SHAPE_LPC_ORDER]; int[] AR2_Q24 = new int[SilkConstants.MAX_SHAPE_LPC_ORDER]; short[] x_windowed; int pitch_res_ptr2; int x_ptr2; /* Point to start of first LPC analysis block */ x_ptr2 = x_ptr - psEnc.la_shape; /****************/ /* GAIN CONTROL */ /****************/ SNR_adj_dB_Q7 = psEnc.SNR_dB_Q7; /* Input quality is the average of the quality in the lowest two VAD bands */ psEncCtrl.input_quality_Q14 = (int)Inlines.silk_RSHIFT((int)psEnc.input_quality_bands_Q15[0] + psEnc.input_quality_bands_Q15[1], 2); /* Coding quality level, between 0.0_Q0 and 1.0_Q0, but in Q14 */ psEncCtrl.coding_quality_Q14 = Inlines.silk_RSHIFT(Sigmoid.silk_sigm_Q15(Inlines.silk_RSHIFT_ROUND(SNR_adj_dB_Q7 - ((int)((20.0f) * ((long)1 << (7)) + 0.5)) /*Inlines.SILK_CONST(20.0f, 7)*/, 4)), 1); /* Reduce coding SNR during low speech activity */ if (psEnc.useCBR == 0) { b_Q8 = ((int)((1.0f) * ((long)1 << (8)) + 0.5)) /*Inlines.SILK_CONST(1.0f, 8)*/ - psEnc.speech_activity_Q8; b_Q8 = Inlines.silk_SMULWB(Inlines.silk_LSHIFT(b_Q8, 8), b_Q8); SNR_adj_dB_Q7 = Inlines.silk_SMLAWB(SNR_adj_dB_Q7, Inlines.silk_SMULBB(((int)((0 - TuningParameters.BG_SNR_DECR_dB) * ((long)1 << (7)) + 0.5)) /*Inlines.SILK_CONST(0 - TuningParameters.BG_SNR_DECR_dB, 7)*/ >> (4 + 1), b_Q8), /* Q11*/ Inlines.silk_SMULWB(((int)((1.0f) * ((long)1 << (14)) + 0.5)) /*Inlines.SILK_CONST(1.0f, 14)*/ + psEncCtrl.input_quality_Q14, psEncCtrl.coding_quality_Q14)); /* Q12*/ } if (psEnc.indices.signalType == SilkConstants.TYPE_VOICED) { /* Reduce gains for periodic signals */ SNR_adj_dB_Q7 = Inlines.silk_SMLAWB(SNR_adj_dB_Q7, ((int)((TuningParameters.HARM_SNR_INCR_dB) * ((long)1 << (8)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.HARM_SNR_INCR_dB, 8)*/, psEnc.LTPCorr_Q15); } else { /* For unvoiced signals and low-quality input, adjust the quality slower than SNR_dB setting */ SNR_adj_dB_Q7 = Inlines.silk_SMLAWB(SNR_adj_dB_Q7, Inlines.silk_SMLAWB(((int)((6.0f) * ((long)1 << (9)) + 0.5)) /*Inlines.SILK_CONST(6.0f, 9)*/, -((int)((0.4f) * ((long)1 << (18)) + 0.5)) /*Inlines.SILK_CONST(0.4f, 18)*/, psEnc.SNR_dB_Q7), ((int)((1.0f) * ((long)1 << (14)) + 0.5)) /*Inlines.SILK_CONST(1.0f, 14)*/ - psEncCtrl.input_quality_Q14); } /*************************/ /* SPARSENESS PROCESSING */ /*************************/ /* Set quantizer offset */ if (psEnc.indices.signalType == SilkConstants.TYPE_VOICED) { /* Initially set to 0; may be overruled in process_gains(..) 
*/ psEnc.indices.quantOffsetType = 0; psEncCtrl.sparseness_Q8 = 0; } else { /* Sparseness measure, based on relative fluctuations of energy per 2 milliseconds */ nSamples = Inlines.silk_LSHIFT(psEnc.fs_kHz, 1); energy_variation_Q7 = 0; log_energy_prev_Q7 = 0; pitch_res_ptr2 = pitch_res_ptr; for (k = 0; k < Inlines.silk_SMULBB(SilkConstants.SUB_FRAME_LENGTH_MS, psEnc.nb_subfr) / 2; k++) { SumSqrShift.silk_sum_sqr_shift(out nrg, out scale, pitch_res, pitch_res_ptr2, nSamples); nrg += Inlines.silk_RSHIFT(nSamples, scale); /* Q(-scale)*/ log_energy_Q7 = Inlines.silk_lin2log(nrg); if (k > 0) { energy_variation_Q7 += Inlines.silk_abs(log_energy_Q7 - log_energy_prev_Q7); } log_energy_prev_Q7 = log_energy_Q7; pitch_res_ptr2 += nSamples; } psEncCtrl.sparseness_Q8 = Inlines.silk_RSHIFT(Sigmoid.silk_sigm_Q15(Inlines.silk_SMULWB(energy_variation_Q7 - ((int)((5.0f) * ((long)1 << (7)) + 0.5)) /*Inlines.SILK_CONST(5.0f, 7)*/, ((int)((0.1f) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(0.1f, 16)*/)), 7); /* Set quantization offset depending on sparseness measure */ if (psEncCtrl.sparseness_Q8 > ((int)((TuningParameters.SPARSENESS_THRESHOLD_QNT_OFFSET) * ((long)1 << (8)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.SPARSENESS_THRESHOLD_QNT_OFFSET, 8)*/) { psEnc.indices.quantOffsetType = 0; } else { psEnc.indices.quantOffsetType = 1; } /* Increase coding SNR for sparse signals */ SNR_adj_dB_Q7 = Inlines.silk_SMLAWB(SNR_adj_dB_Q7, ((int)((TuningParameters.SPARSE_SNR_INCR_dB) * ((long)1 << (15)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.SPARSE_SNR_INCR_dB, 15)*/, psEncCtrl.sparseness_Q8 - ((int)((0.5f) * ((long)1 << (8)) + 0.5)) /*Inlines.SILK_CONST(0.5f, 8)*/); } /*******************************/ /* Control bandwidth expansion */ /*******************************/ /* More BWE for signals with high prediction gain */ strength_Q16 = Inlines.silk_SMULWB(psEncCtrl.predGain_Q16, ((int)((TuningParameters.FIND_PITCH_WHITE_NOISE_FRACTION) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.FIND_PITCH_WHITE_NOISE_FRACTION, 16)*/); BWExp1_Q16 = BWExp2_Q16 = Inlines.silk_DIV32_varQ(((int)((TuningParameters.BANDWIDTH_EXPANSION) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.BANDWIDTH_EXPANSION, 16)*/, Inlines.silk_SMLAWW(((int)((1.0f) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(1.0f, 16)*/, strength_Q16, strength_Q16), 16); delta_Q16 = Inlines.silk_SMULWB(((int)((1.0f) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(1.0f, 16)*/ - Inlines.silk_SMULBB(3, psEncCtrl.coding_quality_Q14), ((int)((TuningParameters.LOW_RATE_BANDWIDTH_EXPANSION_DELTA) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.LOW_RATE_BANDWIDTH_EXPANSION_DELTA, 16)*/); BWExp1_Q16 = Inlines.silk_SUB32(BWExp1_Q16, delta_Q16); BWExp2_Q16 = Inlines.silk_ADD32(BWExp2_Q16, delta_Q16); /* BWExp1 will be applied after BWExp2, so make it relative */ BWExp1_Q16 = Inlines.silk_DIV32_16(Inlines.silk_LSHIFT(BWExp1_Q16, 14), Inlines.silk_RSHIFT(BWExp2_Q16, 2)); if (psEnc.warping_Q16 > 0) { /* Slightly more warping in analysis will move quantization noise up in frequency, where it's better masked */ warping_Q16 = Inlines.silk_SMLAWB(psEnc.warping_Q16, (int)psEncCtrl.coding_quality_Q14, ((int)((0.01f) * ((long)1 << (18)) + 0.5)) /*Inlines.SILK_CONST(0.01f, 18)*/); } else { warping_Q16 = 0; } /********************************************/ /* Compute noise shaping AR coefs and gains */ /********************************************/ x_windowed = new short[psEnc.shapeWinLength]; for (k = 0; k < psEnc.nb_subfr; 
k++) { /* Apply window: sine slope followed by flat part followed by cosine slope */ int shift, slope_part, flat_part; flat_part = psEnc.fs_kHz * 3; slope_part = Inlines.silk_RSHIFT(psEnc.shapeWinLength - flat_part, 1); ApplySineWindow.silk_apply_sine_window(x_windowed, 0, x, x_ptr2, 1, slope_part); shift = slope_part; Array.Copy(x, x_ptr2 + shift, x_windowed, shift, flat_part); shift += flat_part; ApplySineWindow.silk_apply_sine_window(x_windowed, shift, x, x_ptr2 + shift, 2, slope_part); /* Update pointer: next LPC analysis block */ x_ptr2 += psEnc.subfr_length; BoxedValueInt scale_boxed = new BoxedValueInt(scale); if (psEnc.warping_Q16 > 0) { /* Calculate warped auto correlation */ Autocorrelation.silk_warped_autocorrelation(auto_corr, scale_boxed, x_windowed, warping_Q16, psEnc.shapeWinLength, psEnc.shapingLPCOrder); } else { /* Calculate regular auto correlation */ Autocorrelation.silk_autocorr(auto_corr, scale_boxed, x_windowed, psEnc.shapeWinLength, psEnc.shapingLPCOrder + 1); } scale = scale_boxed.Val; /* Add white noise, as a fraction of energy */ auto_corr[0] = Inlines.silk_ADD32(auto_corr[0], Inlines.silk_max_32(Inlines.silk_SMULWB(Inlines.silk_RSHIFT(auto_corr[0], 4), ((int)((TuningParameters.SHAPE_WHITE_NOISE_FRACTION) * ((long)1 << (20)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.SHAPE_WHITE_NOISE_FRACTION, 20)*/), 1)); /* Calculate the reflection coefficients using schur */ nrg = Schur.silk_schur64(refl_coef_Q16, auto_corr, psEnc.shapingLPCOrder); Inlines.OpusAssert(nrg >= 0); /* Convert reflection coefficients to prediction coefficients */ K2A.silk_k2a_Q16(AR2_Q24, refl_coef_Q16, psEnc.shapingLPCOrder); Qnrg = -scale; /* range: -12...30*/ Inlines.OpusAssert(Qnrg >= -12); Inlines.OpusAssert(Qnrg <= 30); /* Make sure that Qnrg is an even number */ if ((Qnrg & 1) != 0) { Qnrg -= 1; nrg >>= 1; } tmp32 = Inlines.silk_SQRT_APPROX(nrg); Qnrg >>= 1; /* range: -6...15*/ psEncCtrl.Gains_Q16[k] = Inlines.silk_LSHIFT_SAT32(tmp32, 16 - Qnrg); if (psEnc.warping_Q16 > 0) { /* Adjust gain for warping */ gain_mult_Q16 = warped_gain(AR2_Q24, warping_Q16, psEnc.shapingLPCOrder); Inlines.OpusAssert(psEncCtrl.Gains_Q16[k] >= 0); if (Inlines.silk_SMULWW(Inlines.silk_RSHIFT_ROUND(psEncCtrl.Gains_Q16[k], 1), gain_mult_Q16) >= (int.MaxValue >> 1)) { psEncCtrl.Gains_Q16[k] = int.MaxValue; } else { psEncCtrl.Gains_Q16[k] = Inlines.silk_SMULWW(psEncCtrl.Gains_Q16[k], gain_mult_Q16); } } /* Bandwidth expansion for synthesis filter shaping */ BWExpander.silk_bwexpander_32(AR2_Q24, psEnc.shapingLPCOrder, BWExp2_Q16); /* Compute noise shaping filter coefficients */ Array.Copy(AR2_Q24, AR1_Q24, psEnc.shapingLPCOrder); /* Bandwidth expansion for analysis filter shaping */ Inlines.OpusAssert(BWExp1_Q16 <= ((int)((1.0f) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(1.0f, 16)*/); BWExpander.silk_bwexpander_32(AR1_Q24, psEnc.shapingLPCOrder, BWExp1_Q16); /* Ratio of prediction gains, in energy domain */ pre_nrg_Q30 = LPCInversePredGain.silk_LPC_inverse_pred_gain_Q24(AR2_Q24, psEnc.shapingLPCOrder); nrg = LPCInversePredGain.silk_LPC_inverse_pred_gain_Q24(AR1_Q24, psEnc.shapingLPCOrder); /*psEncCtrl.GainsPre[ k ] = 1.0f - 0.7f * ( 1.0f - pre_nrg / nrg ) = 0.3f + 0.7f * pre_nrg / nrg;*/ pre_nrg_Q30 = Inlines.silk_LSHIFT32(Inlines.silk_SMULWB(pre_nrg_Q30, ((int)((0.7f) * ((long)1 << (15)) + 0.5)) /*Inlines.SILK_CONST(0.7f, 15)*/), 1); psEncCtrl.GainsPre_Q14[k] = (int)((int)((0.3f) * ((long)1 << (14)) + 0.5)) /*Inlines.SILK_CONST(0.3f, 14)*/ + Inlines.silk_DIV32_varQ(pre_nrg_Q30, nrg, 14); /* Convert to monic 
warped prediction coefficients and limit absolute values */ limit_warped_coefs(AR2_Q24, AR1_Q24, warping_Q16, ((int)((3.999f) * ((long)1 << (24)) + 0.5)) /*Inlines.SILK_CONST(3.999f, 24)*/, psEnc.shapingLPCOrder); /* Convert from Q24 to Q13 and store in int16 */ for (i = 0; i < psEnc.shapingLPCOrder; i++) { psEncCtrl.AR1_Q13[k * SilkConstants.MAX_SHAPE_LPC_ORDER + i] = (short)Inlines.silk_SAT16(Inlines.silk_RSHIFT_ROUND(AR1_Q24[i], 11)); psEncCtrl.AR2_Q13[k * SilkConstants.MAX_SHAPE_LPC_ORDER + i] = (short)Inlines.silk_SAT16(Inlines.silk_RSHIFT_ROUND(AR2_Q24[i], 11)); } } /*****************/ /* Gain tweaking */ /*****************/ /* Increase gains during low speech activity and put lower limit on gains */ gain_mult_Q16 = Inlines.silk_log2lin(-Inlines.silk_SMLAWB(-((int)((16.0f) * ((long)1 << (7)) + 0.5)) /*Inlines.SILK_CONST(16.0f, 7)*/, SNR_adj_dB_Q7, ((int)((0.16f) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(0.16f, 16)*/)); gain_add_Q16 = Inlines.silk_log2lin(Inlines.silk_SMLAWB(((int)((16.0f) * ((long)1 << (7)) + 0.5)) /*Inlines.SILK_CONST(16.0f, 7)*/, ((int)((SilkConstants.MIN_QGAIN_DB) * ((long)1 << (7)) + 0.5)) /*Inlines.SILK_CONST(SilkConstants.MIN_QGAIN_DB, 7)*/, ((int)((0.16f) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(0.16f, 16)*/)); Inlines.OpusAssert(gain_mult_Q16 > 0); for (k = 0; k < psEnc.nb_subfr; k++) { psEncCtrl.Gains_Q16[k] = Inlines.silk_SMULWW(psEncCtrl.Gains_Q16[k], gain_mult_Q16); Inlines.OpusAssert(psEncCtrl.Gains_Q16[k] >= 0); psEncCtrl.Gains_Q16[k] = Inlines.silk_ADD_POS_SAT32(psEncCtrl.Gains_Q16[k], gain_add_Q16); } gain_mult_Q16 = ((int)((1.0f) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(1.0f, 16)*/ + Inlines.silk_RSHIFT_ROUND(Inlines.silk_MLA(((int)((TuningParameters.INPUT_TILT) * ((long)1 << (26)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.INPUT_TILT, 26)*/, psEncCtrl.coding_quality_Q14, ((int)((TuningParameters.HIGH_RATE_INPUT_TILT) * ((long)1 << (12)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.HIGH_RATE_INPUT_TILT, 12)*/), 10); for (k = 0; k < psEnc.nb_subfr; k++) { psEncCtrl.GainsPre_Q14[k] = Inlines.silk_SMULWB(gain_mult_Q16, psEncCtrl.GainsPre_Q14[k]); } /************************************************/ /* Control low-frequency shaping and noise tilt */ /************************************************/ /* Less low frequency shaping for noisy inputs */ strength_Q16 = Inlines.silk_MUL(((int)((TuningParameters.LOW_FREQ_SHAPING) * ((long)1 << (4)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.LOW_FREQ_SHAPING, 4)*/, Inlines.silk_SMLAWB(((int)((1.0f) * ((long)1 << (12)) + 0.5)) /*Inlines.SILK_CONST(1.0f, 12)*/, ((int)((TuningParameters.LOW_QUALITY_LOW_FREQ_SHAPING_DECR) * ((long)1 << (13)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.LOW_QUALITY_LOW_FREQ_SHAPING_DECR, 13)*/, psEnc.input_quality_bands_Q15[0] - ((int)((1.0f) * ((long)1 << (15)) + 0.5)) /*Inlines.SILK_CONST(1.0f, 15)*/)); strength_Q16 = Inlines.silk_RSHIFT(Inlines.silk_MUL(strength_Q16, psEnc.speech_activity_Q8), 8); if (psEnc.indices.signalType == SilkConstants.TYPE_VOICED) { /* Reduce low frequencies quantization noise for periodic signals, depending on pitch lag */ /*f = 400; freqz([1, -0.98 + 2e-4 * f], [1, -0.97 + 7e-4 * f], 2^12, Fs); axis([0, 1000, -10, 1])*/ int fs_kHz_inv = Inlines.silk_DIV32_16(((int)((0.2f) * ((long)1 << (14)) + 0.5)) /*Inlines.SILK_CONST(0.2f, 14)*/, psEnc.fs_kHz); for (k = 0; k < psEnc.nb_subfr; k++) { b_Q14 = fs_kHz_inv + Inlines.silk_DIV32_16(((int)((3.0f) * ((long)1 << (14)) + 0.5)) /*Inlines.SILK_CONST(3.0f, 14)*/, psEncCtrl.pitchL[k]); 
/* Pack two coefficients in one int32 */ psEncCtrl.LF_shp_Q14[k] = Inlines.silk_LSHIFT(((int)((1.0f) * ((long)1 << (14)) + 0.5)) /*Inlines.SILK_CONST(1.0f, 14)*/ - b_Q14 - Inlines.silk_SMULWB(strength_Q16, b_Q14), 16); psEncCtrl.LF_shp_Q14[k] |= (b_Q14 - ((int)((1.0f) * ((long)1 << (14)) + 0.5)) /*Inlines.SILK_CONST(1.0f, 14)*/) & 0xFFFF; // opus bug: again, cast to ushort was done here where bitwise masking was intended } Inlines.OpusAssert(((int)((TuningParameters.HARM_HP_NOISE_COEF) * ((long)1 << (24)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.HARM_HP_NOISE_COEF, 24)*/ < ((int)((0.5f) * ((long)1 << (24)) + 0.5)) /*Inlines.SILK_CONST(0.5f, 24)*/); /* Guarantees that second argument to SMULWB() is within range of an short*/ Tilt_Q16 = -((int)((TuningParameters.HP_NOISE_COEF) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.HP_NOISE_COEF, 16)*/ - Inlines.silk_SMULWB(((int)((1.0f) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(1.0f, 16)*/ - ((int)((TuningParameters.HP_NOISE_COEF) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.HP_NOISE_COEF, 16)*/, Inlines.silk_SMULWB(((int)((TuningParameters.HARM_HP_NOISE_COEF) * ((long)1 << (24)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.HARM_HP_NOISE_COEF, 24)*/, psEnc.speech_activity_Q8)); } else { b_Q14 = Inlines.silk_DIV32_16(21299, psEnc.fs_kHz); /* 1.3_Q0 = 21299_Q14*/ /* Pack two coefficients in one int32 */ psEncCtrl.LF_shp_Q14[0] = Inlines.silk_LSHIFT(((int)((1.0f) * ((long)1 << (14)) + 0.5)) /*Inlines.SILK_CONST(1.0f, 14)*/ - b_Q14 - Inlines.silk_SMULWB(strength_Q16, Inlines.silk_SMULWB(((int)((0.6f) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(0.6f, 16)*/, b_Q14)), 16); psEncCtrl.LF_shp_Q14[0] |= (b_Q14 - ((int)((1.0f) * ((long)1 << (14)) + 0.5)) /*Inlines.SILK_CONST(1.0f, 14)*/) & 0xFFFF; // opus bug: cast to ushort is better expressed as a bitwise operator, otherwise runtime analysis might flag it as an overflow error for (k = 1; k < psEnc.nb_subfr; k++) { psEncCtrl.LF_shp_Q14[k] = psEncCtrl.LF_shp_Q14[0]; } Tilt_Q16 = -((int)((TuningParameters.HP_NOISE_COEF) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.HP_NOISE_COEF, 16)*/; } /****************************/ /* HARMONIC SHAPING CONTROL */ /****************************/ /* Control boosting of harmonic frequencies */ HarmBoost_Q16 = Inlines.silk_SMULWB(Inlines.silk_SMULWB(((int)((1.0f) * ((long)1 << (17)) + 0.5)) /*Inlines.SILK_CONST(1.0f, 17)*/ - Inlines.silk_LSHIFT(psEncCtrl.coding_quality_Q14, 3), psEnc.LTPCorr_Q15), ((int)((TuningParameters.LOW_RATE_HARMONIC_BOOST) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.LOW_RATE_HARMONIC_BOOST, 16)*/); /* More harmonic boost for noisy input signals */ HarmBoost_Q16 = Inlines.silk_SMLAWB(HarmBoost_Q16, ((int)((1.0f) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(1.0f, 16)*/ - Inlines.silk_LSHIFT(psEncCtrl.input_quality_Q14, 2), ((int)((TuningParameters.LOW_INPUT_QUALITY_HARMONIC_BOOST) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.LOW_INPUT_QUALITY_HARMONIC_BOOST, 16)*/); if (SilkConstants.USE_HARM_SHAPING != 0 && psEnc.indices.signalType == SilkConstants.TYPE_VOICED) { /* More harmonic noise shaping for high bitrates or noisy input */ HarmShapeGain_Q16 = Inlines.silk_SMLAWB(((int)((TuningParameters.HARMONIC_SHAPING) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.HARMONIC_SHAPING, 16)*/, ((int)((1.0f) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(1.0f, 16)*/ - Inlines.silk_SMULWB(((int)((1.0f) * ((long)1 << (18)) + 0.5)) 
/*Inlines.SILK_CONST(1.0f, 18)*/ - Inlines.silk_LSHIFT(psEncCtrl.coding_quality_Q14, 4), psEncCtrl.input_quality_Q14), ((int)((TuningParameters.HIGH_RATE_OR_LOW_QUALITY_HARMONIC_SHAPING) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.HIGH_RATE_OR_LOW_QUALITY_HARMONIC_SHAPING, 16)*/); /* Less harmonic noise shaping for less periodic signals */ HarmShapeGain_Q16 = Inlines.silk_SMULWB(Inlines.silk_LSHIFT(HarmShapeGain_Q16, 1), Inlines.silk_SQRT_APPROX(Inlines.silk_LSHIFT(psEnc.LTPCorr_Q15, 15))); } else { HarmShapeGain_Q16 = 0; } /*************************/ /* Smooth over subframes */ /*************************/ for (k = 0; k < SilkConstants.MAX_NB_SUBFR; k++) { psShapeSt.HarmBoost_smth_Q16 = Inlines.silk_SMLAWB(psShapeSt.HarmBoost_smth_Q16, HarmBoost_Q16 - psShapeSt.HarmBoost_smth_Q16, ((int)((TuningParameters.SUBFR_SMTH_COEF) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.SUBFR_SMTH_COEF, 16)*/); psShapeSt.HarmShapeGain_smth_Q16 = Inlines.silk_SMLAWB(psShapeSt.HarmShapeGain_smth_Q16, HarmShapeGain_Q16 - psShapeSt.HarmShapeGain_smth_Q16, ((int)((TuningParameters.SUBFR_SMTH_COEF) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.SUBFR_SMTH_COEF, 16)*/); psShapeSt.Tilt_smth_Q16 = Inlines.silk_SMLAWB(psShapeSt.Tilt_smth_Q16, Tilt_Q16 - psShapeSt.Tilt_smth_Q16, ((int)((TuningParameters.SUBFR_SMTH_COEF) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.SUBFR_SMTH_COEF, 16)*/); psEncCtrl.HarmBoost_Q14[k] = (int)Inlines.silk_RSHIFT_ROUND(psShapeSt.HarmBoost_smth_Q16, 2); psEncCtrl.HarmShapeGain_Q14[k] = (int)Inlines.silk_RSHIFT_ROUND(psShapeSt.HarmShapeGain_smth_Q16, 2); psEncCtrl.Tilt_Q14[k] = (int)Inlines.silk_RSHIFT_ROUND(psShapeSt.Tilt_smth_Q16, 2); } }
/* Find pitch lags */ internal static void silk_find_pitch_lags( SilkChannelEncoder psEnc, /* I/O encoder state */ SilkEncoderControl psEncCtrl, /* I/O encoder control */ short[] res, /* O residual */ short[] x, /* I Speech signal */ int x_ptr ) { int buf_len, i, scale; int thrhld_Q13, res_nrg; int x_buf, x_buf_ptr; short[] Wsig; int Wsig_ptr; int[] auto_corr = new int[SilkConstants.MAX_FIND_PITCH_LPC_ORDER + 1]; short[] rc_Q15 = new short[SilkConstants.MAX_FIND_PITCH_LPC_ORDER]; int[] A_Q24 = new int[SilkConstants.MAX_FIND_PITCH_LPC_ORDER]; short[] A_Q12 = new short[SilkConstants.MAX_FIND_PITCH_LPC_ORDER]; /******************************************/ /* Set up buffer lengths etc based on Fs */ /******************************************/ buf_len = psEnc.la_pitch + psEnc.frame_length + psEnc.ltp_mem_length; /* Safety check */ Inlines.OpusAssert(buf_len >= psEnc.pitch_LPC_win_length); x_buf = x_ptr - psEnc.ltp_mem_length; /*************************************/ /* Estimate LPC AR coefficients */ /*************************************/ /* Calculate windowed signal */ Wsig = new short[psEnc.pitch_LPC_win_length]; /* First LA_LTP samples */ x_buf_ptr = x_buf + buf_len - psEnc.pitch_LPC_win_length; Wsig_ptr = 0; ApplySineWindow.silk_apply_sine_window(Wsig, Wsig_ptr, x, x_buf_ptr, 1, psEnc.la_pitch); /* Middle un - windowed samples */ Wsig_ptr += psEnc.la_pitch; x_buf_ptr += psEnc.la_pitch; Array.Copy(x, x_buf_ptr, Wsig, Wsig_ptr, (psEnc.pitch_LPC_win_length - Inlines.silk_LSHIFT(psEnc.la_pitch, 1))); /* Last LA_LTP samples */ Wsig_ptr += psEnc.pitch_LPC_win_length - Inlines.silk_LSHIFT(psEnc.la_pitch, 1); x_buf_ptr += psEnc.pitch_LPC_win_length - Inlines.silk_LSHIFT(psEnc.la_pitch, 1); ApplySineWindow.silk_apply_sine_window(Wsig, Wsig_ptr, x, x_buf_ptr, 2, psEnc.la_pitch); /* Calculate autocorrelation sequence */ BoxedValueInt boxed_scale = new BoxedValueInt(); Autocorrelation.silk_autocorr(auto_corr, boxed_scale, Wsig, psEnc.pitch_LPC_win_length, psEnc.pitchEstimationLPCOrder + 1); scale = boxed_scale.Val; /* Add white noise, as fraction of energy */ auto_corr[0] = Inlines.silk_SMLAWB(auto_corr[0], auto_corr[0], ((int)((TuningParameters.FIND_PITCH_WHITE_NOISE_FRACTION) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.FIND_PITCH_WHITE_NOISE_FRACTION, 16)*/) + 1; /* Calculate the reflection coefficients using schur */ res_nrg = Schur.silk_schur(rc_Q15, auto_corr, psEnc.pitchEstimationLPCOrder); /* Prediction gain */ psEncCtrl.predGain_Q16 = Inlines.silk_DIV32_varQ(auto_corr[0], Inlines.silk_max_int(res_nrg, 1), 16); /* Convert reflection coefficients to prediction coefficients */ K2A.silk_k2a(A_Q24, rc_Q15, psEnc.pitchEstimationLPCOrder); /* Convert From 32 bit Q24 to 16 bit Q12 coefs */ for (i = 0; i < psEnc.pitchEstimationLPCOrder; i++) { A_Q12[i] = (short)Inlines.silk_SAT16(Inlines.silk_RSHIFT(A_Q24[i], 12)); } /* Do BWE */ BWExpander.silk_bwexpander(A_Q12, psEnc.pitchEstimationLPCOrder, ((int)((TuningParameters.FIND_PITCH_BANDWIDTH_EXPANSION) * ((long)1 << (16)) + 0.5)) /*Inlines.SILK_CONST(TuningParameters.FIND_PITCH_BANDWIDTH_EXPANSION, 16)*/); /*****************************************/ /* LPC analysis filtering */ /*****************************************/ Filters.silk_LPC_analysis_filter(res, 0, x, x_buf, A_Q12, 0, buf_len, psEnc.pitchEstimationLPCOrder); if (psEnc.indices.signalType != SilkConstants.TYPE_NO_VOICE_ACTIVITY && psEnc.first_frame_after_reset == 0) { /* Threshold for pitch estimator */ thrhld_Q13 = ((int)((0.6f) * ((long)1 << (13)) + 0.5)) 
/*Inlines.SILK_CONST(0.6f, 13)*/; thrhld_Q13 = Inlines.silk_SMLABB(thrhld_Q13, ((int)((-0.004f) * ((long)1 << (13)) + 0.5)) /*Inlines.SILK_CONST(-0.004f, 13)*/, psEnc.pitchEstimationLPCOrder); thrhld_Q13 = Inlines.silk_SMLAWB(thrhld_Q13, ((int)((-0.1f) * ((long)1 << (21)) + 0.5)) /*Inlines.SILK_CONST(-0.1f, 21)*/, psEnc.speech_activity_Q8); thrhld_Q13 = Inlines.silk_SMLABB(thrhld_Q13, ((int)((-0.15f) * ((long)1 << (13)) + 0.5)) /*Inlines.SILK_CONST(-0.15f, 13)*/, Inlines.silk_RSHIFT(psEnc.prevSignalType, 1)); thrhld_Q13 = Inlines.silk_SMLAWB(thrhld_Q13, ((int)((-0.1f) * ((long)1 << (14)) + 0.5)) /*Inlines.SILK_CONST(-0.1f, 14)*/, psEnc.input_tilt_Q15); thrhld_Q13 = Inlines.silk_SAT16(thrhld_Q13); /*****************************************/ /* Call pitch estimator */ /*****************************************/ BoxedValueShort boxed_lagIndex = new BoxedValueShort(psEnc.indices.lagIndex); BoxedValueSbyte boxed_contourIndex = new BoxedValueSbyte(psEnc.indices.contourIndex); BoxedValueInt boxed_LTPcorr = new BoxedValueInt(psEnc.LTPCorr_Q15); if (PitchAnalysisCore.silk_pitch_analysis_core(res, psEncCtrl.pitchL, boxed_lagIndex, boxed_contourIndex, boxed_LTPcorr, psEnc.prevLag, psEnc.pitchEstimationThreshold_Q16, (int)thrhld_Q13, psEnc.fs_kHz, psEnc.pitchEstimationComplexity, psEnc.nb_subfr) == 0) { psEnc.indices.signalType = SilkConstants.TYPE_VOICED; } else { psEnc.indices.signalType = SilkConstants.TYPE_UNVOICED; } psEnc.indices.lagIndex = boxed_lagIndex.Val; psEnc.indices.contourIndex = boxed_contourIndex.Val; psEnc.LTPCorr_Q15 = boxed_LTPcorr.Val; } else { Arrays.MemSetInt(psEncCtrl.pitchL, 0, SilkConstants.MAX_NB_SUBFR); psEnc.indices.lagIndex = 0; psEnc.indices.contourIndex = 0; psEnc.LTPCorr_Q15 = 0; } }
public void Test1()
{
    AbstractChromosomeFactory factory = new SolutionFactory();

    int[] routeWeights = new int[] { 20000, 50000, 120000, 200000, 350000 };
    int distanceWeight = 1;

    string[] customerTypes = new string[] { "C1", "C2", "R1", "R2", "RC1", "RC2" };
    Dictionary<string, int> customerNumbers = new Dictionary<string, int>()
    {
        { "2", 20000 }, { "4", 50000 }, { "6", 120000 }, { "8", 200000 }, { "10", 350000 }
    };
    string[] customerInstances = new string[] { "1", "2", "3", "4", "5", "6", "7", "8", "9", "10" };

    CrossoverOperator[] crossoverOps = new CrossoverOperator[]
    {
        new OrderCrossover(), new PartiallyMatchedCrossover(), new CycleCrossover(), new UniformBasedOrderCrossover()
    };
    MutationOperator[] mutationOps = new MutationOperator[]
    {
        new SwapOperator(), new InsertionOperator(), new InversionOperator(), new DisplacementOperator()
    };

    int randomWalkNumber = 2000, randomWalkSteps = 5000;
    string floatPattern = "0.000", separator = ",";
    float epsilon = 0.05f;

    foreach (var type in customerTypes)
    {
        foreach (var number in customerNumbers)
        {
            foreach (var instance in customerInstances)
            {
                string instanceId = type + '_' + number.Key + '_' + instance;
                VrptwProblem problem = reader.ReadFromFile(FILE_PATH + @"\" + instanceId + ".txt");
                FitnessFunction ff = new FitnessFunction(number.Value, distanceWeight);
                Landscape landscape = new Landscape(problem, factory, ff);

                foreach (var op in crossoverOps)
                {
                    string path = RESULT_PATH + @"\" + instanceId + "_" + op.GetId() + ".csv";
                    if (!File.Exists(path))
                    {
                        File.Create(path).Close();
                        File.ReadAllText(path);

                        using (TextWriter tw = new StreamWriter(path))
                        {
                            tw.WriteLine("AC, IC, PIC, DBI");
                            for (int i = 0; i < randomWalkNumber; ++i)
                            {
                                var rwResult = landscape.RandomWalk(randomWalkSteps, op);

                                float ac  = Autocorrelation.Run(rwResult);
                                float ic  = InformationContent.Run(rwResult, epsilon);
                                float pic = PartialInformationContent.Run(rwResult, epsilon);
                                float dbi = DensityBasinInformation.Run(rwResult, epsilon);

                                string line = ac.ToString(floatPattern) + separator
                                            + ic.ToString(floatPattern) + separator
                                            + pic.ToString(floatPattern) + separator
                                            + dbi.ToString(floatPattern);
                                tw.WriteLine(line);
                            }
                        }
                    }
                }
            }
        }
    }
}