/// <summary>
/// Returns prediction for a sample at position
/// </summary>
public static float CalcBurgPred(
    AudioData audioData,
    int position)
{
    var history = audioData.AudioProcessingSettings.HistoryLengthSamples;

    // Use output audio as the input because it already contains
    // fixed samples before the sample at position
    var samples = new double[history + 1];
    for (var offset = 0; offset <= history; offset++)
    {
        samples[offset] = audioData.GetOutputSample(position - history + offset);
    }

    var algorithm = new FastBurgAlgorithm64(samples);
    algorithm.Train(
        history,
        audioData.AudioProcessingSettings.CoefficientsNumber * 2,
        history);

    return (float)algorithm.GetForwardPrediction();
}
/// <summary>
/// Returns prediction for a sample at position, computed from the
/// (unfixed) input audio rather than the output audio.
/// </summary>
private static float CalcBurgPredFromInput(
    AudioData audioData,
    int position)
{
    var history = audioData.AudioProcessingSettings.HistoryLengthSamples;

    // Collect the history window plus the sample being predicted
    var samples = new double[history + 1];
    for (var offset = 0; offset <= history; offset++)
    {
        samples[offset] = audioData.GetInputSample(position - history + offset);
    }

    var algorithm = new FastBurgAlgorithm64(samples);
    algorithm.Train(
        history,
        audioData.AudioProcessingSettings.CoefficientsNumber * 2,
        history);

    return (float)algorithm.GetForwardPrediction();
}
/// <summary>
/// Calculates prediction errors for a channel using CPU (Parallel.For).
/// Forward and backward Burg predictions are combined per sample:
/// backward-only for the first samples, averaged in the middle,
/// forward-only for the last samples.
/// </summary>
private static void CalculateBurgPredictionErrCpu(
    AudioData audioData,
    IProgress<double> progress)
{
    var historyLengthSamples =
        audioData.AudioProcessingSettings.HistoryLengthSamples;
    var lengthSamples = audioData.LengthSamples();

    var forwardPredictions = new float[lengthSamples];
    var backwardPredictions = new float[lengthSamples];

    // Copy the input once so every parallel worker reads the same array
    var inputAudio = new double[lengthSamples];
    for (var index = 0; index < lengthSamples; index++)
    {
        inputAudio[index] = audioData.GetInputSample(index);
    }

    // We will use steps to report progress.
    // BUG FIX: for audio shorter than 100 samples the original
    // step (lengthSamples / 100) was 0, making the loop below spin forever.
    var step = Math.Max(1, lengthSamples / 100);

    for (var index = historyLengthSamples + 1;
         index <= lengthSamples;
         index += step)
    {
        progress.Report((double)100 * index / lengthSamples);

        var endPosition = Math.Min(index + step, lengthSamples);

        Parallel.For(index, endPosition, indexParallelFor =>
        {
            var fba = new FastBurgAlgorithm64(inputAudio);
            fba.Train(
                indexParallelFor,
                audioData.AudioProcessingSettings.CoefficientsNumber,
                historyLengthSamples);
            forwardPredictions[indexParallelFor] =
                (float)fba.GetForwardPrediction();
            backwardPredictions[indexParallelFor - historyLengthSamples - 1] =
                (float)fba.GetBackwardPrediction();
        });
    }

    progress.Report(0);

    // For the first samples forward predictions were not calculated,
    // so we use backward predictions only
    for (var index = 0; index < historyLengthSamples; index++)
    {
        audioData.SetPredictionErr(
            index,
            audioData.GetInputSample(index) - backwardPredictions[index]);
    }

    // Middle region: prediction error based on the average of
    // forward and backward predictions
    for (var index = historyLengthSamples;
         index < lengthSamples - historyLengthSamples;
         index++)
    {
        audioData.SetPredictionErr(
            index,
            audioData.GetInputSample(index) -
                (forwardPredictions[index] + backwardPredictions[index]) / 2);
    }

    // For the LAST samples backward predictions were not calculated,
    // so we use forward predictions only.
    // BUG FIX: the original loop reused the middle range
    // (historyLengthSamples .. lengthSamples - historyLengthSamples),
    // overwriting the averaged errors with forward-only errors and
    // never setting the tail samples it claimed to handle.
    for (var index = lengthSamples - historyLengthSamples;
         index < lengthSamples;
         index++)
    {
        audioData.SetPredictionErr(
            index,
            audioData.GetInputSample(index) - forwardPredictions[index]);
    }
}