/// <summary>
/// Builds pairs of peaks from the current frame and its target area.
///
/// This is a very naive approach that just iterates linearly through frames and their peaks
/// and generates a pair if the constraints of the target area permit, until the max number
/// of pairs has been generated.
/// </summary>
/// <param name="peakHistory">the history structure to read the peaks from</param>
/// <param name="peakPairs">the list to store the pairs in</param>
private void FindPairsNaive(PeakHistory peakHistory, List<PeakPair> peakPairs)
{
    var halfWidth = profile.TargetZoneWidth / 2;
    var index = peakHistory.Index;
    foreach (var peak in peakHistory.Lists[0])
    {
        int count = 0;
        for (int distance = profile.TargetZoneDistance; distance < peakHistory.Length; distance++)
        {
            foreach (var targetPeak in peakHistory.Lists[distance])
            {
                if (peak.Index >= targetPeak.Index - halfWidth && peak.Index <= targetPeak.Index + halfWidth)
                {
                    peakPairs.Add(new PeakPair
                    {
                        Index = index,
                        Peak1 = peak,
                        Peak2 = targetPeak,
                        Distance = distance
                    });
                    if (++count >= profile.PeakFanout)
                    {
                        break;
                    }
                }
            }
            if (count >= profile.PeakFanout)
            {
                break;
            }
        }
    }
}
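// The following is a minimal standalone sketch (not part of this class) of the target-zone
// test used above: an anchor peak in the current frame is paired with a target peak that
// lies `distance` frames ahead, but only if the anchor's frequency bin falls within
// +/- halfWidth of the target's bin, up to `fanout` pairs per anchor. All type and
// parameter names below are illustrative assumptions, not the library's API.
using System.Collections.Generic;

static class TargetZoneSketch
{
    // framePeakBins[0] holds the peak bins of the current (anchor) frame,
    // framePeakBins[d] the peak bins of the frame d hops later.
    public static IEnumerable<(int anchorBin, int targetBin, int distance)> Pair(
        int[][] framePeakBins, int targetZoneDistance, int targetZoneWidth, int fanout)
    {
        int halfWidth = targetZoneWidth / 2;
        foreach (int anchorBin in framePeakBins[0])
        {
            int count = 0;
            for (int distance = targetZoneDistance;
                 distance < framePeakBins.Length && count < fanout;
                 distance++)
            {
                foreach (int targetBin in framePeakBins[distance])
                {
                    // Pair only if the anchor lies within the +/- halfWidth band around the target
                    if (anchorBin >= targetBin - halfWidth && anchorBin <= targetBin + halfWidth)
                    {
                        yield return (anchorBin, targetBin, distance);
                        if (++count >= fanout) break;
                    }
                }
            }
        }
    }
}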
/// <summary>
/// Builds pairs of peaks from the current frame and its target area.
///
/// This approach generates all possible pairs and then picks the most prominent ones
/// according to their average peak energy. The idea is that these peaks are the ones
/// that most probably survive in high-noise environments.
/// This approach takes a bit longer to compute than the naive approach, but generates
/// much more diverse peaks, spread more evenly across the hash space (this is
/// speculation; not validated). Compared to the naive approach, this results
/// in much faster hash matching and also a lot more matches.
/// </summary>
/// <param name="peakHistory">the history structure to read the peaks from</param>
/// <param name="peakPairs">the list to store the pairs in</param>
private void FindPairsWithMaxEnergy(PeakHistory peakHistory, List<PeakPair> peakPairs)
{
    var halfWidth = profile.TargetZoneWidth / 2;
    var index = peakHistory.Index;

    // Generate all pairs that the target zone permits; the most prominent ones are
    // selected afterwards by their average peak energy.
    foreach (var peak in peakHistory.Lists[0])
    {
        for (int distance = profile.TargetZoneDistance; distance < peakHistory.Length; distance++)
        {
            foreach (var targetPeak in peakHistory.Lists[distance])
            {
                if (peak.Index >= targetPeak.Index - halfWidth && peak.Index <= targetPeak.Index + halfWidth)
                {
                    peakPairs.Add(new PeakPair
                    {
                        Index = index,
                        Peak1 = peak,
                        Peak2 = targetPeak,
                        Distance = distance
                    });
                }
            }
        }
    }

    // Sort the pairs by their average peak energy (descending)
    peakPairs.Sort((pp1, pp2) =>
    {
        var avg1 = pp1.AverageEnergy;
        var avg2 = pp2.AverageEnergy;
        if (avg1 < avg2)
        {
            return 1;
        }
        else if (avg1 > avg2)
        {
            return -1;
        }
        return 0;
    });

    // Select the n most prominent peak pairs
    int maxPairs = Math.Min(profile.PeakFanout, peakPairs.Count);
    if (peakPairs.Count > maxPairs)
    {
        peakPairs.RemoveRange(maxPairs, peakPairs.Count - maxPairs);
    }
}
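// A minimal standalone sketch of the selection step above: generate every candidate pair,
// order by average peak energy (descending), keep only the strongest `fanout` pairs.
// The CandidatePair record and its Energy1/Energy2 fields are hypothetical stand-ins
// for illustration, not the PeakPair type used in this class.
using System.Collections.Generic;
using System.Linq;

record CandidatePair(int AnchorBin, int TargetBin, int Distance, float Energy1, float Energy2)
{
    public float AverageEnergy => (Energy1 + Energy2) / 2;
}

static class MaxEnergySelectionSketch
{
    public static List<CandidatePair> SelectStrongest(List<CandidatePair> allPairs, int fanout)
    {
        return allPairs
            .OrderByDescending(p => p.AverageEnergy) // most energetic pairs first
            .Take(fanout)                            // keep at most `fanout` pairs
            .ToList();
    }
}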
public void Generate(AudioTrack track)
{
    IAudioStream audioStream = new ResamplingStream(
        new MonoStream(AudioStreamFactory.FromFileInfoIeee32(track.FileInfo)),
        ResamplingQuality.Medium,
        profile.SamplingRate);

    STFT stft = new STFT(audioStream, profile.WindowSize, profile.HopSize, WindowType.Hann, STFT.OutputFormat.Decibel);
    int index = 0;
    int indices = stft.WindowCount;
    int processedFrames = 0;

    float[] spectrum = new float[profile.WindowSize / 2];
    float[] smoothedSpectrum = new float[spectrum.Length - profile.SpectrumSmoothingLength + 1]; // the smoothed frequency spectrum of the current frame
    var spectrumSmoother = new SimpleMovingAverage(profile.SpectrumSmoothingLength);
    float[] spectrumTemporalAverage = new float[spectrum.Length]; // a running average of each spectrum bin over time
    float[] spectrumResidual = new float[spectrum.Length]; // the difference between the current spectrum and the moving average spectrum

    var peakHistory = new PeakHistory(1 + profile.TargetZoneDistance + profile.TargetZoneLength, spectrum.Length / 2);
    var peakPairs = new List<PeakPair>(profile.PeaksPerFrame * profile.PeakFanout); // keep a single instance of the list to avoid instantiation overhead
    var subFingerprints = new List<SubFingerprint>();

    while (stft.HasNext())
    {
        // Get the FFT spectrum
        stft.ReadFrame(spectrum);

        // Skip frames whose average spectrum volume is below the threshold.
        // This skips silent frames (zero samples) that only contain very low noise from the FFT
        // and that would screw up the temporal spectrum average below for the following frames.
        if (spectrum.Average() < spectrumMinThreshold)
        {
            index++;
            continue;
        }

        // Smooth the frequency spectrum to remove small peaks
        if (profile.SpectrumSmoothingLength > 0)
        {
            spectrumSmoother.Clear();
            for (int i = 0; i < spectrum.Length; i++)
            {
                var avg = spectrumSmoother.Add(spectrum[i]);
                if (i >= profile.SpectrumSmoothingLength)
                {
                    smoothedSpectrum[i - profile.SpectrumSmoothingLength] = avg;
                }
            }
        }

        // Update the temporal moving bin average
        if (processedFrames == 0)
        {
            // Init averages on first frame
            for (int i = 0; i < spectrum.Length; i++)
            {
                spectrumTemporalAverage[i] = spectrum[i];
            }
        }
        else
        {
            // Update averages on all subsequent frames
            for (int i = 0; i < spectrum.Length; i++)
            {
                spectrumTemporalAverage[i] = ExponentialMovingAverage.UpdateMovingAverage(
                    spectrumTemporalAverage[i],
                    profile.SpectrumTemporalSmoothingCoefficient,
                    spectrum[i]);
            }
        }

        // Calculate the residual.
        // The residual is the difference of the current spectrum to the temporal average spectrum. The higher
        // a bin residual is, the steeper the increase in energy in that peak.
        for (int i = 0; i < spectrum.Length; i++)
        {
            spectrumResidual[i] = spectrum[i] - spectrumTemporalAverage[i] - 90f;
        }

        // Find local peaks in the residual.
        // The advantage of finding peaks in the residual instead of the spectrum is that spectrum energy is usually
        // concentrated in the low frequencies, resulting in a clustering of the highest peaks in the lows. Getting
        // peaks from the residual distributes the peaks more evenly across the spectrum.
        var peaks = peakHistory.List;               // take oldest list,
        peaks.Clear();                              // clear it, and
        FindLocalMaxima(spectrumResidual, peaks);   // refill with new peaks

        // Pick the largest n peaks
        int numMaxima = Math.Min(peaks.Count, profile.PeaksPerFrame);
        if (numMaxima > 0)
        {
            peaks.Sort((p1, p2) => p1.Value == p2.Value ? 0 : p1.Value < p2.Value ? 1 : -1); // order peaks by height (descending)
            if (peaks.Count > numMaxima)
            {
                peaks.RemoveRange(numMaxima, peaks.Count - numMaxima); // select the n tallest peaks by deleting the rest
            }
            peaks.Sort((p1, p2) => p1.Index == p2.Index ? 0 : p1.Index < p2.Index ? -1 : 1); // sort peaks by index (not really necessary)
        }

        peakHistory.Add(index, peaks);

        if (FrameProcessed != null)
        {
            // Mark peaks as 0dB for spectrogram display purposes
            foreach (var peak in peaks)
            {
                spectrum[peak.Index] = 0;
                spectrumResidual[peak.Index] = 0;
            }
            FrameProcessed(this, new FrameProcessedEventArgs
            {
                AudioTrack = track,
                Index = index,
                Indices = indices,
                Spectrum = spectrum,
                SpectrumResidual = spectrumResidual
            });
        }

        processedFrames++;
        index++;

        if (processedFrames >= peakHistory.Length)
        {
            peakPairs.Clear();
            FindPairsWithMaxEnergy(peakHistory, peakPairs);
            ConvertPairsToSubFingerprints(peakPairs, subFingerprints);
        }

        if (subFingerprints.Count > 512)
        {
            FireFingerprintHashesGenerated(track, indices, subFingerprints);
            subFingerprints.Clear();
        }
    }

    // Flush the remaining peaks of the last frames from the history to get all remaining pairs
    for (int i = 0; i < profile.TargetZoneLength; i++)
    {
        var peaks = peakHistory.List;
        peaks.Clear();
        peakHistory.Add(-1, peaks);

        peakPairs.Clear();
        FindPairsWithMaxEnergy(peakHistory, peakPairs);
        ConvertPairsToSubFingerprints(peakPairs, subFingerprints);
    }
    FireFingerprintHashesGenerated(track, indices, subFingerprints);

    audioStream.Close();
}
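// A hedged usage sketch of how a caller might drive Generate(). The generator class name,
// its constructor, the DefaultProfile type and the SubFingerprintsGenerated event are
// assumptions made for illustration; only Generate(AudioTrack), FrameProcessed and
// FrameProcessedEventArgs appear in the code above.
using System;

static class GenerateUsageSketch
{
    public static void Run(AudioTrack audioTrack)
    {
        var generator = new FingerprintGenerator(new DefaultProfile()); // hypothetical ctor/profile

        generator.FrameProcessed += (sender, e) =>
        {
            // e.Index / e.Indices can drive a progress display; e.Spectrum and
            // e.SpectrumResidual can feed a spectrogram view (peaks are marked as 0 dB above)
            Console.WriteLine($"frame {e.Index}/{e.Indices}");
        };

        generator.SubFingerprintsGenerated += (sender, e) =>
        {
            // Collect the emitted hashes, e.g. into a fingerprint store for matching
        };

        generator.Generate(audioTrack);
    }
}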