// [Ignore("The update from 864f7a491e2ea0e938161bd390c1c931ecbdf63c possibly broke this test and I do not know how to repair it. // TODO @towsey")]
public void TwoOscillationTests()
{
    var recording = PathHelper.ResolveAsset("Recordings", "BAC2_20071008-085040.wav");

    // Build the default configuration for the oscillation analysis.
    var configDict = Oscillations2014.GetDefaultConfigDictionary(recording);

    // Make sure the output directory exists before anything is written.
    if (!this.outputDirectory.Exists)
    {
        this.outputDirectory.Create();
    }

    // Produce the FREQUENCY x OSCILLATIONS image and the oscillation data matrix.
    var tuple = Oscillations2014.GenerateOscillationDataAndImages(recording, configDict, true);

    // Derive fixture file names from the recording name.
    var baseName = Path.GetFileNameWithoutExtension(recording.Name);
    var stem = baseName + ".FreqOscilSpectrogram";

    // NOTE(review): despite the .csv extension, this fixture is written/read with Binary.Serialize below.
    var expectedMatrixFile = PathHelper.ResolveAsset("Oscillations2014", stem + ".Matrix.EXPECTED.csv");

    // Flip to true only when the expected fixtures need regenerating.
    // WARNING: doing so overwrites the checked-in assets.
    const bool saveOutput = false;
    if (saveOutput)
    {
        // Save the oscillation spectrogram image.
        string imageName = stem + ".EXPECTED.png";
        string imagePath = Path.Combine(PathHelper.ResolveAssetPath("Oscillations2014"), imageName);
        tuple.Item1.Save(imagePath, ImageFormat.Png);

        // Save the matrix of oscillation data.
        //Csv.WriteMatrixToCsv(expectedMatrixFile, tuple.Item2);
        Binary.Serialize(expectedMatrixFile, tuple.Item2);
    }

    // Test 1: the generated image must have the expected dimensions.
    Assert.AreEqual(350, tuple.Item1.Width);
    Assert.AreEqual(675, tuple.Item1.Height);

    // Test 2: the generated matrix must match the serialized fixture.
    //TODO Following test fails when using CSV reader because the reader cuts out first line of the matrix
    //var expectedMatrix = Csv.ReadMatrixFromCsv<double>(expectedMatrixFile);
    var expectedMatrix = Binary.Deserialize<double[,]>(expectedMatrixFile);
    CollectionAssert.That.AreEqual(expectedMatrix, tuple.Item2, 0.000001);
}
public void SpectralIndexOsc_Test()
{
    var recording = PathHelper.ResolveAsset("Recordings", "BAC2_20071008-085040.wav");

    // Make sure the output directory exists before anything is written.
    if (!this.outputDirectory.Exists)
    {
        this.outputDirectory.Create();
    }

    // Compute the OSC spectral index from the recording using the default parameters.
    var recordingSegment = new AudioRecording(recording.FullName);
    var frameLength = Oscillations2014.DefaultFrameLength;
    var sampleLength = Oscillations2014.DefaultSampleLength;
    var threshold = Oscillations2014.DefaultSensitivityThreshold;
    var spectralIndex = Oscillations2014.GetSpectralIndex_Osc(recordingSegment, frameLength, sampleLength, threshold);

    // Derive the fixture file names from the recording name.
    var baseName = Path.GetFileNameWithoutExtension(recording.Name);
    var stem = baseName + ".SpectralIndex.OSC";
    var expectedIndexPath = PathHelper.ResolveAsset("Oscillations2014", stem + ".EXPECTED.csv");

    // Flip to true only when the expected fixture needs regenerating.
    // WARNING: doing so overwrites the checked-in assets.
    const bool saveOutput = false;
    if (saveOutput)
    {
        // Save the spectral index vector to file.
        //Csv.WriteToCsv(expectedIndexPath, spectralIndex);
        //Json.Serialise(expectedIndexPath, spectralIndex);
        Binary.Serialize(expectedIndexPath, spectralIndex);

        // Also render the vector as an image; not asserted on, but useful to visualise output.
        var expectedVectorImage = ImageTools.DrawVectorInColour(DataTools.reverseArray(spectralIndex), cellWidth: 10);
        var expectedImagePath = PathHelper.ResolveAsset("Oscillations2014", stem + ".png");
        expectedVectorImage.Save(expectedImagePath.FullName, ImageFormat.Png);
    }

    // Render the current vector as an image; not asserted on, but useful to compare
    // against the expected visual output by eye.
    var currentVectorImage = ImageTools.DrawVectorInColour(DataTools.reverseArray(spectralIndex), cellWidth: 10);
    var currentImagePath = Path.Combine(this.outputDirectory.FullName, stem + ".png");
    currentVectorImage.Save(currentImagePath, ImageFormat.Png);

    // Test: the computed vector must match the serialized fixture.
    // TODO this test fails when using CSV reader because the reader cuts out first element/line of the vector
    //var expectedVector = (double[])Csv.ReadFromCsv<double>(expectedIndexPath);
    var expectedVector = Binary.Deserialize<double[]>(expectedIndexPath);
    CollectionAssert.That.AreEqual(expectedVector, spectralIndex, 0.000001);
}
public void TwoOscillationTests()
{
    var recording = PathHelper.ResolveAsset("Recordings", "BAC2_20071008-085040.wav");
    var configFile = PathHelper.ResolveAsset("Oscillations2014", "Towsey.Sonogram.yml");

    // Load the config dictionary and point it at the source recording.
    var configDict = Oscillations2014.GetConfigDictionary(configFile, true);
    configDict[ConfigKeys.Recording.Key_RecordingCallName] = recording.FullName;
    configDict[ConfigKeys.Recording.Key_RecordingFileName] = recording.Name;

    // Make sure the output directory exists before anything is written.
    if (!this.outputDirectory.Exists)
    {
        this.outputDirectory.Create();
    }

    // Produce the FREQUENCY x OSCILLATIONS image, matrix and spectral-index vector.
    var tuple = Oscillations2014.GenerateOscillationDataAndImages(recording, configDict, true);

    // Determine the sample length, i.e. the number of frames spanned to calculate
    // oscillations per second; the config may override the default.
    int sampleLength = Oscillations2014.DefaultSampleLength;
    if (configDict.ContainsKey(AnalysisKeys.OscilDetection2014SampleLength))
    {
        sampleLength = int.Parse(configDict[AnalysisKeys.OscilDetection2014SampleLength]);
    }

    // Derive the fixture file names; the sample length is baked into the stem.
    var baseName = Path.GetFileNameWithoutExtension(recording.Name);
    var stem = baseName + ".FreqOscilSpectrogram_" + sampleLength;
    var expectedMatrixFile = PathHelper.ResolveAsset("Oscillations2014", stem + ".Matrix.EXPECTED.bin");
    var expectedSpectrumFile = PathHelper.ResolveAsset("Oscillations2014", stem + ".Vector.EXPECTED.bin");

    // Flip to true only when the expected image and data fixtures need regenerating.
    // (############ IMPORTANT: remember to move saved files OUT of bin/Debug directory!)
    const bool saveOutput = false;
    if (saveOutput)
    {
        // Save the oscillation spectrogram image.
        string imageName = stem + ".EXPECTED.png";
        string imagePath = Path.Combine(this.outputDirectory.FullName, imageName);
        tuple.Item1.Save(imagePath, ImageFormat.Png);

        // Save the matrix of oscillation data.
        // Acoustics.Shared.Csv.Csv.WriteMatrixToCsv(expectedMatrixFile.FullName, tuple.Item2);
        Binary.Serialize(expectedMatrixFile, tuple.Item2);

        // Save the oscillations spectrum, i.e. the OSC spectral index.
        // Acoustics.Shared.Csv.Csv.WriteToCsv(spectralFile, tuple.Item3);
        // Json.Serialise(expectedSpectrumFile, tuple.Item3);
        Binary.Serialize(expectedSpectrumFile, tuple.Item3);
    }

    // Test 1: the generated image must have the expected dimensions.
    Assert.AreEqual(356, tuple.Item1.Width);
    Assert.AreEqual(678, tuple.Item1.Height);

    // Test 2: the generated matrix must match the serialized fixture.
    var expectedMatrix = Binary.Deserialize<double[,]>(expectedMatrixFile);
    CollectionAssert.AreEqual(expectedMatrix, tuple.Item2);

    // Test 3: the OSC spectral index must match the serialized fixture.
    // var expectedVector = Json.Deserialise<double[]>(expectedSpectrumFile);
    var expectedVector = Binary.Deserialize<double[]>(expectedSpectrumFile);
    CollectionAssert.AreEqual(expectedVector, tuple.Item3);
}
//////public static IndexCalculateResult Analysis(
/// <summary>
/// Calculates the six spectral indices used for content description (OSC, ACI, ENT, BGN, PMN, EVN)
/// from a single recording segment.
/// This is a stripped-down fork of the full IndexCalculateResult analysis; the commented-out
/// `//////` lines mark the parts of the original that were removed.
/// </summary>
/// <param name="recording">the recording segment to analyse.</param>
/// <param name="segmentOffsetTimeSpan">offset of this segment within the original recording; currently only used by removed code.</param>
/// <param name="sampleRateOfOriginalAudioFile">sample rate of the original audio, used to detect up-sampling.</param>
/// <param name="returnSonogramInfo">currently has no effect — its only use is in commented-out code below.</param>
/// <returns>a SpectralIndexValuesForContentDescription populated with the six spectral index vectors.</returns>
public static SpectralIndexValuesForContentDescription Analysis(
    AudioRecording recording,
    TimeSpan segmentOffsetTimeSpan,
    int sampleRateOfOriginalAudioFile,
    bool returnSonogramInfo = false)
{
    // returnSonogramInfo = true; // if debugging
    double epsilon = recording.Epsilon;
    int sampleRate = recording.WavReader.SampleRate;
    //var segmentDuration = TimeSpan.FromSeconds(recording.WavReader.Time.TotalSeconds);
    var indexCalculationDuration = TimeSpan.FromSeconds(ContentSignatures.IndexCalculationDurationInSeconds);

    // Get FRAME parameters for the calculation of Acoustic Indices
    int frameSize = ContentSignatures.FrameSize;
    int frameStep = frameSize; // that is, windowOverlap = zero
    double frameStepDuration = frameStep / (double)sampleRate; // fraction of a second
    var frameStepTimeSpan = TimeSpan.FromTicks((long)(frameStepDuration * TimeSpan.TicksPerSecond));

    // INITIALISE a RESULTS STRUCTURE TO return
    // initialize a result object in which to store SummaryIndexValues and SpectralIndexValues etc.
    var config = new IndexCalculateConfig(); // sets some default values
    int freqBinCount = frameSize / 2;
    var indexProperties = GetIndexProperties();
    ////////var result = new IndexCalculateResult(freqBinCount, indexProperties, indexCalculationDuration, segmentOffsetTimeSpan, config);
    var spectralIndices = new SpectralIndexValuesForContentDescription();
    ///////result.SummaryIndexValues = null;
    ///////SpectralIndexValues spectralIndices = result.SpectralIndexValues;

    // set up default spectrogram to return
    ///////result.Sg = returnSonogramInfo ? GetSonogram(recording, windowSize: 1024) : null;
    ///////result.Hits = null;
    ///////result.TrackScores = new List<Plot>();

    // ################################## FINISHED SET-UP
    // ################################## NOW GET THE AMPLITUDE SPECTROGRAM

    // EXTRACT ENVELOPE and SPECTROGRAM FROM RECORDING SEGMENT
    // Note that the amplitude spectrogram has had the DC bin removed. i.e. has only 256 columns.
    var dspOutput1 = DSP_Frames.ExtractEnvelopeAndFfts(recording, frameSize, frameStep);
    var amplitudeSpectrogram = dspOutput1.AmplitudeSpectrogram;

    // (B) ################################## EXTRACT OSC SPECTRAL INDEX DIRECTLY FROM THE RECORDING ##################################
    // Get the oscillation spectral index OSC separately from signal because need a different frame size etc.
    var sampleLength = Oscillations2014.DefaultSampleLength;
    var frameLength = Oscillations2014.DefaultFrameLength;
    var sensitivity = Oscillations2014.DefaultSensitivityThreshold;
    var spectralIndexShort = Oscillations2014.GetSpectralIndex_Osc(recording, frameLength, sampleLength, sensitivity);

    // double length of the vector because want to work with 256 element vector for spectrogram purposes
    spectralIndices.OSC = DataTools.VectorDoubleLengthByAverageInterpolation(spectralIndexShort);

    // (C) ################################## EXTRACT SPECTRAL INDICES FROM THE AMPLITUDE SPECTROGRAM ##################################

    // IFF there has been UP-SAMPLING, calculate bin of the original audio nyquist. this will be less than SR/2.
    // original sample rate can be anything 11.0-44.1 kHz.
    int originalNyquist = sampleRateOfOriginalAudioFile / 2;

    // if up-sampling has been done, clamp the nyquist back to that of the original audio
    if (dspOutput1.NyquistFreq > originalNyquist)
    {
        dspOutput1.NyquistFreq = originalNyquist;
        dspOutput1.NyquistBin = (int)Math.Floor(originalNyquist / dspOutput1.FreqBinWidth); // note that bin width does not change
    }

    // ii: CALCULATE THE ACOUSTIC COMPLEXITY INDEX
    spectralIndices.ACI = AcousticComplexityIndex.CalculateAci(amplitudeSpectrogram);

    // iii: CALCULATE the H(t) or Temporal ENTROPY Spectrum and then reverse the values i.e. calculate 1-Ht for energy concentration
    double[] temporalEntropySpectrum = AcousticEntropy.CalculateTemporalEntropySpectrum(amplitudeSpectrogram);
    for (int i = 0; i < temporalEntropySpectrum.Length; i++)
    {
        temporalEntropySpectrum[i] = 1 - temporalEntropySpectrum[i];
    }

    spectralIndices.ENT = temporalEntropySpectrum;

    // (C) ################################## EXTRACT SPECTRAL INDICES FROM THE DECIBEL SPECTROGRAM ##################################

    // i: Convert amplitude spectrogram to decibels and calculate the dB background noise profile
    double[,] decibelSpectrogram = MFCCStuff.DecibelSpectra(dspOutput1.AmplitudeSpectrogram, dspOutput1.WindowPower, sampleRate, epsilon);
    double[] spectralDecibelBgn = NoiseProfile.CalculateBackgroundNoise(decibelSpectrogram);
    spectralIndices.BGN = spectralDecibelBgn;

    // ii: Calculate the noise reduced decibel spectrogram derived from segment recording.
    // REUSE the var decibelSpectrogram but this time using dspOutput1.
    // NOTE: this recomputes the same decibel spectrogram as above before noise-reducing it;
    // that duplication is deliberate per the comment above.
    decibelSpectrogram = MFCCStuff.DecibelSpectra(dspOutput1.AmplitudeSpectrogram, dspOutput1.WindowPower, sampleRate, epsilon);
    decibelSpectrogram = SNR.TruncateBgNoiseFromSpectrogram(decibelSpectrogram, spectralDecibelBgn);
    decibelSpectrogram = SNR.RemoveNeighbourhoodBackgroundNoise(decibelSpectrogram, nhThreshold: 2.0);

    // iii: CALCULATE noise reduced AVERAGE DECIBEL SPECTRUM
    spectralIndices.PMN = SpectrogramTools.CalculateAvgDecibelSpectrumFromDecibelSpectrogram(decibelSpectrogram);

    // ######################################################################################################################################################
    // iv: CALCULATE SPECTRAL COVER. NOTE: at this point, decibelSpectrogram is noise reduced. All values >= 0.0
    // FreqBinWidth can be accessed, if required, through dspOutput1.FreqBinWidth

    // dB THRESHOLD for calculating spectral coverage
    double dBThreshold = ActivityAndCover.DefaultActivityThresholdDb;

    // Calculate lower and upper boundary bin ids.
    // Boundary between low & mid frequency bands is to avoid low freq bins containing anthropogenic noise. These biased index values away from bio-phony.
    int midFreqBound = config.MidFreqBound;
    int lowFreqBound = config.LowFreqBound;
    int lowerBinBound = (int)Math.Ceiling(lowFreqBound / dspOutput1.FreqBinWidth);
    int middleBinBound = (int)Math.Ceiling(midFreqBound / dspOutput1.FreqBinWidth);
    var spActivity = ActivityAndCover.CalculateSpectralEvents(decibelSpectrogram, dBThreshold, frameStepTimeSpan, lowerBinBound, middleBinBound);

    //spectralIndices.CVR = spActivity.CoverSpectrum;
    spectralIndices.EVN = spActivity.EventSpectrum;

    ///////result.TrackScores = null;
    ///////return result;
    return(spectralIndices);
}

// end calculation of Six Spectral Indices
/// <summary>
/// ################ THE KEY ANALYSIS METHOD for TRILLS.
///
/// Scans the recording for trill events (oscillations found via a DCT over a band-difference
/// score array) and for brief "tink" events, and returns the sonogram, score array,
/// confirmed events and an optional debug image.
/// See Anthony's ExempliGratia.Recognize() method in order to see how to use methods for config profiles.
/// </summary>
/// <param name="recording">the audio recording to analyse; when null, analysis is not possible and null is returned.</param>
/// <param name="sonoConfig">configuration for the working spectrogram.</param>
/// <param name="lwConfig">species-specific thresholds, frequency-band limits and event durations.</param>
/// <param name="returnDebugImage">when true, a composite debug image is drawn and returned in the tuple.</param>
/// <param name="segmentStartOffset">offset of this segment within the original recording.</param>
/// <returns>tuple of (return sonogram, hits matrix (always null), score array, confirmed events, debug image or null).</returns>
private static Tuple<BaseSonogram, double[,], double[], List<AcousticEvent>, Image> Analysis(
    AudioRecording recording,
    SonogramConfig sonoConfig,
    LitoriaWatjulumConfig lwConfig,
    bool returnDebugImage,
    TimeSpan segmentStartOffset)
{
    double intensityThreshold = lwConfig.IntensityThreshold;
    double minDuration = lwConfig.MinDurationOfTrill; // seconds
    double maxDuration = lwConfig.MaxDurationOfTrill; // seconds
    double minPeriod = lwConfig.MinPeriod; // seconds
    double maxPeriod = lwConfig.MaxPeriod; // seconds

    if (recording == null)
    {
        LoggedConsole.WriteLine("AudioRecording == null. Analysis not possible.");
        return null;
    }

    //i: MAKE SONOGRAM
    //TimeSpan tsRecordingtDuration = recording.Duration();
    int sr = recording.SampleRate;
    double freqBinWidth = sr / (double)sonoConfig.WindowSize;
    double framesPerSecond = freqBinWidth;

    // duration of DCT in seconds - want it to be about 3X or 4X the expected maximum period
    double dctDuration = 4 * maxPeriod;

    // duration of DCT in frames
    int dctLength = (int)Math.Round(framesPerSecond * dctDuration);

    // set up the cosine coefficients
    double[,] cosines = MFCCStuff.Cosines(dctLength, dctLength);

    int upperBandMinBin = (int)Math.Round(lwConfig.UpperBandMinHz / freqBinWidth) + 1;
    int upperBandMaxBin = (int)Math.Round(lwConfig.UpperBandMaxHz / freqBinWidth) + 1;
    int lowerBandMinBin = (int)Math.Round(lwConfig.LowerBandMinHz / freqBinWidth) + 1;
    int lowerBandMaxBin = (int)Math.Round(lwConfig.LowerBandMaxHz / freqBinWidth) + 1;

    BaseSonogram sonogram = new SpectrogramStandard(sonoConfig, recording.WavReader);
    int rowCount = sonogram.Data.GetLength(0);
    //int colCount = sonogram.Data.GetLength(1);

    double[] lowerArray = MatrixTools.GetRowAveragesOfSubmatrix(sonogram.Data, 0, lowerBandMinBin, rowCount - 1, lowerBandMaxBin);
    double[] upperArray = MatrixTools.GetRowAveragesOfSubmatrix(sonogram.Data, 0, upperBandMinBin, rowCount - 1, upperBandMaxBin);

    //lowerArray = DataTools.filterMovingAverage(lowerArray, 3);
    //upperArray = DataTools.filterMovingAverage(upperArray, 3);

    double[] amplitudeScores = DataTools.SumMinusDifference(lowerArray, upperArray);
    double[] differenceScores = DspFilters.SubtractBaseline(amplitudeScores, 7);

    // Could smooth here rather than above. Above seemed slightly better?
    //amplitudeScores = DataTools.filterMovingAverage(amplitudeScores, 7);
    //differenceScores = DataTools.filterMovingAverage(differenceScores, 7);

    //iii: CONVERT decibel sum-diff SCORES TO ACOUSTIC TRILL EVENTS
    var predictedTrillEvents = AcousticEvent.ConvertScoreArray2Events(
        amplitudeScores,
        lwConfig.LowerBandMinHz,
        lwConfig.UpperBandMaxHz,
        sonogram.FramesPerSecond,
        freqBinWidth,
        lwConfig.DecibelThreshold,
        minDuration,
        maxDuration,
        segmentStartOffset);

    // zero out small difference scores so they cannot register as oscillations
    for (int i = 0; i < differenceScores.Length; i++)
    {
        if (differenceScores[i] < 1.0)
        {
            differenceScores[i] = 0.0;
        }
    }

    // LOOK FOR TRILL EVENTS
    // init the score array
    double[] scores = new double[rowCount];

    // var hits = new double[rowCount, colCount];
    double[,] hits = null;

    // init confirmed events
    var confirmedEvents = new List<AcousticEvent>();

    // add names into the returned events
    foreach (var ae in predictedTrillEvents)
    {
        int eventStart = ae.Oblong.RowTop;
        int eventWidth = ae.Oblong.RowWidth;
        int step = 2;
        double maximumIntensity = 0.0;

        // scan the event to get oscillation period and intensity
        // NOTE(review): i can start negative when eventStart < dctLength/2 — presumably
        // DataTools.Subarray tolerates this, but scores[i + j] below could then index
        // out of range; confirm against the helpers' behaviour.
        for (int i = eventStart - (dctLength / 2); i < eventStart + eventWidth - (dctLength / 2); i += step)
        {
            // Look for oscillations in the difference array.
            double[] differenceArray = DataTools.Subarray(differenceScores, i, dctLength);

            // out-var declarations for consistency with the sibling Analysis() method;
            // the oscillation frequency is not used here, so it is discarded.
            Oscillations2014.GetOscillation(differenceArray, framesPerSecond, cosines, out _, out var period, out var intensity);

            bool periodWithinBounds = period > minPeriod && period < maxPeriod;
            //Console.WriteLine($"step={i} period={period:f4}");

            if (!periodWithinBounds)
            {
                continue;
            }

            for (int j = 0; j < dctLength; j++) //lay down score for sample length
            {
                if (scores[i + j] < intensity)
                {
                    scores[i + j] = intensity;
                }
            }

            if (maximumIntensity < intensity)
            {
                maximumIntensity = intensity;
            }
        }

        // add abbreviatedSpeciesName into event
        if (maximumIntensity >= intensityThreshold)
        {
            ae.Name = $"{lwConfig.AbbreviatedSpeciesName}.{lwConfig.ProfileNames[0]}";
            ae.Score_MaxInEvent = maximumIntensity;
            ae.Profile = lwConfig.ProfileNames[0];
            confirmedEvents.Add(ae);
        }
    }

    //######################################################################
    // LOOK FOR TINK EVENTS
    // CONVERT decibel sum-diff SCORES TO ACOUSTIC EVENTS
    double minDurationOfTink = lwConfig.MinDurationOfTink; // seconds
    double maxDurationOfTink = lwConfig.MaxDurationOfTink; // seconds

    // want stronger threshold for tink because brief.
    double tinkDecibelThreshold = lwConfig.DecibelThreshold + 3.0;
    var predictedTinkEvents = AcousticEvent.ConvertScoreArray2Events(
        amplitudeScores,
        lwConfig.LowerBandMinHz,
        lwConfig.UpperBandMaxHz,
        sonogram.FramesPerSecond,
        freqBinWidth,
        tinkDecibelThreshold,
        minDurationOfTink,
        maxDurationOfTink,
        segmentStartOffset);

    foreach (var ae2 in predictedTinkEvents)
    {
        // Prune the list of potential acoustic events, for example using Cosine Similarity.
        //rowtop, rowWidth
        //int eventStart = ae2.Oblong.RowTop;
        //int eventWidth = ae2.Oblong.RowWidth;
        //int step = 2;
        //double maximumIntensity = 0.0;

        // add abbreviatedSpeciesName into event
        //if (maximumIntensity >= intensityThreshold)
        //{
        ae2.Name = $"{lwConfig.AbbreviatedSpeciesName}.{lwConfig.ProfileNames[1]}";
        //ae2.Score_MaxInEvent = maximumIntensity;
        ae2.Profile = lwConfig.ProfileNames[1];
        confirmedEvents.Add(ae2);
        //}
    }

    //######################################################################

    var scorePlot = new Plot(lwConfig.SpeciesName, scores, intensityThreshold);
    Image debugImage = null;
    if (returnDebugImage)
    {
        // display a variety of debug score arrays
        DataTools.Normalise(amplitudeScores, lwConfig.DecibelThreshold, out var normalisedScores, out var normalisedThreshold);
        var sumDiffPlot = new Plot("Sum Minus Difference", normalisedScores, normalisedThreshold);
        DataTools.Normalise(differenceScores, lwConfig.DecibelThreshold, out normalisedScores, out normalisedThreshold);
        var differencePlot = new Plot("Baseline Removed", normalisedScores, normalisedThreshold);

        var debugPlots = new List<Plot> { scorePlot, sumDiffPlot, differencePlot };
        debugImage = DrawDebugImage(sonogram, confirmedEvents, debugPlots, hits);
    }

    // return new sonogram because it makes for more easy interpretation of the image
    var returnSonoConfig = new SonogramConfig
    {
        SourceFName = recording.BaseName,
        WindowSize = 512,
        WindowOverlap = 0,

        // the default window is HAMMING
        //WindowFunction = WindowFunctions.HANNING.ToString(),
        //WindowFunction = WindowFunctions.NONE.ToString(),

        // if do not use noise reduction can get a more sensitive recogniser.
        //NoiseReductionType = NoiseReductionType.NONE,
        NoiseReductionType = SNR.KeyToNoiseReductionType("STANDARD"),
    };
    BaseSonogram returnSonogram = new SpectrogramStandard(returnSonoConfig, recording.WavReader);
    return Tuple.Create(returnSonogram, hits, scores, confirmedEvents, debugImage);
} //Analysis()
/// <summary>
/// THE KEY ANALYSIS METHOD.
/// Detects Litoria bicolor events: converts band-difference amplitude scores into candidate
/// acoustic events, then confirms each one by detecting an oscillation (via DCT) whose period
/// lies within the configured bounds.
/// </summary>
/// <param name="recording">the audio recording to analyse; when null, analysis is not possible and null is returned.</param>
/// <param name="sonoConfig">configuration for the working spectrogram.</param>
/// <param name="lbConfig">species-specific thresholds, frequency-band limits and event durations.</param>
/// <param name="drawDebugImage">when true, a composite debug image is drawn and returned in the tuple.</param>
/// <param name="segmentStartOffset">offset of this segment within the original recording.</param>
/// <returns>tuple of (return sonogram, hits matrix (always null), score array, confirmed events, debug image or null).</returns>
public static Tuple<BaseSonogram, double[,], double[], List<AcousticEvent>, Image> Analysis(
    AudioRecording recording,
    SonogramConfig sonoConfig,
    LitoriaBicolorConfig lbConfig,
    bool drawDebugImage,
    TimeSpan segmentStartOffset)
{
    double decibelThreshold = lbConfig.DecibelThreshold; //dB
    double intensityThreshold = lbConfig.IntensityThreshold;
    //double eventThreshold = lbConfig.EventThreshold; //in 0-1

    if (recording == null)
    {
        LoggedConsole.WriteLine("AudioRecording == null. Analysis not possible.");
        return(null);
    }

    //i: MAKE SONOGRAM
    //TimeSpan tsRecordingtDuration = recording.Duration();
    int sr = recording.SampleRate;
    double freqBinWidth = sr / (double)sonoConfig.WindowSize;
    double framesPerSecond = freqBinWidth;

    // duration of DCT in seconds - want it to be about 3X or 4X the expected maximum period
    double dctDuration = 3 * lbConfig.MaxPeriod;

    // duration of DCT in frames
    int dctLength = (int)Math.Round(framesPerSecond * dctDuration);

    // set up the cosine coefficients
    double[,] cosines = MFCCStuff.Cosines(dctLength, dctLength);

    // the +1 skips the DC bin when converting Hz bounds to bin indices
    int upperBandMinBin = (int)Math.Round(lbConfig.UpperBandMinHz / freqBinWidth) + 1;
    int upperBandMaxBin = (int)Math.Round(lbConfig.UpperBandMaxHz / freqBinWidth) + 1;
    int lowerBandMinBin = (int)Math.Round(lbConfig.LowerBandMinHz / freqBinWidth) + 1;
    int lowerBandMaxBin = (int)Math.Round(lbConfig.LowerBandMaxHz / freqBinWidth) + 1;

    BaseSonogram sonogram = new SpectrogramStandard(sonoConfig, recording.WavReader);
    int rowCount = sonogram.Data.GetLength(0);
    int colCount = sonogram.Data.GetLength(1);

    // average energy per frame in the lower and upper frequency bands
    double[] lowerArray = MatrixTools.GetRowAveragesOfSubmatrix(sonogram.Data, 0, lowerBandMinBin, rowCount - 1, lowerBandMaxBin);
    double[] upperArray = MatrixTools.GetRowAveragesOfSubmatrix(sonogram.Data, 0, upperBandMinBin, rowCount - 1, upperBandMaxBin);

    //lowerArray = DataTools.filterMovingAverage(lowerArray, 3);
    //upperArray = DataTools.filterMovingAverage(upperArray, 3);

    double[] amplitudeScores = DataTools.SumMinusDifference(lowerArray, upperArray);
    double[] differenceScores = DspFilters.PreEmphasis(amplitudeScores, 1.0);

    // Could smooth here rather than above. Above seemed slightly better?
    amplitudeScores = DataTools.filterMovingAverage(amplitudeScores, 7);
    differenceScores = DataTools.filterMovingAverage(differenceScores, 7);

    //iii: CONVERT decibel sum-diff SCORES TO ACOUSTIC EVENTS
    var predictedEvents = AcousticEvent.ConvertScoreArray2Events(
        amplitudeScores,
        lbConfig.LowerBandMinHz,
        lbConfig.UpperBandMaxHz,
        sonogram.FramesPerSecond,
        freqBinWidth,
        decibelThreshold,
        lbConfig.MinDuration,
        lbConfig.MaxDuration,
        segmentStartOffset);

    // zero out small difference scores so they cannot register as oscillations
    for (int i = 0; i < differenceScores.Length; i++)
    {
        if (differenceScores[i] < 1.0)
        {
            differenceScores[i] = 0.0;
        }
    }

    // init the score array
    double[] scores = new double[rowCount];

    //iii: CONVERT SCORES TO ACOUSTIC EVENTS
    // var hits = new double[rowCount, colCount];
    double[,] hits = null;

    // init confirmed events
    var confirmedEvents = new List<AcousticEvent>();

    // add names into the returned events
    foreach (var ae in predictedEvents)
    {
        //rowtop, rowWidth
        int eventStart = ae.Oblong.RowTop;
        int eventWidth = ae.Oblong.RowWidth;
        int step = 2;
        double maximumIntensity = 0.0;

        // scan the event to get oscillation period and intensity
        // NOTE(review): i can start negative when eventStart < dctLength/2 — presumably
        // DataTools.Subarray tolerates this, but scores[i + j] below could then index
        // out of range; confirm against the helpers' behaviour.
        for (int i = eventStart - (dctLength / 2); i < eventStart + eventWidth - (dctLength / 2); i += step)
        {
            // Look for oscillations in the difference array
            double[] differenceArray = DataTools.Subarray(differenceScores, i, dctLength);
            Oscillations2014.GetOscillationUsingDct(differenceArray, framesPerSecond, cosines, out var oscilFreq, out var period, out var intensity);

            bool periodWithinBounds = period > lbConfig.MinPeriod && period < lbConfig.MaxPeriod;
            //Console.WriteLine($"step={i} period={period:f4}");

            if (!periodWithinBounds)
            {
                continue;
            }

            // lay down score for sample length
            for (int j = 0; j < dctLength; j++)
            {
                if (scores[i + j] < intensity)
                {
                    scores[i + j] = intensity;
                }
            }

            if (maximumIntensity < intensity)
            {
                maximumIntensity = intensity;
            }
        }

        // add abbreviatedSpeciesName into event
        if (maximumIntensity >= intensityThreshold)
        {
            ae.Name = "L.b";
            ae.Score_MaxInEvent = maximumIntensity;
            confirmedEvents.Add(ae);
        }
    }

    //######################################################################

    // calculate the cosine similarity scores
    var scorePlot = new Plot(lbConfig.SpeciesName, scores, intensityThreshold);

    //DEBUG IMAGE this recognizer only. MUST set false for deployment.
    Image debugImage = null;
    if (drawDebugImage)
    {
        // display a variety of debug score arrays
        //DataTools.Normalise(scores, eventDecibelThreshold, out normalisedScores, out normalisedThreshold);
        //var debugPlot = new Plot("Score", normalisedScores, normalisedThreshold);
        //DataTools.Normalise(upperArray, eventDecibelThreshold, out normalisedScores, out normalisedThreshold);
        //var upperPlot = new Plot("Upper", normalisedScores, normalisedThreshold);
        //DataTools.Normalise(lowerArray, eventDecibelThreshold, out normalisedScores, out normalisedThreshold);
        //var lowerPlot = new Plot("Lower", normalisedScores, normalisedThreshold);
        DataTools.Normalise(amplitudeScores, decibelThreshold, out var normalisedScores, out var normalisedThreshold);
        var sumDiffPlot = new Plot("SumMinusDifference", normalisedScores, normalisedThreshold);
        DataTools.Normalise(differenceScores, 3.0, out normalisedScores, out normalisedThreshold);
        var differencePlot = new Plot("Difference", normalisedScores, normalisedThreshold);

        var debugPlots = new List<Plot> { scorePlot, sumDiffPlot, differencePlot };

        // other debug plots
        //var debugPlots = new List<Plot> { scorePlot, upperPlot, lowerPlot, sumDiffPlot, differencePlot };
        debugImage = DisplayDebugImage(sonogram, confirmedEvents, debugPlots, hits);
    }

    // return new sonogram because it makes for more easy interpretation of the image
    var returnSonoConfig = new SonogramConfig
    {
        SourceFName = recording.BaseName,
        WindowSize = 512,
        WindowOverlap = 0,

        // the default window is HAMMING
        //WindowFunction = WindowFunctions.HANNING.ToString(),
        //WindowFunction = WindowFunctions.NONE.ToString(),

        // if do not use noise reduction can get a more sensitive recogniser.
        //NoiseReductionType = NoiseReductionType.NONE,
        NoiseReductionType = SNR.KeyToNoiseReductionType("STANDARD"),
    };
    BaseSonogram returnSonogram = new SpectrogramStandard(returnSonoConfig, recording.WavReader);
    return(Tuple.Create(returnSonogram, hits, scores, confirmedEvents, debugImage));
} //Analysis()
/// <summary>
/// Extracts summary and spectral acoustic indices from the passed recording segment.
/// Called once per index-calculation subsegment (typically 60 seconds, but can be much shorter,
/// e.g. 0.1s — see backtracking logic below).
/// </summary>
/// <param name="recording">The one-minute (or shorter) audio segment to analyse.</param>
/// <param name="subsegmentOffsetTimeSpan">Offset of this subsegment from the start of the original source file.</param>
/// <param name="indexProperties">Dictionary of index metadata used to initialise the result structure.</param>
/// <param name="sampleRateOfOriginalAudioFile">Sample rate of the ORIGINAL audio; used to detect up-sampling (see Nyquist correction below).</param>
/// <param name="segmentStartOffset">Offset of the current segment from the start of the source file; used to localise the subsegment within this recording.</param>
/// <param name="config">Configuration controlling frame size, frequency scale, band limits, noise buffer, etc.</param>
/// <param name="returnSonogramInfo">If true, a sonogram is generated and attached to the result (debug aid).</param>
/// <returns>An <see cref="IndexCalculateResult"/> containing SummaryIndexValues, SpectralIndexValues and optional debug artefacts.</returns>
public static IndexCalculateResult Analysis(
    AudioRecording recording,
    TimeSpan subsegmentOffsetTimeSpan,
    Dictionary<string, IndexProperties> indexProperties,
    int sampleRateOfOriginalAudioFile,
    TimeSpan segmentStartOffset,
    IndexCalculateConfig config,
    bool returnSonogramInfo = false)
{
    // returnSonogramInfo = true; // if debugging
    double epsilon = recording.Epsilon;
    int signalLength = recording.WavReader.GetChannel(0).Length;
    int sampleRate = recording.WavReader.SampleRate;
    var segmentDuration = TimeSpan.FromSeconds(recording.WavReader.Time.TotalSeconds);
    var indexCalculationDuration = config.IndexCalculationDurationTimeSpan;
    int nyquist = sampleRate / 2;

    // Get FRAME parameters for the calculation of Acoustic Indices
    // WARNING: DO NOT USE Frame Overlap when calculating acoustic indices.
    //          It yields ACI, BGN, POW and EVN results that are significantly different from the default.
    //          I have not had time to check if the difference is meaningful. Best to avoid.
    //int frameSize = (int?)config[AnalysisKeys.FrameLength] ?? IndexCalculateConfig.DefaultWindowSize;
    int frameSize = config.FrameLength;
    int frameStep = frameSize; // that is, windowOverlap = zero

    double frameStepDuration = frameStep / (double)sampleRate; // fraction of a second
    var frameStepTimeSpan = TimeSpan.FromTicks((long)(frameStepDuration * TimeSpan.TicksPerSecond));

    int midFreqBound = config.MidFreqBound;
    int lowFreqBound = config.LowFreqBound;
    int freqBinCount = frameSize / 2;

    // double freqBinWidth = recording.Nyquist / (double)freqBinCount;

    // get duration in seconds and sample count and frame count
    double subsegmentDurationInSeconds = indexCalculationDuration.TotalSeconds;
    int subsegmentSampleCount = (int)(subsegmentDurationInSeconds * sampleRate);
    double subsegmentFrameCount = subsegmentSampleCount / (double)frameStep;
    subsegmentFrameCount = (int)Math.Ceiling(subsegmentFrameCount);

    // In order not to lose the last fractional frame, round up the frame number
    // and get the exact number of samples in the integer number of frames.
    // Do this because when IndexCalculationDuration = 100ms, the number of frames is only 8.
    subsegmentSampleCount = (int)(subsegmentFrameCount * frameStep);

    // get start and end samples of the subsegment and noise segment
    double localOffsetInSeconds = subsegmentOffsetTimeSpan.TotalSeconds - segmentStartOffset.TotalSeconds;
    int startSample = (int)(localOffsetInSeconds * sampleRate);
    int endSample = startSample + subsegmentSampleCount - 1;

    // Default behaviour: set SUBSEGMENT = total recording
    var subsegmentRecording = recording;

    // But if the indexCalculationDuration < segmentDuration, cut out the subsegment.
    if (indexCalculationDuration < segmentDuration)
    {
        // Minimum samples needed to calculate acoustic indices. This value was chosen somewhat arbitrarily.
        // It allows for the case where IndexCalculationDuration = 100ms which is approx 8 frames.
        int minimumViableSampleCount = frameSize * 8;
        int availableSignal = signalLength - startSample;

        // if (the required audio is beyond recording OR insufficient for analysis) then backtrack.
        if (availableSignal < minimumViableSampleCount)
        {
            // Back-track so we can fill a whole result.
            // This is a silent correction, equivalent to having a segment overlap for the last segment.
            var oldStart = startSample;
            startSample = signalLength - subsegmentSampleCount;
            endSample = signalLength;

            Logger.Trace(" Backtrack subsegment to fill missing data from imperfect audio cuts because not enough samples available. " + (oldStart - startSample) + " samples overlap.");
        }

        // NOTE(review): the subsegment WavReader is constructed as mono/16-bit regardless of source — presumably
        // safe because recording.WavReader.Samples is already a single channel; confirm for multi-channel input.
        var subsamples = DataTools.Subarray(recording.WavReader.Samples, startSample, subsegmentSampleCount);
        var wr = new Acoustics.Tools.Wav.WavReader(subsamples, 1, 16, sampleRate);
        subsegmentRecording = new AudioRecording(wr);
    }

    // INITIALISE a RESULTS STRUCTURE TO return
    // initialize a result object in which to store SummaryIndexValues and SpectralIndexValues etc.
    var result = new IndexCalculateResult(freqBinCount, indexProperties, indexCalculationDuration, subsegmentOffsetTimeSpan, config);
    SummaryIndexValues summaryIndices = result.SummaryIndexValues;
    SpectralIndexValues spectralIndices = result.SpectralIndexValues;

    // set up default spectrogram to return
    result.Sg = returnSonogramInfo ? GetSonogram(recording, windowSize: 1024) : null;
    result.Hits = null;
    result.TrackScores = new List<Plot>();

    // ################################## FINSIHED SET-UP
    // ################################## NOW GET THE AMPLITUDE SPECTORGRAMS

    // EXTRACT ENVELOPE and SPECTROGRAM FROM SUBSEGMENT
    var dspOutput1 = DSP_Frames.ExtractEnvelopeAndFfts(subsegmentRecording, frameSize, frameStep);

    // Select band according to min and max bandwidth (fractions of the full bin range).
    int minBand = (int)(dspOutput1.AmplitudeSpectrogram.GetLength(1) * config.MinBandWidth);
    int maxBand = (int)(dspOutput1.AmplitudeSpectrogram.GetLength(1) * config.MaxBandWidth) - 1;
    dspOutput1.AmplitudeSpectrogram = MatrixTools.Submatrix(
        dspOutput1.AmplitudeSpectrogram,
        0,
        minBand,
        dspOutput1.AmplitudeSpectrogram.GetLength(0) - 1,
        maxBand);

    // TODO: Michael to review whether bandwidth filter should be moved to DSP_Frames??
    // Recalculate NyquistBin and FreqBinWidth, because they change with band selection
    //dspOutput1.NyquistBin = dspOutput1.AmplitudeSpectrogram.GetLength(1) - 1;
    //dspOutput1.FreqBinWidth = sampleRate / (double)dspOutput1.AmplitudeSpectrogram.GetLength(1) / 2;

    // Linear or Octave or Mel frequency scale? Set Linear as default.
    var freqScale = new FrequencyScale(nyquist: nyquist, frameSize: frameSize, hertzGridInterval: 1000);
    var freqScaleType = config.FrequencyScale;
    bool octaveScale = freqScaleType == FreqScaleType.Linear125Octaves7Tones28Nyquist32000;
    bool melScale = freqScaleType == FreqScaleType.Mel;
    if (octaveScale)
    {
        // Only allow one octave scale at the moment - for Jasco marine recordings.
        // ASSUME fixed Octave scale - USEFUL ONLY FOR JASCO 64000sr MARINE RECORDINGS
        // If you wish to use other octave scale types then need to put in the config file and set up recovery here.
        freqScale = new FrequencyScale(FreqScaleType.Linear125Octaves7Tones28Nyquist32000);

        // Recalculate the spectrogram according to octave scale. This option works only when have high SR recordings.
        dspOutput1.AmplitudeSpectrogram = OctaveFreqScale.AmplitudeSpectra(
            dspOutput1.AmplitudeSpectrogram,
            dspOutput1.WindowPower,
            sampleRate,
            epsilon,
            freqScale);
        dspOutput1.NyquistBin = dspOutput1.AmplitudeSpectrogram.GetLength(1) - 1; // ASSUMPTION!!! Nyquist is in top Octave bin - not necessarily true!!
    }
    else if (melScale)
    {
        int minFreq = 0;
        int maxFreq = recording.Nyquist;
        dspOutput1.AmplitudeSpectrogram = MFCCStuff.MelFilterBank(
            dspOutput1.AmplitudeSpectrogram,
            config.MelScale,
            recording.Nyquist,
            minFreq,
            maxFreq);

        dspOutput1.NyquistBin = dspOutput1.AmplitudeSpectrogram.GetLength(1) - 1;

        // TODO: This doesn't make any sense, since the frequency width changes for each bin. Probably need to set this to NaN.
        // TODO: Whatever uses this value below, should probably be changed to not be depending on it.
        dspOutput1.FreqBinWidth = sampleRate / (double)dspOutput1.AmplitudeSpectrogram.GetLength(1) / 2;
    }

    // NOW EXTRACT SIGNAL FOR BACKGROUND NOISE CALCULATION
    // If the index calculation duration >= 30 seconds, then calculate BGN from the existing segment of recording.
    bool doSeparateBgnNoiseCalculation = indexCalculationDuration.TotalSeconds + (2 * config.BgNoiseBuffer.TotalSeconds) < segmentDuration.TotalSeconds / 2;

    // dspOutput2 aliases dspOutput1 unless a separate, buffered BGN subsegment is extracted below.
    var dspOutput2 = dspOutput1;
    if (doSeparateBgnNoiseCalculation)
    {
        // GET a longer SUBSEGMENT FOR NOISE calculation with 5 sec buffer on either side.
        // If the index calculation duration is shorter than 30 seconds, then need to calculate BGN noise from a longer length of recording
        // i.e. need to add noiseBuffer either side. Typical noiseBuffer value = 5 seconds.
        int sampleBuffer = (int)(config.BgNoiseBuffer.TotalSeconds * sampleRate);
        var bgnRecording = AudioRecording.GetRecordingSubsegment(recording, startSample, endSample, sampleBuffer);

        // EXTRACT ENVELOPE and SPECTROGRAM FROM BACKGROUND NOISE SUBSEGMENT
        dspOutput2 = DSP_Frames.ExtractEnvelopeAndFfts(bgnRecording, frameSize, frameStep);

        // If necessary, recalculate the spectrogram according to octave scale. This option works only when have high SR recordings.
        if (octaveScale)
        {
            // ASSUME fixed Octave scale - USEFUL ONLY FOR JASCO 64000sr MARINE RECORDINGS
            // If you wish to use other octave scale types then need to put in the config file and set up recovery here.
            dspOutput2.AmplitudeSpectrogram = OctaveFreqScale.AmplitudeSpectra(
                dspOutput2.AmplitudeSpectrogram,
                dspOutput2.WindowPower,
                sampleRate,
                epsilon,
                freqScale);
            dspOutput2.NyquistBin = dspOutput2.AmplitudeSpectrogram.GetLength(1) - 1; // ASSUMPTION!!! Nyquist is in top Octave bin - not necessarily true!!
        }
    }

    // ###################################### BEGIN CALCULATION OF INDICES ##################################

    // (A) ################################## EXTRACT SUMMARY INDICES FROM THE SIGNAL WAVEFORM ##################################
    // average absolute value over the minute recording - not useful
    // double[] avAbsolute = dspOutput1.Average;
    double[] signalEnvelope = dspOutput1.Envelope;
    double avgSignalEnvelope = signalEnvelope.Average();

    // 10 times log of amplitude squared (i.e. 20 * log10 of amplitude)
    summaryIndices.AvgSignalAmplitude = 20 * Math.Log10(avgSignalEnvelope);

    // Deal with case where the signal waveform is continuous flat with values < 0.001. Has happened!!
    // Although signal appears zero, this condition is required.
    if (avgSignalEnvelope < 0.0001)
    {
        Logger.Debug("Segment skipped because avSignalEnvelope is < 0.001!");
        summaryIndices.ZeroSignal = 1.0;
        return result;
    }

    // i. Check for clipping and high amplitude rates per second
    summaryIndices.HighAmplitudeIndex = dspOutput1.HighAmplitudeCount / subsegmentDurationInSeconds;
    summaryIndices.ClippingIndex = dspOutput1.ClipCount / subsegmentDurationInSeconds;

    // ii. Calculate bg noise in dB
    //     Convert signal envelope to dB and subtract background noise. Default noise SD to calculate threshold = ZERO
    double signalBgn = NoiseRemovalModal.CalculateBackgroundNoise(dspOutput2.Envelope);
    summaryIndices.BackgroundNoise = signalBgn;

    // iii: FRAME ENERGIES - convert signal to decibels and subtract background noise.
    double[] dBEnvelope = SNR.Signal2Decibels(dspOutput1.Envelope);
    double[] dBEnvelopeSansNoise = SNR.SubtractAndTruncate2Zero(dBEnvelope, signalBgn);

    // iv: ACTIVITY for NOISE REDUCED SIGNAL ENVELOPE
    //     Calculate fraction of frames having acoustic activity
    var activity = ActivityAndCover.CalculateActivity(dBEnvelopeSansNoise, frameStepTimeSpan);
    summaryIndices.Activity = activity.FractionOfActiveFrames;

    // v. average number of events per second whose duration > one frame
    //    average event duration in milliseconds - no longer calculated
    //summaryIndices.AvgEventDuration = activity.avEventDuration;
    summaryIndices.EventsPerSecond = activity.EventCount / subsegmentDurationInSeconds;

    // vi. Calculate SNR and active frames SNR
    summaryIndices.Snr = dBEnvelopeSansNoise.Max();
    summaryIndices.AvgSnrOfActiveFrames = activity.ActiveAvDb;

    // vii. ENTROPY of ENERGY ENVELOPE -- 1-Ht because want measure of concentration of acoustic energy.
    double entropy = DataTools.EntropyNormalised(DataTools.SquareValues(signalEnvelope));
    summaryIndices.TemporalEntropy = 1 - entropy;

    // Note that the spectrogram has had the DC bin removed. i.e. has only 256 columns.
    double[,] amplitudeSpectrogram = dspOutput1.AmplitudeSpectrogram; // get amplitude spectrogram.

    // CALCULATE various NDSI (Normalised difference soundscape Index) FROM THE AMPLITUDE SPECTROGRAM
    // These options proved to be highly correlated. Therefore only use tuple.Item1 which derived from Power Spectral Density.
    var tuple3 = SpectrogramTools.CalculateAvgSpectrumAndVarianceSpectrumFromAmplitudeSpectrogram(amplitudeSpectrogram);
    summaryIndices.Ndsi = SpectrogramTools.CalculateNdsi(tuple3.Item1, sampleRate, 1000, 2000, 8000);

    // (B) ################################## EXTRACT OSC SPECTRAL INDEX DIRECTLY FROM THE RECORDING ##################################
    // Get the oscillation spectral index OSC separately from signal because need a different frame size etc.
    var sampleLength = Oscillations2014.DefaultSampleLength;
    var frameLength = Oscillations2014.DefaultFrameLength;
    var sensitivity = Oscillations2014.DefaultSensitivityThreshold;
    var spectralIndexShort = Oscillations2014.GetSpectralIndex_Osc(subsegmentRecording, frameLength, sampleLength, sensitivity);

    // double length of the vector because want to work with 256 element vector for LDFC purposes
    spectralIndices.OSC = DataTools.VectorDoubleLengthByAverageInterpolation(spectralIndexShort);

    // (C) ################################## EXTRACT SPECTRAL INDICES FROM THE AMPLITUDE SPECTROGRAM ##################################

    // i: CALCULATE SPECTRUM OF THE SUM OF FREQ BIN AMPLITUDES - used for later calculation of ACI
    spectralIndices.SUM = MatrixTools.SumColumns(amplitudeSpectrogram);

    // Calculate lower and upper boundary bin ids.
    // Boundary between low & mid frequency bands is to avoid low freq bins containing anthropogenic noise. These biased index values away from biophony.
    // Boundary of upper bird-band is to avoid high freq artefacts due to mp3.
    int lowerBinBound = (int)Math.Ceiling(lowFreqBound / dspOutput1.FreqBinWidth);
    int middleBinBound = (int)Math.Ceiling(midFreqBound / dspOutput1.FreqBinWidth);

    // calculate number of freq bins in the bird-band.
    int midBandBinCount = middleBinBound - lowerBinBound + 1;

    if (octaveScale)
    {
        // The above frequency bin bounds do not apply with octave scale. Need to recalculate them suitable for Octave scale recording.
        lowFreqBound = freqScale.LinearBound;
        lowerBinBound = freqScale.GetBinIdForHerzValue(lowFreqBound);

        midFreqBound = 8000; // This value appears suitable for Jasco Marine recordings. Not much happens above 8kHz.
        //middleBinBound = freqScale.GetBinIdForHerzValue(midFreqBound);
        middleBinBound = freqScale.GetBinIdInReducedSpectrogramForHerzValue(midFreqBound);
        midBandBinCount = middleBinBound - lowerBinBound + 1;
    }

    // IFF there has been UP-SAMPLING, calculate bin of the original audio nyquist. this will be less than SR/2.
    // original sample rate can be anything 11.0-44.1 kHz.
    int originalNyquist = sampleRateOfOriginalAudioFile / 2;

    // if upsampling has been done
    if (dspOutput1.NyquistFreq > originalNyquist)
    {
        dspOutput1.NyquistFreq = originalNyquist;
        dspOutput1.NyquistBin = (int)Math.Floor(originalNyquist / dspOutput1.FreqBinWidth); // note that binwidth does not change
    }

    // ii: CALCULATE THE ACOUSTIC COMPLEXITY INDEX
    spectralIndices.DIF = AcousticComplexityIndex.SumOfAmplitudeDifferences(amplitudeSpectrogram);

    double[] aciSpectrum = AcousticComplexityIndex.CalculateAci(amplitudeSpectrogram);
    spectralIndices.ACI = aciSpectrum;

    // remove low freq band of ACI spectrum and store average ACI value
    double[] reducedAciSpectrum = DataTools.Subarray(aciSpectrum, lowerBinBound, midBandBinCount);
    summaryIndices.AcousticComplexity = reducedAciSpectrum.Average();

    // iii: CALCULATE the H(t) or Temporal ENTROPY Spectrum and then reverse the values i.e. calculate 1-Ht for energy concentration
    double[] temporalEntropySpectrum = AcousticEntropy.CalculateTemporalEntropySpectrum(amplitudeSpectrogram);
    for (int i = 0; i < temporalEntropySpectrum.Length; i++)
    {
        temporalEntropySpectrum[i] = 1 - temporalEntropySpectrum[i];
    }

    spectralIndices.ENT = temporalEntropySpectrum;

    // iv: remove background noise from the amplitude spectrogram
    //     First calculate the noise profile from the amplitude spectrogram
    double[] spectralAmplitudeBgn = NoiseProfile.CalculateBackgroundNoise(dspOutput2.AmplitudeSpectrogram);
    amplitudeSpectrogram = SNR.TruncateBgNoiseFromSpectrogram(amplitudeSpectrogram, spectralAmplitudeBgn);

    // AMPLITUDE THRESHOLD for smoothing background, nhThreshold, assumes background noise ranges around -40dB.
    // This value corresponds to approximately 6dB above background.
    amplitudeSpectrogram = SNR.RemoveNeighbourhoodBackgroundNoise(amplitudeSpectrogram, nhThreshold: 0.015);

    ////ImageTools.DrawMatrix(spectrogramData, @"C:\SensorNetworks\WavFiles\Crows\image.png", false);
    ////DataTools.writeBarGraph(modalValues);
    result.AmplitudeSpectrogram = amplitudeSpectrogram;

    // v: ENTROPY OF AVERAGE SPECTRUM & VARIANCE SPECTRUM - at this point the spectrogram is a noise reduced amplitude spectrogram
    var tuple = AcousticEntropy.CalculateSpectralEntropies(amplitudeSpectrogram, lowerBinBound, midBandBinCount);

    // ENTROPY of spectral averages - Reverse the values i.e. calculate 1-Hs and 1-Hv, and 1-Hcov for energy concentration
    summaryIndices.EntropyOfAverageSpectrum = 1 - tuple.Item1;

    // ENTROPY of spectrum of Variance values
    summaryIndices.EntropyOfVarianceSpectrum = 1 - tuple.Item2;

    // ENTROPY of spectrum of Coefficient of Variation values
    summaryIndices.EntropyOfCoVSpectrum = 1 - tuple.Item3;

    // vi: ENTROPY OF DISTRIBUTION of maximum SPECTRAL PEAKS.
    //     First extract High band SPECTROGRAM which is now noise reduced
    double entropyOfPeaksSpectrum = AcousticEntropy.CalculateEntropyOfSpectralPeaks(amplitudeSpectrogram, lowerBinBound, middleBinBound);
    summaryIndices.EntropyOfPeaksSpectrum = 1 - entropyOfPeaksSpectrum;

    // ######################################################################################################################################################
    // (C) ################################## EXTRACT SPECTRAL INDICES FROM THE DECIBEL SPECTROGRAM ##################################

    // i: Convert amplitude spectrogram to deciBels and calculate the dB background noise profile
    //    NOTE: uses dspOutput2 (the possibly-buffered BGN subsegment) for the noise profile.
    double[,] deciBelSpectrogram = MFCCStuff.DecibelSpectra(dspOutput2.AmplitudeSpectrogram, dspOutput2.WindowPower, sampleRate, epsilon);
    double[] spectralDecibelBgn = NoiseProfile.CalculateBackgroundNoise(deciBelSpectrogram);
    spectralIndices.BGN = spectralDecibelBgn;

    // ii: Calculate the noise reduced decibel spectrogram derived from segment recording.
    //     REUSE the var decibelSpectrogram but this time using dspOutput1.
    deciBelSpectrogram = MFCCStuff.DecibelSpectra(dspOutput1.AmplitudeSpectrogram, dspOutput1.WindowPower, sampleRate, epsilon);
    deciBelSpectrogram = SNR.TruncateBgNoiseFromSpectrogram(deciBelSpectrogram, spectralDecibelBgn);
    deciBelSpectrogram = SNR.RemoveNeighbourhoodBackgroundNoise(deciBelSpectrogram, nhThreshold: 2.0);

    // iii: CALCULATE noise reduced AVERAGE DECIBEL SPECTRUM
    spectralIndices.PMN = SpectrogramTools.CalculateAvgDecibelSpectrumFromDecibelSpectrogram(deciBelSpectrogram);

    // iv: CALCULATE SPECTRAL COVER.
    //     NOTE: at this point, decibelSpectrogram is noise reduced. All values >= 0.0
    //     FreqBinWidth can be accessed, if required, through dspOutput1.FreqBinWidth
    double dBThreshold = ActivityAndCover.DefaultActivityThresholdDb; // dB THRESHOLD for calculating spectral coverage
    var spActivity = ActivityAndCover.CalculateSpectralEvents(deciBelSpectrogram, dBThreshold, frameStepTimeSpan, lowerBinBound, middleBinBound);
    spectralIndices.CVR = spActivity.CoverSpectrum;
    spectralIndices.EVN = spActivity.EventSpectrum;
    summaryIndices.HighFreqCover = spActivity.HighFreqBandCover;
    summaryIndices.MidFreqCover = spActivity.MidFreqBandCover;
    summaryIndices.LowFreqCover = spActivity.LowFreqBandCover;

    // ######################################################################################################################################################
    // v: CALCULATE SPECTRAL PEAK TRACKS and RIDGE indices.
    //    NOTE: at this point, the var decibelSpectrogram is noise reduced. i.e. all its values >= 0.0
    //    Detecting ridges or spectral peak tracks requires using a 5x5 mask which has edge effects.
    //    This becomes significant if we have a short indexCalculationDuration.
    //    Consequently if the indexCalculationDuration < 10 seconds then we revert back to the recording and cut out a recording segment that includes
    //    a buffer for edge effects. In most cases however, we can just use the decibel spectrogram already calculated and ignore the edge effects.
    double peakThreshold = 6.0; //dB
    SpectralPeakTracks sptInfo;
    if (indexCalculationDuration.TotalSeconds < 10.0)
    {
        // calculate a new decibel spectrogram (with edge-effect buffer)
        sptInfo = SpectralPeakTracks.CalculateSpectralPeakTracks(recording, startSample, endSample, frameSize, octaveScale, peakThreshold);
    }
    else
    {
        // use existing decibel spectrogram
        sptInfo = new SpectralPeakTracks(deciBelSpectrogram, peakThreshold);
    }

    spectralIndices.SPT = sptInfo.SptSpectrum;
    spectralIndices.RHZ = sptInfo.RhzSpectrum;
    spectralIndices.RVT = sptInfo.RvtSpectrum;
    spectralIndices.RPS = sptInfo.RpsSpectrum;
    spectralIndices.RNG = sptInfo.RngSpectrum;
    summaryIndices.SptDensity = sptInfo.TrackDensity;

    // these are two other indices that I tried but they do not seem to add anything of interest.
    //summaryIndices.AvgSptDuration = sptInfo.AvTrackDuration;
    //summaryIndices.SptPerSecond = sptInfo.TotalTrackCount / subsegmentSecondsDuration;

    // ######################################################################################################################################################
    // vi: CLUSTERING - FIRST DETERMINE IF IT IS WORTH DOING
    // return if (activeFrameCount too small || eventCount == 0 || short index calc duration) because no point doing clustering
    if (activity.ActiveFrameCount <= 2 || Math.Abs(activity.EventCount) < 0.01 || indexCalculationDuration.TotalSeconds < 15)
    {
        // IN ADDITION return if indexCalculationDuration < 15 seconds because no point doing clustering on short time segment
        // NOTE: Activity was calculated with 3dB threshold AFTER backgroundnoise removal.
        //summaryIndices.AvgClusterDuration = TimeSpan.Zero;
        summaryIndices.ClusterCount = 0;
        summaryIndices.ThreeGramCount = 0;
        return result;
    }

    // YES WE WILL DO CLUSTERING! to determine cluster count (spectral diversity) and spectral persistence.
    // Only use midband decibel SPECTRUM. In June 2016, the mid-band (i.e. the bird-band) was set to lowerBound=1000Hz, upperBound=8000hz.
    // Actually do clustering of binary spectra. Must first threshold.
    double binaryThreshold = SpectralClustering.DefaultBinaryThresholdInDecibels;
    var midBandSpectrogram = MatrixTools.Submatrix(deciBelSpectrogram, 0, lowerBinBound, deciBelSpectrogram.GetLength(0) - 1, middleBinBound);
    var clusterInfo = SpectralClustering.ClusterTheSpectra(midBandSpectrogram, lowerBinBound, middleBinBound, binaryThreshold);

    // Store two summary index values from cluster info
    summaryIndices.ClusterCount = clusterInfo.ClusterCount;
    summaryIndices.ThreeGramCount = clusterInfo.TriGramUniqueCount;

    // As of May 2017, no longer store clustering results superimposed on spectrogram.
    // If you want to see this, then call the TEST methods in class SpectralClustering.cs.

    // #######################################################################################################################################################
    // vii: set up other info to return
    var freqPeaks = SpectralPeakTracks.ConvertSpectralPeaksToNormalisedArray(deciBelSpectrogram);
    var scores = new List<Plot>
    {
        new Plot("Decibels", DataTools.normalise(dBEnvelopeSansNoise), ActivityAndCover.DefaultActivityThresholdDb),
        new Plot("Active Frames", DataTools.Bool2Binary(activity.ActiveFrames), 0.0),
        new Plot("Max Frequency", freqPeaks, 0.0), // relative location of freq maxima in spectra
    };

    result.Hits = sptInfo.Peaks;
    result.TrackScores = scores;

    return result;
} // end Calculation of Summary and Spectral Indices
/// <summary>
/// Do your analysis. This method is called once per segment (typically one-minute segments).
/// Detects oscillation events in the band [minHz, maxHz] by running a DCT over a high-pass filtered,
/// band-averaged amplitude array derived from a standard spectrogram, and converts the resulting
/// score array into acoustic events.
/// </summary>
/// <param name="recording">The audio segment to analyse. Must be sampled at 22050 Hz (enforced below).</param>
/// <param name="configuration">Recognizer config: band limits, oscillation freq bounds, DCT/decibel/event thresholds, event duration bounds.</param>
/// <param name="segmentStartOffset">Offset of this segment from the start of the source recording; stamped onto returned events.</param>
/// <param name="getSpectralIndexes">Lazily-computed spectral indices (not used by this recognizer's visible body).</param>
/// <param name="outputDirectory">Directory for debug images (only written in debug builds).</param>
/// <param name="imageWidth">Optional image width (not used by this recognizer's visible body).</param>
/// <returns>RecognizerResults containing the sonogram, score plots and detected acoustic events.</returns>
/// <exception cref="InvalidOperationException">Thrown when the recording's sample rate is not 22050 Hz.</exception>
public override RecognizerResults Recognize(AudioRecording recording, Config configuration, TimeSpan segmentStartOffset, Lazy<IndexCalculateResult[]> getSpectralIndexes, DirectoryInfo outputDirectory, int? imageWidth)
{
    string speciesName = configuration[AnalysisKeys.SpeciesName] ?? "<no species>";
    string abbreviatedSpeciesName = configuration[AnalysisKeys.AbbreviatedSpeciesName] ?? "<no.sp>";
    const int frameSize = 256;
    const double windowOverlap = 0.0;

    double noiseReductionParameter = configuration.GetDoubleOrNull("SeverityOfNoiseRemoval") ?? 2.0;

    int minHz = configuration.GetInt(AnalysisKeys.MinHz);
    int maxHz = configuration.GetInt(AnalysisKeys.MaxHz);

    // ignore oscillations below this threshold freq
    int minOscilFreq = configuration.GetInt(AnalysisKeys.MinOscilFreq);

    // ignore oscillations above this threshold freq
    int maxOscilFreq = configuration.GetInt(AnalysisKeys.MaxOscilFreq);

    // duration of DCT in seconds
    //double dctDuration = (double)configuration[AnalysisKeys.DctDuration];

    // minimum acceptable value of a DCT coefficient
    double dctThreshold = configuration.GetDouble(AnalysisKeys.DctThreshold);

    // min duration of event in seconds
    double minDuration = configuration.GetDouble(AnalysisKeys.MinDuration);

    // max duration of event in seconds
    double maxDuration = configuration.GetDouble(AnalysisKeys.MaxDuration);

    // min score for an acceptable event
    double decibelThreshold = configuration.GetDouble(AnalysisKeys.DecibelThreshold);

    // min score for an acceptable event
    double eventThreshold = configuration.GetDouble(AnalysisKeys.EventThreshold);

    if (recording.WavReader.SampleRate != 22050)
    {
        throw new InvalidOperationException("Requires a 22050Hz file");
    }

    // i: MAKE SONOGRAM
    var sonoConfig = new SonogramConfig
    {
        SourceFName = recording.BaseName,
        WindowSize = frameSize,
        WindowOverlap = windowOverlap,
        NoiseReductionType = NoiseReductionType.Standard,
        NoiseReductionParameter = noiseReductionParameter,
    };

    var recordingDuration = recording.Duration;
    int sr = recording.SampleRate;
    double freqBinWidth = sr / (double)sonoConfig.WindowSize;
    int minBin = (int)Math.Round(minHz / freqBinWidth) + 1;
    int maxBin = (int)Math.Round(maxHz / freqBinWidth) + 1;

    // duration of DCT in seconds - want it to be about 3X or 4X the expected maximum period
    // NOTE(review): with zero window overlap, framesPerSecond numerically equals freqBinWidth (sr / windowSize) — confirm intended.
    double framesPerSecond = freqBinWidth;
    double minPeriod = 1 / (double)maxOscilFreq;
    double maxPeriod = 1 / (double)minOscilFreq;
    double dctDuration = 5 * maxPeriod;

    // duration of DCT in frames
    int dctLength = (int)Math.Round(framesPerSecond * dctDuration);

    // set up the cosine coefficients
    double[,] cosines = MFCCStuff.Cosines(dctLength, dctLength);

    BaseSonogram sonogram = new SpectrogramStandard(sonoConfig, recording.WavReader);
    int rowCount = sonogram.Data.GetLength(0);
    double[] amplitudeArray = MatrixTools.GetRowAveragesOfSubmatrix(sonogram.Data, 0, minBin, rowCount - 1, maxBin);

    // remove baseline from amplitude array
    var highPassFilteredSignal = DspFilters.SubtractBaseline(amplitudeArray, 7);

    // remove hi freq content from amplitude array
    var lowPassFilteredSignal = DataTools.filterMovingAverageOdd(amplitudeArray, 11);

    var dctScores = new double[highPassFilteredSignal.Length];
    const int step = 2;
    for (int i = dctLength; i < highPassFilteredSignal.Length - dctLength; i += step)
    {
        // skip quiet frames - no point looking for oscillations below the decibel threshold
        if (highPassFilteredSignal[i] < decibelThreshold)
        {
            continue;
        }

        double[] subArray = DataTools.Subarray(highPassFilteredSignal, i, dctLength);

        // Look for oscillations in the highPassFilteredSignal
        Oscillations2014.GetOscillationUsingDct(subArray, framesPerSecond, cosines, out var oscilFreq, out var period, out var intensity);
        bool periodWithinBounds = period > minPeriod && period < maxPeriod;
        if (!periodWithinBounds)
        {
            continue;
        }

        if (intensity < dctThreshold)
        {
            continue;
        }

        // lay down score for sample length; keep the max intensity seen at each frame
        for (int j = 0; j < dctLength; j++)
        {
            if (dctScores[i + j] < intensity && lowPassFilteredSignal[i + j] > decibelThreshold)
            {
                dctScores[i + j] = intensity;
            }
        }
    }

    //iii: CONVERT decibel sum-diff SCORES TO ACOUSTIC EVENTS
    var acousticEvents = AcousticEvent.ConvertScoreArray2Events(
        dctScores,
        minHz,
        maxHz,
        sonogram.FramesPerSecond,
        freqBinWidth,
        eventThreshold,
        minDuration,
        maxDuration,
        segmentStartOffset);

    // ######################################################################
    acousticEvents.ForEach(ae =>
    {
        ae.SpeciesName = speciesName;
        ae.SegmentDurationSeconds = recordingDuration.TotalSeconds;
        ae.SegmentStartSeconds = segmentStartOffset.TotalSeconds;
        ae.Name = abbreviatedSpeciesName;
    });

    var plot = new Plot(this.DisplayName, dctScores, eventThreshold);
    var plots = new List<Plot> { plot };

    // DEBUG IMAGE this recognizer only. MUST set false for deployment.
    bool displayDebugImage = MainEntry.InDEBUG;
    if (displayDebugImage)
    {
        // display a variety of debug score arrays
        DataTools.Normalise(amplitudeArray, decibelThreshold, out var normalisedScores, out var normalisedThreshold);
        var ampltdPlot = new Plot("amplitude", normalisedScores, normalisedThreshold);
        DataTools.Normalise(highPassFilteredSignal, decibelThreshold, out normalisedScores, out normalisedThreshold);
        var demeanedPlot = new Plot("Hi Pass", normalisedScores, normalisedThreshold);
        DataTools.Normalise(lowPassFilteredSignal, decibelThreshold, out normalisedScores, out normalisedThreshold);
        var lowPassPlot = new Plot("Low Pass", normalisedScores, normalisedThreshold);

        var debugPlots = new List<Plot> { ampltdPlot, lowPassPlot, demeanedPlot, plot };
        Image debugImage = SpectrogramTools.GetSonogramPlusCharts(sonogram, acousticEvents, debugPlots, null);
        var debugPath = outputDirectory.Combine(FilenameHelpers.AnalysisResultName(Path.GetFileNameWithoutExtension(recording.BaseName), this.Identifier, "png", "DebugSpectrogram"));
        debugImage.Save(debugPath.FullName);
    }

    return new RecognizerResults()
    {
        Sonogram = sonogram,
        Hits = null,
        Plots = plots,
        Events = acousticEvents,
    };
}
/// <summary>
/// Generates a composite sonogram from an audio recording and runs the generic
/// oscillation-detection activity (Oscillations2014) over it with three algorithms
/// (Autocorr-FFT, Autocorr-SVD-FFT, Autocorr-WPD), saving two PNG outputs to the
/// output directory: "{source}_freqOscilMatrix.png" and "{source}.png".
/// </summary>
/// <param name="arguments">Command-line arguments: Source recording, Config file,
/// Output directory, and optional Start/End offsets (both or neither).</param>
/// <exception cref="InvalidStartOrEndException">
/// Thrown when exactly one of StartOffset/EndOffset is supplied.
/// </exception>
public static void Main(Arguments arguments)
{
    // 1. set up the necessary files
    FileInfo sourceRecording = arguments.Source;
    FileInfo configFile = arguments.Config.ToFileInfo();
    DirectoryInfo opDir = arguments.Output;
    opDir.Create();

    // Offsets must be supplied as a pair: XOR detects "only one given".
    if (arguments.StartOffset.HasValue ^ arguments.EndOffset.HasValue)
    {
        throw new InvalidStartOrEndException("If StartOffset or EndOffset is specified, then both must be specified");
    }

    var offsetsProvided = arguments.StartOffset.HasValue && arguments.EndOffset.HasValue;

    // set default offsets - only use defaults if not provided in arguments list
    // NOTE(review): startOffset/endOffset are computed but not consumed below in this
    // method — presumably reserved for a future segment-extraction step; confirm.
    TimeSpan? startOffset = null;
    TimeSpan? endOffset = null;
    if (offsetsProvided)
    {
        startOffset = TimeSpan.FromSeconds(arguments.StartOffset.Value);
        endOffset = TimeSpan.FromSeconds(arguments.EndOffset.Value);
    }

    const string Title = "# MAKE A SONOGRAM FROM AUDIO RECORDING and do OscillationsGeneric activity.";
    string date = "# DATE AND TIME: " + DateTime.Now;
    LoggedConsole.WriteLine(Title);
    LoggedConsole.WriteLine(date);
    LoggedConsole.WriteLine("# Input audio file: " + sourceRecording.Name);
    string sourceName = Path.GetFileNameWithoutExtension(sourceRecording.FullName);

    // 2. get the config dictionary
    Config configuration = ConfigFile.Deserialize(configFile);

    // Resample rate must be 2 X the desired Nyquist. Default is that of recording.
    var resampleRate = configuration.GetIntOrNull(AnalysisKeys.ResampleRate) ?? AppConfigHelper.DefaultTargetSampleRate;

    var configDict = new Dictionary<string, string>(configuration.ToDictionary());

    // #NOISE REDUCTION PARAMETERS
    // Noise reduction is disabled for the first (amplitude) pass; the noise-reduced
    // spectrogram is produced explicitly further below.
    configDict[AnalysisKeys.NoiseDoReduction] = "false";
    configDict[AnalysisKeys.NoiseReductionType] = "NONE";

    // Image-annotation options default to "true" when absent from the config.
    configDict[AnalysisKeys.AddAxes] = configuration[AnalysisKeys.AddAxes] ?? "true";
    configDict[AnalysisKeys.AddSegmentationTrack] = configuration[AnalysisKeys.AddSegmentationTrack] ?? "true";
    configDict[AnalysisKeys.AddTimeScale] = configuration[AnalysisKeys.AddTimeScale] ?? "true";
    configDict[ConfigKeys.Recording.Key_RecordingCallName] = sourceRecording.FullName;
    configDict[ConfigKeys.Recording.Key_RecordingFileName] = sourceRecording.Name;

    // ####################################################################
    // print out the sonogram parameters
    LoggedConsole.WriteLine("\nPARAMETERS");
    foreach (KeyValuePair<string, string> kvp in configDict)
    {
        LoggedConsole.WriteLine("{0} = {1}", kvp.Key, kvp.Value);
    }

    LoggedConsole.WriteLine("Sample Length for detecting oscillations = {0}", SampleLength);

    // 3: GET RECORDING
    FileInfo tempAudioSegment = new FileInfo(Path.Combine(opDir.FullName, "tempWavFile.wav"));

    // delete the temp audio file if it already exists.
    if (File.Exists(tempAudioSegment.FullName))
    {
        File.Delete(tempAudioSegment.FullName);
    }

    // This line creates a temporary version of the source file downsampled as per entry in the config file
    MasterAudioUtility.SegmentToWav(sourceRecording, tempAudioSegment, new AudioUtilityRequest() { TargetSampleRate = resampleRate });

    // 1) get amplitude spectrogram
    AudioRecording recordingSegment = new AudioRecording(tempAudioSegment.FullName);
    SonogramConfig sonoConfig = new SonogramConfig(configDict); // default values config
    BaseSonogram sonogram = new AmplitudeSonogram(sonoConfig, recordingSegment.WavReader);
    Console.WriteLine("FramesPerSecond = {0}", sonogram.FramesPerSecond);

    // remove the DC bin (column 0) so only genuine frequency bins remain.
    sonogram.Data = MatrixTools.Submatrix(sonogram.Data, 0, 1, sonogram.FrameCount - 1, sonogram.Configuration.FreqBinCount);

    // ###############################################################
    // DO LocalContrastNormalisation
    // LocalContrastNormalisation over frequency bins is better and faster than the
    // 2-D field variant (LocalContrastNormalisation.ComputeLCN).
    int neighbourhood = 15;
    double contrastLevel = 0.5;
    sonogram.Data = NoiseRemoval_Briggs.NoiseReduction_byLCNDivision(sonogram.Data, neighbourhood, contrastLevel);

    // ###############################################################
    // lowering the sensitivity threshold increases the number of hits.
    if (configDict.TryGetValue(AnalysisKeys.OscilDetection2014SensitivityThreshold, out var sensitivityValue))
    {
        Oscillations2014.DefaultSensitivityThreshold = double.Parse(sensitivityValue);
    }

    // BUG FIX: this previously parsed the SensitivityThreshold key (copy-paste error),
    // so a configured sample length was ignored or threw KeyNotFoundException.
    if (configDict.TryGetValue(AnalysisKeys.OscilDetection2014SampleLength, out var sampleLengthValue))
    {
        Oscillations2014.DefaultSampleLength = int.Parse(sampleLengthValue);
    }

    // Run the three oscillation-detection algorithms over the amplitude spectrogram
    // and combine their freq-vs-oscillation images side by side.
    var list1 = new List<Image>();
    var result = Oscillations2014.GetFreqVsOscillationsDataAndImage(sonogram, "Autocorr-FFT");
    list1.Add(result.FreqOscillationImage);
    result = Oscillations2014.GetFreqVsOscillationsDataAndImage(sonogram, "Autocorr-SVD-FFT");
    list1.Add(result.FreqOscillationImage);
    result = Oscillations2014.GetFreqVsOscillationsDataAndImage(sonogram, "Autocorr-WPD");
    list1.Add(result.FreqOscillationImage);
    Image compositeOscImage1 = ImageTools.CombineImagesInLine(list1.ToArray());

    // ###############################################################
    // init the sonogram image stack
    var sonogramList = new List<Image>();
    var image = sonogram.GetImageFullyAnnotated("AMPLITUDE SPECTROGRAM");
    sonogramList.Add(image);

    Image envelopeImage = ImageTrack.DrawWaveEnvelopeTrack(recordingSegment, image.Width);
    sonogramList.Add(envelopeImage);

    // 2) now draw the standard decibel spectrogram and repeat the three algorithms on it.
    sonogram = new SpectrogramStandard(sonoConfig, recordingSegment.WavReader);

    // ###############################################################
    list1 = new List<Image>();
    result = Oscillations2014.GetFreqVsOscillationsDataAndImage(sonogram, "Autocorr-FFT");
    list1.Add(result.FreqOscillationImage);
    result = Oscillations2014.GetFreqVsOscillationsDataAndImage(sonogram, "Autocorr-SVD-FFT");
    list1.Add(result.FreqOscillationImage);
    result = Oscillations2014.GetFreqVsOscillationsDataAndImage(sonogram, "Autocorr-WPD");
    list1.Add(result.FreqOscillationImage);
    Image compositeOscImage2 = ImageTools.CombineImagesInLine(list1.ToArray());

    // ###############################################################
    // Stack the amplitude-based and decibel-based composites and save.
    list1 = new List<Image>();
    list1.Add(compositeOscImage1);
    list1.Add(compositeOscImage2);
    Image compositeOscImage3 = ImageTools.CombineImagesVertically(list1.ToArray());
    string imagePath3 = Path.Combine(opDir.FullName, sourceName + "_freqOscilMatrix.png");
    compositeOscImage3.Save(imagePath3, ImageFormat.Png);

    Image segmentationImage = ImageTrack.DrawSegmentationTrack(
        sonogram,
        EndpointDetectionConfiguration.K1Threshold,
        EndpointDetectionConfiguration.K2Threshold,
        image.Width);
    sonogramList.Add(segmentationImage);

    // 3) now draw the noise reduced decibel spectrogram
    sonoConfig.NoiseReductionType = NoiseReductionType.Standard;
    sonoConfig.NoiseReductionParameter = configuration.GetDoubleOrNull(AnalysisKeys.NoiseBgThreshold) ?? 3.0;
    sonogram = new SpectrogramStandard(sonoConfig, recordingSegment.WavReader);
    image = sonogram.GetImageFullyAnnotated("NOISE-REDUCED DECIBEL SPECTROGRAM");
    sonogramList.Add(image);

    // ###############################################################
    // NOTE: deriving the oscillation graph from this noise reduced spectrogram did not work well.
    // ###############################################################

    Image compositeSonogram = ImageTools.CombineImagesVertically(sonogramList);
    string imagePath2 = Path.Combine(opDir.FullName, sourceName + ".png");
    compositeSonogram.Save(imagePath2, ImageFormat.Png);

    LoggedConsole.WriteLine("\n##### FINISHED FILE ###################################################\n");
}