/// <summary>
/// Crops each event's time bounds to the high-amplitude core of the activity signal,
/// then removes any event whose cropped duration falls below the minimum.
/// </summary>
/// <param name="events">Events to crop in place.</param>
/// <param name="activity">Per-frame activity values used to locate the high-amplitude core.</param>
/// <param name="minDurationInSeconds">Events shorter than this after cropping are removed.</param>
public static void CropEvents(List<AcousticEvent> events, double[] activity, double minDurationInSeconds)
{
    double severity = 0.2;
    int length = activity.Length;

    foreach (AcousticEvent ev in events)
    {
        int start = ev.oblong.r1;
        int end = ev.oblong.r2;
        double[] subArray = DataTools.Subarray(activity, start, end - start + 1);

        // bounds[0]/bounds[1] are offsets relative to 'start' marking the cropped extent.
        int[] bounds = DataTools.Peaks_CropLowAmplitude(subArray, severity);

        int newMinRow = start + bounds[0];
        int newMaxRow = start + bounds[1];
        if (newMaxRow >= length)
        {
            newMaxRow = length - 1;
        }

        ev.oblong = new Oblong(newMinRow, ev.oblong.c1, newMaxRow, ev.oblong.c2);
        ev.TimeStart = newMinRow * ev.FrameOffset;
        ev.TimeEnd = newMaxRow * ev.FrameOffset;
        ev.Duration = ev.TimeEnd - ev.TimeStart;
    }

    // FIX: use RemoveAll instead of the original reverse loop that called
    // events.Remove(events[i]) — Remove does a linear search per removal and
    // deletes the FIRST element that Equals the target, which can be the wrong
    // one if two events compare equal. RemoveAll is index-safe and O(n).
    events.RemoveAll(ev => ev.Duration < minDurationInSeconds);
}
/// <summary>
/// Converts the hard-coded ground parrot template into a list of acoustic events
/// scaled to the time and frequency resolution of the supplied sonogram.
/// Template times are made relative to the start of the first chirp.
/// </summary>
public static List<AcousticEvent> ReadGroundParrotTemplateAsList(BaseSonogram sonogram)
{
    var secondsPerFrame = sonogram.FrameStep;
    var hertzPerBin = (int)sonogram.FBinWidth;
    int templateRows = GroundParrotTemplate1.GetLength(0);

    // All template times are offset by the start time of the first chirp.
    double startOffsetSeconds = GroundParrotTemplate1[0, 0];

    var templateEvents = new List<AcousticEvent>();
    for (int r = 0; r < templateRows; r++)
    {
        int frameStart = (int)Math.Round((GroundParrotTemplate1[r, 0] - startOffsetSeconds) / secondsPerFrame);
        int frameEnd = (int)Math.Round((GroundParrotTemplate1[r, 1] - startOffsetSeconds) / secondsPerFrame);
        int binTop = (int)Math.Round(GroundParrotTemplate1[r, 2] / hertzPerBin);
        int binBottom = (int)Math.Round(GroundParrotTemplate1[r, 3] / hertzPerBin);

        var bounds = new Oblong(frameStart, binBottom, frameEnd, binTop);
        templateEvents.Add(
            new AcousticEvent(
                TimeSpan.Zero,
                bounds,
                sonogram.NyquistFrequency,
                sonogram.Configuration.FreqBinCount,
                sonogram.FrameDuration,
                sonogram.FrameStep,
                sonogram.FrameCount));
    }

    return templateEvents;
}
} //Analysis()

/// <summary>
/// Crops each event's time bounds to the high-intensity core of the intensity signal.
/// </summary>
/// <param name="events">Events to crop in place.</param>
/// <param name="intensity">Per-frame intensity values used to locate the high-intensity core.</param>
public static void CropEvents(List<AcousticEvent> events, double[] intensity)
{
    double severity = 0.1;
    int length = intensity.Length;

    foreach (AcousticEvent ev in events)
    {
        int start = ev.Oblong.RowTop;
        int end = ev.Oblong.RowBottom;
        double[] subArray = DataTools.Subarray(intensity, start, end - start + 1);

        // bounds[0]/bounds[1] are offsets relative to 'start' marking the cropped extent.
        int[] bounds = DataTools.Peaks_CropLowAmplitude(subArray, severity);

        int newMinRow = start + bounds[0];
        int newMaxRow = start + bounds[1];
        if (newMaxRow >= length)
        {
            newMaxRow = length - 1;
        }

        ev.Oblong = new Oblong(newMinRow, ev.Oblong.ColumnLeft, newMaxRow, ev.Oblong.ColumnRight);
        ev.TimeStart = newMinRow * ev.FrameOffset;
        ev.TimeEnd = newMaxRow * ev.FrameOffset;

        // FIX: keep Duration consistent with the new start/end times. The
        // original left Duration stale after shifting both bounds; the
        // three-argument CropEvents overload updates it after the same mutation.
        ev.Duration = ev.TimeEnd - ev.TimeStart;
    }
}
/// <summary>
/// Win32 EnumDisplayMonitors callback. For each monitor it records the monitor
/// and work areas, device name, primary flag and DPI scaling into Display.Screens.
/// Always returns true so enumeration continues over all monitors.
/// </summary>
private static bool EnumMonitorsProc(IntPtr hMonitor, IntPtr hdcMonitor, ref RectStruct lprcMonitor, IntPtr dwData)
{
    MonitorInfoEx mi = new MonitorInfoEx();
    mi.Size = (uint)Marshal.SizeOf(mi); // cbSize must be populated before GetMonitorInfo
    bool success = GetMonitorInfo(hMonitor, ref mi);
    if (success)
    {
        ScreenInfo si = new ScreenInfo();
        si.MonitorArea = Oblong.FromRectStruct(mi.Monitor);
        si.WorkArea = Oblong.FromRectStruct(mi.WorkArea);
        si.DeviceName = mi.DeviceName;

        // FIX: test the flag bit rather than comparing the whole flags word to 1.
        // The original "== 1" only worked because MONITORINFOF_PRIMARY happens to
        // equal 1 and no other flag was set. (Also removed a stray ";;".)
        si.IsPrimaryScreen = (mi.Flags & MONITORINFOF_PRIMARY) != 0;

        DEVMODE DeviceMode = new DEVMODE();
        DeviceMode.Initialize();

        // -1 is ENUM_CURRENT_SETTINGS: query the current display mode so the
        // native pixel height can be compared to the logical monitor height to
        // derive a scaling percentage. If this call fails, Scaling stays at its
        // default and the Native* areas below collapse accordingly (original behavior).
        if (EnumDisplaySettingsEx(ToLPTStr(mi.DeviceName), -1, ref DeviceMode))
        {
            si.Scaling = Math.Round(((double)DeviceMode.dmPelsHeight / (mi.Monitor.bottom - mi.Monitor.top)) * 100);
        }

        si.NativeWorkArea = new Oblong(
            (int)(mi.WorkArea.left * si.Scaling) / 100,
            (int)(mi.WorkArea.top * si.Scaling) / 100,
            (int)(mi.WorkArea.right * si.Scaling) / 100,
            (int)(mi.WorkArea.bottom * si.Scaling) / 100);
        si.NativeArea = new Oblong(
            (int)(mi.Monitor.left * si.Scaling) / 100,
            (int)(mi.Monitor.top * si.Scaling) / 100,
            (int)(mi.Monitor.right * si.Scaling) / 100,
            (int)(mi.Monitor.bottom * si.Scaling) / 100);

        Display.Screens.Add(si);
    }

    return true;
}
/// <summary>
/// Copies the bounds of an Oblong onto a spectral event, converting
/// frame rows to seconds and frequency-bin columns to Hertz.
/// </summary>
public void SetBounds<T>(T @event, Oblong source)
    where T : SpectralEvent
{
    var startSeconds = this.GetStartTimeInSecondsOfFrame(source.RowTop);
    var endSeconds = this.GetEndTimeInSecondsOfFrame(source.RowBottom);
    var lowHertz = this.GetHertzFromFreqBin(source.ColumnLeft);
    var highHertz = this.GetHertzFromFreqBin(source.ColumnRight);

    @event.EventStartSeconds = startSeconds;
    @event.EventEndSeconds = endSeconds;
    @event.LowFrequencyHertz = lowHertz;
    @event.HighFrequencyHertz = highHertz;
}
/// <summary>
/// Simple shape factory: maps a flag string to a concrete shape instance
/// ("1" = Square, "2" = Oblong, "3" = Triangle). For an unrecognised flag it
/// prints a message and returns null.
/// </summary>
public static shape getSgape(string flag)
{
    switch (flag)
    {
        case "1":
            return new Square();
        case "2":
            return new Oblong();
        case "3":
            return new Triangle();
        default:
            Console.WriteLine("没有所选的形状。");
            return null;
    }
}
/// <summary>
/// Scores each frame of the sonogram against a multi-chirp template (EPR).
/// A frame is considered only when its oscillation-detection score passes the
/// threshold. The best frequency offset is found by sliding the FIRST chirp
/// over a small range of bins; if its score reaches 6 dB, the frame's score is
/// the average location score of ALL template chirps at that offset.
/// </summary>
public static double[] DetectEPR(List<AcousticEvent> template, BaseSonogram sonogram, double[] odScores, double odThreshold)
{
    int frameCount = sonogram.FrameCount;
    double[] eprScores = new double[frameCount];

    Oblong firstChirp = template[0].Oblong; // the first chirp in template
    Oblong lastChirp = template[template.Count - 1].Oblong; // the last chirp in template
    int templateLength = lastChirp.RowBottom;

    for (int frame = 0; frame < frameCount - templateLength; frame++)
    {
        if (odScores[frame] < odThreshold)
        {
            continue;
        }

        // Find the best frequency band for the first chirp's rectangle.
        double bestScore = -double.MaxValue;
        int bestOffset = 0;
        for (int offset = -5; offset < 15; offset++)
        {
            var candidate = new Oblong(
                firstChirp.RowTop + frame,
                firstChirp.ColumnLeft + offset,
                firstChirp.RowBottom + frame,
                firstChirp.ColumnRight + offset);
            double candidateScore = GetLocationScore(sonogram, candidate);
            if (candidateScore > bestScore)
            {
                bestScore = candidateScore;
                bestOffset = offset;
            }
        }

        // Only score the remaining chirps when the best location exceeds 6 dB.
        if (bestScore < 6.0)
        {
            continue;
        }

        foreach (AcousticEvent chirp in template)
        {
            var location = new Oblong(
                chirp.Oblong.RowTop + frame,
                chirp.Oblong.ColumnLeft + bestOffset,
                chirp.Oblong.RowBottom + frame,
                chirp.Oblong.ColumnRight + bestOffset);
            eprScores[frame] += GetLocationScore(sonogram, location);
        }

        eprScores[frame] /= template.Count;
    }

    return eprScores;
}
/// <summary>
/// Creates a copy of this oblong. The vertex collection is cloned so the
/// copy can be edited independently of the original.
/// </summary>
public override ShapeBase Clone()
{
    var copy = new Oblong();
    copy.Location = Location;
    copy.Size = Size;
    copy.DrawMethod = DrawMethod;
    copy.OutlineColor = OutlineColor;
    copy.OutlineWidth = OutlineWidth;
    copy.FillColor = FillColor;
    copy.IsClosedFigure = IsClosedFigure;
    copy.Vertices = (VertexCollection)Vertices.Clone();
    return copy;
}
/// <summary>
/// Returns the difference between the maximum dB value inside a rectangular
/// location and the average of the dB values sampled on its boundary.
/// The result is clipped at zero.
/// </summary>
/// <param name="sonogram">Sonogram whose Data matrix is sampled.</param>
/// <param name="ob">The rectangular location (frame rows x frequency-bin columns).</param>
/// <returns>max(interior) minus mean(boundary samples), or 0.0 if negative.</returns>
public static double GetLocationScore(BaseSonogram sonogram, Oblong ob)
{
    // NOTE: loops use exclusive upper bounds, so the bottom row and right
    // column are not visited when searching for the interior maximum
    // (original behavior, preserved).
    double max = -double.MaxValue;
    for (int r = ob.RowTop; r < ob.RowBottom; r++)
    {
        for (int c = ob.ColumnLeft; c < ob.ColumnRight; c++)
        {
            if (sonogram.Data[r, c] > max)
            {
                max = sonogram.Data[r, c];
            }
        }
    }

    // FIX: divide by the number of boundary samples actually summed. The two
    // loops below add exactly 2*(RowBottom-RowTop) + 2*(ColumnRight-ColumnLeft)
    // values; the original divided by 2*(height+1 + width+1), which
    // under-estimated the boundary average and inflated the score.
    int boundaryCount = (2 * (ob.RowBottom - ob.RowTop)) + (2 * (ob.ColumnRight - ob.ColumnLeft));
    if (boundaryCount == 0)
    {
        // Degenerate 1x1 rectangle: no interior or boundary samples;
        // the original also returned 0.0 here (max stayed negative).
        return 0.0;
    }

    double boundaryValue = 0.0;
    for (int r = ob.RowTop; r < ob.RowBottom; r++)
    {
        boundaryValue += sonogram.Data[r, ob.ColumnLeft] + sonogram.Data[r, ob.ColumnRight];
    }

    for (int c = ob.ColumnLeft; c < ob.ColumnRight; c++)
    {
        boundaryValue += sonogram.Data[ob.RowTop, c] + sonogram.Data[ob.RowBottom, c];
    }

    boundaryValue /= boundaryCount;

    double score = max - boundaryValue;
    if (score < 0.0)
    {
        score = 0.0;
    }

    return score;
}
/// <summary>
/// Does the Analysis.
/// Detects grating/grid events in the recording segment and returns the
/// sonogram, a hits matrix, the amplitude array, the acoustic events found,
/// and the duration of the recording segment. Returns null when the
/// recording cannot be obtained.
/// </summary>
/// <param name="fiSegmentOfSourceFile">audio segment to analyse.</param>
/// <param name="configDict">analysis configuration key/value pairs.</param>
/// <param name="diOutputDir">directory for the resampled recording.</param>
/// <param name="opFileName">name for the resampled recording file.</param>
/// <param name="segmentStartOffset">offset of this segment within the source recording.</param>
public static Tuple<BaseSonogram, double[,], double[], List<AcousticEvent>, TimeSpan> Analysis(
    FileInfo fiSegmentOfSourceFile,
    Dictionary<string, string> configDict,
    DirectoryInfo diOutputDir,
    string opFileName,
    TimeSpan segmentStartOffset)
{
    //set default values
    int bandWidth = 500; //detect bars in bands of this width.
    int frameSize = 1024;
    double windowOverlap = 0.0;
    double intensityThreshold = double.Parse(configDict[key_INTENSITY_THRESHOLD]);
    //intensityThreshold = 0.01;

    AudioRecording recording = AudioRecording.GetAudioRecording(fiSegmentOfSourceFile, RESAMPLE_RATE, diOutputDir.FullName, opFileName);
    if (recording == null)
    {
        LoggedConsole.WriteLine("############ WARNING: Recording could not be obtained - likely file does not exist.");
        return null;
    }

    int sr = recording.SampleRate;
    double binWidth = recording.SampleRate / (double)frameSize;
    double frameDuration = frameSize / (double)sr;
    double frameOffset = frameDuration * (1 - windowOverlap); //seconds between start of each frame
    double framesPerSecond = 1 / frameOffset;
    TimeSpan tsRecordingtDuration = recording.Duration;
    int colStep = (int)Math.Round(bandWidth / binWidth);

    //i: GET SONOGRAM AS MATRIX
    double epsilon = Math.Pow(0.5, recording.BitsPerSample - 1);
    var results2 = DSP_Frames.ExtractEnvelopeAndAmplSpectrogram(recording.WavReader.Samples, sr, epsilon, frameSize, windowOverlap);
    double[] avAbsolute = results2.Average; //average absolute value over the minute recording
    //double[] envelope = results2.Item2;

    //amplitude spectrogram. Note that column zero is the DC or average energy value and can be ignored.
    double[,] spectrogram = results2.AmplitudeSpectrogram;
    double windowPower = results2.WindowPower;

    //############################ NEXT LINE FOR DEBUGGING ONLY
    //spectrogram = GetTestSpectrogram(spectrogram.GetLength(0), spectrogram.GetLength(1), 0.01, 0.03);

    var output = DetectGratingEvents(spectrogram, colStep, intensityThreshold);
    var amplitudeArray = output.Item2; //for debug purposes only

    //convert List of Dictionary events to List of AcousticEvents.
    //also set up the hits matrix.
    int rowCount = spectrogram.GetLength(0);
    int colCount = spectrogram.GetLength(1);
    var hitsMatrix = new double[rowCount, colCount];
    var acousticEvents = new List<AcousticEvent>();

    double minFrameCount = 8; //this assumes that the minimum grid is 2 * 4 = 8 long
    foreach (Dictionary<string, double> item in output.Item1)
    {
        int minRow = (int)item[key_START_FRAME];
        int maxRow = (int)item[key_END_FRAME];
        int frameCount = maxRow - minRow + 1;
        if (frameCount < minFrameCount)
        {
            continue; //only want events that are over a minimum length
        }

        int minCol = (int)item[key_MIN_FREQBIN];
        int maxCol = (int)item[key_MAX_FREQBIN];
        double periodicity = item[key_PERIODICITY];

        double[] subarray = DataTools.Subarray(avAbsolute, minRow, maxRow - minRow + 1);
        double severity = 0.1;
        int[] bounds = DataTools.Peaks_CropToFirstAndLast(subarray, severity);

        // FIX: both bounds are offsets relative to the ORIGINAL minRow. The
        // original code updated minRow first and then derived maxRow from the
        // already-shifted value, displacing the event end by bounds[0]. The
        // CropEvents methods elsewhere in this file apply both offsets to the
        // same unshifted start.
        int cropOrigin = minRow;
        minRow = cropOrigin + bounds[0];
        maxRow = cropOrigin + bounds[1];
        if (maxRow >= rowCount)
        {
            maxRow = rowCount - 1;
        }

        Oblong o = new Oblong(minRow, minCol, maxRow, maxCol);
        var ae = new AcousticEvent(segmentStartOffset, o, results2.NyquistFreq, frameSize, frameDuration, frameOffset, frameCount);
        ae.Name = string.Format("p={0:f0}", periodicity);
        ae.Score = item[key_SCORE];
        ae.ScoreNormalised = item[key_SCORE] / 0.5;
        acousticEvents.Add(ae);

        //display event on the hits matrix
        for (int r = minRow; r < maxRow; r++)
        {
            for (int c = minCol; c < maxCol; c++)
            {
                hitsMatrix[r, c] = periodicity;
            }
        }
    } //foreach

    //set up the sonogram to return. Use the existing amplitude sonogram.
    int bitsPerSample = recording.WavReader.BitsPerSample;
    //NoiseReductionType nrt = SNR.Key2NoiseReductionType("NONE");
    NoiseReductionType nrt = SNR.KeyToNoiseReductionType("STANDARD");
    var sonogram = (BaseSonogram)SpectrogramStandard.GetSpectralSonogram(
        recording.BaseName, frameSize, windowOverlap, bitsPerSample, windowPower, sr, tsRecordingtDuration, nrt, spectrogram);

    sonogram.DecibelsNormalised = new double[sonogram.FrameCount];
    for (int i = 0; i < sonogram.FrameCount; i++) //foreach frame or time step
    {
        sonogram.DecibelsNormalised[i] = 2 * Math.Log10(avAbsolute[i]);
    }

    sonogram.DecibelsNormalised = DataTools.normalise(sonogram.DecibelsNormalised);
    return Tuple.Create(sonogram, hitsMatrix, amplitudeArray, acousticEvents, tsRecordingtDuration);
} //Analysis()
/// <summary>
/// EXPLANATION: A vertical track is a near click or rapidly frequency-modulated tone.
/// A good example is the whip component of the whip-bird call.
/// They would typically be only a few time-frames duration.
/// This method averages dB log values incorrectly but it is faster than doing many
/// log conversions and is accurate enough for the purpose.
/// </summary>
public static (List<AcousticEvent> Events, double[] CombinedIntensity) GetVerticalTracks(
    SpectrogramStandard sonogram,
    int minHz,
    int maxHz,
    int nyquist,
    double decibelThreshold,
    int minBandwidthHertz,
    int maxBandwidthHertz,
    bool combineProximalSimilarEvents,
    TimeSpan segmentStartOffset)
{
    var sonogramData = sonogram.Data;
    int frameCount = sonogramData.GetLength(0);
    int binCount = sonogramData.GetLength(1);
    var frameDuration = sonogram.FrameDuration;
    var frameStep = sonogram.FrameStep;
    double binWidth = nyquist / (double)binCount;
    int minBin = (int)Math.Round(minHz / binWidth);
    int maxBin = (int)Math.Round(maxHz / binWidth);

    // list of accumulated acoustic events
    var events = new List<AcousticEvent>();

    //Find all frame peaks and place in peaks matrix
    var peaks = new double[frameCount, binCount];
    for (int row = 1; row < frameCount - 1; row++)
    {
        for (int col = minBin; col < maxBin; col++)
        {
            if (sonogramData[row, col] < decibelThreshold)
            {
                continue;
            }

            // if given matrix element is greater than in frame either side
            bool isPeak = (sonogramData[row, col] > sonogramData[row - 1, col]) && (sonogramData[row, col] > sonogramData[row + 1, col]);
            if (isPeak)
            {
                peaks[row, col] = sonogramData[row, col];
            }
        }
    }

    // Look for track starts and initialise them as events.
    // Cannot include edge rows & columns because of edge effects.
    // Each row is a time frame which is a spectrum. Each column is a frequency bin.
    var combinedIntensityArray = new double[frameCount];
    for (int col = minBin; col < maxBin; col++)
    {
        for (int row = 2; row < frameCount - 2; row++)
        {
            // Visit each frame peak in order. Each may be start of possible track
            if (peaks[row, col] < decibelThreshold)
            {
                continue;
            }

            //have the beginning of a potential track
            var track = GetVerticalTrack(peaks, row, col, maxBin, decibelThreshold);

            // calculate first and last of the frame IDs in the original spectrogram
            int trackStartFrame = track.GetStartFrame();
            int trackEndFrame = track.GetEndFrame();

            //If track lies within the correct bandWidth range, then create an event
            int trackBandWidth = track.GetTrackBandWidthHertz(binWidth);
            if (trackBandWidth >= minBandwidthHertz && trackBandWidth <= maxBandwidthHertz)
            {
                // get the oblong and init an event
                var oblong = new Oblong(trackStartFrame, col, trackEndFrame, track.GetTopFreqBin());
                var ae = new AcousticEvent(segmentStartOffset, oblong, nyquist, binCount, frameDuration, frameStep, frameCount)
                {
                    // attach the track itself
                    TheTrack = track,
                };

                events.Add(ae);

                // fill the intensity array
                var amplitudeArray = track.GetAmplitudeOverTimeFrames();
                for (int i = 0; i < amplitudeArray.Length; i++)
                {
                    combinedIntensityArray[row + i] += amplitudeArray[i];
                }
            }
        } // rows/frames
    } // end cols/bins

    // combine proximal events that occupy similar frequency band
    if (combineProximalSimilarEvents)
    {
        TimeSpan startDifference = TimeSpan.FromSeconds(0.5);
        int hertzDifference = 500;
        events = AcousticEvent.CombineSimilarProximalEvents(events, startDifference, hertzDifference);
    }

    // FIX: return the accumulated intensity array. The original returned a
    // separate never-written array (temporalIntensityArray), so callers always
    // received all zeros even though combinedIntensityArray was filled above.
    // The sibling GetSpectralPeakTracks correctly returns its combined array.
    // (Also removed unused locals trackDuration and frameOverStep.)
    return (events, combinedIntensityArray);
}
/// <summary>
/// This method returns spectral peak tracks enclosed in acoustic events.
/// It averages dB log values incorrectly but it is faster than doing many log conversions.
/// </summary>
public static (List<AcousticEvent> Events, double[] CombinedIntensity) GetSpectralPeakTracks(
    SpectrogramStandard sonogram,
    int minHz,
    int maxHz,
    int nyquist,
    double decibelThreshold,
    double minDuration,
    double maxDuration,
    bool combinePossibleHarmonics,
    TimeSpan segmentStartOffset)
{
    var sonogramData = sonogram.Data;
    int frameCount = sonogramData.GetLength(0);
    int binCount = sonogramData.GetLength(1);
    double binWidth = nyquist / (double)binCount;
    int minBin = (int)Math.Round(minHz / binWidth);
    int maxBin = (int)Math.Round(maxHz / binWidth);
    var frameDuration = sonogram.FrameDuration;
    var frameStep = sonogram.FrameStep;
    var frameOverStep = frameDuration - frameStep;

    // list of accumulated acoustic events
    var events = new List<AcousticEvent>();

    // Find all spectral peaks and place in peaks matrix.
    // FIX: the peaks matrix is sized on the FULL bin count and indexed with
    // absolute bin numbers throughout. The original allocated only
    // bandwidthBinCount columns yet wrote absolute bin indices into it, which
    // overflows the second dimension whenever minBin > 1. This now matches the
    // indexing scheme used by the sibling GetVerticalTracks method.
    var peaks = new double[frameCount, binCount];
    for (int row = 0; row < frameCount; row++)
    {
        for (int col = minBin - 1; col < maxBin - 1; col++)
        {
            if (sonogramData[row, col] < decibelThreshold)
            {
                continue;
            }

            // if given matrix element is greater than in freq bin either side
            bool isPeak = (sonogramData[row, col] > sonogramData[row, col - 1]) && (sonogramData[row, col] > sonogramData[row, col + 1]);
            if (isPeak)
            {
                peaks[row, col] = sonogramData[row, col];
            }
        }
    }

    // Look for track starts and initialise them as events.
    // Cannot include edge rows & columns because of edge effects.
    // Each row is a time frame which is a spectrum. Each column is a frequency bin.
    var combinedIntensityArray = new double[frameCount];
    for (int row = 0; row < frameCount; row++)
    {
        // Scan the search band with a 3-bin margin at each edge. The original
        // scanned [3, bandwidthBinCount-3) in band-relative terms; this is the
        // same margin expressed in absolute bin indices.
        for (int col = minBin + 3; col < maxBin - 3; col++)
        {
            // Visit each spectral peak in order. Each may be start of possible track
            if (peaks[row, col] < decibelThreshold)
            {
                continue;
            }

            //have the beginning of a potential track
            var track = GetTrack(peaks, row, col, decibelThreshold);
            int trackStartFrame = track.GetStartFrame();
            int trackEndFrame = track.GetEndFrame();
            double trackDuration = ((trackEndFrame - trackStartFrame) * frameStep) + frameOverStep;

            // calculate max and min bin IDs in the original spectrogram
            int trackBottomBin = track.GetBottomFreqBin();
            int trackTopBin = track.GetTopFreqBin();

            //If track has length within duration bounds, then create an event
            if (trackDuration >= minDuration && trackDuration <= maxDuration)
            {
                var oblong = new Oblong(trackStartFrame, trackBottomBin, trackEndFrame, trackTopBin);
                var ae = new AcousticEvent(segmentStartOffset, oblong, nyquist, binCount, frameDuration, frameStep, frameCount)
                {
                    // attach the track itself
                    TheTrack = track,
                };

                events.Add(ae);

                // fill the intensity array
                var amplitudeTrack = track.GetAmplitudeOverTimeFrames();
                for (int i = 0; i < amplitudeTrack.Length; i++)
                {
                    combinedIntensityArray[row + i] += amplitudeTrack[i];
                }
            }
        }
    }

    // Combine coincident events that are stacked one above other.
    // This will help in some cases to combine related events.
    var startDifference = TimeSpan.FromSeconds(0.2);
    var hertzGap = 200;
    if (combinePossibleHarmonics)
    {
        events = AcousticEvent.CombinePotentialStackedTracks(events, startDifference, hertzGap);
    }

    return (events, combinedIntensityArray);
}