public void AudioAnalyzer_ProcessInput_IsSecondOutputFrameTimeCorrect()
{
    // Feed two identical input frames and verify the timestamp of the
    // second emitted output frame (expected value per analyzer config:
    // 1200-sample buffer, stereo, 48kHz, step 800, overlap 400, fft 2048, sync).
    var analyzer = new AudioAnalyzer(1200, 2, 48000, 800, 400, 2048, false);
    RegisterOutputHandler(analyzer);

    analyzer.ProcessInput(inputFrame);
    analyzer.ProcessInput(inputFrame);

    Assert.AreEqual(TimeSpan.FromTicks(10166666), outputFrames[1].Time);
}
public void AudioAnalyzer_Buffer_WrapOverEnd()
{
    // Step of 700 against a 2400-sample internal buffer exercises the
    // wrap-over-end path of the ring buffer (per test name); two input
    // frames should produce exactly two output frames.
    var analyzer = new AudioAnalyzer(2400, 2, 48000, 700, 0, 2048, false);
    RegisterOutputHandler(analyzer);

    analyzer.ProcessInput(inputFrame);
    analyzer.ProcessInput(inputFrame);

    Assert.AreEqual(2, outputFrames.Count);
}
public void AudioAnalyzer_Buffer_SequenceReadIsCorrect()
{
    // Generate audio signal with steps after 600 frames, packaged in frames of
    // length 800. Each analyzer output frame (step 600, overlap 300) should then
    // contain exactly one step value on both channels.
    var analyzer = new AudioAnalyzer(2400, 2, 48000, 600, 300, 2048, false);
    RegisterOutputHandler(analyzer);

    var testFrames = new AudioFrame[]
    {
        new AudioFrame(4 * 2 * 800),
        new AudioFrame(4 * 2 * 800),
        new AudioFrame(4 * 2 * 800)
    };
    // Step boundaries fall at absolute sample positions 600, 1200 and 1800.
    testFrames[0].Generate(2, 0, (frameIndex, channelIndex) => frameIndex >= 600 ? 1.0f : 0.0f);
    testFrames[1].Generate(2, 800, (frameIndex, channelIndex) => frameIndex >= 1200 ? 2.0f : 1.0f);
    testFrames[2].Generate(2, 1600, (frameIndex, channelIndex) => frameIndex >= 1800 ? 3.0f : 2.0f);

    foreach (var testFrame in testFrames)
    {
        analyzer.ProcessInput(testFrame);
    }

    // Each output frame's peak should equal its index on both channels.
    for (int index = 0; index < 4; index++)
    {
        Assert.AreEqual((float)index, outputFrames[index].Peak[0], "Channel 0");
        Assert.AreEqual((float)index, outputFrames[index].Peak[1], "Channel 1");
    }
}
public void AudioAnalyzer_Sync_Performance()
{
    // Measure average per-run analyzer cost by timestamping every Output event
    // while processing 0.5s of stereo float audio synchronously.
    var sut = new AudioAnalyzer(48000, 2, 48000, 800, 400, 2048, false);
    var sw = new System.Diagnostics.Stopwatch();
    var outTimes = new List<TimeSpan>();
    sut.Output += new Windows.Foundation.TypedEventHandler<AudioAnalyzer, VisualizationDataFrame>(
        (a, data) => { outTimes.Add(sw.Elapsed); }
        );
    AudioFrame frame = new AudioFrame(24000 * 4 * 2); // 0.5 sec worth of audio data
    sw.Start();
    sut.ProcessInput(frame);

    // Guard: Enumerable.Average throws InvalidOperationException on an empty
    // sequence; fail with a clear message instead if no output was produced.
    Assert.IsTrue(outTimes.Count > 0, "No output frames were generated");

    // Convert absolute event timestamps into per-run durations.
    var durations = new List<TimeSpan>();
    for (int i = 0; i < outTimes.Count; i++) // Count property, not LINQ Count() (CA1829)
    {
        durations.Add(outTimes[i] - (i != 0 ? outTimes[i - 1] : TimeSpan.Zero));
    }
    double avg = durations.Average(time => time.TotalMilliseconds);
    Logger.LogMessage($"Analyzer performance {avg}ms per run");
    Assert.IsTrue(avg < 5);
}
public void AudioAnalyzer_ProcessInput_IsFirstOutputFrameTimeCorrect()
{
    // A single input frame should yield a first output frame stamped at 1s
    // (value pinned by this test for the 1600/800/400 configuration at 48kHz).
    var analyzer = new AudioAnalyzer(1600, 2, 48000, 800, 400, 2048, false);
    RegisterOutputHandler(analyzer);

    analyzer.ProcessInput(inputFrame);

    Assert.AreEqual(TimeSpan.FromSeconds(1), outputFrames.First().Time);
}
public void AudioAnalyzer_ProcessInput_IsOutputFrameDurationCorrect()
{
    // One output frame covers 800 input samples at 48kHz = 166666.6 ticks,
    // truncated to 166666.
    var sut = new AudioAnalyzer(1600, 2, 48000, 800, 400, 2048, false);
    RegisterOutputHandler(sut);
    sut.ProcessInput(inputFrame);
    // Fixed: MSTest Assert.AreEqual takes (expected, actual); the original had
    // them swapped, which produces a misleading failure message.
    Assert.AreEqual(TimeSpan.FromTicks(166666), outputFrames.First().Duration);
}
public void AudioAnalyzer_ProcessInput_GeneratesOneOutputFrame()
{
    // Synchronous analyzer: a single input frame is expected to emit
    // exactly one output frame before ProcessInput returns.
    var analyzer = new AudioAnalyzer(1200, 2, 48000, 800, 400, 2048, false);
    RegisterOutputHandler(analyzer);

    analyzer.ProcessInput(inputFrame);

    Assert.AreEqual(1, outputFrames.Count);
}
public void AudioAnalyzer_ProcessInput_IsSpectrumNotNull()
{
    // The default analyzer configuration should populate the Spectrum
    // property of the emitted output frame.
    var analyzer = new AudioAnalyzer(1600, 2, 48000, 800, 400, 2048, false);
    RegisterOutputHandler(analyzer);

    analyzer.ProcessInput(inputFrame);

    Assert.IsNotNull(outputFrames.First().Spectrum);
}
public void AudioAnalyzer_ProcessInput_IsPeakCorrectForSawTooth()
{
    // With only peak analysis enabled, channel 1 of the sawtooth test input
    // should peak at 0.1 (amplitude of the shared inputFrame fixture).
    var analyzer = new AudioAnalyzer(1600, 2, 48000, 800, 400, 2048, false);
    RegisterOutputHandler(analyzer);
    analyzer.AnalyzerTypes = AnalyzerType.Peak;

    analyzer.ProcessInput(inputFrame);

    Assert.AreEqual(0.1f, outputFrames.First().Peak[1]);
}
public void AudioAnalyzer_ProcessInput_IsRmsCorrectForSawTooth()
{
    // RMS of a sawtooth of amplitude A is A/sqrt(3); channel 1 amplitude is
    // 0.1, so expect 0.1/sqrt(3) within a 2e-5 tolerance.
    var analyzer = new AudioAnalyzer(2400, 2, 48000, 800, 400, 2048, false);
    RegisterOutputHandler(analyzer);
    analyzer.AnalyzerTypes = AnalyzerType.RMS;

    analyzer.ProcessInput(inputFrame);

    Assert.AreEqual(0.1f / Math.Sqrt(3), outputFrames.First().RMS[1], 2e-5);
}
public void AudioAnalyzer_ProcessInput_AnalyzerTypeCleared_IsPeakNull()
{
    // When all analyzer types are disabled, the Peak field of the output
    // frame should remain null.
    var analyzer = new AudioAnalyzer(1600, 2, 48000, 800, 400, 2048, false);
    RegisterOutputHandler(analyzer);
    analyzer.AnalyzerTypes = AnalyzerType.None;

    analyzer.ProcessInput(inputFrame);

    Assert.IsNull(outputFrames.First().Peak);
}
public void AudioAnalyzer_ProcessInput_SetIsSuspendedStopsAsyncProcessing()
{
    // Async analyzer (last ctor arg true): suspending before input should
    // prevent any output frames from being produced.
    var analyzer = new AudioAnalyzer(1200, 2, 48000, 800, 400, 2048, true);
    RegisterOutputHandler(analyzer);
    analyzer.IsSuspended = true;

    analyzer.ProcessInput(inputFrame);

    Assert.AreEqual(0, outputFrames.Count);
}
public void AudioAnalyzer_ProcessInput_IsTimeCorrectAfterFlushWithSeed()
{
    // Flush with a seed position of 480000 samples (10s at 48kHz) should make
    // the first output frame report 10s.
    var sut = new AudioAnalyzer(1600, 2, 48000, 800, 400, 2048, false);
    RegisterOutputHandler(sut);
    sut.Flush(480000);
    sut.ProcessInput(inputFrame);
    // Fixed: MSTest Assert.AreEqual takes (expected, actual); the original had
    // them swapped, which produces a misleading failure message.
    Assert.AreEqual(TimeSpan.FromSeconds(10), outputFrames.First().Time);
}
public void AudioAnalyzer_ProcessInput_ResetIsSuspendedContinuesSyncProcessing()
{
    // Input queued while suspended should be processed synchronously as
    // soon as IsSuspended is cleared.
    var analyzer = new AudioAnalyzer(1200, 2, 48000, 800, 400, 2048, false);
    RegisterOutputHandler(analyzer);

    analyzer.IsSuspended = true;
    analyzer.ProcessInput(inputFrame);
    analyzer.IsSuspended = false;

    Assert.AreEqual(1, outputFrames.Count);
}
public void AudioAnalyzer_ProcessInput_IsTimeCorrectAfterFlushWithFrameTimeNotSet()
{
    // After an unseeded Flush, an input frame with no RelativeTime should
    // produce output stamped at time zero.
    var sut = new AudioAnalyzer(1200, 2, 48000, 800, 400, 2048, false);
    RegisterOutputHandler(sut);
    inputFrame.RelativeTime = null;
    sut.Flush();
    sut.ProcessInput(inputFrame);
    // Fixed: MSTest Assert.AreEqual takes (expected, actual); the original had
    // them swapped, which produces a misleading failure message.
    Assert.AreEqual(TimeSpan.Zero, outputFrames.First().Time);
}
public async Task AudioAnalyzer_ProcessInputAsync_GeneratesOneOutputFrame()
{
    // Async analyzer: output arrives on a worker thread, so allow a short
    // delay before asserting exactly one frame was emitted.
    var analyzer = new AudioAnalyzer(1600, 2, 48000, 800, 400, 2048, true);
    RegisterOutputHandler(analyzer);

    analyzer.ProcessInput(inputFrame);
    await Task.Delay(50);

    Assert.AreEqual(1, outputFrames.Count);
}
public async Task AudioAnalyzer_ProcessInput_ResetIsSuspendedContinuesAsyncProcessing()
{
    // Input queued while suspended should be processed on the background
    // thread once IsSuspended is cleared; wait briefly for the async output.
    var analyzer = new AudioAnalyzer(1600, 2, 48000, 800, 400, 2048, true);
    RegisterOutputHandler(analyzer);

    analyzer.IsSuspended = true;
    analyzer.ProcessInput(inputFrame);
    analyzer.IsSuspended = false;
    await Task.Delay(50);

    Assert.AreEqual(1, outputFrames.Count);
}
public void AudioAnalyzer_Buffer_OverlapAreaIsClear()
{
    // With overlap equal to the step (800/800), the leading overlap region
    // must start out zeroed: feeding pure silence should produce an
    // all-zero spectrum on both channels.
    var analyzer = new AudioAnalyzer(3200, 2, 48000, 800, 800, 2048, false);
    RegisterOutputHandler(analyzer);

    var silence = new AudioFrame(4 * 2 * 800);
    silence.Generate(2, 0, (frameIndex, channelIndex) => 0.0f);
    analyzer.ProcessInput(silence);

    var spectrum = outputFrames.First().Spectrum;
    Assert.AreEqual(0.0f, spectrum[0].Sum());
    Assert.AreEqual(0.0f, spectrum[1].Sum());
}
public void AudioAnalyzer_ProcessInput_AreShortFramesHandledCorrectly()
{
    // Simulate operation in AudioGraph - 10ms (480-sample) frames; with step 800
    // the cumulative output counts after each input should follow the expected
    // pattern below.
    var sut = new AudioAnalyzer(2400, 2, 48000, 800, 400, 2048, false);
    RegisterOutputHandler(sut);
    var frame = new AudioFrame(480 * 2 * sizeof(float));
    int[] expectedOutFrames = { 0, 1, 1, 2, 3, 3 };
    for (int i = 0; i < 6; i++)
    {
        sut.ProcessInput(frame);
        // Use the List<T>.Count property instead of LINQ Count() (CA1829).
        Assert.AreEqual(expectedOutFrames[i], outputFrames.Count, $"Pass {i}");
    }
}
void SetupSpectrum(int T)
{
    // Helper: run spectrum-only analysis over a test tone parameterized by T.
    // Channel 0 carries sin(PI * n / T), channel 1 is silent.
    // NOTE(review): sin(PI * n / T) actually has period 2T, not T as the
    // original comment implied — confirm intended meaning of T against callers.
    var analyzer = new AudioAnalyzer(4096, 2, 48000, 1024, 0, 2048, false);
    RegisterOutputHandler(analyzer);
    analyzer.AnalyzerTypes = AnalyzerType.Spectrum;

    var sineFrame = new AudioFrame(2048 * sizeof(float));
    sineFrame.Generate(2, 0, (frameIndex, channelIndex) =>
        channelIndex == 0 ? (float)Math.Sin(Math.PI * frameIndex / (double)T) : 0.0f);

    analyzer.ProcessInput(sineFrame);
}