/// <summary>
/// Worker-thread loop: drains queued audio samples and feeds them to the
/// analyzer until the stop event becomes signaled.
/// </summary>
protected void SampleAnalyzerLoop()
{
    // WaitOne(0) is a non-blocking poll of the stop event.
    while (!sampleAnalyzerMustStop.WaitOne(0))
    {
        if (!ProTONEConfig.IsSignalAnalisysActive())
        {
            // Analysis disabled: idle cheaply instead of spinning.
            Thread.Sleep(1);
            continue;
        }

        AudioSample sample;
        if (samples.TryDequeue(out sample) && sample != null)
        {
            ExtractSamples(sample);
        }

        Thread.Yield();
    }
}
// ISampleGrabberCB Members

/// <summary>
/// DirectShow sample-grabber callback. Splits the delivered PCM buffer into
/// fixed-size chunks of <c>fraction</c> frames, time-stamps each chunk and
/// enqueues it for asynchronous analysis on the analyzer thread.
/// </summary>
/// <param name="sampleTime">Stream time (seconds) of the first sample in the buffer.</param>
/// <param name="pBuffer">Pointer to the raw unmanaged PCM data.</param>
/// <param name="bufferLen">Length of the buffer in bytes.</param>
/// <returns>Always 0 (S_OK).</returns>
public int BufferCB(double sampleTime, IntPtr pBuffer, int bufferLen)
{
    if (ProTONEConfig.IsSignalAnalisysActive())
    {
        try
        {
            // Guard against an empty or null buffer: without it a zero-length
            // AudioSample would be enqueued (and Marshal.Copy would fault on
            // a null pointer).
            if (bufferLen > 0 && pBuffer != IntPtr.Zero &&
                sampleGrabberConfigured.WaitOne(0) && _actualAudioFormat != null)
            {
                // Number of audio frames per analysis chunk.
                const int fraction = 128;

                // Snapshot the unmanaged buffer into managed memory.
                byte[] allBytes = new byte[bufferLen];
                Marshal.Copy(pBuffer, allBytes, 0, bufferLen);

                // Duration of one chunk (seconds) and its size in bytes.
                double chunkTimeLen = (double)fraction / (double)_actualAudioFormat.nSamplesPerSec;
                int chunkSize = fraction * _actualAudioFormat.nBlockAlign;

                int pos = 0;
                int i = 0;
                do
                {
                    // The final chunk may be shorter than chunkSize.
                    int size = Math.Min(chunkSize, bufferLen - pos);

                    AudioSample smp = new AudioSample();
                    smp.RawSamples = new byte[size];
                    smp.SampleTime = sampleTime + i * chunkTimeLen;
                    Array.Copy(allBytes, pos, smp.RawSamples, 0, size);

                    samples.Enqueue(smp);

                    pos += size;
                    i++;
                }
                while (pos < bufferLen);
            }
        }
        catch
        {
            // Deliberate best-effort: a failure here must never break playback;
            // only the visualization of this one buffer is lost.
        }
    }

    return 0;
}
/// <summary>
/// Synchronizes one raw audio chunk with the current media position, decodes
/// its interleaved PCM bytes into per-channel amplitudes and feeds the
/// waveform / VU and FFT analyzers.
/// </summary>
/// <param name="smp">Raw PCM chunk plus its stream timestamp; ignored when null.</param>
private void ExtractSamples(AudioSample smp)
{
    if (smp == null || _actualAudioFormat == null || mediaPosition == null)
        return;

    double mediaTime = 0;
    mediaPosition.get_CurrentPosition(out mediaTime);

    // Positive delay means the chunk is ahead of the playback clock (seconds).
    double delay = smp.SampleTime - mediaTime;

    // Sync the sample with playback by sleeping until its presentation time.
    // NOTE(review): Thread.Sleep has coarse (~15 ms) resolution on Windows, so
    // sync is approximate; an extended-resolution sleep was once considered:
    //ThreadScheduler.SleepEx(delay);
    // Only wait for sane delays (< 1 s); stale chunks (delay <= 0) are
    // processed immediately rather than discarded.
    if (delay > 0 && delay < 1)
        Thread.Sleep(TimeSpan.FromSeconds(delay));

    FilterState ms = GetFilterState();
    if (smp.RawSamples.Length <= 0 || ms != FilterState.Running || _actualAudioFormat == null)
        return;

    int bytesPerChannel = _actualAudioFormat.wBitsPerSample / 8;
    int totalChannels = _actualAudioFormat.nChannels;
    // The analyzers consume at most 2 channels (stereo).
    int totalChannelsInArray = Math.Min(2, totalChannels);

    int i = 0;
    while (i < smp.RawSamples.Length)
    {
        double[] channels = new double[totalChannelsInArray];
        Array.Clear(channels, 0, totalChannelsInArray);

        int j = 0;
        while (j < totalChannelsInArray)
        {
            // Assemble one little-endian PCM value byte by byte.
            int k = 0;
            while (k < bytesPerChannel)
            {
                if (bytesPerChannel <= 2)
                    // 8/16-bit: the short cast sign-extends the high byte.
                    channels[j] += (short)(smp.RawSamples[i] << (8 * k));
                else
                    // >16-bit path.
                    // NOTE(review): for 24-bit audio the top byte is not
                    // sign-extended here — verify against a 24-bit source.
                    channels[j] += (int)(smp.RawSamples[i] << (8 * k));

                i++;
                k++;
            }
            j++;
        }

        // Channel 0 = left, channel 1 = right; mono reports silence on right.
        if (channels.Length >= 2)
            _sampleData.Enqueue(new AudioSampleData((double)channels[0], (double)channels[1]));
        else
            _sampleData.Enqueue(new AudioSampleData((double)channels[0], 0));

        _gatheredSamples++;

        // Once per waveform window, analyze the most recent window of samples.
        if (_gatheredSamples % _waveformWindowSize == 0)
        {
            if (ProTONEConfig.SignalAnalisysFunctionActive(SignalAnalisysFunction.VUMeter) ||
                ProTONEConfig.SignalAnalisysFunctionActive(SignalAnalisysFunction.Waveform))
            {
                AnalyzeWaveform(_sampleData.Skip(_sampleData.Count - _waveformWindowSize).Take(_waveformWindowSize).ToArray(), smp.SampleTime);
            }
        }

        Thread.Yield();
    }

    // Trim the shared queue so it never grows beyond one FFT window.
    AudioSampleData lostSample = null;
    while (_sampleData.Count > _fftWindowSize)
        _sampleData.TryDequeue(out lostSample);

    if (ProTONEConfig.SignalAnalisysFunctionActive(SignalAnalisysFunction.Spectrogram))
    {
        AnalyzeFFT(_sampleData.ToArray());
    }

    Thread.Yield();
}
// ISampleGrabberCB Members

/// <summary>
/// DirectShow sample-grabber callback: slices the incoming PCM buffer into
/// 128-frame chunks and enqueues each one, time-stamped, for analysis.
/// </summary>
/// <param name="sampleTime">Stream time (seconds) of the buffer's first sample.</param>
/// <param name="pBuffer">Pointer to the raw unmanaged PCM data.</param>
/// <param name="bufferLen">Buffer length in bytes.</param>
/// <returns>Always 0 (S_OK).</returns>
public int BufferCB(double sampleTime, IntPtr pBuffer, int bufferLen)
{
    if (!ProTONEConfig.IsSignalAnalisysActive())
        return 0;

    try
    {
        if (sampleGrabberConfigured.WaitOne(0) && _actualAudioFormat != null)
        {
            // Frames per analysis chunk.
            const int fraction = 128;

            // Snapshot the unmanaged buffer into managed memory.
            byte[] raw = new byte[bufferLen];
            Marshal.Copy(pBuffer, raw, 0, bufferLen);

            double secondsPerChunk = (double)fraction / (double)_actualAudioFormat.nSamplesPerSec;
            int bytesPerChunk = fraction * _actualAudioFormat.nBlockAlign;

            int offset = 0;
            int chunkIndex = 0;
            do
            {
                // The final chunk may be shorter than bytesPerChunk.
                int count = Math.Min(bytesPerChunk, bufferLen - offset);

                AudioSample chunk = new AudioSample();
                chunk.RawSamples = new byte[count];
                chunk.SampleTime = sampleTime + chunkIndex * secondsPerChunk;
                Array.Copy(raw, offset, chunk.RawSamples, 0, count);
                samples.Enqueue(chunk);

                offset += count;
                chunkIndex++;
            }
            while (offset < bufferLen);
        }
    }
    catch
    {
        // Best-effort: visualization must never break playback.
    }

    return 0;
}
/// <summary>
/// Synchronizes one raw audio chunk with the playback clock, decodes its
/// interleaved PCM bytes into per-channel amplitudes and drives the
/// waveform / VU / FFT analyzers (including the WCF interface consumers).
/// </summary>
/// <param name="smp">Raw PCM chunk plus its stream timestamp; ignored when null.</param>
private void ExtractSamples(AudioSample smp)
{
    if (smp == null || _actualAudioFormat == null || mediaPosition == null)
    {
        return;
    }

    double mediaTime = 0;
    mediaPosition.get_CurrentPosition(out mediaTime);

    // Positive delay means the chunk is ahead of the playback clock (seconds).
    double delay = smp.SampleTime - mediaTime;
    double absDelay = Math.Abs(delay);

    // Discard samples too far in time from current media time
    if (absDelay > 1)
    {
        return;
    }

    //CalculateAverageDelay(delay * 1000);

    // Wait until the chunk's presentation time so the visualization stays in
    // sync with the audible output (delay is bounded to <= 1 s by the guard
    // above, so this sleep is short).
    if (delay > 0)
    {
        Thread.Sleep(TimeSpan.FromSeconds(delay));
    }

    FilterState ms = GetFilterState();
    if (smp.RawSamples.Length <= 0 || ms != FilterState.Running || _actualAudioFormat == null)
    {
        return;
    }

    int bytesPerChannel = _actualAudioFormat.wBitsPerSample / 8;
    int totalChannels = _actualAudioFormat.nChannels;
    // The analyzers consume at most 2 channels (stereo).
    int totalChannelsInArray = Math.Min(2, totalChannels);

    int i = 0;
    while (i < smp.RawSamples.Length)
    {
        double[] channels = new double[totalChannelsInArray];
        Array.Clear(channels, 0, totalChannelsInArray);

        int j = 0;
        while (j < totalChannelsInArray)
        {
            // Assemble one little-endian PCM value byte by byte.
            int k = 0;
            while (k < bytesPerChannel)
            {
                if (bytesPerChannel <= 2)
                {
                    // 8/16-bit: the short cast sign-extends the high byte.
                    channels[j] += (short)(smp.RawSamples[i] << (8 * k));
                }
                else
                {
                    // >16-bit path.
                    // NOTE(review): for 24-bit audio the top byte is not
                    // sign-extended here — verify against a 24-bit source.
                    channels[j] += (int)(smp.RawSamples[i] << (8 * k));
                }

                i++;
                k++;
            }

            j++;
        }

        if (channels.Length >= 2)
        {
            _sampleData.Enqueue(new AudioSampleData((double)channels[0], (double)channels[1]));
        }
        else
        {
            // Mono: right channel is reported as silence.
            _sampleData.Enqueue(new AudioSampleData((double)channels[0], 0));
        }

        _gatheredSamples++;

        // Once per waveform window, analyze the most recent window of samples.
        if (_gatheredSamples % _waveformWindowSize == 0)
        {
            if (ProTONEConfig.SignalAnalisysFunctionActive(SignalAnalisysFunction.VUMeter) ||
                ProTONEConfig.SignalAnalisysFunctionActive(SignalAnalisysFunction.Waveform) ||
                ProTONEConfig.SignalAnalisysFunctionActive(SignalAnalisysFunction.WCFInterface))
            {
                AnalyzeWaveform(_sampleData.Skip(_sampleData.Count - _waveformWindowSize).Take(_waveformWindowSize).ToArray(), smp.SampleTime);
            }
        }

        Thread.Yield();
    }

    // Trim the shared queue so it never grows beyond one FFT window.
    AudioSampleData lostSample = null;
    while (_sampleData.Count > _fftWindowSize)
    {
        _sampleData.TryDequeue(out lostSample);
    }

    if (ProTONEConfig.SignalAnalisysFunctionActive(SignalAnalisysFunction.Spectrogram) ||
        ProTONEConfig.SignalAnalisysFunctionActive(SignalAnalisysFunction.WCFInterface))
    {
        AnalyzeFFT(_sampleData.ToArray());
    }

    Thread.Yield();
}