/// <summary>
/// Respond to capture event.
/// This should return as fast as possible.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Captured audio data; ignored when null or empty.</param>
public void HandleCapturedSamples(object sender, AudioDataEventArgs e)
{
    if (e.Samples == null || e.Samples.Length <= 0)
    {
        return;
    }

    // Outgoing sample rate is fixed at 44100 Hz throughout the pipeline.
    int sample_sec = 44100;
    short[] result = (_channels == 1)
        ? ResampleBuffer(e, sample_sec)
        : ResampleStereo(e, sample_sec);

    TimedSample ts = new TimedSample(result, e.CaptureTime);

    try
    {
        if (ProcessorQueue != null)
        {
            foreach (var item in ProcessorQueue)
            {
                item.ProcessSample(ts);
            }
        }
    }
    catch (Exception ex)
    {
        // FIX: was an empty catch. Keep the best-effort intent (a faulty
        // processor must not stall capture) but don't swallow silently.
        Console.WriteLine("Sample processor failed: " + ex.Message);
    }

    lock (_sampleBuffer)
    {
        _sampleBuffer.Add(ts);
    }
}
/// <summary>
/// Join the first two samples together, making a large first sample.
/// Resulting sample retains first sample's timecode.
/// </summary>
private void MergeFirstSample()
{
    lock (_sampleBuffer)
    {
        if (_sampleBuffer.Count < 2)
        {
            return; // fewer than two queued samples: nothing to merge.
        }

        var first = _sampleBuffer[0];
        var second = _sampleBuffer[1];

        // Concatenate the two payloads; the merged sample keeps the
        // earlier (first) timestamp.
        short[] merged = new short[first.Samples.Length + second.Samples.Length];
        Array.Copy(first.Samples, 0, merged, 0, first.Samples.Length);
        Array.Copy(second.Samples, 0, merged, first.Samples.Length, second.Samples.Length);

        _sampleBuffer.RemoveRange(0, 2);
        _sampleBuffer.Insert(0, new TimedSample(merged, first.Seconds));
    }
}
/// <summary>
/// Add unused samples back into the sample buffer.
/// </summary>
/// <remarks>Time stamps need to be properly re-integrated!</remarks>
/// <param name="Frame">The frame just unloaded; must be the most recently loaded frame.</param>
/// <exception cref="InvalidOperationException">Thrown when the frame does not match the head of the buffer.</exception>
private void PrependUnusedSamples(MediaFrame Frame)
{
    if (_sampleBuffer == null)
    {
        return;
    }

    // FIX: the original read and mutated _sampleBuffer outside the lock while
    // other methods (HandleCapturedSamples, MergeFirstSample) always lock it.
    // Hold the lock for the whole read-modify-write sequence. Locks are
    // re-entrant, so the nested lock in MergeFirstSample is safe.
    lock (_sampleBuffer)
    {
        _sampleBuffer.RemoveAll(a => a == null); // clean out bad transfers
        if (_sampleBuffer.Count < 1)
        {
            return;
        }

        if ((ulong)_sampleBuffer[0].Samples.LongLength != Frame.AudioSize)
        {
            // Wrong frame! (Was a bare Exception; InvalidOperationException is
            // still caught by existing catch (Exception) handlers.)
            throw new InvalidOperationException("Frames unloaded out-of-sync. Frames must be loaded then unloaded in order and one-at-a-time!");
        }

        // Build new truncated sample holding only the unconsumed tail:
        ulong new_sample_count = Frame.AudioSize - Frame.AudioSamplesConsumed;
        if (new_sample_count < 1)
        {
            _sampleBuffer.RemoveAt(0); // fully consumed; drop it.
            return;
        }

        short[] cut = new short[new_sample_count]; // pun intended ;-)
        Array.Copy(_sampleBuffer[0].Samples, (long)Frame.AudioSamplesConsumed, cut, 0, (long)new_sample_count);

        // Advance the timestamp by the consumed duration (44100 Hz output rate).
        double new_time_stamp = Frame.AudioSampleTime + (Frame.AudioSamplesConsumed / 44100.0);

        // Over-write the old sample with the new, shorter version, then keep
        // the buffer ordered by timestamp.
        _sampleBuffer[0] = new TimedSample(cut, new_time_stamp);
        _sampleBuffer.Sort((a, b) => a.Seconds.CompareTo(b.Seconds));

        // Join the first two samples if the first is smaller than 4 frames.
        if (_sampleBuffer.Count >= 2 && _sampleBuffer[0].Samples.Length < 4608)
        {
            MergeFirstSample();
        }
    }
}
/// <summary>
/// Load the buffer into a MediaFrame for the encoder.
/// IMPORTANT: You must call UnloadFrame after this method is called.
/// For efficiency, unload as soon as possible.
/// </summary>
/// <param name="Frame">Frame to receive the pinned audio buffer.</param>
public void LoadToFrame(ref MediaFrame Frame)
{
    try
    {
        TimedSample ts = null;
        lock (_sampleBuffer)
        {
            if (_sampleBuffer.Count < 1)
            {
                ClearFrameAudio(ref Frame);
                return;
            }

            _sampleBuffer.RemoveAll(a => a == null);

            // FIX: RemoveAll may have emptied the buffer; the original then
            // indexed _sampleBuffer[0] and relied on the catch block.
            if (_sampleBuffer.Count < 1)
            {
                ClearFrameAudio(ref Frame);
                return;
            }

            _sampleBuffer.Sort((a, b) => a.Seconds.CompareTo(b.Seconds));

            if (_sampleBuffer[0].Samples.Length < FrameSize)
            {
                MergeFirstSample(); // Make sure frames are large enough!
            }

            if (_sampleBuffer.Count > 0)
            {
                ts = _sampleBuffer[0];
            }
            else
            {
                ClearFrameAudio(ref Frame);
                return;
            }
        }

        _samples = ts.Samples;
        Frame.AudioSampleTime = ts.Seconds;
        Frame.AudioSize = (ulong)_samples.LongLength;
        Frame.AudioSamplesConsumed = 0;

        // Outgoing sample rate is always 44100, to support iPhone.
        // This is used to correct timing on the encoder.
        Frame.AudioSampleRate = 44100;

        // Pin the managed array so native code can read it until UnloadFrame.
        _pinSamples = GCHandle.Alloc(_samples, GCHandleType.Pinned);
        Frame.AudioBuffer = _pinSamples.AddrOfPinnedObject();
    }
    catch (Exception ex)
    {
        UnloadFrame(ref Frame);
        Console.WriteLine("Loading audio frame failed: " + ex.Message);
    }
}

/// <summary>
/// Reset a frame's audio fields to a consistent empty state.
/// FIX: the original empty-buffer exits each cleared a different subset of
/// these fields, leaving AudioBuffer or AudioSamplesConsumed stale.
/// </summary>
private static void ClearFrameAudio(ref MediaFrame Frame)
{
    Frame.AudioSize = 0UL;
    Frame.AudioSamplesConsumed = 0;
    Frame.AudioBuffer = IntPtr.Zero;
}
/// <summary>
/// Called by plug-in host (encoder)
/// </summary>
/// <param name="Sample">Captured audio to feed into the tone detector.</param>
public void ProcessSample(TimedSample Sample)
{
    float[] resampled = ResampleForTone(Sample.Samples);
    for (int i = 0; i < resampled.Length; i++)
    {
        // Fade the old signal (reduces response time), then mix in the new.
        osc[osc_pos] = osc[osc_pos] * decay + resampled[i] * gain;
        osc_pos = (osc_pos + 1) % osc.Length;

        // Oscillator window just wrapped: run the detection pipeline on it.
        if (osc_pos == 0)
        {
            // Prepare
            BalanceOscillator();
            FilterOscillator();
            // Tone detection
            DetectTone();
        }
    }
}
/// <summary>
/// Force a timed frame into the encoder's buffers.
/// May cause unexpected operation. Use with caution!
/// </summary>
/// <param name="AudioFrame">Sample to inject into the audio buffer pipeline.</param>
/// <exception cref="InvalidOperationException">Thrown when no audio buffer has been initialised.</exception>
public void ForceInsertFrame(TimedSample AudioFrame)
{
    // FIX: guard clause with a specific exception type instead of a bare
    // Exception (still caught by any existing catch (Exception) handler).
    if (AudioBuffers == null)
    {
        throw new InvalidOperationException("Can't send audio frame to uninitialised buffer. Please include an audio device in your config.");
    }

    AudioBuffers.HandleCapturedSamples(this, new AudioDataEventArgs()
    {
        Samples = AudioFrame.Samples,
        CaptureTime = AudioFrame.Seconds
    });
}
/// <summary>
/// Respond to capture event.
/// This should return as fast as possible.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Captured audio data; ignored when null or empty.</param>
public void HandleCapturedSamples(object sender, AudioDataEventArgs e)
{
    if (e.Samples == null || e.Samples.Length <= 0)
    {
        return;
    }

    // Outgoing sample rate is fixed at 44100 Hz throughout the pipeline.
    int sample_sec = 44100;
    short[] result = (_channels == 1)
        ? ResampleBuffer(e, sample_sec)
        : ResampleStereo(e, sample_sec);

    TimedSample ts = new TimedSample(result, e.CaptureTime);

    try
    {
        if (ProcessorQueue != null)
        {
            foreach (var item in ProcessorQueue)
            {
                item.ProcessSample(ts);
            }
        }
    }
    catch (Exception ex)
    {
        // FIX: was an empty catch. Keep the best-effort intent (a faulty
        // processor must not stall capture) but don't swallow silently.
        Console.WriteLine("Sample processor failed: " + ex.Message);
    }

    lock (_sampleBuffer)
    {
        _sampleBuffer.Add(ts);
    }
}
/// <summary>
/// Add unused samples back into the sample buffer.
/// </summary>
/// <remarks>Time stamps need to be properly re-integrated!</remarks>
/// <param name="Frame">The frame just unloaded; must be the most recently loaded frame.</param>
/// <exception cref="InvalidOperationException">Thrown when the frame does not match the head of the buffer.</exception>
private void PrependUnusedSamples(MediaFrame Frame)
{
    if (_sampleBuffer == null)
    {
        return;
    }

    // FIX: the original read and mutated _sampleBuffer outside the lock while
    // other methods (HandleCapturedSamples, MergeFirstSample) always lock it.
    // Hold the lock for the whole read-modify-write sequence. Locks are
    // re-entrant, so the nested lock in MergeFirstSample is safe.
    lock (_sampleBuffer)
    {
        _sampleBuffer.RemoveAll(a => a == null); // clean out bad transfers
        if (_sampleBuffer.Count < 1)
        {
            return;
        }

        if ((ulong)_sampleBuffer[0].Samples.LongLength != Frame.AudioSize)
        {
            // Wrong frame! (Was a bare Exception; InvalidOperationException is
            // still caught by existing catch (Exception) handlers.)
            throw new InvalidOperationException("Frames unloaded out-of-sync. Frames must be loaded then unloaded in order and one-at-a-time!");
        }

        // Build new truncated sample holding only the unconsumed tail:
        ulong new_sample_count = Frame.AudioSize - Frame.AudioSamplesConsumed;
        if (new_sample_count < 1)
        {
            _sampleBuffer.RemoveAt(0); // fully consumed; drop it.
            return;
        }

        short[] cut = new short[new_sample_count]; // pun intended ;-)
        Array.Copy(_sampleBuffer[0].Samples, (long)Frame.AudioSamplesConsumed, cut, 0, (long)new_sample_count);

        // Advance the timestamp by the consumed duration (44100 Hz output rate).
        double new_time_stamp = Frame.AudioSampleTime + (Frame.AudioSamplesConsumed / 44100.0);

        // Over-write the old sample with the new, shorter version, then keep
        // the buffer ordered by timestamp.
        _sampleBuffer[0] = new TimedSample(cut, new_time_stamp);
        _sampleBuffer.Sort((a, b) => a.Seconds.CompareTo(b.Seconds));

        // Join the first two samples if the first is smaller than 4 frames.
        if (_sampleBuffer.Count >= 2 && _sampleBuffer[0].Samples.Length < 4608)
        {
            MergeFirstSample();
        }
    }
}
/// <summary>
/// Join the first two samples together, making a large first sample.
/// Resulting sample retains first sample's timecode.
/// </summary>
private void MergeFirstSample()
{
    lock (_sampleBuffer)
    {
        if (_sampleBuffer.Count < 2)
        {
            return; // fewer than two queued samples: nothing to merge.
        }

        TimedSample head = _sampleBuffer[0];

        // Merged payload keeps the earlier (head) timestamp.
        short[] combined = head.Samples.Concat(_sampleBuffer[1].Samples).ToArray();

        _sampleBuffer.RemoveRange(0, 2);
        _sampleBuffer.Insert(0, new TimedSample(combined, head.Seconds));
    }
}