/// <summary>
/// MediaStreamSource callback: renders the next synthesized audio buffer and
/// reports it as a media sample, advancing the running 100-ns timestamp.
/// Signals stream completion after 60 minutes of wall-clock playback.
/// </summary>
/// <param name="mediaStreamType">Requested stream type (only audio is produced here).</param>
protected override void GetSampleAsync(MediaStreamType mediaStreamType)
{
    //System.Diagnostics.Debug.WriteLine(">>>>>>>>>>>>>>GetSampleAsync.");
    // End the stream after one hour of elapsed wall-clock time.
    TimeSpan elapsed = DateTime.Now - startTime;
    System.Diagnostics.Debug.WriteLine("ts minutes " + elapsed.TotalMinutes);
    if (elapsed.TotalMinutes >= 60)
    {
        CallStreamComplete();
    }

    byte[] buffer = new byte[synth.BufferSize];
    synth.GetNext(buffer);

    using (var stream = new MemoryStream(buffer))
    {
        MediaStreamSample msSamp = new MediaStreamSample(
            _audioDesc,
            stream,
            0,
            synth.BufferSize,
            _currentTimeStamp,
            _emptySampleDict);

        // Move our timestamp and position forward (100-ns ticks).
        // BUG FIX: the multiply must be 64-bit — BufferSize * 10,000,000 as a
        // 32-bit product overflows Int32 for any buffer larger than ~214 bytes,
        // corrupting the timestamp. The 10000000L literal forces long arithmetic.
        _currentTimeStamp += synth.BufferSize * 10000000L / AvgBytesPerSec;
        ReportGetSampleCompleted(msSamp);
    }
}
/// <summary>
/// Unity audio callback: renders the next block of synthesized samples into
/// the reusable buffer, then writes them to Unity's output buffer with the
/// gain setting applied.
/// </summary>
private void OnAudioFilterRead(float[] data, int channels)
{
    //This uses the Unity specific float method we added to get the buffer
    midiStreamSynthesizer.GetNext(sampleBuffer);

    int count = data.Length;
    for (int n = 0; n < count; n++)
    {
        data[n] = sampleBuffer[n] * gain;
    }
}
/// <summary>
/// Unity audio callback: pulls the next synthesized block and copies it to
/// the output buffer scaled by gain. Each element write is independent, so
/// the copy order does not matter.
/// </summary>
private void OnAudioFilterRead(float[] data, int channels)
{
    midiStreamSynthesizer.GetNext(sampleBuffer);

    for (int idx = data.Length - 1; idx >= 0; idx--)
    {
        data[idx] = sampleBuffer[idx] * gain;
    }
}
/// <summary>
/// Tops up the synthesizer-rate sample ring buffer: keeps rendering blocks
/// from the MIDI synthesizer until the buffer is at least half full.
/// </summary>
private void SynthesizeMidiSamples()
{
    while (availableSamplesSynthesizerSampleRate.Size < availableSamplesSynthesizerSampleRate.Capacity / 2)
    {
        // Render one block, then append every sample to the ring buffer.
        midiStreamSynthesizer.GetNext(newSampleBuffer);
        foreach (float sample in newSampleBuffer)
        {
            availableSamplesSynthesizerSampleRate.PushBack(sample);
        }
    }
}
/// <summary>
/// Unity audio callback: renders the next synthesized block, writes it to the
/// output with gain applied, and advances the metronome to the sequencer's
/// current position. Exceptions are logged and rethrown so Unity surfaces them.
/// </summary>
private void OnAudioFilterRead(float[] data, int channels)
{
    try
    {
        midiStreamSynthesizer.GetNext(sampleBuffer);

        int total = data.Length;
        for (int s = 0; s < total; s++)
        {
            data[s] = sampleBuffer[s] * gain;
        }

        // Keep the metronome in sync with playback, compensated by the
        // loop and beat offsets.
        _metronome.SetElapsedTime(midiSequencer.Time + _loopOffset + _BeatOffset);
    }
    catch (Exception e)
    {
        Debug.LogError(e);
        throw; // rethrow without resetting the stack trace
    }
}
/// <summary>
/// Mixes the next block of synthesized MIDI audio into the interleaved output
/// buffer. One mono sample is rendered per frame and fed to every channel
/// using the a + b - a*b blend, which keeps the combined signal bounded when
/// both inputs are in [0, 1].
/// </summary>
/// <param name="data">Interleaved output buffer to mix into.</param>
/// <param name="channels">Number of interleaved channels in <paramref name="data"/>.</param>
public void Mix(float[] data, int channels)
{
    // A single mono sample per interleaved frame is enough.
    int sampleCount = data.Length / channels;
    midiStreamSynthesizer.GetNext(sampleBuffer, sampleCount);

    for (int i = 0; i < sampleCount; i++)
    {
        float b = sampleBuffer[i] * volume;
        for (int c = 0; c < channels; c++)
        {
            float a = data[i * channels + c];
            data[i * channels + c] = a + b - a * b;
        }
    }

    // BUG FIX: removed a dead FFT pass (Ernzo.DSP.FFT.Compute) that wrote its
    // results into two freshly allocated local arrays which were immediately
    // discarded — it burned CPU and generated GC garbage on the audio path
    // every call while contributing nothing to the output.
}
/// <summary>
/// Unity audio callback: writes the next synthesized block (with gain) to the
/// output buffer and, when the block is not pure silence, appends a copy of
/// it to the recorded data sequence.
/// </summary>
private void OnAudioFilterRead(float[] data, int channels)
{
    midiStreamSynthesizer.GetNext(sampleBuffer);

    float[] capture = new float[data.Length];
    bool isSilent = true;

    for (int i = 0; i < data.Length; i++)
    {
        float scaled = sampleBuffer[i] * gain;
        data[i] = scaled;
        capture[i] = scaled;

        // Any non-zero raw sample marks the block as audible.
        if (sampleBuffer[i] != 0)
        {
            isSilent = false;
        }
    }

    // Only audible blocks are kept for the recording.
    if (!isSilent)
    {
        dataSequence.Add(capture);
        bufferCount += capture.Length;
    }
}
// See http://unity3d.com/support/documentation/ScriptReference/MonoBehaviour.OnAudioFilterRead.html for reference code
/// <summary>
/// Unity audio callback stub: advances the synthesizer by one block into
/// sampleBuffer but never copies anything into <paramref name="data"/>.
/// NOTE(review): as written, this filter passes Unity's buffer through
/// unchanged — presumably the rendered samples are consumed elsewhere or the
/// copy-to-output step is intentionally omitted; confirm against callers.
/// </summary>
private void OnAudioFilterRead(float[] data, int channels)
{
    midiStreamSynthesizer.GetNext(sampleBuffer);
}