/// <summary>
/// Decode and buffer audio data to be played
/// </summary>
/// <param name="data">The data passed to USpeakOnSerializeAudio()</param>
public void ReceiveAudio(byte[] data)
{
    if (settings == null)
    {
        Debug.LogWarning("Trying to receive remote audio data without calling InitializeSettings!\nIncoming packet will be ignored");
        // BUG FIX: the original fell through after logging and then
        // dereferenced settings.bandMode in the decode loop below,
        // throwing NullReferenceException. The warning promises the
        // packet is ignored, so actually ignore it.
        return;
    }
    // Drop the packet when muted, or when this is the local speaker and
    // debug playback is disabled.
    if (MuteAll || Mute || (SpeakerMode == SpeakerMode.Local && !DebugPlayback))
    {
        return;
    }
    if (SpeakerMode == SpeakerMode.Remote)
    {
        talkTimer = 1.0f;
    }
    // Each frame in the packet is a 4-byte length prefix followed by the
    // payload; +6 presumably covers the container header — TODO confirm
    // against USpeakFrameContainer.LoadFrom.
    int offset = 0;
    while (offset < data.Length)
    {
        int len = System.BitConverter.ToInt32(data, offset);
        byte[] frame = new byte[len + 6];
        System.Array.Copy(data, offset, frame, 0, frame.Length);
        USpeakFrameContainer cont = default(USpeakFrameContainer);
        cont.LoadFrom(frame);
        // Decompress the frame to an AudioClip and queue it for playback.
        playBuffer.Add(USpeakAudioClipCompressor.DecompressAudioClip(cont.encodedData, (int)cont.Samples, 1, false, settings.bandMode, RemoteGain));
        offset += frame.Length;
    }
}
// Token: 0x06004D0E RID: 19726 RVA: 0x0019D5EC File Offset: 0x0019B9EC
// Drains the queued-packet buffer, releasing each packet at a playback
// time derived from its frame index so output keeps pace with capture.
private void DispatchPackets()
{
    while (this._numQueuedPackets > 0)
    {
        // Hold everything until the initial buffering delay has elapsed.
        if (Time.realtimeSinceStartup - this._dispatchStartTime < this._dispatchStartDelay)
        {
            break;
        }
        int index = this.FindNextValidPacketIndex();
        byte[] source = this._queuedPackets[index];
        // Frame index is parsed from byte offset 4 of the packet —
        // presumably just past the 4-byte length prefix; TODO confirm
        // against USpeakFrameContainer's layout.
        int num = (int)USpeakFrameContainer.ParseFrameIndex(source, 4);
        if (this._firstPlaybackFrameIndex < 0)
        {
            // First packet of the stream: dispatch immediately and record
            // the baseline frame index and wall-clock time for pacing.
            this.DispatchPacketAtIndex(index);
            this._firstPlaybackFrameIndex = num;
            this._firstPacketDispatchTime = Time.realtimeSinceStartup;
        }
        else
        {
            // Milliseconds elapsed since the first dispatch.
            int num2 = Mathf.FloorToInt((Time.realtimeSinceStartup - this._firstPacketDispatchTime) * 1000f);
            // Frame distance from the baseline; indices are 16-bit and
            // wrap, hence the +65536 correction.
            int num3 = num - this._firstPlaybackFrameIndex;
            if (num3 < 0)
            {
                num3 += 65536;
            }
            // Target dispatch time for this frame, capped at 500 ms so a
            // long gap cannot stall dispatch indefinitely.
            int a = num3 * this.spk.DurationMs;
            int num4 = Mathf.Min(a, 500);
            if (num2 < num4)
            {
                // Too early for this frame; try again on a later call.
                break;
            }
            this.DispatchPacketAtIndex(index);
        }
    }
}
// Decodes every frame packed in `data`, schedules the resulting clips
// back-to-back at this transform's position, and returns the total
// playback length (scaled samples / 1000).
public ulong PlaySound(byte[] data)
{
    var clips = new List<AudioClip>();
    int cursor = 0;
    while (cursor < data.Length)
    {
        // 4-byte length prefix, then the frame payload (+6 header bytes).
        int payloadLen = System.BitConverter.ToInt32(data, cursor);
        byte[] frameBytes = new byte[payloadLen + 6];
        System.Array.Copy(data, cursor, frameBytes, 0, frameBytes.Length);
        USpeakFrameContainer container = default(USpeakFrameContainer);
        container.LoadFrom(frameBytes);
        clips.Add(USpeakAudioClipCompressor.DecompressAudioClip(container.encodedData, (int)container.Samples, 1, false, BandMode.Narrow, 1));
        cursor += frameBytes.Length;
    }
    // Queue each clip with an increasing delay so they play sequentially,
    // rescaling 8 kHz sample counts to the 44.1 kHz output rate.
    ulong startDelay = 0;
    ulong sampleTotal = 0;
    foreach (AudioClip clip in clips)
    {
        USpeakAudioManager.PlayClipAtPoint(clip, transform.position, startDelay, false);
        startDelay += (uint)((44100.0f / (float)8000) * ((uint)clip.samples));
        sampleTotal += (uint)clip.samples;
    }
    return ((ulong)(sampleTotal * (float)8000 / 44100.0f) / 1000);
}
// Compresses the raw PCM through the currently selected codec/band mode
// and queues the resulting frame for the next network flush.
private void ProcessPendingEncode(float[] pcm)
{
    int sampleCount;
    byte[] encoded = USpeakAudioClipCompressor.CompressAudioData(pcm, 1, out sampleCount, this.lastBandMode, this.codecMgr.Codecs[this.lastCodec], LocalGain);
    USpeakFrameContainer frame = default(USpeakFrameContainer);
    frame.Samples = (ushort)sampleCount;
    frame.encodedData = encoded;
    this.sendBuffer.Add(frame);
}
// Encodes the captured PCM and appends the frame to the send buffer.
// Frames are flushed from the buffer on a user-configurable timer to
// avoid flooding the network.
void ProcessPendingEncode(float[] pcm)
{
    int sampleCount;
    byte[] encoded = USpeakAudioClipCompressor.CompressAudioData(pcm, 1, out sampleCount, bandWidthMode, LocalGain);
    USpeakFrameContainer frame = new USpeakFrameContainer
    {
        Samples = (ushort)sampleCount,
        encodedData = encoded
    };
    sendBuffer.Add(frame);
}
// Compresses the raw PCM with the currently selected codec/band mode and
// queues the resulting frame on the send buffer.
private void ProcessPendingEncode(float[] pcm)
{
    int sampleCount;
    byte[] encoded = USpeakAudioClipCompressor.CompressAudioData(pcm, 1, out sampleCount, this.lastBandMode, this.codecMgr.Codecs[this.lastCodec], USpeaker.LocalGain);
    USpeakFrameContainer frame = default(USpeakFrameContainer);
    frame.Samples = (ushort)sampleCount;
    frame.encodedData = encoded;
    this.sendBuffer.Add(frame);
}
// Decodes every Speex frame packed in `data` (narrow band) and returns
// the concatenated PCM samples.
public static short[] GetDecodePcm(byte[] data)
{
    int offset = 0;
    var codec = new SpeexCodec();
    List<short> buffer = new List<short>();
    while (offset < data.Length)
    {
        USpeakFrameContainer cont = default(USpeakFrameContainer);
        // LoadFrom returns the number of bytes consumed by this frame.
        var l = cont.LoadFrom(data, offset);
        short[] pcm = codec.Decode(cont.encodedData, BandMode.Narrow);
        offset += l;
        // FIX: AddRange accepts the array directly; the original called
        // pcm.ToArray(), which allocated a needless extra copy of every
        // decoded frame via LINQ.
        buffer.AddRange(pcm);
    }
    return buffer.ToArray();
}
// Decodes an incoming voice packet and streams the PCM into the playback
// AudioClip's backing buffer, ring-buffer style, via this.index.
public void ReceiveAudio(byte[] data)
{
    byte[] num = null;
    if (this.settings == null)
    {
        UnityEngine.Debug.LogWarning("Trying to receive remote audio data without calling InitializeSettings!\nIncoming packet will be ignored");
        return;
    }
    // Drop the packet when globally/individually muted, or when this is
    // the local speaker and debug playback is off.
    if (USpeaker.MuteAll || this.Mute || this.SpeakerMode == SpeakerMode.Local && !this.DebugPlayback)
    {
        return;
    }
    if (this.SpeakerMode == SpeakerMode.Remote)
    {
        this.talkTimer = 1f;
    }
    // Walk the packet frame by frame: 4-byte length prefix, then payload.
    // +6 presumably covers the container header — TODO confirm against
    // USpeakFrameContainer.LoadFrom.
    for (int i = 0; i < (int)data.Length; i = i + (int)num.Length)
    {
        int num1 = BitConverter.ToInt32(data, i);
        num = USpeakPoolUtils.GetByte(num1 + 6);
        Array.Copy(data, i, num, 0, (int)num.Length);
        USpeakFrameContainer uSpeakFrameContainer = new USpeakFrameContainer();
        uSpeakFrameContainer.LoadFrom(num);
        // Pooled buffer is done once the container has loaded; only its
        // Length is read afterwards (by the loop increment).
        USpeakPoolUtils.Return(num);
        float[] singleArray = USpeakAudioClipCompressor.DecompressAudio(uSpeakFrameContainer.encodedData, (int)uSpeakFrameContainer.Samples, 1, false, this.settings.bandMode, this.codecMgr.Codecs[this.Codec], USpeaker.RemoteGain);
        // Seconds of audio in this frame; accumulated into `received`.
        float length = (float)((int)singleArray.Length) / (float)this.audioFrequency;
        USpeaker uSpeaker = this;
        uSpeaker.received = uSpeaker.received + (double)length;
        // Write the decoded PCM at the current cursor, then return the
        // pooled float[] (only its Length is read after this point).
        Array.Copy(singleArray, 0, this.receivedData, this.index, (int)singleArray.Length);
        USpeakPoolUtils.Return(singleArray);
        USpeaker length1 = this;
        length1.index = length1.index + (int)singleArray.Length;
        // Wrap the write cursor when it runs past the end of the clip.
        if (this.index >= base.audio.clip.samples)
        {
            this.index = 0;
        }
        base.audio.clip.SetData(this.receivedData, 0);
        // Not playing yet: flag playback, defaulting the start delay to
        // twice this frame's duration to absorb network jitter.
        if (!base.audio.isPlaying)
        {
            this.shouldPlay = true;
            if (this.playDelay <= 0f)
            {
                this.playDelay = length * 2f;
            }
        }
    }
}
// Decodes an incoming voice packet and streams the PCM into the playback
// AudioClip's backing buffer via the this.index write cursor.
public void ReceiveAudio(byte[] data)
{
    if (this.settings == null)
    {
        UnityEngine.Debug.LogWarning("Trying to receive remote audio data without calling InitializeSettings!\nIncoming packet will be ignored");
    }
    // Process only when not muted and either remote or in debug playback.
    else if ((!MuteAll && !this.Mute) && ((this.SpeakerMode != SpeakerMode.Local) || this.DebugPlayback))
    {
        byte[] @byte;
        if (this.SpeakerMode == SpeakerMode.Remote)
        {
            this.talkTimer = 1f;
        }
        // Walk the packet frame by frame: 4-byte length prefix, then the
        // payload. +6 presumably covers the container header — TODO
        // confirm against USpeakFrameContainer.LoadFrom.
        for (int i = 0; i < data.Length; i += @byte.Length)
        {
            @byte = USpeakPoolUtils.GetByte(BitConverter.ToInt32(data, i) + 6);
            Array.Copy(data, i, @byte, 0, @byte.Length);
            USpeakFrameContainer container = new USpeakFrameContainer();
            container.LoadFrom(@byte);
            // Pooled buffer is done once loaded; only its Length is read
            // afterwards (by the loop increment).
            USpeakPoolUtils.Return(@byte);
            float[] sourceArray = USpeakAudioClipCompressor.DecompressAudio(container.encodedData, container.Samples, 1, false, this.settings.bandMode, this.codecMgr.Codecs[this.Codec], RemoteGain);
            // Seconds of audio in this frame; accumulated into `received`.
            float num3 = ((float)sourceArray.Length) / ((float)this.audioFrequency);
            this.received += num3;
            // Write decoded PCM at the cursor, then return the pooled
            // float[] (only its Length is read after this point).
            Array.Copy(sourceArray, 0, this.receivedData, this.index, sourceArray.Length);
            USpeakPoolUtils.Return(sourceArray);
            this.index += sourceArray.Length;
            // Wrap the write cursor at the end of the clip.
            if (this.index >= base.audio.clip.samples)
            {
                this.index = 0;
            }
            base.audio.clip.SetData(this.receivedData, 0);
            // Not playing yet: flag playback, defaulting the start delay
            // to twice this frame's duration to absorb network jitter.
            if (!base.audio.isPlaying)
            {
                this.shouldPlay = true;
                if (this.playDelay <= 0f)
                {
                    this.playDelay = num3 * 2f;
                }
            }
        }
    }
}
// Called when new audio data is available from the microphone.
// Encodes the data and adds it to the send buffer; audio data is flushed
// from the send buffer on a user-configurable timer, to avoid flooding
// the network.
void OnAudioAvailable(float[] pcmData)
{
    // Wrap the raw PCM in a temporary AudioClip for the compressor.
    AudioClip temp = AudioClip.Create("temp", pcmData.Length, 1, audioFrequency, false, false);
    temp.SetData(pcmData, 0);
    int s;
    byte[] b = USpeakAudioClipCompressor.CompressAudioClip(temp, out s, BandwidthMode, LocalGain);
    // FIX: AudioClip.Create allocates engine-side audio memory that the
    // GC does not reclaim; the original never released it, leaking one
    // clip per microphone callback. Destroy it once compressed.
    Destroy(temp);
    USpeakFrameContainer cont = default(USpeakFrameContainer);
    cont.Samples = (ushort)s;
    cont.encodedData = b;
    sendBuffer.Add(cont);
}
/// <summary>
/// Decode and buffer audio data to be played
/// </summary>
/// <param name="data">The data passed to USpeakOnSerializeAudio()</param>
public void ReceiveAudio(byte[] data)
{
    // Settings must exist before we can read settings.bandMode below.
    if (settings == null)
    {
        Debug.LogWarning("Trying to receive remote audio data without calling InitializeSettings!\nIncoming packet will be ignored");
        return;
    }
    // Muted, or local speaker without debug playback: drop the packet.
    if (MuteAll || Mute || (SpeakerMode == SpeakerMode.Local && !DebugPlayback))
    {
        return;
    }
    if (SpeakerMode == SpeakerMode.Remote)
    {
        talkTimer = 1.0f;
    }
    // Walk the packet frame by frame: a 4-byte length prefix followed by
    // the payload (+6 covers the container header bytes).
    int cursor = 0;
    while (cursor < data.Length)
    {
        int payloadLength = System.BitConverter.ToInt32(data, cursor);
        byte[] frameBytes = USpeakPoolUtils.GetByte(payloadLength + 6);
        System.Array.Copy(data, cursor, frameBytes, 0, frameBytes.Length);
        USpeakFrameContainer container = default(USpeakFrameContainer);
        container.LoadFrom(frameBytes);
        USpeakPoolUtils.Return(frameBytes);
        float[] pcm = USpeakAudioClipCompressor.DecompressAudio(container.encodedData, (int)container.Samples, 1, false, settings.bandMode, codecMgr.Codecs[Codec], RemoteGain);
        // Track total seconds of audio received so far.
        float pcmDuration = ((float)pcm.Length / (float)audioFrequency);
        received += pcmDuration;
        // Write the decoded samples at the current cursor and hand the
        // pooled float[] back (only its Length is read afterwards).
        System.Array.Copy(pcm, 0, receivedData, index, pcm.Length);
        USpeakPoolUtils.Return(pcm);
        // Advance the clip write position, wrapping at the clip end.
        index += pcm.Length;
        if (index >= audio.clip.samples)
        {
            index = 0;
        }
        // Push the accumulated buffer into the playback clip.
        audio.clip.SetData(receivedData, 0);
        // Not already playing: schedule playback, and if no delay is set,
        // wait five frame-durations so more data can arrive (latency).
        if (!audio.isPlaying)
        {
            shouldPlay = true;
            //Debug.Log( "Started receiving at time: " + Time.time );
            if (playDelay <= 0)
            {
                playDelay = pcmDuration * 5f;
            }
        }
        cursor += frameBytes.Length;
    }
}