Code Example #1
        // Token: 0x06004CA7 RID: 19623 RVA: 0x0019B4BC File Offset: 0x001998BC
        public static float[] DecompressAudio(byte[] data, int samples, int channels, bool threeD, BandMode mode, ICodec Codec, float gain)
        {
            int frequency = 4000;

            if (mode == BandMode.Narrow)
            {
                frequency = 8000;
            }
            else if (mode == BandMode.Wide)
            {
                frequency = 16000;
            }
            else if (mode == BandMode.UltraWide)
            {
                frequency = 32000;
            }
            else if (mode == BandMode.Opus48k)
            {
                frequency = 48000;
            }
            short[] array  = Codec.Decode(data, mode);
            float[] result = USpeakAudioClipConverter.ShortsToAudioData(array, channels, frequency, threeD, gain);
            USpeakPoolUtils.Return(array);
            return(result);
        }
Code Example #2
        /// <summary>
        /// Decompress the given encoded audio data
        /// </summary>
        /// <param name="data">The encoded audio</param>
        /// <param name="samples">The number of encoded samples</param>
        /// <param name="channels">The number of channels</param>
        /// <param name="threeD">Whether the audio is 3D</param>
        /// <param name="mode">The bandwidth mode used to encode the data</param>
        /// <param name="Codec">The codec to decode the data with</param>
        /// <param name="gain">The gain to apply to the decoded audio</param>
        /// <returns>32bit raw audio data</returns>
        public static float[] DecompressAudio(byte[] data, int samples, int channels, bool threeD, BandMode mode, ICodec Codec, float gain)
        {
            int frequency = 4000;

            if (mode == BandMode.Narrow)
            {
                frequency = 8000;
            }
            else if (mode == BandMode.Wide)
            {
                frequency = 16000;
            }

            byte[] d;
            //d = unzip( data );
            d = data;

            short[] pcm = Codec.Decode(d, mode);

            tmp.Clear();
            tmp.AddRange(pcm);

            USpeakPoolUtils.Return(pcm);

            return(USpeakAudioClipConverter.ShortsToAudioData(tmp.ToArray(), channels, frequency, threeD, gain));
        }
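A minimal usage sketch for the documented decompress path above; the variable names (receivedFrame, sampleCount, speexCodec) are placeholders rather than identifiers from the project, and the bandwidth mode must match whatever the encoder used.

        // Hypothetical usage sketch (placeholder names, not from the project):
        // decode one received frame back into 32-bit float samples.
        float[] pcm = USpeakAudioClipCompressor.DecompressAudio(
            receivedFrame,     // byte[] produced by the matching CompressAudioData call
            sampleCount,       // number of samples that were encoded
            1,                 // channels - voice data is mono
            false,             // threeD
            BandMode.Narrow,   // must match the mode used when encoding
            speexCodec,        // any ICodec implementation
            1.0f);             // gain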
Code Example #3
        // Token: 0x06003B05 RID: 15109 RVA: 0x00129978 File Offset: 0x00127D78
        public unsafe byte[] Encode(byte[] inputPcmSamples, int sampleLength, out int encodedLength)
        {
            if (this.disposed)
            {
                throw new ObjectDisposedException("OpusEncoder");
            }
            int frame_size = this.FrameCount(inputPcmSamples);

            byte[] @byte = USpeakPoolUtils.GetByte(this.MaxDataBytes);
            int    num;

            fixed(byte *value = (@byte != null && @byte.Length != 0)? @byte : null)
            {
                IntPtr data = new IntPtr((void *)value);

                num = API.opus_encode(this._encoder, inputPcmSamples, frame_size, data, sampleLength);
            }

            encodedLength = num;
            if (num < 0)
            {
                USpeakPoolUtils.Return(@byte);
                string str    = "Encoding failed - ";
                Errors errors = (Errors)num;
                throw new Exception(str + errors.ToString());
            }
            byte[] byte2 = USpeakPoolUtils.GetByte(encodedLength);
            Buffer.BlockCopy(@byte, 0, byte2, 0, encodedLength);
            USpeakPoolUtils.Return(@byte);
            return(byte2);
        }
Code Example #4
 // Token: 0x06004CA6 RID: 19622 RVA: 0x0019B47C File Offset: 0x0019987C
 public static byte[] CompressAudioData(float[] samples, int channels, BandMode mode, ICodec Codec, float gain = 1f)
 {
     short[] array  = USpeakAudioClipConverter.AudioDataToShorts(samples, channels, gain);
     byte[]  array2 = Codec.Encode(array, mode);
     USpeakPoolUtils.Return(array);
     byte[] array3 = new byte[array2.Length];
     Array.Copy(array2, array3, array2.Length);
     USpeakPoolUtils.Return(array2);
     return(array3);
 }
Code Example #5
 public static byte[] CompressAudioData(float[] samples, int channels, out int sample_count, BandMode mode, ICodec Codec, float gain = 1f)
 {
     USpeakAudioClipCompressor.data.Clear();
     sample_count = 0;
     short[] shorts   = USpeakAudioClipConverter.AudioDataToShorts(samples, channels, gain);
     byte[]  numArray = Codec.Encode(shorts, mode);
     USpeakPoolUtils.Return(shorts);
     USpeakAudioClipCompressor.data.AddRange(numArray);
     USpeakPoolUtils.Return(numArray);
     return(USpeakAudioClipCompressor.data.ToArray());
 }
Code Example #6
 private static void CopyStream(Stream input, Stream output)
 {
     byte[] num = USpeakPoolUtils.GetByte(32768);
     while (true)
     {
         int num1 = input.Read(num, 0, (int)num.Length);
         if (num1 <= 0)
         {
             break;
         }
         output.Write(num, 0, num1);
     }
     USpeakPoolUtils.Return(num);
 }
Code Example #7
 private static void CopyStream(Stream input, Stream output)
 {
     byte[] @byte = USpeakPoolUtils.GetByte(0x8000);
     while (true)
     {
         int count = input.Read(@byte, 0, @byte.Length);
         if (count <= 0)
         {
             break;
         }
         output.Write(@byte, 0, count);
     }
     USpeakPoolUtils.Return(@byte);
 }
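A short usage sketch for the pooled CopyStream helper above, assuming it is called from within the same class; payload is a placeholder byte array, not an identifier from the project.

 // Hypothetical usage sketch: copy a byte[] between two in-memory streams
 // using the pooled 32 KB buffer (0x8000 == 32768 in the other variant).
 using (var src = new System.IO.MemoryStream(payload))
 using (var dst = new System.IO.MemoryStream())
 {
     CopyStream(src, dst);
     byte[] copied = dst.ToArray();   // same contents as payload
 }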
Code Example #8
File: USpeaker.cs Project: sknchan/LegacyRust
 public void ReceiveAudio(byte[] data)
 {
     byte[] num = null;
     if (this.settings == null)
     {
         UnityEngine.Debug.LogWarning("Trying to receive remote audio data without calling InitializeSettings!\nIncoming packet will be ignored");
         return;
     }
     if (USpeaker.MuteAll || this.Mute || this.SpeakerMode == SpeakerMode.Local && !this.DebugPlayback)
     {
         return;
     }
     if (this.SpeakerMode == SpeakerMode.Remote)
     {
         this.talkTimer = 1f;
     }
     for (int i = 0; i < (int)data.Length; i = i + (int)num.Length)
     {
         int num1 = BitConverter.ToInt32(data, i);
         num = USpeakPoolUtils.GetByte(num1 + 6);
         Array.Copy(data, i, num, 0, (int)num.Length);
         USpeakFrameContainer uSpeakFrameContainer = new USpeakFrameContainer();
         uSpeakFrameContainer.LoadFrom(num);
         USpeakPoolUtils.Return(num);
         float[]  singleArray = USpeakAudioClipCompressor.DecompressAudio(uSpeakFrameContainer.encodedData, (int)uSpeakFrameContainer.Samples, 1, false, this.settings.bandMode, this.codecMgr.Codecs[this.Codec], USpeaker.RemoteGain);
         float    length      = (float)((int)singleArray.Length) / (float)this.audioFrequency;
         USpeaker uSpeaker    = this;
         uSpeaker.received = uSpeaker.received + (double)length;
         Array.Copy(singleArray, 0, this.receivedData, this.index, (int)singleArray.Length);
         USpeakPoolUtils.Return(singleArray);
         USpeaker length1 = this;
         length1.index = length1.index + (int)singleArray.Length;
         if (this.index >= base.audio.clip.samples)
         {
             this.index = 0;
         }
         base.audio.clip.SetData(this.receivedData, 0);
         if (!base.audio.isPlaying)
         {
             this.shouldPlay = true;
             if (this.playDelay <= 0f)
             {
                 this.playDelay = length * 2f;
             }
         }
     }
 }
Code Example #9
        public static float[] DecompressAudio(byte[] data, int samples, int channels, bool threeD, BandMode mode, ICodec Codec, float gain)
        {
            int num = 4000;

            if (mode == BandMode.Narrow)
            {
                num = 8000;
            }
            else if (mode == BandMode.Wide)
            {
                num = 16000;
            }
            short[] numArray = Codec.Decode(data, mode);
            USpeakAudioClipCompressor.tmp.Clear();
            USpeakAudioClipCompressor.tmp.AddRange(numArray);
            USpeakPoolUtils.Return(numArray);
            return(USpeakAudioClipConverter.ShortsToAudioData(USpeakAudioClipCompressor.tmp.ToArray(), channels, num, threeD, gain));
        }
Code Example #10
        public static float[] DecompressAudio(byte[] data, int samples, int channels, bool threeD, BandMode mode, ICodec Codec, float gain)
        {
            int frequency = 0xfa0;

            if (mode == BandMode.Narrow)
            {
                frequency = 0x1f40;
            }
            else if (mode == BandMode.Wide)
            {
                frequency = 0x3e80;
            }
            byte[]  buffer     = data;
            short[] collection = Codec.Decode(buffer, mode);
            tmp.Clear();
            tmp.AddRange(collection);
            USpeakPoolUtils.Return(collection);
            return(USpeakAudioClipConverter.ShortsToAudioData(tmp.ToArray(), channels, frequency, threeD, gain));
        }
Code Example #11
        /// <summary>
        /// Compress the given audio data
        /// </summary>
        /// <param name="samples">The raw 32-bit audio data</param>
        /// <param name="channels">The number of channels (nearly always 1)</param>
        /// <param name="sample_count">The number of samples that were encoded</param>
        /// <param name="mode">The chosen bandwidth mode (recording frequency)</param>
        /// <param name="Codec">The codec to encode the audio with</param>
        /// <param name="gain">The gain to apply to the audio</param>
        /// <returns>An encoded byte array</returns>
        public static byte[] CompressAudioData(float[] samples, int channels, out int sample_count, BandMode mode, ICodec Codec, float gain = 1.0f)
        {
            data.Clear();
            sample_count = 0;

            short[] b = USpeakAudioClipConverter.AudioDataToShorts(samples, channels, gain);

            byte[] mlaw = Codec.Encode(b, mode);

            USpeakPoolUtils.Return(b);

            data.AddRange(mlaw);

            USpeakPoolUtils.Return(mlaw);

            //byte[] zipped = zip( data.ToArray() );

            return(data.ToArray());
        }
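A minimal usage sketch for the documented compress path above; recordedChunk and speexCodec are placeholder names, not identifiers from the project.

        // Hypothetical usage sketch (placeholder names, not from the project):
        // compress one chunk of recorded 32-bit audio into codec bytes.
        int encodedSamples;
        byte[] encoded = USpeakAudioClipCompressor.CompressAudioData(
            recordedChunk,        // float[] read from the recording AudioClip
            1,                    // channels (nearly always 1)
            out encodedSamples,   // number of samples that were encoded
            BandMode.Narrow,      // chosen bandwidth mode (recording frequency)
            speexCodec,           // any ICodec implementation
            1.0f);                // gain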
Code Example #12
File: USpeaker.cs Project: Virobeast2/RCLIENT
 public void ReceiveAudio(byte[] data)
 {
     if (this.settings == null)
     {
         UnityEngine.Debug.LogWarning("Trying to receive remote audio data without calling InitializeSettings!\nIncoming packet will be ignored");
     }
     else if ((!MuteAll && !this.Mute) && ((this.SpeakerMode != SpeakerMode.Local) || this.DebugPlayback))
     {
         byte[] @byte;
         if (this.SpeakerMode == SpeakerMode.Remote)
         {
             this.talkTimer = 1f;
         }
         for (int i = 0; i < data.Length; i += @byte.Length)
         {
             @byte = USpeakPoolUtils.GetByte(BitConverter.ToInt32(data, i) + 6);
             Array.Copy(data, i, @byte, 0, @byte.Length);
             USpeakFrameContainer container = new USpeakFrameContainer();
             container.LoadFrom(@byte);
             USpeakPoolUtils.Return(@byte);
             float[] sourceArray = USpeakAudioClipCompressor.DecompressAudio(container.encodedData, container.Samples, 1, false, this.settings.bandMode, this.codecMgr.Codecs[this.Codec], RemoteGain);
             float   num3        = ((float)sourceArray.Length) / ((float)this.audioFrequency);
             this.received += num3;
             Array.Copy(sourceArray, 0, this.receivedData, this.index, sourceArray.Length);
             USpeakPoolUtils.Return(sourceArray);
             this.index += sourceArray.Length;
             if (this.index >= base.audio.clip.samples)
             {
                 this.index = 0;
             }
             base.audio.clip.SetData(this.receivedData, 0);
             if (!base.audio.isPlaying)
             {
                 this.shouldPlay = true;
                 if (this.playDelay <= 0f)
                 {
                     this.playDelay = num3 * 2f;
                 }
             }
         }
     }
 }
Code Example #13
File: SpeexCodec.cs Project: 602147629/GDGJ_Script
        private short[] SpeexDecode(byte[] input, BandMode mode)
        {
            NSpeex.SpeexDecoder speexDec = null;
            int shortLen = 320;

            switch (mode)
            {
            case BandMode.Narrow:
                speexDec = m_narrow_dec;
                shortLen = 320;
                break;

            case BandMode.Wide:
                speexDec = m_wide_dec;
                shortLen = 640;
                break;

            case BandMode.UltraWide:
                speexDec = m_ultrawide_dec;
                shortLen = 1280;
                break;
            }

            byte[] len_bytes = USpeakPoolUtils.GetByte(4);
            System.Array.Copy(input, len_bytes, 4);

            int dataLength = BitConverter.ToInt32(len_bytes, 0);

            USpeakPoolUtils.Return(len_bytes);

            byte[] actual_bytes = USpeakPoolUtils.GetByte(input.Length - 4);
            Buffer.BlockCopy(input, 4, actual_bytes, 0, input.Length - 4);

            short[] decoded = USpeakPoolUtils.GetShort(shortLen);

            speexDec.Decode(actual_bytes, 0, dataLength, decoded, 0, false);

            USpeakPoolUtils.Return(actual_bytes);

            return(decoded);
        }
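The Speex decode path above reads a 4-byte length (via BitConverter.ToInt32) from the start of the input and treats the remaining bytes as the encoded payload. The helper below is a hypothetical sketch of that framing, not code from the project.

        // Hypothetical framing helper matching what SpeexDecode expects:
        // [4-byte payload length][encoded Speex payload]
        private static byte[] PrefixWithLength(byte[] encoded)
        {
            byte[] framed = new byte[encoded.Length + 4];
            Buffer.BlockCopy(BitConverter.GetBytes(encoded.Length), 0, framed, 0, 4);
            Buffer.BlockCopy(encoded, 0, framed, 4, encoded.Length);
            return framed;
        }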
Code Example #14
File: OpusCodec.cs Project: Smoothstep/VRChat
    // Token: 0x06004C3B RID: 19515 RVA: 0x00197B6C File Offset: 0x00195F6C
    public short[] Decode(byte[] data, BandMode mode)
    {
        if (!this.isInitialized)
        {
            this.CreateEncoders();
        }
        if (mode != BandMode.Opus48k)
        {
            Debug.LogError(string.Concat(new string[]
            {
                "OpusCodec: Decode: bandwidth mode must be ",
                BandMode.Opus48k.ToString(),
                "! (set to ",
                mode.ToString(),
                ")"
            }));
        }
        int num = 0;

        byte[] array = this._decoder.Decode(data, (data == null) ? 0 : data.Length, out num);
        if (num != this._bytesPerSegment)
        {
            int num2 = (data == null) ? 0 : data.Length;
            Debug.LogError(string.Concat(new object[]
            {
                "OpusCodec: Decode failed! Output PCM data is ",
                num,
                " bbytes, expected ",
                this._bytesPerSegment,
                " (compressed packet size was ",
                num2,
                ")"
            }));
            USpeakPoolUtils.Return(array);
        }
        short[] @short = USpeakPoolUtils.GetShort(this._bytesPerSegment / 2);
        Buffer.BlockCopy(array, 0, @short, 0, this._bytesPerSegment);
        USpeakPoolUtils.Return(array);
        return(@short);
    }
Code Example #15
        // Token: 0x06004C40 RID: 19520 RVA: 0x00197DDC File Offset: 0x001961DC
        private short[] SpeexDecode(byte[] input, global::BandMode mode)
        {
            SpeexDecoder speexDecoder = null;
            int          length       = 320;

            if (mode != global::BandMode.Narrow)
            {
                if (mode != global::BandMode.Wide)
                {
                    if (mode == global::BandMode.UltraWide)
                    {
                        speexDecoder = this.m_ultrawide_dec;
                        length       = 1280;
                    }
                }
                else
                {
                    speexDecoder = this.m_wide_dec;
                    length       = 640;
                }
            }
            else
            {
                speexDecoder = this.m_narrow_dec;
                length       = 320;
            }
            byte[] @byte = USpeakPoolUtils.GetByte(4);
            Array.Copy(input, @byte, 4);
            int inCount = BitConverter.ToInt32(@byte, 0);

            USpeakPoolUtils.Return(@byte);
            byte[] byte2 = USpeakPoolUtils.GetByte(input.Length - 4);
            Buffer.BlockCopy(input, 4, byte2, 0, input.Length - 4);
            short[] @short = USpeakPoolUtils.GetShort(length);
            speexDecoder.Decode(byte2, 0, inCount, @short, 0, false);
            USpeakPoolUtils.Return(byte2);
            return(@short);
        }
Code Example #16
File: OpusDecoder.cs Project: Smoothstep/VRChat
        // Token: 0x06003AF6 RID: 15094 RVA: 0x001296A0 File Offset: 0x00127AA0
        public unsafe byte[] Decode(byte[] inputOpusData, int dataLength, out int decodedLength)
        {
            if (this.disposed)
            {
                throw new ObjectDisposedException("OpusDecoder");
            }
            byte[] @byte      = USpeakPoolUtils.GetByte(this.MaxDataBytes);
            int    frame_size = this.FrameCount(this.MaxDataBytes);
            int    num;

            fixed(byte *value = (@byte != null && @byte.Length != 0)? @byte : null)
            {
                IntPtr pcm = new IntPtr((void *)value);

                if (inputOpusData != null)
                {
                    num = API.opus_decode(this._decoder, inputOpusData, dataLength, pcm, frame_size, 0);
                }
                else
                {
                    num = API.opus_decode(this._decoder, null, 0, pcm, this.FrameCount(this._expectedBytesPerSegment), (!this.ForwardErrorCorrection) ? 0 : 1);
                }
            }

            decodedLength = num * 2;
            if (num < 0)
            {
                USpeakPoolUtils.Return(@byte);
                string str    = "Decoding failed - ";
                Errors errors = (Errors)num;
                throw new Exception(str + errors.ToString());
            }
            byte[] byte2 = USpeakPoolUtils.GetByte(decodedLength);
            Buffer.BlockCopy(@byte, 0, byte2, 0, decodedLength);
            USpeakPoolUtils.Return(@byte);
            return(byte2);
        }
Code Example #17
File: OpusCodec.cs Project: Smoothstep/VRChat
    // Token: 0x06004C3A RID: 19514 RVA: 0x00197A78 File Offset: 0x00195E78
    public byte[] Encode(short[] data, BandMode mode)
    {
        if (!this.isInitialized)
        {
            this.CreateEncoders();
        }
        if (mode != BandMode.Opus48k)
        {
            Debug.LogError(string.Concat(new string[]
            {
                "OpusCodec: Encode: bandwidth mode must be ",
                BandMode.Opus48k.ToString(),
                "! (set to ",
                mode.ToString(),
                ")"
            }));
        }
        if (data.Length != this._segmentFrames)
        {
            Debug.LogError(string.Concat(new object[]
            {
                "OpusCodec: Encode failed! Input PCM data is ",
                data.Length,
                " frames, expected ",
                this._segmentFrames
            }));
            return(new byte[0]);
        }
        byte[] @byte = USpeakPoolUtils.GetByte(data.Length * 2);
        Buffer.BlockCopy(data, 0, @byte, 0, data.Length * 2);
        int num = 0;

        byte[] result = this._encoder.Encode(@byte, this._bytesPerSegment, out num);
        USpeakPoolUtils.Return(@byte);
        return(result);
    }
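A hedged arithmetic sketch of the segment sizes the Opus encode/decode pair above works in; the actual frame duration is not visible in these examples, so the 20 ms below is only a common Opus choice used for illustration.

        // Assumption for illustration only: a 20 ms segment at 48 kHz (BandMode.Opus48k).
        int    sampleRate      = 48000;
        double segmentSeconds  = 0.020;                               // assumed, not shown above
        int    segmentFrames   = (int)(sampleRate * segmentSeconds);  // 960 shorts per segment
        int    bytesPerSegment = segmentFrames * sizeof(short);       // 1920 bytes (data.Length * 2)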
Code Example #18
    void Update()
    {
        // only update device list if this USpeaker is going to be recording
        if (SpeakerMode == SpeakerMode.Local)
        {
            // update microphone device list
            if (Time.time >= lastDeviceUpdate)
            {
                lastDeviceUpdate = Time.time + 2f;
                micDeviceList    = Microphone.devices;
            }
        }

        talkTimer -= Time.deltaTime;

        audio.volume = SpeakerVolume;

        if (last3DMode != _3DMode)
        {
            last3DMode = _3DMode;

            StopPlaying();
            audio.clip = AudioClip.Create("vc", audioFrequency * 10, 1, audioFrequency, (_3DMode == ThreeDMode.Full3D), false);
            audio.loop = true;
        }

        //speaker pan mode? Calculate it
        if (_3DMode == ThreeDMode.SpeakerPan)
        {
            Transform listener = Camera.main.transform;
            Vector3   side     = Vector3.Cross(listener.up, listener.forward);
            side.Normalize();

            float x = Vector3.Dot(transform.position - listener.position, side);
            float z = Vector3.Dot(transform.position - listener.position, listener.forward);

            float angle = Mathf.Atan2(x, z);

            float pan = Mathf.Sin(angle);

            audio.pan = pan;
        }

        // currently playing audio
        if (audio.isPlaying)
        {
            // last played time exceeded audio length - add play time
            if (lastTime > audio.time)
            {
                played += audio.clip.length;
            }

            // update last played time
            lastTime = audio.time;

            // we've played past the audio we received - stop playing and wait for more data
            if (played + audio.time >= received)
            {
                StopPlaying();
                shouldPlay = false;
            }
        }
        else
        {
            // should play audio? Play audio after countdown
            if (shouldPlay)
            {
                playDelay -= Time.deltaTime;

                if (playDelay <= 0)
                {
                    audio.Play();
                    // Debug.Log( "started playing at time: " + Time.time );
                }
            }
        }

        if (SpeakerMode == SpeakerMode.Remote)
        {
            return;
        }

        if (audioHandler == null)
        {
            return;
        }

        if (micDeviceList.Length == 0)
        {
            return;
        }
        else
        {
            if (string.IsNullOrEmpty(InputDeviceName))
            {
                InputDeviceName = currentDeviceName;
            }

            if (string.IsNullOrEmpty(currentDeviceName))
            {
                if (waitingToStartRec)
                {
                    micFoundDelay--;
                    if (micFoundDelay <= 0)
                    {
                        micFoundDelay     = 0;
                        waitingToStartRec = false;

                        print("New device found: " + currentDeviceName);
                        InputDeviceID     = 0;
                        InputDeviceName   = micDeviceList[0];
                        currentDeviceName = micDeviceList[0];

                        recording = Microphone.Start(currentDeviceName, true, 5, audioFrequency);

                        lastReadPos = 0;
                        sendBuffer.Clear();
                        recordedChunkCount = 0;

                        UpdateSettings();
                    }
                }
                else
                {
                    waitingToStartRec = true;
                    micFoundDelay     = 5;
                }
            }
            else
            {
                // switch to new device
                if (InputDeviceName != currentDeviceName)
                {
                    Microphone.End(currentDeviceName);
                    print("Using input device: " + InputDeviceName);
                    currentDeviceName = InputDeviceName;

                    recording = Microphone.Start(currentDeviceName, true, 5, audioFrequency);

                    lastReadPos = 0;
                    sendBuffer.Clear();
                    recordedChunkCount = 0;
                }

                // the device list changed
                if (micDeviceList[Mathf.Min(InputDeviceID, micDeviceList.Length - 1)] != currentDeviceName)
                {
                    // attempt to find the existing device
                    bool found = false;
                    for (int i = 0; i < Microphone.devices.Length; i++)
                    {
                        if (micDeviceList[i] == currentDeviceName)
                        {
                            InputDeviceID = i;
                            found         = true;
                        }
                    }

                    // existing device must have been unplugged, switch to the default audio device
                    if (!found)
                    {
                        InputDeviceID     = 0;
                        InputDeviceName   = micDeviceList[0];
                        currentDeviceName = micDeviceList[0];

                        print("Device unplugged, switching to: " + currentDeviceName);

                        recording = Microphone.Start(currentDeviceName, true, 5, audioFrequency);

                        lastReadPos = 0;
                        sendBuffer.Clear();
                        recordedChunkCount = 0;
                    }
                }
            }
        }

        if (lastBandMode != BandwidthMode || lastCodec != Codec)
        {
            UpdateSettings();

            lastBandMode = BandwidthMode;
            lastCodec    = Codec;
        }

        if (recording == null)
        {
            return;
        }

        int readPos = Microphone.GetPosition(currentDeviceName);

        int realReadPos = readPos + recording.samples * recordedChunkCount;

        if (realReadPos < lastReadPos)
        {
            recordedChunkCount++;
        }

        readPos += recording.samples * recordedChunkCount;

        if (readPos <= overlap)
        {
            return;
        }

        bool talkController_shouldSend = (talkController == null || talkController.ShouldSend());

        //read in the latest chunk(s) of audio
        try
        {
            int sz      = readPos - lastReadPos;
            int minSize = codecMgr.Codecs[Codec].GetSampleSize(audioFrequency);

            if (minSize == 0)
            {
                minSize = 100;
            }

            int currentIDX = lastReadPos;
            int numClips   = Mathf.FloorToInt(sz / minSize);

            for (int i = 0; i < numClips; i++)
            {
                float[] d = USpeakPoolUtils.GetFloat(minSize);

                recording.GetData(d, currentIDX % recording.samples);
                if (talkController_shouldSend)
                {
                    talkTimer = 1f;
                    OnAudioAvailable(d);
                }

                USpeakPoolUtils.Return(d);

                currentIDX += minSize;
            }

            lastReadPos = currentIDX;
        }
        catch (System.Exception) { }

        ProcessPendingEncodeBuffer();

        bool allowSend = true;

        if (SendingMode == SendBehavior.RecordThenSend && talkController != null)
        {
            allowSend = !talkController_shouldSend;
        }

        sendTimer += Time.deltaTime;
        if (sendTimer >= sendt && allowSend)
        {
            sendTimer = 0.0f;

            //flush the send buffer
            tempSendBytes.Clear();
            foreach (USpeakFrameContainer frame in sendBuffer)
            {
                tempSendBytes.AddRange(frame.ToByteArray());
            }
            sendBuffer.Clear();

            if (tempSendBytes.Count > 0)
            {
                // Debug.Log( "Sending at time: " + Time.time );
                audioHandler.USpeakOnSerializeAudio(tempSendBytes.ToArray());
            }
        }
    }
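A worked sketch of the chunking arithmetic in the recording branch above, using hypothetical numbers: only whole codec-sized chunks are encoded each frame, and the remainder waits for the next Update().

        // Hypothetical numbers: 1000 new samples available, codec sample size 320.
        int lastReadPos = 0, readPos = 1000, minSize = 320;
        int sz       = readPos - lastReadPos;            // 1000
        int numClips = Mathf.FloorToInt(sz / minSize);   // 3 full chunks get encoded
        int nextRead = lastReadPos + numClips * minSize; // 960; the last 40 samples wait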
Code Example #19
File: USpeaker.cs Project: sknchan/LegacyRust
    private void Update()
    {
        bool     value;
        int      num;
        USpeaker uSpeaker = this;

        uSpeaker.talkTimer = uSpeaker.talkTimer - Time.deltaTime;
        base.audio.volume  = this.SpeakerVolume;
        if (this.last3DMode != this._3DMode)
        {
            this.last3DMode = this._3DMode;
            this.StopPlaying();
            base.audio.clip = AudioClip.Create("vc", this.audioFrequency * 10, 1, this.audioFrequency, this._3DMode == ThreeDMode.Full3D, false);
            base.audio.loop = true;
        }
        if (this._3DMode == ThreeDMode.SpeakerPan)
        {
            Transform transforms = Camera.main.transform;
            Vector3   vector3    = Vector3.Cross(transforms.up, transforms.forward);
            vector3.Normalize();
            float single  = Vector3.Dot(base.transform.position - transforms.position, vector3);
            float single1 = Vector3.Dot(base.transform.position - transforms.position, transforms.forward);
            float single2 = Mathf.Sin(Mathf.Atan2(single, single1));
            base.audio.pan = single2;
        }
        if (base.audio.isPlaying)
        {
            if (this.lastTime > base.audio.time)
            {
                USpeaker uSpeaker1 = this;
                uSpeaker1.played = uSpeaker1.played + (double)base.audio.clip.length;
            }
            this.lastTime = base.audio.time;
            if (this.played + (double)base.audio.time >= this.received)
            {
                this.StopPlaying();
                this.shouldPlay = false;
            }
        }
        else if (this.shouldPlay)
        {
            USpeaker uSpeaker2 = this;
            uSpeaker2.playDelay = uSpeaker2.playDelay - Time.deltaTime;
            if (this.playDelay <= 0f)
            {
                base.audio.Play();
            }
        }
        if (this.SpeakerMode == SpeakerMode.Remote)
        {
            return;
        }
        if (this.audioHandler == null)
        {
            return;
        }
        if (this.devicesCached == null)
        {
            this.devicesCached = Microphone.devices;
            base.InvokeRepeating("RefreshDevices", 4.2f, 4.2f);
        }
        string[] strArrays = this.devicesCached;
        if ((int)strArrays.Length == 0)
        {
            return;
        }
        if (strArrays[Mathf.Min(USpeaker.InputDeviceID, (int)strArrays.Length - 1)] != this.currentDeviceName)
        {
            this.currentDeviceName = strArrays[Mathf.Min(USpeaker.InputDeviceID, (int)strArrays.Length - 1)];
            MonoBehaviour.print(string.Concat("Using input device: ", this.currentDeviceName));
            this.recording   = Microphone.Start(this.currentDeviceName, false, 21, this.audioFrequency);
            this.lastReadPos = 0;
        }
        if (this.lastBandMode != this.BandwidthMode || this.lastCodec != this.Codec)
        {
            this.UpdateSettings();
            this.lastBandMode = this.BandwidthMode;
            this.lastCodec    = this.Codec;
        }
        int position = Microphone.GetPosition(null);

        if (position >= this.audioFrequency * 20)
        {
            position         = 0;
            this.lastReadPos = 0;
            UnityEngine.Object.DestroyImmediate(this.recording);
            Microphone.End(null);
            this.recording = Microphone.Start(this.currentDeviceName, false, 21, this.audioFrequency);
        }
        if (position <= this.overlap)
        {
            return;
        }
        bool? nullable = null;

        try
        {
            int num1       = position - this.lastReadPos;
            int sampleSize = this.codecMgr.Codecs[this.Codec].GetSampleSize(this.audioFrequency);
            if (sampleSize == 0)
            {
                sampleSize = 100;
            }
            if (sampleSize != 0)
            {
                int num2 = this.lastReadPos;
                int num3 = Mathf.FloorToInt((float)(num1 / sampleSize));
                for (int i = 0; i < num3; i++)
                {
                    float[] singleArray = USpeakPoolUtils.GetFloat(sampleSize);
                    this.recording.GetData(singleArray, num2);
                    if (!nullable.HasValue)
                    {
                        bool? nullable1 = new bool?(this.talkController == null ? false : this.talkController.ShouldSend());
                        nullable = nullable1;
                        value    = nullable1.Value;
                    }
                    else
                    {
                        value = nullable.Value;
                    }
                    if (value)
                    {
                        this.talkTimer = 1f;
                        this.OnAudioAvailable(singleArray);
                    }
                    USpeakPoolUtils.Return(singleArray);
                    num2 = num2 + sampleSize;
                }
                this.lastReadPos = num2;
            }
            else
            {
                if (num1 > sampleSize)
                {
                    float[] singleArray1 = new float[num1 - 1];
                    this.recording.GetData(singleArray1, this.lastReadPos);
                    if (this.talkController == null || this.talkController.ShouldSend())
                    {
                        this.talkTimer = 1f;
                        this.OnAudioAvailable(singleArray1);
                    }
                }
                this.lastReadPos = position;
            }
        }
        catch (Exception exception)
        {
        }
        this.ProcessPendingEncodeBuffer();
        bool flag = true;

        if (this.SendingMode == SendBehavior.RecordThenSend && this.talkController != null)
        {
            if (!nullable.HasValue)
            {
                bool? nullable2 = new bool?(this.talkController.ShouldSend());
                nullable = nullable2;
                num      = (nullable2.Value ? 1 : 0);
            }
            else
            {
                num = (nullable.Value ? 1 : 0);
            }
            flag = num == 0;
        }
        USpeaker uSpeaker3 = this;

        uSpeaker3.sendTimer = uSpeaker3.sendTimer + Time.deltaTime;
        if (this.sendTimer >= this.sendt && flag)
        {
            this.sendTimer = 0f;
            this.tempSendBytes.Clear();
            foreach (USpeakFrameContainer uSpeakFrameContainer in this.sendBuffer)
            {
                this.tempSendBytes.AddRange(uSpeakFrameContainer.ToByteArray());
            }
            this.sendBuffer.Clear();
            if (this.tempSendBytes.Count > 0)
            {
                this.audioHandler.USpeakOnSerializeAudio(this.tempSendBytes.ToArray());
            }
        }
    }
Code Example #20
    /// <summary>
    /// Decode and buffer audio data to be played
    /// </summary>
    /// <param name="data">The data passed to USpeakOnSerializeAudio()</param>
    public void ReceiveAudio(byte[] data)
    {
        if (settings == null)
        {
            Debug.LogWarning("Trying to receive remote audio data without calling InitializeSettings!\nIncoming packet will be ignored");
            return;
        }

        if (MuteAll || Mute || (SpeakerMode == SpeakerMode.Local && !DebugPlayback))
        {
            return;
        }

        if (SpeakerMode == SpeakerMode.Remote)
        {
            talkTimer = 1.0f;
        }

        int offset = 0;

        while (offset < data.Length)
        {
            int    len   = System.BitConverter.ToInt32(data, offset);
            byte[] frame = USpeakPoolUtils.GetByte(len + 6);
            System.Array.Copy(data, offset, frame, 0, frame.Length);

            USpeakFrameContainer cont = default(USpeakFrameContainer);
            cont.LoadFrom(frame);

            USpeakPoolUtils.Return(frame);

            float[] sample = USpeakAudioClipCompressor.DecompressAudio(cont.encodedData, (int)cont.Samples, 1, false, settings.bandMode, codecMgr.Codecs[Codec], RemoteGain);

            float sampleTime = ((float)sample.Length / (float)audioFrequency);
            received += sampleTime;

            System.Array.Copy(sample, 0, receivedData, index, sample.Length);

            USpeakPoolUtils.Return(sample);

            // advance the write position into the audio clip
            index += sample.Length;

            // if the write position extends beyond the clip length, wrap around
            if (index >= audio.clip.samples)
            {
                index = 0;
            }

            // write received data to audio clip
            audio.clip.SetData(receivedData, 0);

            // not already playing audio, schedule audio to be played
            if (!audio.isPlaying)
            {
                shouldPlay = true;

                //Debug.Log( "Started receiving at time: " + Time.time );

                // no play delay set, advance play delay to allow more data to arrive (deal with network latency)
                if (playDelay <= 0)
                {
                    playDelay = sampleTime * 5f;
                }
            }

            offset += frame.Length;
        }
    }
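A sketch of the packet layout the loop above walks: each frame starts with a 32-bit value read via BitConverter.ToInt32 and occupies that many bytes plus 6 (the "+ 6" in the examples above). The splitter below is hypothetical and only illustrates that framing; packet is a placeholder name.

    // Hypothetical splitter illustrating the framing ReceiveAudio expects:
    // [frame 0][frame 1]... where each frame is (length value + 6) bytes long.
    private static System.Collections.Generic.List<byte[]> SplitFrames(byte[] packet)
    {
        var frames = new System.Collections.Generic.List<byte[]>();
        int offset = 0;
        while (offset < packet.Length)
        {
            int    len   = System.BitConverter.ToInt32(packet, offset);
            byte[] frame = new byte[len + 6];
            System.Array.Copy(packet, offset, frame, 0, frame.Length);
            frames.Add(frame);
            offset += frame.Length;
        }
        return frames;
    }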