Code Example #1
        private void CmdSendAudioSegment(byte[] payload)
        {
            var reader  = new UniStreamReader(payload);
            var index   = reader.ReadInt();
            var segment = reader.ReadFloatArray();

            // If a streamer already exists, stream the segment into it; otherwise create one first
            if (streamer)
            {
                streamer.Stream(index, segment);
            }
            else if (channels > 0)
            {
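                // segLen: samples per segment (mic frequency in Hz * segment length in ms / 1000)
                // segCap: number of segments per second (1000 ms / segment length)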
                var segLen = k_MicFrequency / 1000 * k_MicSegLenMS;
                var segCap = 1000 / k_MicSegLenMS;

                // Create an AudioBuffer using the Mic values
                AudioBuffer buffer = new AudioBuffer(
                    k_MicFrequency,
                    channels,
                    segLen,
                    segCap
                    );

                AudioSource source = GetComponent<AudioSource>();

                // Use the buffer to create a streamer
                streamer = AudioStreamerComponent.New(gameObject, buffer, source);

                streamer.Stream(index, segment);
            }

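            // Forward the raw payload on via RpcSendAudioSegment for the receiving side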
            RpcSendAudioSegment(payload);
        }
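A minimal caller-side sketch (not taken from the project) of how the payload this command expects could be built, using the same UniStreamWriter chaining that appears in Code Example #3; the index and segment variables are assumed to come from the microphone callback:

        // Serialize the segment index and samples in the order
        // CmdSendAudioSegment reads them back (int, then float[])
        byte[] payload = new UniStreamWriter()
                         .WriteInt(index)
                         .WriteFloatArray(segment)
                         .Bytes;
        CmdSendAudioSegment(payload);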
Code Example #2
File: Packet.cs Project: ymjoshi/UniVoice
        // ================================================
        // (DE)SERIALIZATION
        // ================================================
        public static Packet Deserialize(byte[] bytes)
        {
            UniStreamReader reader = new UniStreamReader(bytes);

            var packet = new Packet();

            try {
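                // Read the fields in order: sender, recipients, tag,
                // then the remaining bytes as the payload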
                packet.Sender     = reader.ReadShort();
                packet.Recipients = reader.ReadShortArray();
                packet.Tag        = reader.ReadString();
                packet.Payload    = reader.ReadBytes(bytes.Length - reader.Index);
            }
            catch (Exception e) {
                UnityEngine.Debug.LogError("Packet deserialization error: " + e.Message);
                packet = null;
            }

            return packet;
        }
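A short usage sketch for Deserialize, assuming receivedBytes arrives from the network layer; it mirrors how Code Example #3 unpacks an "audio" payload:

        // Hypothetical receiving-side usage: Deserialize returns null on failure
        Packet packet = Packet.Deserialize(receivedBytes);
        if (packet != null && packet.Tag == "audio")
        {
            var reader  = new UniStreamReader(packet.Payload);
            var index   = reader.ReadInt();
            var segment = reader.ReadFloatArray();
            // hand (index, segment) to an AudioStreamer for playback
        }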
Code Example #3
File: Voice.cs Project: ymjoshi/UniVoice
        void Run()
        {
            AudioBuffer   buffer;
            AudioStreamer streamer;

            // MIC SETUP
            Mic = Mic.Instance;
            Mic.StartRecording(k_MicFrequency, k_MicSegLenMS);

            var channels = Mic.Clip.channels;
            var segLen   = k_MicFrequency / 1000 * k_MicSegLenMS;
            var segCap   = 1000 / k_MicSegLenMS;

            // Create an AudioBuffer using the Mic values
            buffer = new AudioBuffer(
                k_MicFrequency,
                channels,
                segLen,
                segCap
                );

            // Use the buffer to create a streamer
            streamer = AudioStreamer.New(buffer, Source);

            // NETWORKING
            // On receiving a message from a peer, see if the tag is "audio", which denotes
            // that the data is an audio segment
            m_Node.OnGetPacket += delegate(ConnectionId cId, Packet packet, bool reliable) {
                var reader = new UniStreamReader(packet.Payload);
                switch (packet.Tag)
                {
                case "audio":
                    var index   = reader.ReadInt();
                    var segment = reader.ReadFloatArray();

                    if (OnGetVoiceSegment != null)
                    {
                        OnGetVoiceSegment(index, segment);
                    }

                    // Stream the received segment for playback; the streamer
                    // requests a packet skip if it is getting filled
                    streamer.Stream(index, segment);
                    break;
                }
            };

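            // Create a volume gate (used below to decide whether a segment is loud
            // enough to send) and a visualizer for it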
            gate = new VolumeGate(120, 1f, 5);
            VolumeGateVisualizer viz = VolumeGateVisualizer.New(gate);

            // When the microphone is ready with a segment
            Mic.OnSampleReady += (index, segment) => {
                // Early-exit checks: skip if the node is idle/uninitialized or the user is not speaking
                if (m_Node.NodeState == Node.State.Idle || m_Node.NodeState == Node.State.Uninitialized)
                {
                    return;
                }
                if (!Speaking)
                {
                    return;
                }

                // NOISE REMOVAL
                // Very primitive way to reduce the audio input noise
                // by averaging audio samples with a radius (radius value suggested: 2)
                int radius = 2;
                for (int i = radius; i < segment.Length - radius; i++)
                {
                    float temp = 0;
                    for (int j = i - radius; j <= i + radius; j++)
                    {
                        temp += segment[j];
                    }
                    segment[i] = temp / (2 * radius + 1);
                }

                if (!gate.Evaluate(segment))
                {
                    return;
                }

                // If Speaking is on, create a payload byte array for AirPeer
                // and send
                if (OnSendVoiceSegment != null)
                {
                    OnSendVoiceSegment(index, segment);
                }
                m_Node.Send(Packet.From(m_Node).WithTag("audio").WithPayload(
                                new UniStreamWriter()
                                .WriteInt(index)
                                .WriteFloatArray(segment)
                                .Bytes
                                ));
            };
        }
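The noise-removal loop in Run() is a centered moving average applied in place. Below is a self-contained sketch of the same technique; the SmoothSegment name is illustrative and not part of UniVoice:

        // Averages each sample with its neighbours within `radius`
        // (2 * radius + 1 samples total), leaving the first and last `radius`
        // samples untouched. Operates in place, like the loop in Run().
        static void SmoothSegment(float[] segment, int radius = 2)
        {
            for (int i = radius; i < segment.Length - radius; i++)
            {
                float sum = 0f;
                for (int j = i - radius; j <= i + radius; j++)
                {
                    sum += segment[j];
                }
                segment[i] = sum / (2 * radius + 1);
            }
        }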