Example #1
        public override void StartRecording()
        {
            if (!Application.HasUserAuthorization(UserAuthorization.Microphone))
            {
                Debug.LogWarning("StartRecording(): Webplayer microphone access denied");
                return;
            }

            device = DefaultMicrophone;

            prevReadPosition = 0;

            this.recordingFrequency = AudioUtils.GetFrequency(Mode);

            int min, max;

            Microphone.GetDeviceCaps(device, out min, out max);

            // GetDeviceCaps reports zero when the device supports any sample rate;
            // fall back to 48 kHz in that case.
            if (max == 0)
            {
                max = 48000;
            }
            //if( max == 0 ) max = 16000;

            // Clamp the requested rate to what the device supports, then start a
            // looping five-second recording clip at that frequency.
            int frequency = Mathf.Clamp(this.recordingFrequency, min, max);

            resampleBuffer = new BigArray <float>(ChunkSize, 0);
            recordedAudio  = Microphone.Start(device, true, 5, frequency);
        }
Example #2
        protected void bufferReady(BigArray <float> newData, int frequency)
        {
            if (OnAudioBufferReady != null)
            {
                OnAudioBufferReady(newData, frequency);
            }
        }
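bufferReady only raises the OnAudioBufferReady event. A minimal subscriber sketch, assuming the event delegate matches the (BigArray<float>, int) call site above; recorder and HandleAudioBuffer are hypothetical names, not library API:

        // Hypothetical subscriber; "recorder" stands for whatever component exposes OnAudioBufferReady.
        void OnEnable()
        {
            recorder.OnAudioBufferReady += HandleAudioBuffer;
        }

        void OnDisable()
        {
            recorder.OnAudioBufferReady -= HandleAudioBuffer;
        }

        void HandleAudioBuffer(BigArray <float> newData, int frequency)
        {
            // e.g. hand the chunk to a codec or a playback buffer here
            Debug.Log("Received " + newData.Length + " samples at " + frequency + " Hz");
        }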
Example #3
        public static void Resample(BigArray <float> samples, int oldFrequency, int newFrequency)
        {
            if (oldFrequency == newFrequency)
            {
                return;
            }

            temp.Clear();
            float ratio     = (float)oldFrequency / (float)newFrequency;
            int   outSample = 0;

            // Nearest-neighbour resampling: map each output sample back to the
            // closest earlier input sample until the source buffer runs out.
            while (true)
            {
                int inBufferIndex = (int)(outSample++ * ratio);
                if (inBufferIndex < samples.Length)
                {
                    temp.Add(samples[inBufferIndex]);
                }
                else
                {
                    break;
                }
            }

            // Resize is in elements; CopyFrom length is in bytes, hence the * 4 (sizeof(float)).
            samples.Resize(temp.Count);
            samples.CopyFrom(temp.Items, 0, 0, temp.Count * 4);
        }
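This is a nearest-neighbour resampler: every output sample reuses the closest earlier input sample, so downsampling 48 kHz to 16 kHz keeps every third sample and upsampling repeats samples. A stand-alone sketch of the same mapping on a plain float[], for illustration only (it is not part of the library):

        // Stand-alone sketch of the nearest-neighbour resampler above, using List<float>
        // instead of the library's BigArray/temp buffer.
        public static float[] ResampleSketch(float[] samples, int oldFrequency, int newFrequency)
        {
            if (oldFrequency == newFrequency)
            {
                return samples;
            }

            var   output = new System.Collections.Generic.List <float>();
            float ratio  = (float)oldFrequency / (float)newFrequency;

            // Each output sample reuses the nearest earlier input sample.
            for (int outSample = 0; (int)(outSample * ratio) < samples.Length; outSample++)
            {
                output.Add(samples[(int)(outSample * ratio)]);
            }

            return output.ToArray();
        }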
Example #4
        public SpeexCodec(bool VBR)
        {
            // One codec wrapper per Speex band mode:
            // 8 kHz narrowband, 16 kHz wideband, 32 kHz ultra-wideband.
            encoders = new Dictionary <int, codecWrapper>()
            {
                { 8000, new codecWrapper(NSpeex.BandMode.Narrow, VBR) },
                { 16000, new codecWrapper(NSpeex.BandMode.Wide, VBR) },
                { 32000, new codecWrapper(NSpeex.BandMode.UltraWide, VBR) },
            };

            chunkBuffer       = new ChunkBuffer();
            tempOutputArray   = new BigArray <float>(1024, 0);
            tempPacketWrapper = new VoicePacketWrapper(0, 16, new byte[0]);
        }
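The constructor keys one codecWrapper per Speex band mode by sample rate. A hedged sketch of how an encode path might pick the wrapper; SelectEncoder is a hypothetical helper, not library API:

        // Hypothetical lookup over the "encoders" dictionary built above. Speex only
        // defines band modes for 8/16/32 kHz input, so any other rate is rejected.
        private codecWrapper SelectEncoder(int frequency)
        {
            codecWrapper wrapper;
            if (!encoders.TryGetValue(frequency, out wrapper))
            {
                throw new System.ArgumentException(
                    "Unsupported Speex sample rate: " + frequency + " Hz (expected 8000, 16000 or 32000)");
            }
            return wrapper;
        }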
Example #5
        public override void StartRecording()
        {
            // Read the raw PCM out of a pre-recorded test clip instead of the microphone.
            float[] data = new float[testClip.samples * testClip.channels];
            testClip.GetData(data, 0);

            BigArray <float> d = new BigArray <float>(data.Length, 0);

            d.Resize(data.Length);
            d.CopyFrom(data, 0, 0, data.Length * 4);

            //AudioUtils.Resample( d, testClip.frequency, AudioUtils.GetFrequency( ResampleFrequency ) );

            //bufferReady( d, AudioUtils.GetFrequency( ResampleFrequency ) );
            // Hand the clip to the pipeline in testClip.frequency-sized chunks, one chunk per second.
            StartCoroutine(yieldChunks(d, testClip.frequency, 1f));
        }
Example #6
        public void BufferAudio(BigArray <float> audioData)
        {
            if (GetComponent <AudioSource>() == null)
            {
                return;
            }

            float[] temp = TempArray <float> .Obtain(audioData.Length);

            audioData.CopyTo(0, temp, 0, audioData.Length * 4);

            if (Equalize)
            {
                // Auto-gain: aim for a gain that brings the chunk's loudest sample up to the target volume.
                float maxAmp = AudioUtils.GetMaxAmplitude(temp);
                targetGain = TargetEqualizeVolume / maxAmp;

                if (targetGain > MaxEqualization)
                {
                    targetGain = MaxEqualization;
                }

                if (targetGain < currentGain)
                {
                    currentGain = targetGain;
                }

                AudioUtils.ApplyGain(temp, currentGain);
            }

            // Write into the looping playback clip at the current write head.
            playClip.SetData(temp, writeHead);
            TempArray <float> .Release(temp);

            writeHead    += audioData.Length;
            totalWritten += audioData.Length;
            writeHead    %= playClip.samples; // wrap around the circular playback buffer

            // Hold off playback for a few buffered chunks so some audio accumulates first.
            if (!GetComponent <AudioSource>().isPlaying)
            {
                delayForFrames--;
                if (delayForFrames <= 0)
                {
                    GetComponent <AudioSource>().Play();
                }
            }
        }
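The Equalize branch above amounts to a simple auto-gain rule. A minimal sketch of that rule in isolation, with parameter names mirroring the fields used in BufferAudio (NextGain itself is hypothetical):

        // Aim for a gain that brings the loudest sample in the chunk up to the target
        // volume, capped at maxEqualization; within a chunk the gain may only drop
        // (to avoid clipping), never rise.
        private static float NextGain(float currentGain, float maxAmp,
                                      float targetEqualizeVolume, float maxEqualization)
        {
            float targetGain = Mathf.Min(targetEqualizeVolume / maxAmp, maxEqualization);
            return Mathf.Min(currentGain, targetGain);
        }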
Example #7
        /// <summary>
        /// Decode and play back received audio data
        /// </summary>
        protected virtual void ReceiveAudioData(VoicePacketWrapper encodedFrame)
        {
            if (!IsLocal || DebugAudio)
            {
                //Debug.Log("encoded index" + encodedFrame.Index + " next expected" + nextExpectedIndex);
                // discard old samples
                if (encodedFrame.Index < nextExpectedIndex)
                {
                    return;
                }

                // voice controller is muted - don't bother decoding or buffering audio data
                if (Mute)
                {
                    nextExpectedIndex = encodedFrame.Index + 1;
                    return;
                }

                speaker.SetSampleRate(encodedFrame.Frequency * 1000);

                // some frames were lost, generate filler data for them
                // unless the speaker isn't playing any sound, in which case filler data will only delay the stream further
                // OR unless nextExpectedIndex is zero, implying that we haven't received any frames yet
                if (nextExpectedIndex != 0 && encodedFrame.Index != nextExpectedIndex && speaker.PlayingSound)
                {
                    int numMissingFrames = (int)(encodedFrame.Index - nextExpectedIndex);

                    for (int i = 0; i < numMissingFrames; i++)
                    {
                        BigArray <float> filler = codec.GenerateMissingFrame(encodedFrame.Frequency);
                        speaker.BufferAudio(filler);
                    }
                }

                BigArray <float> decoded = codec.DecodeFrame(encodedFrame);
                speaker.BufferAudio(decoded);

                nextExpectedIndex = encodedFrame.Index + 1;
            }
        }
Example #8
        /// <summary>
        /// Called when new audio is available from the microphone
        /// </summary>
        protected virtual void OnMicrophoneDataReady(BigArray <float> newData, int frequency)
        {
            if (!IsLocal)
            {
                return;
            }

            codec.OnAudioAvailable(newData);

            VoicePacketWrapper? enc = codec.GetNextEncodedFrame(frequency);

            while (enc.HasValue)
            {
                // assign index, copying the struct back into the nullable so enc.Value carries it
                VoicePacketWrapper packet = enc.Value;
                packet.Index = nextFrameIndex++;
                enc          = packet;

                OnAudioDataEncoded(enc.Value);
                enc = codec.GetNextEncodedFrame(frequency);
            }
        }
Example #9
        private IEnumerator yieldChunks(BigArray <float> data, int chunkSize, float chunkDuration)
        {
            int readHead = 0;

            while (readHead < data.Length)
            {
                int remainder = chunkSize;
                if (readHead + chunkSize >= data.Length)
                {
                    remainder = data.Length - readHead;
                }

                BigArray <float> temp = new BigArray <float>(remainder, 0);
                temp.Resize(remainder);
                temp.CopyFrom(data.Items, readHead * 4, 0, remainder * 4);
                AudioUtils.Resample(temp, testClip.frequency, AudioUtils.GetFrequency(ResampleFrequency));

                bufferReady(temp, AudioUtils.GetFrequency(ResampleFrequency));

                readHead += remainder;

                yield return new WaitForSeconds(chunkDuration);
            }
        }
Example #10
        public void OnAudioAvailable(BigArray <float> rawPCM)
        {
            chunkBuffer.AddSamples(rawPCM);
        }
Example #11
        void Update()
        {
            if (!Microphone.IsRecording(device) || recordedAudio == null)
            {
                return;
            }

            float[] tempArray = TempArray <float> .Obtain(ChunkSize);

            // in case of recompile
            if (resampleBuffer == null)
            {
                resampleBuffer = new BigArray <float>(ChunkSize, 0);
            }

            int readPosition = Microphone.GetPosition(device);

            // Normal case: the microphone write position is at least one full chunk ahead
            // of our read position, so consume whole chunks until we catch up.
            if (readPosition >= (prevReadPosition + ChunkSize))
            {
                while (readPosition >= (prevReadPosition + ChunkSize))
                {
                    if (canTalk())
                    {
                        recordedAudio.GetData(tempArray, prevReadPosition);
                        if (exceedsVolumeThreshold(tempArray))
                        {
                            resample(tempArray);
                            bufferReady(resampleBuffer, this.recordingFrequency);
                        }
                    }

                    prevReadPosition += ChunkSize;
                }
            }
            // Wrap-around case: the microphone write position has looped back to the start
            // of the clip, so measure the backlog as if the buffer were unrolled.
            else if (prevReadPosition > readPosition)
            {
                var endReadPos = readPosition + recordedAudio.samples;
                var diff       = endReadPos - prevReadPosition;
                while (diff >= ChunkSize)
                {
                    if (canTalk())
                    {
                        recordedAudio.GetData(tempArray, prevReadPosition);
                        if (exceedsVolumeThreshold(tempArray))
                        {
                            resample(tempArray);
                            bufferReady(resampleBuffer, this.recordingFrequency);
                        }
                    }

                    prevReadPosition += ChunkSize;
                    if (prevReadPosition >= recordedAudio.samples)
                    {
                        prevReadPosition -= recordedAudio.samples;
                        break;
                    }

                    endReadPos = readPosition + recordedAudio.samples;
                    diff       = endReadPos - prevReadPosition;
                }
            }

            TempArray <float> .Release(tempArray);
        }