/// <summary>
/// Stops the current microphone capture and compresses the recorded
/// <see cref="AudioClip"/> into a byte buffer suitable for transmission.
/// </summary>
/// <returns>
/// The compressed audio data, or <c>null</c> when no clip has been recorded.
/// </returns>
private byte[] GetData()
{
    // Nothing recorded yet — nothing to send.
    if (ac.clip == null)
    {
        return null;
    }

    ac.Stop();

    // NOTE(review): this path uses a fixed 1.0f gain and a 3-argument overload,
    // while OnAudioAvailable passes BandwidthMode and LocalGain — confirm the
    // difference is intentional.
    int sampleCount;
    byte[] compressed = USpeakAudioClipCompressor.CompressAudioClip(ac.clip, out sampleCount, 1.0f);
    return compressed;
}
/// <summary>
/// Callback raised when a new batch of PCM samples is available from the microphone.
/// Encodes the samples and queues the resulting frame in the send buffer; the buffer
/// is flushed on a user-configurable timer to avoid flooding the network.
/// </summary>
/// <param name="pcmData">Raw mono PCM samples captured from the microphone.</param>
void OnAudioAvailable(float[] pcmData)
{
    // Wrap the raw samples in a temporary one-channel clip so the compressor can consume them.
    AudioClip clip = AudioClip.Create("temp", pcmData.Length, 1, audioFrequency, false, false);
    clip.SetData(pcmData, 0);

    int sampleCount;
    byte[] encoded = USpeakAudioClipCompressor.CompressAudioClip(clip, out sampleCount, BandwidthMode, LocalGain);

    // Package the encoded payload with its sample count into a frame container.
    USpeakFrameContainer frame = default(USpeakFrameContainer);
    frame.Samples = (ushort)sampleCount;
    frame.encodedData = encoded;

    sendBuffer.Add(frame);
}