Example #1
        public Task<bool> BeginCaptureAudio()
        {
            if (_recorder != null)
            {
                _recorder.Release();
                _recorder.Dispose();
            }

            _audioCaptureStream = new MemoryStream();
            MediaHelper.WriteWavHeader(_audioCaptureStream, MediaHelper.DefaultAudioSamplingRate);

            _recorder = new AudioRecord(AudioSource.Mic, MediaHelper.DefaultAudioSamplingRate, ChannelIn.Mono, Encoding.Pcm16bit, _audioBuffer.Length);

            if (_recorder.State != State.Initialized)
            {
                _recorder = null;
                return Task.FromResult(false);
            }

            _recorder.StartRecording();
            _trimAudioZeros = true;

            ReadAudioBufferAsync();
            return Task.FromResult(true);
        }
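Example #1 leans on MediaHelper.WriteWavHeader, which is not shown. A minimal sketch of what such a helper could look like, assuming 16-bit mono PCM and zero-length placeholders that are patched once recording finishes; the signature and body below are assumptions, not the real MediaHelper:

        //hypothetical helper (requires System.IO); writes a 44-byte PCM WAV header
        public static void WriteWavHeader(Stream stream, int sampleRate, short channels = 1, short bitsPerSample = 16)
        {
            var writer = new BinaryWriter(stream, System.Text.Encoding.ASCII, leaveOpen: true);

            writer.Write(System.Text.Encoding.ASCII.GetBytes("RIFF"));
            writer.Write(0);                                         //placeholder: total file size - 8
            writer.Write(System.Text.Encoding.ASCII.GetBytes("WAVE"));

            writer.Write(System.Text.Encoding.ASCII.GetBytes("fmt "));
            writer.Write(16);                                        //PCM fmt chunk length
            writer.Write((short)1);                                  //audio format 1 = PCM
            writer.Write(channels);
            writer.Write(sampleRate);
            writer.Write(sampleRate * channels * bitsPerSample / 8); //byte rate
            writer.Write((short)(channels * bitsPerSample / 8));     //block align
            writer.Write(bitsPerSample);

            writer.Write(System.Text.Encoding.ASCII.GetBytes("data"));
            writer.Write(0);                                         //placeholder: PCM data length
            writer.Flush();
        }

Both size fields still have to be rewritten (for example by seeking back in the stream) once the total amount of captured PCM data is known.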
Example #2
        public void Clear()
        {
            recorder?.Stop();
            recorder?.Release();
            recorder?.Dispose();
            recorder = null;

            audioTrack?.Stop();
            audioTrack?.Release();
            audioTrack?.Dispose();
            audioTrack = null;
        }
Example #3
    void readLoop()
    {
        byte[] buffer = new byte[bufferSize];
        while (!stopRecording)
        {
            try
            {
                int num_bytes = audioRecorder.Read(buffer, 0, buffer.Length);

                if (num_bytes > 0)
                {
                    byte[] data_to_send = new byte[num_bytes];
                    Array.Copy(buffer, data_to_send, num_bytes);

                    //hand the chunk off without blocking the read loop
                    Task.Run(() =>
                    {
                        OnSoundDataReceived(data_to_send);
                    });
                }
            }
            catch (Exception e)
            {
                Logging.error("Exception occurred while recording audio stream: " + e);
                break;
            }
            Thread.Sleep(10);
        }
        audioRecorder.Stop();
        audioRecorder.Release();
        audioRecorder.Dispose();
        audioRecorder = null;
        running       = false;
    }
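readLoop() above reads from fields (audioRecorder, bufferSize, stopRecording, running) whose setup is not shown. A hedged sketch of how such a loop might be started and stopped; the startRecording/stopAndWait wrappers and the 44.1 kHz mono 16-bit parameters are assumptions:

    //assumed fields, matching the names readLoop() uses:
    //AudioRecord audioRecorder; int bufferSize; volatile bool stopRecording; volatile bool running;
    void startRecording()
    {
        bufferSize = AudioRecord.GetMinBufferSize(44100, ChannelIn.Mono, Encoding.Pcm16bit);
        audioRecorder = new AudioRecord(AudioSource.Mic, 44100, ChannelIn.Mono, Encoding.Pcm16bit, bufferSize);

        if (audioRecorder.State != State.Initialized)
        {
            return;
        }

        stopRecording = false;
        running = true;
        audioRecorder.StartRecording();

        //run the blocking read loop off the UI thread; readLoop() disposes the recorder when it exits
        new System.Threading.Thread(readLoop) { IsBackground = true }.Start();
    }

    void stopAndWait()
    {
        stopRecording = true;
        while (running)
        {
            System.Threading.Thread.Sleep(10);
        }
    }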
Example #4
        private async Task RecordAudioAsync()
        {
            wavPath = Path.Combine(audioDir, Guid.NewGuid().ToString() + "_audio.wav");

            byte[] audioBuffer = new byte[8000];

            audioRecord = new AudioRecord(
                AudioSource.Mic,   // Hardware source of recording.
                sampleRate,        // Frequency
                channelIn,         // Mono or stereo
                encoding,          // Audio encoding
                audioBuffer.Length // Length of the audio clip.
                );

            var id = audioRecord.AudioSessionId;

            audioRecord.StartRecording();

            int totalAudioLen = 0;

            isRecording = true;


            using (System.IO.Stream outputStream = System.IO.File.Open(wavPath, FileMode.Create))
                using (BinaryWriter bWriter = new BinaryWriter(outputStream))
                {
                    //init a header with no length - it will be added later
                    WriteWaveFileHeader(bWriter, maxAudioFreamesLength);

                    //keep reading the buffer while there is audio input
                    while (isRecording && totalAudioLen <= maxAudioFreamesLength)
                    {
                        int bytesRead = await audioRecord.ReadAsync(audioBuffer, 0, audioBuffer.Length);
                        if (bytesRead <= 0)
                        {
                            break; //0 or a negative value means no data / an error
                        }
                        totalAudioLen += bytesRead;

                        //only write the bytes actually read, not the whole buffer
                        bWriter.Write(audioBuffer, 0, bytesRead);

                        //analysis
                        var intbuffer = ByteArrayTo16Bit(audioBuffer);
                        var min       = intbuffer.Min();
                        var max       = intbuffer.Max();
                        var avg       = intbuffer.Average(x => (double)x);
                        var sos       = intbuffer.Sum(x => (long)x * x); //sum of squares
                        var rms       = Math.Sqrt((double)sos / intbuffer.Length);
                        var fft = FFT(intbuffer);
                    }

                    isRecording = false;

                    //the header length was written up front (maxAudioFreamesLength); just close the streams
                    outputStream.Close();
                    bWriter.Close();
                }

            audioRecord.Stop();
            audioRecord.Dispose();

            //this file is now fully written and can be sent to server for analysis
            OnAudioReadyForUpload(new AudioUploadEventArgs(DateTime.Now.ToUniversalTime(), wavPath));
        }
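The analysis section calls ByteArrayTo16Bit, which is not shown. A possible implementation, assuming little-endian 16-bit PCM as produced by Encoding.Pcm16bit (the body below is an assumption):

        private static short[] ByteArrayTo16Bit(byte[] raw)
        {
            //each 16-bit sample is two consecutive bytes, low byte first
            var samples = new short[raw.Length / 2];
            Buffer.BlockCopy(raw, 0, samples, 0, samples.Length * 2);
            return samples;
        }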
Example #5
 public void Dispose()
 {
     Stop();
     _record.Release();
     _readSubject.OnCompleted();
     _readSubject.Dispose();
     _record.Dispose();
 }
Example #6
        private void ReadThread()
        {
            _record.StartRecording();
            while (_isrecording)
            {
                var size = _record.Read(_tmpBuffer, 0, _tmpBuffer.Length);

                for (var i = 0; i < size; i++) //only the samples actually read
                {
                    _buffer[i] = _tmpBuffer[i] / 32767.0f;//(_tmpBuffer[i] > 100 || _tmpBuffer[i]<-100) ? _tmpBuffer[i] / 32767.0f : 0f;
                }
                _callback?.Invoke(_buffer);
            }
            _record.Stop();
            _record.Release();
            _record.Dispose();
        }
Example #7
    private void cleanUp()
    {
        running = false;

        if (audioRecorder != null)
        {
            try
            {
                audioRecorder.Stop();
                audioRecorder.Release();
            }
            catch (Exception)
            {
                //Stop() throws IllegalStateException if recording never started; dispose anyway
            }
            audioRecorder.Dispose();
            audioRecorder = null;
        }

        if (audioEncoder != null)
        {
            try
            {
                audioEncoder.Stop();
                audioEncoder.Release();
            }
            catch (Exception)
            {
            }
            audioEncoder.Dispose();
            audioEncoder = null;
        }

        buffer     = null;
        bufferSize = 0;
        lock (outputBuffers)
        {
            outputBuffers.Clear();
        }
        lock (availableBuffers)
        {
            availableBuffers.Clear();
        }
    }
Example #8
 /// <summary>
 /// Flushes any audio bytes in memory but not yet broadcast out to any listeners.
 /// </summary>
 public void Flush()
 {
     audioSource.Dispose();
     // not needed for this implementation
 }
Example #9
 public void Dispose()
 {
     _recorder.Dispose();
 }
Example #10
        public void record()
        {
            try
            {
                recorder?.Stop();
                recorder?.Release();
                recorder?.Dispose();
                recorder = null;


                int minBufSize = AudioTrack.GetMinBufferSize(sampleRate, channelOut, encoding);
                //       DatagramSocket socket = new DatagramSocket();
                byte[] buffer = new byte[minBufSize];
                //     DatagramPacket packet;
                //     InetAddress destination = InetAddress.GetByName(serverAddress);
                recorder = new AudioRecord(AudioSource.VoiceCommunication, sampleRate, channelIn, encoding, minBufSize * 4);

                if (recorder.State != Android.Media.State.Initialized)
                {
                    return;
                }
                recorder.StartRecording();

                // string Path = System.IO.Path.Combine(System.Environment.GetFolderPath(System.Environment.SpecialFolder.MyDocuments), "temp.pcm");

                //System.IO.File.Delete(Path);

                //recorder.StartRecording();

                //DateTime currentTime = DateTime.Now.AddMilliseconds(500);

                //FileStream fs = new FileStream(Path, FileMode.Append, FileAccess.Write);

                //while (recorder.RecordingState == RecordState.Recording)
                //{
                //    minBufSize = recorder.Read(buffer, 0, buffer.Length);

                //    if (completefile == true)
                //        continue;

                //    fs.Write(buffer, 0, buffer.Length);

                //    if (currentTime < DateTime.Now)
                //    {
                //        fs.Close();

                //        play(File.ReadAllBytes(Path));
                //    //    NetProcess.SendAudioMessage(File.ReadAllBytes(Path));

                //        File.Delete(Path);
                //        var r = File.Exists(Path);

                //        fs = new FileStream(Path, FileMode.Append, FileAccess.Write);

                //        currentTime = DateTime.Now.AddMilliseconds(500);

                //    }
                //}

                ConcurrentQueue<System.IO.MemoryStream> Frames = new ConcurrentQueue<System.IO.MemoryStream>();

                while (recorder?.RecordingState == RecordState.Recording)
                {
                    if (recorder == null)
                    {
                        return;
                    }

                    minBufSize = recorder.Read(buffer, 0, buffer.Length);

                    //copy the chunk so the next Read() does not overwrite queued data
                    Frames.Enqueue(new MemoryStream((byte[])buffer.Clone()));

                    if (Frames.Count > 0)
                    {
                        if (NetProcess.TargetPlayerId.Count > 0)
                        {
                            NetProcess.SendAudioMessage(Frames);
                        }

                        Frames.Clear();
                    }
                }
            }
            catch (Exception)
            {
                //swallow and fall through to the cleanup in finally
            }
            finally
            {
                try
                {
                    recorder?.Stop();
                    recorder?.Release();
                }
                catch (Exception)
                {
                    //Stop() throws if the recorder never reached the recording state
                }
                recorder?.Dispose();
                recorder = null;
            }
        }
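One detail worth flagging in Example #10: the capture buffer is sized with AudioTrack.GetMinBufferSize, which is the playback-side query. AudioRecord exposes its own minimum; a hedged variant of the buffer setup (same field names as the example, otherwise an assumption):

                int minBufSize = AudioRecord.GetMinBufferSize(sampleRate, channelIn, encoding);
                byte[] buffer = new byte[minBufSize];
                recorder = new AudioRecord(AudioSource.VoiceCommunication, sampleRate, channelIn, encoding, minBufSize * 4);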
Example #11
 public void Dispose()
 {
     Stop();
     _soundStream.Dispose();
 }
Example #12
 public void Dispose()
 {
     _audioRecorder.Stop();
     _audioRecorder.Dispose();
     GC.SuppressFinalize(this);
 }
Example #13
 public void Dispose()
 {
     _record?.Release();
     _record?.Dispose();
 }
Example #14
    public void stop()
    {
        if (!running)
        {
            return;
        }
        running = false;

        if (echoCanceller != null)
        {
            try
            {
                echoCanceller.Release();
                echoCanceller.Dispose();
            }
            catch (Exception)
            {
            }
            echoCanceller = null;
        }

        if (noiseSuppressor != null)
        {
            try
            {
                noiseSuppressor.Release();
                noiseSuppressor.Dispose();
            }
            catch (Exception)
            {
            }
            noiseSuppressor = null;
        }

        if (audioRecorder != null)
        {
            try
            {
                audioRecorder.Stop();
                audioRecorder.Release();
            }
            catch (Exception)
            {
            }
            audioRecorder.Dispose();
            audioRecorder = null;
        }

        if (audioEncoder != null)
        {
            audioEncoder.stop();
            audioEncoder.Dispose();
            audioEncoder = null;
        }

        buffer       = null;
        shortsBuffer = null;
        bufferSize   = 0;
        lock (outputBuffers)
        {
            outputBuffers.Clear();
        }


        AudioManager am = (AudioManager)MainActivity.Instance.GetSystemService(Context.AudioService);

        if (Build.VERSION.SdkInt < BuildVersionCodes.O)
        {
            if (focusListener != null)
            {
#pragma warning disable CS0618 // Type or member is obsolete
                am.AbandonAudioFocus(focusListener);
#pragma warning restore CS0618 // Type or member is obsolete
                focusListener.Dispose();
                focusListener = null;
            }
        }
        else
        {
            if (focusListener != null)
            {
                if (focusRequest != null)
                {
                    am.AbandonAudioFocusRequest(focusRequest);
                    focusRequest.Dispose();
                    focusRequest = null;
                }
                focusListener.Dispose();
                focusListener = null;
            }
        }
    }
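The focus teardown in stop() implies a matching acquisition in some start path that is not shown. A hedged sketch of what that could look like; the focusListener and focusRequest fields follow the example, while the VoiceCall stream and Gain hint are assumptions:

    private void requestAudioFocus()
    {
        AudioManager am = (AudioManager)MainActivity.Instance.GetSystemService(Context.AudioService);

        if (Build.VERSION.SdkInt < BuildVersionCodes.O)
        {
#pragma warning disable CS0618 // Type or member is obsolete
            am.RequestAudioFocus(focusListener, Stream.VoiceCall, AudioFocus.Gain);
#pragma warning restore CS0618 // Type or member is obsolete
        }
        else
        {
            focusRequest = new AudioFocusRequestClass.Builder(AudioFocus.Gain)
                .SetOnAudioFocusChangeListener(focusListener)
                .Build();
            am.RequestAudioFocus(focusRequest);
        }
    }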
Example #15
        private async Task RecordAudioContinuously()
        {
            byte[] audioBuffer    = new byte[8000];
            byte[] preAudioBuffer = new byte[8000];

            audioRecord = new AudioRecord(
                AudioSource.Mic,   // Hardware source of recording.
                sampleRate,        // Frequency
                channelIn,         // Mono or stereo
                encoding,          // Audio encoding
                audioBuffer.Length // Length of the audio clip.
                );

            _forceStop = false;

            audioRecord.StartRecording();

            using (MemoryStream memory = new MemoryStream())
                using (BufferedStream stream = new BufferedStream(memory))
                {
                    while (!_forceStop)
                    {
                        //start listening
                        await audioRecord.ReadAsync(audioBuffer, 0, audioBuffer.Length);

                        //analysis
                        var intbuffer = ByteArrayTo16Bit(audioBuffer);

                        var audioData = new AudioData(intbuffer, isRecording);

                        if (audioData.IsAllZeros)
                        {
                            //not sure if this is necessary
                            isRecording = false;
                            memory.Flush();
                            memory.SetLength(0); //this one is, though: drop any buffered audio
                            continue;
                        }

                        //this should be smarter ;)
                        containsVoice = audioData.IdentifyVoice();

                        //send info to MVVM to display
                        OnRecordStatusChanged(new AudioDataEventArgs(audioData));


                        //if voice has been detected, start writing
                        if (containsVoice && !isRecording)
                        {
                            isRecording = true;
                            stream.Write(preAudioBuffer, 0, preAudioBuffer.Length);
                            stream.Write(audioBuffer, 0, audioBuffer.Length);
                        }
                        //if sound is still detected keep on recording
                        else if (containsVoice && isRecording)
                        {
                            //write to buffer
                            stream.Write(audioBuffer, 0, audioBuffer.Length);
                        }
                        //if sound is no longer detected, and is still recording
                        else if (!containsVoice && isRecording)
                        {
                            //save to file
                            wavPath = Path.Combine(audioDir, Guid.NewGuid().ToString() + "_audio.wav");

                            //how much audio do we have; flush the BufferedStream so memory.Length is current
                            stream.Flush();
                            if ((int)memory.Length <= 2 * audioBuffer.Length)
                            {
                                //probably a false positive; too short to be a valid sound
                                isRecording = false;
                                continue;
                            }
                            else
                            {
                                //Get one more segment of sound
                                await audioRecord.ReadAsync(audioBuffer, 0, audioBuffer.Length);

                                stream.Write(audioBuffer, 0, audioBuffer.Length);
                                stream.Flush(); //push any buffered bytes into the MemoryStream before saving

                                using (System.IO.Stream outputStream = System.IO.File.Open(wavPath, FileMode.Create))
                                    using (BinaryWriter bWriter = new BinaryWriter(outputStream))
                                    {
                                        //write header
                                        WriteWaveFileHeader(bWriter, (int)memory.Length);

                                        memory.WriteTo(outputStream);

                                        //close file
                                        outputStream.Close();
                                        bWriter.Close();

                                        isRecording = false;
                                    }

                                OnAudioReadyForUpload(new AudioUploadEventArgs(DateTime.Now.ToUniversalTime(), wavPath));
                            }
                            //not sure if this is necessary
                            memory.Flush();
                            memory.SetLength(0); //this one is, though: start the next clip from an empty buffer
                        }
                        //no voice and not recording: nothing to do

                        preAudioBuffer = (byte[])audioBuffer.Clone();
                    }
                    //broke out of the continuous loop

                    //TODO: handle break - does not care if we were recording

                    audioRecord.Stop();
                    audioRecord.Dispose();
                }
        }
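AudioData.IdentifyVoice() in Example #15 is not shown, and the code itself notes the check "should be smarter". A hedged sketch of the kind of gate it might implement, a plain RMS-energy threshold over the 16-bit samples (the helper name and threshold value are arbitrary assumptions):

        private static bool LooksLikeVoice(short[] samples, double rmsThreshold = 1000.0)
        {
            if (samples.Length == 0)
            {
                return false;
            }

            //root-mean-square amplitude; speech sits well above the idle noise floor
            double sumOfSquares = 0;
            foreach (var s in samples)
            {
                sumOfSquares += (double)s * s;
            }
            double rms = Math.Sqrt(sumOfSquares / samples.Length);
            return rms > rmsThreshold;
        }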
Example #16
 public void Dispose()
 {
     _r.Dispose();
 }