void OnMicrophoneBufferReady(object sender, EventArgs args)
        {
            // Get buffer from microphone and add to collection
            byte[] buffer        = new byte[buttonMic.GetSampleSizeInBytes(buttonMic.BufferDuration)];
            int    bytesReturned = buttonMic.GetData(buffer);

            buttonBufferCollection.Add(buffer);
        }
Example #2
 // MY_VOICE: write the microphone buffer to a MemoryStream
 void microphone_Buffer1ToStream(object sender, EventArgs e)
 {
     try
     {
         microphone.GetData(buffer);
         memStream.Write(buffer, 0, buffer.Length);
     }
     catch (Exception ee)
     {
         MessageBox.Show(ee.Message);
     }
 }
Example #3
        private void microphone_BufferReady(object sender, EventArgs e)
        {
            _page.MessageTextBlock.Text = "";
            _microphone.GetData(_buffer);
            // Convert the 16-bit little-endian PCM samples to doubles for pitch analysis
            double[] x     = new double[2048];
            int      index = 0;

            for (int i = 0; i < 2048; i += 2)
            {
                x[index] = Convert.ToDouble(BitConverter.ToInt16(_buffer, i));
                index++;
            }

            double frequency = FrequencyUtils.FindFundamentalFrequency(x, _microphone.SampleRate, 60, 2000);

            SoundNote soundNote = new SoundNote();

            soundNote.FindNoteByFrequency(frequency);
            _notesList.Add(soundNote);
            _page.ShowNote(soundNote);
            if (!soundNote.IsOutOfTune)
            {
                if (IsCorrectListOfNotes(_notesList))
                {
                    a = x;
                    double[]  b        = FrequencyUtils.Spectr;
                    SoundNote lastNote = _notesList.Last();
                    AchievedNotes.Add(lastNote);
                    _notesList.Clear();
                    CorrectNoteEvent(_microphone);
                }
            }
        }
Example #4
        void mic_BufferReady(object sender, EventArgs e)
        {
            Microphone mic   = Microphone.Default;
            int        nSize = mic.GetData(buffer);

            MicrophoneQueue.AppendData(buffer, 0, nSize);
        }
Example #5
        // The Microphone.BufferReady event handler.
        // Gets the audio data from the microphone and stores it in a buffer,
        // then writes that buffer to a stream for later playback.
        // Any action in this event handler should be quick!
        void microphone_BufferReady(object sender, EventArgs e)
        {
            // Retrieve audio data
            microphone.GetData(buffer);

            // Store the audio data in a stream
            stream.Write(buffer, 0, buffer.Length);
        }
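Most of these handlers assume a Microphone, a byte[] buffer sized to one BufferDuration, and a stream that are set up elsewhere. A minimal setup sketch for the handler above (the field names match the snippet; the DispatcherTimer that pumps FrameworkDispatcher.Update(), as required in a Silverlight page, is an assumption here):

        // Requires Microsoft.Xna.Framework, Microsoft.Xna.Framework.Audio,
        // System.IO and System.Windows.Threading.
        Microphone microphone = Microphone.Default;
        byte[] buffer;
        MemoryStream stream = new MemoryStream();

        void StartRecording()
        {
            // One BufferReady event fires per BufferDuration; size the buffer to match.
            microphone.BufferDuration = TimeSpan.FromMilliseconds(500);
            buffer = new byte[microphone.GetSampleSizeInBytes(microphone.BufferDuration)];
            microphone.BufferReady += microphone_BufferReady;

            // XNA events only fire while FrameworkDispatcher.Update() is being pumped,
            // e.g. from a DispatcherTimer in a Silverlight app.
            DispatcherTimer xnaTimer = new DispatcherTimer { Interval = TimeSpan.FromMilliseconds(33) };
            xnaTimer.Tick += (s, args) => FrameworkDispatcher.Update();
            xnaTimer.Start();

            microphone.Start();
        }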
Example #6
        private void MicrophoneBufferReady(object sender, EventArgs e)
        {
            var length = _microphone.GetData(_baBuffer);

            DataPacketCaptured(this, new BinaryDataEventsArgs {
                Data = _baBuffer
            });
        }
Example #7
        void _mic_BufferReady(object sender, EventArgs e)
        {
            _mic.GetData(_buffer);

            if (DataAvailable != null)
            {
                DataAvailable(this, new WaveInEventArgs(_buffer, _buffer.Length));
            }
        }
Example #8
 private void MicrophoneBufferReady(object sender, EventArgs e)
 {
     _buffer = new byte[_bufferSize];
     _microphone.GetData(_buffer);
     if (!worker.IsBusy)
     {
         worker.RunWorkerAsync(_buffer);
     }
 }
Example #9
        private void ProcessMicrophoneBuffer()
        {
            var size = _microphone.GetData(_micBuffer);

            if (_recordStream.CanWrite)
            {
                _recordStream.Write(_micBuffer, 0, size);
            }
        }
Example #10
        /// <summary>
        /// The Microphone.BufferReady event handler.
        /// Gets the audio data from the microphone and stores it in a buffer,
        /// then writes that buffer to a stream for later playback.
        /// Any action in this event handler should be quick!
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        void microphone_BufferReady(object sender, EventArgs e)
        {
            // Retrieve audio data
            _microphone.GetData(_buffer);

            GetSampleRate(_buffer);

            // Store the audio data in a stream
            _stream.Write(_buffer, 0, _buffer.Length);
        }
Example #11
        void microphone_BufferReady(object sender, EventArgs e)
        {
            MemoryStream s = new MemoryStream();

            // Retrieve audio data
            microphone.GetData(buffer);
            // Store the audio data in a stream
            s.Write(buffer, 0, buffer.Length);
            Client.SendCommand(s.ToArray());
        }
Example #12
        /// <summary>
        /// The Microphone.BufferReady event handler.
        /// Gets the audio data from the microphone, stores it in a buffer,
        /// then writes that buffer to a stream for later playback.
        /// Any action in this event handler should be quick!
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        void microphone_BufferReady(object sender, EventArgs e)
        {
            // Retrieve audio data
            microphone.GetData(buffer);

            // send data to model, for visualization
            App.AudioModel.AudioBuffer = buffer;

            // Store the audio data in a stream
            App.AudioModel.stream.Write(buffer, 0, buffer.Length);
        }
Example #13
        private void handleBufferReady(object sender, EventArgs e)
        {
            // Retrieve audio data and feed it straight back out for live monitoring
            mic.GetData(buffer);

            // microphoneSoundEffect is a DynamicSoundEffectInstance created elsewhere,
            // e.g. new DynamicSoundEffectInstance(22050, AudioChannels.Mono)
            microphoneSoundEffect.SubmitBuffer(buffer);
            microphoneSoundEffect.Play();
        }
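A sketch of the setup the loopback handler above assumes; the microphoneSoundEffect name matches the handler, while the other field names and the choice to create the playback instance once from the microphone's sample rate are assumptions:

        Microphone mic = Microphone.Default;
        DynamicSoundEffectInstance microphoneSoundEffect;
        byte[] buffer;

        void StartMonitoring()
        {
            mic.BufferDuration = TimeSpan.FromMilliseconds(100);
            buffer = new byte[mic.GetSampleSizeInBytes(mic.BufferDuration)];

            // Create the playback instance once, matched to the capture format,
            // and start it; the handler's repeated Play() calls are then redundant.
            microphoneSoundEffect = new DynamicSoundEffectInstance(mic.SampleRate, AudioChannels.Mono);
            microphoneSoundEffect.Play();

            mic.BufferReady += handleBufferReady;
            mic.Start();
        }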
Example #14
        void microphone_BufferReady(object sender, EventArgs e)
        {
            byte[] audioBuffer = new byte[1024];
            int    bytesRead   = 0;

            while ((bytesRead = microphone.GetData(audioBuffer, 0, audioBuffer.Length)) > 0)
            {
                audioStream.Write(audioBuffer, 0, bytesRead);
            }

            MicrophoneStatus.Text = microphone.State.ToString();
        }
Example #15
 void myMicrophone_BufferReady(object sender, EventArgs e)
 {
     // Retrieve the audio data
     try
     {
         myMicrophone.GetData(msBuffer);
         btList.AddRange(msBuffer);
     }
     catch (Exception ex)
     {
         System.Diagnostics.Debug.WriteLine(string.Format("Error while getting data: {0}", ex.Message));
     }
 }
Example #16
        void StopRecording()
        {
            byte[] extraBuffer = new byte[microphone.GetSampleSizeInBytes(microphone.BufferDuration)];
            int    extraBytes  = microphone.GetData(extraBuffer);

            microphone.Stop();

            using (IsolatedStorageFile storage = IsolatedStorageFile.GetUserStoreForApplication())
            {
                using (IsolatedStorageFileStream stream = storage.CreateFile(FILE_NAME))
                {
                    foreach (byte[] buffer in bufferCollection)
                    {
                        stream.Write(buffer, 0, buffer.Length);
                    }
                    stream.Write(extraBuffer, 0, extraBytes);
                }
            }

            StateButton.Content = "Play";
            state = STATE.RECORDED;
        }
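For the playback half of this pattern, a minimal sketch that reads the file written above back out of isolated storage and plays it as raw PCM (it assumes the same FILE_NAME, a mono recording, and the same microphone field for the sample rate):

        void PlayRecording()
        {
            using (IsolatedStorageFile storage = IsolatedStorageFile.GetUserStoreForApplication())
            using (IsolatedStorageFileStream stream = storage.OpenFile(FILE_NAME, FileMode.Open, FileAccess.Read))
            {
                byte[] audioData = new byte[stream.Length];
                stream.Read(audioData, 0, audioData.Length);

                // The recording is raw 16-bit PCM, so it can be wrapped directly in a SoundEffect.
                SoundEffect playback = new SoundEffect(audioData, microphone.SampleRate, AudioChannels.Mono);
                playback.Play();
            }
        }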
Example #17
        public void StopRecording()
        {
            // Get the last partial buffer
            int sampleSize = _microphone.GetSampleSizeInBytes(_microphone.BufferDuration);

            byte[] extraBuffer = new byte[sampleSize];
            int    extraBytes  = _microphone.GetData(extraBuffer);

            // Stop recording
            _microphone.Stop();

            // Stop timer
            _timer.Stop();
            _statusTimer.Stop();

            // Create MemoInfo object and add at top of collection
            int      totalSize = _memoBufferCollection.Count * sampleSize + extraBytes;
            TimeSpan duration  = _microphone.GetSampleDuration(totalSize);
            MemoInfo memoInfo  = new MemoInfo(DateTime.UtcNow, totalSize, duration);

            // Save data in isolated storage
            using (IsolatedStorageFile storage = IsolatedStorageFile.GetUserStoreForApplication())
            {
                using (IsolatedStorageFileStream stream = storage.CreateFile(memoInfo.FileName))
                {
                    // Write buffers from collection
                    foreach (byte[] buffer in _memoBufferCollection)
                    {
                        stream.Write(buffer, 0, buffer.Length);
                    }

                    // Write partial buffer
                    stream.Write(extraBuffer, 0, extraBytes);
                }
            }

            StoreEntry(memoInfo);
        }
Example #18
 /// <summary>
 /// This is called each time a microphone buffer has been filled.
 /// </summary>
 void BufferReady(object sender, EventArgs e)
 {
     try
     {
         // Copy the captured audio data into the pre-allocated array.
         activeMicrophone.GetData(micSamples, 0, micSamples.Length);
         ProcessEcho();
     }
     catch (NoMicrophoneConnectedException)
     {
         // Microphone was disconnected - let the user know.
         UpdateMicrophoneStatus();
     }
 }
Example #19
        private void MicrophoneBufferReady(object sender, EventArgs e)
        {
            if (readLatency)
            {
                DateTime present = DateTime.Now;
                TimeSpan diff    = present.Subtract(start);
                latency_ms  = diff.TotalMilliseconds;
                readLatency = false;
            }

            _microphone.GetData(_buffer);

            stream.Write(_buffer, 0, numBytes);
            totalNumBytes += numBytes;
        }
Example #20
        private void microphone_BufferReady(object sender, EventArgs e)
        {
            microphone.GetData(buffer);
            //stream.Write(buffer, 0, buffer.Length);
            // Convert the 16-bit little-endian PCM samples to doubles for pitch analysis
            double[] x     = new double[4096];
            int      index = 0;

            for (int i = 0; i < 4096; i += 2)
            {
                x[index] = Convert.ToDouble(BitConverter.ToInt16(buffer, i));
                index++;
            }
            double frequency = FrequencyUtils.FindFundamentalFrequency(x, 16000, 50, 2000);

            soundNote.FindNoteByFrequency(frequency);
            // textFreq.Text = sNote.Note + " " + sNote.Octave.ToString()+" "+sNote.Cents.ToString()+" cents";
            textFreq.Text = frequency.ToString();
        }
Example #21
        /// <summary>
        /// The Microphone.BufferReady event handler.
        /// Gets the audio data from the microphone and stores it in a buffer,
        /// then writes that buffer to a stream for later playback.
        /// Any action in this event handler should be quick!
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        void microphone_BufferReady(object sender, EventArgs e)
        {
            // Retrieve audio data
            microphone.GetData(buffer);

            // Scale the audio up (note: tempArray refers to the same array as buffer)
            var tempArray = buffer;

            for (int i = 0; i < tempArray.Length; i++)
            {
                tempArray[i] = (byte)((int)tempArray[i] * volumeScale);
            }

            // Store the audio data in a stream
            //stream.Write(buffer, 0, buffer.Length);
            stream.Write(tempArray, 0, tempArray.Length);
        }
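The scaling above is applied to individual bytes even though the microphone captures 16-bit PCM. A per-sample variant of the same idea, as a sketch reusing the snippet's buffer, stream and volumeScale names:

            for (int i = 0; i < buffer.Length - 1; i += 2)
            {
                // Reassemble each 16-bit little-endian sample, scale it, clamp it
                // to the short range, and write it back into the buffer.
                int sample = (int)(BitConverter.ToInt16(buffer, i) * volumeScale);
                sample = Math.Max(short.MinValue, Math.Min(short.MaxValue, sample));
                buffer[i]     = (byte)(sample & 0xFF);
                buffer[i + 1] = (byte)((sample >> 8) & 0xFF);
            }
            stream.Write(buffer, 0, buffer.Length);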
Example #22
        void Microphone_BufferReady(object sender, EventArgs e)
        {
            var outputLength = Microphone.GetData(OutputBuffer);

            var frame = new AudioBuffer(OutputBuffer, 0, outputLength);

            if (!Resampler.Resample(frame, true))
            {
                Log.Error("Could not resample XNA audio.");
            }

            if (!frame.ConvertMonoToStereo())
            {
                Log.Error("Could not convert XNA audio to stereo.");
            }

            RaiseFrame(frame);
        }
Example #23
        public GamePage()
        {
            InitializeComponent();

            // Get the application's ContentManager
            content = (Application.Current as App).Content;

            timer = new GameTimer {
                UpdateInterval = TimeSpan.FromTicks(333333)
            };
            timer.Update += OnUpdate;
            timer.Draw   += OnDraw;

            dispatcherTimer = new DispatcherTimer {
                Interval = TimeSpan.FromMilliseconds(50)
            };
            dispatcherTimer.Tick += (sender, e1) => Detect();

            if (GameState.getInstance(false) != null)
            {
                GameState.getInstance().resetTurn();
            }

            //setup microphone and configure delegates that handle events
            microphone.BufferDuration = TimeSpan.FromSeconds(1);
            microphoneBuffer          = new byte[microphone.GetSampleSizeInBytes(microphone.BufferDuration)];

            BasicHttpBinding binding = new BasicHttpBinding()
            {
                MaxReceivedMessageSize = int.MaxValue, MaxBufferSize = int.MaxValue
            };
            EndpointAddress address = new EndpointAddress(voiceRecognitionServerIP);

            speechRecognitionClient = new ARVRClient(binding, address);

            microphone.BufferReady += delegate
            {
                microphone.GetData(microphoneBuffer);
                microphoneMemoryStream.Write(microphoneBuffer, 0, microphoneBuffer.Length);
            };
            speechRecognitionClient.RecognizeSpeechCompleted += new EventHandler <RecognizeSpeechCompletedEventArgs>(_client_RecognizeSpeechCompleted);
        }
Example #24
        /// <summary>
        /// The Microphone.BufferReady event handler. Gets the audio data from the
        /// microphone and stores it in a buffer, then writes that buffer to a stream
        /// for later playback.
        /// </summary>
        void microphone_BufferReady(object sender, EventArgs e)
        {
            // Retrieve audio data
            microphone.GetData(buffer);

            if (chk2x.IsChecked == true)
            {
                for (int i = 0; i < intBufferDuration; i++)
                {
                    bufferPlay[i] = (byte)(buffer[i] << 1);
                }
            }
            else
            {
                bufferPlay = (byte[])buffer.Clone();
            }

            Thread soundThread = new Thread(new ThreadStart(playSound));

            soundThread.Start();
        }
Example #25
        /// <summary>
        /// Transfer recorded data to current stream.
        /// </summary>
        async private void GetRecordedData()
        {
            _microphone.GetData(_buffer);

            if (_buffer.Length >= 2)
            {
                RecordLevel = ByteToLevel(_buffer);
            }
            else
            {
                RecordLevel = 0;
            }
            if (!useTempFile)
            {
                if (_stream != null)
                {
                    _stream.Write(_buffer, 0, _buffer.Length);
                }
            }
            else
            {
                if (tempFileStream != null)
                {
                    //int bufferCount= _buffer.Length%2==0?_buffer.Length*2:(_buffer.Length-1)*2;
                    //byte[] recordDataBuffer = new byte[bufferCount];
                    //for (int i = 0; i<bufferCount/2; i+=2)
                    //{
                    //    recordDataBuffer[2 * i] = _buffer[i];
                    //    recordDataBuffer[2 * (i + 1)] = _buffer[i];
                    //    recordDataBuffer[2 * i + 1] = _buffer[i + 1];
                    //    recordDataBuffer[2 * (i + 1) + 1] = _buffer[i + 1];
                    //}
                    //IBuffer PCMSampleBuffer = WindowsRuntimeBufferExtensions.AsBuffer(recordDataBuffer, 0, recordDataBuffer.Length);
                    //CompressedMp3Content citem = await lame.EncodePcm2Mp3(PCMSampleBuffer);
                    //tempFileStream.Write(citem.Mp3Data, 0, citem.Mp3Data.Length);
                    tempFileStream.Write(_buffer, 0, _buffer.Length);
                }
            }
            Array.Clear(_buffer, 0, _buffer.Length);
        }
Example #26
        /// <summary>
        /// Called when new audio chuck is ready to use
        /// </summary>
        /// <param name="sender">Microphone instance</param>
        /// <param name="e">Event arguments</param>
        private void microphone_BufferReady(object sender, EventArgs e)
        {
            int microphoneDataSize = microphone.GetData(speech);

            int talking = witDetectTalking.Talking(speech.AsBuffer(), speech.Length);

            witPipedStream.Write(speech);

            if (detectSpeechStop)
            {
                if (talking == 1)
                {
                    WitLog.Log("Start talking detected");
                }
                else if (talking == 0)
                {
                    WitLog.Log("Stop talking detected");

                    StopRecording();
                }
            }
        }
Example #27
        // microphone
        /// <summary>
        /// The Microphone.BufferReady event handler.
        /// Gets the audio data from the microphone and stores it in a buffer,
        /// then writes that buffer to a stream for later playback.
        /// Any action in this event handler should be quick!
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        void microphone_BufferReady(object sender, EventArgs e)
        {
            // Retrieve audio data
            microphone.GetData(buffer);
            // ying - 2
            for (int j = 0; j < buffer.Length; j += 2)
            {
                Int16 val = (Int16)((buffer[j + 1] << 8) | buffer[j]);

                phase2 += 1;
                if (val < THRESHOLD)
                {
                    sample = 1;// sample = 0;// due to the hardware.
                }
                else
                {
                    sample = 0;// sample = 1;
                }
                if (sample != lastSample)
                {
                    int diff = (int)(phase2 - lastPhase2);
                    switch (decState)
                    {
                    case uart_state.STARTBIT:
                        if (lastSample == 0 && sample == 1)
                        {
                            // low->high transition. Now wait for a long period
                            decState = uart_state.STARTBIT_FALL;
                        }
                        break;

                    case uart_state.STARTBIT_FALL:
                        if ((SHORT_MICROPHONE < diff) && (diff < LONG_MICROPHONE))
                        {
                            // looks like we got a 1->0 transition
                            bitNum   = 0;
                            parityRx = 0;
                            uartByte = 0;
                            decState = uart_state.DECODE;
                        }
                        else
                        {
                            decState = uart_state.STARTBIT;
                        }
                        break;

                    case uart_state.DECODE:
                        if ((SHORT_MICROPHONE < diff) && (diff < LONG_MICROPHONE))
                        {       // got a valid sample/
                            if (bitNum < 8)
                            {
                                uartByte = (Byte)((uartByte >> 1) + (sample << 7));
                                bitNum++;
                                parityRx += sample;
                            }
                            else if (bitNum == 8)
                            {
                                // parity bit
                                if (sample != (parityRx & 0x01))
                                {
                                    //bad parity
                                    decState = uart_state.STARTBIT;
                                }
                                else
                                {
                                    // good parity
                                    bitNum++;
                                }
                            }
                            else
                            {
                                // the stopbit
                                if (sample == 1)
                                {
                                    // a new and valid byte
                                    DataReady(this, new DataReadyEventArgs(uartByte));
                                }
                                decState = uart_state.STARTBIT;
                            }
                        }
                        else if (diff > LONG_MICROPHONE)
                        {
                            decState = uart_state.STARTBIT;
                        }
                        else
                        {
                            lastSample = sample;
                            continue;
                        }
                        break;

                    default:
                        break;
                    }
                    lastPhase2 = phase2;
                }
                lastSample = sample;
            }
            // ying - 2 end
            // Store the audio data in a stream
            //stream.Write(buffer, 0, buffer.Length);
        }
Example #28
 void microphone_BufferReady(object sender, EventArgs e)
 {
     microphone.GetData(buffer);
     stream.Write(buffer, 0, buffer.Length);
 }
Example #29
 private void mic_BufferReady(object sender, EventArgs e)
 {
     mic.GetData(buffer);
     sendData(buffer);
     mStream.Write(buffer, 0, buffer.Length);
 }
Example #30
        private void Microphone_OnBufferReady(object sender, System.EventArgs e)
        {
            const int skipStartBuffersCount = 1;

            if (Component == null)
            {
                return;
            }

            var dataLength = _microphone.GetData(_buffer);

            if (_skipBuffersCount < skipStartBuffersCount)
            {
                _skipBuffersCount++;
                return;
            }

            const int frameLength = 1920;
            var       partsCount  = dataLength / frameLength;

            _stream.Write(_buffer, 0, _buffer.Length);
            for (var i = 0; i < partsCount; i++)
            {
                var count  = frameLength * (i + 1) > _buffer.Length ? _buffer.Length - frameLength * i : frameLength;
                var result = Component.WriteFrame(_buffer.SubArray(frameLength * i, count), count);
            }

            if (_stopRequested || _cancelRequested)
            {
                _microphone.Stop();
                _asyncDispatcher.StopService();
                Component.StopRecord();

                if (UploadFileDuringRecording)
                {
                    UploadAudioFileAsync(true);
                }

                if (_stopRequested)
                {
                    if ((DateTime.Now - _startTime).TotalMilliseconds < 1000.0)
                    {
                        _stopRequested   = false;
                        _cancelRequested = false;
                        //Log("HintStoryboard_OnCompleted._stopRequested=false");

                        _isHintStoryboardPlaying = true;
                        HintStoryboard.Begin();
                        return;
                    }

                    RaiseAudioRecorded(_stream, (DateTime.Now - _startTime - TimeSpan.FromTicks(_microphone.BufferDuration.Ticks * skipStartBuffersCount)).TotalSeconds, _fileName, _fileId, _uploadableParts);
                    return;
                }

                if (_cancelRequested)
                {
                    RaiseRecordCanceled();
                    return;
                }
            }
            else
            {
                var now = DateTime.Now;
                if (!_lastTypingTime.HasValue ||
                    _lastTypingTime.Value.AddSeconds(1.0) < now)
                {
                    _lastTypingTime = DateTime.Now;
                    RaiseRecordingAudio();
                }

                if (UploadFileDuringRecording)
                {
                    UploadAudioFileAsync(false);
                }
            }
        }