void OnMicrophoneBufferReady(object sender, EventArgs args)
{
    // Pull the captured audio out of the microphone's internal buffer.
    byte[] buffer = new byte[buttonMic.GetSampleSizeInBytes(buttonMic.BufferDuration)];
    int bytesReturned = buttonMic.GetData(buffer);

    // BUGFIX: GetData can return fewer bytes than the buffer holds (e.g. a
    // final, partial buffer). Previously the return value was ignored and the
    // whole buffer - including a zeroed tail - was appended to the recording.
    if (bytesReturned < buffer.Length)
    {
        byte[] trimmed = new byte[bytesReturned];
        Array.Copy(buffer, trimmed, bytesReturned);
        buffer = trimmed;
    }

    buttonBufferCollection.Add(buffer);
}
/// <summary>
/// Finds a good microphone to use and sets up everything to start recording and playback.
/// Once a microphone is selected the game uses it throughout its lifetime.
/// If it gets disconnected it will tell the user to reconnect it.
/// </summary>
private void InitializeMicrophone()
{
    // We already have a microphone, skip out early.
    if (activeMicrophone != null)
    {
        return;
    }

    try
    {
        // Find the first microphone that's ready to rock.
        activeMicrophone = PickFirstConnectedMicrophone();

        if (activeMicrophone != null)
        {
            // Set the capture buffer size for low latency.
            // Microphone will call the game back when it has captured at least that much audio data.
            activeMicrophone.BufferDuration = TimeSpan.FromMilliseconds(100);

            // Subscribe to the event that's raised when the capture buffer is filled.
            activeMicrophone.BufferReady += BufferReady;

            // We will put the mic samples in this buffer. We only want to allocate it once.
            micSamples = new byte[activeMicrophone.GetSampleSizeInBytes(activeMicrophone.BufferDuration)];

            // This is a circular buffer. Samples from the mic will be mixed with the oldest sample
            // in this buffer and written back out to this buffer. This feedback creates an echo effect.
            echoBuffer = new byte[activeMicrophone.GetSampleSizeInBytes(TimeSpan.FromSeconds(echoDelay))];

            // Create a DynamicSoundEffectInstance in the right format to playback the captured audio.
            dynamicSound = new DynamicSoundEffectInstance(activeMicrophone.SampleRate, AudioChannels.Mono);
            dynamicSound.Play();

            // Success - now allocate the graphics resources to draw the audio waveform.
            effect = new BasicEffect(GraphicsDevice);
            effect.Projection = Matrix.CreateTranslation(-0.5f, -0.5f, 0) *
                Matrix.CreateOrthographicOffCenter(GraphicsDevice.Viewport.Bounds.Left,
                    GraphicsDevice.Viewport.Bounds.Right,
                    GraphicsDevice.Viewport.Bounds.Bottom,
                    GraphicsDevice.Viewport.Bounds.Top, -1f, 1f);

            // One vertex per 16-bit sample in the echo buffer.
            int sampleCount = echoBuffer.Length / sizeof(short);
            vertexPosColor = new VertexPositionColor[sampleCount];
            for (int index = 0; index < sampleCount; ++index)
            {
                vertexPosColor[index] = new VertexPositionColor(new Vector3(), Color.White);
            }
        }
    }
    catch (NoMicrophoneConnectedException)
    {
        // Uh oh, the microphone was disconnected in the middle of initialization.
        // Clean up everything so we can look for another microphone again on the next update.
        // BUGFIX: if PickFirstConnectedMicrophone() itself threw, activeMicrophone is still
        // null here - guard before unsubscribing to avoid a NullReferenceException.
        if (activeMicrophone != null)
        {
            activeMicrophone.BufferReady -= BufferReady;
            activeMicrophone = null;
        }
    }
}
// Buffer that receives raw audio data from the microphone.
private byte[] buffer;

public HijackX(int sampleRate)
{
    this.sampleRate = sampleRate;

    uartBitEnc = new float[SAMPLESPERBIT];

    // Phase step per output sample for the HIGHFREQ carrier tone.
    angleIncrement = 2 * Math.PI * HIGHFREQ / sampleRate;

    // Empty attribute dictionary required by OpenReadAsync.
    mediaSampleAttributes = new Dictionary<MediaSampleAttributeKeys, string>();

    // Re-usable stream for accumulating audio samples.
    memoryStream = new MemoryStream();

    // Pump the XNA dispatcher so microphone events get delivered.
    DispatcherTimer pumpTimer = new DispatcherTimer();
    pumpTimer.Interval = TimeSpan.FromMilliseconds(33);
    pumpTimer.Tick += new EventHandler(dt_Tick);
    pumpTimer.Start();

    // Bit timings, expressed in microphone samples per carrier period.
    SAMPLESPERBIT_MICROPHONE = (int)(microphone.SampleRate / HIGHFREQ + 0.5);
    SHORT_MICROPHONE = SAMPLESPERBIT_MICROPHONE / 2 + SAMPLESPERBIT_MICROPHONE / 4;
    LONG_MICROPHONE = SAMPLESPERBIT_MICROPHONE + SAMPLESPERBIT_MICROPHONE / 2;

    // Capture in one-second chunks; size the buffer to exactly one chunk.
    microphone.BufferReady += microphone_BufferReady;
    microphone.BufferDuration = TimeSpan.FromMilliseconds(1000);
    buffer = new byte[microphone.GetSampleSizeInBytes(microphone.BufferDuration)];

    // Begin recording.
    microphone.Start();
}
/// <summary>
/// Starts recording, data is stored in memory
/// </summary>
/// <param name="filePath"></param>
public void startRecording(string filePath)
{
    if (this.player != null)
    {
        // A player exists - recording is not allowed while in play mode.
        this.handler.InvokeCustomScript(new ScriptCallback(CallbackFunction, this.id, MediaError, MediaErrorPlayModeSet));
        return;
    }

    if (this.recorder != null)
    {
        // A recording session is already active.
        this.handler.InvokeCustomScript(new ScriptCallback(CallbackFunction, this.id, MediaError, MediaErrorAlreadyRecording));
        return;
    }

    try
    {
        this.audioFile = filePath;
        this.InitializeXnaGameLoop();

        // Capture in 500 ms chunks; buffer sized to exactly one chunk.
        this.recorder = Microphone.Default;
        this.recorder.BufferDuration = TimeSpan.FromMilliseconds(500);
        this.buffer = new byte[recorder.GetSampleSizeInBytes(this.recorder.BufferDuration)];
        this.recorder.BufferReady += recorderBufferReady;

        // Stream starts with a WAV header for the recorder's sample rate.
        this.memoryStream = new MemoryStream();
        this.WriteWavHeader(this.memoryStream, this.recorder.SampleRate);

        this.recorder.Start();
        FrameworkDispatcher.Update();
        this.SetState(MediaRunning);
    }
    catch (Exception)
    {
        this.handler.InvokeCustomScript(new ScriptCallback(CallbackFunction, this.id, MediaError, MediaErrorStartingRecording));
    }
}
// Constructor
public MainPage()
{
    InitializeComponent();

    // Tick slightly faster than the microphone fills its buffer.
    DispatcherTimer timer = new DispatcherTimer();
    timer.Interval = TimeSpan.FromMilliseconds(80);
    timer.Tick += OnTimerTick;
    timer.Start();

    FrameworkDispatcher.Update();

    // Raised whenever a fresh buffer of captured audio is available.
    audioIn.BufferReady += Microphone_BufferReady;

    // XNA is limited to 100ms latency. :(
    audioIn.BufferDuration = TimeSpan.FromMilliseconds(100);

    // Allocate a byte buffer matching one capture period.
    audioBuffer = new byte[audioIn.GetSampleSizeInBytes(audioIn.BufferDuration)];

    // The output must share the input's sample rate and channel count, so it
    // can only be created once audioIn.SampleRate is known.
    audioOut = new DynamicSoundEffectInstance(audioIn.SampleRate, AudioChannels.Mono);

    // Begin capturing and playing.
    audioIn.Start();
    audioOut.Play();
}
/// <summary>
/// Initializes new instance of WitMic
/// </summary>
/// <param name="witPipedStream">Stream to write audio to</param>
/// <param name="detectSpeechStop">Voice activity detection feature</param>
public WitMic(WitPipedStream witPipedStream, bool detectSpeechStop)
{
    this.witPipedStream = witPipedStream;
    this.detectSpeechStop = detectSpeechStop;

    microphone = Microphone.Default;
    if (microphone == null)
    {
        // FIX: corrected the grammar of the diagnostic message.
        WitLog.Log("Did you enable ID_CAP_MICROPHONE in WMAppManifest.xml?");
        return;
    }

    // Voice-activity detector (8.0, 16000, 60 as originally configured;
    // exact semantics depend on WitVadWrapper - not visible here).
    witDetectTalking = new WitVadWrapper(8.0, 16000, 60);

    // Capture in 100 ms chunks; buffer sized to exactly one chunk.
    microphone.BufferDuration = TimeSpan.FromMilliseconds(100);
    speech = new byte[microphone.GetSampleSizeInBytes(microphone.BufferDuration)];
    microphone.BufferReady += microphone_BufferReady;

    // Pump the XNA dispatcher very frequently so BufferReady events fire.
    updateTimer = new DispatcherTimer() { Interval = TimeSpan.FromMilliseconds(1) };
    updateTimer.Tick += (s, e) => { FrameworkDispatcher.Update(); };
}
private void Switch_Click(object sender, RoutedEventArgs e)
{
    if (!start)
    {
        try
        {
            stream = new MemoryStream();

            // Capture in one-second chunks and size the buffer to match.
            microphone.BufferDuration = TimeSpan.FromMilliseconds(1000);
            buffer = new byte[microphone.GetSampleSizeInBytes(microphone.BufferDuration)];

            microphone.Start();
            start = true;
            Switch.Content = "Stop";

            // BUGFIX: start the timer only when recording actually started.
            // Previously dt2.Start() ran after the catch block, so the timer
            // was started even when microphone setup threw.
            dt2.Start();
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.ToString());
        }
    }
    else
    {
        if (microphone.State == MicrophoneState.Started)
        {
            microphone.Stop();
            dt2.Stop();
            start = false;
            Switch.Content = "Record";
        }
    }
}
public AudioRecorderControl()
{
    InitializeComponent();

    // Pumps FrameworkDispatcher.Update() every 33 ms so XNA microphone
    // events are delivered (presumably - depends on XnaAsyncDispatcher).
    _asyncDispatcher = new XnaAsyncDispatcher(TimeSpan.FromMilliseconds(33), OnTimerTick);

    _microphone = Microphone.Default;
    if (_microphone == null)
    {
        // No microphone available: hide and disable the control entirely.
        RecordButton.Visibility = Visibility.Collapsed;
        Visibility = Visibility.Collapsed;
        IsHitTestVisible = false;
        return;
    }

    // FIX: removed the unused local that read _microphone.SampleRate.
    // Capture in 240 ms chunks; buffer sized to exactly one chunk.
    _microphone.BufferDuration = TimeSpan.FromMilliseconds(240);
    _duration = _microphone.BufferDuration;
    _buffer = new byte[_microphone.GetSampleSizeInBytes(_microphone.BufferDuration)];

    // Subscribe/unsubscribe with the control lifetime so the handler is not
    // leaked once the control is unloaded.
    Loaded += (o, e) => { _microphone.BufferReady += Microphone_OnBufferReady; };
    Unloaded += (o, e) => { _microphone.BufferReady -= Microphone_OnBufferReady; };
}
private void btnStartStop_Click(object sender, System.Windows.RoutedEventArgs e)
{
    // Always stop an in-progress capture before toggling state.
    if (microphone.State == MicrophoneState.Started)
    {
        microphone.Stop();
    }

    if (soundIsStarted == false)
    {
        // Keep the screen from going to sleep while sound is running.
        drDisplayRequest = new DisplayRequest();
        drDisplayRequest.RequestActive();

        soundIsStarted = true;
        btnStartStop.Content = "STOP";
        btnStartStop.Foreground = new SolidColorBrush(
            System.Windows.Media.Color.FromArgb((byte)255, (byte)255, (byte)0, (byte)0));

        // Get audio data in 200ms chunks - optimum.
        microphone.BufferDuration = TimeSpan.FromMilliseconds(200);

        // Allocate matching capture and playback buffers.
        intBufferDuration = microphone.GetSampleSizeInBytes(microphone.BufferDuration);
        buffer = new byte[intBufferDuration];
        bufferPlay = new byte[intBufferDuration];

        // Start recording.
        microphone.Start();
    }
    else
    {
        stopSound();
    }
}
public Recorder()
{
    // Microphone config: capture in 100 ms chunks.
    _microphone = Microphone.Default;
    _microphone.BufferDuration = TimeSpan.FromMilliseconds(100);
    _duration = _microphone.BufferDuration;

    // Size the capture buffer to exactly one BufferDuration of audio.
    // FIX: removed two unused locals (per-sample TimeSpan and its byte size)
    // that were computed and never read.
    numBytes = _microphone.GetSampleSizeInBytes(_microphone.BufferDuration);
    _buffer = new byte[numBytes];

    _microphone.BufferReady += MicrophoneBufferReady;

    stream = new MemoryStream();
    totalNumBytes = 0;
}
// Starts voice capture. (Original comment: invokes the send/receive functions.)
public void StartVoice()
{
    // Capture audio in 500 ms chunks.
    microphone.BufferDuration = TimeSpan.FromMilliseconds(500);

    // Allocate memory to hold one chunk of audio data.
    buffer = new byte[microphone.GetSampleSizeInBytes(microphone.BufferDuration)];

    // Begin recording.
    microphone.Start();
}
/// <summary>
/// Handles the Click event for the record button.
/// Sets up the microphone and data buffers to collect audio data,
/// then starts the microphone. Also, updates the UI.
/// </summary>
/// <param name="sender"></param>
/// <param name="e"></param>
private void recordButton_Click(object sender, EventArgs e)
{
    // Capture audio in half-second chunks.
    microphone.BufferDuration = TimeSpan.FromMilliseconds(500);

    // One chunk's worth of bytes.
    buffer = new byte[microphone.GetSampleSizeInBytes(microphone.BufferDuration)];

    // Discard anything already accumulated in the stream.
    stream.SetLength(0);

    // Begin recording and update the button states.
    microphone.Start();
    SetButtonStates(false, false, true, true);
    //StatusImage.Source = microphoneImage;
}
// Buffer that receives captured audio data.
byte[] msBuffer;

public RecPage()
{
    InitializeComponent();

    // Microphone.Default returns a reference to the default microphone.
    myMicrophone = Microphone.Default;

    // One-second capture chunks; size the buffer to match.
    myMicrophone.BufferDuration = TimeSpan.FromMilliseconds(1000);
    msBuffer = new byte[myMicrophone.GetSampleSizeInBytes(myMicrophone.BufferDuration)];
}
void StartMic()
{
    Microphone mic = Microphone.Default;

    // Room for four 100 ms capture chunks.
    buffer = new byte[mic.GetSampleSizeInBytes(TimeSpan.FromMilliseconds(100)) * 4];

    mic.BufferDuration = TimeSpan.FromMilliseconds(100);
    mic.BufferReady += mic_BufferReady;
    mic.Start();
}
public MainPage()
{
    InitializeComponent();

    this.isPlayVideoLaughs = false;
    this.isPigletInSorrow = false;

    // Accelerometer drives the piglet's motion reactions.
    _sensor.CurrentValueChanged += new EventHandler<SensorReadingEventArgs<AccelerometerReading>>(sensor_CurrentValueChanged);
    _sensor.Start();

    this.countDefaultAnimation = 0;

    // Animation timers.
    this.timerAnimationPigletStart = new System.Windows.Threading.DispatcherTimer();
    this.timerAnimationPigletStart.Tick += new EventHandler(timerAnimationPigletStart_Tick);
    this.timerAnimationPigletStart.Interval = new TimeSpan(0, 0, 0, 0, 300);

    this.timerAnimationPigletStop = new System.Windows.Threading.DispatcherTimer();
    this.timerAnimationPigletStop.Tick += new EventHandler(timerAnimationPigletStop_Tick);

    this.timerAnimationPigletInSorrow = new System.Windows.Threading.DispatcherTimer();
    this.timerAnimationPigletInSorrow.Tick += new EventHandler(timerAnimationPigletInSorrow_Tick);
    this.timerAnimationPigletInSorrow.Interval = new TimeSpan(0, 0, 0, 60, 0);

    // Default animation fires after a random 1-2 second delay.
    this.rnd = new Random();
    int timeRandomDefaultAnimation = this.rnd.Next(1, 3);
    this.timerAnimationPigletDefault = new System.Windows.Threading.DispatcherTimer();
    this.timerAnimationPigletDefault.Tick += new EventHandler(timerRandomDefaultAnimation_Tick);
    this.timerAnimationPigletDefault.Interval = new TimeSpan(0, 0, 0, timeRandomDefaultAnimation, 0);

    this.timerPigletListening = new System.Windows.Threading.DispatcherTimer();
    this.timerPigletListening.Tick += new EventHandler(timerPigletListening_Tick);

    this.vadState = STATE_LISTENING;
    this.stream = new MemoryStream();

    this.timerPigletTalk = new DispatcherTimer();
    this.timerPigletTalk.Tick += new EventHandler(timerPigletTalk_Tick);

    // Microphone setup: capture in 500 ms chunks.
    this.microphone = Microphone.Default;
    this.microphone.BufferDuration = TimeSpan.FromMilliseconds(500);
    this.microphone.BufferReady += new EventHandler<EventArgs>(microphone_BufferReady);

    // BUGFIX: allocate the capture buffer BEFORE starting the microphone.
    // Previously Start() was called first, so BufferReady could fire while
    // 'buffer' was still null and the handler would throw.
    this.buffer = new byte[microphone.GetSampleSizeInBytes(microphone.BufferDuration)];
    this.microphone.Start();

    // Limit how long trial users may talk.
    this.timerTrialTalk = new System.Windows.Threading.DispatcherTimer();
    this.timerTrialTalk.Tick += new EventHandler(timerTrialTalk_Tick);
    this.timerTrialTalk.Interval = TimeSpan.FromSeconds(MAX_TRIAL_TALK_DURATION);
    this.timerTrialTalk.Start();

    this.marketplaceDetailTask = new MarketplaceDetailTask();
#if DEBUG_TRIAL
    this.marketplaceDetailTask.ContentType = MarketplaceContentType.Applications;
    this.marketplaceDetailTask.ContentIdentifier = "a5cf363a-044a-46f0-a414-9235cc31f997";
#endif
}
/// <summary>
/// Create a microphone instance and set it ready to record audio.
/// </summary>
private MicrophoneWrapper()
{
    _microphone.BufferDuration = TimeSpan.FromMilliseconds(MicrophoneBufferDuration);

    // One capture period's worth of bytes.
    _buffer = new byte[_microphone.GetSampleSizeInBytes(_microphone.BufferDuration)];

    // Drain each completed buffer as it arrives.
    _microphone.BufferReady += (s, e) => GetRecordedData();
}
void StartMic()
{
    // Windows Phone only delivers mic data at 100 ms intervals,
    // which is not great for speech.
    Microphone mic = Microphone.Default;

    buffer = new byte[mic.GetSampleSizeInBytes(TimeSpan.FromMilliseconds(100))];
    mic.BufferDuration = TimeSpan.FromMilliseconds(100);
    mic.BufferReady += mic_BufferReady;
    mic.Start();
}
/// <summary>
/// Start recording from the Microphone
/// </summary>
public void StartRecording()
{
    if (!IsMicrophoneValid)
    {
        return;
    }

    // 100 ms capture chunks; buffer sized to exactly one chunk.
    _Mic.BufferDuration = TimeSpan.FromSeconds(0.1);
    buffer = new byte[_Mic.GetSampleSizeInBytes(_Mic.BufferDuration)];
    _Mic.BufferReady += handleBufferReady;
    _Mic.Start();
}
void StartMic()
{
    // Surface a trace message on the UI thread.
    Deployment.Current.Dispatcher.BeginInvoke(() => { textBlock1.Text += "in start mic"; });

    // Windows Phone only delivers mic data at 100 ms intervals,
    // which is not great for speech.
    Microphone mic = Microphone.Default;

    // Room for four 100 ms capture chunks.
    buffer = new byte[mic.GetSampleSizeInBytes(TimeSpan.FromMilliseconds(100)) * 4];

    mic.BufferDuration = TimeSpan.FromMilliseconds(100);
    mic.BufferReady += mic_BufferReady;
    mic.Start();
}
public void Start()
{
    _microphone.BufferReady += MicrophoneBufferReady;

    // One capture period of audio per callback.
    _micBuffer = new byte[_microphone.GetSampleSizeInBytes(_microphone.BufferDuration)];

    _recordStream = new MemoryStream();
    Buffer = null;

    // The XNA dispatcher must be pumped for BufferReady to fire.
    XnaFrameworkDispatcherService.StartService();
    _microphone.Start();
}
/// <summary>
/// Starts recording, data is stored in memory
/// </summary>
/// <param name="filePath">Destination path remembered for the recording</param>
public void startRecording(string filePath)
{
    if (this.player != null)
    {
        // Cannot record while a player exists (play mode).
        InvokeCallback(MediaError, MediaErrorPlayModeSet, false);
    }
    else if (this.recorder == null)
    {
        try
        {
            this.audioFile = filePath;
            this.InitializeXnaGameLoop();

            // Capture in 500 ms chunks; buffer sized to exactly one chunk.
            this.recorder = Microphone.Default;
            this.recorder.BufferDuration = TimeSpan.FromMilliseconds(500);
            this.buffer = new byte[recorder.GetSampleSizeInBytes(this.recorder.BufferDuration)];
            this.recorder.BufferReady += new EventHandler <EventArgs>(recorderBufferReady);

            MemoryStream stream = new MemoryStream();
            this.memoryStream = stream;

            int numBits = 16;
            int numBytes = numBits / 8;

            // Write a 44-byte PCM WAV header (inline version from AudioFormatsHelper).
            // Mono, 16-bit. The RIFF chunk size and data chunk size are written as
            // 0 placeholders, presumably patched when recording stops - TODO confirm.
            stream.Write(System.Text.Encoding.UTF8.GetBytes("RIFF"), 0, 4);
            stream.Write(BitConverter.GetBytes(0), 0, 4);                                    // RIFF chunk size (placeholder)
            stream.Write(System.Text.Encoding.UTF8.GetBytes("WAVE"), 0, 4);
            stream.Write(System.Text.Encoding.UTF8.GetBytes("fmt "), 0, 4);
            stream.Write(BitConverter.GetBytes(16), 0, 4);                                   // fmt chunk size
            stream.Write(BitConverter.GetBytes((short)1), 0, 2);                             // audio format: 1 = PCM
            stream.Write(BitConverter.GetBytes((short)1), 0, 2);                             // channels: mono
            stream.Write(BitConverter.GetBytes(this.recorder.SampleRate), 0, 4);             // sample rate
            stream.Write(BitConverter.GetBytes(this.recorder.SampleRate * numBytes), 0, 4);  // byte rate (rate * blockAlign)
            stream.Write(BitConverter.GetBytes((short)(numBytes)), 0, 2);                    // block align (1 ch * 2 bytes)
            stream.Write(BitConverter.GetBytes((short)(numBits)), 0, 2);                     // bits per sample
            stream.Write(System.Text.Encoding.UTF8.GetBytes("data"), 0, 4);
            stream.Write(BitConverter.GetBytes(0), 0, 4);                                    // data chunk size (placeholder)

            this.recorder.Start();
            FrameworkDispatcher.Update();
            this.SetState(PlayerState_Running);
        }
        catch (Exception)
        {
            InvokeCallback(MediaError, MediaErrorStartingRecording, false);
            //this.handler.InvokeCustomScript(new ScriptCallback(CallbackFunction, this.id, MediaError, MediaErrorStartingRecording),false);
        }
    }
    else
    {
        // A recording session is already active.
        InvokeCallback(MediaError, MediaErrorAlreadyRecording, false);
        //this.handler.InvokeCustomScript(new ScriptCallback(CallbackFunction, this.id, MediaError, MediaErrorAlreadyRecording),false);
    }
}
void StopRecording()
{
    // Drain whatever partial buffer the microphone still holds.
    byte[] extraBuffer = new byte[microphone.GetSampleSizeInBytes(microphone.BufferDuration)];
    int extraBytes = microphone.GetData(extraBuffer);

    microphone.Stop();

    // Persist all captured chunks, then the trailing partial chunk.
    using (IsolatedStorageFile storage = IsolatedStorageFile.GetUserStoreForApplication())
    using (IsolatedStorageFileStream stream = storage.CreateFile(FILE_NAME))
    {
        foreach (byte[] chunk in bufferCollection)
        {
            stream.Write(chunk, 0, chunk.Length);
        }
        stream.Write(extraBuffer, 0, extraBytes);
    }

    StateButton.Content = "Play";
    state = STATE.RECORDED;
}
/// <summary>
/// LoadContent will be called once per game and is the place to load
/// all of your content.
/// </summary>
protected override void LoadContent()
{
    // Create a new SpriteBatch, which can be used to draw textures.
    spriteBatch = new SpriteBatch(GraphicsDevice);

    // NOTE(review): playback is hard-coded to 22050 Hz mono; confirm this
    // matches mic.SampleRate, otherwise playback pitch will be off.
    microphoneSoundEffect = new DynamicSoundEffectInstance(22050, AudioChannels.Mono);

    // 100 ms capture chunks; buffer sized to exactly one chunk.
    mic.BufferDuration = TimeSpan.FromMilliseconds(100);
    buffer = new byte[mic.GetSampleSizeInBytes(mic.BufferDuration)];
    mic.BufferReady += handleBufferReady;
}
public void StopRecording()
{
    // Drain the final, possibly partial buffer before stopping.
    int sampleSize = _microphone.GetSampleSizeInBytes(_microphone.BufferDuration);
    byte[] extraBuffer = new byte[sampleSize];
    int extraBytes = _microphone.GetData(extraBuffer);

    // Stop recording.
    _microphone.Stop();

    // Stop the UI timers.
    _timer.Stop();
    _statusTimer.Stop();

    // Describe the memo: total bytes = full chunks + trailing partial chunk.
    int totalSize = _memoBufferCollection.Count * sampleSize + extraBytes;
    TimeSpan duration = _microphone.GetSampleDuration(totalSize);
    MemoInfo memoInfo = new MemoInfo(DateTime.UtcNow, totalSize, duration);

    // Persist the raw audio to isolated storage.
    using (IsolatedStorageFile storage = IsolatedStorageFile.GetUserStoreForApplication())
    {
        using (IsolatedStorageFileStream stream = storage.CreateFile(memoInfo.FileName))
        {
            // Write buffers from the collection, then the partial buffer.
            foreach (byte[] chunk in _memoBufferCollection)
            {
                stream.Write(chunk, 0, chunk.Length);
            }
            stream.Write(extraBuffer, 0, extraBytes);
        }
    }

    StoreEntry(memoInfo);
}
public SoundManager(Analysis page)
{
    this._page = page;

    // Unsubscribe first so repeated construction never double-subscribes.
    _microphone.BufferReady -= microphone_BufferReady;
    _microphone.BufferReady += microphone_BufferReady;

    // Only set a duration if one hasn't been configured yet.
    if (_microphone.BufferDuration == TimeSpan.Zero)
    {
        _microphone.BufferDuration = TimeSpan.FromMilliseconds(100);
    }

    _buffer = new byte[_microphone.GetSampleSizeInBytes(_microphone.BufferDuration)];
    _notesList = new List<SoundNote>();
    AchievedNotes = new List<SoundNote>();

    _microphone.Start();
}
/// <summary>
/// Start recording.
/// </summary>
public void StartRecording()
{
    // Capture audio in 100 ms chunks.
    microphone.BufferDuration = TimeSpan.FromMilliseconds(100);

    // One chunk's worth of bytes.
    buffer = new byte[microphone.GetSampleSizeInBytes(microphone.BufferDuration)];

    // Discard any audio left over from a previous recording.
    App.AudioModel.stream.SetLength(0);

    microphone.Start();
}
private void StartVoiceMemoRecording()
{
    // Already capturing - nothing to do.
    if (microphone.State == MicrophoneState.Started)
    {
        return;
    }

    microphone.BufferReady += Microfone_BufferReady;

    // 200 ms capture chunks; buffer sized to exactly one chunk.
    microphone.BufferDuration = TimeSpan.FromMilliseconds(200);
    voiceBuffer = new byte[microphone.GetSampleSizeInBytes(microphone.BufferDuration)];

    microphone.Start();
    SetVoiceMemoPreviewState();
}
private void Button_Click(object sender, RoutedEventArgs e) // Recording Button
{
    try
    {
        // Show the microphone image while recording.
        var imageUri = new Uri(@"Assets/mic11.png", UriKind.Relative);
        DisplayImage.Source = new BitmapImage(imageUri);

        if (check == false)
        {
            // Recording the user's own voice.
            this.Grid_Loaded(null, null);
            microphone.BufferDuration = TimeSpan.FromMilliseconds(500);
            buffer = new byte[microphone.GetSampleSizeInBytes(microphone.BufferDuration)];
            UserHelp1.Text = "Recording Your Voice";
        }
        else
        {
            // Recording the reference ("ideal") voice.
            microphone.BufferDuration = TimeSpan.FromMilliseconds(500);
            tempBuffer = new byte[microphone.GetSampleSizeInBytes(microphone.BufferDuration)];
            UserHelp1.Text = "Recording IDEAL Voice";
        }

        // Reset both streams for the incoming audio.
        memStream_2.SetLength(0);
        memStream.SetLength(0);

        microphone.Start();
        SetButtonStates(false, false, true, false);
    }
    catch (Exception ee)
    {
        MessageBox.Show(ee.Message);
    }
}
// Kicks off a streamed speech-recognition session: configures the microphone,
// resets the static capture state, and begins the web-service handshake.
// Not reentrant - a second call while a session is in progress is a no-op.
public static void Start(User u, SpeechHelper.SpeechStateCallbackDelegate del, Delegate networkDel)
{
    // StartStreamed is not reentrant - make sure the caller didn't violate the contract
    if (speechOperationInProgress == true)
    {
        return;
    }

    // set the flag
    speechOperationInProgress = true;

    // store the delegates passed in
    speechStateDelegate = del;
    networkDelegate = networkDel;
    user = u;

    // initialize the microphone information and speech buffer
    // (1-second chunks; speechBuffer holds exactly one chunk)
    mic.BufferDuration = TimeSpan.FromSeconds(1);
    int length = mic.GetSampleSizeInBytes(mic.BufferDuration);
    speechBuffer = new byte[length];
    speechBufferList.Clear();
    bufferMutexList.Clear();
    numBytes = 0;

    // trace the speech request
    TraceHelper.AddMessage("Starting Speech");

    // initialize frame index
    frameCounter = 0;

    // callback when the mic gathered 1 sec worth of data
    // (subscribe only once for the process lifetime; the handler is
    // deliberately never removed, guarded by initializedBufferReadyEvent)
    if (initializedBufferReadyEvent == false)
    {
        mic.BufferReady += new EventHandler <EventArgs>(MicBufferReady);
        initializedBufferReadyEvent = true;
    }

    // connect to the web service, and once that completes successfully,
    // it will invoke the NetworkInterfaceCallback delegate to indicate the network quality
    // this delegate will then turn around and send the appropriate encoding in the SendPost call
    NetworkHelper.BeginSpeech(
        new NetworkInformationCallbackDelegate(NetworkInformationCallback),
        new NetworkDelegate(NetworkCallback));
}
/// <summary>
/// Starts recording, data is stored in memory
/// </summary>
private void StartRecording()
{
    // 500 ms capture chunks.
    this.microphone = Microphone.Default;
    this.microphone.BufferDuration = TimeSpan.FromMilliseconds(500);

    // Update the UI to reflect that recording is in progress.
    this.btnTake.IsEnabled = false;
    this.btnStartStop.Content = RecordingStopCaption;

    // One chunk's worth of bytes.
    this.buffer = new byte[microphone.GetSampleSizeInBytes(this.microphone.BufferDuration)];
    this.microphone.BufferReady += MicrophoneBufferReady;

    // Stream pre-seeded with a WAV header for this sample rate.
    this.memoryStream = new MemoryStream();
    this.memoryStream.InitializeWavStream(this.microphone.SampleRate);

    this.duration = new TimeSpan(0);
    this.microphone.Start();
}