// Record chunk length in milliseconds.
// Short time - increases repetitive noise; long time - creates echo.
// ~200 ms seems optimum; with headphones 500..1000 ms works best.
private int intDelay = 200;

// Starts the record/play loop: repeatedly records a short audio chunk into an
// in-memory stream and immediately plays it back, alternating between two
// streams until blnStart is cleared (presumably by Stop() — confirm).
// NOTE: async void is acceptable here only because this is a UI event handler.
private async void btnStart_Click(object sender, RoutedEventArgs e)
{
    blnStart = true;
    btnStart.Visibility = Visibility.Collapsed;
    btnStop.Visibility = Visibility.Visible;
    textBlock.Visibility = Visibility.Visible;

    mediaCaptureAudioPrimery = new Windows.Media.Capture.MediaCapture();
    var settings = new Windows.Media.Capture.MediaCaptureInitializationSettings();
    settings.StreamingCaptureMode = Windows.Media.Capture.StreamingCaptureMode.Audio;
    settings.MediaCategory = Windows.Media.Capture.MediaCategory.Other;
    settings.AudioProcessing = Windows.Media.AudioProcessing.Default; // Use only Default
    await mediaCaptureAudioPrimery.InitializeAsync(settings);

    recordProfile = MediaEncodingProfile.CreateWav(Windows.Media.MediaProperties.AudioEncodingQuality.Low);

    while (blnStart) // Repeat until stopped.
    {
        try
        {
            // First stream: record one chunk, then play it back.
            msIRAS0 = new MemoryStream();
            streamIRAS0 = msIRAS0.AsRandomAccessStream();
            await mediaCaptureAudioPrimery.StartRecordToStreamAsync(recordProfile, streamIRAS0);
            await Task.Delay(intDelay);
            await mediaCaptureAudioPrimery.StopRecordAsync();
            await PlayThreadMethod(streamIRAS0);

            // Second stream: switch recording target while the first plays out.
            msIRAS1 = new MemoryStream();
            // BUG FIX: the original wrapped msIRAS0 again here, so msIRAS1 was
            // never used and both passes shared one backing buffer.
            streamIRAS1 = msIRAS1.AsRandomAccessStream();
            await mediaCaptureAudioPrimery.StartRecordToStreamAsync(recordProfile, streamIRAS1);
            await Task.Delay(intDelay);
            await mediaCaptureAudioPrimery.StopRecordAsync();
            await PlayThreadMethod(streamIRAS1);
        }
        catch (Exception ex)
        {
            // Surface the failure instead of silently swallowing it, then stop.
            System.Diagnostics.Debug.WriteLine("Record/play loop failed: " + ex.Message);
            Stop();
        }
    }
}
/// <summary>
/// StartContinuousRecording method.
/// Starts recording audio using the microphone.
/// The audio stream is stored in memory with no limit on its size.
/// </summary>
/// <param name="MaxStreamSizeInBytes">
/// This parameter defines the maximum size of the in-memory buffer. When the size of the buffer
/// exceeds this limit, the client creates another stream and removes the previous stream.
/// By default the value is 0; in that case the audio stream is stored in memory with no limit on its size.
/// </param>
/// <param name="ThresholdDuration">
/// The duration in milliseconds used for the calculation of the average audio level.
/// With this parameter you define the period during which the average level is measured.
/// If the value is 0, no buffer will be sent to Cognitive Services.
/// </param>
/// <param name="ThresholdLevel">
/// The minimum average audio level necessary to trigger the recording;
/// a value between 0 and 65535. You can tune this value after several microphone tests.
/// If the value is 0, no buffer will be sent to Cognitive Services.
/// </param>
/// <return>Returns true if successful.
/// </return>
public async System.Threading.Tasks.Task<bool> StartContinuousRecording(ulong MaxStreamSizeInBytes, UInt16 ThresholdDuration, UInt16 ThresholdLevel)
{
    thresholdDuration = ThresholdDuration;
    thresholdLevel = ThresholdLevel;
    bool bResult = false;
    maxStreamSizeInBytes = MaxStreamSizeInBytes;

    // Lazily initialize the capture device on first use.
    if (isRecordingInitialized != true)
    {
        await InitializeRecording();
    }

    // Tear down any previous stream so its handlers don't fire twice and its
    // buffer is released before a new stream is created.
    if (STTStream != null)
    {
        STTStream.BufferReady -= STTStream_BufferReady;
        STTStream.AudioLevel -= STTStream_AudioLevel;
        STTStream.Dispose();
        STTStream = null;
    }
    STTStream = SpeechToTextMainStream.Create(maxStreamSizeInBytes, thresholdDuration, thresholdLevel);
    STTStream.AudioLevel += STTStream_AudioLevel;
    STTStream.BufferReady += STTStream_BufferReady;

    if ((STTStream != null) && (isRecordingInitialized == true))
    {
        try
        {
            Windows.Media.MediaProperties.MediaEncodingProfile MEP = Windows.Media.MediaProperties.MediaEncodingProfile.CreateWav(Windows.Media.MediaProperties.AudioEncodingQuality.Auto);
            if (MEP != null)
            {
                if (MEP.Audio != null)
                {
                    // Force 16 kHz / 16-bit / mono WAV (bytes-per-second = 16000 * 1 * 16 / 8).
                    uint framerate = 16000;
                    uint bitsPerSample = 16;
                    uint numChannels = 1;
                    uint bytespersecond = 32000;
                    MEP.Audio.Properties[WAVAttributes.MF_MT_AUDIO_SAMPLES_PER_SECOND] = framerate;
                    MEP.Audio.Properties[WAVAttributes.MF_MT_AUDIO_NUM_CHANNELS] = numChannels;
                    MEP.Audio.Properties[WAVAttributes.MF_MT_AUDIO_BITS_PER_SAMPLE] = bitsPerSample;
                    MEP.Audio.Properties[WAVAttributes.MF_MT_AUDIO_AVG_BYTES_PER_SECOND] = bytespersecond;
                    // Dump the resulting properties and read the effective values back.
                    foreach (var Property in MEP.Audio.Properties)
                    {
                        System.Diagnostics.Debug.WriteLine("Property: " + Property.Key.ToString());
                        System.Diagnostics.Debug.WriteLine("Value: " + Property.Value.ToString());
                        // FIX: compare against the named WAVAttributes constants used above
                        // instead of duplicating the raw Media Foundation GUID literals
                        // (5faeeae7-..., f2deb57f-..., 37e48bf5-... are the same attribute ids).
                        if (Property.Key == WAVAttributes.MF_MT_AUDIO_SAMPLES_PER_SECOND)
                        {
                            framerate = (uint)Property.Value;
                        }
                        if (Property.Key == WAVAttributes.MF_MT_AUDIO_BITS_PER_SAMPLE)
                        {
                            bitsPerSample = (uint)Property.Value;
                        }
                        if (Property.Key == WAVAttributes.MF_MT_AUDIO_NUM_CHANNELS)
                        {
                            numChannels = (uint)Property.Value;
                        }
                    }
                }
                // Trace the container properties as well (diagnostic only).
                if (MEP.Container != null)
                {
                    foreach (var Property in MEP.Container.Properties)
                    {
                        System.Diagnostics.Debug.WriteLine("Property: " + Property.Key.ToString());
                        System.Diagnostics.Debug.WriteLine("Value: " + Property.Value.ToString());
                    }
                }
            }
            await mediaCapture.StartRecordToStreamAsync(MEP, STTStream);
            bResult = true;
            isRecording = true;
            System.Diagnostics.Debug.WriteLine("Recording in audio stream...");
        }
        catch (Exception e)
        {
            // Best-effort: report the failure and fall through with bResult == false.
            System.Diagnostics.Debug.WriteLine("Exception while recording in audio stream:" + e.Message);
        }
    }
    return (bResult);
}