Example #1
        /// <summary>
        /// Initializes a new instance of the WitMic class
        /// </summary>
        /// <param name="witPipedStream">Stream to write recorded audio to</param>
        /// <param name="detectSpeechStop">Whether to stop recording automatically when speech ends (voice activity detection)</param>
        public WitMic(WitPipedStream witPipedStream, bool detectSpeechStop)
        {
            this.witPipedStream   = witPipedStream;
            this.detectSpeechStop = detectSpeechStop;

            microphone = Microphone.Default;

            if (microphone == null)
            {
                WitLog.Log("Did you enabled ID_CAP_MICROPHONE in WMAppManifest.xml?");

                return;
            }

            witDetectTalking = new WitVadWrapper(8.0, 16000, 60);

            microphone.BufferDuration = TimeSpan.FromMilliseconds(100);

            speech = new byte[microphone.GetSampleSizeInBytes(microphone.BufferDuration)];

            microphone.BufferReady += microphone_BufferReady;

            // The XNA Microphone requires FrameworkDispatcher.Update() to be
            // called regularly for BufferReady events to fire in a Silverlight app.
            updateTimer = new DispatcherTimer()
            {
                Interval = TimeSpan.FromMilliseconds(1)
            };
            updateTimer.Tick += (s, e) =>
            {
                FrameworkDispatcher.Update();
            };
        }
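A minimal usage sketch for the constructor above, assuming the caller creates the WitPipedStream itself; the meaning of detectSpeechStop is inferred from its name and parameter description, and the bool result of StartRecording() comes from the CaptureVoiceIntent example further down.

        // Sketch: wire a WitMic to a piped stream and start capturing.
        // detectSpeechStop = true is assumed to stop recording automatically
        // when the speaker goes silent (voice activity detection).
        WitPipedStream witPipedStream = new WitPipedStream();
        WitMic witMic = new WitMic(witPipedStream, detectSpeechStop: true);

        if (!witMic.StartRecording())
        {
            // Recording could not start, e.g. the microphone is unavailable or
            // ID_CAP_MICROPHONE is missing from WMAppManifest.xml.
            WitLog.Log("Recording did not start");
        }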
 /// <summary>
 /// Initializes a new instance of the WitSpeechRequestTask class
 /// </summary>
 /// <param name="accessToken">Access token</param>
 /// <param name="witPipedStream">Audio stream to upload</param>
 /// <param name="type">Content type (e.g. audio/raw)</param>
 /// <param name="encoding">Audio encoding (e.g. signed-integer)</param>
 /// <param name="bits">Bits per sample</param>
 /// <param name="rate">Samples per second</param>
 /// <param name="order">Byte order</param>
 public WitSpeechRequestTask(string accessToken, WitPipedStream witPipedStream, string type, string encoding, int bits, int rate, ByteOrder order)
 {
     this.accessToken = accessToken;
     this.witPipedStream = witPipedStream;
     this.type = type;
     this.encoding = encoding;
     this.bits = bits;
     this.rate = rate;
     this.order = order;
 }
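A minimal sketch of using this constructor directly from inside an async method; the argument values mirror the StreamRawAudio example further down, and "YOUR_ACCESS_TOKEN" plus the witPipedStream variable are placeholders.

 // Sketch: upload 16-bit, 16 kHz signed little-endian raw PCM read from
 // witPipedStream. UploadAsync() returning the raw JSON string is taken
 // from the StreamRawAudio example below.
 WitSpeechRequestTask request = new WitSpeechRequestTask(
     "YOUR_ACCESS_TOKEN",     // placeholder access token
     witPipedStream,
     "audio/raw",
     "signed-integer",
     16,
     16000,
     ByteOrder.LITTLE_ENDIAN);

 string json = await request.UploadAsync();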
Example #5
        /// <summary>
        /// Capture intent and entities from a microphone
        /// </summary>
        /// <returns>Captured data</returns>
        public async Task<WitResponse> CaptureVoiceIntent()
        {
            WitPipedStream witPipedStream = new WitPipedStream();

            witMic = new WitMic(witPipedStream, DetectSpeechStop);

            if (witMic.StartRecording())
            {
                return await StreamRawAudio(witPipedStream, "audio/raw", "signed-integer", 16, 16000, ByteOrder.LITTLE_ENDIAN);
            }
            else
            {
                return null;
            }
        }
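A hedged usage sketch for CaptureVoiceIntent: the call is assumed to run inside an async method of the same client class (or on an instance of it), and only a null check is made because the shape of WitResponse is not shown in these examples.

        // Sketch: capture one utterance from the microphone and check the result.
        WitResponse witResponse = await CaptureVoiceIntent();

        if (witResponse == null)
        {
            // Recording did not start or the upload returned no result.
        }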
Example #6
        /// <summary>
        /// Streams raw audio data and returns captured intent and entities
        /// </summary>
        /// <param name="witPipedStream">Audio stream</param>
        /// <param name="type">Type</param>
        /// <param name="encoding">Encoding</param>
        /// <param name="bits">Bits per sample</param>
        /// <param name="rate">Samples per second</param>
        /// <param name="order">Bytes order</param>
        /// <returns>Captured data</returns>
        public async Task<WitResponse> StreamRawAudio(WitPipedStream witPipedStream, string type, string encoding, int bits, int rate, ByteOrder order)
        {
            WitSpeechRequestTask witSpeechRequestTask = new WitSpeechRequestTask(accessToken, witPipedStream, type, encoding, bits, rate, order);

            string result = await witSpeechRequestTask.UploadAsync();

            if (result != null)
            {
                WitResponse witResponse = JsonConvert.DeserializeObject<WitResponse>(result);

                return witResponse;
            }

            return null;
        }
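For reference, the same call that CaptureVoiceIntent makes above, with each argument annotated from the parameter documentation; nothing here goes beyond what the earlier examples already show.

        // Sketch: stream raw PCM that a recorder (for example WitMic) is writing
        // into witPipedStream, using the values from CaptureVoiceIntent.
        WitResponse witResponse = await StreamRawAudio(
            witPipedStream,
            "audio/raw",              // type: content type of the audio
            "signed-integer",         // encoding
            16,                       // bits per sample
            16000,                    // samples per second
            ByteOrder.LITTLE_ENDIAN); // byte order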