internal void Listener_SpeechKitSend(object sender, AudioDataEventArgs e)
        {
            // The service times out ~5s after recording stops, so waiting longer is pointless.
            bool locked = callMutex.WaitOne(5 * 1000);

            if (locked)
            {
                // Recreate the connection once we have sent more than MAX_BYTES_SENT (~10 MB).
                this.bytesSent += e.AudioData.Length;
                if (this.bytesSent >= MAX_BYTES_SENT)
                {
                    this._call = null;
                    // BUGFIX: reset the counter, otherwise every subsequent packet
                    // would keep nulling _call and force a reconnect on each send.
                    this.bytesSent = 0;
                }
                try
                {
                    WriteAudio(e.AudioData);
                }
                catch (Exception ex) //when (ex.StatusCode == StatusCode.DeadlineExceeded)
                {
                    Log.Error($"Error writing data: {ex.Message}\n Retrying... ");
                    // Drop the stale call and retry once with a fresh connection.
                    this._call = null;
                    WriteAudio(e.AudioData);
                }
                finally
                {
                    // Always release the mutex we acquired, even if the retry throws.
                    if (locked)
                    {
                        callMutex.ReleaseMutex();
                    }
                    locked = false;
                }
            }
        }
Exemplo n.º 2
0
        //TODO: Remove, this is only for testing...
        // Splits the interleaved 16-bit PCM buffer into per-channel samples and
        // feeds the preferred channel to the decoder, one pulse per frame.
        private void AudioDataAvailable(object o, AudioDataEventArgs args)
        {
            var channelCount = args.Channels;
            var perChannel   = args.BytesRecorded / channelCount;
            var rate         = args.SampleRate;
            var bits         = args.BitsPerSample;

            var leftSamples  = new int[perChannel];
            var rightSamples = new int[perChannel];
            var offset       = 0;

            var frameCount = args.BytesRecorded / (2 * channelCount);
            for (var frame = 0; frame < frameCount; frame++)
            {
                if (channelCount == 1)
                {
                    // Mono: the single channel is treated as "right".
                    rightSamples[frame] = BitConverter.ToInt16(args.Buffer, offset);
                    offset += 2;
                }
                else
                {
                    // Stereo (or more): left sample first, then right.
                    leftSamples[frame] = BitConverter.ToInt16(args.Buffer, offset);
                    offset += 2;
                    rightSamples[frame] = BitConverter.ToInt16(args.Buffer, offset);
                    offset += 2;
                }

                // Left channel only applies when there is more than one channel.
                var pulse = (args.PreferedChannel == AudioChannel.Left && channelCount > 1)
                    ? leftSamples[frame]
                    : rightSamples[frame];

                DecoderManager.Decoder.ProcessPulse(rate, pulse, FilterManager.IsEnabled, FilterManager.Filter);
            }
        }
Exemplo n.º 3
0
        /// <summary>
        /// Respond to capture event.
        /// This should return as fast as possible.
        /// </summary>
        public void HandleCapturedSamples(object sender, AudioDataEventArgs e)
        {
            // Nothing to do for an empty capture.
            if (e.Samples == null || e.Samples.Length <= 0)
            {
                return;
            }

            // Target output rate: CD-quality 44.1 kHz.
            int targetRate = 44100;

            // Mono gets the simple resampler; interleaved stereo gets the mixing one.
            short[] resampled = (_channels == 1)
                ? ResampleBuffer(e, targetRate)
                : ResampleStereo(e, targetRate);

            TimedSample stamped = new TimedSample(resampled, e.CaptureTime);

            // Best effort: a failing processor must not stall the capture path.
            try
            {
                if (ProcessorQueue != null)
                {
                    foreach (var processor in ProcessorQueue)
                    {
                        processor.ProcessSample(stamped);
                    }
                }
            }
            catch { }

            lock (_sampleBuffer)
            {
                _sampleBuffer.Add(stamped);
            }
        }
Exemplo n.º 4
0
        /// <summary>
        /// Error based resampler.
        /// Only works for MONO.
        /// Nearest-neighbour resampling: each output index maps to the closest
        /// source index by a fixed step ratio (no interpolation).
        /// </summary>
        private short[] ResampleBuffer(AudioDataEventArgs e, double sample_sec)
        {
            int total_samps = e.Samples.Length;
            int out_samples = (int)(total_samps * (sample_sec / (double)(_incomingSampleRate)));

            if (total_samps == out_samples)
            {
                return(e.Samples);                                        // don't bother resampling if it's already correct
            }
            if (out_samples < 1)
            {
                return(e.Samples);                                        // guard: avoid division by zero below
            }
            // BUGFIX: divide as double — the original's integer division floored
            // the step ratio and badly skewed the resampling.
            double error_rate = (total_samps / (double)out_samples);

            unchecked {
                short[] result = new short[out_samples];

                for (int i = 0; i < result.Length; i++)
                {
                    // Nearest source sample, clamped to the valid index range.
                    int ec = (int)(i * error_rate);
                    ec = Math.Min(ec, e.Samples.Length - 1);
                    ec = Math.Max(ec, 0);

                    result[i] = e.Samples[ec];
                }
                return(result);
            }
        }
 // Updates AudioLevel with the average dBFS of our own channel's audio.
 private void Channel_AudioDataReceived(object sender, AudioDataEventArgs e)
 {
     // Only react to audio coming from our own Caspar channel.
     if (sender != _casparChannel)
     {
         return;
     }

     // Average only the channels that actually report a dBFS value.
     var levels = e.AudioData.dBFS
         .Where(f => f.HasValue)
         .Select(f => f.Value)
         .ToArray();

     if (levels.Length > 0)
     {
         AudioLevel = (int)levels.Average();
     }
 }
Exemplo n.º 6
0
        // Analyzes each incoming audio frame and raises ConsonantChanged when
        // the detected consonant state transitions (see precedence note below).
        public void GotAudioData(object sender, AudioDataEventArgs e)
        {
            if (!this.Enabled)
                return;

            var info = AudioAnalyzer.AnalyzeFrame(e.Data);
            // NOTE(review): by operator precedence this reads as
            // (changed AND new == None) OR (current == None) — i.e. it fires on a
            // transition into silence, and on EVERY frame while curConsonant is
            // None (including repeated None->None). Confirm this is intended and
            // not a misplaced parenthesis around the OR.
            if (info.Consonant != curConsonant && info.Consonant == Consonant.None || curConsonant == Consonant.None)
            {
                //Console.WriteLine(info.Consonant.ToString() + " - " + info.LoudestFreq.ToString());
                if (ConsonantChanged != null)
                    ConsonantChanged(this, new ConsonantChangedEventArgs(curConsonant, info.Consonant));
                curConsonant = info.Consonant;
            }
        }
Exemplo n.º 7
0
        // Assembles raw audio payloads into AAC LC frames and forwards each
        // complete frame to the audio sink; partial frames are skipped.
        public void ConsumeAudioData(object sender, AudioDataEventArgs args)
        {
            var assembled = _audioAssembler.AssembleAudioFrame(
                data: args.AudioData,
                profile: AACProfile.LC,
                samplingFreq: (int)_audioFormat.SampleRate,
                channels: (byte)_audioFormat.Channels);

            // The assembler returns null until it has a complete frame.
            if (assembled != null)
            {
                _audio.FeedAudioData(assembled);
            }
        }
Exemplo n.º 8
0
        // Feeds each recorded audio buffer to the speech recognizer. On failure,
        // shuts down both recording and recognition and surfaces the error in the UI.
        void audioRecorder_BufferReady(object sender, AudioDataEventArgs e)
        {
            int registerResult = 0;

            try
            {
                registerResult = speechRecognizer.RegisterAudioBytes(e.Data);
            }
            catch (Exception ex)
            {
                Debug.WriteLine(ex.Message);
                // Stop both ends of the pipeline so we don't keep feeding a broken recognizer.
                StopNativeRecorder();
                StopSpeechRecognizerProcessing();
                // BUGFIX: corrected the typo in the user-facing status message ("all stoped").
                StateMessageBlock.Text = "All stopped because of an error";
            }

            // incoming raw sound
            //Debug.WriteLine("{0} bytes of raw audio received, {1} frames processed at PocketSphinx", e.Data.Length, registerResult);
        }
Exemplo n.º 9
0
        // Round-trips an audio packet through the TCP writer and the client
        // packet handler, verifying codec and payload length survive intact.
        public void AudioPacketTest()
        {
            TCPPacketWriter writer = new TCPPacketWriter();
            MemoryStream ms = new MemoryStream();
            writer.SetStream(ms);

            byte[] audioData = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
            // Only the first 10 of the 13 bytes are written (DataLen = 10).
            Packet.WriteAudioData(writer, Guid.Empty, FeenPhone.Audio.Codecs.CodecID.Gsm610ChatCodec, audioData, 10);

            ClientPacketHandler handler = new ClientPacketHandler();

            EventSource.OnAudioData += EventSource_OnAudioData;
            LastAudioDataEventArgs = null;
            handler.Handle(new Queue<byte>(ms.ToArray()));
            // BUGFIX: the original subscribed AGAIN here ("+="), leaking a
            // duplicate handler; it must unsubscribe after handling.
            EventSource.OnAudioData -= EventSource_OnAudioData;

            Assert.IsNotNull(LastAudioDataEventArgs);
            Assert.AreEqual(FeenPhone.Audio.Codecs.CodecID.Gsm610ChatCodec, LastAudioDataEventArgs.Codec);
            Assert.AreEqual(10, LastAudioDataEventArgs.Data.Length);
            Assert.AreEqual(10, LastAudioDataEventArgs.DataLen);
        }
Exemplo n.º 10
0
        /// <summary>
        /// Handles the more complex case of resampling interleaved stereo samples to mono:
        /// each output slot accumulates its channel's nearest source sample, scaled by
        /// 1/_channels so the channels mix without overflowing.
        /// </summary>
        private short[] ResampleStereo(AudioDataEventArgs e, double sample_sec)
        {
            if (e.Samples.Length < 2)
            {
                return(e.Samples);
            }
            int total_samps = e.Samples.Length;
            int out_samples = (int)(total_samps * (sample_sec / (double)(_incomingSampleRate)));

            // Keep the output length a whole number of channel frames.
            out_samples /= _channels;
            out_samples -= out_samples % _channels;
            if (out_samples < 1)
            {
                return(e.Samples);
            }
            // BUGFIX: divide as double — the original's integer division floored
            // the step ratio and badly skewed the resampling.
            double error_rate = (total_samps / (double)out_samples);

            unchecked {
                short[] result = new short[out_samples];

                for (int c = 0; c < _channels; c++)
                {
                    for (int i = c; i < result.Length; i += _channels)
                    {
                        int ec = (int)(i * error_rate);

                        ec -= ec % _channels;                         // fix to channel slot
                        ec += c;

                        // Clamp to valid source indices.
                        ec = Math.Min(ec, e.Samples.Length - 1);
                        ec = Math.Max(ec, 0);

                        result[i] += (short)(e.Samples[ec] / _channels);
                    }
                }
                return(result);
            }
        }
Exemplo n.º 11
0
        // Adapter read callback: demultiplexes the interleaved input buffer
        // into the per-channel sink lists while a measurement is running.
        private void OnAdapterRead(object sender, AudioDataEventArgs args)
        {
            if (!_running)
            {
                return;
            }

            // A discarded buffer outside the input-wait phase means we fell behind.
            if (_phase != Phase.AwaitingInput && args.Discard)
            {
                OnError(new Exception("Unable to process signal on time."));
                Stop(true);
                return;
            }

            // Channel numbers are 1-based, hence the "- 1" when indexing the buffer.
            foreach (var entry in Sinks)
            {
                var channel = entry.Key;
                var sink = entry.Value;
                for (var frame = 0; frame < args.Frames; frame++)
                {
                    sink.Add(args.Buffer[frame * args.Channels + channel - 1]);
                }
            }
        }
Exemplo n.º 12
0
        // Adapter write callback: fills the interleaved output buffer from the
        // per-channel generators while a measurement is running.
        private void OnAdapterWrite(object sender, AudioDataEventArgs args)
        {
            if (!_running)
            {
                return;
            }

            // A discarded buffer outside the input-wait phase means we fell behind.
            if (_phase != Phase.AwaitingInput && args.Discard)
            {
                OnError(new Exception("Unable to generate signal on time."));
                Stop(true);
                return;
            }

            // Loop order matters: Generators[channel].Next() is stateful, so the
            // frame-major / channel-minor order must be preserved.
            // Channel numbers are 1-based, hence the "- 1" when indexing the buffer.
            for (var frame = 0; frame < args.Frames; frame++)
            {
                foreach (var channel in _generators.Keys)
                {
                    args.Buffer[frame * args.Channels + channel - 1] =
                        args.Discard ? 0.0 : Generators[channel].Next();
                }
            }
        }
Exemplo n.º 13
0
        /* Called by NanoClient on freshly received data */
        // Assembles incoming audio payloads into AAC LC frames, lazily initializes
        // the decoder's codec parameters from the first complete frame, then
        // enqueues the frame for decoding.
        public void ConsumeAudioData(object sender, AudioDataEventArgs args)
        {
            // TODO: Sorting
            AACFrame frame = _audioAssembler.AssembleAudioFrame(
                data: args.AudioData,
                profile: AACProfile.LC,
                samplingFreq: (int)_audioFormat.SampleRate,
                channels: (byte)_audioFormat.Channels);

            // BUGFIX: bail out on incomplete frames BEFORE touching the frame —
            // the original dereferenced it for codec setup first, risking a
            // NullReferenceException whenever the assembler returned null.
            if (frame == null)
            {
                return;
            }

            if (!_audioContextInitialized)
            {
                _audioHandler.UpdateCodecParameters(frame.GetCodecSpecificData());
                _audioContextInitialized = true;
            }

            // Enqueue encoded audio data in decoder
            _audioHandler.PushData(frame);
        }
Exemplo n.º 14
0
        // Assembles fixed-format (AAC LC, 48 kHz, stereo) frames and dumps them
        // to disk — either one file per frame or appended to a single file.
        void IAudioConsumer.ConsumeAudioData(object sender, AudioDataEventArgs args)
        {
            var frame = AudioAssembler.AssembleAudioFrame(
                args.AudioData, AACProfile.LC, 48000, 2);

            // Incomplete frame: nothing to write yet.
            if (frame == null)
            {
                return;
            }

            var payload = frame.RawData;
            if (_dumpSingleFrames)
            {
                // One new file per frame; CreateNew guards against overwriting.
                string frameFilename = $"{_fileName}.audio.{audioFrameCount}.{frame.TimeStamp}.raw";
                using (FileStream fs = new FileStream(frameFilename, FileMode.CreateNew))
                {
                    fs.Write(payload, 0, payload.Length);
                }
                audioFrameCount++;
            }
            else
            {
                _audioFile.Write(payload, 0, payload.Length);
            }
        }
Exemplo n.º 15
0
        /// <summary>
        /// Respond to capture event.
        /// This should return as fast as possible.
        /// </summary>
        public void HandleCapturedSamples(object sender, AudioDataEventArgs e)
        {
            // Ignore empty captures.
            if (e.Samples == null || e.Samples.Length <= 0)
            {
                return;
            }

            // Target output rate: CD-quality 44.1 kHz.
            int targetRate = 44100;

            // Mono uses the simple resampler; interleaved stereo the mixing one.
            short[] resampled;
            if (_channels == 1)
            {
                resampled = ResampleBuffer(e, targetRate);
            }
            else
            {
                resampled = ResampleStereo(e, targetRate);
            }

            var stamped = new TimedSample(resampled, e.CaptureTime);

            // Best effort: a failing processor must never stall the capture path.
            try
            {
                if (ProcessorQueue != null)
                {
                    foreach (var processor in ProcessorQueue)
                    {
                        processor.ProcessSample(stamped);
                    }
                }
            }
            catch { }

            lock (_sampleBuffer)
            {
                _sampleBuffer.Add(stamped);
            }
        }
Exemplo n.º 16
0
 // Forwards the service's raw audio pointer/size to the local handler.
 private void service_AudioDataReady(object sender, AudioDataEventArgs e)
 {
     OnAudioDataReady(e.Ptr, e.Size);
 }
 // Passes received audio data straight through to the audio sink.
 public void ConsumeAudioData(object sender, AudioDataEventArgs args)
 {
     _audio.ConsumeAudioData(args.AudioData);
 }
Exemplo n.º 18
0
 // Test hook: records the most recent audio event args for later assertions.
 private void EventSource_OnAudioData(object sender, AudioDataEventArgs e)
 {
     LastAudioDataEventArgs = e;
 }
Exemplo n.º 19
0
 // Capturing samples is not supported by this implementation.
 public void HandleCapturedSamples(object sender, AudioDataEventArgs e)
 {
     throw new NotSupportedException();
 }
 // Intentionally empty: chat audio is ignored by this component.
 public void OnChatAudioDataReceived(object sender, AudioDataEventArgs args)
 {
 }
		// Fan-out: forwards the captured samples to every buffer in this collection.
		public void HandleCapturedSamples (object sender, AudioDataEventArgs e) {
			foreach (var buf in this) {
				buf.HandleCapturedSamples(sender, e);
			}
		}
Exemplo n.º 22
0
 // Test hook: records the most recent audio event args for later assertions.
 private void EventSource_OnAudioData(object sender, AudioDataEventArgs e)
 {
     LastAudioDataEventArgs = e;
 }
Exemplo n.º 23
0
 // Plays the audio block the controller just made available.
 private void Controller_AudioDataReady(object sender, AudioDataEventArgs e)
 {
     PlayAudio(e.Ptr, e.Size);
 }
Exemplo n.º 24
0
        /// <summary>
        /// Error based resampler.
        /// Only works for MONO.
        /// Nearest-neighbour resampling: each output index maps to the closest
        /// source index by a fixed step ratio (no interpolation).
        /// </summary>
        private short[] ResampleBuffer(AudioDataEventArgs e, double sample_sec)
        {
            int total_samps = e.Samples.Length;
            int out_samples = (int)(total_samps * (sample_sec / (double)(_incomingSampleRate)));
            if (total_samps == out_samples) return e.Samples; // don't bother resampling if it's already correct
            if (out_samples < 1) return e.Samples;            // guard: avoid division by zero below
            // BUGFIX: divide as double — the original's integer division floored
            // the step ratio and badly skewed the resampling.
            double error_rate = (total_samps / (double)out_samples);

            unchecked {
                short[] result = new short[out_samples];

                for (int i = 0; i < result.Length; i++) {
                    // Nearest source sample, clamped to the valid index range.
                    int ec = (int)(i * error_rate);
                    ec = Math.Min(ec, e.Samples.Length - 1);
                    ec = Math.Max(ec, 0);

                    result[i] = e.Samples[ec];
                }
                return result;
            }
        }
Exemplo n.º 25
0
        /// <summary>
        /// Handles the more complex case of resampling interleaved stereo samples to mono:
        /// each output slot accumulates its channel's nearest source sample, scaled by
        /// 1/_channels so the channels mix without overflowing.
        /// </summary>
        private short[] ResampleStereo(AudioDataEventArgs e, double sample_sec)
        {
            if (e.Samples.Length < 2) return e.Samples;
            int total_samps = e.Samples.Length;
            int out_samples = (int)(total_samps * (sample_sec / (double)(_incomingSampleRate)));
            // Keep the output length a whole number of channel frames.
            out_samples /= _channels;
            out_samples -= out_samples % _channels;
            if (out_samples < 1) return e.Samples;
            // BUGFIX: divide as double — the original's integer division floored
            // the step ratio and badly skewed the resampling.
            double error_rate = (total_samps / (double)out_samples);

            unchecked {
                short[] result = new short[out_samples];

                for (int c = 0; c < _channels; c++) {
                    for (int i = c; i < result.Length; i+=_channels) {
                        int ec = (int)(i * error_rate);

                        ec -= ec % _channels; // fix to channel slot
                        ec += c;

                        // Clamp to valid source indices.
                        ec = Math.Min(ec, e.Samples.Length - 1);
                        ec = Math.Max(ec, 0);

                        result[i] += (short)(e.Samples[ec] / _channels);
                    }
                }
                return result;
            }
        }
Exemplo n.º 26
0
        // Synthesizes the given text via the gRPC synthesizer stream and raises
        // TextToSpeachResultsRecieved with the first audio chunk received.
        // NOTE(review): as in the original, only the FIRST response chunk is
        // consumed — confirm whether the stream can deliver additional chunks.
        private async void SynthesizeTxtBuffer(string text, string model)
        {
            UtteranceSynthesisRequest request = MakeRequest(text, model);
            //   request.Hints.Add(new Hints() { Voice = "kuznetsov_male" });

            Metadata callHeaders = this.MakeMetadata();

            callHeaders.Add("x-folder-id", this.FolderId);

            CancellationTokenSource cancellationSource = new CancellationTokenSource();

            var call = synthesizerClient.UtteranceSynthesis(request, headers: callHeaders,
                                                            deadline: DateTime.UtcNow.AddMinutes(5));

            log.Information($"synthizing: {text}");
            var respEnum = call.ResponseStream.ReadAllAsync(cancellationSource.Token).GetAsyncEnumerator();

            try
            {
                // BUGFIX: the original combined OnCompleted with a Thread.Sleep
                // busy-wait, blocking a thread and racing the read of Current.
                // Awaiting the enumerator directly is correct and simpler.
                if (await respEnum.MoveNextAsync() && respEnum.Current != null)
                {
                    byte[] data = respEnum.Current.AudioChunk.Data.ToByteArray();
                    TextToSpeachResultsRecieved?.Invoke(this, AudioDataEventArgs.FromByateArray(data, data.Length));
                    log.Information($"Audio chunk {data.Length} bytes recieved.");
                }
                else
                {
                    log.Warning("No data in response");
                }
            }
            catch (Exception ex)
            {
                log.Error(ex.Message);
            }
            finally
            {
                if (respEnum != null)
                {
                    await respEnum.DisposeAsync();
                }
                // Release the underlying gRPC call (was leaked by the original).
                call.Dispose();
            }
        }
Exemplo n.º 27
0
 // Raises RecordStatusChanged for subscribers, if any are attached.
 protected virtual void OnRecordStatusChanged(AudioDataEventArgs e)
 {
     RecordStatusChanged?.Invoke(this, e);
 }