コード例 #1 (Code example #1)
        /// <summary>Creates a new instance of LiveAudioDtmfAnalyzer.</summary>
        /// <param name="waveIn">The audio data source to capture from.</param>
        /// <param name="forceMono">Indicates whether the audio data should be converted to mono
        /// first. Default is true.</param>
        public LiveAudioDtmfAnalyzer(IWaveIn waveIn, bool forceMono = true)
        {
            this.waveIn = waveIn;

            // Build the sample pipeline: buffer the live input, wrap it in a streaming
            // source, and hand it to the DTMF analyzer with a default configuration.
            var detectorConfig = new DetectorConfig();
            var sampleSource = new StreamingSampleSource(detectorConfig, Buffer(waveIn), forceMono);

            dtmfAudio = DtmfAudio.CreateFrom(sampleSource, detectorConfig);
        }
コード例 #2 (Code example #2)
        /// <summary>Creates a sample source that continuously reads from a buffered wave provider.</summary>
        /// <param name="config">Detector configuration supplying the maximum sample rate.</param>
        /// <param name="source">The buffered audio data to read samples from.</param>
        /// <param name="forceMono">Indicates whether the audio data should be converted to mono
        /// first. Default is true.</param>
        public StreamingSampleSource(DetectorConfig config, BufferedWaveProvider source, bool forceMono = true)
        {
            sourceBuffer = source;

            // Optionally mix down to a single channel, then cap the rate at the
            // detector's configured maximum.
            var provider = source.ToSampleProvider();
            samples = (forceMono ? provider.AsMono() : provider).DownsampleTo(config.MaxSampleRate);
        }
コード例 #3 (Code example #3)
        /// <summary>Creates a sample source over a fixed (non-live) wave provider.</summary>
        /// <param name="config">Detector configuration supplying the maximum sample rate and block size.</param>
        /// <param name="source">The audio data to read samples from.</param>
        /// <param name="forceMono">Indicates whether the audio data should be converted to mono
        /// first. Default is true.</param>
        public StaticSampleSource(DetectorConfig config, IWaveProvider source, bool forceMono = true)
        {
            // Optionally mix down to mono, downsample to the configured rate, and
            // expose the stream in fixed-size blocks for the detector.
            var provider = source.ToSampleProvider();

            samples = (forceMono ? provider.AsMono() : provider)
                .DownsampleTo(config.MaxSampleRate)
                .Blockwise(config.SampleBlockSize);

            // Optimistically assume that we are going to read at least BlockSize bytes.
            numSamplesRead = samples.BlockSize;
        }
コード例 #4 (Code example #4)
        /// <summary>Reads a WaveStream and enumerates all present DTMF tones.</summary>
        /// <remarks>By default this method forces a mono conversion by averaging all audio channels first. Turn it off with the
        ///  forceMono flag in order to analyze each channel separately.</remarks>
        /// <param name="waveFile">The audio data to analyze.</param>
        /// <param name="forceMono">Indicates whether the audio data should be converted to mono first. Default is true.</param>
        /// <returns>All detected DTMF tones along with their positions (i.e. audio channel, start time, and duration).</returns>
        public static IEnumerable<DtmfOccurence> DtmfTones(this WaveStream waveFile, bool forceMono = true)
        {
            var config        = new DetectorConfig();
            var dtmfAudio     = DtmfAudio.CreateFrom(new StaticSampleSource(config, waveFile, forceMono), config);
            var detectedTones = new Queue<DtmfOccurence>();

            // Drain the queue as tones become available; refill it from the audio
            // source whenever it runs dry. AddNextFrom returning false signals EOF.
            while (true)
            {
                if (detectedTones.Any())
                {
                    yield return detectedTones.Dequeue();
                    continue;
                }

                if (!detectedTones.AddNextFrom(dtmfAudio, waveFile))
                {
                    break;
                }
            }

            // Yield any tones that might have been cut off by EOF.
            foreach (var tone in detectedTones)
            {
                yield return tone;
            }
        }
コード例 #5 (Code example #5)
        /// <summary>Starts DTMF analysis on the given live audio input and raises
        /// evaluator events as tones start and stop.</summary>
        /// <param name="waveIn">The live audio source to analyze.</param>
        public void Start(IWaveIn waveIn)
        {
            var config = new DetectorConfig();

            config.PowerThreshold = _config.Tolerance;

            var analyzer = new LiveAudioDtmfAnalyzer(config, waveIn);

            analyzer.DtmfToneStarted += start =>
            {
                EvaluatorStarted?.Invoke(this, new EvaluatorEventArgs(start, null, DateTime.UtcNow));
            };

            analyzer.DtmfToneStopped += end =>
            {
                _logger.Debug($"AudioEvaluator->DtmfToneStopped {end.TimeStamp:O}");

                EvaluatorFinished?.Invoke(this, new EvaluatorEventArgs(null, end, DateTime.UtcNow));

                if (end.Duration > TimeSpan.Zero)
                {
                    _logger.Debug($"AudioEvaluator->Finished Tones: {_finishedTones.Count}");

                    _logger.Debug($"AudioEvaluator->Current Finished Tones:");
                    _logger.Debug($"----------------------------------------------------------");

                    // BUGFIX: every read and write of _finishedTones now happens under
                    // _lock. Previously only the enumeration below was locked, while
                    // FirstOrDefault, the Duration mutation, and Add ran outside the
                    // lock and raced with other users of _finishedTones.
                    lock (_lock)
                    {
                        foreach (var tone in _finishedTones)
                        {
                            _logger.Debug(
                                $"AudioEvaluator->Finished Tone: {tone.DtmfTone.HighTone} finished in {tone.Duration.TotalMilliseconds}ms");
                        }

                        _logger.Debug($"==========================================================");

                        // Merge with a tone of the same high frequency that finished
                        // within a +/-1 second window, instead of recording a duplicate.
                        var existingTone = _finishedTones.FirstOrDefault(x =>
                            x.DtmfTone.HighTone == end.DtmfTone.HighTone &&
                            Math.Abs(end.TimeStamp.Subtract(x.TimeStamp).TotalMilliseconds) <= 1000);

                        if (existingTone != null && existingTone.Duration < TimeSpan.FromMilliseconds(1500))
                        {
                            _logger.Debug($"AudioEvaluator->DtmfToneStopped: Existing tone for {existingTone.DtmfTone.HighTone} adding {end.Duration.TotalMilliseconds}ms");
                            existingTone.Duration += end.Duration;
                        }
                        else
                        {
                            _logger.Debug($"AudioEvaluator->DtmfToneStopped: No Existing tone for {end.DtmfTone.HighTone} for duration {end.Duration}");
                            _finishedTones.Add(end);
                        }
                    }
                }

                CheckFinishedTonesForTriggers();
            };

            analyzer.StartCapturing();
        }