Example #1
        public IOSAudioDriver()
        {
            _engine = new AVAudioEngine();
            _mixer  = new AVAudioMixerNode();

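            // Nodes must be attached to the engine before they can be connected to one another.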
            _engine.AttachNode(_mixer);
            _engine.Connect(_mixer, _engine.MainMixerNode, _engine.MainMixerNode.GetBusOutputFormat(0));

            _players = new Player[MaxPlayers];
            for (int i = 0; i < MaxPlayers; i++)
            {
                var player = new Player();
                player.Callback = player.OnPlayedBack;
                player.Done     = true;
                player.Node     = new AVAudioPlayerNode();
                player.PlayId   = 1;
                player.Id       = (uint)i;
                _players[i]     = player;

                _engine.AttachNode(player.Node);
                //_engine.Connect(player.Node, _engine.MainMixerNode, _format);
            }

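            // Prepare() preallocates resources; StartAndReturnError() begins rendering.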
            _engine.Prepare();
            if (!_engine.StartAndReturnError(out var error))
            {
                Console.WriteLine("AVAudioEngine failed to start: {0}", error);
            }
        }
Example #2
    private void initPlayer()
    {
        audioEngine = new AVAudioEngine();
        NSError error;

        if (!AVAudioSession.SharedInstance().SetPreferredSampleRate(sampleRate, out error))
        {
            throw new Exception("Error setting preffered sample rate for player: " + error);
        }
        AVAudioSession.SharedInstance().SetCategory(AVAudioSessionCategory.PlayAndRecord, AVAudioSessionCategoryOptions.InterruptSpokenAudioAndMixWithOthers);
        AVAudioSession.SharedInstance().SetActive(true);

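        // Non-interleaved 32-bit float PCM is used for the player-to-mixer connection.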
        audioPlayer = new AVAudioPlayerNode();
        setVolume(AVAudioSession.SharedInstance().OutputVolume);
        inputAudioFormat = new AVAudioFormat(AVAudioCommonFormat.PCMFloat32, sampleRate, (uint)channels, false);

        audioEngine.AttachNode(audioPlayer);
        audioEngine.Connect(audioPlayer, audioEngine.MainMixerNode, inputAudioFormat);
        audioEngine.Prepare();
        if (!audioEngine.StartAndReturnError(out error))
        {
            throw new Exception("Error starting playback audio engine: " + error);
        }
        audioPlayer.Play();
    }
Example #3
        // Start speech recognition.
        public void StartRecognizing()
        {
            RecognizedText = string.Empty;
            IsRecognizing  = true;

            // Ask the user for permission to use speech recognition.
            SFSpeechRecognizer.RequestAuthorization((SFSpeechRecognizerAuthorizationStatus status) =>
            {
                switch (status)
                {
                case SFSpeechRecognizerAuthorizationStatus.Authorized:
                    // If the user granted permission, create the required instances and then run the actual recognition.
                    // When creating the SFSpeechRecognizer instance, even if no locale is passed to the constructor,
                    // Japanese is recognized without problems as long as the device's default language is Japanese.
                    audioEngine        = new AVAudioEngine();
                    speechRecognizer   = new SFSpeechRecognizer();
                    recognitionRequest = new SFSpeechAudioBufferRecognitionRequest();
                    startRecognitionSession();
                    break;

                default:
                    // If the user did not grant permission, stop here.
                    return;
                }
            });
        }
        protected virtual IObservable <string> Listen(bool completeOnEndOfSpeech) => Observable.Create <string>(ob =>
        {
            NSError error = null;

            var audioEngine           = new AVAudioEngine();
            var speechRecognizer      = new NSSpeechRecognizer();
            speechRecognizer.Delegate = new NSSpeechRecognizerDelegate();
            // Tear-down: release the native resources when the subscription is disposed.
            return () =>
            {
                speechRecognizer.Dispose();
                audioEngine.Dispose();
            };
        });
Example #5
        public void AVAudioIONodeTests_AudioUnitTest()
        {
            Asserts.EnsureYosemite();

            AVAudioEngine eng  = new AVAudioEngine();
            AVAudioIONode node = eng.OutputNode;
            AUUnit        unit = node.AudioUnit;

            unit.GetElementCount(AudioUnitScopeType.Global);
            // Make sure this doesn't crash.
        }
 /// <inheritdoc />
 public VoiceToTextServiceImpl()
 {
     try
     {
         _audioEngine      = new AVAudioEngine();
         _speechRecognizer = new SFSpeechRecognizer();
     }
     catch (Exception ex)
     {
         System.Diagnostics.Debug.WriteLine(ex);
     }
 }
        private void ResetSpeechToText()
        {
            _speechRecognizer  = new SFSpeechRecognizer();
            _speechAudioEngine = new AVAudioEngine();

            _speechRecognitionRequest = new SFSpeechAudioBufferRecognitionRequest();
            _speechRecognitionRequest.ShouldReportPartialResults = true;
            _speechRecognitionRequest.TaskHint          = SFSpeechRecognitionTaskHint.Search;
            _speechRecognitionRequest.ContextualStrings = new string[] { "for", "the", "a", "an" };

            _speechRecognitionTask = null;
        }
Example #8
        public CoreMidiSynthesizer()
        {
            details  = this;
            _engine  = new AVAudioEngine();
            _sampler = new AVAudioUnitSampler();
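            // Wire the sampler through the main mixer as a mono 44.1 kHz connection.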
            _engine.AttachNode(_sampler);
            _engine.Connect(_sampler, _engine.MainMixerNode, format: new AVAudioFormat(44100, 1));

            LoadSoundFontIntoSampler(0);
            AddObservers();
            StartEngine();
            SetSessionPlayback();
        }
Example #9
        public SpeechToText()
        {
            _audioEngine              = new AVAudioEngine();
            _speechRecognizer         = new SFSpeechRecognizer();
            _speechRecognitionRequest = new SFSpeechAudioBufferRecognitionRequest();
            _speechRecognitionTask    = new SFSpeechRecognitionTask();

            SFSpeechRecognizer.RequestAuthorization((SFSpeechRecognizerAuthorizationStatus status) =>
            {
                // We're going to assume that you've selected to authorise the request, otherwise,
                // you're wasting your own time.
            });
        }
Example #10
    public void stop()
    {
        if (!running)
        {
            return;
        }

        running = false;

        AVAudioSession.SharedInstance().SetActive(false);

        if (audioPlayer != null)
        {
            try
            {
                audioPlayer.Stop();
                audioPlayer.Reset();
            }
            catch (Exception)
            {
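                // Ignore errors during teardown; the player may already be stopped.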
            }

            audioPlayer.Dispose();
            audioPlayer = null;
        }

        if (audioDecoder != null)
        {
            audioDecoder.stop();
            audioDecoder.Dispose();
            audioDecoder = null;
        }

        if (audioEngine != null)
        {
            try
            {
                audioEngine.Stop();
                audioEngine.Reset();
            }
            catch (Exception)
            {
            }

            audioEngine.Dispose();
            audioEngine = null;
        }
    }
Example #11
            static void StopInstances()
            {
                AudioEngine?.InputNode?.RemoveTapOnBus(0);
                AudioEngine?.Stop();
                AudioEngine?.Dispose();
                AudioEngine = null;

                LiveSpeechRequest?.EndAudio();
                LiveSpeechRequest?.Dispose();
                LiveSpeechRequest = null;

                SpeechRecognizer?.Dispose();
                SpeechRecognizer = null;

                //Timer?.Dispose();
                //Timer = null;
            }
Example #12
        public void AVAudioIONodeTests_AudioUnitTest()
        {
            TestRuntime.AssertNotVirtualMachine();

            Asserts.EnsureYosemite();

            using (AVAudioEngine eng = new AVAudioEngine()) {
                using (AVAudioIONode node = eng.OutputNode) {
                    using (AUUnit unit = node.AudioUnit)
                        unit.GetElementCount(AudioUnitScopeType.Global);
                    using (AUUnit unit = node.AudioUnit)
                        unit.GetElementCount(AudioUnitScopeType.Global);
                    using (AUUnit unit = node.AudioUnit)
                        unit.GetElementCount(AudioUnitScopeType.Global);
                }
            }
            // Make sure this doesn't crash.
        }
Example #13
    private void initRecorder()
    {
        audioRecorder = new AVAudioEngine();
        NSError error;

        if (!AVAudioSession.SharedInstance().SetPreferredSampleRate(sampleRate, out error))
        {
            throw new Exception("Error setting preffered sample rate for recorder: " + error);
        }
        AVAudioSession.SharedInstance().SetCategory(AVAudioSessionCategory.PlayAndRecord, AVAudioSessionCategoryOptions.InterruptSpokenAudioAndMixWithOthers);
        AVAudioSession.SharedInstance().SetActive(true);
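        // Capture as non-interleaved 16-bit integer PCM; the tap below hands buffers of this format to onDataAvailable.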
        AVAudioFormat recording_format = new AVAudioFormat(AVAudioCommonFormat.PCMInt16, sampleRate, (uint)channels, false);
        uint          buffer_size      = (uint)CodecTools.getPcmFrameByteSize(sampleRate, bitRate, channels) * 1000;

        audioRecorder.InputNode.InstallTapOnBus(0, buffer_size, recording_format, onDataAvailable);
        audioRecorder.Prepare();
        if (!audioRecorder.StartAndReturnError(out error))
        {
            throw new Exception("Error starting recording audio engine: " + error);
        }
    }
Example #14
        ///<Summary>
        /// Load wave or mp3 audio file from the Android assets folder
        ///</Summary>
        public bool Load(string fileName)
        {
            DeletePlayer();

            NSError error;

            if (!String.IsNullOrWhiteSpace(fileName))
            {
                string directory = Path.GetDirectoryName(fileName);
                string filename  = Path.GetFileNameWithoutExtension(fileName);
                string extension = Path.GetExtension(fileName).Substring(1);
                NSUrl  url       = NSBundle.MainBundle.GetUrlForResource(filename, extension, directory);
                audioFile = new AVAudioFile(url, out error);
            }

            if (audioFile != null)
            {
                componentDescription = new AudioComponentDescription();
                componentDescription.ComponentType    = AudioComponentType.FormatConverter;
                componentDescription.ComponentSubType = (int)AudioUnitSubType.Varispeed;

                engine = new AVAudioEngine();
                player = new AVAudioPlayerNode();
                pitch  = new AVAudioUnitTimePitch(componentDescription);


                engine.AttachNode(player);
                engine.AttachNode(pitch);

                engine.Connect(player, pitch, audioFile.ProcessingFormat);
                engine.Connect(pitch, engine.MainMixerNode, audioFile.ProcessingFormat);

                engine.Prepare();
                engine.StartAndReturnError(out NSError startError);
            }

            // Report success only if the file was actually loaded.
            return audioFile != null;
        }
Example #15
    public void stop()
    {
        if (!running)
        {
            return;
        }
        running = false;

        AVAudioSession.SharedInstance().SetActive(false);

        if (audioRecorder != null)
        {
            try
            {
                audioRecorder.InputNode.RemoveTapOnBus(0);
                audioRecorder.Stop();
                audioRecorder.Reset();
            }
            catch (Exception)
            {
            }
            audioRecorder.Dispose();
            audioRecorder = null;
        }

        if (audioEncoder != null)
        {
            audioEncoder.stop();
            audioEncoder.Dispose();
            audioEncoder = null;
        }

        lock (outputBuffers)
        {
            outputBuffers.Clear();
        }
    }
        private void StartAnalysing()
        {
            Model = GetModel();

            AudioEngine   = new AVAudioEngine();
            AnalysisQueue = new DispatchQueue("com.r2.SoundAnalysis", false);

            var inputFormat = AudioEngine.InputNode.GetBusInputFormat(0);
            var request     = new SNClassifySoundRequest(Model, out var soundRequestError);

            Analyser = new SNAudioStreamAnalyzer(inputFormat);
            Analyser.AddRequest(request, this, out var addRequestError);

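            // Forward microphone buffers to the analyzer on a serial dispatch queue, off the audio render thread.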
            AudioEngine.InputNode.InstallTapOnBus(
                bus: 0,
                bufferSize: 8192,
                format: inputFormat,
                tapBlock: (buffer, when) =>
                AnalysisQueue.DispatchAsync(() =>
                                            Analyser.Analyze(buffer, when.SampleTime)));

            AudioEngine.Prepare();
            AudioEngine.StartAndReturnError(out var initEngineError);
        }
Example #17
        void DeletePlayer()
        {
            Stop();

            if (player != null && player.Playing)
            {
                player.Stop();
            }

            if (engine != null && engine.Running)
            {
                engine.Stop();
            }

            if (player != null && engine != null && pitch != null)
            {
                engine.Dispose();
                player.Dispose();
                pitch.Dispose();
                engine = null;
                player = null;
                pitch  = null;
            }
        }
Example #18
            static void StartRecording()
            {
                lock (Lock)
                {
                    if (SpeechRecognizer == null)
                    {
                        SpeechRecognizer  = new SFSpeechRecognizer();
                        LiveSpeechRequest = new SFSpeechAudioBufferRecognitionRequest();
                    }

                    var audioSession = AVAudioSession.SharedInstance();

                    audioSession.SetCategory(AVAudioSessionCategory.PlayAndRecord);
                    audioSession.SetMode(AVAudioSession.ModeDefault, out NSError error);
                    audioSession.OverrideOutputAudioPort(AVAudioSessionPortOverride.Speaker, out NSError speakerError);
                    audioSession.SetActive(true);

                    if (LogErrorAndStop(error) || LogErrorAndStop(speakerError))
                    {
                        return;
                    }

                    AudioEngine = new AVAudioEngine();
                    var node = AudioEngine.InputNode;

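                    // Partial results stream interim transcriptions while the user is still speaking.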
                    LiveSpeechRequest.ShouldReportPartialResults = true;

                    RecognitionTask = SpeechRecognizer.GetRecognitionTask(LiveSpeechRequest, (SFSpeechRecognitionResult result, NSError err) =>
                    {
                        if (LogErrorAndStop(err))
                        {
                            return;
                        }

                        var currentText = result.BestTranscription.FormattedString;

                        if (currentText.HasValue())
                        {
                            Listeners?.Invoke(currentText, result.Final);
                        }

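                        // In continuous mode, periodically tear down and restart recognition
                        // (live speech-recognition requests are time-limited by the system).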
                        if (IsContinuous)
                        {
                            Timer = new System.Timers.Timer(20000)
                            {
                                Enabled = true
                            };
                            Timer.Elapsed += (s, ev) =>
                            {
                                StopInstances();
                                StartRecording();
                            };

                            Timer.Start();
                        }
                    });

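                    // Append each captured microphone buffer to the live recognition request.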
                    var recordingFormat = node.GetBusOutputFormat(0);
                    node.InstallTapOnBus(0, 1024, recordingFormat, (AVAudioPcmBuffer buffer, AVAudioTime when) =>
                    {
                        LiveSpeechRequest.Append(buffer);
                    });

                    if (AudioEngine == null)
                    {
                        Stop();
                        return;
                    }

                    AudioEngine?.Prepare();
                    AudioEngine?.StartAndReturnError(out error);

                    if (LogErrorAndStop(error))
                    {
                        return;
                    }
                }
            }
Example #19
        public void AudioSetupStart()
        {
            FloatQueue        = new Queue <float>();
            engine            = new AVAudioEngine();
            nodeEQ            = new AVAudioUnitEQ(1);
            nodeEQ.GlobalGain = 1;
            engine.AttachNode(nodeEQ);

            AVAudioUnitEQFilterParameters filter = nodeEQ.Bands[0];

            filter.FilterType = AVAudioUnitEQFilterType.LowPass;
            filter.Frequency  = 1000; //In hertz
            filter.Bandwidth  = 1;
            filter.Bypass     = false;
            // Gain in dB; valid range is -96 dB through +24 dB.
            filter.Gain = 50;

            //not sure if this is necessary
            nodeEQ.Bands[0] = filter;

            //1
            AVAudioFormat format2 = engine.MainMixerNode.GetBusOutputFormat(0);

            //2
            //AVAudioPcmBuffer buffMix = new AVAudioPcmBuffer(engine.MainMixerNode.GetBusInputFormat(0),2);
            //AVAudioTime timeMix = engine.MainMixerNode.LastRenderTime;
            //AVAudioNodeTapBlock MixerBlock = new AVAudioNodeTapBlock((buffMix, timeMix) =>

            //2
            engine.MainMixerNode.InstallTapOnBus(0, 1024, format2, (AVAudioPcmBuffer buffMix, AVAudioTime when) =>
            {
                //Console.WriteLine("Called");

                //3     **Don't have an 'Updater'; also not checking for null**
                IntPtr channelData = buffMix.FloatChannelData;

                int lengthOfBuffer = (int)buffMix.FrameLength;

                int frame_length = (int)buffMix.FrameLength;

                /*
                 * byte[] bytesArray = new byte[lengthOfBuffer];
                 *
                 * Marshal.Copy(channelData, bytesArray, 0, lengthOfBuffer);
                 */
                /*
                 * double total = 0;
                 * int nonZero = 0;
                 * for (int a = 0; a < buffMix.FrameLength - 4; a+=1)
                 * {
                 *  //float tempx = BitConverter.ToSingle(bytesArray, a);
                 *  float tempx = bytesArray[a];
                 *  Console.WriteLine(tempx);
                 *  double temp = Math.Pow(tempx, 2);
                 *  total += temp;
                 *  if (temp.Equals(0))
                 *      nonZero++;
                 * }
                 * int tester;
                 * //Need to figure out how the buffer works, if at all
                 * total = Math.Sqrt(total / nonZero);
                 * double avgPower = 20 * Math.Log10(total);
                 * avgPower /= 160;
                 *
                 * if (avgPower > .9)
                 *  High_Level_Detected++;
                 * FloatQueue.Enqueue((float)avgPower);
                 * //Console.WriteLine(avgPower);
                 *
                 * Marshal.FreeHGlobal(channelData);
                 */
                //var ns = buffMix.MutableCopy(); //test later

                T_Proccess tws   = new T_Proccess(channelData, lengthOfBuffer, frame_length);
                Thread processor = new Thread(new ThreadStart(tws.ThreadProc));
                processor.Start();
            });

            AVAudioFormat format = engine.InputNode.GetBusInputFormat(0);

            engine.Connect(engine.InputNode, engine.MainMixerNode, format);
            engine.Connect(nodeEQ, engine.MainMixerNode, format);

            StartEngine();
            started = true;
        }
Example #20
 public Recorder()
 {
     AudioEngine       = new AVAudioEngine();
     SpeechRecognizer  = new SFSpeechRecognizer();
     LiveSpeechRequest = new SFSpeechAudioBufferRecognitionRequest();
 }
Example #21
        public void StartRecording(string voice, bool longTimeout = false)
        {
            if (!SpeechEnabled)
            {
                return;
            }
            // Setup audio session
            LastResult  = "";
            AudioEngine = new AVAudioEngine();
            NSLocale voiceLocale = NSLocale.FromLocaleIdentifier(voice);

            SpeechRecognizer  = new SFSpeechRecognizer(voiceLocale);
            LiveSpeechRequest = new SFSpeechAudioBufferRecognitionRequest();

            NSError error;
            var     audioSession = AVAudioSession.SharedInstance();

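            // Record-only category with Measurement mode keeps system signal processing to a minimum while capturing speech.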
            audioSession.SetCategory(AVAudioSessionCategory.Record);
            audioSession.SetMode(AVAudioSession.ModeMeasurement, out error);
            if (error != null)
            {
                OnSpeechError?.Invoke("Audio session error: " + error.ToString());
                return;
            }
            audioSession.SetActive(true, AVAudioSessionSetActiveOptions.NotifyOthersOnDeactivation);

            LiveSpeechRequest.ShouldReportPartialResults = true;

            var node = AudioEngine.InputNode;

            if (node == null)
            {
                OnSpeechError?.Invoke("Couldn't initialize Speech Input");
                return;
            }

            RecognitionTask = SpeechRecognizer.GetRecognitionTask(LiveSpeechRequest, (SFSpeechRecognitionResult result, NSError err) => {
                if (IsCancelled)
                {
                    node.RemoveTapOnBus(0);
                    return;
                }
                if (err != null)
                {
                    OnSpeechError?.Invoke(err.ToString());
                }
                else if (result != null)
                {
                    LastResult = result.BestTranscription.FormattedString;
                    Console.WriteLine("You said: \"{0}\". Final: {1}",
                                      LastResult, result.Final);
                    m_lastSpeech = DateTime.Now;
                    if (result.Final)// || !IsRecording) {
                    {
                        OnSpeechOK?.Invoke(LastResult);
                    }
                }
                if ((result != null && result.Final) || err != null || !IsRecording)
                {
                    IsRecording = false;
                    //node.RemoveTapOnBus(0);
                    AudioEngine.Stop();
                    m_speechTimer.Close();
                }
            });

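            // Tap the input node and feed every captured buffer into the recognition request.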
            var recordingFormat = node.GetBusOutputFormat(0);

            node.InstallTapOnBus(0, 1024, recordingFormat, (AVAudioPcmBuffer buffer, AVAudioTime when) => {
                //Console.WriteLine("--> {0}: {1} {2}.{3}", buffer.FrameLength, when.HostTime);
                // Append buffer to recognition request
                LiveSpeechRequest.Append(buffer);
            });

            // Start recording
            AudioEngine.Prepare();
            AudioEngine.StartAndReturnError(out error);

            if (error != null)
            {
                OnSpeechError?.Invoke("Speech init error: " + error.ToString());
                IsRecording = false;
                return;
            }
            IsRecording   = true;
            IsCancelled   = false;
            LastResult    = "";
            m_lastSpeech  = DateTime.MaxValue;
            m_startSpeech = DateTime.Now;
            m_timeout     = longTimeout ? m_phraseTimeout : m_wordTimeout;

            m_speechTimer           = new System.Timers.Timer(250);
            m_speechTimer.AutoReset = true;
            m_speechTimer.Elapsed  += (sender, e) => {
                CheckRecording();
            };
            m_speechTimer.Start();
        }
 public void InitializeProperties()
 {
     AudioEngine       = new AVAudioEngine();
     SpeechRecognizer  = new SFSpeechRecognizer();
     LiveSpeechRequest = new SFSpeechAudioBufferRecognitionRequest();
 }