Example #1
        public override void SetData(short[] data, int offset)
        {
            if (null == Buffer)
            {
                Format             = new AVAudioFormat(AVAudioCommonFormat.PCMFloat32, (double)Frequency, (uint)(ChannelFormat == AudioChannelFormat.Mono ? 1 : 2), true);
                Buffer             = new AVAudioPcmBuffer(Format, (uint)SampleCount);
                // FrameLength is a frame count, not a byte count; the loop below
                // fills SampleCount samples, so for mono that is SampleCount frames.
                Buffer.FrameLength = (uint)SampleCount;
            }

            unsafe
            {
                try
                {
                    // FloatChannelData points to an array of per-channel float pointers;
                    // read the pointer for channel 0.
                    float *f = (float *)Marshal.ReadIntPtr(Buffer.FloatChannelData, 0 * IntPtr.Size);
                    for (int i = 0; i < SampleCount; i++)
                    {
                        // scale Int16 samples into the [-1, 1] float range
                        f[i] = data[i] / 32767.0f;
                    }
                }
                catch (Exception e)
                {
                    Console.WriteLine(e.ToString());
                }
            }
        }
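
A buffer filled this way is typically handed to an AVAudioPlayerNode for playback. A minimal sketch, assuming a player node (here called player) that is already attached and connected to a running AVAudioEngine:

        // hypothetical usage of the buffer prepared by SetData above
        player.ScheduleBuffer(Buffer, () => Console.WriteLine("buffer finished"));
        player.Play();
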
Example #2
        public void AttachSampler(AudioSampler audioSampler, SCNNode node)
        {
            // Add the audio player to the SceneKit node so that it gets correct
            // positional adjustments
            node.AddAudioPlayer(audioSampler.AudioPlayer);

            // NOTE: AVAudioNodes that are not AVAudioPlayerNodes are not
            // automatically added to an AVAudioEngine by SceneKit. So we add
            // this audio node to the SCNNode so that it can get position updates
            // but we connect it manually to the AVAudioEnvironmentNode that we
            // get passed to us. This comes from ARSCNView in GameSceneViewController.
            if (this.AudioEnvironment.Engine != null)
            {
                var engine = this.AudioEnvironment.Engine;

                // attach the node
                var audioNode = audioSampler.AudioNode;
                engine.AttachNode(audioNode);

                // connect
                var engineFormat = engine.OutputNode.GetBusInputFormat(0);
                var format       = new AVAudioFormat(engineFormat.SampleRate, 1);
                engine.Connect(audioNode, AudioEnvironment, format);

                // add to the local collection
                this.audioSamplers.Add(audioSampler);
            }
        }
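
A matching teardown detaches the node again when the sampler is removed. A sketch under the same assumptions (DetachSampler is hypothetical; the fields are those used above):

        public void DetachSampler(AudioSampler audioSampler, SCNNode node)
        {
            // hypothetical counterpart to AttachSampler above
            node.RemoveAudioPlayer(audioSampler.AudioPlayer);

            var engine = this.AudioEnvironment.Engine;
            if (engine != null)
            {
                // detaching the node also breaks its connection to the environment node
                engine.DetachNode(audioSampler.AudioNode);
            }

            this.audioSamplers.Remove(audioSampler);
        }
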
Example #3
    private void initPlayer()
    {
        audioEngine = new AVAudioEngine();
        NSError error;

        if (!AVAudioSession.SharedInstance().SetPreferredSampleRate(sampleRate, out error))
        {
            throw new Exception("Error setting preffered sample rate for player: " + error);
        }
        AVAudioSession.SharedInstance().SetCategory(AVAudioSessionCategory.PlayAndRecord, AVAudioSessionCategoryOptions.InterruptSpokenAudioAndMixWithOthers);
        AVAudioSession.SharedInstance().SetActive(true);

        audioPlayer = new AVAudioPlayerNode();
        setVolume(AVAudioSession.SharedInstance().OutputVolume);
        inputAudioFormat = new AVAudioFormat(AVAudioCommonFormat.PCMFloat32, sampleRate, (uint)channels, false);

        audioEngine.AttachNode(audioPlayer);
        audioEngine.Connect(audioPlayer, audioEngine.MainMixerNode, inputAudioFormat);
        audioEngine.Prepare();
        if (!audioEngine.StartAndReturnError(out error))
        {
            throw new Exception("Error starting playback audio engine: " + error);
        }
        audioPlayer.Play();
    }
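
The player set up above keeps the audio session active until it is torn down. A minimal teardown sketch, assuming the same audioPlayer and audioEngine fields (stopPlayer is hypothetical):

    private void stopPlayer()
    {
        audioPlayer.Stop();
        audioEngine.Stop();
        AVAudioSession.SharedInstance().SetActive(false);
    }
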
Example #4
        public AUv3FilterDemo(AudioComponentDescription description, AudioComponentInstantiationOptions options, out NSError error) :
            base(description, options, out error)
        {
            var defaultFormat = new AVAudioFormat(44100.0, 2);

            Kernel.Init((int)defaultFormat.ChannelCount, defaultFormat.SampleRate);

            AUParameter cutoffParam = AUParameterTree.CreateParameter(
                "cutoff", "Cutoff", 0, 12, 2000,
                AudioUnitParameterUnit.Hertz, null,
                0, null, null
                );

            AUParameter resonanceParam = AUParameterTree.CreateParameter(
                "resonance", "Resonance", 1, -20, 20,
                AudioUnitParameterUnit.Decibels, null,
                0, null, null
                );

            cutoffParam.Value    = 400f;
            resonanceParam.Value = -5.0f;
            Kernel.SetParameter((ulong)FilterParam.Cutoff, cutoffParam.Value);
            Kernel.SetParameter((ulong)FilterParam.Resonance, resonanceParam.Value);

            ParameterTree = AUParameterTree.CreateTree(new [] { cutoffParam, resonanceParam });

            inputBus.Init(defaultFormat, 8);

            NSError err;

            outputBus = new AUAudioUnitBus(defaultFormat, out err);

            inputBusArray  = new AUAudioUnitBusArray(this, AUAudioUnitBusType.Input, new [] { inputBus.Bus });
            outputBusArray = new AUAudioUnitBusArray(this, AUAudioUnitBusType.Output, new [] { outputBus });

            var filterKernel = Kernel;

            ParameterTree.ImplementorValueObserver           = (param, value) => filterKernel.SetParameter(param.Address, value);
            ParameterTree.ImplementorValueProvider           = param => filterKernel.GetParameter((nuint)param.Address);
            ParameterTree.ImplementorStringFromValueCallback = (AUParameter param, ref float? value) => {
                switch (param.Address)
                {
                case (ulong)FilterParam.Cutoff:
                case (ulong)FilterParam.Resonance:
                    return param.Value.ToString();

                default:
                    return "?";
                }
            };

            MaximumFramesToRender = 512;
        }
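
The string callback above returns the raw value for both parameters. A variant that formats each value with its unit could look like this (a sketch reusing the same FilterParam addresses):

            ParameterTree.ImplementorStringFromValueCallback = (AUParameter param, ref float? value) => {
                float v = value ?? param.Value;
                switch (param.Address)
                {
                case (ulong)FilterParam.Cutoff:
                    return v.ToString("0") + " Hz";

                case (ulong)FilterParam.Resonance:
                    return v.ToString("0.0") + " dB";

                default:
                    return "?";
                }
            };
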
Example #5
 public void TestNotEqualOperatorNull()
 {
     using (var format = new AVAudioFormat())
     {
         Assert.IsTrue(format != null, "format != null");
         Assert.IsTrue(null != format, "null != format");
     }
     using (AVAudioFormat nullFormat = null)
     {
         Assert.IsFalse(nullFormat != null, "nullFormat != null");
         Assert.IsFalse(null != nullFormat, "null != nullFormat");
     }
 }
Example #6
        public void Init(AVAudioFormat defaultFormat, uint maxChannels)
        {
            MaxFrames = 0;
            pcmBuffer = null;
            OriginalAudioBufferList = null;
            MutableAudioBufferList  = null;
            NSError error;

            Bus = new AUAudioUnitBus(defaultFormat, out error)
            {
                MaximumChannelCount = maxChannels
            };
        }
Example #7
    private void initRecorder()
    {
        audioRecorder = new AVAudioEngine();
        NSError error;

        if (!AVAudioSession.SharedInstance().SetPreferredSampleRate(sampleRate, out error))
        {
            throw new Exception("Error setting preffered sample rate for recorder: " + error);
        }
        AVAudioSession.SharedInstance().SetCategory(AVAudioSessionCategory.PlayAndRecord, AVAudioSessionCategoryOptions.InterruptSpokenAudioAndMixWithOthers);
        AVAudioSession.SharedInstance().SetActive(true);
        AVAudioFormat recording_format = new AVAudioFormat(AVAudioCommonFormat.PCMInt16, sampleRate, (uint)channels, false);
        uint          buffer_size      = (uint)CodecTools.getPcmFrameByteSize(sampleRate, bitRate, channels) * 1000;

        audioRecorder.InputNode.InstallTapOnBus(0, buffer_size, recording_format, onDataAvailable);
        audioRecorder.Prepare();
        if (!audioRecorder.StartAndReturnError(out error))
        {
            throw new Exception("Error starting recording audio engine: " + error);
        }
    }
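
The tap handler onDataAvailable is not shown. A hypothetical handler matching the tap's signature, copying the Int16 samples out of channel 0:

    private void onDataAvailable(AVAudioPcmBuffer buffer, AVAudioTime when)
    {
        // sketch only; assumes the mono PCMInt16 format configured above
        int frames = (int)buffer.FrameLength;
        short[] pcm = new short[frames];

        // Int16ChannelData points to an array of per-channel pointers; take channel 0
        IntPtr channel0 = Marshal.ReadIntPtr(buffer.Int16ChannelData, 0);
        Marshal.Copy(channel0, pcm, 0, frames);

        // hand the samples to the encoder / network layer here
    }
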
Example #8
 public void TestEqualOperatorSameInstace()
 {
     using (var format = new AVAudioFormat())
         Assert.IsTrue(format == format, "format == format");
 }
Example #9
 public IOSAudioClip(int samples, AudioChannelFormat channelFormat, int frequency) : base(samples, channelFormat, frequency)
 {
     Format = new AVAudioFormat((double)frequency, (uint)(channelFormat == AudioChannelFormat.Mono ? 1 : 2));
     Buffer = new AVAudioPcmBuffer(Format, (uint)samples);
 }
Example #10
        public IAsyncOperation <SpeechRecognitionResult> RecognizeAsync()
        {
            _initialSilenceTimeout          = new Timer();
            _initialSilenceTimeout.Interval = Math.Max(Timeouts.InitialSilenceTimeout.TotalMilliseconds, 5000);
            _initialSilenceTimeout.Elapsed += OnTimeout;

            _endSilenceTimeout          = new Timer();
            _endSilenceTimeout.Interval = Math.Max(Timeouts.EndSilenceTimeout.TotalMilliseconds, 150);
            _endSilenceTimeout.Elapsed += OnTimeout;

            // Cancel the previous task if it's running.
            _recognitionTask?.Cancel();
            _recognitionTask = null;

            var     audioSession = AVAudioSession.SharedInstance();
            NSError err;

            err = audioSession.SetCategory(AVAudioSessionCategory.Record);
            audioSession.SetMode(AVAudioSession.ModeMeasurement, out err);
            err = audioSession.SetActive(true, AVAudioSessionSetActiveOptions.NotifyOthersOnDeactivation);

            // Configure request to get partial results
            _recognitionRequest = new SFSpeechAudioBufferRecognitionRequest
            {
                ShouldReportPartialResults = true,
                TaskHint = SFSpeechRecognitionTaskHint.Dictation
            };

            var inputNode = _audioEngine.InputNode;

            if (inputNode == null)
            {
                throw new InvalidProgramException("Audio engine has no input node");
            }

            var tcs = new TaskCompletionSource<SpeechRecognitionResult>();

            // Keep a reference to the task so that it can be cancelled.
            _recognitionTask = _speechRecognizer.GetRecognitionTask(_recognitionRequest, (result, error) =>
            {
                var isFinal   = false;
                var bestMatch = default(SpeechRecognitionResult);

                if (result != null)
                {
                    _initialSilenceTimeout.Stop();
                    _endSilenceTimeout.Stop();
                    _endSilenceTimeout.Start();

                    bestMatch = new SpeechRecognitionResult()
                    {
                        Text       = result.BestTranscription.FormattedString,
                        Alternates = result.Transcriptions?
                                     .Select(t => new SpeechRecognitionResult()
                        {
                            Text = t.FormattedString
                        })
                                     .ToList()
                    };
                    isFinal = result.Final;

                    OnHypothesisGenerated(bestMatch.Text);
                }

                if (error != null || isFinal)
                {
                    _initialSilenceTimeout.Stop();
                    _endSilenceTimeout.Stop();

                    _audioEngine.Stop();

                    inputNode.RemoveTapOnBus(0);
                    inputNode.Reset();

                    audioSession = AVAudioSession.SharedInstance();
                    err          = audioSession.SetCategory(AVAudioSessionCategory.Playback);
                    audioSession.SetMode(AVAudioSession.ModeDefault, out err);
                    err = audioSession.SetActive(false, AVAudioSessionSetActiveOptions.NotifyOthersOnDeactivation);

                    _recognitionTask = null;

                    OnStateChanged(SpeechRecognizerState.Idle);

                    if (bestMatch != null)
                    {
                        tcs.TrySetResult(bestMatch);
                    }
                    else
                    {
                        tcs.TrySetException(new Exception($"Error during speech recognition: {error?.LocalizedDescription}"));
                    }
                }
            });

            var recordingFormat = new AVAudioFormat(sampleRate: 44100, channels: 1);

            inputNode.InstallTapOnBus(0, 1024, recordingFormat, (buffer, when) => {
                _recognitionRequest?.Append(buffer);
            });

            _initialSilenceTimeout.Start();

            _audioEngine.Prepare();
            _audioEngine.StartAndReturnError(out err);

            OnStateChanged(SpeechRecognizerState.Capturing);

            return tcs.Task.AsAsyncOperation();
        }
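
RecognizeAsync assumes speech-recognition permission has already been granted. A minimal sketch of the prerequisite authorization request:

        // to be called once before the first RecognizeAsync
        SFSpeechRecognizer.RequestAuthorization(status =>
        {
            if (status != SFSpeechRecognizerAuthorizationStatus.Authorized)
            {
                Console.WriteLine("Speech recognition not authorized: " + status);
            }
        });
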
Example #11
        public void AudioSetupStart()
        {
            FloatQueue        = new Queue <float>();
            engine            = new AVAudioEngine();
            nodeEQ            = new AVAudioUnitEQ(1);
            nodeEQ.GlobalGain = 1;
            engine.AttachNode(nodeEQ);

            AVAudioUnitEQFilterParameters filter = nodeEQ.Bands[0];

            filter.FilterType = AVAudioUnitEQFilterType.LowPass;
            filter.Frequency  = 1000; // in hertz
            filter.Bandwidth  = 1;
            filter.Bypass     = false;
            // gain in dB; the documented range is -96 dB through +24 dB
            filter.Gain = 24;

            // Bands[0] is a live AVAudioUnitEQFilterParameters object, so the
            // mutations above take effect directly; no write-back is needed.

            // 1: query the mixer's output format
            AVAudioFormat format2 = engine.MainMixerNode.GetBusOutputFormat(0);

            // 2: install a tap on the mixer to observe rendered buffers
            // (an earlier attempt using an explicit AVAudioNodeTapBlock is kept below)
            //AVAudioPcmBuffer buffMix = new AVAudioPcmBuffer(engine.MainMixerNode.GetBusInputFormat(0), 2);
            //AVAudioTime timeMix = engine.MainMixerNode.LastRenderTime;
            //AVAudioNodeTapBlock MixerBlock = new AVAudioNodeTapBlock((buffMix, timeMix) =>
            engine.MainMixerNode.InstallTapOnBus(0, 1024, format2, (AVAudioPcmBuffer buffMix, AVAudioTime when) =>
            {
                //Console.WriteLine("Called");

                // 3: grab the channel-data pointer; note there is no null check here
                IntPtr channelData = buffMix.FloatChannelData;

                int lengthOfBuffer = (int)buffMix.FrameLength;

                int frame_length = (int)buffMix.FrameLength;

                /*
                 * byte[] bytesArray = new byte[lengthOfBuffer];
                 *
                 * Marshal.Copy(channelData, bytesArray, 0, lengthOfBuffer);
                 */
                /*
                 * double total = 0;
                 * int nonZero = 0;
                 * for (int a = 0; a < buffMix.FrameLength - 4; a+=1)
                 * {
                 *  //float tempx = BitConverter.ToSingle(bytesArray, a);
                 *  float tempx = bytesArray[a];
                 *  Console.WriteLine(tempx);
                 *  double temp = Math.Pow(tempx, 2);
                 *  total += temp;
                 *  if (temp.Equals(0))
                 *      nonZero++;
                 * }
                 * int tester;
                 * //Need to figure out how the buffer works, if at all
                 * total = Math.Sqrt(total / nonZero);
                 * double avgPower = 20 * Math.Log10(total);
                 * avgPower /= 160;
                 *
                 * if (avgPower > .9)
                 *  High_Level_Detected++;
                 * FloatQueue.Enqueue((float)avgPower);
                 * //Console.WriteLine(avgPower);
                 *
                 * Marshal.FreeHGlobal(channelData);
                 */
                //var ns = buffMix.MutableCopy(); //test later

                T_Proccess tws   = new T_Proccess(channelData, lengthOfBuffer, frame_length);
                Thread processer = new Thread(new ThreadStart(tws.ThreadProc));
                processer.Start();
            });

            AVAudioFormat format = engine.InputNode.GetBusInputFormat(0);

            // route the microphone input through the EQ, then into the mixer
            engine.Connect(engine.InputNode, nodeEQ, format);
            engine.Connect(nodeEQ, engine.MainMixerNode, format);

            StartEngine();
            started = true;
        }
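
StartEngine is referenced above but not shown. A sketch of what it presumably does with the same engine field (hypothetical implementation):

        private void StartEngine()
        {
            engine.Prepare();

            NSError error;
            if (!engine.StartAndReturnError(out error))
            {
                Console.WriteLine("Error starting audio engine: " + error);
            }
        }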