Example #1
        private static unsafe float[] ReadSamplesFromFrame(AudioFrameOutputNode frameOutputNode)
        {
            using (var frame = frameOutputNode.GetFrame())
            using (var buffer = frame.LockBuffer(AudioBufferAccessMode.Read)) // read access is sufficient: we only copy samples out
            using (var reference = buffer.CreateReference())
            {
                ((IMemoryBufferByteAccess)reference).GetBuffer(out byte* unsafeBuffer, out uint numberOfBytes);

                var numberOfSamples = (int)numberOfBytes / ElementSize;
                if (numberOfSamples <= 0)
                {
                    return null;
                }

                var samples = new float[numberOfSamples];

                for (int i = 0; i < samples.Length; i++)
                {
                    samples[i] = ((float*)unsafeBuffer)[i];
                }

                return samples;
            }
        }
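Example #1 relies on two things it does not declare: the IMemoryBufferByteAccess COM interop interface and an ElementSize constant. The interface declaration below is the standard one (System.Runtime.InteropServices attributes, GUID as documented for Windows.Foundation.IMemoryBufferByteAccess); the ElementSize definition is an assumption (4 bytes per 32-bit float sample) consistent with how the method indexes the buffer.

        // Standard interop interface for reading raw bytes from an IMemoryBufferReference.
        [ComImport]
        [Guid("5B0D3235-4DBA-4D44-865E-8F1D0E4FD04D")]
        [InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
        unsafe interface IMemoryBufferByteAccess
        {
            void GetBuffer(out byte* buffer, out uint capacity);
        }

        // Assumed by ReadSamplesFromFrame: bytes per 32-bit float sample.
        private const int ElementSize = sizeof(float);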
Example #2
        /// <summary>
        /// Stops the audio stream.
        /// </summary>
        public Task Stop()
        {
            if (Active)
            {
                Active = false;

                outputNode?.Stop();
                audioGraph?.Stop();

                OnActiveChanged?.Invoke(this, false);
            }

            outputNode?.Dispose();
            outputNode = null;

            if (audioGraph != null)
            {
                audioGraph.QuantumStarted             -= Graph_QuantumStarted;
                audioGraph.UnrecoverableErrorOccurred -= Graph_UnrecoverableErrorOccurred;
                audioGraph.Dispose();
                audioGraph = null;
            }

            return Task.CompletedTask;
        }
Example #3
        protected override async void OnNavigatedTo(NavigationEventArgs e)
        {
            base.OnNavigatedTo(e);
            var mediaSource = MediaSource.CreateFromUri(new Uri("ms-appx:///Test/GirlishLover.m4a"));
            await mediaSource.OpenAsync();

            this.mpe.Source = mediaSource;
            this.mpe.MediaPlayer.MediaOpened += this.MediaPlayer_MediaOpened;

            var settings = new AudioGraphSettings(Windows.Media.Render.AudioRenderCategory.Other)
            {
                QuantumSizeSelectionMode = QuantumSizeSelectionMode.LowestLatency
            };
            var result = await AudioGraph.CreateAsync(settings);

            this.audioGraph = result.Graph;

            this.outNode = this.audioGraph.CreateFrameOutputNode();

            var audioFile           = await StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///Test/GirlishLover.m4a"));
            this.fileNode           = (await this.audioGraph.CreateFileInputNodeAsync(audioFile)).FileInputNode;
            this.fileNode.LoopCount = 0;
            this.fileNode.AddOutgoingConnection(this.outNode);
            this.fileNode.FileCompleted    += this.FileNode_FileCompleted;
            this.audioGraph.QuantumStarted += this.AudioGraph_QuantumStarted;

            this.audioGraph.Start();
        }
Example #4
        private async Task InitializeAudioAsync()
        {
            // Create an AudioGraph that renders 22.05 kHz, mono, 16-bit PCM
            AudioGraphSettings settings = new AudioGraphSettings(AudioRenderCategory.Media);

            settings.EncodingProperties = AudioEncodingProperties.CreatePcm(22050, 1, 16);

            CreateAudioGraphResult result = await AudioGraph.CreateAsync(settings);

            if (result.Status != AudioGraphCreationStatus.Success)
            {
                return;
            }

            _graph = result.Graph;

            // Create a device output node
            CreateAudioDeviceOutputNodeResult deviceOutputNodeResult = await _graph.CreateDeviceOutputNodeAsync();

            if (deviceOutputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                return;
            }

            _deviceOutputNode = deviceOutputNodeResult.DeviceOutputNode;

            CreateAudioDeviceInputNodeResult deviceInputNodeResult = await _graph.CreateDeviceInputNodeAsync(MediaCategory.Other);

            if (deviceInputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                return;
            }

            _deviceInputNode = deviceInputNodeResult.DeviceInputNode;

            // Create the FrameInputNode at the same format as the graph, except explicitly set mono.
            AudioEncodingProperties nodeEncodingProperties = _graph.EncodingProperties;

            nodeEncodingProperties.ChannelCount = 1;
            _frameInputNode = _graph.CreateFrameInputNode(nodeEncodingProperties);
            _frameInputNode.AddOutgoingConnection(_deviceOutputNode);


            _frameOutputNode = _graph.CreateFrameOutputNode(nodeEncodingProperties);
            _deviceInputNode.AddOutgoingConnection(_frameOutputNode);

            // Initialize the Frame Input Node in the stopped state
            _frameInputNode.Stop();

            // Hook up an event handler so we can start generating samples when needed
            // This event is triggered when the node is required to provide data
            _frameInputNode.QuantumStarted += node_QuantumStarted;

            _graph.QuantumProcessed += GraphOnQuantumProcessed;

            // Start the graph since we will only start/stop the frame input node
            _graph.Start();
        }
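Example #4 subscribes node_QuantumStarted on the frame input node but does not include the handler. A minimal sketch of that handler, assuming a GenerateAudioData helper (illustrative, not shown in the example) that builds an AudioFrame with the requested number of samples:

        private void node_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
        {
            // args.RequiredSamples is how many samples the graph needs for this quantum.
            if (args.RequiredSamples > 0)
            {
                // GenerateAudioData is a hypothetical helper that fills an AudioFrame with samples.
                AudioFrame frame = GenerateAudioData((uint)args.RequiredSamples);
                sender.AddFrame(frame);
            }
        }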
Example #5
 private async Task InitProcessorNode()
 {
     audioFrameProcessor = ag.CreateFrameOutputNode();
     if (audioFrameProcessor != null)
     {
         rootPage.ShowLogs("Processor Node initialized successfully:");
         ag.QuantumStarted += Ag_QuantumStarted;
     }
 }
Example #6
        async Task Init()
        {
            await Stop();

            var pcmEncoding = AudioEncodingProperties.CreatePcm((uint)SampleRate, (uint)ChannelCount, (uint)BitsPerSample);
            // apparently this is not _really_ used/supported here, as the audio data seems to come through as floats (so basically MediaEncodingSubtypes.Float?)
            pcmEncoding.Subtype = MediaEncodingSubtypes.Pcm;

            var graphSettings = new AudioGraphSettings(AudioRenderCategory.Media)
            {
                EncodingProperties = pcmEncoding,
                DesiredRenderDeviceAudioProcessing = AudioProcessing.Raw
                                                     // these do not seem to take effect on certain hardware and MSFT recommends SystemDefault when recording to a file anyway
                                                     // We'll buffer audio data ourselves to improve RMS calculation across larger samples
                                                     //QuantumSizeSelectionMode = QuantumSizeSelectionMode.ClosestToDesired,
                                                     //DesiredSamplesPerQuantum = 4096
            };

            // create our audio graph... this will be a device input node feeding audio data into a frame output node
            var graphResult = await AudioGraph.CreateAsync(graphSettings);

            if (graphResult.Status != AudioGraphCreationStatus.Success)
            {
                throw new Exception($"AudioGraph.CreateAsync() returned non-Success status: {graphResult.Status}");
            }

            audioGraph = graphResult.Graph;

            // take input from whatever the default communications device is set to on Windows
            var inputResult = await audioGraph.CreateDeviceInputNodeAsync(MediaCategory.Communications, pcmEncoding);

            if (inputResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                throw new Exception($"audioGraph.CreateDeviceInputNodeAsync() returned non-Success status: {inputResult.Status}");
            }

            // create the output node
            outputNode = audioGraph.CreateFrameOutputNode(pcmEncoding);

            // wire the input to the output
            inputResult.DeviceInputNode.AddOutgoingConnection(outputNode);

            // Attach to QuantumStarted in order to receive synchronous updates from the audio graph (to capture incoming audio)
            audioGraph.QuantumStarted             += Graph_QuantumStarted;
            audioGraph.UnrecoverableErrorOccurred += Graph_UnrecoverableErrorOccurred;
        }
        public async Task Initialize()
        {
            // Default the language to English
            this.ListenState    = ListenState.Initializing;
            this.SourceLanguage = Language.English;
            try {
                await this.speechTranslateClient.Connect("en-US", "en", null, this.DisplayResult, this.SendAudioOut);

                var pcmEncoding = AudioEncodingProperties.CreatePcm(16000, 1, 16);

                var result = await AudioGraph.CreateAsync(
                    new AudioGraphSettings(AudioRenderCategory.Speech)
                {
                    DesiredRenderDeviceAudioProcessing = AudioProcessing.Raw,
                    AudioRenderCategory = AudioRenderCategory.Speech,
                    EncodingProperties  = pcmEncoding
                });

                if (result.Status == AudioGraphCreationStatus.Success)
                {
                    this.graph = result.Graph;

                    var microphone = (await DeviceInformation.FindAllAsync(MediaDevice.GetAudioCaptureSelector())).First();

                    this.speechTranslateOutputMode = this.graph.CreateFrameOutputNode(pcmEncoding);
                    this.graph.QuantumProcessed   += (s, a) => this.SendToSpeechTranslate(this.speechTranslateOutputMode.GetFrame());

                    this.speechTranslateOutputMode.Start();

                    var micInputResult = await this.graph.CreateDeviceInputNodeAsync(MediaCategory.Speech, pcmEncoding, microphone);

                    if (micInputResult.Status == AudioDeviceNodeCreationStatus.Success)
                    {
                        micInputResult.DeviceInputNode.AddOutgoingConnection(this.speechTranslateOutputMode);
                        micInputResult.DeviceInputNode.Start();
                    }
                    else
                    {
                        throw new InvalidOperationException();
                    }

                    // start the graph
                    this.graph.Start();
                    this.ListenState = ListenState.NotListening;
                }
                else
                {
                    this.ListenState = ListenState.Error;
                }
            }
            catch (Exception e)
            {
                this.ListenState = ListenState.Error;
                Logger.GetInstance().LogLine(e.Message);
            }
        }
        private async void File_Click(object sender, RoutedEventArgs e)
        {
            // If another file is already loaded into the FileInput node
            if (fileInput != null)
            {
                // Release the file and dispose the contents of the node
                fileInput.Dispose();
                // Stop playback since a new file is being loaded. Also reset the button UI
                if (graphButton.Content.Equals("Stop Graph"))
                {
                    TogglePlay();
                }
            }

            FileOpenPicker filePicker = new FileOpenPicker();

            filePicker.SuggestedStartLocation = PickerLocationId.MusicLibrary;
            filePicker.FileTypeFilter.Add(".mp3");
            filePicker.FileTypeFilter.Add(".wav");
            filePicker.FileTypeFilter.Add(".wma");
            filePicker.FileTypeFilter.Add(".m4a");
            filePicker.ViewMode = PickerViewMode.Thumbnail;
            StorageFile file = await filePicker.PickSingleFileAsync();

            // File can be null if cancel is hit in the file picker
            if (file == null)
            {
                return;
            }

            CreateAudioFileInputNodeResult fileInputResult = await graph.CreateFileInputNodeAsync(file);

            if (AudioFileNodeCreationStatus.Success != fileInputResult.Status)
            {
                // Cannot read input file
                rootPage.NotifyUser(String.Format("Cannot read input file because {0}", fileInputResult.Status.ToString()), NotifyType.ErrorMessage);
                return;
            }

            // Only create the frame output node and subscribe once the file is known to be readable
            frameOutputNode         = graph.CreateFrameOutputNode();
            graph.QuantumProcessed += AudioGraph_QuantumProcessed;

            fileInput = fileInputResult.FileInputNode;
            fileInput.AddOutgoingConnection(deviceOutput, 0.5);
            fileButton.Background = new SolidColorBrush(Colors.Green);

            // Trim the file: StartTime skips audio from the beginning (0 here, i.e. play from the start)
            // fileInput.EndTime can be used to trim from the end of file
            fileInput.StartTime = TimeSpan.FromSeconds(0);

            // Enable buttons in UI to start graph, loop and change playback speed factor
            graphButton.IsEnabled = true;
            loopToggle.IsEnabled  = true;
            //playSpeedSlider.IsEnabled = true;
        }
        /// <summary>
        /// Creates the underlying AudioGraph for input audio and initializes it, including by
        /// subscribing the optional debug output file to data availability.
        /// </summary>
        /// <returns> A task that completes once the underlying AudioGraph is initialized. </returns>
        private async Task PerformAudioSetupAsync()
        {
            var settings = new AudioGraphSettings(AudioRenderCategory.Speech)
            {
                EncodingProperties = this.outputEncoding,
            };
            var graphResult = await AudioGraph.CreateAsync(settings);

            if (graphResult.Status != AudioGraphCreationStatus.Success)
            {
                var message = $"Failed to initialize AudioGraph with creation status: {graphResult.Status}";
                throw new InvalidOperationException(message, graphResult.ExtendedError);
            }

            this.inputGraph = graphResult.Graph;

            this.logger.Log(LogMessageLevel.AudioLogs, $"Audio graph created: {graphResult.Status}");

            if (this.agentSession != null)
            {
                this.logger.Log(LogMessageLevel.AudioLogs, $"{Environment.TickCount} Initializing audio from session");
                this.inputNode = await this.agentSession.CreateAudioDeviceInputNodeAsync(this.inputGraph);
            }
            else
            {
                this.logger.Log(LogMessageLevel.AudioLogs, $"{Environment.TickCount} Initializing audio from real-time input");
                var nodeResult = await this.inputGraph.CreateDeviceInputNodeAsync(MediaCategory.Speech);

                if (nodeResult.Status != AudioDeviceNodeCreationStatus.Success)
                {
                    throw new InvalidOperationException($"Cannot make a real-time device input node.", nodeResult.ExtendedError);
                }

                this.inputNode = nodeResult.DeviceInputNode;
            }

            this.outputNode = this.inputGraph.CreateFrameOutputNode();
            this.inputNode.AddOutgoingConnection(this.outputNode);
            this.inputGraph.QuantumStarted += this.OnQuantumStarted;
            this.disposed = false;

            if (!this.dataAvailableInitialized)
            {
                this.dataAvailableInitialized = true;
                this.DataAvailable           += async (bytes) =>
                {
                    using (await this.debugAudioOutputFileSemaphore.AutoReleaseWaitAsync())
                    {
                        this.debugAudioOutputFileStream?.Write(bytes.ToArray(), 0, bytes.Count);
                    }
                };
            }
        }
Example #10
        public async Task InitializeAsync()
        {
            DebugUtil.CheckAppThread();

            AudioGraphSettings settings = new AudioGraphSettings(AudioRenderCategory.Media);

            // settings.DesiredRenderDeviceAudioProcessing = AudioProcessing.Raw;
            settings.QuantumSizeSelectionMode = QuantumSizeSelectionMode.LowestLatency;

            CreateAudioGraphResult result = await AudioGraph.CreateAsync(settings);

            DebugUtil.Assert(result.Status == AudioGraphCreationStatus.Success, "Failed to create audio graph");

            _audioGraph = result.Graph;

            int latencyInSamples = _audioGraph.LatencyInSamples;

            // Create a device output node
            CreateAudioDeviceOutputNodeResult deviceOutputNodeResult = await _audioGraph.CreateDeviceOutputNodeAsync();

            DebugUtil.Assert(deviceOutputNodeResult.Status == AudioDeviceNodeCreationStatus.Success,
                             $"Audio Device Output unavailable because {deviceOutputNodeResult.Status}");

            _deviceOutputNode = deviceOutputNodeResult.DeviceOutputNode;

            _inputCaptureNode = _audioGraph.CreateFrameOutputNode();

            // Create a device input node using the default audio input device
            CreateAudioDeviceInputNodeResult deviceInputNodeResult = await _audioGraph.CreateDeviceInputNodeAsync(MediaCategory.Other);

            DebugUtil.Assert(deviceInputNodeResult.Status == AudioDeviceNodeCreationStatus.Success,
                             $"Audio Device Input unavailable because {deviceInputNodeResult.Status}");

            _deviceInputNode = deviceInputNodeResult.DeviceInputNode;

            _deviceInputNode.AddOutgoingConnection(_inputCaptureNode);
            _deviceInputNode.AddOutgoingConnection(_deviceOutputNode);

            /*
             * echoEffect = new EchoEffectDefinition(_graph);
             * echoEffect.WetDryMix = 0.7f;
             * echoEffect.Feedback = 0.5f;
             * echoEffect.Delay = 500.0f;
             * submixNode.EffectDefinitions.Add(echoEffect);
             *
             * // Disable the effect in the beginning. Enable in response to user action (UI toggle switch)
             * submixNode.DisableEffectsByDefinition(echoEffect);
             */

            // All nodes can have an OutgoingGain property
            // Setting the gain on the Submix node attenuates the output of the node
            //_submixNode.OutgoingGain = 0.5;
        }
Example #11
        public static async Task CreateDeviceInputNode()
        {
            Console.WriteLine("Creating AudioGraphs");
            // Create an AudioGraph configured for 48 kHz, stereo, 32-bit float
            AudioGraphSettings graphsettings = new AudioGraphSettings(AudioRenderCategory.GameChat);

            graphsettings.EncodingProperties               = new AudioEncodingProperties();
            graphsettings.EncodingProperties.Subtype       = "Float";
            graphsettings.EncodingProperties.SampleRate    = 48000;
            graphsettings.EncodingProperties.ChannelCount  = 2;
            graphsettings.EncodingProperties.BitsPerSample = 32;
            graphsettings.EncodingProperties.Bitrate       = 3072000;
            //settings.DesiredSamplesPerQuantum = 960;
            //settings.QuantumSizeSelectionMode = QuantumSizeSelectionMode.ClosestToDesired;
            CreateAudioGraphResult graphresult = await AudioGraph.CreateAsync(graphsettings);

            if (graphresult.Status != AudioGraphCreationStatus.Success)
            {
                // Cannot create graph
                return;
            }

            ingraph = graphresult.Graph;


            AudioGraphSettings nodesettings = new AudioGraphSettings(AudioRenderCategory.GameChat);

            nodesettings.EncodingProperties       = AudioEncodingProperties.CreatePcm(48000, 2, 16);
            nodesettings.DesiredSamplesPerQuantum = 960;
            nodesettings.QuantumSizeSelectionMode = QuantumSizeSelectionMode.ClosestToDesired;
            // Note: created with outgraph's encoding (the render graph created elsewhere),
            // presumably so captured frames match the render format
            frameOutputNode         = ingraph.CreateFrameOutputNode(outgraph.EncodingProperties);
            quantum                 = 0;
            ingraph.QuantumStarted += Graph_QuantumStarted;

            Windows.Devices.Enumeration.DeviceInformation selectedDevice =
                await Windows.Devices.Enumeration.DeviceInformation.CreateFromIdAsync(Windows.Media.Devices.MediaDevice.GetDefaultAudioCaptureId(Windows.Media.Devices.AudioDeviceRole.Default));

            CreateAudioDeviceInputNodeResult result =
                await ingraph.CreateDeviceInputNodeAsync(MediaCategory.Media, nodesettings.EncodingProperties, selectedDevice);

            if (result.Status != AudioDeviceNodeCreationStatus.Success)
            {
                // Cannot create device input node
                return;
            }

            deviceInputNode = result.DeviceInputNode;
            deviceInputNode.AddOutgoingConnection(frameOutputNode);
            frameOutputNode.Start();
            ingraph.Start();
        }
Example #12
        public async Task <bool> ResetAudioInput(DeviceInformation microphoneDevice = null)
        {
            if (microphoneDevice == null)
            {
                var inputDevices = await GetAllMicrophoneDevices();

                if (inputDevices.Count == 0)
                {
                    InputDeviceState = InputDeviceState.MicrophoneNotDetected;
                    return(false);
                }

                microphoneDevice = inputDevices[0];
            }

            var inputAudioEnocdingProperties = AudioEncodingProperties.CreatePcm(
                OpusConvertConstants.SamplingRate,
                1,
                16
                );

            var deviceInputNodeCreateResult = await _AudioGraph.CreateDeviceInputNodeAsync(
                Windows.Media.Capture.MediaCategory.GameChat,
                inputAudioEnocdingProperties,
                microphoneDevice
                );

            if (deviceInputNodeCreateResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                if (deviceInputNodeCreateResult.Status == AudioDeviceNodeCreationStatus.AccessDenied)
                {
                    InputDeviceState = InputDeviceState.AccessDenied;
                }
                else
                {
                    InputDeviceState = InputDeviceState.UnknowunError;
                }

                return(false);
            }

            _InputNode       = deviceInputNodeCreateResult.DeviceInputNode;
            _FrameOutputNode = _AudioGraph.CreateFrameOutputNode(inputAudioEnocdingProperties);
            _InputNode.AddOutgoingConnection(_FrameOutputNode);

            InputDeviceState = InputDeviceState.Avairable;

            return(true);
        }
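Example #12 calls a GetAllMicrophoneDevices helper that is not shown. A minimal sketch of what it plausibly does, using the standard audio-capture device selector (the helper name comes from the example; the body is an assumption):

        private static async Task<DeviceInformationCollection> GetAllMicrophoneDevices()
        {
            // Enumerate all active audio-capture devices (microphones).
            return await DeviceInformation.FindAllAsync(MediaDevice.GetAudioCaptureSelector());
        }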
Example #13
 private async Task LoadAudioFromFile(StorageFile file)
 {
     // We initialize an instance of AudioGraph
     AudioGraphSettings settings = 
         new AudioGraphSettings(
             Windows.Media.Render.AudioRenderCategory.Media
             );
     CreateAudioGraphResult result1 = await AudioGraph.CreateAsync(settings);
     if (result1.Status != AudioGraphCreationStatus.Success)
     {
         ShowMessage("AudioGraph creation error: " + result1.Status.ToString());
     }
     audioGraph = result1.Graph;
     
     if (audioGraph == null)
         return;
     // We initialize FileInputNode
     CreateAudioFileInputNodeResult result2 = 
         await audioGraph.CreateFileInputNodeAsync(file);
     if (result2.Status != AudioFileNodeCreationStatus.Success)
     {
         ShowMessage("FileInputNode creation error: " + result2.Status.ToString());
     }
     fileInputNode = result2.FileInputNode;
     if (fileInputNode == null)
         return;
     // We read audio file encoding properties to pass them to FrameOutputNode creator
     AudioEncodingProperties audioEncodingProperties = fileInputNode.EncodingProperties;
     // We initialize FrameOutputNode and connect it to fileInputNode
     frameOutputNode = audioGraph.CreateFrameOutputNode(audioEncodingProperties);
     fileInputNode.AddOutgoingConnection(frameOutputNode);
     // We add a handler for reaching the end of the file
     fileInputNode.FileCompleted += FileInput_FileCompleted;
     // We add a handler which will transfer every audio frame into audioData 
     audioGraph.QuantumStarted += AudioGraph_QuantumStarted;
     // We initialize audioData
     int numOfSamples = (int)Math.Ceiling(
         (decimal)0.0000001
         * fileInputNode.Duration.Ticks
         * fileInputNode.EncodingProperties.SampleRate
         );
     audioData = new float[numOfSamples];
     
     audioDataCurrentPosition = 0;
     // We start the process which will read the audio file frame by frame
     // and will generate QuantumStarted events when a frame is in memory
     audioGraph.Start();
 }
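Example #13 wires AudioGraph_QuantumStarted but the handler is not shown. A minimal sketch of one way to implement it, copying each captured frame into the preallocated audioData buffer; it assumes the IMemoryBufferByteAccess interop interface shown after Example #1, and the body is illustrative rather than the original author's code:

 private unsafe void AudioGraph_QuantumStarted(AudioGraph sender, object args)
 {
     using (AudioFrame frame = frameOutputNode.GetFrame())
     using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Read))
     using (IMemoryBufferReference reference = buffer.CreateReference())
     {
         ((IMemoryBufferByteAccess)reference).GetBuffer(out byte* data, out uint capacity);
         float* samples = (float*)data;
         int sampleCount = (int)capacity / sizeof(float);

         // Copy this quantum into audioData, guarding against overrun at the end of the file.
         for (int i = 0; i < sampleCount && audioDataCurrentPosition < audioData.Length; i++)
         {
             audioData[audioDataCurrentPosition++] = samples[i];
         }
     }
 }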
Example #14
        public async Task Start()
        {
            try
            {
                // Note: Mutex has thread affinity, so holding it across awaits means
                // ReleaseMutex may run on a different thread and throw; SemaphoreSlim
                // is generally safer in async code.
                m_mutex.WaitOne();

                // Construct the audio graph
                var result = await AudioGraph.CreateAsync(
                    new AudioGraphSettings(AudioRenderCategory.Speech)
                {
                    DesiredRenderDeviceAudioProcessing = AudioProcessing.Raw,
                    AudioRenderCategory = AudioRenderCategory.Speech
                });

                if (result.Status != AudioGraphCreationStatus.Success)
                {
                    throw new Exception("AudioGraph creation error: " + result.Status);
                }

                m_audioGraph = result.Graph;

                var pcmEncoding = AudioEncodingProperties.CreatePcm(16000, 1, 32);
                m_frameOutputNode = m_audioGraph.CreateFrameOutputNode(pcmEncoding);

                var inputResult = await m_audioGraph.CreateDeviceInputNodeAsync(MediaCategory.Speech, pcmEncoding);

                if (inputResult.Status != AudioDeviceNodeCreationStatus.Success)
                {
                    throw new Exception("AudioGraph CreateDeviceInputNodeAsync error: " + inputResult.Status);
                }

                m_deviceInputNode = inputResult.DeviceInputNode;
                m_deviceInputNode.AddOutgoingConnection(m_frameOutputNode);
                m_audioGraph.QuantumStarted += node_QuantumStarted;
                m_audioGraph.Start();
            }
            catch (Exception ex)
            {
                Utils.Toasts.ShowToast("", "AudioInput Start Exception: " + ex.Message);
            }

            m_mutex.ReleaseMutex();
        }
Example #15
 /// <summary>
 /// Heavy dispose in graph
 /// </summary>
 public static void HeavyDisposeInGraph()
 {
     // Clear data
     ingraph?.Dispose();
     frameOutputNode = null;
     deviceInputNode = null;
     ingraph         = null;
     AudioInSpec1    = 0;
     AudioInSpec2    = 0;
     AudioInSpec3    = 0;
     AudioInSpec4    = 0;
     AudioInSpec5    = 0;
     AudioInSpec6    = 0;
     AudioInSpec7    = 0;
     AudioInSpec8    = 0;
     AudioInSpec9    = 0;
     AudioInAverage  = 0;
 }
Example #16
        private async void StartButton_Click(object sender, RoutedEventArgs e)
        {
            DeviceInformation  SelectedDevice = DevicesBox.SelectedItem as DeviceInformation;
            AudioGraphSettings settings       = new AudioGraphSettings(AudioRenderCategory.Media)
            {
                QuantumSizeSelectionMode = QuantumSizeSelectionMode.LowestLatency
            };

            CreateAudioGraphResult result = await AudioGraph.CreateAsync(settings);

            if (result.Status != AudioGraphCreationStatus.Success)
            {
                System.Diagnostics.Debug.WriteLine(String.Format("AudioGraph creation failed because {0}", result.Status.ToString()));
                return;
            }

            graph = result.Graph;

            // Create a device output node
            CreateAudioDeviceOutputNodeResult deviceOutputNodeResult = await graph.CreateDeviceOutputNodeAsync();

            if (deviceOutputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                System.Diagnostics.Debug.WriteLine(String.Format("Audio Device Output unavailable because {0}", deviceOutputNodeResult.Status.ToString()));
                return;
            }

            AudioDeviceOutputNode deviceOutputNode = deviceOutputNodeResult.DeviceOutputNode;

            // Create a device input node using the default audio input device
            CreateAudioDeviceInputNodeResult deviceInputNodeResult = await graph.CreateDeviceInputNodeAsync(MediaCategory.Other, graph.EncodingProperties, SelectedDevice);

            if (deviceInputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                // Cannot create device input node
                System.Diagnostics.Debug.WriteLine(String.Format("Audio Device Input unavailable because {0}", deviceInputNodeResult.Status.ToString()));

                return;
            }

            AudioDeviceInputNode deviceInputNode = deviceInputNodeResult.DeviceInputNode;

            frameOutputNode = graph.CreateFrameOutputNode();
            deviceInputNode.AddOutgoingConnection(frameOutputNode);

            AudioFrameInputNode frameInputNode = graph.CreateFrameInputNode();

            frameInputNode.AddOutgoingConnection(deviceOutputNode);

            // Attach to QuantumStarted event in order to receive synchronous updates from audio graph (to capture incoming audio).
            graph.QuantumStarted += GraphOnQuantumProcessed;

            graph.Start();
        }
Example #17
        private async void CreateAudioGraphAsync()
        {
            AudioGraphSettings settings = new AudioGraphSettings(AudioRenderCategory.Media)
            {
                //settings.DesiredSamplesPerQuantum = fftLength;
                DesiredRenderDeviceAudioProcessing = AudioProcessing.Default,
                QuantumSizeSelectionMode           = QuantumSizeSelectionMode.ClosestToDesired
            };

            CreateAudioGraphResult graphResult = await AudioGraph.CreateAsync(settings);

            if (graphResult.Status != AudioGraphCreationStatus.Success)
            {
                throw new InvalidOperationException($"Graph creation failed {graphResult.Status}");
            }

            _graph = graphResult.Graph;

            //CreateAudioDeviceInputNodeResult inputNodeResult = await _graph.CreateDeviceInputNodeAsync(MediaCategory.Media);
            CreateAudioDeviceInputNodeResult inputNodeResult = await _graph.CreateDeviceInputNodeAsync(MediaCategory.Other);

            if (inputNodeResult.Status == AudioDeviceNodeCreationStatus.Success)
            {
                _inputNode = inputNodeResult.DeviceInputNode;


                _frameOutputNode = _graph.CreateFrameOutputNode();
                _inputNode.AddOutgoingConnection(_frameOutputNode);
                _frameOutputNode.Start();
                _graph.QuantumProcessed += AudioGraph_QuantumProcessed;

                // Handle device disconnection errors so the graph can be torn down cleanly
                _graph.UnrecoverableErrorOccurred += Graph_UnrecoverableErrorOccurred;

                _graph.Start();
            }
            else
            {
                MessageDialog md = new MessageDialog("Cannot access microphone");
                await md.ShowAsync();
            }
        }
Example #18
        /// <summary>
        /// Tyrs to creates the frame output node and trys to set the outgoing connection to it. Also calculates audioFrameUpdateMinimum.
        /// </summary>
        /// <returns>Whether or not the attempt was successful</returns>
        private static async Task <bool> CreateNodes()
        {
            try
            {
                CreateAudioDeviceInputNodeResult deviceInputNodeResult = await graph.CreateDeviceInputNodeAsync(MediaCategory.Other);

                if (deviceInputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
                {
                    return false;
                }

                deviceInputNode = deviceInputNodeResult.DeviceInputNode;

                frameOutputNode       = graph.CreateFrameOutputNode(graph.EncodingProperties);
                graph.QuantumStarted += Graph_QuantumStarted;

                audioFrameUpdateMinimum = Convert.ToInt32(samplesPerQuantumLimit / graph.SamplesPerQuantum);
                deviceInputNode.AddOutgoingConnection(frameOutputNode);

                return true;
            }
            catch (Exception)
            {
                return false;
            }
        }
        /// <summary>
        /// Handle the IDisposable pattern, specifically for the managed resources here.
        /// </summary>
        /// <param name="disposing"> whether managed resources are being disposed. </param>
        public void Dispose(bool disposing)
        {
            if (!this.disposed)
            {
                if (disposing)
                {
                    this.inputNode?.Dispose();
                    this.outputNode?.Dispose();
                    this.inputGraph?.Dispose();
                    this.debugAudioOutputFileSemaphore?.Dispose();
                    this.debugAudioOutputFileStream?.Dispose();
                }

                this.inputGraph = null;
                this.inputNode  = null;
                this.outputNode = null;
                this.debugAudioOutputFileSemaphore = null;
                this.debugAudioOutputFileStream    = null;

                this.disposed = true;
            }
        }
Example #20
        private async void Init()
        {
            AudioGraphSettings settings = new AudioGraphSettings(AudioRenderCategory.Media);

            settings.QuantumSizeSelectionMode = QuantumSizeSelectionMode.LowestLatency;

            CreateAudioGraphResult result = await AudioGraph.CreateAsync(settings);

            if (result.Status != AudioGraphCreationStatus.Success)
            {
                mainPage.MessageBox("Could not create input device for Mic To MIDI!");
                return;
            }

            audioGraph = result.Graph;
            CreateAudioDeviceInputNodeResult deviceInputNodeResult = await audioGraph.CreateDeviceInputNodeAsync(MediaCategory.Other);

            if (deviceInputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                mainPage.MessageBox(String.Format("Audio Device Input unavailable because {0}", deviceInputNodeResult.Status.ToString()));
                return;
            }

            deviceInputNode = deviceInputNodeResult.DeviceInputNode;

            frameOutputNode = audioGraph.CreateFrameOutputNode();
            deviceInputNode.AddOutgoingConnection(frameOutputNode);
            //audioGraph.QuantumStarted += AudioGraph_QuantumStarted;

            audioGraph.Start();
            deviceInputNode.Start();
            frameOutputNode.Start();

            timer          = new DispatcherTimer();
            timer.Interval = new TimeSpan(0, 0, 0, 0, 1); // 1 ms
            timer.Tick    += Timer_Tick;
            timer.Start();
            periodLengthUK101 = 0;
        }
Example #21
        public async Task Start()
        {
            var pcmEncoding = AudioEncodingProperties.CreatePcm(16000, 1, 16);

            // Construct the audio graph
            // mic -> Machine Translate Service
            // Machine Translation text to speech output -> speaker
            var result = await AudioGraph.CreateAsync(
                new AudioGraphSettings(AudioRenderCategory.Speech)
            {
                DesiredRenderDeviceAudioProcessing = AudioProcessing.Raw,
                AudioRenderCategory = AudioRenderCategory.Speech,
                EncodingProperties  = pcmEncoding
            });

            if (result.Status != AudioGraphCreationStatus.Success)
            {
                throw new Exception("AudioGraph creation error: " + result.Status);
            }

            m_audioGraph = result.Graph;

            m_frameOutputNode = m_audioGraph.CreateFrameOutputNode(pcmEncoding);

            var inputResult = await m_audioGraph.CreateDeviceInputNodeAsync(MediaCategory.Speech, pcmEncoding);

            if (inputResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                throw new Exception("AudioGraph CreateDeviceInputNodeAsync error: " + result.Status);
            }

            m_deviceInputNode = inputResult.DeviceInputNode;
            m_deviceInputNode.AddOutgoingConnection(m_frameOutputNode);
            m_audioGraph.QuantumStarted += node_QuantumStarted;
            m_audioGraph.Start();
        }
Example #22
 private void OnCreateInputCompleted(IAsyncOperation<CreateAudioDeviceInputNodeResult> asyncInfo, AsyncStatus asyncStatus)
 {
     if (asyncStatus == AsyncStatus.Completed)
     {
         CreateAudioDeviceInputNodeResult result = asyncInfo.GetResults();
         if (result.Status == AudioDeviceNodeCreationStatus.Success)
         {
             this.m_AudioDevideInputNode = result.DeviceInputNode;
             this.m_AudioFrameOutputNode = this.m_AudioGraph.CreateFrameOutputNode();
             this.m_AudioDevideInputNode.AddOutgoingConnection(this.m_AudioFrameOutputNode);
             this.m_AudioGraph.QuantumStarted += M_AudioGraph_QuantumStarted;
             //this.m_AudioGraph.QuantumProcessed += M_AudioGraph_QuantumProcessed;
             this.m_AudioGraph.Start();
         }
         else
         {
             ShowMessage($"Failed to create audio device input node: {result.Status}");
         }
     }
     else
     {
         ShowMessage($"Failed to create audio device input node: {asyncStatus}");
     }
 }
Example #23
        private async Task Play()
        {
            if (IsPlaying)
            {
                Pause();
                return;
            }

            if (_audioGraph == null)
            {
                var settings = new AudioGraphSettings(AudioRenderCategory.Media)
                {
                    PrimaryRenderDevice = SelectedDevice
                };

                var createResult = await AudioGraph.CreateAsync(settings);

                if (createResult.Status != AudioGraphCreationStatus.Success)
                {
                    return;
                }

                _audioGraph = createResult.Graph;
                _audioGraph.UnrecoverableErrorOccurred += OnAudioGraphError;
            }

            if (_deviceOutputNode == null)
            {
                var deviceResult = await _audioGraph.CreateDeviceOutputNodeAsync();

                if (deviceResult.Status != AudioDeviceNodeCreationStatus.Success)
                {
                    return;
                }
                _deviceOutputNode = deviceResult.DeviceOutputNode;
            }

            if (_frameOutputNode == null)
            {
                _frameOutputNode              = _audioGraph.CreateFrameOutputNode();
                _audioGraph.QuantumProcessed += GraphOnQuantumProcessed;
            }

            if (_fileInputNode == null)
            {
                if (CurrentPlayingFile == null)
                {
                    return;
                }

                var fileResult = await _audioGraph.CreateFileInputNodeAsync(CurrentPlayingFile);

                if (fileResult.Status != AudioFileNodeCreationStatus.Success)
                {
                    return;
                }
                _fileInputNode = fileResult.FileInputNode;
                _fileInputNode.AddOutgoingConnection(_deviceOutputNode);
                _fileInputNode.AddOutgoingConnection(_frameOutputNode);
                Duration = _fileInputNode.Duration;
                _fileInputNode.PlaybackSpeedFactor = PlaybackSpeed / 100.0;
                _fileInputNode.OutgoingGain        = Volume / 100.0;
                _fileInputNode.FileCompleted      += FileInputNodeOnFileCompleted;
            }

            Debug.WriteLine($" CompletedQuantumCount: {_audioGraph.CompletedQuantumCount}");
            Debug.WriteLine($"SamplesPerQuantum: {_audioGraph.SamplesPerQuantum}");
            Debug.WriteLine($"LatencyInSamples: {_audioGraph.LatencyInSamples}");
            var channelCount = (int)_audioGraph.EncodingProperties.ChannelCount;

            _fftProvider = new FftProvider(channelCount, FftSize.Fft2048);
            _audioGraph.Start();
            IsPlaying = true;
        }
        private async Task<CreateAudioFileInputNodeResult> LoadAudioFromFile(
            StorageFile file,
            IProgress<string> status)
        {
            _finished = false;
            status.Report("Reading audio file");

            // Initialize FileInputNode
            var inputNodeCreationResult =
                await _audioGraph.CreateFileInputNodeAsync(file);

            if (inputNodeCreationResult.Status != AudioFileNodeCreationStatus.Success)
            {
                return inputNodeCreationResult;
            }

            _fileInputNode = inputNodeCreationResult.FileInputNode;


            // Read audio file encoding properties to pass them
            //to FrameOutputNode creator

            var audioEncodingProperties =
                _fileInputNode.EncodingProperties;

            // Initialize FrameOutputNode and connect it to fileInputNode
            _frameOutputNode = _audioGraph.CreateFrameOutputNode(
                audioEncodingProperties
                );
            _frameOutputNode.Stop();
            _fileInputNode.AddOutgoingConnection(_frameOutputNode);

            // Add a handler for reaching the end of the file
            _fileInputNode.FileCompleted += FileInput_FileCompleted;
            // Add a handler which will transfer every audio frame into audioData
            _audioGraph.QuantumStarted += FileInput_QuantumStarted;

            // Initialize audioData
            var numOfSamples = (int)Math.Ceiling(
                (decimal)0.0000001
                * _fileInputNode.Duration.Ticks
                * _fileInputNode.EncodingProperties.SampleRate
                );

            if (audioEncodingProperties.ChannelCount == 1)
            {
                SetAudioData(new AudioDataMono(new float[numOfSamples]));
            }
            else
            {
                SetAudioData(new AudioDataStereo(new float[numOfSamples],
                                                 new float[numOfSamples]));
            }

            _audioDataCurrentPosition = 0;

            // Start the process which will read the audio file frame by frame
            // and will generate QuantumStarted events when a frame is in memory
            _audioGraph.Start();

            // didn't find a better way to wait for data
            while (!_finished)
            {
                await Task.Delay(50);
            }

            // clear status line
            status.Report("");

            return inputNodeCreationResult;
        }
Example #25
        /// <summary>
        /// Create input audio graph
        /// </summary>
        /// <param name="deviceId">Override for default input device id</param>
        public static async Task <bool> CreateInputDeviceNode(string deviceId = null)
        {
            // If a graph already exists for a different device, dispose it before recreating
            if (ingraph != null && deviceId != InputDeviceID)
            {
                HeavyDisposeInGraph();
            }
            // Otherwise count another user of the existing graph
            else
            {
                inGraphCount++;
            }

            Console.WriteLine("Creating AudioGraphs");

            // Create an AudioGraph configured for 48 kHz, stereo, 32-bit float
            AudioGraphSettings graphsettings = new AudioGraphSettings(AudioRenderCategory.Media);

            graphsettings.EncodingProperties               = new AudioEncodingProperties();
            graphsettings.EncodingProperties.Subtype       = "Float";
            graphsettings.EncodingProperties.SampleRate    = 48000;
            graphsettings.EncodingProperties.ChannelCount  = 2;
            graphsettings.EncodingProperties.BitsPerSample = 32;
            graphsettings.EncodingProperties.Bitrate       = 3072000;
            CreateAudioGraphResult graphresult = await AudioGraph.CreateAsync(graphsettings);

            if (graphresult.Status != AudioGraphCreationStatus.Success)
            {
                // Cannot create graph
                inGraphCount--;
                LocalState.VoiceState.SelfMute = true;
                VoiceManager.lockMute          = true;
                return false;
            }

            // "Save" graph
            ingraph = graphresult.Graph;

            // Create frameOutputNode
            AudioGraphSettings nodesettings = new AudioGraphSettings(AudioRenderCategory.GameChat);

            nodesettings.EncodingProperties       = AudioEncodingProperties.CreatePcm(48000, 2, 32);
            nodesettings.DesiredSamplesPerQuantum = 960;
            nodesettings.QuantumSizeSelectionMode = QuantumSizeSelectionMode.ClosestToDesired;
            frameOutputNode         = ingraph.CreateFrameOutputNode(ingraph.EncodingProperties);
            quantum                 = 0;
            ingraph.QuantumStarted += Graph_QuantumStarted;

            // Determine selected device
            DeviceInformation selectedDevice;

            if (deviceId == "Default" || deviceId == null)
            {
                string device = Windows.Media.Devices.MediaDevice.GetDefaultAudioCaptureId(Windows.Media.Devices.AudioDeviceRole.Default);
                if (!string.IsNullOrEmpty(device))
                {
                    selectedDevice = await DeviceInformation.CreateFromIdAsync(device);

                    Windows.Media.Devices.MediaDevice.DefaultAudioCaptureDeviceChanged += MediaDevice_DefaultAudioCaptureDeviceChanged;
                }
                else
                {
                    inGraphCount--;
                    LocalState.VoiceState.SelfMute = true;
                    VoiceManager.lockMute          = true;
                    return false;
                }
            }
            else
            {
                try
                {
                    selectedDevice = await DeviceInformation.CreateFromIdAsync(deviceId);
                }
                catch
                {
                    selectedDevice = await DeviceInformation.CreateFromIdAsync(Windows.Media.Devices.MediaDevice.GetDefaultAudioCaptureId(Windows.Media.Devices.AudioDeviceRole.Default));

                    deviceId = "Default";
                }
            }

            CreateAudioDeviceInputNodeResult result =
                await ingraph.CreateDeviceInputNodeAsync(MediaCategory.Media, nodesettings.EncodingProperties, selectedDevice);

            if (result.Status != AudioDeviceNodeCreationStatus.Success)
            {
                // Cannot create device input node
                inGraphCount--;
                LocalState.VoiceState.SelfMute = true;
                VoiceManager.lockMute          = true;
                return false;
            }


            // Attach input device
            deviceInputNode = result.DeviceInputNode;
            deviceInputNode.AddOutgoingConnection(frameOutputNode);
            InputDeviceID = deviceId;

            // Begin playing
            frameOutputNode.Start();
            ingraph.Start();
            return true;
        }
Example #26
 private void CreateToxOutputNode()
 {
     _toxOutputNode = _audioGraph.CreateFrameOutputNode();
     _audioGraph.QuantumProcessed += AudioGraphQuantumProcessedHandler;
     _microphoneInputNode.AddOutgoingConnection(_toxOutputNode);
 }
Example #27
        //</SnippetGenerateAudioData>

        //<SnippetCreateFrameOutputNode>
        private void CreateFrameOutputNode()
        {
            frameOutputNode              = audioGraph.CreateFrameOutputNode();
            audioGraph.QuantumProcessed += AudioGraph_QuantumProcessed;
        }
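Example #27 registers AudioGraph_QuantumProcessed without showing it. A minimal sketch of the handler shape this pattern pairs with, where ProcessFrameOutput stands in for whatever per-frame analysis the app performs (both names are illustrative):

        private void AudioGraph_QuantumProcessed(AudioGraph sender, object args)
        {
            // Pull the audio produced during this quantum and hand it off for processing.
            using (AudioFrame frame = frameOutputNode.GetFrame())
            {
                ProcessFrameOutput(frame);
            }
        }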
Example #28
        protected override async void OnNavigatedTo(NavigationEventArgs e)
        {
            var audioInputDevices = await DeviceInformation.FindAllAsync(DeviceClass.AudioCapture);

            foreach (var device in audioInputDevices)
            {
                if (device.Name.ToLower().Contains("usb"))
                {
                    audioInput = device;
                    break;
                }
            }
            if (audioInput == null)
            {
                Debug.WriteLine("Could not find USB audio card");
                return;
            }
            var audioOutputDevices = await DeviceInformation.FindAllAsync(DeviceClass.AudioRender);

            foreach (var device in audioOutputDevices)
            {
                if (device.Name.ToLower().Contains("usb"))
                {
                    audioOutput = device;
                }
                else
                {
                    raspiAudioOutput = device;
                }
            }
            if (audioOutput == null)
            {
                Debug.WriteLine("Could not find USB audio output device");
                return;
            }

            // Set up LED strips
            await leftStrip.Begin();

            await rightStrip.Begin();

            //await AudioTest();
            AudioGraphSettings audioGraphSettings = new AudioGraphSettings(AudioRenderCategory.Media);

            audioGraphSettings.DesiredSamplesPerQuantum           = 440;
            audioGraphSettings.DesiredRenderDeviceAudioProcessing = AudioProcessing.Default;
            audioGraphSettings.QuantumSizeSelectionMode           = QuantumSizeSelectionMode.ClosestToDesired;
            audioGraphSettings.PrimaryRenderDevice = raspiAudioOutput;
            CreateAudioGraphResult audioGraphResult = await AudioGraph.CreateAsync(audioGraphSettings);

            if (audioGraphResult.Status != AudioGraphCreationStatus.Success)
            {
                Debug.WriteLine("AudioGraph creation failed! " + audioGraphResult.Status);
                return;
            }
            audioGraph = audioGraphResult.Graph;
            //Debug.WriteLine(audioGraph.SamplesPerQuantum);
            CreateAudioDeviceInputNodeResult inputNodeResult = await audioGraph.CreateDeviceInputNodeAsync(MediaCategory.Media, audioGraph.EncodingProperties, audioInput);

            if (inputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                Debug.WriteLine("AudioDeviceInputNode creation failed! " + inputNodeResult.Status);
                return;
            }
            AudioDeviceInputNode inputNode = inputNodeResult.DeviceInputNode;
            CreateAudioDeviceOutputNodeResult outputNodeResult = await audioGraph.CreateDeviceOutputNodeAsync();

            if (outputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                Debug.WriteLine("AudioDeviceOutputNode creation failed! " + outputNodeResult.Status);
                return;
            }
            AudioDeviceOutputNode outputNode = outputNodeResult.DeviceOutputNode;

            frameOutputNode = audioGraph.CreateFrameOutputNode();
            inputNode.AddOutgoingConnection(frameOutputNode);
            inputNode.AddOutgoingConnection(outputNode);
            cube.SetSpeedStripLedColors(LedColorLists.rainbowColors);
            audioGraph.QuantumProcessed           += AudioGraph_QuantumProcessed;
            audioGraph.UnrecoverableErrorOccurred += AudioGraph_UnrecoverableErrorOccurred;
            audioGraph.Start();
            outputNode.Start();
            inputNode.Start();
            frameOutputNode.Start();
            cube.Reset();
            cube.Update();
            //await MathFunc();
            //cube.ApplyColorFunction((x, y, z) =>
            //{
            //    Color c = Color.FromArgb(255,
            //        (byte)((x / 14.0) * 255.0),
            //        (byte)((y / 14.0) * 255.0),
            //        (byte)((z / 14.0) * 255.0));
            //    return c;
            //});
            //cube.SetLedColors();
            //cube.Update();
            //cube.bottomFrontEdge.SetColor(Colors.Red);
            //cube.bottomRightEdge.SetColor(Colors.OrangeRed);
            //cube.bottomBackEdge.SetColor(Colors.Yellow);
            //cube.bottomLeftEdge.SetColor(Colors.Green);
            //cube.frontLeftEdge.SetColor(Colors.Blue);
            //cube.frontTopEdge.SetColor(Colors.Purple);
            //cube.rightLeftEdge.Brightness = 10;
            //cube.rightLeftEdge.SetColor(Colors.Red);
            //cube.rightTopEdge.Brightness = 10;
            //cube.rightTopEdge.SetColor(Colors.OrangeRed);
            //cube.backLeftEdge.Brightness = 10;
            //cube.backLeftEdge.SetColor(Colors.Yellow);
            //cube.backTopEdge.Brightness = 10;
            //cube.backTopEdge.SetColor(Colors.Green);
            //cube.leftLeftEdge.Brightness = 10;
            //cube.leftLeftEdge.SetColor(Colors.Blue);
            //cube.leftTopEdge.Brightness = 10;
            //cube.leftTopEdge.SetColor(Colors.Purple);
            //cube.Update();
            //await RainbowTest();
            //cube.Brightness = 30;
            //await FlashTest();
            //SetAll();
            //await FadeTest();
            //cube.Reset();
            //cube.Update();
            //await cube.rightLeftEdge.DoLine();
            //ZackTest();
        }
        // Create the AudioGraph
        private async Task CreateAudioGraph()
        {
            // Create a new AudioGraph settings object to store the options; here you can play with latency/output device etc.
            AudioGraphSettings settings = new AudioGraphSettings(AudioRenderCategory.SoundEffects);
            settings.QuantumSizeSelectionMode = QuantumSizeSelectionMode.ClosestToDesired;
            settings.DesiredSamplesPerQuantum = desiredSamples;
            settings.DesiredRenderDeviceAudioProcessing = Windows.Media.AudioProcessing.Default;
            CreateAudioGraphResult result = await AudioGraph.CreateAsync(settings);

            if (result.Status != AudioGraphCreationStatus.Success)
            {
                // Cannot create graph
                ShowErrorMessage(string.Format("AudioGraph Creation Error because {0}", result.Status.ToString()));
                return;
            }
            graph = result.Graph;


            // Create a device input node using the default audio input device
            CreateAudioDeviceInputNodeResult deviceInputNodeResult = await graph.CreateDeviceInputNodeAsync(MediaCategory.Other);

            if (deviceInputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                // Cannot create device input node
                ShowErrorMessage(string.Format("Audio Device Input unavailable because {0}", deviceInputNodeResult.Status.ToString()));
                return;
            }
            deviceInputNode = deviceInputNodeResult.DeviceInputNode;

            
            // Create the output node to send to data processing and add the event handler for when a quantum is processed
            frameOutputNode = graph.CreateFrameOutputNode();
            graph.QuantumProcessed += AudioGraph_QuantumProcessed;


            // Link the nodes together
            deviceInputNode.AddOutgoingConnection(frameOutputNode);


            // Handle device disconnection errors so the graph can be torn down cleanly
            graph.UnrecoverableErrorOccurred += Graph_UnrecoverableErrorOccurred;
        }
Example #30
        public async void StartListening(ServiceViewModel model)
        {
            this.viewModel = model;

            await this.viewModel.SpeechClient.Clear();

            var fromValue  = this.viewModel.SelectedSpeechLanguage.Abbreviation;
            var toValue    = this.viewModel.SelectedTextLanguage.Abbreviation;
            var voiceValue = this.viewModel.SelectedSpeechVoice.Name;

            await this.viewModel.SpeechHelper.Connect(fromValue, toValue, voiceValue, this.DisplayResult, this.SendAudioOut);

            var pcmEncoding = Windows.Media.MediaProperties.AudioEncodingProperties.CreatePcm(16000, 1, 16);

            var result = await Windows.Media.Audio.AudioGraph.CreateAsync(
                new Windows.Media.Audio.AudioGraphSettings(Windows.Media.Render.AudioRenderCategory.Speech)
            {
                DesiredRenderDeviceAudioProcessing = Windows.Media.AudioProcessing.Raw,
                AudioRenderCategory = Windows.Media.Render.AudioRenderCategory.Speech,
                EncodingProperties  = pcmEncoding
            });

            if (result.Status == Windows.Media.Audio.AudioGraphCreationStatus.Success)
            {
                this.graph = result.Graph;

                var microphone = await DeviceInformation.CreateFromIdAsync(this.viewModel.SelectedMicrophone.Id);

                this.speechTranslateOutputMode = this.graph.CreateFrameOutputNode(pcmEncoding);
                this.graph.QuantumProcessed   += (s, a) => this.SendToSpeechTranslate(this.speechTranslateOutputMode.GetFrame());

                this.speechTranslateOutputMode.Start();

                var micInputResult = await this.graph.CreateDeviceInputNodeAsync(Windows.Media.Capture.MediaCategory.Speech, pcmEncoding, microphone);

                if (micInputResult.Status == Windows.Media.Audio.AudioDeviceNodeCreationStatus.Success)
                {
                    micInputResult.DeviceInputNode.AddOutgoingConnection(this.speechTranslateOutputMode);
                    micInputResult.DeviceInputNode.Start();
                }
                else
                {
                    throw new InvalidOperationException();
                }

                var speakerOutputResult = await this.graph.CreateDeviceOutputNodeAsync();

                if (speakerOutputResult.Status == Windows.Media.Audio.AudioDeviceNodeCreationStatus.Success)
                {
                    this.speakerOutputNode = speakerOutputResult.DeviceOutputNode;
                    this.speakerOutputNode.Start();
                }
                else
                {
                    throw new InvalidOperationException();
                }

                this.textToSpeechOutputNode = this.graph.CreateFrameInputNode(pcmEncoding);
                this.textToSpeechOutputNode.AddOutgoingConnection(this.speakerOutputNode);
                this.textToSpeechOutputNode.Start();

                this.graph.Start();
            }
        }
 private void CreateFrameOutputNode()
 {
     _frameOutputNode            = _audioGraph.CreateFrameOutputNode();
     _audioGraph.QuantumStarted += AudioGraph_QuantumStarted;
 }