// Builds the full audio graph: mic -> frame output tap, frame input -> speakers.
// The frame input node starts stopped; the graph itself runs continuously.
private async Task InitializeAudioAsync()
{
    // Graph format: 22.05 kHz, mono, 16-bit PCM.
    var graphSettings = new AudioGraphSettings(AudioRenderCategory.Media)
    {
        EncodingProperties = AudioEncodingProperties.CreatePcm(22050, 1, 16)
    };

    CreateAudioGraphResult graphResult = await AudioGraph.CreateAsync(graphSettings);
    if (graphResult.Status != AudioGraphCreationStatus.Success)
    {
        return;
    }
    _graph = graphResult.Graph;

    // Speaker output node.
    CreateAudioDeviceOutputNodeResult outputResult = await _graph.CreateDeviceOutputNodeAsync();
    if (outputResult.Status != AudioDeviceNodeCreationStatus.Success)
    {
        return;
    }
    _deviceOutputNode = outputResult.DeviceOutputNode;

    // Default microphone input node.
    CreateAudioDeviceInputNodeResult inputResult = await _graph.CreateDeviceInputNodeAsync(MediaCategory.Other);
    if (inputResult.Status != AudioDeviceNodeCreationStatus.Success)
    {
        return;
    }
    _deviceInputNode = inputResult.DeviceInputNode;

    // Frame nodes use the graph's format, explicitly forced to mono.
    AudioEncodingProperties monoEncoding = _graph.EncodingProperties;
    monoEncoding.ChannelCount = 1;

    _frameInputNode = _graph.CreateFrameInputNode(monoEncoding);
    _frameInputNode.AddOutgoingConnection(_deviceOutputNode);

    _frameOutputNode = _graph.CreateFrameOutputNode(monoEncoding);
    _deviceInputNode.AddOutgoingConnection(_frameOutputNode);

    // Frame input node begins in the stopped state; it is started on demand
    // while the graph keeps running.
    _frameInputNode.Stop();

    // QuantumStarted fires whenever the frame input node needs sample data.
    _frameInputNode.QuantumStarted += node_QuantumStarted;
    _graph.QuantumProcessed += GraphOnQuantumProcessed;

    // Start the graph since we only start/stop the frame input node afterwards.
    _graph.Start();
}
// Creates the frame output node used to pull audio data out of the graph and
// hooks QuantumStarted so frames are captured as each quantum begins.
// Returns a completed Task: the original was declared `async` without any
// `await` (compiler warning CS1998), paying state-machine overhead for nothing.
private Task InitProcessorNode()
{
    audioFrameProcessor = ag.CreateFrameOutputNode();
    if (audioFrameProcessor != null)
    {
        rootPage.ShowLogs("Processor Node initialized successfully:");
        ag.QuantumStarted += Ag_QuantumStarted;
    }
    return Task.CompletedTask;
}
// (Re)initializes the capture graph: default communications input device
// feeding a frame output node. Throws on any creation failure.
async Task Init()
{
    await Stop();

    var pcmEncoding = AudioEncodingProperties.CreatePcm((uint)SampleRate, (uint)ChannelCount, (uint)BitsPerSample);
    // apparently this is not _really_ used/supported here, as the audio data
    // seems to come thru as floats (so basically MediaEncodingSubtypes.Float?)
    pcmEncoding.Subtype = MediaEncodingSubtypes.Pcm;

    var graphSettings = new AudioGraphSettings(AudioRenderCategory.Media)
    {
        EncodingProperties = pcmEncoding,
        // these do not seem to take effect on certain hardware and MSFT
        // recommends SystemDefault when recording to a file anyway
        DesiredRenderDeviceAudioProcessing = AudioProcessing.Raw
        // We'll buffer audio data ourselves to improve RMS calculation across larger samples
        //QuantumSizeSelectionMode = QuantumSizeSelectionMode.ClosestToDesired,
        //DesiredSamplesPerQuantum = 4096
    };

    // create our audio graph... this will be a device input node feeding audio
    // data into a frame output node
    var graphResult = await AudioGraph.CreateAsync(graphSettings);
    if (graphResult.Status != AudioGraphCreationStatus.Success)
    {
        throw new Exception($"AudioGraph.CreateAsync() returned non-Success status: {graphResult.Status}");
    }
    audioGraph = graphResult.Graph;

    // take input from whatever the default communications device is set to on windows
    var inputResult = await audioGraph.CreateDeviceInputNodeAsync(MediaCategory.Communications, pcmEncoding);
    if (inputResult.Status != AudioDeviceNodeCreationStatus.Success)
    {
        throw new Exception($"audioGraph.CreateDeviceInputNodeAsync() returned non-Success status: {inputResult.Status}");
    }

    // create the output node
    outputNode = audioGraph.CreateFrameOutputNode(pcmEncoding);
    // wire the input to the output
    inputResult.DeviceInputNode.AddOutgoingConnection(outputNode);

    // Attach to QuantumStarted in order to receive synchronous updates from the
    // audio graph (to capture incoming audio).
    audioGraph.QuantumStarted += Graph_QuantumStarted;
    audioGraph.UnrecoverableErrorOccurred += Graph_UnrecoverableErrorOccurred;

    // NOTE: the original wrapped this whole body in `try { ... } catch { throw; }`,
    // which is a no-op; exceptions propagate identically without it.
}
// Lets the user pick an audio file, loads it into a file input node, and wires
// it to the device output and the frame output tap.
private async void File_Click(object sender, RoutedEventArgs e)
{
    // If another file is already loaded into the FileInput node
    if (fileInput != null)
    {
        // Release the file and dispose the contents of the node
        fileInput.Dispose();
        // Stop playback since a new file is being loaded. Also reset the button UI
        if (graphButton.Content.Equals("Stop Graph"))
        {
            TogglePlay();
        }
    }

    FileOpenPicker filePicker = new FileOpenPicker();
    filePicker.SuggestedStartLocation = PickerLocationId.MusicLibrary;
    filePicker.FileTypeFilter.Add(".mp3");
    filePicker.FileTypeFilter.Add(".wav");
    filePicker.FileTypeFilter.Add(".wma");
    filePicker.FileTypeFilter.Add(".m4a");
    filePicker.ViewMode = PickerViewMode.Thumbnail;
    StorageFile file = await filePicker.PickSingleFileAsync();

    // File can be null if cancel is hit in the file picker
    if (file == null)
    {
        return;
    }

    CreateAudioFileInputNodeResult fileInputResult = await graph.CreateFileInputNodeAsync(file);
    if (AudioFileNodeCreationStatus.Success != fileInputResult.Status)
    {
        // Cannot read input file
        rootPage.NotifyUser(String.Format("Cannot read input file because {0}", fileInputResult.Status.ToString()), NotifyType.ErrorMessage);
        return;
    }

    // FIX: only create the frame output node and subscribe to QuantumProcessed
    // once the file node is known to be good. The original did this before the
    // status check, leaking an orphaned node and adding a duplicate event
    // subscription on every failed load attempt.
    frameOutputNode = graph.CreateFrameOutputNode();
    graph.QuantumProcessed += AudioGraph_QuantumProcessed;

    fileInput = fileInputResult.FileInputNode;
    fileInput.AddOutgoingConnection(deviceOutput, 0.5);
    fileButton.Background = new SolidColorBrush(Colors.Green);

    // StartTime/EndTime can be used to trim playback; currently the file plays
    // from the very beginning.
    fileInput.StartTime = TimeSpan.FromSeconds(0);

    // Enable buttons in UI to start graph, loop and change playback speed factor
    graphButton.IsEnabled = true;
    loopToggle.IsEnabled = true;
    //playSpeedSlider.IsEnabled = true;
}
// Builds the monitoring graph: default mic routed both to a frame output
// capture node and straight through to the default speaker output.
public async Task InitializeAsync()
{
    DebugUtil.CheckAppThread();

    AudioGraphSettings settings = new AudioGraphSettings(AudioRenderCategory.Media);
    // settings.DesiredRenderDeviceAudioProcessing = AudioProcessing.Raw;
    settings.QuantumSizeSelectionMode = QuantumSizeSelectionMode.LowestLatency;

    CreateAudioGraphResult result = await AudioGraph.CreateAsync(settings);
    DebugUtil.Assert(result.Status == AudioGraphCreationStatus.Success, "Failed to create audio graph");
    _audioGraph = result.Graph;

    // Create a device output node.
    // BUG FIX: the original called CreateDeviceOutputNodeAsync().GetResults(),
    // which throws if the IAsyncOperation has not yet completed; the operation
    // must be awaited. (Also removed the unused latencyInSamples local.)
    CreateAudioDeviceOutputNodeResult deviceOutputNodeResult = await _audioGraph.CreateDeviceOutputNodeAsync();
    DebugUtil.Assert(deviceOutputNodeResult.Status == AudioDeviceNodeCreationStatus.Success,
        $"Audio Device Output unavailable because {deviceOutputNodeResult.Status}");
    _deviceOutputNode = deviceOutputNodeResult.DeviceOutputNode;

    _inputCaptureNode = _audioGraph.CreateFrameOutputNode();

    // Create a device input node using the default audio input device
    CreateAudioDeviceInputNodeResult deviceInputNodeResult = await _audioGraph.CreateDeviceInputNodeAsync(MediaCategory.Other);
    DebugUtil.Assert(deviceInputNodeResult.Status == AudioDeviceNodeCreationStatus.Success,
        $"Audio Device Input unavailable because {deviceInputNodeResult.Status}");
    _deviceInputNode = deviceInputNodeResult.DeviceInputNode;

    // Route the microphone both to the capture node and to the speakers.
    _deviceInputNode.AddOutgoingConnection(_inputCaptureNode);
    _deviceInputNode.AddOutgoingConnection(_deviceOutputNode);

    /*
     * echoEffect = new EchoEffectDefinition(_graph);
     * echoEffect.WetDryMix = 0.7f;
     * echoEffect.Feedback = 0.5f;
     * echoEffect.Delay = 500.0f;
     * submixNode.EffectDefinitions.Add(echoEffect);
     *
     * // Disable the effect in the beginning. Enable in response to user action (UI toggle switch)
     * submixNode.DisableEffectsByDefinition(echoEffect);
     */

    // All nodes can have an OutgoingGain property
    // Setting the gain on the Submix node attenuates the output of the node
    //_submixNode.OutgoingGain = 0.5;
}
// Creates a frame output node fed by the given input node and pumps one frame
// per audio quantum into the speech-recognition pipeline.
private AudioFrameOutputNode AttachSpeechRecognitionMode(IAudioInputNode inputNode)
{
    var recognitionNode = graph.CreateFrameOutputNode(encoding.Audio);

    // On every quantum, pull the latest frame and hand it to the recognizer.
    graph.QuantumStarted += (sender, args) =>
    {
        ProcessFrameOutput(recognitionNode.GetFrame());
    };

    inputNode.AddOutgoingConnection(recognitionNode);
    return recognitionNode;
}
// Builds the capture graph: default system microphone -> frame output node.
// Graph format is 48 kHz stereo 32-bit float; device nodes use 16-bit PCM.
public static async Task CreateDeviceInputNode()
{
    Console.WriteLine("Creating AudioGraphs");

    var graphSettings = new AudioGraphSettings(AudioRenderCategory.GameChat)
    {
        EncodingProperties = new AudioEncodingProperties
        {
            Subtype = "Float",
            SampleRate = 48000,
            ChannelCount = 2,
            BitsPerSample = 32,
            Bitrate = 3072000
        }
        //DesiredSamplesPerQuantum = 960,
        //QuantumSizeSelectionMode = QuantumSizeSelectionMode.ClosestToDesired
    };

    CreateAudioGraphResult creation = await AudioGraph.CreateAsync(graphSettings);
    if (creation.Status != AudioGraphCreationStatus.Success)
    {
        // Cannot create graph
        return;
    }
    ingraph = creation.Graph;

    // Device-node format: 48 kHz stereo 16-bit PCM, ~960 samples per quantum.
    var nodeSettings = new AudioGraphSettings(AudioRenderCategory.GameChat)
    {
        EncodingProperties = AudioEncodingProperties.CreatePcm(48000, 2, 16),
        DesiredSamplesPerQuantum = 960,
        QuantumSizeSelectionMode = QuantumSizeSelectionMode.ClosestToDesired
    };

    // NOTE(review): the frame output node lives in `ingraph` but is created
    // with `outgraph`'s encoding — looks intentional (frames forwarded to the
    // output graph) but worth confirming.
    frameOutputNode = ingraph.CreateFrameOutputNode(outgraph.EncodingProperties);
    quantum = 0;
    ingraph.QuantumStarted += Graph_QuantumStarted;

    // Resolve the system default capture device.
    Windows.Devices.Enumeration.DeviceInformation defaultCapture =
        await Windows.Devices.Enumeration.DeviceInformation.CreateFromIdAsync(
            Windows.Media.Devices.MediaDevice.GetDefaultAudioCaptureId(Windows.Media.Devices.AudioDeviceRole.Default));

    CreateAudioDeviceInputNodeResult inputResult =
        await ingraph.CreateDeviceInputNodeAsync(MediaCategory.Media, nodeSettings.EncodingProperties, defaultCapture);
    if (inputResult.Status != AudioDeviceNodeCreationStatus.Success)
    {
        // Cannot create device input node
        return;
    }

    deviceInputNode = inputResult.DeviceInputNode;
    deviceInputNode.AddOutgoingConnection(frameOutputNode);

    frameOutputNode.Start();
    ingraph.Start();
}
// (Re)creates the microphone input node and its frame output tap.
// Falls back to the first detected microphone when no device is supplied.
// Returns false (and sets InputDeviceState) on any failure.
public async Task<bool> ResetAudioInput(DeviceInformation microphoneDevice = null)
{
    if (microphoneDevice == null)
    {
        var microphones = await GetAllMicrophoneDevices();
        if (microphones.Count == 0)
        {
            InputDeviceState = InputDeviceState.MicrophoneNotDetected;
            return false;
        }
        microphoneDevice = microphones[0];
    }

    // Mono 16-bit PCM at the Opus conversion sampling rate.
    var captureEncoding = AudioEncodingProperties.CreatePcm(
        OpusConvertConstants.SamplingRate, 1, 16);

    var inputNodeResult = await _AudioGraph.CreateDeviceInputNodeAsync(
        Windows.Media.Capture.MediaCategory.GameChat,
        captureEncoding,
        microphoneDevice);

    if (inputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
    {
        InputDeviceState = inputNodeResult.Status == AudioDeviceNodeCreationStatus.AccessDenied
            ? InputDeviceState.AccessDenied
            : InputDeviceState.UnknowunError;
        return false;
    }

    _InputNode = inputNodeResult.DeviceInputNode;
    _FrameOutputNode = _AudioGraph.CreateFrameOutputNode(captureEncoding);
    _InputNode.AddOutgoingConnection(_FrameOutputNode);
    InputDeviceState = InputDeviceState.Avairable;
    return true;
}
// Builds a lowest-latency media graph with speaker output and a microphone
// input tapped through a frame output node (consumed in QuantumProcessed).
private async Task CreateAudioGraph()
{
    var settings = new AudioGraphSettings(AudioRenderCategory.Media)
    {
        QuantumSizeSelectionMode = QuantumSizeSelectionMode.LowestLatency
    };

    CreateAudioGraphResult result = await AudioGraph.CreateAsync(settings);
    if (result.Status != AudioGraphCreationStatus.Success)
    {
        // Cannot create graph
        _rootPage.NotifyUser(String.Format("AudioGraph Creation Error because {0}", result.Status.ToString()), NotifyType.ErrorMessage);
        return;
    }
    _graph = result.Graph;

    // Create a device output node
    CreateAudioDeviceOutputNodeResult outputResult = await _graph.CreateDeviceOutputNodeAsync();
    if (outputResult.Status != AudioDeviceNodeCreationStatus.Success)
    {
        // Cannot create device output
        _rootPage.NotifyUser(String.Format("Audio Device Output unavailable because {0}", outputResult.Status.ToString()), NotifyType.ErrorMessage);
        speakerContainer.Background = new SolidColorBrush(Colors.Red);
        return;
    }
    _deviceOutputNode = outputResult.DeviceOutputNode;
    _rootPage.NotifyUser("Device Output Node successfully created", NotifyType.StatusMessage);
    speakerContainer.Background = new SolidColorBrush(Colors.Green);

    // Create a device input node using the default audio input device
    CreateAudioDeviceInputNodeResult inputResult = await _graph.CreateDeviceInputNodeAsync(MediaCategory.Media);
    if (inputResult.Status != AudioDeviceNodeCreationStatus.Success)
    {
        // Cannot create device input node
        _rootPage.NotifyUser(String.Format("Audio Device Input unavailable because {0}", inputResult.Status.ToString()), NotifyType.ErrorMessage);
        return;
    }
    _deviceInputNode = inputResult.DeviceInputNode;

    // Tap the microphone into a frame output node; samples are read in the
    // QuantumProcessed handler.
    var micTapNode = _graph.CreateFrameOutputNode();
    _graph.QuantumProcessed += AudioGraph_QuantumProcessed;
    _deviceInputNode.AddOutgoingConnection(micTapNode);
}
// Loads an audio file into a graph whose frame output node mirrors the file's
// own encoding, then starts the graph; QuantumStarted copies each decoded
// frame into the audioData buffer.
private async Task LoadAudioFromFile(StorageFile file)
{
    // Build the AudioGraph that will decode the file.
    var graphSettings = new AudioGraphSettings(Windows.Media.Render.AudioRenderCategory.Media);
    CreateAudioGraphResult graphResult = await AudioGraph.CreateAsync(graphSettings);
    if (graphResult.Status != AudioGraphCreationStatus.Success)
    {
        ShowMessage("AudioGraph creation error: " + graphResult.Status.ToString());
    }
    audioGraph = graphResult.Graph;
    if (audioGraph == null)
    {
        return;
    }

    // Create the file input node for the chosen file.
    CreateAudioFileInputNodeResult fileNodeResult = await audioGraph.CreateFileInputNodeAsync(file);
    if (fileNodeResult.Status != AudioFileNodeCreationStatus.Success)
    {
        ShowMessage("FileInputNode creation error: " + fileNodeResult.Status.ToString());
    }
    fileInputNode = fileNodeResult.FileInputNode;
    if (fileInputNode == null)
    {
        return;
    }

    // Frame output node uses the file's own encoding properties.
    AudioEncodingProperties fileEncoding = fileInputNode.EncodingProperties;
    frameOutputNode = audioGraph.CreateFrameOutputNode(fileEncoding);
    fileInputNode.AddOutgoingConnection(frameOutputNode);

    // Fires when the whole file has been played through the graph.
    fileInputNode.FileCompleted += FileInput_FileCompleted;
    // Transfers every audio frame into audioData.
    audioGraph.QuantumStarted += AudioGraph_QuantumStarted;

    // Buffer size: duration in ticks (100 ns units, hence 1e-7) * sample rate.
    int numOfSamples = (int)Math.Ceiling(
        (decimal)0.0000001
        * fileInputNode.Duration.Ticks
        * fileInputNode.EncodingProperties.SampleRate);
    audioData = new float[numOfSamples];
    audioDataCurrentPosition = 0;

    // Run the graph; QuantumStarted fires as each frame lands in memory.
    audioGraph.Start();
}
// Builds and starts the capture graph (mic -> 16 kHz mono 32-bit PCM frame
// output). Failures surface as a toast; the mutex serializes graph lifecycle.
public async Task Start()
{
    try
    {
        m_mutex.WaitOne();

        // Construct the audio graph
        var result = await AudioGraph.CreateAsync(
            new AudioGraphSettings(AudioRenderCategory.Speech)
            {
                DesiredRenderDeviceAudioProcessing = AudioProcessing.Raw,
                AudioRenderCategory = AudioRenderCategory.Speech
            });
        if (result.Status != AudioGraphCreationStatus.Success)
        {
            throw new Exception("AudioGraph creation error: " + result.Status);
        }
        m_audioGraph = result.Graph;

        var pcmEncoding = AudioEncodingProperties.CreatePcm(16000, 1, 32);
        m_frameOutputNode = m_audioGraph.CreateFrameOutputNode(pcmEncoding);
        // NOTE: removed two dead local assignments the original made to a
        // throwaway `encodingProperties` variable — they had no effect.

        var inputResult = await m_audioGraph.CreateDeviceInputNodeAsync(MediaCategory.Speech, pcmEncoding);
        if (inputResult.Status != AudioDeviceNodeCreationStatus.Success)
        {
            throw new Exception("AudioGraph CreateDeviceInputNodeAsync error: " + inputResult.Status);
        }
        m_deviceInputNode = inputResult.DeviceInputNode;
        m_deviceInputNode.AddOutgoingConnection(m_frameOutputNode);

        m_audioGraph.QuantumStarted += node_QuantumStarted;
        m_audioGraph.Start();
    }
    catch (Exception ex)
    {
        Utils.Toasts.ShowToast("", "AudioInput Start Exception: " + ex.Message);
    }
    finally
    {
        // FIX: release in finally so the mutex can never be left held.
        m_mutex.ReleaseMutex();
    }
}
// Builds and starts a lowest-latency graph: selected mic -> frame output tap,
// frame input node -> speakers.
private async void StartButton_Click(object sender, RoutedEventArgs e)
{
    DeviceInformation SelectedDevice = DevicesBox.SelectedItem as DeviceInformation;

    AudioGraphSettings settings = new AudioGraphSettings(AudioRenderCategory.Media)
    {
        QuantumSizeSelectionMode = QuantumSizeSelectionMode.LowestLatency
    };

    CreateAudioGraphResult result = await AudioGraph.CreateAsync(settings);
    // FIX: the original dereferenced result.Graph without checking Status.
    if (result.Status != AudioGraphCreationStatus.Success)
    {
        System.Diagnostics.Debug.WriteLine(String.Format("AudioGraph creation failed because {0}", result.Status.ToString()));
        return;
    }
    graph = result.Graph;

    // Create a device output node
    CreateAudioDeviceOutputNodeResult deviceOutputNodeResult = await graph.CreateDeviceOutputNodeAsync();
    // FIX: the original also skipped this status check.
    if (deviceOutputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
    {
        System.Diagnostics.Debug.WriteLine(String.Format("Audio Device Output unavailable because {0}", deviceOutputNodeResult.Status.ToString()));
        return;
    }
    AudioDeviceOutputNode deviceOutputNode = deviceOutputNodeResult.DeviceOutputNode;

    // Create a device input node using the selected audio input device
    CreateAudioDeviceInputNodeResult deviceInputNodeResult = await graph.CreateDeviceInputNodeAsync(MediaCategory.Other, graph.EncodingProperties, SelectedDevice);
    if (deviceInputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
    {
        // Cannot create device input node
        System.Diagnostics.Debug.WriteLine(String.Format("Audio Device Input unavailable because {0}", deviceInputNodeResult.Status.ToString()));
        return;
    }
    AudioDeviceInputNode deviceInputNode = deviceInputNodeResult.DeviceInputNode;

    // Capture path: mic -> frame output node.
    frameOutputNode = graph.CreateFrameOutputNode();
    deviceInputNode.AddOutgoingConnection(frameOutputNode);

    // Render path: frame input node -> speakers.
    AudioFrameInputNode frameInputNode = graph.CreateFrameInputNode();
    frameInputNode.AddOutgoingConnection(deviceOutputNode);

    // Attach to QuantumStarted event in order to receive synchronous updates from audio graph (to capture incoming audio).
    graph.QuantumStarted += GraphOnQuantumProcessed;
    graph.Start();
}
// Builds and starts the microphone-analysis graph: default mic -> frame output
// node, samples consumed in QuantumProcessed. Shows a dialog if the microphone
// cannot be accessed; throws if the graph itself cannot be created.
private async void CreateAudioGraphAsync()
{
    var settings = new AudioGraphSettings(AudioRenderCategory.Media)
    {
        //settings.DesiredSamplesPerQuantum = fftLength;
        DesiredRenderDeviceAudioProcessing = AudioProcessing.Default,
        QuantumSizeSelectionMode = QuantumSizeSelectionMode.ClosestToDesired
    };

    CreateAudioGraphResult graphResult = await AudioGraph.CreateAsync(settings);
    if (graphResult.Status != AudioGraphCreationStatus.Success)
    {
        throw new InvalidOperationException($"Graph creation failed {graphResult.Status}");
    }
    _graph = graphResult.Graph;

    //CreateAudioDeviceInputNodeResult inputNodeResult = await _graph.CreateDeviceInputNodeAsync(MediaCategory.Media);
    CreateAudioDeviceInputNodeResult inputNodeResult = await _graph.CreateDeviceInputNodeAsync(MediaCategory.Other);
    if (inputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
    {
        MessageDialog md = new MessageDialog("Cannot access microphone");
        await md.ShowAsync();
        return;
    }

    _inputNode = inputNodeResult.DeviceInputNode;

    // Tap the microphone into a frame output node for analysis.
    _frameOutputNode = _graph.CreateFrameOutputNode();
    _inputNode.AddOutgoingConnection(_frameOutputNode);
    _frameOutputNode.Start();

    _graph.QuantumProcessed += AudioGraph_QuantumProcessed;
    // Handle device disconnection / unrecoverable failures from the running graph.
    _graph.UnrecoverableErrorOccurred += Graph_UnrecoverableErrorOccurred;
    _graph.Start();
}
/// <summary>
/// Tries to create the device input and frame output nodes and to set the
/// outgoing connection between them. Also calculates audioFrameUpdateMinimum.
/// </summary>
/// <returns>Whether or not the attempt was successful</returns>
private static async Task<bool> CreateNodes()
{
    try
    {
        CreateAudioDeviceInputNodeResult deviceInputNodeResult = await graph.CreateDeviceInputNodeAsync(MediaCategory.Other);
        // FIX: the original read DeviceInputNode without checking Status,
        // turning a clean creation failure into a NullReferenceException
        // (silently swallowed by the catch below).
        if (deviceInputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
        {
            return false;
        }
        deviceInputNode = deviceInputNodeResult.DeviceInputNode;

        frameOutputNode = graph.CreateFrameOutputNode(graph.EncodingProperties);
        graph.QuantumStarted += Graph_QuantumStarted;

        // Number of quanta to accumulate before a frame update is processed.
        audioFrameUpdateMinimum = Convert.ToInt32(samplesPerQuantumLimit / graph.SamplesPerQuantum);

        deviceInputNode.AddOutgoingConnection(frameOutputNode);
        return true;
    }
    catch (Exception)
    {
        // Best-effort: any failure simply reports "not created".
        return false;
    }
}
// Builds the Mic-To-MIDI capture graph (default mic -> frame output node) and
// starts a 1 ms DispatcherTimer that polls the node for new samples.
private async void Init()
{
    var settings = new AudioGraphSettings(AudioRenderCategory.Media)
    {
        QuantumSizeSelectionMode = QuantumSizeSelectionMode.LowestLatency
    };

    CreateAudioGraphResult result = await AudioGraph.CreateAsync(settings);
    if (result.Status != AudioGraphCreationStatus.Success)
    {
        mainPage.MessageBox("Could not create input device for Mic To MIDI!");
        return;
    }
    audioGraph = result.Graph;

    CreateAudioDeviceInputNodeResult inputNodeResult = await audioGraph.CreateDeviceInputNodeAsync(MediaCategory.Other);
    if (inputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
    {
        mainPage.MessageBox(String.Format("Audio Device Input unavailable because {0}", inputNodeResult.Status.ToString()));
        return;
    }
    deviceInputNode = inputNodeResult.DeviceInputNode;

    // Mic -> frame output node; samples are polled from Timer_Tick below.
    frameOutputNode = audioGraph.CreateFrameOutputNode();
    deviceInputNode.AddOutgoingConnection(frameOutputNode);
    //audioGraph.QuantumStarted += AudioGraph_QuantumStarted;

    audioGraph.Start();
    deviceInputNode.Start();
    frameOutputNode.Start();

    // Poll for fresh audio data once per millisecond.
    timer = new DispatcherTimer();
    timer.Interval = new TimeSpan(0, 0, 0, 0, 1); // 1 ms
    timer.Tick += Timer_Tick;
    timer.Start();

    periodLengthUK101 = 0;
}
// Builds and starts the translation capture graph:
//   mic -> Machine Translate Service
//   Machine Translation text-to-speech output -> speaker
// Uses 16 kHz mono 16-bit PCM throughout. Throws on any creation failure.
public async Task Start()
{
    var pcmEncoding = AudioEncodingProperties.CreatePcm(16000, 1, 16);

    var result = await AudioGraph.CreateAsync(
        new AudioGraphSettings(AudioRenderCategory.Speech)
        {
            DesiredRenderDeviceAudioProcessing = AudioProcessing.Raw,
            AudioRenderCategory = AudioRenderCategory.Speech,
            EncodingProperties = pcmEncoding
        });
    if (result.Status != AudioGraphCreationStatus.Success)
    {
        throw new Exception("AudioGraph creation error: " + result.Status);
    }
    m_audioGraph = result.Graph;

    m_frameOutputNode = m_audioGraph.CreateFrameOutputNode(pcmEncoding);

    var inputResult = await m_audioGraph.CreateDeviceInputNodeAsync(MediaCategory.Speech, pcmEncoding);
    if (inputResult.Status != AudioDeviceNodeCreationStatus.Success)
    {
        // BUG FIX: the original reported result.Status (the graph-creation
        // status, necessarily Success at this point) instead of the actual
        // failing inputResult.Status.
        throw new Exception("AudioGraph CreateDeviceInputNodeAsync error: " + inputResult.Status);
    }
    m_deviceInputNode = inputResult.DeviceInputNode;
    m_deviceInputNode.AddOutgoingConnection(m_frameOutputNode);

    m_audioGraph.QuantumStarted += node_QuantumStarted;
    m_audioGraph.Start();
}
//</SnippetGenerateAudioData>
//<SnippetCreateFrameOutputNode>
// Creates the frame output node used to read audio samples out of the graph
// and subscribes to QuantumProcessed so samples can be consumed after each
// quantum of audio has been processed.
private void CreateFrameOutputNode()
{
    frameOutputNode = audioGraph.CreateFrameOutputNode();
    audioGraph.QuantumProcessed += AudioGraph_QuantumProcessed;
}
// Starts playback: lazily builds the audio graph, speaker output node, frame
// output tap, and file input node for the current track, then starts the
// graph. If already playing, acts as a pause toggle instead.
private async Task Play()
{
    // Toggle: a second invocation while playing pauses instead.
    if (IsPlaying)
    {
        Pause();
        return;
    }

    // Lazily create the graph on the selected render device.
    if (_audioGraph == null)
    {
        var settings = new AudioGraphSettings(AudioRenderCategory.Media)
        {
            PrimaryRenderDevice = SelectedDevice
        };
        var createResult = await AudioGraph.CreateAsync(settings);
        if (createResult.Status != AudioGraphCreationStatus.Success)
        {
            // Silently give up; caller observes IsPlaying still false.
            return;
        }
        _audioGraph = createResult.Graph;
        _audioGraph.UnrecoverableErrorOccurred += OnAudioGraphError;
    }

    // Lazily create the speaker output node.
    if (_deviceOutputNode == null)
    {
        var deviceResult = await _audioGraph.CreateDeviceOutputNodeAsync();
        if (deviceResult.Status != AudioDeviceNodeCreationStatus.Success)
        {
            return;
        }
        _deviceOutputNode = deviceResult.DeviceOutputNode;
    }

    // Lazily create the frame output tap; frames are consumed (e.g. for FFT)
    // in GraphOnQuantumProcessed.
    if (_frameOutputNode == null)
    {
        _frameOutputNode = _audioGraph.CreateFrameOutputNode();
        _audioGraph.QuantumProcessed += GraphOnQuantumProcessed;
    }

    // Lazily create the file input node for the current track and wire it to
    // both the speakers and the frame tap.
    if (_fileInputNode == null)
    {
        if (CurrentPlayingFile == null)
        {
            return;
        }
        var fileResult = await _audioGraph.CreateFileInputNodeAsync(CurrentPlayingFile);
        if (fileResult.Status != AudioFileNodeCreationStatus.Success)
        {
            return;
        }
        _fileInputNode = fileResult.FileInputNode;
        _fileInputNode.AddOutgoingConnection(_deviceOutputNode);
        _fileInputNode.AddOutgoingConnection(_frameOutputNode);
        Duration = _fileInputNode.Duration;
        // PlaybackSpeed and Volume are UI percentages; the node takes factors.
        _fileInputNode.PlaybackSpeedFactor = PlaybackSpeed / 100.0;
        _fileInputNode.OutgoingGain = Volume / 100.0;
        _fileInputNode.FileCompleted += FileInputNodeOnFileCompleted;
    }

    Debug.WriteLine($" CompletedQuantumCount: {_audioGraph.CompletedQuantumCount}");
    Debug.WriteLine($"SamplesPerQuantum: {_audioGraph.SamplesPerQuantum}");
    Debug.WriteLine($"LatencyInSamples: {_audioGraph.LatencyInSamples}");

    // FFT provider sized to the graph's channel count.
    var channelCount = (int)_audioGraph.EncodingProperties.ChannelCount;
    _fftProvider = new FftProvider(channelCount, FftSize.Fft2048);

    _audioGraph.Start();
    IsPlaying = true;
}
// Loads the given audio file into the graph and reads all of its samples into
// the AudioData buffer, polling until FileCompleted signals the end.
// NOTE(review): the declaration's modifiers/return type fall outside this view
// (presumably "private async Task<CreateAudioFileInputNodeResult>") — confirm
// against the full file.
LoadAudioFromFile(
    StorageFile file,
    IProgress <string> status)
{
    // _finished is set by FileInput_FileCompleted once the whole file has
    // passed through the graph.
    _finished = false;
    status.Report("Reading audio file");

    // Initialize FileInputNode
    var inputNodeCreationResult =
        await _audioGraph.CreateFileInputNodeAsync(file);

    if (inputNodeCreationResult.Status != AudioFileNodeCreationStatus.Success)
    {
        return(inputNodeCreationResult);
    }
    _fileInputNode = inputNodeCreationResult.FileInputNode;

    // Read audio file encoding properties to pass them
    // to FrameOutputNode creator
    var audioEncodingProperties = _fileInputNode.EncodingProperties;

    // Initialize FrameOutputNode and connect it to fileInputNode;
    // keep it stopped until the graph starts pumping quanta.
    _frameOutputNode = _audioGraph.CreateFrameOutputNode(
        audioEncodingProperties
        );
    _frameOutputNode.Stop();
    _fileInputNode.AddOutgoingConnection(_frameOutputNode);

    // Add a handler for reaching the end of the file
    _fileInputNode.FileCompleted += FileInput_FileCompleted;
    // Add a handler which will transfer every audio frame into audioData
    _audioGraph.QuantumStarted += FileInput_QuantumStarted;

    // Initialize audioData: duration in ticks (100 ns units, hence the 1e-7
    // factor) times sample rate gives the per-channel sample count.
    var numOfSamples = (int)Math.Ceiling(
        (decimal)0.0000001
        * _fileInputNode.Duration.Ticks
        * _fileInputNode.EncodingProperties.SampleRate
        );

    if (audioEncodingProperties.ChannelCount == 1)
    {
        SetAudioData(new AudioDataMono(new float[numOfSamples]));
    }
    else
    {
        SetAudioData(new AudioDataStereo(new float[numOfSamples],
                                         new float[numOfSamples]));
    }

    _audioDataCurrentPosition = 0;

    // Start process which will read audio file frame by frame
    // and will generate QuantumStarted events when a frame is in memory
    _audioGraph.Start();

    // didn't find a better way to wait for data
    while (!_finished)
    {
        await Task.Delay(50);
    }

    // clear status line
    status.Report("");

    return(inputNodeCreationResult);
}
/// <summary>
/// Create input audio graph
/// </summary>
/// <param name="deviceId">Override for default input device id</param>
public static async Task <bool> CreateInputDeviceNode(string deviceId = null)
{
    // If a graph exists for a different device, dispose it and rebuild;
    // otherwise just bump the shared use counter.
    // NOTE(review): the counter is NOT incremented on the rebuild path —
    // confirm whether HeavyDisposeInGraph() accounts for that.
    if (ingraph != null && deviceId != InputDeviceID)
    {
        HeavyDisposeInGraph();
    }
    // Increment use counter
    else
    {
        inGraphCount++;
    }

    Console.WriteLine("Creating AudioGraphs");

    // Create an AudioGraph with default settings: 48 kHz stereo 32-bit float.
    AudioGraphSettings graphsettings = new AudioGraphSettings(AudioRenderCategory.Media);
    graphsettings.EncodingProperties = new AudioEncodingProperties();
    graphsettings.EncodingProperties.Subtype = "Float";
    graphsettings.EncodingProperties.SampleRate = 48000;
    graphsettings.EncodingProperties.ChannelCount = 2;
    graphsettings.EncodingProperties.BitsPerSample = 32;
    graphsettings.EncodingProperties.Bitrate = 3072000;

    CreateAudioGraphResult graphresult = await AudioGraph.CreateAsync(graphsettings);
    if (graphresult.Status != AudioGraphCreationStatus.Success)
    {
        // Cannot create graph: roll back the counter and force self-mute.
        inGraphCount--;
        LocalState.VoiceState.SelfMute = true;
        VoiceManager.lockMute = true;
        return(false);
    }

    // "Save" graph
    ingraph = graphresult.Graph;

    // Create frameOutputNode (game-chat category, 48 kHz stereo 32-bit PCM,
    // ~960 samples per quantum requested).
    AudioGraphSettings nodesettings = new AudioGraphSettings(AudioRenderCategory.GameChat);
    nodesettings.EncodingProperties = AudioEncodingProperties.CreatePcm(48000, 2, 32);
    nodesettings.DesiredSamplesPerQuantum = 960;
    nodesettings.QuantumSizeSelectionMode = QuantumSizeSelectionMode.ClosestToDesired;
    frameOutputNode = ingraph.CreateFrameOutputNode(ingraph.EncodingProperties);
    quantum = 0;
    ingraph.QuantumStarted += Graph_QuantumStarted;

    // Determine selected device
    DeviceInformation selectedDevice;
    if (deviceId == "Default" || deviceId == null)
    {
        // Resolve the current system-default capture device and subscribe to
        // future default-device changes.
        string device = Windows.Media.Devices.MediaDevice.GetDefaultAudioCaptureId(Windows.Media.Devices.AudioDeviceRole.Default);
        if (!string.IsNullOrEmpty(device))
        {
            selectedDevice = await DeviceInformation.CreateFromIdAsync(device);
            Windows.Media.Devices.MediaDevice.DefaultAudioCaptureDeviceChanged += MediaDevice_DefaultAudioCaptureDeviceChanged;
        }
        else
        {
            // No default capture device available: roll back and force self-mute.
            inGraphCount--;
            LocalState.VoiceState.SelfMute = true;
            VoiceManager.lockMute = true;
            return(false);
        }
    }
    else
    {
        // Use the requested device, falling back to the system default when
        // the stored id no longer resolves.
        try
        {
            selectedDevice = await DeviceInformation.CreateFromIdAsync(deviceId);
        }
        catch
        {
            selectedDevice = await DeviceInformation.CreateFromIdAsync(Windows.Media.Devices.MediaDevice.GetDefaultAudioCaptureId(Windows.Media.Devices.AudioDeviceRole.Default));
            deviceId = "Default";
        }
    }

    CreateAudioDeviceInputNodeResult result = await ingraph.CreateDeviceInputNodeAsync(MediaCategory.Media, nodesettings.EncodingProperties, selectedDevice);
    if (result.Status != AudioDeviceNodeCreationStatus.Success)
    {
        // Cannot create device input node: roll back and force self-mute.
        inGraphCount--;
        LocalState.VoiceState.SelfMute = true;
        VoiceManager.lockMute = true;
        return(false);
    }

    // Attach input device
    deviceInputNode = result.DeviceInputNode;
    deviceInputNode.AddOutgoingConnection(frameOutputNode);
    InputDeviceID = deviceId;

    // Begin playing
    frameOutputNode.Start();
    ingraph.Start();
    return(true);
}
// Page entry point: locates the USB audio input/output devices, initializes
// the LED strips, then builds an AudioGraph that wires the USB input to both a
// frame output tap (for the LED visualization) and the Pi's audio output.
protected override async void OnNavigatedTo(NavigationEventArgs e)
{
    // Pick the first capture device whose name mentions "usb".
    var audioInputDevices = await DeviceInformation.FindAllAsync(DeviceClass.AudioCapture);
    foreach (var device in audioInputDevices)
    {
        if (device.Name.ToLower().Contains("usb"))
        {
            audioInput = device;
            break;
        }
    }
    if (audioInput == null)
    {
        Debug.WriteLine("Could not find USB audio card");
        return;
    }

    // Split render devices: the USB card vs. the Pi's onboard output.
    // NOTE(review): no break here, so the *last* matching device wins in each
    // category — confirm this is intended.
    var audioOutputDevices = await DeviceInformation.FindAllAsync(DeviceClass.AudioRender);
    foreach (var device in audioOutputDevices)
    {
        if (device.Name.ToLower().Contains("usb"))
        {
            audioOutput = device;
        }
        else
        {
            raspiAudioOutput = device;
        }
    }
    if (audioOutput == null)
    {
        Debug.WriteLine("Could not find USB audio card");
        return;
    }

    // Set up LED strips
    await leftStrip.Begin();
    await rightStrip.Begin();
    //await AudioTest();

    // Graph renders to the Pi's onboard output with ~440-sample quanta.
    AudioGraphSettings audioGraphSettings = new AudioGraphSettings(AudioRenderCategory.Media);
    audioGraphSettings.DesiredSamplesPerQuantum = 440;
    audioGraphSettings.DesiredRenderDeviceAudioProcessing = AudioProcessing.Default;
    audioGraphSettings.QuantumSizeSelectionMode = QuantumSizeSelectionMode.ClosestToDesired;
    audioGraphSettings.PrimaryRenderDevice = raspiAudioOutput;

    CreateAudioGraphResult audioGraphResult = await AudioGraph.CreateAsync(audioGraphSettings);
    if (audioGraphResult.Status != AudioGraphCreationStatus.Success)
    {
        Debug.WriteLine("AudioGraph creation failed! " + audioGraphResult.Status);
        return;
    }
    audioGraph = audioGraphResult.Graph;
    //Debug.WriteLine(audioGraph.SamplesPerQuantum);

    // The USB capture device feeds the graph at the graph's own encoding.
    CreateAudioDeviceInputNodeResult inputNodeResult = await audioGraph.CreateDeviceInputNodeAsync(MediaCategory.Media, audioGraph.EncodingProperties, audioInput);
    if (inputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
    {
        Debug.WriteLine("AudioDeviceInputNode creation failed! " + inputNodeResult.Status);
        return;
    }
    AudioDeviceInputNode inputNode = inputNodeResult.DeviceInputNode;

    // NOTE(review): on failure this only logs — execution continues and
    // outputNodeResult.DeviceOutputNode below would be null; confirm whether
    // a return was intended here.
    CreateAudioDeviceOutputNodeResult outputNodeResult = await audioGraph.CreateDeviceOutputNodeAsync();
    if (outputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
    {
        Debug.WriteLine("AudioDeviceOutputNode creation failed!" + outputNodeResult.Status);
    }
    AudioDeviceOutputNode outputNode = outputNodeResult.DeviceOutputNode;

    // Tap frames for the LED visualization and also pass the audio through.
    frameOutputNode = audioGraph.CreateFrameOutputNode();
    inputNode.AddOutgoingConnection(frameOutputNode);
    inputNode.AddOutgoingConnection(outputNode);

    cube.SetSpeedStripLedColors(LedColorLists.rainbowColors);

    audioGraph.QuantumProcessed += AudioGraph_QuantumProcessed;
    audioGraph.UnrecoverableErrorOccurred += AudioGraph_UnrecoverableErrorOccurred;

    audioGraph.Start();
    outputNode.Start();
    inputNode.Start();
    frameOutputNode.Start();

    cube.Reset();
    cube.Update();

    //await MathFunc();
    //cube.ApplyColorFunction((x, y, z) =>
    //{
    //    Color c = Color.FromArgb(255,
    //        (byte)((x / 14.0) * 255.0),
    //        (byte)((y / 14.0) * 255.0),
    //        (byte)((z / 14.0) * 255.0));
    //    return c;
    //});
    //cube.SetLedColors();
    //cube.Update();
    //cube.bottomFrontEdge.SetColor(Colors.Red);
    //cube.bottomRightEdge.SetColor(Colors.OrangeRed);
    //cube.bottomBackEdge.SetColor(Colors.Yellow);
    //cube.bottomLeftEdge.SetColor(Colors.Green);
    //cube.frontLeftEdge.SetColor(Colors.Blue);
    //cube.frontTopEdge.SetColor(Colors.Purple);
    //cube.rightLeftEdge.Brightness = 10;
    //cube.rightLeftEdge.SetColor(Colors.Red);
    //cube.rightTopEdge.Brightness = 10;
    //cube.rightTopEdge.SetColor(Colors.OrangeRed);
    //cube.backLeftEdge.Brightness = 10;
    //cube.backLeftEdge.SetColor(Colors.Yellow);
    //cube.backTopEdge.Brightness = 10;
    //cube.backTopEdge.SetColor(Colors.Green);
    //cube.leftLeftEdge.Brightness = 10;
    //cube.leftLeftEdge.SetColor(Colors.Blue);
    //cube.leftTopEdge.Brightness = 10;
    //cube.leftTopEdge.SetColor(Colors.Purple);
    //cube.Update();
    //await RainbowTest();
    //cube.Brightness = 30;
    //await FlashTest();
    //SetAll();
    //await FadeTest();
    //cube.Reset();
    //cube.Update();
    //await cube.rightLeftEdge.DoLine();
    //ZackTest();
}
// Creates the frame output node used to pull raw audio frames from the graph
// and subscribes to QuantumStarted so a frame can be read at the start of each
// audio quantum.
private void CreateFrameOutputNode()
{
    _frameOutputNode = _audioGraph.CreateFrameOutputNode();
    _audioGraph.QuantumStarted += AudioGraph_QuantumStarted;
}
// Creates the frame output node that receives captured microphone audio for
// Tox: microphone input -> _toxOutputNode, drained each quantum in the
// QuantumProcessed handler.
private void CreateToxOutputNode()
{
    _toxOutputNode = _audioGraph.CreateFrameOutputNode();
    _audioGraph.QuantumProcessed += AudioGraphQuantumProcessedHandler;
    _microphoneInputNode.AddOutgoingConnection(_toxOutputNode);
}
// Create the AudioGraph: default mic -> frame output node for data processing.
// The quantum size is requested as close as possible to desiredSamples.
private async Task CreateAudioGraph()
{
    var settings = new AudioGraphSettings(AudioRenderCategory.SoundEffects)
    {
        QuantumSizeSelectionMode = QuantumSizeSelectionMode.ClosestToDesired,
        DesiredSamplesPerQuantum = desiredSamples,
        DesiredRenderDeviceAudioProcessing = Windows.Media.AudioProcessing.Default
    };

    CreateAudioGraphResult result = await AudioGraph.CreateAsync(settings);
    if (result.Status != AudioGraphCreationStatus.Success)
    {
        // Cannot create graph
        ShowErrorMessage(string.Format("AudioGraph Creation Error because {0}", result.Status.ToString()));
        return;
    }
    graph = result.Graph;

    // Create a device input node using the default audio input device
    CreateAudioDeviceInputNodeResult inputResult = await graph.CreateDeviceInputNodeAsync(MediaCategory.Other);
    if (inputResult.Status != AudioDeviceNodeCreationStatus.Success)
    {
        // Cannot create device input node
        ShowErrorMessage(string.Format("Audio Device Input unavailable because {0}", inputResult.Status.ToString()));
        return;
    }
    deviceInputNode = inputResult.DeviceInputNode;

    // Frame output node feeds data processing; consumed once per processed quantum.
    frameOutputNode = graph.CreateFrameOutputNode();
    graph.QuantumProcessed += AudioGraph_QuantumProcessed;

    // Link the nodes together
    deviceInputNode.AddOutgoingConnection(frameOutputNode);

    // Handle device disconnection / unrecoverable errors from the running graph.
    graph.UnrecoverableErrorOccurred += Graph_UnrecoverableErrorOccurred;
}