        public async Task Init()
        {
            AudioGraphSettings audioGraphSettings = new AudioGraphSettings(Windows.Media.Render.AudioRenderCategory.Media);
            var result = await AudioGraph.CreateAsync(audioGraphSettings);

            if (result == null || result.Status != AudioGraphCreationStatus.Success)
            {
                return;
            }
            audioGraph = result.Graph;

            var createAudioDeviceOutputResult = await audioGraph.CreateDeviceOutputNodeAsync();

            if (createAudioDeviceOutputResult == null || createAudioDeviceOutputResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                return;
            }
            deviceOutputNode = createAudioDeviceOutputResult.DeviceOutputNode;

            AudioEncodingProperties audioEncodingProperties = new AudioEncodingProperties();

            audioEncodingProperties.BitsPerSample = 32;
            audioEncodingProperties.ChannelCount  = 2;
            audioEncodingProperties.SampleRate    = 44100;
            audioEncodingProperties.Subtype       = MediaEncodingSubtypes.Float;

            audioFrameInputNode = audioGraph.CreateFrameInputNode(audioEncodingProperties);
            audioFrameInputNode.QuantumStarted += FrameInputNode_QuantumStarted;

            audioFrameInputNode.AddOutgoingConnection(deviceOutputNode);
            audioGraph.Start();
        }
Example #2
        public void Stop()
        {
            m_audioDataMutex.WaitOne();
            m_isRunning  = false;
            m_isFlushing = false;

            if (m_audioGraph != null)
            {
                m_audioGraph.Stop();
            }

            if (m_deviceOutputNode != null)
            {
                m_deviceOutputNode.Dispose();
                m_deviceOutputNode = null;
            }

            if (m_frameInputNode != null)
            {
                m_frameInputNode.Dispose();
                m_frameInputNode = null;
            }

            if (m_audioGraph != null)
            {
                m_audioGraph.Dispose();
                m_audioGraph = null;
            }
            m_audioData = null;
            m_audioDataMutex.ReleaseMutex();
        }
Example #3
        public void StartAudioOutput(Discord.Audio.AudioInStream audioInStream)
        {
            AudioInStream = audioInStream;

            // Create the audio graph frame input node used for voice output
            // Note: must be 2 channels and 16-bit samples to match Discord
            #region DO NOT TOUCH

            _FrameInputNode = _AudioGraph.CreateFrameInputNode(
                AudioEncodingProperties.CreatePcm(
                    OpusConvertConstants.SamplingRate,
                    OpusConvertConstants.Channels,
                    16
                    ));

            #endregion

            // Connect to the default output node
            _FrameInputNode.AddOutgoingConnection(_OutputNode);


            _FrameInputNode.QuantumStarted += FrameInputNode_QuantumStarted;

            _FrameInputNode.Start();

            _AudioGraph.Start();
        }
        private void FrameInputNode_QuantumStarted(
            AudioFrameInputNode sender,
            FrameInputNodeQuantumStartedEventArgs args)
        {
            if (_audioDataCurrentPosition == 0)
            {
                _fileOutputNode.Start();
            }

            // Regardless of how many samples were requested, always process one full quantum
            var frame = ProcessOutputFrame(_audioGraph.SamplesPerQuantum);

            _frameInputNode.AddFrame(frame);

            if (_finished)
            {
                _fileOutputNode?.Stop();
                _audioGraph?.Stop();
            }

            if (_audioGraph == null)
            {
                return;
            }

            // report progress only every 100 quanta to avoid flooding the progress sink
            if (_audioGraph.CompletedQuantumCount % 100 == 0)
            {
                var dProgress =
                    (double)100 *
                    _audioDataCurrentPosition /
                    _audioData.LengthSamples();
                _ioProgress?.Report(dProgress);
            }
        }
Example #5
        /// <summary>
        /// QuantumStarted handler for voice output: reads PCM data from the Discord audio stream and feeds it to the frame input node.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="args"></param>
        private async void FrameInputNode_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
        {
            if (AudioInStream == null)
            {
                return;
                //throw new Exception("not connected to discord audio channel.");
            }

            if (AudioInStream.AvailableFrames == 0)
            {
                return;
            }

            uint numSamplesNeeded = (uint)args.RequiredSamples;

            if (numSamplesNeeded == 0)
            {
                return;
            }

            // audioData should ideally be the same size as the buffer carried by each frame in AudioInStream
            var sampleNeededBytes = numSamplesNeeded * OpusConvertConstants.SampleBytes * OpusConvertConstants.Channels;

            // Note: should this buffer be kept as a static field?
            var audioData = new byte[sampleNeededBytes];

            var result = await AudioInStream.ReadAsync(audioData, 0, (int)sampleNeededBytes);

            AudioFrame audioFrame = GenerateAudioData(audioData, (uint)result);

            sender.AddFrame(audioFrame);
        }
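The GenerateAudioData(byte[], uint) helper called above is not part of this listing. The sketch below is one plausible shape for it, assuming using System.Runtime.InteropServices plus the UWP Windows.Media.Audio and Windows.Media.MediaProperties namespaces, and the documented IMemoryBufferByteAccess COM interop interface for raw buffer access; it simply copies the raw 16-bit PCM bytes into a new AudioFrame.

        [ComImport]
        [Guid("5B0D3235-4DBA-4D44-865E-8F1D0E4FD04D")]
        [InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
        unsafe interface IMemoryBufferByteAccess
        {
            void GetBuffer(out byte* buffer, out uint capacity);
        }

        private unsafe AudioFrame GenerateAudioData(byte[] pcmBytes, uint byteCount)
        {
            // Allocate a frame large enough for the raw PCM payload
            var frame = new AudioFrame(byteCount);

            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
            using (IMemoryBufferReference reference = buffer.CreateReference())
            {
                byte* dataInBytes;
                uint  capacityInBytes;

                // Get the raw byte pointer of the frame's buffer
                ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);

                // Copy the 16-bit PCM bytes straight into the frame
                for (int i = 0; i < byteCount && i < capacityInBytes; i++)
                {
                    dataInBytes[i] = pcmBytes[i];
                }
            }

            return frame;
        }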
Example #6
        private async void ToxInputNodeQuantumStartedHandler(AudioFrameInputNode sender,
                                                             FrameInputNodeQuantumStartedEventArgs args)
        {
            if (!await _receiveBuffer.OutputAvailableAsync())
            {
                return;
            }

            short[] shorts;
            var     successfulReceive = _receiveBuffer.TryReceive(out shorts);

            if (!successfulReceive)
            {
                return;
            }

            // GenerateAudioData can provide PCM audio data by directly synthesizing it or reading from a file.
            // Need to know how many samples are required. In this case, the node is running at the same rate as the rest of the graph
            // For minimum latency, only provide the required amount of samples. Extra samples will introduce additional latency.
            var numSamplesNeeded = (uint)args.RequiredSamples;

            if (numSamplesNeeded == 0)
            {
                return;
            }

            var audioData = GenerateAudioData(numSamplesNeeded, shorts);

            _toxInputNode.AddFrame(audioData);
        }
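GenerateAudioData(uint, short[]) is likewise not shown in the source. A minimal sketch under the assumption that the node runs in mono 32-bit float (the graph's default format with ChannelCount = 1) follows, reusing the IMemoryBufferByteAccess declaration from the sketch above; it scales each 16-bit sample into the [-1, 1] float range.

        private unsafe AudioFrame GenerateAudioData(uint samples, short[] shorts)
        {
            uint bufferSizeInBytes = samples * sizeof(float); // mono float samples
            var frame = new AudioFrame(bufferSizeInBytes);

            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
            using (IMemoryBufferReference reference = buffer.CreateReference())
            {
                byte* dataInBytes;
                uint  capacityInBytes;
                ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);
                float* dataInFloat = (float*)dataInBytes;

                // Convert each 16-bit sample to float; pad with silence if the
                // received buffer is shorter than the quantum being filled.
                for (int i = 0; i < samples; i++)
                {
                    dataInFloat[i] = i < shorts.Length ? shorts[i] / 32768f : 0f;
                }
            }

            return frame;
        }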
 private void DisposeGraph()
 {
     Stop();
     InputNode  = null;
     OutputNode = null;
     Graph      = null;
     SampleRate = NullSampleRate;
 }
        double previousSample;  // for wrapping from buffer to buffer

        public Note(AudioGraph graph, AudioFrameInputNode node, double freq)
        {
            parentGraph    = graph;
            noteSynth      = node;
            frequency      = freq;
            angle          = 0.0;
            previousSample = 0.0;  // always start notes on the baseline
        }
Example #9
        public void StopAudioOutput()
        {
            AudioInStream = null;

            _FrameInputNode?.Stop();
            _FrameInputNode?.Dispose();
            _FrameInputNode = null;
        }
Example #10
        private async Task InitializeAudioAsync()
        {
            // Create an AudioGraph with default settings
            AudioGraphSettings settings = new AudioGraphSettings(AudioRenderCategory.Media);

            settings.EncodingProperties = AudioEncodingProperties.CreatePcm(22050, 1, 16);

            CreateAudioGraphResult result = await AudioGraph.CreateAsync(settings);

            if (result.Status != AudioGraphCreationStatus.Success)
            {
                return;
            }

            _graph = result.Graph;

            // Create a device output node
            CreateAudioDeviceOutputNodeResult deviceOutputNodeResult = await _graph.CreateDeviceOutputNodeAsync();

            if (deviceOutputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                return;
            }

            _deviceOutputNode = deviceOutputNodeResult.DeviceOutputNode;

            CreateAudioDeviceInputNodeResult deviceInputNodeResult = await _graph.CreateDeviceInputNodeAsync(MediaCategory.Other);

            if (deviceInputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                return;
            }

            _deviceInputNode = deviceInputNodeResult.DeviceInputNode;

            // Create the FrameInputNode at the same format as the graph, except explicitly set mono.
            AudioEncodingProperties nodeEncodingProperties = _graph.EncodingProperties;

            nodeEncodingProperties.ChannelCount = 1;
            _frameInputNode = _graph.CreateFrameInputNode(nodeEncodingProperties);
            _frameInputNode.AddOutgoingConnection(_deviceOutputNode);


            _frameOutputNode = _graph.CreateFrameOutputNode(nodeEncodingProperties);
            _deviceInputNode.AddOutgoingConnection(_frameOutputNode);

            // Initialize the Frame Input Node in the stopped state
            _frameInputNode.Stop();

            // Hook up an event handler so we can start generating samples when needed
            // This event is triggered when the node is required to provide data
            _frameInputNode.QuantumStarted += node_QuantumStarted;

            _graph.QuantumProcessed += GraphOnQuantumProcessed;

            // Start the graph since we will only start/stop the frame input node
            _graph.Start();
        }
        public NoteMC(AudioGraph graph, AudioFrameInputNode node, double freq)
        {
            parentGraph = graph;
            noteSynth   = node;
            AddFrequency(freq);
            AddAngle(0.0);

            previousSample = 0.0;  // always start notes on the baseline
        }
        private void node_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
        {
            uint numSamplesNeeded = (uint)args.RequiredSamples;

            if (numSamplesNeeded != 0)
            {
                AudioFrame audioData = ReadAudioData(numSamplesNeeded);
                frameInputNode.AddFrame(audioData);
            }
        }
        private void InputNodeQuantumStartedHandler(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
        {
            if (args.RequiredSamples < 1)
            {
                return;
            }

            AudioFrame frame = GenerateAudioData(args.RequiredSamples);

            sender.AddFrame(frame);
        }
 // Creates FrameInputNode for taking in audio frames
 private void CreateFrameInputNode()
 {
     // Create the FrameInputNode at the same format as the graph, except explicitly set mono.
     AudioEncodingProperties nodeEncodingProperties = audioGraph.EncodingProperties;
     frameInputNode = audioGraph.CreateFrameInputNode(nodeEncodingProperties);
     // Initialize the Frame Input Node in the stopped state
     frameInputNode.Stop();
     // Hook up an event handler so we can start generating samples when needed
     // This event is triggered when the node is required to provide data
     frameInputNode.QuantumStarted += node_QuantumStarted;
 }
 // For creating audio frames on the fly
 private void node_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
 {
     // GenerateAudioData can provide PCM audio data by directly synthesizing it or reading from a file.
     // Need to know how many samples are required. In this case, the node is running at the same rate as the rest of the graph
     // For minimum latency, only provide the required amount of samples. Extra samples will introduce additional latency.
     uint numSamplesNeeded = (uint)args.RequiredSamples;
     if (numSamplesNeeded != 0)
     {
         AudioFrame audioData = GenerateAudioData(numSamplesNeeded);
         frameInputNode.AddFrame(audioData);
     }
 }
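As the comment says, GenerateAudioData can synthesize PCM directly. Below is a minimal sine-wave version, modelled on the pattern in Microsoft's AudioCreation sample and assuming a mono float graph, an audioGraph field, and the IMemoryBufferByteAccess declaration from the earlier sketch:

        private double theta; // phase carried across quanta so the tone stays continuous

        private unsafe AudioFrame GenerateAudioData(uint samples)
        {
            // Buffer size is (number of samples) * (bytes per sample), mono float
            uint bufferSizeInBytes = samples * sizeof(float);
            var frame = new AudioFrame(bufferSizeInBytes);

            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
            using (IMemoryBufferReference reference = buffer.CreateReference())
            {
                byte* dataInBytes;
                uint  capacityInBytes;
                ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);
                float* dataInFloat = (float*)dataInBytes;

                const double frequency = 440.0; // A4 test tone
                double sampleIncrement = frequency * (Math.PI * 2) / audioGraph.EncodingProperties.SampleRate;

                for (int i = 0; i < samples; i++)
                {
                    dataInFloat[i] = (float)Math.Sin(theta);
                    theta += sampleIncrement;
                }
            }

            return frame;
        }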
Example #16
        private void CreateToxInputNode()
        {
            // Create the FrameInputNode at the same format as the graph, except explicitly set mono.
            var nodeEncodingProperties = _audioGraph.EncodingProperties;

            nodeEncodingProperties.ChannelCount = 1;
            _toxInputNode = _audioGraph.CreateFrameInputNode(nodeEncodingProperties);

            // Hook up an event handler so we can start generating samples when needed
            // This event is triggered when the node is required to provide data
            _toxInputNode.QuantumStarted += ToxInputNodeQuantumStartedHandler;
        }
Example #17
        private async void MockAudioGraph(AudioFrame audioFrame)
        {
            AudioGraphSettings settings = new AudioGraphSettings(Windows.Media.Render.AudioRenderCategory.Media);
            var result = await AudioGraph.CreateAsync(settings);

            if (result.Status != AudioGraphCreationStatus.Success)
            {
                return;
            }

            var graph = result.Graph;

            audioFrameInputNode = graph.CreateFrameInputNode();

            //<SnippetAudioFrameInputNode>
            audioFrameInputNode.AddFrame(audioFrame);
            //</SnippetAudioFrameInputNode>
        }
        private unsafe void FrameInputNode_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
        {
            var        bufferSize = args.RequiredSamples * sizeof(float) * 2;
            AudioFrame audioFrame = new AudioFrame((uint)bufferSize);

            if (fileStream == null)
            {
                return;
            }
            using (var audioBuffer = audioFrame.LockBuffer(AudioBufferAccessMode.Write))
            {
                using (var bufferReference = audioBuffer.CreateReference())
                {
                    byte * dataInBytes;
                    uint   capacityInBytes;
                    float *dataInFloat;

                    // Get the buffer from the AudioFrame
                    ((IMemoryBufferByteAccess)bufferReference).GetBuffer(out dataInBytes, out capacityInBytes);
                    dataInFloat = (float *)dataInBytes;

                    var managedBuffer = new byte[capacityInBytes];

                    var lastLength = fileStream.Length - fileStream.Position;
                    int readLength = (int)(lastLength < capacityInBytes ? lastLength : capacityInBytes);
                    if (readLength <= 0)
                    {
                        fileStream.Close();
                        fileStream = null;
                        return;
                    }
                    fileStream.Read(managedBuffer, 0, readLength);

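                    // Each 8-byte group is one stereo frame of two 32-bit float samples;
                    // swapping the 4-byte halves exchanges the channel order stored in the file.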
                    for (int i = 0; i + 7 < readLength; i += 8)
                    {
                        dataInBytes[i + 4] = managedBuffer[i + 0];
                        dataInBytes[i + 5] = managedBuffer[i + 1];
                        dataInBytes[i + 6] = managedBuffer[i + 2];
                        dataInBytes[i + 7] = managedBuffer[i + 3];
                        dataInBytes[i + 0] = managedBuffer[i + 4];
                        dataInBytes[i + 1] = managedBuffer[i + 5];
                        dataInBytes[i + 2] = managedBuffer[i + 6];
                        dataInBytes[i + 3] = managedBuffer[i + 7];
                    }
                }
            }

            audioFrameInputNode.AddFrame(audioFrame);
        }
Example #19
        private void InputNodeQuantumStarted(AudioFrameInputNode inputNode, FrameInputNodeQuantumStartedEventArgs e, Track track)
        {
            if (Status == AudioStatus.Playing)
            {
                var samples = track.Read(e.RequiredSamples);

                if (samples != null)
                {
                    using (var frame = GenerateFrameFromSamples(samples))
                    {
                        inputNode.AddFrame(frame);
                    }
                }
            }
        }
Example #20
        private async Task CreateAudioGraph()
        {
            // Create an AudioGraph with default settings
            AudioGraphSettings     settings = new AudioGraphSettings(AudioRenderCategory.Media);
            CreateAudioGraphResult result   = await AudioGraph.CreateAsync(settings);

            if (result.Status != AudioGraphCreationStatus.Success)
            {
                // Cannot create graph
                rootPage.NotifyUser(String.Format("AudioGraph Creation Error because {0}", result.Status.ToString()), NotifyType.ErrorMessage);
                return;
            }

            graph = result.Graph;

            // Create a device output node
            CreateAudioDeviceOutputNodeResult deviceOutputNodeResult = await graph.CreateDeviceOutputNodeAsync();

            if (deviceOutputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                // Cannot create device output node
                rootPage.NotifyUser(String.Format("Audio Device Output unavailable because {0}", deviceOutputNodeResult.Status.ToString()), NotifyType.ErrorMessage);
                speakerContainer.Background = new SolidColorBrush(Colors.Red);
                return;
            }

            deviceOutputNode = deviceOutputNodeResult.DeviceOutputNode;
            rootPage.NotifyUser("Device Output Node successfully created", NotifyType.StatusMessage);
            speakerContainer.Background = new SolidColorBrush(Colors.Green);

            // Create the FrameInputNode at the same format as the graph, except explicitly set mono.
            AudioEncodingProperties nodeEncodingProperties = graph.EncodingProperties;

            nodeEncodingProperties.ChannelCount = 1;
            frameInputNode = graph.CreateFrameInputNode(nodeEncodingProperties);
            frameInputNode.AddOutgoingConnection(deviceOutputNode);
            frameContainer.Background = new SolidColorBrush(Colors.Green);

            // Initialize the Frame Input Node in the stopped state
            frameInputNode.Stop();

            // Hook up an event handler so we can start generating samples when needed
            // This event is triggered when the node is required to provide data
            frameInputNode.QuantumStarted += node_QuantumStarted;

            // Start the graph since we will only start/stop the frame input node
            graph.Start();
        }
Example #21
        private AudioGraphOutput(AudioGraph audioGraph, AudioDeviceOutputNode deviceOutputNode)
        {
            AudioGraph = audioGraph ?? throw new ArgumentNullException(nameof(audioGraph));
            AudioEncodingProperties nodeEncodingProperties = audioGraph.EncodingProperties;

            //nodeEncodingProperties.ChannelCount = Channels;
            frameInputNode = audioGraph.CreateFrameInputNode(nodeEncodingProperties);
            frameInputNode.AddOutgoingConnection(deviceOutputNode);
            // Initialize the Frame Input Node in the stopped state
            frameInputNode.Stop();

            // Hook up an event handler so we can start generating samples when needed
            // This event is triggered when the node is required to provide data
            frameInputNode.QuantumStarted += Node_QuantumStarted;
            sampleSize = sizeof(float) * AudioGraph.EncodingProperties.ChannelCount;
            sampleCap  = int.MaxValue - (int)(int.MaxValue % sampleSize); // largest multiple of sampleSize that fits in an int
        }
        private async Task ReconstructGraph(uint sampleRate)
        {
            GraphReconstructionInProgress = true;

            if (sampleRate == NullSampleRate) // If the sample rate is invalid, do not create a graph; return so the caller can retry later
            {
                GraphReconstructionInProgress = false;
                return;
            }

            DisposeGraph();

            SampleRate = sampleRate;
            MinNumSamplesForPlayback    = (int)(SampleRate * PlaybackDelaySeconds);
            MaxNumSamplesForTargetDelay = (int)(SampleRate * MaxAllowedDelaySeconds);

            var graphResult = await AudioGraph.CreateAsync(new AudioGraphSettings(Windows.Media.Render.AudioRenderCategory.GameMedia));

            if (graphResult.Status != AudioGraphCreationStatus.Success)
            {
                DisposeGraph();
                throw new Exception($"Unable to create audio graph: {graphResult.Status.ToString()}");
            }
            Graph = graphResult.Graph;
            Graph.Stop();

            var outNodeResult = await Graph.CreateDeviceOutputNodeAsync();

            if (outNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                DisposeGraph();
                throw new Exception($"Unable to create device node: {outNodeResult.Status.ToString()}");
            }
            OutputNode = outNodeResult.DeviceOutputNode;

            var nodeProperties = Graph.EncodingProperties;

            nodeProperties.ChannelCount = NumChannels;
            nodeProperties.SampleRate   = SampleRate;

            InputNode = Graph.CreateFrameInputNode(nodeProperties);
            InputNode.QuantumStarted += InputNodeQuantumStartedHandler;
            InputNode.AddOutgoingConnection(OutputNode);

            GraphReconstructionInProgress = false;
        }
Example #23
        private void OnQuantumStarted(AudioFrameInputNode node, FrameInputNodeQuantumStartedEventArgs args)
        {
            var numSamplesNeeded = args.RequiredSamples;

            if (numSamplesNeeded != 0)
            {
                var audioData = GenerateAudioData(numSamplesNeeded);
                m_frameInputNode.AddFrame(audioData);
            }

            if (!m_isRunning && !m_isFlushing)
            {
                OnAudioComplete();
                m_frameInputNode.Stop();
                m_audioGraph.Stop();
            }
        }
Example #24
        private async Task CreateAudioGraph(uint samplingRate)
        {
            // Create an AudioGraph with default settings
            var encoding = MediaEncodingProfile.CreateWav(AudioEncodingQuality.Auto);

            encoding.Audio = AudioEncodingProperties.CreatePcm(samplingRate, 1, 16);
            var settings = new AudioGraphSettings(AudioRenderCategory.Speech);

            settings.EncodingProperties = encoding.Audio;
            CreateAudioGraphResult result = await AudioGraph.CreateAsync(settings);

            if (result.Status != AudioGraphCreationStatus.Success)
            {
                return;
            }

            graph = result.Graph;
            graph.EncodingProperties.SampleRate = samplingRate;

            // Create a device output node
            CreateAudioDeviceOutputNodeResult deviceOutputNodeResult = await graph.CreateDeviceOutputNodeAsync();

            if (deviceOutputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                return;
            }

            deviceOutputNode = deviceOutputNodeResult.DeviceOutputNode;

            // Create the FrameInputNode at the same format as the graph, except explicitly set mono.
            AudioEncodingProperties nodeEncodingProperties = graph.EncodingProperties;

            nodeEncodingProperties.ChannelCount = 1;
            frameInputNode = graph.CreateFrameInputNode(nodeEncodingProperties);
            frameInputNode.AddOutgoingConnection(deviceOutputNode);

            // Initialize the Frame Input Node in the stopped state
            frameInputNode.Stop();

            frameInputNode.AudioFrameCompleted += FrameInputNode_AudioFrameCompleted;
            //frameInputNode.QuantumStarted += node_QuantumStarted;

            // Start the graph since we will only start/stop the frame input node
            graph.Start();
        }
Example #25
 /// <summary>
 /// Heavy dispose of the output graph
 /// </summary>
 public static void HeavyDisposeOutGraph()
 {
     // Clear data
     outgraph?.Dispose();
     frameInputNode   = null;
     deviceOutputNode = null;
     outgraph         = null;
     AudioOutSpec1    = 0;
     AudioOutSpec2    = 0;
     AudioOutSpec3    = 0;
     AudioOutSpec4    = 0;
     AudioOutSpec5    = 0;
     AudioOutSpec6    = 0;
     AudioOutSpec7    = 0;
     AudioOutSpec8    = 0;
     AudioOutSpec9    = 0;
     AudioOutAverage  = 0;
 }
Example #26
        private async void StartButton_Click(object sender, RoutedEventArgs e)
        {
            DeviceInformation  SelectedDevice = DevicesBox.SelectedItem as DeviceInformation;
            AudioGraphSettings settings       = new AudioGraphSettings(AudioRenderCategory.Media)
            {
                QuantumSizeSelectionMode = QuantumSizeSelectionMode.LowestLatency
            };

            CreateAudioGraphResult result = await AudioGraph.CreateAsync(settings);

            if (result.Status != AudioGraphCreationStatus.Success)
            {
                return;
            }

            graph = result.Graph;

            // Create a device output node
            CreateAudioDeviceOutputNodeResult deviceOutputNodeResult = await graph.CreateDeviceOutputNodeAsync();

            if (deviceOutputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                return;
            }

            AudioDeviceOutputNode deviceOutputNode = deviceOutputNodeResult.DeviceOutputNode;

            // Create a device input node using the default audio input device
            CreateAudioDeviceInputNodeResult deviceInputNodeResult = await graph.CreateDeviceInputNodeAsync(MediaCategory.Other, graph.EncodingProperties, SelectedDevice);

            if (deviceInputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                // Cannot create device input node
                System.Diagnostics.Debug.WriteLine(String.Format("Audio Device Input unavailable because {0}", deviceInputNodeResult.Status.ToString()));

                return;
            }

            AudioDeviceInputNode deviceInputNode = deviceInputNodeResult.DeviceInputNode;

            frameOutputNode = graph.CreateFrameOutputNode();
            deviceInputNode.AddOutgoingConnection(frameOutputNode);

            AudioFrameInputNode frameInputNode = graph.CreateFrameInputNode();

            frameInputNode.AddOutgoingConnection(deviceOutputNode);

            // Attach to QuantumStarted event in order to receive synchronous updates from audio graph (to capture incoming audio).
            graph.QuantumStarted += GraphOnQuantumProcessed;

            graph.Start();
        }
        /// <summary>
        /// Setup an AudioGraph with PCM input node and output for media playback
        /// </summary>
        private async Task CreateAudioGraph()
        {
            AudioGraphSettings     graphSettings = new AudioGraphSettings(Windows.Media.Render.AudioRenderCategory.Media);
            CreateAudioGraphResult graphResult   = await AudioGraph.CreateAsync(graphSettings);

            if (graphResult.Status != AudioGraphCreationStatus.Success)
            {
                UpdateUI(() =>
                {
                    this.Messages.Add(new MessageDisplay($"Error in AudioGraph construction: {graphResult.Status}", Sender.Other));
                });
                return;
            }

            audioGraph = graphResult.Graph;

            CreateAudioDeviceOutputNodeResult outputResult = await audioGraph.CreateDeviceOutputNodeAsync();

            if (outputResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                UpdateUI(() =>
                {
                    this.Messages.Add(new MessageDisplay($"Error in audio OutputNode construction: {outputResult.Status}", Sender.Other));
                });
                return;
            }

            outputNode = outputResult.DeviceOutputNode;

            // Create the FrameInputNode using PCM format; 16kHz, 1 channel, 16 bits per sample
            AudioEncodingProperties nodeEncodingProperties = AudioEncodingProperties.CreatePcm(16000, 1, 16);

            frameInputNode = audioGraph.CreateFrameInputNode(nodeEncodingProperties);
            frameInputNode.AddOutgoingConnection(outputNode);

            // Initialize the FrameInputNode in the stopped state
            frameInputNode.Stop();

            // Hook up an event handler so we can start generating samples when needed
            // This event is triggered when the node is required to provide data
            frameInputNode.QuantumStarted += node_QuantumStarted;

            audioGraph.Start();
        }
Example #28
        public static async Task CreateDeviceOutputNode()
        {
            Console.WriteLine("Creating AudioGraphs");
            // Create an AudioGraph with default settings
            AudioGraphSettings graphsettings = new AudioGraphSettings(AudioRenderCategory.GameChat);

            graphsettings.EncodingProperties               = new AudioEncodingProperties();
            graphsettings.EncodingProperties.Subtype       = "Float";
            graphsettings.EncodingProperties.SampleRate    = 48000;
            graphsettings.EncodingProperties.ChannelCount  = 2;
            graphsettings.EncodingProperties.BitsPerSample = 32;
            graphsettings.EncodingProperties.Bitrate       = 3072000;
            CreateAudioGraphResult graphresult = await AudioGraph.CreateAsync(graphsettings);

            if (graphresult.Status != AudioGraphCreationStatus.Success)
            {
                // Cannot create graph
                return;
            }

            outgraph = graphresult.Graph;

            // Create a device output node
            CreateAudioDeviceOutputNodeResult deviceOutputNodeResult = await outgraph.CreateDeviceOutputNodeAsync();

            if (deviceOutputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                // Cannot create device output node
                return;
            }

            deviceOutputNode = deviceOutputNodeResult.DeviceOutputNode;

            // Create the FrameInputNode at the same format as the graph, except explicitly set stereo.
            frameInputNode = outgraph.CreateFrameInputNode(outgraph.EncodingProperties);
            frameInputNode.AddOutgoingConnection(deviceOutputNode);
            frameInputNode.Start();
            ready = true;
            outgraph.Start();
        }
Example #29
        public async Task Start()
        {
            m_isFlushing       = false;
            m_isRunning        = false;
            m_waveBufferReader = null;

            var settings = new AudioGraphSettings(AudioRenderCategory.Media);
            var result   = await AudioGraph.CreateAsync(settings);

            if (result.Status != AudioGraphCreationStatus.Success)
            {
                throw new Exception("AudioGraph creation error: " + result.Status);
            }

            m_audioGraph = result.Graph;


            var outputDeviceResult = await m_audioGraph.CreateDeviceOutputNodeAsync();

            if (outputDeviceResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                throw new Exception("Unable to create audio playback device: " + result.Status);
            }

            m_deviceOutputNode = outputDeviceResult.DeviceOutputNode;

            // Create the FrameInputNode at the same format as the graph,
            var nodeEncodingProperties = m_audioGraph.EncodingProperties;

            nodeEncodingProperties.ChannelCount = 1;
            nodeEncodingProperties.SampleRate   = 16000;
            m_frameInputNode = m_audioGraph.CreateFrameInputNode(nodeEncodingProperties);
            m_frameInputNode.AddOutgoingConnection(m_deviceOutputNode);
            m_frameInputNode.QuantumStarted += OnQuantumStarted;

            m_isRunning  = true;
            m_isFlushing = false;
            m_audioGraph.Start();
        }
        private async void Page_Loaded(object sender, RoutedEventArgs e)
        {
            // midi

            var s = MidiInPort.GetDeviceSelector();
            var information = await DeviceInformation.FindAllAsync(s);

            var list = information.ToList();
            port = await MidiInPort.FromIdAsync(list.ElementAt(2).Id);
            port.MessageReceived += Port_MessageReceived;

            // audio
            var settings = new AudioGraphSettings(AudioRenderCategory.GameEffects);
            settings.QuantumSizeSelectionMode = QuantumSizeSelectionMode.LowestLatency;
            var creation = await AudioGraph.CreateAsync(settings);

            if (creation.Status != AudioGraphCreationStatus.Success)
            {
                return;
            }

            graph = creation.Graph;
            output = await graph.CreateDeviceOutputNodeAsync();

            var encoding = graph.EncodingProperties;
            encoding.ChannelCount = 1;
            input = graph.CreateFrameInputNode(encoding);
            input.AddOutgoingConnection(output.DeviceOutputNode);
            input.Stop();

            input.QuantumStarted += Input_QuantumStarted;

            graph.Start();

            // midi notes (pitch to note)

            float a = 440; // a is 440 hz...
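            // Equal-temperament mapping: note x -> (a / 32) * 2^((x - 9) / 12); e.g. note 69 (A4) gives 13.75 * 32 = 440 Hz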
            for (int x = 0; x < 127; ++x)
            {
                notes[x] = (a / 32f) * (float)Math.Pow(2f, ((x - 9f) / 12f));
            }
        }
        public async void StartListening(ServiceViewModel model)
        {
            this.viewModel = model;

            await this.viewModel.SpeechClient.Clear();

            var fromValue  = this.viewModel.SelectedSpeechLanguage.Abbreviation;
            var toValue    = this.viewModel.SelectedTextLanguage.Abbreviation;
            var voiceValue = this.viewModel.SelectedSpeechVoice.Name;

            await this.viewModel.SpeechHelper.Connect(fromValue, toValue, voiceValue, this.DisplayResult, this.SendAudioOut);

            var pcmEncoding = Windows.Media.MediaProperties.AudioEncodingProperties.CreatePcm(16000, 1, 16);

            var result = await Windows.Media.Audio.AudioGraph.CreateAsync(
                new Windows.Media.Audio.AudioGraphSettings(Windows.Media.Render.AudioRenderCategory.Speech)
            {
                DesiredRenderDeviceAudioProcessing = Windows.Media.AudioProcessing.Raw,
                AudioRenderCategory = Windows.Media.Render.AudioRenderCategory.Speech,
                EncodingProperties  = pcmEncoding
            });

            if (result.Status == Windows.Media.Audio.AudioGraphCreationStatus.Success)
            {
                this.graph = result.Graph;

                var microphone = await DeviceInformation.CreateFromIdAsync(this.viewModel.SelectedMicrophone.Id);

                this.speechTranslateOutputMode = this.graph.CreateFrameOutputNode(pcmEncoding);
                this.graph.QuantumProcessed   += (s, a) => this.SendToSpeechTranslate(this.speechTranslateOutputMode.GetFrame());

                this.speechTranslateOutputMode.Start();

                var micInputResult = await this.graph.CreateDeviceInputNodeAsync(Windows.Media.Capture.MediaCategory.Speech, pcmEncoding, microphone);

                if (micInputResult.Status == Windows.Media.Audio.AudioDeviceNodeCreationStatus.Success)
                {
                    micInputResult.DeviceInputNode.AddOutgoingConnection(this.speechTranslateOutputMode);
                    micInputResult.DeviceInputNode.Start();
                }
                else
                {
                    throw new InvalidOperationException();
                }

                var speakerOutputResult = await this.graph.CreateDeviceOutputNodeAsync();

                if (speakerOutputResult.Status == Windows.Media.Audio.AudioDeviceNodeCreationStatus.Success)
                {
                    this.speakerOutputNode = speakerOutputResult.DeviceOutputNode;
                    this.speakerOutputNode.Start();
                }
                else
                {
                    throw new InvalidOperationException();
                }

                this.textToSpeechOutputNode = this.graph.CreateFrameInputNode(pcmEncoding);
                this.textToSpeechOutputNode.AddOutgoingConnection(this.speakerOutputNode);
                this.textToSpeechOutputNode.Start();

                this.graph.Start();
            }
        }
Example #32
        void ISynthesizer.SetUp()
        {
            using (WavePlayer player = WavePlayer.CreateWavePlayer())
            {
            }

            AudioGraphSettings settings = new AudioGraphSettings(AudioRenderCategory.Media)
            {
                //QuantumSizeSelectionMode = QuantumSizeSelectionMode.LowestLatency
            };

            AudioGraph.CreateAsync(settings).AsTask().ContinueWith(graphTask =>
            {
                CreateAudioGraphResult graphResult = graphTask.Result;

                if(graphResult.Status != AudioGraphCreationStatus.Success)
                {
                    this.EmitFailed();
                }
                else
                {
                    graphResult.Graph.CreateDeviceOutputNodeAsync().AsTask().ContinueWith(nodeTask =>
                    {
                        CreateAudioDeviceOutputNodeResult nodeResult = nodeTask.Result;

                        if(nodeResult.Status != AudioDeviceNodeCreationStatus.Success)
                        {
                            this.EmitFailed();
                        }
                        else
                        {
                            _audioGraph = graphResult.Graph;
                            _frameInputNode = _audioGraph.CreateFrameInputNode();
                            _frameInputNode.AddOutgoingConnection(nodeResult.DeviceOutputNode);
                            _frameInputNode.QuantumStarted += this.OnQuantumStarted;
                            _channelsNumber = _audioGraph.EncodingProperties.ChannelCount;
                            _waveSource = new WaveSource(_audioGraph.EncodingProperties.SampleRate, _channelsNumber);
                            this.EmitReady();
                        }
                    });
                }
            });
        }
        private void Input_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
        {
            uint samplesNeeded = (uint)args.RequiredSamples;

            if (samplesNeeded != 0)
            {
                AudioFrame frame = GenerateAudio(samplesNeeded);
                input.AddFrame(frame);
            }
        }
 public AudioFrameInputNodeEvents(AudioFrameInputNode This)
 {
     this.This = This;
 }
Example #39
 private void OnQuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs e)
 {
     if(e.RequiredSamples > 0)
     {
         sender.AddFrame(GenerateAudioFrame(e.RequiredSamples));
     }
 }