Example #1
        private async void ToxInputNodeQuantumStartedHandler(AudioFrameInputNode sender,
                                                             FrameInputNodeQuantumStartedEventArgs args)
        {
            if (!await _receiveBuffer.OutputAvailableAsync())
            {
                return;
            }

            short[] shorts;
            var     successfulReceive = _receiveBuffer.TryReceive(out shorts);

            if (!successfulReceive)
            {
                return;
            }

            // GenerateAudioData can provide PCM audio data by synthesizing it directly or by reading it from a file.
            // We need to know how many samples are required. In this case, the node runs at the same rate as the rest of the graph.
            // For minimum latency, provide only the required number of samples; extra samples introduce additional latency.
            var numSamplesNeeded = (uint)args.RequiredSamples;

            if (numSamplesNeeded == 0)
            {
                return;
            }

            var audioData = GenerateAudioData(numSamplesNeeded, shorts);

            _toxInputNode.AddFrame(audioData);
        }
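
GenerateAudioData isn't shown in this example. A minimal sketch, assuming a mono float graph, 16-bit PCM in the received buffer, and the standard IMemoryBufferByteAccess interop declaration (shown under Example #4 below); the names and scaling are illustrative, not the original implementation:

        private unsafe AudioFrame GenerateAudioData(uint samples, short[] source)
        {
            // One float per sample (mono); the frame is sized for exactly the requested quantum.
            var frame = new AudioFrame(samples * sizeof(float));

            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
            using (IMemoryBufferReference reference = buffer.CreateReference())
            {
                ((IMemoryBufferByteAccess)reference).GetBuffer(out byte* dataInBytes, out uint capacity);
                float* dataInFloat = (float*)dataInBytes;

                // Convert 16-bit PCM into the [-1, 1] float range the graph expects,
                // stopping at whichever runs out first: the request or the received data.
                int count = (int)Math.Min(samples, (uint)source.Length);
                for (int i = 0; i < count; i++)
                {
                    dataInFloat[i] = source[i] / 32768f;
                }
            }

            return frame;
        }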
        private void FrameInputNode_QuantumStarted(
            AudioFrameInputNode sender,
            FrameInputNodeQuantumStartedEventArgs args)
        {
            if (_audioDataCurrentPosition == 0)
            {
                _fileOutputNode.Start();
            }

            // It doesn't matter how many samples were requested; always process one full graph quantum.
            var frame = ProcessOutputFrame(_audioGraph.SamplesPerQuantum);

            _frameInputNode.AddFrame(frame);

            if (_finished)
            {
                _fileOutputNode?.Stop();
                _audioGraph?.Stop();
            }

            if (_audioGraph == null)
            {
                return;
            }

            // Report progress only every 100 quanta so the handler isn't flooded.
            if (_audioGraph.CompletedQuantumCount % 100 == 0)
            {
                var dProgress =
                    (double)100 *
                    _audioDataCurrentPosition /
                    _audioData.LengthSamples();
                _ioProgress?.Report(dProgress);
            }
        }
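
ProcessOutputFrame isn't shown either. A plausible sketch given the fields used above (_audioDataCurrentPosition, _audioData.LengthSamples(), _finished); _audioData.Sample(i) is a hypothetical accessor for the decoded float samples:

        private unsafe AudioFrame ProcessOutputFrame(int samplesPerQuantum)
        {
            var frame = new AudioFrame((uint)(samplesPerQuantum * sizeof(float)));

            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
            using (IMemoryBufferReference reference = buffer.CreateReference())
            {
                ((IMemoryBufferByteAccess)reference).GetBuffer(out byte* dataInBytes, out uint _);
                float* dataInFloat = (float*)dataInBytes;

                for (int i = 0; i < samplesPerQuantum; i++)
                {
                    if (_audioDataCurrentPosition < _audioData.LengthSamples())
                    {
                        dataInFloat[i] = _audioData.Sample(_audioDataCurrentPosition++);
                    }
                    else
                    {
                        // Pad the tail of the last quantum with silence and flag completion.
                        dataInFloat[i] = 0f;
                        _finished = true;
                    }
                }
            }

            return frame;
        }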
Example #3
        /// <summary>
        /// Audio output: fills the frame input node from the Discord audio stream each quantum.
        /// </summary>
        /// <param name="sender">The frame input node requesting samples.</param>
        /// <param name="args">Provides RequiredSamples for this quantum.</param>
        private async void FrameInputNode_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
        {
            if (AudioInStream == null)
            {
                return;
                //throw new Exception("not connected to discord audio channel.");
            }

            if (AudioInStream.AvailableFrames == 0)
            {
                return;
            }

            uint numSamplesNeeded = (uint)args.RequiredSamples;

            if (numSamplesNeeded == 0)
            {
                return;
            }

            // Ideally audioData should be sized to match the frame buffer size inside AudioInStream, though it isn't here.
            var sampleNeededBytes = numSamplesNeeded * OpusConvertConstants.SampleBytes * OpusConvertConstants.Channels;

            // Note: should this be held as a static buffer?
            var audioData = new byte[sampleNeededBytes];

            var result = await AudioInStream.ReadAsync(audioData, 0, (int)sampleNeededBytes);

            AudioFrame audioFrame = GenerateAudioData(audioData, (uint)result);

            sender.AddFrame(audioFrame);
        }
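
A sketch of what GenerateAudioData could look like here, copying the bytes read from AudioInStream straight into a frame; it assumes the frame input node was created with matching 16-bit PCM encoding properties, which the snippet doesn't show:

        private unsafe AudioFrame GenerateAudioData(byte[] pcmBytes, uint validBytes)
        {
            var frame = new AudioFrame(validBytes);

            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
            using (IMemoryBufferReference reference = buffer.CreateReference())
            {
                ((IMemoryBufferByteAccess)reference).GetBuffer(out byte* dataInBytes, out uint capacity);

                // Raw copy: the stream already delivers interleaved PCM in the right layout.
                fixed (byte* src = pcmBytes)
                {
                    Buffer.MemoryCopy(src, dataInBytes, capacity, validBytes);
                }
            }

            return frame;
        }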
Example #4
        public static unsafe void AddFrame(float[] framedata, uint samples)
        {
            if (!ready)
            {
                return;
            }

            AudioFrame frame = new AudioFrame(samples * 2 * sizeof(float));

            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
                using (IMemoryBufferReference reference = buffer.CreateReference())
                {
                    byte *dataInBytes;
                    uint  capacityInBytes;

                    // Get the buffer from the AudioFrame
                    ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);
                    // Cast to float since the data we are generating is float
                    float *dataInFloat = (float *)dataInBytes;
                    fixed(float *frames = framedata)
                    {
                        for (int i = 0; i < samples * 2; i++)
                        {
                            dataInFloat[i] = frames[i];
                        }
                    }
                }

            frameInputNode.AddFrame(frame);
        }
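
Every snippet here that casts an IMemoryBufferReference to IMemoryBufferByteAccess relies on the standard COM interop declaration from the Windows documentation (System.Runtime.InteropServices):

        [ComImport]
        [Guid("5B0D3235-4DBA-4D44-865E-8F1D0E4FD04D")]
        [InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
        unsafe interface IMemoryBufferByteAccess
        {
            void GetBuffer(out byte* buffer, out uint capacity);
        }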
        private void node_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
        {
            uint numSamplesNeeded = (uint)args.RequiredSamples;

            if (numSamplesNeeded != 0)
            {
                AudioFrame audioData = ReadAudioData(numSamplesNeeded);
                frameInputNode.AddFrame(audioData);
            }
        }
        private void InputNodeQuantumStartedHandler(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
        {
            if (args.RequiredSamples < 1)
            {
                return;
            }

            AudioFrame frame = GenerateAudioData(args.RequiredSamples);

            sender.AddFrame(frame);
        }
 // For creating audio frames on the fly
 private void node_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
 {
     // GenerateAudioData can provide PCM audio data by synthesizing it directly or by reading it from a file.
     // We need to know how many samples are required. In this case, the node runs at the same rate as the rest of the graph.
     // For minimum latency, provide only the required number of samples; extra samples introduce additional latency.
     uint numSamplesNeeded = (uint)args.RequiredSamples;
     if (numSamplesNeeded != 0)
     {
         AudioFrame audioData = GenerateAudioData(numSamplesNeeded);
         frameInputNode.AddFrame(audioData);
     }
 }
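
The documentation pairs this handler with a GenerateAudioData that synthesizes a sine wave. A sketch along those lines, with theta kept as a field and graph assumed to be the running AudioGraph:

 private double theta = 0;

 private unsafe AudioFrame GenerateAudioData(uint samples)
 {
     // Float samples: the buffer holds one 4-byte float per requested sample.
     uint bufferSize = samples * sizeof(float);
     AudioFrame frame = new AudioFrame(bufferSize);

     using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
     using (IMemoryBufferReference reference = buffer.CreateReference())
     {
         ((IMemoryBufferByteAccess)reference).GetBuffer(out byte* dataInBytes, out uint capacityInBytes);
         float* dataInFloat = (float*)dataInBytes;

         float freq = 440f;        // tone frequency in Hz
         float amplitude = 0.3f;
         int sampleRate = (int)graph.EncodingProperties.SampleRate;
         double sampleIncrement = (freq * (Math.PI * 2)) / sampleRate;

         // Advance the phase across quanta so the tone is continuous.
         for (int i = 0; i < samples; i++)
         {
             dataInFloat[i] = (float)(amplitude * Math.Sin(theta));
             theta += sampleIncrement;
         }
     }

     return frame;
 }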
Example #8
        private async void MockAudioGraph(AudioFrame audioFrame)
        {
            AudioGraphSettings settings = new AudioGraphSettings(Windows.Media.Render.AudioRenderCategory.Media);
            var result = await AudioGraph.CreateAsync(settings);

            // Guard against graph-creation failure before using the result.
            if (result.Status != AudioGraphCreationStatus.Success)
            {
                return;
            }

            var graph = result.Graph;

            audioFrameInputNode = graph.CreateFrameInputNode();

            //<SnippetAudioFrameInputNode>
            audioFrameInputNode.AddFrame(audioFrame);
            //</SnippetAudioFrameInputNode>
        }
Example #9
 public bool Write(Int16[] samples)
 {
     if (graph == null)
     {
         return true;
     }
     frameInputNode.AddFrame(GenerateAudioData(samples));
     FrameAdded++;
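     // Simple pre-buffer: don't start the node until at least five frames are queued ahead.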
     if (FrameAdded - FrameCompleted >= 5)
     {
         frameInputNode.Start();
     }
     return true;
 }
        private unsafe void FrameInputNode_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
        {
            var        bufferSize = args.RequiredSamples * sizeof(float) * 2;
            AudioFrame audioFrame = new AudioFrame((uint)bufferSize);

            if (fileStream == null)
            {
                return;
            }
            using (var audioBuffer = audioFrame.LockBuffer(AudioBufferAccessMode.Write))
            {
                using (var bufferReference = audioBuffer.CreateReference())
                {
                    byte * dataInBytes;
                    uint   capacityInBytes;
                    float *dataInFloat;

                    // Get the buffer from the AudioFrame
                    ((IMemoryBufferByteAccess)bufferReference).GetBuffer(out dataInBytes, out capacityInBytes);
                    dataInFloat = (float *)dataInBytes;

                    var managedBuffer = new byte[capacityInBytes];

                    var lastLength = fileStream.Length - fileStream.Position;
                    int readLength = (int)(lastLength < capacityInBytes ? lastLength : capacityInBytes);
                    if (readLength <= 0)
                    {
                        fileStream.Close();
                        fileStream = null;
                        return;
                    }
                    fileStream.Read(managedBuffer, 0, readLength);

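                    // Copy in 8-byte groups, swapping the two 4-byte halves of each group
                    // (presumably exchanging the left and right float samples). A partial
                    // tail group is harmless: managedBuffer is capacityInBytes long (a
                    // multiple of 8) and zero-filled past readLength.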
                    for (int i = 0; i < readLength; i += 8)
                    {
                        dataInBytes[i + 4] = managedBuffer[i + 0];
                        dataInBytes[i + 5] = managedBuffer[i + 1];
                        dataInBytes[i + 6] = managedBuffer[i + 2];
                        dataInBytes[i + 7] = managedBuffer[i + 3];
                        dataInBytes[i + 0] = managedBuffer[i + 4];
                        dataInBytes[i + 1] = managedBuffer[i + 5];
                        dataInBytes[i + 2] = managedBuffer[i + 6];
                        dataInBytes[i + 3] = managedBuffer[i + 7];
                    }
                }
            }

            audioFrameInputNode.AddFrame(audioFrame);
        }
Example #11
        private void InputNodeQuantumStarted(AudioFrameInputNode inputNode, FrameInputNodeQuantumStartedEventArgs e, Track track)
        {
            if (Status == AudioStatus.Playing)
            {
                var samples = track.Read(e.RequiredSamples);

                if (samples != null)
                {
                    using (var frame = GenerateFrameFromSamples(samples))
                    {
                        inputNode.AddFrame(frame);
                    }
                }
            }
        }
Example #12
        private void OnQuantumStarted(AudioFrameInputNode node, FrameInputNodeQuantumStartedEventArgs args)
        {
            var numSamplesNeeded = args.RequiredSamples;

            if (numSamplesNeeded != 0)
            {
                var audioData = GenerateAudioData(numSamplesNeeded);
                m_frameInputNode.AddFrame(audioData);
            }

            if (!m_isRunning && !m_isFlushing)
            {
                OnAudioComplete();
                m_frameInputNode.Stop();
                m_audioGraph.Stop();
            }
        }
Example #13
        /// <summary>
        /// Add frame to out graph queue
        /// </summary>
        /// <param name="framedata">raw frame data</param>
        /// <param name="samples">sample count</param>
        public static unsafe void AddFrame(float[] framedata, uint samples)
        {
            // not ready, return
            if (!ready)
            {
                return;
            }

            AudioFrame frame = new AudioFrame(samples * 2 * sizeof(float));

            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
                using (IMemoryBufferReference reference = buffer.CreateReference())
                {
                    // Get the buffer from the AudioFrame
                    ((IMemoryBufferByteAccess)reference).GetBuffer(out byte *dataInBytes, out uint _);

                    // Cast to float since the data we are generating is float
                    float *dataInFloat = (float *)dataInBytes;
                    fixed(float *frames = framedata)
                    {
                        for (int i = 0; i < samples * 2; i++)
                        {
                            dataInFloat[i] = frames[i];
                        }
                    }
                }

            // Don't bother computing the spectrum if deafened; just zero the outputs
            if (LocalState.VoiceState.SelfDeaf || LocalState.VoiceState.ServerDeaf)
            {
                AudioOutSpec1   = 0;
                AudioOutSpec2   = 0;
                AudioOutSpec3   = 0;
                AudioOutSpec4   = 0;
                AudioOutSpec5   = 0;
                AudioOutSpec6   = 0;
                AudioOutSpec7   = 0;
                AudioOutSpec8   = 0;
                AudioOutSpec9   = 0;
                AudioOutAverage = 0;
            }
            else
            {
                // Determine FFT data
                List <float[]> amplitudeData = FFT.Processing.HelperMethods.ProcessFrameOutput(frame);
                List <float[]> channelData   = FFT.Processing.HelperMethods.GetFftData(FFT.Processing.HelperMethods.ConvertTo512(amplitudeData, outgraph), outgraph);

                float[] leftChannel = channelData[1];

                // Assign each FFT data out channel
                AudioOutSpec1   = HelperMethods.Max(leftChannel, 0, 1);
                AudioOutSpec2   = HelperMethods.Max(leftChannel, 2, 3);
                AudioOutSpec3   = HelperMethods.Max(leftChannel, 3, 4);
                AudioOutSpec4   = HelperMethods.Max(leftChannel, 4, 5);
                AudioOutSpec5   = HelperMethods.Max(leftChannel, 5, 6);
                AudioOutSpec6   = HelperMethods.Max(leftChannel, 7, 8);
                AudioOutSpec7   = HelperMethods.Max(leftChannel, 9, 10);
                AudioOutSpec8   = HelperMethods.Max(leftChannel, 10, 12);
                AudioOutSpec9   = HelperMethods.Max(leftChannel, 14, 26);
                AudioOutAverage = (AudioOutSpec1 + AudioOutSpec2 + AudioOutSpec3 + AudioOutSpec4 + AudioOutSpec5 + AudioOutSpec6 + AudioOutSpec7 + AudioOutSpec8 + AudioOutSpec9) / 9;
            }

            // Add frame to queue
            frameInputNode.AddFrame(frame);
        }
Example #14
        private async void InitializeAsync()
        {
            await UpdatePlaylistAsync();

            MasterVolume = 100;

            await AudioSourceManager.InitializeAsync(new FileSystem(), "GroorineCore");

            var settings = new AudioGraphSettings(AudioRenderCategory.Media);

            CreateAudioGraphResult result = await AudioGraph.CreateAsync(settings);

            if (result.Status != AudioGraphCreationStatus.Success)
            {
                await new MessageDialog("Can't create AudioGraph! Application will stop...").ShowAsync();
                Application.Current.Exit();
            }

            _graph = result.Graph;

            CreateAudioDeviceOutputNodeResult deviceOutputNodeResult = await _graph.CreateDeviceOutputNodeAsync();

            if (deviceOutputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                await new MessageDialog("Can't create DeviceOutputNode! Application will stop...").ShowAsync();
                Application.Current.Exit();
            }
            _deviceOutputNode = deviceOutputNodeResult.DeviceOutputNode;

            AudioEncodingProperties nodeEncodingProperties = _graph.EncodingProperties;

            nodeEncodingProperties.ChannelCount = 2;

            _frameInputNode = _graph.CreateFrameInputNode(nodeEncodingProperties);
            _frameInputNode.AddOutgoingConnection(_deviceOutputNode);

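            // Keep the input node stopped while the player is wired up; it's started together with the graph below.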
            _frameInputNode.Stop();
            _player = new Player((int)nodeEncodingProperties.SampleRate);

            _player.PropertyChanged += (sender, args) =>
            {
                switch (args.PropertyName)
                {
                case nameof(_player.IsPlaying):
                    _synchronizationContext.Post(o =>
                    {
                        if (!_player.IsPlaying && !_player.IsPausing && IsPlaying)
                        {
                            IsPlaying = CanStop = false;
                        }
                    }, null);
                    break;
                }
            };

            _frameInputNode.QuantumStarted += (sender, args) =>
            {
                var numSamplesNeeded = (uint)args.RequiredSamples;

                if (numSamplesNeeded != 0)
                {
                    AudioFrame audioData = GenerateAudioData(numSamplesNeeded);
                    _frameInputNode.AddFrame(audioData);
                }
            };

            _graph.Start();
            _frameInputNode.Start();

            IsInitialized = true;
        }
Example #15
 private void OnQuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs e)
 {
     if (e.RequiredSamples > 0)
     {
         sender.AddFrame(GenerateAudioFrame(e.RequiredSamples));
     }
 }