/// <summary>
/// Supplies PCM audio from the receive buffer to the Tox input node each quantum.
/// Declared async void because it is a top-level event handler.
/// </summary>
/// <param name="sender">The frame input node requesting samples.</param>
/// <param name="args">Carries the number of samples required for this quantum.</param>
private async void ToxInputNodeQuantumStartedHandler(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
{
    // Check the sample demand FIRST: the original consumed a packet from
    // _receiveBuffer before discovering RequiredSamples == 0, silently
    // dropping buffered audio. Bailing out early avoids that data loss.
    var numSamplesNeeded = (uint)args.RequiredSamples;
    if (numSamplesNeeded == 0)
    {
        return;
    }

    if (!await _receiveBuffer.OutputAvailableAsync())
    {
        return;
    }

    short[] shorts;
    if (!_receiveBuffer.TryReceive(out shorts))
    {
        return;
    }

    // GenerateAudioData can provide PCM audio data by directly synthesizing it
    // or reading from a file. The node runs at the same rate as the rest of
    // the graph; for minimum latency, only provide the required amount of
    // samples — extra samples introduce additional latency.
    var audioData = GenerateAudioData(numSamplesNeeded, shorts);
    _toxInputNode.AddFrame(audioData);
}
/// <summary>
/// Feeds one graph quantum of audio to the frame input node and periodically
/// reports conversion progress.
/// </summary>
/// <param name="sender">The frame input node requesting samples.</param>
/// <param name="args">Quantum event arguments (requested sample count is ignored).</param>
private void FrameInputNode_QuantumStarted(
    AudioFrameInputNode sender,
    FrameInputNodeQuantumStartedEventArgs args)
{
    // Lazily start the file output node on the very first quantum.
    // Fixed: use ?. for consistency with the ?.Stop() call below — the rest
    // of this method treats _fileOutputNode as possibly null, so an
    // unguarded Start() could throw NullReferenceException.
    if (_audioDataCurrentPosition == 0)
    {
        _fileOutputNode?.Start();
    }

    // It doesn't matter how many samples were requested; we always emit a
    // full graph quantum.
    var frame = ProcessOutputFrame(_audioGraph.SamplesPerQuantum);
    _frameInputNode.AddFrame(frame);

    if (_finished)
    {
        _fileOutputNode?.Stop();
        _audioGraph?.Stop();
    }

    // Guard before touching the graph again (it may have been torn down).
    if (_audioGraph == null)
    {
        return;
    }

    // Report only every 100 quanta to avoid flooding the progress sink.
    if (_audioGraph.CompletedQuantumCount % 100 == 0)
    {
        var dProgress = (double)100 * _audioDataCurrentPosition / _audioData.LengthSamples();
        _ioProgress?.Report(dProgress);
    }
}
/// <summary>
/// Audio output handler: pulls voice data from the Discord audio stream and
/// pushes it into the frame input node each quantum.
/// </summary>
/// <param name="sender">The frame input node requesting samples.</param>
/// <param name="args">Carries the number of samples required for this quantum.</param>
private async void FrameInputNode_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
{
    // Not connected to a Discord audio channel yet.
    if (AudioInStream == null)
    {
        return;
    }

    // Nothing buffered to play this quantum.
    if (AudioInStream.AvailableFrames == 0)
    {
        return;
    }

    var samplesRequired = (uint)args.RequiredSamples;
    if (samplesRequired == 0)
    {
        return;
    }

    // The buffer size should ideally match the frame buffer size used inside
    // AudioInStream.
    var byteCount = samplesRequired * OpusConvertConstants.SampleBytes * OpusConvertConstants.Channels;

    // NOTE: could be hoisted to a reusable (static) buffer to avoid a
    // per-quantum allocation.
    var pcmBuffer = new byte[byteCount];
    var bytesRead = await AudioInStream.ReadAsync(pcmBuffer, 0, (int)byteCount);

    var frame = GenerateAudioData(pcmBuffer, (uint)bytesRead);
    sender.AddFrame(frame);
}
/// <summary>
/// Quantum-started callback: reads an audio frame for the requested sample
/// count and queues it on the frame input node.
/// </summary>
/// <param name="sender">The frame input node requesting samples.</param>
/// <param name="args">Carries the number of samples required for this quantum.</param>
private void node_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
{
    var samplesRequired = (uint)args.RequiredSamples;
    if (samplesRequired == 0)
    {
        // Nothing requested this quantum; skip frame generation entirely.
        return;
    }

    var frame = ReadAudioData(samplesRequired);
    frameInputNode.AddFrame(frame);
}
/// <summary>
/// Generates and enqueues audio data whenever the input node asks for samples.
/// </summary>
/// <param name="sender">The frame input node requesting samples.</param>
/// <param name="args">Carries the number of samples required for this quantum.</param>
private void InputNodeQuantumStartedHandler(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
{
    var requested = args.RequiredSamples;
    if (requested >= 1)
    {
        // Provide exactly the requested amount to keep latency minimal.
        sender.AddFrame(GenerateAudioData(requested));
    }
}
// Creates audio frames on the fly as the graph consumes them.
/// <summary>
/// GenerateAudioData can provide PCM audio data by directly synthesizing it
/// or reading from a file. The node runs at the same rate as the rest of the
/// graph, so we supply exactly the number of samples required this quantum —
/// extra samples would only introduce additional latency.
/// </summary>
/// <param name="sender">The frame input node requesting samples.</param>
/// <param name="args">Carries the number of samples required for this quantum.</param>
private void node_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
{
    var required = (uint)args.RequiredSamples;
    if (required == 0)
    {
        return;
    }

    frameInputNode.AddFrame(GenerateAudioData(required));
}
/// <summary>
/// Per-quantum callback for a track-backed input node: while playing, reads
/// the requested samples from the track and hands them to the node.
/// </summary>
/// <param name="inputNode">The frame input node requesting samples.</param>
/// <param name="e">Carries the number of samples required for this quantum.</param>
/// <param name="track">The track to read samples from.</param>
private void InputNodeQuantumStarted(AudioFrameInputNode inputNode, FrameInputNodeQuantumStartedEventArgs e, Track track)
{
    if (Status != AudioStatus.Playing)
    {
        return;
    }

    var samples = track.Read(e.RequiredSamples);
    if (samples == null)
    {
        // Track had nothing to deliver this quantum.
        return;
    }

    // The frame is only needed for the AddFrame call, so release it right after.
    using (var frame = GenerateFrameFromSamples(samples))
    {
        inputNode.AddFrame(frame);
    }
}
/// <summary>
/// Quantum callback: feeds generated audio to the input node, then tears the
/// node and graph down once playback is neither running nor flushing.
/// </summary>
/// <param name="node">The frame input node requesting samples.</param>
/// <param name="args">Carries the number of samples required for this quantum.</param>
private void OnQuantumStarted(AudioFrameInputNode node, FrameInputNodeQuantumStartedEventArgs args)
{
    var required = args.RequiredSamples;
    if (required != 0)
    {
        m_frameInputNode.AddFrame(GenerateAudioData(required));
    }

    // Once fully stopped (not running and done flushing), notify the
    // listener and shut everything down.
    if (!m_isRunning && !m_isFlushing)
    {
        OnAudioComplete();
        m_frameInputNode.Stop();
        m_audioGraph.Stop();
    }
}
/// <summary>
/// Supplies on-the-fly PCM data each quantum. GenerateAudioData can
/// synthesize the data directly or read it from a file. The node runs at the
/// graph rate, so only the samples the graph asks for are generated —
/// anything extra would sit in the queue and add latency.
/// </summary>
/// <param name="sender">The frame input node requesting samples.</param>
/// <param name="args">Carries the number of samples required for this quantum.</param>
private void node_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
{
    var sampleCount = (uint)args.RequiredSamples;
    if (sampleCount == 0)
    {
        // The graph needs nothing this quantum.
        return;
    }

    var frame = GenerateAudioData(sampleCount);
    frameInputNode.AddFrame(frame);
}
/// <summary>
/// Supplies PCM audio from the receive buffer to the Tox input node each
/// quantum. Declared async void because it is a top-level event handler.
/// </summary>
/// <param name="sender">The frame input node requesting samples.</param>
/// <param name="args">Carries the number of samples required for this quantum.</param>
private async void ToxInputNodeQuantumStartedHandler(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
{
    // Fixed ordering: determine the demand before touching the buffer. The
    // original called TryReceive first and then returned when
    // RequiredSamples was 0, which discarded a received audio packet.
    var numSamplesNeeded = (uint)args.RequiredSamples;
    if (numSamplesNeeded == 0)
    {
        return;
    }

    if (!await _receiveBuffer.OutputAvailableAsync())
    {
        return;
    }

    short[] shorts;
    if (!_receiveBuffer.TryReceive(out shorts))
    {
        return;
    }

    // GenerateAudioData can provide PCM audio data by directly synthesizing
    // it or reading from a file. For minimum latency, only provide the
    // required amount of samples — extra samples introduce additional latency.
    var audioData = GenerateAudioData(numSamplesNeeded, shorts);
    _toxInputNode.AddFrame(audioData);
}
/// <summary>
/// Streams raw PCM from <c>fileStream</c> into the frame input node,
/// swapping the two 4-byte halves of every 8-byte group on the way in
/// (presumably swapping the interleaved stereo channel order — confirm).
/// Closes and clears <c>fileStream</c> when the stream is exhausted.
/// </summary>
/// <param name="sender">The frame input node requesting samples.</param>
/// <param name="args">Carries the number of samples required for this quantum.</param>
private unsafe void FrameInputNode_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
{
    // Fixed: bail out before allocating anything. The original created the
    // AudioFrame first and then abandoned it (undisposed) when the stream
    // was already closed.
    if (fileStream == null)
    {
        return;
    }

    // 2 channels of 32-bit float samples per required sample.
    var bufferSize = args.RequiredSamples * sizeof(float) * 2;
    var audioFrame = new AudioFrame((uint)bufferSize);

    using (var audioBuffer = audioFrame.LockBuffer(AudioBufferAccessMode.Write))
    using (var bufferReference = audioBuffer.CreateReference())
    {
        byte* dataInBytes;
        uint capacityInBytes;

        // Get the raw byte buffer underlying the AudioFrame.
        ((IMemoryBufferByteAccess)bufferReference).GetBuffer(out dataInBytes, out capacityInBytes);

        var managedBuffer = new byte[capacityInBytes];
        var remaining = fileStream.Length - fileStream.Position;
        var wanted = (int)(remaining < capacityInBytes ? remaining : capacityInBytes);
        if (wanted <= 0)
        {
            // End of file: release the stream so subsequent quanta return early.
            fileStream.Close();
            fileStream = null;
            return;
        }

        // Fixed: Stream.Read may return fewer bytes than requested; the
        // original ignored the return value and could swap bytes that were
        // never read.
        int readLength = fileStream.Read(managedBuffer, 0, wanted);
        if (readLength <= 0)
        {
            fileStream.Close();
            fileStream = null;
            return;
        }

        // Swap the two 4-byte halves of each 8-byte group. Fixed: process
        // only complete groups (i + 8 <= readLength); the original's
        // i < readLength condition let a trailing partial group pull in
        // bytes beyond what was actually read.
        for (int i = 0; i + 8 <= readLength; i += 8)
        {
            dataInBytes[i + 4] = managedBuffer[i + 0];
            dataInBytes[i + 5] = managedBuffer[i + 1];
            dataInBytes[i + 6] = managedBuffer[i + 2];
            dataInBytes[i + 7] = managedBuffer[i + 3];
            dataInBytes[i + 0] = managedBuffer[i + 4];
            dataInBytes[i + 1] = managedBuffer[i + 5];
            dataInBytes[i + 2] = managedBuffer[i + 6];
            dataInBytes[i + 3] = managedBuffer[i + 7];
        }
    }

    audioFrameInputNode.AddFrame(audioFrame);
}
/// <summary>
/// Provides generated audio to the input node whenever the graph requests a
/// non-zero number of samples.
/// </summary>
/// <param name="sender">The frame input node requesting samples.</param>
/// <param name="args">Carries the number of samples required for this quantum.</param>
private void Input_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
{
    var needed = (uint)args.RequiredSamples;
    if (needed == 0)
    {
        return;
    }

    input.AddFrame(GenerateAudio(needed));
}
/// <summary>
/// Quantum-started handler: synthesizes a frame for the requested sample
/// count and queues it directly on the sender node.
/// </summary>
/// <param name="sender">The frame input node requesting samples.</param>
/// <param name="e">Carries the number of samples required for this quantum.</param>
private void OnQuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs e)
{
    var required = e.RequiredSamples;
    if (required <= 0)
    {
        return;
    }

    var frame = GenerateAudioFrame(required);
    sender.AddFrame(frame);
}