// Builds this sample's DSP graph: a noise-generator node feeding a low-pass
// filter, which is routed into the graph's root output.
void Start()
{
    var soundFormat = ChannelEnumConverter.GetSoundFormatFromSpeakerMode(AudioSettings.speakerMode);
    var channelCount = ChannelEnumConverter.GetChannelCountFromSoundFormat(soundFormat);
    AudioSettings.GetDSPBufferSize(out var dspBufferLength, out var dspNumBuffers);

    m_Graph = DSPGraph.Create(soundFormat, channelCount, dspBufferLength, AudioSettings.outputSampleRate);

    // NOTE(review): the handle returned by AttachToDefaultOutput is discarded here,
    // so the output cannot be explicitly disposed later — confirm teardown happens elsewhere.
    var graphDriver = new DefaultDSPGraphDriver { Graph = m_Graph };
    graphDriver.AttachToDefaultOutput();

    // Queue node creation and wiring atomically; disposing the block submits it.
    using (var setupBlock = m_Graph.CreateCommandBlock())
    {
        m_NoiseFilter = setupBlock.CreateDSPNode<NoiseFilter.Parameters, NoiseFilter.Providers, NoiseFilter>();
        setupBlock.AddOutletPort(m_NoiseFilter, 2);

        m_LowPass = StateVariableFilter.Create(setupBlock, StateVariableFilter.FilterType.Lowpass);

        // noise -> low-pass -> root output.
        setupBlock.Connect(m_NoiseFilter, 0, m_LowPass, 0);
        setupBlock.Connect(m_LowPass, 0, m_Graph.RootDSP, 0);
    }
}
// Builds a two-oscillator DSP graph: a sine node and a saw node mixed by a
// dedicated mix node that is connected to the graph's root output.
void Start()
{
    var soundFormat = ChannelEnumConverter.GetSoundFormatFromSpeakerMode(AudioSettings.speakerMode);
    var channelCount = ChannelEnumConverter.GetChannelCountFromSoundFormat(soundFormat);
    AudioSettings.GetDSPBufferSize(out var dspBufferLength, out var dspNumBuffers);
    var outputSampleRate = AudioSettings.outputSampleRate;

    m_Graph = DSPGraph.Create(soundFormat, channelCount, dspBufferLength, outputSampleRate);

    // NOTE(review): the handle returned by AttachToDefaultOutput is discarded here —
    // confirm output teardown happens elsewhere.
    var graphDriver = new DefaultDSPGraphDriver { Graph = m_Graph };
    graphDriver.AttachToDefaultOutput();

    // Queue node creation and wiring atomically; disposing the block submits it.
    using (var setupBlock = m_Graph.CreateCommandBlock())
    {
        // Oscillator sources, each with a single stereo outlet.
        m_SineWave = setupBlock.CreateDSPNode<SinWaveNode.Parameters, SinWaveNode.Providers, SinWaveNode>();
        setupBlock.AddOutletPort(m_SineWave, 2, SoundFormat.Stereo);

        m_SawWave = setupBlock.CreateDSPNode<SawWaveNode.Parameters, SawWaveNode.Providers, SawWaveNode>();
        setupBlock.AddOutletPort(m_SawWave, 2, SoundFormat.Stereo);

        // Mixer: two stereo inlets (one per oscillator) and one stereo outlet.
        m_MixNode = setupBlock.CreateDSPNode<MixNode.Parameters, MixNode.Providers, MixNode>();
        setupBlock.AddInletPort(m_MixNode, 2, SoundFormat.Stereo);
        setupBlock.AddInletPort(m_MixNode, 2, SoundFormat.Stereo);
        setupBlock.AddOutletPort(m_MixNode, 2, SoundFormat.Stereo);

        // sine -> mix inlet 0, saw -> mix inlet 1, mix -> root.
        setupBlock.Connect(m_SineWave, 0, m_MixNode, 0);
        setupBlock.Connect(m_SawWave, 0, m_MixNode, 1);
        setupBlock.Connect(m_MixNode, 0, m_Graph.RootDSP, 0);
    }
}
// Creates the DSP graph from the current audio settings, attaches it to the
// default output, registers a main-thread handler for ClipStopped events, and
// wires a clip-playback node to the graph root. The returned handles
// (m_Output, m_HandlerID, m_Node, m_Connection) are stored in fields so they
// can be released/disconnected during teardown.
void Start()
{
    var format = ChannelEnumConverter.GetSoundFormatFromSpeakerMode(AudioSettings.speakerMode);
    var channels = ChannelEnumConverter.GetChannelCountFromSoundFormat(format);
    AudioSettings.GetDSPBufferSize(out var bufferLength, out var numBuffers);
    var sampleRate = AudioSettings.outputSampleRate;

    m_Graph = DSPGraph.Create(format, channels, bufferLength, sampleRate);

    var driver = new DefaultDSPGraphDriver { Graph = m_Graph };
    m_Output = driver.AttachToDefaultOutput();

    // Add an event handler delegate to the graph for ClipStopped. So we are notified
    // of when a clip is stopped in the node and can handle the resources on the main thread.
    m_HandlerID = m_Graph.AddNodeEventHandler<ClipStopped>((node, evt) =>
    {
        Debug.Log("Received ClipStopped event on main thread, cleaning resources");
    });

    // All async interaction with the graph must be done through a DSPCommandBlock.
    // Create it here and complete it once all commands are added.
    var block = m_Graph.CreateCommandBlock();
    m_Node = block.CreateDSPNode<PlayClipNode.Parameters, PlayClipNode.SampleProviders, PlayClipNode>();

    // Currently input and output ports are dynamic and added via this API to a node.
    // This will change to a static definition of nodes in the future.
    block.AddOutletPort(m_Node, 2, SoundFormat.Stereo);

    // Connect the node to the root of the graph.
    m_Connection = block.Connect(m_Node, 0, m_Graph.RootDSP, 0);

    // We are done, fire off the command block atomically to the mixer thread.
    block.Complete();
}
/// <summary>
/// Allocates the node bookkeeping containers, creates the DSP graph sized to
/// the engine's current audio settings, attaches it to the default output, and
/// registers the handler that releases nodes when they report completion.
/// </summary>
protected override void OnCreate()
{
    this._available = new NativeQueue<DSPNode>(Allocator.Persistent);
    this._all = new NativeList<DSPNode>(Allocator.Persistent);

    var soundFormat = ChannelEnumConverter.GetSoundFormatFromSpeakerMode(AudioSettings.speakerMode);
    var channelCount = ChannelEnumConverter.GetChannelCountFromSoundFormat(soundFormat);
    AudioSettings.GetDSPBufferSize(out var dspBufferLength, out _);
    var sampleRate = AudioSettings.outputSampleRate;
    Debug.Log($"Audio SampleRate will be {sampleRate}");

    this._graph = DSPGraph.Create(soundFormat, channelCount, dspBufferLength, sampleRate);

    var graphDriver = new DefaultDSPGraphDriver { Graph = this._graph };
    this._output = graphDriver.AttachToDefaultOutput();

    // Recycle nodes on the main thread when their kernel signals completion.
    this._graph.AddNodeEventHandler<AudioSampleNodeCompleted>(this.ReleaseNode);
}
// Initializes the audio system: allocates the persistent native containers,
// seeds default AudioSettings on the blackboard entity, creates the DSPGraph
// and driver from the engine's output configuration, builds the persistent
// mix/ILD node pair, caches the entity queries, and pre-warms Burst
// compilation of the audio kernels via a cancelled command block.
protected override void OnCreate()
{
    // Initialize containers first.
    m_mixNodePortFreelist = new NativeList<int>(Allocator.Persistent);
    m_mixNodePortCount = new NativeReference<int>(Allocator.Persistent);
    m_ildNodePortCount = new NativeReference<int>(Allocator.Persistent);
    m_packedFrameCounterBufferId = new NativeReference<long>(Allocator.Persistent);
    m_audioFrame = new NativeReference<int>(Allocator.Persistent);
    m_lastReadBufferId = new NativeReference<int>(Allocator.Persistent);
    m_buffersInFlight = new List<ManagedIldBuffer>();

    // Provide default settings only if the application hasn't already added them.
    worldBlackboardEntity.AddComponentDataIfMissing(new AudioSettings
    {
        audioFramesPerUpdate = 3,
        audioSubframesPerFrame = 1,
        logWarningIfBuffersAreStarved = false
    });

    // Create graph and driver sized to the engine's output configuration.
    var format = ChannelEnumConverter.GetSoundFormatFromSpeakerMode(UnityEngine.AudioSettings.speakerMode);
    var channels = ChannelEnumConverter.GetChannelCountFromSoundFormat(format);
    UnityEngine.AudioSettings.GetDSPBufferSize(out m_samplesPerSubframe, out _);
    m_sampleRate = UnityEngine.AudioSettings.outputSampleRate;
    m_graph = DSPGraph.Create(format, channels, m_samplesPerSubframe, m_sampleRate);
    m_driver = new LatiosDSPGraphDriver { Graph = m_graph };
    m_outputHandle = m_driver.AttachToDefaultOutput();

    // Build the persistent node pair: mix node -> root, plus the ILD reader node.
    var commandBlock = m_graph.CreateCommandBlock();
    m_mixNode = commandBlock.CreateDSPNode<MixStereoPortsNode.Parameters, MixStereoPortsNode.SampleProviders, MixStereoPortsNode>();
    commandBlock.AddOutletPort(m_mixNode, 2);
    m_mixToOutputConnection = commandBlock.Connect(m_mixNode, 0, m_graph.RootDSP, 0);
    m_ildNode = commandBlock.CreateDSPNode<ReadIldBuffersNode.Parameters, ReadIldBuffersNode.SampleProviders, ReadIldBuffersNode>();
    unsafe
    {
        // Pass the raw pointer of the packed frame-counter reference into the
        // kernel update so the node can observe it directly.
        commandBlock.UpdateAudioKernel<SetReadIldBuffersNodePackedFrameBufferId, ReadIldBuffersNode.Parameters, ReadIldBuffersNode.SampleProviders, ReadIldBuffersNode>(
            new SetReadIldBuffersNodePackedFrameBufferId { ptr = (long *)m_packedFrameCounterBufferId.GetUnsafePtr() },
            m_ildNode);
    }
    commandBlock.Complete();

    // Create queries.
    m_aliveListenersQuery = Fluent.WithAll<AudioListener>(true).Build();
    m_deadListenersQuery = Fluent.Without<AudioListener>().WithAll<ListenerGraphState>().Build();
    m_oneshotsToDestroyWhenFinishedQuery = Fluent.WithAll<AudioSourceOneShot>().WithAll<AudioSourceDestroyOneShotWhenFinished>(true).Build();
    m_oneshotsQuery = Fluent.WithAll<AudioSourceOneShot>().Build();
    m_loopedQuery = Fluent.WithAll<AudioSourceLooped>().Build();

    // Force initialization of Burst: issue one instance of each kernel and
    // kernel-update through a command block that is immediately cancelled, so
    // compilation cost is paid here instead of on the first real audio frame.
    commandBlock = m_graph.CreateCommandBlock();
    var dummyNode = commandBlock.CreateDSPNode<MixPortsToStereoNode.Parameters, MixPortsToStereoNode.SampleProviders, MixPortsToStereoNode>();
    StateVariableFilterNode.Create(commandBlock, StateVariableFilterNode.FilterType.Bandpass, 0f, 0f, 0f, 1);
    commandBlock.UpdateAudioKernel<MixPortsToStereoNodeUpdate, MixPortsToStereoNode.Parameters, MixPortsToStereoNode.SampleProviders, MixPortsToStereoNode>(
        new MixPortsToStereoNodeUpdate { leftChannelCount = 0 },
        dummyNode);
    commandBlock.UpdateAudioKernel<ReadIldBuffersNodeUpdate, ReadIldBuffersNode.Parameters, ReadIldBuffersNode.SampleProviders, ReadIldBuffersNode>(
        new ReadIldBuffersNodeUpdate { ildBuffer = new IldBuffer(), },
        m_ildNode);
    commandBlock.Cancel();
}