/// <summary>
/// Begins looping microphone capture into the attached AudioSource and
/// positions playback just behind the microphone's write head.
/// </summary>
public void StartRecording()
{
    if (string.IsNullOrEmpty(m_Device))
    {
        Debug.Log("No Microphone found!");
        return;
    }

    // Restart cleanly if a capture or playback is already in flight.
    if (Microphone.IsRecording(m_Device))
    {
        StopRecording();
    }
    if (m_AudioSource.isPlaying)
    {
        m_AudioSource.Stop();
    }

    m_AudioSource.clip = null;
    m_AudioSource.loop = true;
    m_AudioSource.clip = Microphone.Start(m_Device, true, 1, AudioSettings.outputSampleRate);
    m_AudioSource.Play();

    int dspBufferLength;
    int dspBufferCount;
    AudioSettings.GetDSPBufferSize(out dspBufferLength, out dspBufferCount);

    // Trail the microphone write position by three DSP buffers so playback
    // never reads samples that have not been written yet; the + sampleRate
    // before the modulo keeps the wrapped index non-negative.
    m_AudioSource.timeSamples = (Microphone.GetPosition(m_Device) + AudioSettings.outputSampleRate - 3 * dspBufferLength * dspBufferCount) % AudioSettings.outputSampleRate;
}
/// <summary>
/// One-time initialization of the GATInfo singleton from Unity's current
/// audio settings. Logs a warning and returns if already initialized.
/// </summary>
public static void Init()
{
    if (UniqueInstance != null)
    {
        Debug.LogWarning("GATInfo can only be initialized once!");
        return;
    }

    // Map the configured speaker mode to an output channel count.
    int channelCount;
    switch (AudioSettings.speakerMode)
    {
        case AudioSpeakerMode.Mono:
            channelCount = 1;
            break;
        case AudioSpeakerMode.Stereo:
            channelCount = 2;
            break;
        case AudioSpeakerMode.Quad:
            channelCount = 4;
            break;
        case AudioSpeakerMode.Surround:
            channelCount = 5;
            break;
        case AudioSpeakerMode.Mode5point1:
            channelCount = 6;
            break;
        case AudioSpeakerMode.Mode7point1:
            channelCount = 8;
            break;
        default:
            // Raw / anything unexpected: assume stereo.
            channelCount = 2;
            break;
    }

    int dspBufferSize;
    int dspBufferCount;
    AudioSettings.GetDSPBufferSize(out dspBufferSize, out dspBufferCount);

    // Duration of a single DSP buffer, in seconds.
    double dspBufferDuration = ((double)(dspBufferSize)) / AudioSettings.outputSampleRate;

    UniqueInstance = new GATInfo(channelCount, dspBufferSize, dspBufferDuration);

    if (RequestedSampleRate != 0 && OutputSampleRate != RequestedSampleRate)
    {
        Debug.LogWarning("Requested sample rate of " + RequestedSampleRate + " is not available on this platform.");
    }

#if GAT_DEBUG
    Debug.Log("Number of channels: " + channelCount);
    Debug.Log("dsp buffer size: " + dspBufferSize + " duration: " + dspBufferDuration + "sample rate: " + OutputSampleRate);
#endif
}
// Message sent by PhotonVoiceRecorder when a local voice has been created.
// Validates the voice format, resolves the output channel count, and attaches
// a WebRTC post-processor to the voice.
void PhotonVoiceCreated(Recorder.PhotonVoiceCreatedParams p)
{
    var localVoice = p.Voice;
    if (localVoice.Info.Channels != 1)
    {
        throw new Exception("WebRTCAudioProcessor: only mono audio signals supported.");
    }

    // Equivalent to an 'is' check followed by a cast.
    var shortVoice = localVoice as Voice.LocalVoiceAudioShort;
    if (shortVoice == null)
    {
        throw new Exception("WebRTCAudioProcessor: only short audio voice supported (Set PhotonVoiceRecorder.TypeConvert option).");
    }

    // can't access the AudioSettings properties in InitAEC if it's called from not main thread,
    // so resolve the output channel count here on the main thread.
    var channelsBySpeakerMode = new Dictionary<AudioSpeakerMode, int>()
    {
        { AudioSpeakerMode.Raw, 0 },
        { AudioSpeakerMode.Mono, 1 },
        { AudioSpeakerMode.Stereo, 2 },
        { AudioSpeakerMode.Quad, 4 },
        { AudioSpeakerMode.Surround, 5 },
        { AudioSpeakerMode.Mode5point1, 6 },
        { AudioSpeakerMode.Mode7point1, 8 },
        { AudioSpeakerMode.Prologic, 0 },
    };
    this.reverseChannels = channelsBySpeakerMode[AudioSettings.speakerMode];

    int playbackBufferSize;
    int playbackBufferCount;
    AudioSettings.GetDSPBufferSize(out playbackBufferSize, out playbackBufferCount);

    proc = new Voice.WebRTCAudioProcessor(new Voice.Unity.Logger(), localVoice.Info.FrameSize, localVoice.Info.SamplingRate, localVoice.Info.Channels, AudioSettings.outputSampleRate, this.reverseChannels);
    shortVoice.AddPostProcessor(proc);
    Debug.Log("WebRTCAudioDSP initialized.");
}
/// <summary>
/// Toggles live microphone monitoring when <c>useMicrophone</c> changes:
/// on enable, captures from the first available mic into this AudioSource;
/// on disable, stops capture and restores the previous clip.
/// </summary>
void Update()
{
    if (useMicrophone != prevUseMicrophone)
    {
        prevUseMicrophone = useMicrophone;
        if (useMicrophone)
        {
            // Pick the first available microphone.
            foreach (string m in Microphone.devices)
            {
                device = m;
                break;
            }

            source = GetComponent<AudioSource>();
            prevClip = source.clip;
            source.Stop();

            // BUG FIX: record from 'device' so it matches the device whose
            // position is queried below; the old Start(null, ...) used the
            // default device, which may differ from the first enumerated one.
            source.clip = Microphone.Start(device, true, 1, AudioSettings.outputSampleRate);
            source.Play();

            int dspBufferSize, dspNumBuffers;
            AudioSettings.GetDSPBufferSize(out dspBufferSize, out dspNumBuffers);

            // Trail the mic write head by three DSP buffers (wrapped within
            // the 1-second clip) so playback never outruns capture.
            source.timeSamples = (Microphone.GetPosition(device) + AudioSettings.outputSampleRate - 3 * dspBufferSize * dspNumBuffers) % AudioSettings.outputSampleRate;
        }
        else
        {
            Microphone.End(device);
            source.clip = prevClip;
            source.Play();
        }
    }
}
/// <summary>
/// Sets up the ChucK filter instance, the interleaved output buffer, the
/// AudioSource mixer routing, and (optionally) microphone input.
/// </summary>
void Awake()
{
    // Register a new ChucK filter and remember its id.
    myChuckId = Chuck.Manager.InitializeFilter();

    // Size the output buffer to one DSP buffer of interleaved samples.
    int dspBufferCount;
    AudioSettings.GetDSPBufferSize(out myBufferLength, out dspBufferCount);
    myOutBuffer = new float[myBufferLength * myNumChannels];

    // Route through the dedicated mixer group for reliable ordering.
    mySource = GetComponent<AudioSource>();
    mySource.outputAudioMixerGroup = Chuck.FindAudioMixerGroup("ChuckMainInstanceDestination");

    if (useMicrophone)
    {
        SetupMic();
    }

    hasInit = true;

    // When a scene is unloaded, check whether we need to clear the chuck.
    SceneManager.sceneUnloaded += OnSceneUnloaded;

    // Optionally survive scene changes.
    if (persistToNextScene)
    {
        DontDestroyOnLoad(this.gameObject);
    }
}
// private uint ksmpsIndex = 0;

#endregion PRIVATE_FIELDS

/// <summary>
/// Caches the CsoundUnity reference, the DSP buffer configuration, and the
/// AudioSource this component renders through.
/// </summary>
private void Awake()
{
    if (csoundUnityGameObject)
    {
        csoundUnity = csoundUnityGameObject.GetComponent<CsoundUnity>();
        if (!csoundUnity)
        {
            Debug.LogError("CsoundUnity was not found?");
        }
    }

    AudioSettings.GetDSPBufferSize(out bufferSize, out numBuffers);

    audioSource = GetComponent<AudioSource>();
    if (!audioSource)
    {
        Debug.LogError("AudioSource was not found?");
    }

    // Fixed velocity updates and full 3D spatialization for this source.
    audioSource.velocityUpdateMode = AudioVelocityUpdateMode.Fixed;
    audioSource.spatialBlend = 1.0f;

    if (selectedAudioChannelIndexByChannel == null)
    {
        // Default to a stereo (2-entry) channel-index map.
        selectedAudioChannelIndexByChannel = new int[2];
    }

    // TODO: force doppler level of the AudioSource to 0, to avoid audio artefacts ?
    // audioSource.dopplerLevel = 0;
}
/// <summary>
/// Verifies the output sample rate, allocates the native DiracLE buffers,
/// and primes the processing state from the attached AudioSource.
/// </summary>
void Initialize()
{
    Debug.Log("Dirac Initialize");

    // DiracLE only supports a 44.1 kHz output rate.
    if (AudioSettings.outputSampleRate != 44100)
    {
        // FIX: the old message said "44100 kHz" — the correct unit is Hz.
        Debug.LogError("DiracLE pitch shifting only works at 44100 Hz. Use the InitScene to force 44.1 kHz output on iOS.");
        this.enabled = false;
        return;
    }

    int numBuffers;
    AudioSettings.GetDSPBufferSize(out _bufferSize, out numBuffers);
    AllocateNativeResources(_bufferSize);
    _initialized = true;

    _source = gameObject.GetComponent<AudioSource>();
    if (_source != null)
    {
        // A play-on-awake source with a clip starts processing immediately.
        if (_source.playOnAwake && _source.clip != null)
        {
            _playingState = PlayingState.Playing;
            _doProcess = true;
            _buffersNeedReset = true;
        }
    }

    if (alwaysProcess)
    {
        _doProcess = true;
    }
}
/// <summary>
/// Starts looping capture from the configured microphone into the attached
/// AudioSource, aligns playback just behind the mic write head, and
/// allocates the spectrum analysis buffer.
/// </summary>
private void Awake()
{
    // Verify the requested microphone is actually present.
    bool micAvailable = false;
    foreach (string m in Microphone.devices)
    {
        Debug.Log(m);
        if (m.Equals(_microphoneName))
        {
            micAvailable = true;
        }
    }
    if (!micAvailable)
    {
        Debug.LogWarningFormat("Microphone {0} is not available", _microphoneName);
        return;
    }

    source = GetComponent<AudioSource>();
    source.Stop();
    source.loop = true;
    source.clip = Microphone.Start(_microphoneName, true, 1, AudioSettings.outputSampleRate);
    source.Play();

    int dspBufferSize, dspNumBuffers;
    AudioSettings.GetDSPBufferSize(out dspBufferSize, out dspNumBuffers);

    // BUG FIX: query the position of the device we actually recorded from
    // (_microphoneName); the old code asked about 'device', which this
    // method never assigned. Trail the write head by three DSP buffers.
    source.timeSamples = (Microphone.GetPosition(_microphoneName) + AudioSettings.outputSampleRate - 3 * dspBufferSize * dspNumBuffers) % AudioSettings.outputSampleRate;

    _spectrum = new float[512];
}
/// <summary>
/// Captures the DSP buffer configuration, logs it, and opens the output
/// WAV file for writing.
/// </summary>
void Start()
{
    AudioSettings.GetDSPBufferSize(out bufferSize, out numBuffers);

    Debug.Log("bufferSize = " + bufferSize);
    Debug.Log("numBuffers = " + numBuffers);

    StartWriting("test.wav");
}
/// Initializes the audio system with the current audio configuration.
/// @note This should only be called from the main Unity thread.
public static void Initialize(CardboardAudioListener listener, Quality quality)
{
    if (initialized)
    {
        // Already initialized — only a single listener is supported.
        if (listener.transform != listenerTransform)
        {
            Debug.LogError("Only one CardboardAudioListener component is allowed in the scene.");
            //CardboardAudioListener.Destroy(listener);
        }
        return;
    }

    // Query the engine's audio configuration (legacy API on Unity 4.x).
#if UNITY_4_5 || UNITY_4_6 || UNITY_4_7
    sampleRate = AudioSettings.outputSampleRate;
    numChannels = (int)AudioSettings.speakerMode;
    int numBuffers = -1;
    AudioSettings.GetDSPBufferSize(out framesPerBuffer, out numBuffers);
#else
    AudioConfiguration config = AudioSettings.GetConfiguration();
    sampleRate = config.sampleRate;
    numChannels = (int)config.speakerMode;
    framesPerBuffer = config.dspBufferSize;
#endif

    if (numChannels != (int)AudioSpeakerMode.Stereo)
    {
        Debug.LogError("Only 'Stereo' speaker mode is supported by Cardboard.");
        return;
    }

    Initialize(quality, sampleRate, numChannels, framesPerBuffer);
    listenerTransform = listener.transform;
    initialized = true;

    Debug.Log("Cardboard audio system is initialized (Quality: " + quality + ", Sample Rate: " + sampleRate + ", Channels: " + numChannels + ", Frames Per Buffer: " + framesPerBuffer + ").");
}
/// <summary>
/// Determines the effective output channel count, allocates the capture
/// ring buffers, and pins the read buffer for native access.
/// </summary>
void OnEnable()
{
    int bufferLength = 0;
    int numBuffers = 0;
    AudioSettings.GetDSPBufferSize(out bufferLength, out numBuffers);

    // FIX: the former '#if UNITY_5' and '#else' branches were byte-for-byte
    // identical, so the conditional compilation was removed.
    // Use the driver's best channel count, narrowed to the configured
    // speaker mode when that mode is more restrictive.
    _numChannels = GetNumChannels(AudioSettings.driverCapabilities);
    if (AudioSettings.speakerMode != AudioSpeakerMode.Raw && AudioSettings.speakerMode < AudioSettings.driverCapabilities)
    {
        _numChannels = GetNumChannels(AudioSettings.speakerMode);
    }
    Debug.Log(string.Format("[AVProUnityAudiocapture] SampleRate: {0}hz SpeakerMode: {1} BestDriverMode: {2} (DSP using {3} buffers of {4} bytes using {5} channels)", AudioSettings.outputSampleRate, AudioSettings.speakerMode.ToString(), AudioSettings.driverCapabilities.ToString(), numBuffers, bufferLength, _numChannels));

    // Ring buffers sized to BufferSize DSP blocks of interleaved samples.
    _buffer = new float[bufferLength * _numChannels * numBuffers * BufferSize];
    _readBuffer = new float[bufferLength * _numChannels * numBuffers * BufferSize];
    _bufferIndex = 0;

    // Pin the read buffer so native code can keep a stable pointer to it.
    _bufferHandle = GCHandle.Alloc(_readBuffer, GCHandleType.Pinned);
    _overflowCount = 0;
}
/// <summary>
/// Registers this send under <paramref name="sendName"/>, lazily allocating
/// the shared per-name buffers and incrementing the sender count.
/// </summary>
public void Initialize(string sendName)
{
    this.sendName = sendName;

    // A null/empty name means this instance does not forward audio to PD.
    sendToPD = !string.IsNullOrEmpty(sendName);
    if (sendToPD)
    {
        int dspBufferSize;
        int dspBufferCount;
        AudioSettings.GetDSPBufferSize(out dspBufferSize, out dspBufferCount);

        // First sender for this name allocates the shared state
        // (stereo interleaved: 2 * DSP buffer size).
        if (!dataToSend.ContainsKey(sendName))
        {
            dataToSend[sendName] = new float[dspBufferSize * 2];
        }
        if (!sendAmount.ContainsKey(sendName))
        {
            sendAmount[sendName] = 0;
        }
        if (!sendCount.ContainsKey(sendName))
        {
            sendCount[sendName] = 0;
        }

        sendAmount[sendName] += 1;
    }

    initialized = true;
}
// Token: 0x0600375D RID: 14173 RVA: 0x0011AFF0 File Offset: 0x001193F0
/// <summary>
/// Singleton setup: queries the audio configuration, initializes the native
/// OSP library, and pushes the serialized settings down to it.
/// </summary>
private void Awake()
{
    if (OSPManager.sInstance != null)
    {
        // FIX: the original wrapped this placeholder-free message in
        // string.Format(..., new object[0]); the plain literal is equivalent.
        Debug.LogWarning("OSPManager-Awake: Only one instance of OSPManager can exist in the scene.");
        return;
    }

    OSPManager.sInstance = this;

    int outputSampleRate = AudioSettings.outputSampleRate;
    int dspBufferSize;
    int dspBufferCount;
    AudioSettings.GetDSPBufferSize(out dspBufferSize, out dspBufferCount);

    // FIX: removed the decompiler artifacts 'bool flag = true; if (flag) { }'.
    Debug.LogWarning(string.Format("OSP: Queried SampleRate: {0:F0} BufferSize: {1:F0}", outputSampleRate, dspBufferSize));
    Debug.LogWarning(string.Format("OSP: sample rate: {0:F0}", outputSampleRate));
    Debug.LogWarning(string.Format("OSP: buffer size: {0:F0}", dspBufferSize));
    Debug.LogWarning(string.Format("OSP: num buffers: {0:F0}", dspBufferCount));

    OSPManager.sOSPInit = OSPManager.OSP_Init(outputSampleRate, dspBufferSize);
    OSPManager.OSP_SetBypass(this.bypass);
    OSPManager.OSP_SetGlobalScale(this.globalScale);
    OSPManager.OSP_SetGain(this.gain);
    OSPManager.OSP_SetFalloffRangeGlobal(this.falloffNear, this.falloffFar);
    this.dirtyReflection = true;
}
/// <summary>
/// Returns the DSP buffer length in samples; the buffer count is discarded.
/// </summary>
private int getBufferSize()
{
    int dspBufferLength;
    int dspBufferCount;
    AudioSettings.GetDSPBufferSize(out dspBufferLength, out dspBufferCount);
    return dspBufferLength;
}
/// <summary>
/// Caches the DSP buffer configuration at startup.
/// </summary>
void Start()
{
    AudioSettings.GetDSPBufferSize(out bufferSize, out numBuffers);

    // Recording-slot AudioSources were once looked up here by tag
    // (RecSlot1..RecSlot3); that wiring is currently disabled:
    //audioSources = new AudioSource[3];
    //audioSources[0] = GameObject.FindWithTag("RecSlot1").GetComponent<AudioSource>();
    //audioSources[1] = GameObject.FindWithTag("RecSlot2").GetComponent<AudioSource>();
    //audioSources[2] = GameObject.FindWithTag("RecSlot3").GetComponent<AudioSource>();
}
/*
 * -----------------------
 * InitializeSoundSystem()
 * initialize persistent sound emitter objects that live across scene loads
 * -----------------------
 */
void InitializeSoundSystem()
{
    // FIX: the old initial values (960 / 4) were dead — GetDSPBufferSize
    // always overwrites both out parameters.
    int bufferLength;
    int numBuffers;
    AudioSettings.GetDSPBufferSize(out bufferLength, out numBuffers);

    if (Application.isPlaying)
    {
        Debug.Log("[AudioManager] Audio Sample Rate: " + AudioSettings.outputSampleRate);
        Debug.Log("[AudioManager] Audio Buffer Length: " + bufferLength + " Size: " + numBuffers);
    }

    // find the audio listener for playing regular 2D sounds
    AudioListener audioListenerObject = GameObject.FindObjectOfType<AudioListener>() as AudioListener;
    if (audioListenerObject == null)
    {
        Debug.LogError("[AudioManager] Missing AudioListener object! Add one to the scene.");
    }
    else
    {
        staticListenerPosition = audioListenerObject.transform;
    }

    // we allocate maxSoundEmitters + reserved channels
    soundEmitters = new SoundEmitter[maxSoundEmitters + (int)EmitterChannel.Any];

    // see if the sound emitters have already been created, if so, nuke it,
    // it shouldn't exist in the scene upon load
    soundEmitterParent = GameObject.Find("__SoundEmitters__");
    if (soundEmitterParent != null)
    {
        // delete any sound emitters hanging around
        Destroy(soundEmitterParent);
    }

    // create them all
    soundEmitterParent = new GameObject("__SoundEmitters__");
    for (int i = 0; i < maxSoundEmitters + (int)EmitterChannel.Any; i++)
    {
        GameObject emitterObject = new GameObject("SoundEmitter_" + i);
        emitterObject.transform.parent = soundEmitterParent.transform;
        emitterObject.transform.position = Vector3.zero;
        // don't ever save this to the scene
        emitterObject.hideFlags = HideFlags.DontSaveInEditor;
        // add the sound emitter components
        soundEmitters[i] = emitterObject.AddComponent<SoundEmitter>();
        soundEmitters[i].SetDefaultParent(soundEmitterParent.transform);
        soundEmitters[i].SetChannel(i);
        soundEmitters[i].Stop();
        // save off the original index
        soundEmitters[i].originalIdx = i;
    }

    // reset the free emitter lists
    ResetFreeEmitters();

    soundEmitterParent.hideFlags = HideFlags.DontSaveInEditor;
    audioMaxFallOffDistanceSqr = audioMaxFallOffDistance * audioMaxFallOffDistance;
}
/// <summary>
/// Allocates the per-DSP-block amplitude buffer
/// (buffer length * output channel count).
/// </summary>
void Start()
{
    int dspBufferLength;
    int dspBufferCount;
    AudioSettings.GetDSPBufferSize(out dspBufferLength, out dspBufferCount);

    int channelCount = AudioUnit.speakerModeToChannels(AudioSettings.speakerMode);
    ampData = new float[dspBufferLength * channelCount];
}
/// <summary>
/// Toggles microphone monitoring when <c>useMicrophone</c> changes, then
/// tracks a smoothed loudness peak and scales both hand transforms by it.
/// </summary>
void Update()
{
    if (useMicrophone != prevUseMicrophone)
    {
        prevUseMicrophone = useMicrophone;
        if (useMicrophone)
        {
            // Pick the first available microphone.
            foreach (string m in Microphone.devices)
            {
                device = m;
                break;
            }

            source = GetComponent<AudioSource>();
            prevClip = source.clip;
            source.Stop();

            // BUG FIX: record from 'device' so it matches the device whose
            // position is queried below; the old Start(null, ...) used the
            // default device, which may differ from the first enumerated one.
            source.clip = Microphone.Start(device, true, 1, AudioSettings.outputSampleRate);
            source.Play();

            int dspBufferSize, dspNumBuffers;
            AudioSettings.GetDSPBufferSize(out dspBufferSize, out dspNumBuffers);

            // Trail the mic write head by three DSP buffers (wrapped within
            // the 1-second clip) so playback never outruns capture.
            source.timeSamples = (Microphone.GetPosition(device) + AudioSettings.outputSampleRate - 3 * dspBufferSize * dspNumBuffers) % AudioSettings.outputSampleRate;
        }
        else
        {
            Microphone.End(device);
            source.clip = prevClip;
            source.Play();
        }
    }

    // Peak follower: rises instantly, decays at an accelerating rate.
    MicLoudness = LevelMax();
    if (MicLoudness > volumeBuffer)
    {
        volumeBuffer = MicLoudness;
        bufferDecrease = 0.0005f;
    }
    if (MicLoudness < volumeBuffer)
    {
        volumeBuffer -= bufferDecrease;
        bufferDecrease *= 1.2f;
    }
    if (volumeBuffer > highestVolume)
    {
        highestVolume = volumeBuffer;
    }
    //Debug.Log(volumeBuffer);

    // Map the smoothed loudness onto a 0.75x-1.5x uniform scale.
    float scale = PitchUtils.map(volumeBuffer, 0, highestVolume, 0.75f, 1.5f);
    hand1.localScale = new Vector3(scale, scale, scale);
    hand2.localScale = new Vector3(scale, scale, scale);
}
/// <summary>
/// Reads the DSP buffer configuration at startup.
/// </summary>
void Start()
{
    // re-initialise the audio settings to make sure that
    // multi channel output is activated after changing to soundflower
    // findme fix: adding lines here makes script behave oddly. In Awake() it causes all audio to fail
    // AudioConfiguration config = AudioSettings.GetConfiguration ();
    // AudioSettings.Reset (config);

    // get the audio buffer sizes
    AudioSettings.GetDSPBufferSize(out bufferSize, out numBuffers);
}
/// <summary>
/// Creates the streaming granular AudioClip sized to one DSP buffer;
/// samples are produced on demand by OnAudioRead.
/// </summary>
private void InitAudioClip()
{
    int dspBufferLength = 1;
    int dspBufferCount = 1;
    // FIX: GetDSPBufferSize takes 'out' parameters; the decompiled 'ref'
    // call was not valid C#.
    AudioSettings.GetDSPBufferSize(out dspBufferLength, out dspBufferCount);

    // FIX: restored valid C# for the decompiler artifacts
    // (get_name()/get_channels() accessors and the __methodptr callback).
    this.granularClip = AudioClip.Create(this.sourceClip.name + " (granular)", dspBufferLength, this.sourceClip.channels, this.sampleRate, true, new AudioClip.PCMReaderCallback(this.OnAudioRead));
    this.sourceChannels = this.sourceClip.channels;
}
/// <summary>
/// Called when the audio configuration changes, e.g. when earphones or an
/// HDMI device are connected.
/// </summary>
/// <param name="deviceWasChanged">True when triggered by a device change.</param>
void OnAudioConfigurationChanged(bool deviceWasChanged)
{
    int bufferLength, numBuffers;
    AudioSettings.GetDSPBufferSize(out bufferLength, out numBuffers);

    AudioConfiguration config = AudioSettings.GetConfiguration();
    // FIX: the format item was "{0 : #, #}"; the stray spaces become part of
    // the custom numeric format and garble the rendered sample rate.
    // "{0:#,#}" prints it with thousands separators as intended.
    string m_audio_info = string.Format("Audio : {0:#,#} Hz {1} {2} samples {3} buffers", config.sampleRate, config.speakerMode.ToString(), config.dspBufferSize, numBuffers);
    Debugs.Log("[AudioInfo]", m_audio_info);
}
/// <summary>
/// Caches the output sample rate and allocates the per-block cutoff
/// parameter buffer (buffer length * output channel count).
/// </summary>
void Start()
{
    SampleRate = AudioSettings.outputSampleRate;

    int dspBufferLength;
    int dspBufferCount;
    AudioSettings.GetDSPBufferSize(out dspBufferLength, out dspBufferCount);

    int channelCount = AudioUnit.speakerModeToChannels(AudioSettings.speakerMode);
    cutoffData = new float[dspBufferLength * channelCount];
}
/// <summary>
/// Allocates the per-block audio and amplitude buffers, sized to one DSP
/// buffer of interleaved samples for the configured speaker mode.
/// </summary>
void Start()
{
    // SampleRate = AudioSettings.outputSampleRate;
    int dspBufferLength;
    int dspBufferCount;
    AudioSettings.GetDSPBufferSize(out dspBufferLength, out dspBufferCount);

    Channels = AudioUnit.speakerModeToChannels(AudioSettings.speakerMode);
    audioData = new float[dspBufferLength * Channels];
    amplitudeData = new float[dspBufferLength * Channels];
}
/// <summary>
/// Lazily creates the MIDI synthesizer (loading its sound bank) and the
/// sequencer that drives it.
/// </summary>
/// <returns>True when both synth and sequencer are ready.</returns>
private bool InitSynth()
{
    // Get peer AudioSource
    AudioSource audioSource = GetComponent<AudioSource>();
    if (audioSource == null)
    {
        DaggerfallUnity.LogMessage("DaggerfallSongPlayer: Could not find AudioSource component.");
        return false;
    }

    // Create synthesizer and load bank
    if (midiSynthesizer == null)
    {
        // FIX: compare the enum value directly rather than its ToString()
        // output — the string comparison was fragile and allocates.
        channels = (AudioSettings.driverCapabilities == AudioSpeakerMode.Mono) ? 1 : 2;

        // Create synth
        AudioSettings.GetDSPBufferSize(out bufferLength, out numBuffers);
        midiSynthesizer = new Synthesizer(sampleRate, channels, bufferLength / numBuffers, numBuffers, polyphony);

        // Load bank data
        byte[] bankData = LoadBank(SoundBank);
        if (bankData == null)
        {
            return false;
        }

        midiSynthesizer.LoadBank(new MyMemoryFile(bankData, SoundBank));
        midiSynthesizer.ResetSynthControls(); // Need to do this for bank to load properly, don't know why
    }

    // Create sequencer
    if (midiSequencer == null)
    {
        midiSequencer = new MidiFileSequencer(midiSynthesizer);
    }

    // Check init
    if (midiSynthesizer == null || midiSequencer == null)
    {
        DaggerfallUnity.LogMessage("DaggerfallSongPlayer: Failed to init synth.");
        return false;
    }

    return true;
}
private IDictionary<string, GCHandle> m_callbacks = new Dictionary<string, GCHandle>(); //a map of GCHandles pinned callbacks in memory: kept for unpinning during Dispose()

/*
 * constructor sets up the OPCODE6DIR64 directory that holds the Csound plugins.
 * also creates an instance of Csound and compiles it
 */
public CsoundUnityBridge(string csoundDir, string csdFile)
{
    Debug.Log($"CsoundUnityBridge constructor from dir: {csoundDir}\ncsdFile: \n{csdFile}");

#if (UNITY_EDITOR_WIN || UNITY_STANDALONE_WIN)
    Csound6.NativeMethods.csoundSetGlobalEnv("OPCODE6DIR64", csoundDir);
    //Csound6.NativeMethods.csoundSetGlobalEnv("SFDIR", Application.streamingAssetsPath + "/CsoundFiles");
    //Csound6.NativeMethods.csoundSetGlobalEnv("SSDIR", Application.streamingAssetsPath + "/CsoundFiles");
    //Csound6.NativeMethods.csoundSetGlobalEnv("SADIR", Application.streamingAssetsPath + "/CsoundFiles");
#elif UNITY_EDITOR_OSX || UNITY_STANDALONE_OSX
    //if (Directory.Exists(csoundDir+"/CsoundLib64.framework/Resources/Opcodes64"))
    var opcodePath = Path.GetFullPath(Path.Combine(csoundDir, "CsoundLib64.bundle/Contents/MacOS"));
    Debug.Log($"opcodePath {opcodePath} exists? " + Directory.Exists(opcodePath));
    Csound6.NativeMethods.csoundSetGlobalEnv("OPCODE6DIR64", opcodePath);
#elif UNITY_ANDROID
    Csound6.NativeMethods.csoundSetGlobalEnv("OPCODE6DIR64", csoundDir);
    //Csound6.NativeMethods.csoundSetGlobalEnv("SFDIR", Application.persistentDataPath);
    //Csound6.NativeMethods.csoundSetGlobalEnv("SSDIR", Application.persistentDataPath);
    //Csound6.NativeMethods.csoundSetGlobalEnv("SADIR", Application.persistentDataPath);
#endif

    Csound6.NativeMethods.csoundInitialize(1);
    csound = Csound6.NativeMethods.csoundCreate(System.IntPtr.Zero);
    // FIX: the old check 'csound == null' lifted IntPtr's '==' to
    // Nullable<IntPtr> and was always false, so a failed csoundCreate was
    // never detected. Compare against IntPtr.Zero instead.
    if (csound == System.IntPtr.Zero)
    {
        Debug.LogError("Couldn't create Csound!");
        return;
    }

    int systemBufferSize;
    int systemNumBuffers;
    AudioSettings.GetDSPBufferSize(out systemBufferSize, out systemNumBuffers);
    Debug.Log($"System buffer size: {systemBufferSize}, buffer count: {systemNumBuffers}, samplerate: {AudioSettings.outputSampleRate}");

    // The host (Unity) drives audio I/O; suppress Csound's own audio output
    // (-n) and displays (-d), and buffer its messages for later retrieval.
    Csound6.NativeMethods.csoundSetHostImplementedAudioIO(csound, 1, 0);
    Csound6.NativeMethods.csoundCreateMessageBuffer(csound, 0);
    Csound6.NativeMethods.csoundSetOption(csound, "-n");
    Csound6.NativeMethods.csoundSetOption(csound, "-d");

    // Override the orchestra's sample/control rates to match Unity's output.
    var parms = GetParams();
    parms.control_rate_override = AudioSettings.outputSampleRate;
    parms.sample_rate_override = AudioSettings.outputSampleRate;
    //parms.e0dbfs_override = 1;
    SetParams(parms);

    int ret = Csound6.NativeMethods.csoundCompileCsdText(csound, csdFile);
    Csound6.NativeMethods.csoundStart(csound);
    var res = PerformKsmps();
    Debug.Log($"PerformKsmps: {res}");

    // FIX: simplified the redundant 'ret == 0 ? true : false'.
    compiledOk = ret == 0;
    Debug.Log($"CsoundCompile: {compiledOk}");
}
/// <summary>
/// Lua binding for AudioSettings.GetDSPBufferSize: pushes the buffer length
/// and buffer count back to the Lua stack.
/// </summary>
static int GetDSPBufferSize(IntPtr L)
{
    LuaScriptMgr.CheckArgsCount(L, 2);

    // FIX: removed the GetNetObject calls that unmarshaled the incoming Lua
    // arguments — both values are 'out' parameters and were immediately
    // overwritten, so the reads were dead work.
    int bufferLength;
    int numBuffers;
    AudioSettings.GetDSPBufferSize(out bufferLength, out numBuffers);

    LuaScriptMgr.Push(L, bufferLength);
    LuaScriptMgr.Push(L, numBuffers);
    return 2;
}
/// <summary>
/// Creates the Faust API sized to the current DSP buffer and wires it up to
/// a new processor instance.
/// </summary>
void IProcessorConfig.Init()
{
    faustApi = new FaustApi();

    int dspBufferSize;
    int dspBufferCount;
    AudioSettings.GetDSPBufferSize(out dspBufferSize, out dspBufferCount);

    // Stereo (2-channel) processing at the engine's output sample rate.
    faustApi.Init(2, dspBufferSize, AudioSettings.outputSampleRate);

    faustProcessor = new FaustProcessor();
    faustProcessor.SetFaustApi(faustApi);
}
/// <summary>
/// Caches the DSP buffer settings, grabs the name input field, and warns
/// when this recorder has no AudioListener attached.
/// </summary>
void Start()
{
    AudioSettings.GetDSPBufferSize(out bufferSize, out numBuffers);

    enterName = enterNameObj.GetComponent<InputField>();

    if (GetComponent<AudioListener>() == null)
    {
        print("put audiolistener on recorder!");
    }
}
/// <summary>
/// Computes how many libpd ticks fit in one DSP buffer, then opens PD audio
/// (stereo in/out) at Unity's output sample rate.
/// </summary>
/// <returns>The LibPD.OpenAudio result code.</returns>
private int OpenPd()
{
    int dspBufferSize;
    int dspBufferCount;
    AudioSettings.GetDSPBufferSize(out dspBufferSize, out dspBufferCount);

    numberOfTicks = dspBufferSize / LibPD.BlockSize;

    int outputSampleRate = AudioSettings.outputSampleRate;
    return LibPD.OpenAudio(2, 2, outputSampleRate);
}
/// <summary>
/// Caches the sample rate and the per-sample phase increment (radians per
/// sample at 1 Hz), then allocates the frequency and pulse-width buffers.
/// </summary>
void Start()
{
    SampleRate = AudioSettings.outputSampleRate;
    Omega = (1.0 / AudioSettings.outputSampleRate) * TWOPI;

    int dspBufferLength;
    int dspBufferCount;
    AudioSettings.GetDSPBufferSize(out dspBufferLength, out dspBufferCount);

    Channels = AudioUnit.speakerModeToChannels(AudioSettings.speakerMode);
    frqData = new float[dspBufferLength * Channels];
    pwData = new float[dspBufferLength * Channels];
}