private void Update()
{
    // Keep the native microphone gain in sync with the inspector value.
    CheckForErrorOnCall(MicStream.MicSetGain(InputGain));

    if (Input.GetKeyDown(KeyCode.Q))
    {
        // Q: begin streaming microphone audio (no preview).
        CheckForErrorOnCall(MicStream.MicStartStream(KeepAllData, false));
    }
    else if (Input.GetKeyDown(KeyCode.W))
    {
        // W: stop the live stream.
        CheckForErrorOnCall(MicStream.MicStopStream());
    }
    else if (Input.GetKeyDown(KeyCode.A))
    {
        // A: start recording to the configured wav file (no preview).
        CheckForErrorOnCall(MicStream.MicStartRecording(SaveFileName, false));
    }
    else if (Input.GetKeyDown(KeyCode.S))
    {
        // S: finish the recording, report where it was written, then stop the stream.
        string savedPath = MicStream.MicStopRecording();
        Debug.Log("Saved microphone audio to " + savedPath);
        CheckForErrorOnCall(MicStream.MicStopStream());
    }

    // Scale the object uniformly with the current microphone amplitude.
    float scale = minObjectScale + averageAmplitude;
    gameObject.transform.localScale = new Vector3(scale, scale, scale);
}
private void Awake()
{
    // Optionally attach and configure a chorus effect.
    if (UseChorus)
    {
        chorusFilter = gameObject.AddComponent<AudioChorusFilter>();
        chorusFilter.enabled = true;
        UpdateChorusFilter();
    }

    // Optionally attach and configure an echo effect.
    if (UseEcho)
    {
        echoFilter = gameObject.AddComponent<AudioEchoFilter>();
        echoFilter.enabled = true;
        UpdateEchoFilter();
    }

    // Configure the microphone stream to use the high quality voice source at
    // the application's output sample rate.
    MicStream.MicInitializeCustomRate(
        (int)MicStream.StreamCategory.HIGH_QUALITY_VOICE,
        AudioSettings.outputSampleRate);

    // Apply the initial gain, then start the stream paused.
    // Arguments: do not keep the data, do not preview.
    MicStream.MicSetGain(InputGain);
    MicStream.MicStartStream(false, false);
    MicStream.MicPause();
}
private void OnAudioFilterRead(float[] buffer, int numChannels)
{
    // Runs on Unity's audio thread: pull a frame of mic data from the native
    // plugin and queue it for network transmission.
    try
    {
        // Guards micBuffer against the main-thread reader (see Update).
        audioDataMutex.WaitOne();

        if (micStarted && hasServerConnection)
        {
            if (CheckForErrorOnCall(MicStream.MicGetFrame(buffer, buffer.Length, numChannels)))
            {
                // The circular buffer works in bytes; each sample occupies
                // sizeof(float) bytes (was a magic "4").
                int dataSize = buffer.Length * sizeof(float);
                if (micBuffer.Write(buffer, 0, dataSize) != dataSize)
                {
                    Debug.LogError("Send buffer filled up. Some audio will be lost.");
                }
            }
        }
    }
    catch (Exception e)
    {
        // Log the full exception (message + stack trace); e.Message alone
        // discards the stack trace and makes failures hard to diagnose.
        Debug.LogError(e.ToString());
    }
    finally
    {
        audioDataMutex.ReleaseMutex();
    }
}
private void EnableMicrophone()
{
    // Distance between the user (main camera) and the parent object.
    // Vector3.Distance is already non-negative, so no Abs is required.
    float distance = Vector3.Distance(
        ParentObject.transform.position,
        Camera.main.transform.position);

    bool shouldListen = false;
    if (distance <= MaxDistance)
    {
        // Within range: also require the user to be facing the object. We
        // raycast along the gaze direction and test for a hit on the
        // "Echoer" layer (triggers included).
        RaycastHit hitInfo;
        shouldListen = Physics.Raycast(
            Camera.main.transform.position,
            Camera.main.transform.forward,
            out hitInfo,
            20f,
            LayerMask.GetMask("Echoer"),
            QueryTriggerInteraction.Collide);
    }

    if (shouldListen)
    {
        // Resume the microphone stream.
        MicStream.MicResume();
    }
    else
    {
        // Pause the microphone stream.
        MicStream.MicPause();
    }
}
/// <summary>
/// Only used for the holotoolkit-recording version. Audio-thread callback that
/// pulls microphone data from the MicStream DLL and accumulates it.
/// </summary>
/// <param name="buffer">Audio sample buffer to fill (interleaved floats).</param>
/// <param name="numChannels">Number of interleaved channels in the buffer.</param>
private void OnAudioFilterRead(float[] buffer, int numChannels)
{
    if (useUnityMic)
    {
        return;
    }

    // FIX: removed the per-callback Debug.Log — this method runs on the audio
    // thread many times per second, and logging (plus the string
    // concatenation it forced) allocates and can starve the audio pipeline.

    // This is where we call into the DLL and let it fill our audio buffer for us.
    CheckForErrorOnCall(MicStream.MicGetFrame(buffer, buffer.Length, numChannels));

    if (_numChannels != numChannels)
    {
        _numChannels = numChannels;
    }

    // Accumulate all captured samples.
    // NOTE(review): Concat + ToArray reallocates the entire history on every
    // callback (O(n^2) overall), and the first assignment aliases Unity's own
    // buffer. A List<float> field would be cheaper, but the field is declared
    // elsewhere, so its type is left unchanged here.
    if (audioBuffer == null)
    {
        audioBuffer = buffer;
    }
    else
    {
        audioBuffer = audioBuffer.Concat(buffer).ToArray();
    }

    // Average absolute amplitude of this frame, used elsewhere for visualization.
    float sumOfValues = 0;
    for (int i = 0; i < buffer.Length; i++)
    {
        sumOfValues += Mathf.Abs(buffer[i]);
    }
    averageAmplitude = sumOfValues / buffer.Length;
}
// On device, handle every way the program can be suspended in as few lines as
// possible: pause the mic when the app pauses, resume it when the app returns.
private void OnApplicationPause(bool pause)
{
    CheckForErrorOnCall(pause ? MicStream.MicPause() : MicStream.MicResume());
}
// Audio-thread callback: while capture is active (_isStart), pull a mic frame
// from the native plugin and append its samples, converted to 16-bit, to
// samplingData (flushed by WriteAudioData when capture stops — see Update).
// NOTE(review): lock (this) is an anti-pattern (external code can lock the
// same object); a private readonly gate object would be safer — but the same
// lock target may be used elsewhere in this class, so it is left unchanged.
private void OnAudioFilterRead(float[] buffer, int numChannels)
{
    // Fast exit when capture is not running.
    if (!_isStart) return;
    lock (this)
    {
        // Let the MicStream DLL fill the buffer, then accumulate each sample
        // as Int16 for later writing.
        CheckForErrorOnCall(MicStream.MicGetFrame(buffer, buffer.Length, numChannels));
        foreach (var f in buffer)
        {
            samplingData.Add(FloatToInt16(f));
        }
    }
}
private void OnAudioFilterRead(float[] buffer, int numChannels)
{
    // Ask the MicStream DLL to fill the audio buffer for us.
    CheckForErrorOnCall(MicStream.MicGetFrame(buffer, buffer.Length, numChannels));

    // Compute the average absolute amplitude of the frame just captured.
    float total = 0;
    foreach (float sample in buffer)
    {
        total += Mathf.Abs(sample);
    }
    averageAmplitude = total / buffer.Length;
}
/// <summary>
/// Starts audio recording via either the Unity Microphone API or the MicStream
/// DLL, pausing voice-command recognition for the duration (the recognizer and
/// the recording would otherwise contend for the microphone).
/// </summary>
void StartRecording()
{
    WEKITSpeechManager.Instance.PauseRecognizer();
    Debug.Log("Stopped listening to voice commands.");
    isRecording = true;
    if (useUnityMic)
    {
        //Check if there is at least one microphone connected
        if (Microphone.devices.Length <= 0)
        {
            //Throw a warning message at the console if there isn't
            Debug.LogWarning("Microphone not connected!");
        }
        else //At least one microphone is present
        {
            //Set 'micConnected' to true
            micConnected = true;
            //Get the default microphone recording capabilities
            Microphone.GetDeviceCaps(null, out minFreq, out maxFreq);
            //According to the documentation, if minFreq and maxFreq are zero, the microphone supports any frequency...
            if (minFreq == 0 && maxFreq == 0)
            {
                //...meaning 44100 Hz can be used as the recording sampling rate
                maxFreq = 44100;
            }
            //Get the attached AudioSource component
            goAudioSource = this.GetComponent<AudioSource>();
            //If the audio from any microphone isn't being captured
            if (!Microphone.IsRecording(null))
            {
                //Start recording and store the audio captured from the microphone at the AudioClip in the AudioSource
                // (looping clip, 20 seconds long, at the highest supported rate)
                goAudioSource.clip = Microphone.Start(null, true, 20, maxFreq);
            }
        }
    }
    else
    {
        // MicStream DLL path: initialize at the app's output sample rate,
        // apply the configured gain, and record straight to a timestamped wav.
        CheckForErrorOnCall(MicStream.MicInitializeCustomRate((int)StreamType, AudioSettings.outputSampleRate));
        CheckForErrorOnCall(MicStream.MicSetGain(InputGain));
        //CheckForErrorOnCall(MicStream.MicStartStream(KeepAllData, false));
        SaveFileName = "WEKIT_Audio_" + DateTime.Now.ToFileTimeUtc() + ".wav";
        CheckForErrorOnCall(MicStream.MicStartRecording(SaveFileName, false));
    }
    Debug.Log("Started audio recording");
    // Red indicates "recording" to the user.
    changeColor(Color.red);
}
private void Awake()
{
    audioSource = GetComponent<AudioSource>();

    // Initialize the mic at the application's output sample rate. An
    // "already running" stream is treated the same as a fresh success.
    int initResult = MicStream.MicInitializeCustomRate((int)Streamtype, AudioSettings.outputSampleRate);
    CheckForErrorOnCall(initResult);

    bool initOk = initResult == 0 || initResult == (int)MicStream.ErrorCodes.ALREADY_RUNNING;
    if (initOk && CheckForErrorOnCall(MicStream.MicSetGain(InputGain)))
    {
        // Mute local monitoring unless the user opted to hear themselves.
        audioSource.volume = HearSelf ? 1.0f : 0.0f;
        micStarted = CheckForErrorOnCall(MicStream.MicStartStream(false, false));
    }
}
private void Update()
{
    // Keep gain and local-monitoring volume in sync with inspector settings.
    CheckForErrorOnCall(MicStream.MicSetGain(InputGain));
    audioSource.volume = HearSelf ? 1.0f : 0.0f;
    try
    {
        // Guards micBuffer against the audio-thread writer (OnAudioFilterRead).
        audioDataMutex.WaitOne();
        var connection = GetActiveConnection();
        hasServerConnection = (connection != null);
        if (hasServerConnection)
        {
            // Drain whole packets: 4 bytes per float sample, AudioPacketSize
            // samples per packet.
            while (micBuffer.UsedCapacity >= 4 * AudioPacketSize)
            {
                TransmitAudio(connection);
            }
        }
    }
    catch (Exception e)
    {
        Debug.LogError(e.Message);
    }
    finally
    {
        audioDataMutex.ReleaseMutex();
    }
    #region DebugInfo
    // Debug aid: once the test ring buffer fills, wrap its contents in an
    // AudioClip and play it locally so captured audio can be verified by ear.
    if (SaveTestClip && testCircularBuffer.UsedCapacity == testCircularBuffer.TotalCapacity)
    {
        // UsedCapacity is in bytes; each float sample is 4 bytes.
        float[] testBuffer = new float[testCircularBuffer.UsedCapacity / 4];
        testCircularBuffer.Read(testBuffer, 0, testBuffer.Length * 4);
        testCircularBuffer.Reset();
        // NOTE(review): 2 channels at 48000 Hz are hard-coded — confirm they
        // match the actual capture format.
        TestClip = AudioClip.Create("testclip", testBuffer.Length / 2, 2, 48000, false);
        TestClip.SetData(testBuffer, 0);
        if (!testSource)
        {
            // Lazily create a child object to host the playback AudioSource.
            GameObject testObj = new GameObject("testclip");
            testObj.transform.parent = transform;
            testSource = testObj.AddComponent<AudioSource>();
        }
        testSource.PlayClip(TestClip);
        SaveTestClip = false;
    }
    #endregion
}
// Update is called once per frame.
// W starts a capture session, S stops it and writes the captured audio out.
// NOTE: statement order matters here relative to the audio-thread callback
// (OnAudioFilterRead): _isStart is raised only after the stream is started,
// and lowered before the stream is stopped and the data written.
void Update()
{
    if (Input.GetKeyDown(KeyCode.W))
    {
        // Discard any previous capture, then start streaming with the
        // configured gain.
        samplingData.Clear();
        CheckForErrorOnCall(MicStream.MicStartStream(KeepAllData, false));
        CheckForErrorOnCall(MicStream.MicSetGain(InputGain));
        _isStart = true;
    }
    else if (Input.GetKeyDown(KeyCode.S))
    {
        // Stop capturing, then flush what was recorded.
        _isStart = false;
        CheckForErrorOnCall(MicStream.MicStopStream());
        WriteAudioData();
    }
}
/// <summary>
/// Makes sure the audio annotation object is displayed in the right color for
/// the current audio mode.
/// Green: audio playing.
/// Red: audio recording.
/// </summary>
void Update()
{
    if (isRecording)
    {
        if (useUnityMic)
        {
            changeColor(Color.red);
        }
        else
        {
            // Keep the native gain in sync while recording via the DLL.
            CheckForErrorOnCall(MicStream.MicSetGain(InputGain));
            // NOTE(review): Unity's Color expects components in 0..1, but 255
            // and this computed value are far outside that range — this likely
            // should use Color32 or divide by 255. Also check the precedence:
            // "1 / minSize" binds tighter than the surrounding additions,
            // which may not be what was intended. Left as-is pending
            // confirmation of the intended color formula.
            float blue = (minSize + averageAmplitude + 1 / minSize + 2) * 255;
            changeColor(new Color(255, 0, blue));
        }
    }
    else if (goAudioSource.isPlaying)
    {
        changeColor(Color.green);
    }
}
private void Awake()
{
    // Bring up the mic at the application's output sample rate and apply the
    // configured input gain.
    CheckForErrorOnCall(MicStream.MicInitializeCustomRate((int)StreamType, AudioSettings.outputSampleRate));
    CheckForErrorOnCall(MicStream.MicSetGain(InputGain));

    if (!ListenToAudioSource)
    {
        // Zeroing the AudioSource volume mutes mic monitoring.
        GetComponent<AudioSource>().volume = 0;
    }

    if (AutomaticallyStartStream)
    {
        CheckForErrorOnCall(MicStream.MicStartStream(KeepAllData, false));
    }

    // Usage hints for the demo.
    print("MicStream selector demo");
    print("press Q to start stream to audio source, W will stop that stream");
    print("It will start a recording and save it to a wav file. S will stop that recording.");
    print("Since this all goes through the AudioSource, you can mute the mic while using it there, or do anything else you would do with an AudioSource");
    print("In this demo, we start the stream automatically, and then change the size of the gameobject based on microphone signal amplitude");
}
/// <summary>
/// Stops the active recording — Unity mic or MicStream DLL, matching the path
/// chosen in StartRecording — then resumes voice-command recognition.
/// </summary>
void StopRecording()
{
    isRecording = false;
    if (useUnityMic)
    {
        // Stop the Unity audio recording.
        Microphone.End(null);
    }
    else
    {
        // Finalize the wav file, then shut down the stream and the DLL.
        // (Dead commented-out AudioClip-reconstruction code removed.)
        OutputPath = MicStream.MicStopRecording();
        Debug.Log("Saved audio recording to " + OutputPath + ", " + SaveFileName);
        CheckForErrorOnCall(MicStream.MicStopStream());
        CheckForErrorOnCall(MicStream.MicDestroy());
    }
    Debug.Log("Stopped audio recording.");

    // Hand the microphone back to the speech recognizer.
    WEKITSpeechManager.Instance.ContinueRecognizer();
    Debug.Log("Listening to voice commands again.");
    changeColor(currentColor);
}
private void Update()
{
    DateTime now = DateTime.Now;

    // Throttle: refresh gain, filters, and mic state only every
    // UpdateInterval seconds.
    // BUG FIX: TimeSpan.Milliseconds is only the millisecond *component*
    // (0-999) of the elapsed time, so intervals of one second or more never
    // compared correctly; TotalMilliseconds is the full elapsed duration.
    if ((UpdateInterval * 1000.0f) <= (now - lastUpdate).TotalMilliseconds)
    {
        // Update the input gain.
        MicStream.MicSetGain(InputGain);

        // Update the filter properties.
        if (UseChorus)
        {
            UpdateChorusFilter();
        }
        if (UseEcho)
        {
            UpdateEchoFilter();
        }

        // Enable / disable the microphone based on user distance and gaze.
        EnableMicrophone();

        lastUpdate = now;
    }
}
private bool CheckForErrorOnCall(int returnCode)
{
    // Thin wrapper so call sites read cleanly; the shared MicStream helper
    // translates the native return code into a success flag.
    return MicStream.CheckForErrorOnCall(returnCode);
}
private void OnDestroy()
{
    // Tear down the native microphone plugin when this component goes away,
    // reporting any error through the shared checker.
    CheckForErrorOnCall(MicStream.MicDestroy());
}
private void OnAudioFilterRead(float[] buffer, int numChannels)
{
    // Audio-thread callback: let the MicStream DLL fill the buffer directly.
    // The native return code is not checked here.
    MicStream.MicGetFrame(buffer, buffer.Length, numChannels);
}
private static void CheckForErrorOnCall(int returnCode)
{
    // Forward to the shared MicStream helper, which handles error reporting;
    // the success flag it returns is intentionally ignored here.
    MicStream.CheckForErrorOnCall(returnCode);
}
private void OnDestroy()
{
    // Release the native microphone resources when this component is destroyed.
    MicStream.MicDestroy();
}
private void Awake()
{
    // Initialize the native mic stream at the application's output sample
    // rate, routing any native error through the shared checker.
    CheckForErrorOnCall(MicStream.MicInitializeCustomRate((int)StreamType, AudioSettings.outputSampleRate));
}