// Grabs the first attached microphone, opens a 5-second looping capture clip
// on it, routes it through the configured mixer group and starts playback.
void Start()
{
    src = GetComponent<AudioSource>();

    if (Microphone.devices.Length > 0)
    {
        // FIX: devices[] already contains strings — no ToString() needed.
        string device = Microphone.devices[0];

        // FIX: query the caps of the device we actually record from
        // (the original passed null, i.e. the system default device).
        Microphone.GetDeviceCaps(device, out minFreq, out maxFreq);

        // According to the documentation, if minFreq and maxFreq are zero,
        // the microphone supports any frequency...
        if (minFreq == 0 && maxFreq == 0)
        {
            // ...meaning 44100 Hz can be used as the recording sampling rate.
            maxFreq = 44100;
        }

        src.clip = Microphone.Start(device, true, 5, maxFreq);
        src.outputAudioMixerGroup = microphoneMixer;
        src.Play();
    }
    else
    {
        print("NO MICROPHONES FOUND");
    }
}
// Creates the dictation recognizer, hooks up its event callbacks, caches the
// default microphone's maximum sampling rate and resets the UI state fields.
void Awake()
{
    dictationRecognizer = new DictationRecognizer();

    // Route every recognizer event into the matching handler on this class.
    dictationRecognizer.DictationHypothesis += DictationRecognizer_DictationHypothesis;
    dictationRecognizer.DictationResult += DictationRecognizer_DictationResult;
    dictationRecognizer.DictationComplete += DictationRecognizer_DictationComplete;
    dictationRecognizer.DictationError += DictationRecognizer_DictationError;

    // Query the maximum frequency of the default microphone; the minimum is
    // deliberately discarded.
    int ignoredMinFrequency;
    Microphone.GetDeviceCaps(deviceName, out ignoredMinFrequency, out samplingRate);

    // Cache for the text currently displayed in the text box.
    textSoFar = new StringBuilder();

    // Used to reset the UI once the microphone finishes a started recording.
    hasRecordingStarted = false;
}
// Starts looping playback of live default-microphone input and logs the
// resulting capture latency in milliseconds.
void Start()
{
    // FIX: cache the component instead of calling GetComponent<AudioSource>()
    // seven separate times.
    AudioSource source = GetComponent<AudioSource>();

    source.Stop();
    source.loop = true;

    int minFreq, maxFreq;
    Microphone.GetDeviceCaps(null, out minFreq, out maxFreq);

    // maxFreq == 0 means the device supports any rate; default to 44.1 kHz.
    source.clip = Microphone.Start(null, true, 1, maxFreq > 0 ? maxFreq : 44100);

    // Spin until the microphone has produced its first samples so playback
    // begins as close to the capture position as possible.
    while (source.clip != null)
    {
        int delay = Microphone.GetPosition(null);
        if (delay > 0)
        {
            source.Play();
            Debug.Log("Latency = " + (1000.0f / source.clip.frequency * delay) + " msec");
            break;
        }
    }
}
// based on VJkit's VJMicrohpone.cs
// Tears down capture on the current device and restarts it on newDeviceName,
// clamping the live sample rate into the new device's supported range.
public void HandleLiveInputSwitch(string newDeviceName)
{
    // Shut down the previous device and its launch coroutine.
    liveAudioSource.Stop();
    liveAudioSource.clip = null;
    Microphone.End(currentLiveDeviceName);
    StopCoroutine("LaunchLiveAudioSource");

    // Ask the new device for its supported frequency range.
    int minFrequency = 0;
    int maxFrequency = 0;
    Microphone.GetDeviceCaps(newDeviceName, out minFrequency, out maxFrequency);

    // Zero caps mean "any rate"; only clamp when real limits were reported.
    if (minFrequency > 0 && maxFrequency > 0)
    {
        liveAudioSampleRate = Mathf.Clamp(liveAudioSampleRate, minFrequency, maxFrequency);
    }

    currentLiveDeviceName = newDeviceName;
    StartCoroutine("LaunchLiveAudioSource");
}
// Picks a recording frequency for the given microphone (null = default device).
// When the device's minimum is at least 16 kHz, returns the closest supported
// frequency (or the minimum if none is found); otherwise returns the maximum.
public static int GetFreqForMic(string deviceName = null)
{
    int minFreq;
    int maxFreq;
    Microphone.GetDeviceCaps(deviceName, out minFreq, out maxFreq);

    if (minFreq >= 16000)
    {
        // FIX: call FindClosestFreq once instead of evaluating it in both
        // the condition and the return statement.
        int closest = FindClosestFreq(minFreq, maxFreq);
        return closest != 0 ? closest : minFreq;
    }

    return maxFreq;
}
// Starts the microphone: logs every attached device with its frequency range,
// then begins a 10-second looping capture on the last device found.
private void ActiveMicrophone()
{
    string nameDevice = null;
    int minFreq = 0;
    int maxFreq = 0;

    foreach (string s in Microphone.devices)
    {
        // FIX: the caps were previously queried for the *previous* device
        // (nameDevice lagged one iteration behind s), so the logged range
        // never matched the logged name. Query the current device instead.
        Microphone.GetDeviceCaps(s, out minFreq, out maxFreq);
        Debug.Log("Device Name: " + s + " [" + minFreq.ToString() + "-" + maxFreq.ToString() + "]");
        nameDevice = s; // after the loop this holds the last device
    }

    if (Microphone.devices.Length != 0)
    {
        microphone = Microphone.Start(nameDevice, true, 10, FREQUENCY_RATE);
        isSpeaking = true;
    }
    else
    {
        Debug.Log("Microfono no encotrado");
    }
}
/// <summary>
/// Constructor. Wires the dictation recognizer callbacks and caches the
/// default microphone's maximum sampling rate, then starts the source.
/// Does nothing when instantiated outside of play mode.
/// </summary>
public DictationInputSource() : base("Dictation")
{
    // Edit-mode instantiation gets no recognizer wiring.
    if (!Application.isPlaying)
    {
        return;
    }

    source = this;
    dictationResult = string.Empty;

    dictationRecognizer = new DictationRecognizer();
    dictationRecognizer.DictationHypothesis += DictationRecognizer_DictationHypothesis;
    dictationRecognizer.DictationResult += DictationRecognizer_DictationResult;
    dictationRecognizer.DictationComplete += DictationRecognizer_DictationComplete;
    dictationRecognizer.DictationError += DictationRecognizer_DictationError;

    // Only the default microphone's maximum frequency is needed; the
    // minimum is discarded.
    int discardedMinRate;
    Microphone.GetDeviceCaps(DeviceName, out discardedMinRate, out samplingRate);

    Run();
}
// (Re)initializes the cached AudioSource for live microphone playback on the
// device selected by GetDevicesIndex, then logs the capture latency.
public void InitMic()
{
    _audio.Stop();
    _audio.loop = true;
    _audio.mute = false;

    string device = Microphone.devices[GetDevicesIndex];

    // FIX: the caps were queried and then discarded while 44100 was
    // hard-coded. Keep 44.1 kHz when the device supports any rate (0/0),
    // otherwise clamp it into the reported range so Start() gets a valid rate.
    int minFreq, maxFreq;
    Microphone.GetDeviceCaps(device, out minFreq, out maxFreq);
    int frequency = (minFreq == 0 && maxFreq == 0) ? 44100 : Mathf.Clamp(44100, minFreq, maxFreq);

    _audio.clip = Microphone.Start(device, true, 10, frequency);

    // Spin until the mic produces samples, then begin playback.
    while (_audio.clip != null)
    {
        // FIX: poll the device we actually started — the original passed
        // null here (default device) while recording on devices[GetDevicesIndex].
        int delay = Microphone.GetPosition(device);
        if (delay > 0)
        {
            _audio.Play();
            // FIX: reuse the cached _audio instead of a fresh GetComponent.
            Debug.Log("Latency = " + (1000.0f / _audio.clip.frequency * delay) + " msec");
            break;
        }
    }
}
// Begins capturing voice from the configured Device into a one-second looping
// clip at the best rate the device supports, sizing the sample and FFT
// buffers to match. Returns true once recording is active; logs an error and
// returns false when a capture is already in progress.
public bool StartRecording()
{
    /*if (NetworkId == 0 && !VoiceChatSettings.Instance.LocalDebug)
     * {
     * Debug.LogError("NetworkId is not set");
     * return false;
     * }*/
    if (recording)
    {
        Debug.LogError("Already recording");
        return false;
    }

    targetFrequency = VoiceChatSettings.Instance.Frequency;
    targetSampleSize = VoiceChatSettings.Instance.SampleSize;

    int minFreq;
    int maxFreq;
    Microphone.GetDeviceCaps(Device, out minFreq, out maxFreq);

    // 0/0 caps mean any frequency is supported; otherwise record at the max.
    recordFrequency = (minFreq == 0 && maxFreq == 0) ? 44100 : maxFreq;

    // Size the capture chunk so it covers the same duration at the record
    // frequency as targetSampleSize does at targetFrequency (integer math).
    recordSampleSize = recordFrequency / (targetFrequency / targetSampleSize);

    clip = Microphone.Start(Device, true, 1, recordFrequency);
    sampleBuffer = new float[recordSampleSize];
    fftBuffer = new float[VoiceChatUtils.ClosestPowerOfTwo(targetSampleSize)];
    recording = true;

    // Notify listeners, if any are subscribed.
    if (StartedRecording != null)
    {
        StartedRecording();
    }

    return recording;
}
// Initialization coroutine: clears the recorder state, lists the attached
// microphones, configures the AudioSource for muted looping capture of the
// default device and begins playing once the mic has produced samples.
private IEnumerator Start()
{
    recorder = null;
    cameraInput = null;
    audioInput = null;
    microphoneSource = null;
    timerGoing = false;

    // Log every available capture device.
    foreach (var device in Microphone.devices)
    {
        Debug.Log("Name: " + device);
    }

    Microphone.GetDeviceCaps(null, out minFreq, out maxFreq);
    Debug.Log("minFreq:" + minFreq);
    Debug.Log("maxFreq:" + maxFreq);

    // 0/0 means the default device accepts any rate...
    if (minFreq == 0 && maxFreq == 0)
    {
        //...meaning 44100 Hz can be used as the recording sampling rate
        maxFreq = 44100;
    }

    startRecordBtn.SetActive(true);
    stoprecordBtn.SetActive(false);

    // Start microphone capture: muted + looping so the clip can be consumed
    // without being audible to the user.
    microphoneSource = this.GetComponent<AudioSource>();
    microphoneSource.mute = microphoneSource.loop = true;
    microphoneSource.bypassEffects = microphoneSource.bypassListenerEffects = false;
    microphoneSource.clip = Microphone.Start(null, true, 1, maxFreq);

    // Wait until samples are available, then begin playback.
    yield return new WaitUntil(() => Microphone.GetPosition(null) > 0);
    microphoneSource.Play();
}
// Use this for initialization.
// Counts the attached microphones and caches each device's supported
// frequency range plus the AudioSource used for playback.
void Start()
{
    // Number of connected capture devices.
    numMics = Microphone.devices.Length;

    if (numMics <= 0)
    {
        // Nothing to record from — warn and leave micConnected false.
        Debug.LogWarning("No microphone connected!");
        return;
    }

    micConnected = true;

    // One min/max entry per attached microphone.
    minFreqs = new int[numMics];
    maxFreqs = new int[numMics];

    // Get the recording capabilities of each microphone.
    for (int i = 0; i < numMics; i++)
    {
        Microphone.GetDeviceCaps(Microphone.devices[i], out minFreqs[i], out maxFreqs[i]);

        // Per the documentation, 0/0 means the device supports any
        // recording frequency...
        if (minFreqs[i] == 0 && maxFreqs[i] == 0)
        {
            // ...so 44100 Hz can be used as its recording sampling rate.
            maxFreqs[i] = 44100;
        }
    }

    // Playback component attached to this GameObject.
    goAudioSource = this.GetComponent<AudioSource>();
}
/// <summary>
/// Find the microphone to use and return its sample rate.
/// </summary>
/// <returns>New mic's sample rate, or -1 if the requested mic index does not exist.</returns>
internal int InitializeMic()
{
    // Make sure the requested mic index exists.
    if (Microphone.devices.Length <= MicNumberToUse)
    {
        Debug.LogWarning("No microphone connected!");
        return -1;
    }

    _currentMic = Microphone.devices[MicNumberToUse];
    // FIX: this is informational logging, not an error (was Debug.LogError).
    Debug.Log(_currentMic);

    int minFreq;
    int maxFreq;
    Microphone.GetDeviceCaps(_currentMic, out minFreq, out maxFreq);

    // Snap the device's max frequency to the nearest rate Mumble supports.
    int micSampleRate = MumbleClient.GetNearestSupportedSampleRate(maxFreq);
    NumSamplesPerOutgoingPacket = MumbleConstants.NUM_FRAMES_PER_OUTGOING_PACKET * micSampleRate / 100;

    if (micSampleRate != 48000)
    {
        Debug.LogWarning("Using a possibly unsupported sample rate of " + micSampleRate + " things might get weird");
    }
    Debug.Log("Device: " + _currentMic + " has freq: " + minFreq + " to " + maxFreq + " setting to: " + micSampleRate);

    _voiceHoldSamples = Mathf.RoundToInt(micSampleRate * VoiceHoldSeconds);

    // Optionally begin transmitting immediately, depending on the send mode.
    if (SendAudioOnStart && (VoiceSendingType == MicType.AlwaysSend || VoiceSendingType == MicType.Amplitude))
    {
        StartSendingAudio(micSampleRate);
    }

    return micSampleRate;
}
// Use this for initialization.
// Verifies a microphone is present, caches the default device's frequency
// range and grabs the AudioSource used for playback.
void Start()
{
    if (Microphone.devices.Length <= 0)
    {
        // No capture device attached — warn and leave micConnected false.
        Debug.LogWarning("Microphone not connected!");
        return;
    }

    micConnected = true;

    // Default-device recording capabilities.
    Microphone.GetDeviceCaps(null, out minFreq, out maxFreq);

    // Per the documentation, if minFreq and maxFreq are zero the microphone
    // supports any frequency...
    if (minFreq == 0 && maxFreq == 0)
    {
        // ...meaning 44100 Hz can be used as the recording sampling rate.
        maxFreq = 44100;
    }

    // Playback component attached to this GameObject.
    goAudioSource = this.GetComponent<AudioSource>();
}
// Caches the AudioSource, validates that a microphone is attached, records
// the default device's frequency range, then wires up MRTK input handling,
// hides the indicator and creates the DialogFlow service.
private void Awake()
{
    source = GetComponent<AudioSource>();
    if (source == null)
    {
        Debug.LogWarning("No AudioSource Component Provided!");
    }

    // Without a microphone there is nothing further to set up.
    if (Microphone.devices.Length <= 0)
    {
        Debug.LogWarning("No Microphone Connected!");
        return;
    }

    Microphone.GetDeviceCaps(null, out minimumFrequency, out maximumFrequency);

    // 0/0 caps mean the device supports any frequency; use 44.1 kHz.
    if (minimumFrequency == 0 && maximumFrequency == 0)
    {
        maximumFrequency = 44100;
    }

    CoreServices.InputSystem?.RegisterHandler<IMixedRealityInputHandler>(this);
    indicator.gameObject.SetActive(false);
    service = new DialogFlowService();
    isMicrophoneConnected = true;
}
// Selects a capture device and caches its name, frequency range and a sample
// buffer of sampleCount floats. An out-of-range micIndex falls back to the
// last available device; the recording frequency is capped at 44.1 kHz.
public void Initialize(int sampleCount = 1024, int micIndex = 0)
{
    sampleCount_ = sampleCount;
    data_ = new float[sampleCount];

    // Bail out when no capture device is attached.
    if (Microphone.devices.Length <= 0)
    {
        Debug.LogWarning("Microphone not connected!");
        return;
    }

    // Clamp the requested index to the available devices.
    int lastIndex = Microphone.devices.Length - 1;
    if (micIndex > lastIndex)
    {
        Debug.LogWarning("MIC_INDEX:" + micIndex + " are changed to " + lastIndex + ".");
        micIndex = lastIndex;
    }
    Debug.Log("Use:" + Microphone.devices[micIndex]);
    micName_ = Microphone.devices[micIndex];

    // Query and log the chosen device's min/max frequencies.
    Microphone.GetDeviceCaps(micName_, out minFreq_, out maxFreq_);
    Debug.Log("MIC_FREQ:" + minFreq_.ToString() + ", " + maxFreq_.ToString());

    // 0/0 means any frequency is supported; otherwise never exceed 44.1 kHz.
    if (minFreq_ == 0 && maxFreq_ == 0)
    {
        maxFreq_ = 44100;
    }
    else if (maxFreq_ > 44100)
    {
        maxFreq_ = 44100;
    }

    initialized_ = true;
}
// Starts looping playback of the microphone at deviceIndex, waiting until the
// first samples arrive so playback latency is effectively zero.
protected override void OnEnable()
{
    base.OnEnable();

    if (Microphone.devices.Length == 0)
    {
        Debug.LogFormat("Microphone device not found");
        return;
    }

    var deviceName = Microphone.devices[deviceIndex];
    Microphone.GetDeviceCaps(deviceName, out int minFreq, out int maxFreq);

    // FIX: the caps were queried but ignored and 48 kHz was hard-coded.
    // 0/0 means the device supports any rate; otherwise clamp 48 kHz into
    // the reported range so Microphone.Start gets a rate the device accepts.
    int frequency = (minFreq == 0 && maxFreq == 0) ? 48000 : Mathf.Clamp(48000, minFreq, maxFreq);

    var micClip = Microphone.Start(deviceName, true, 1, frequency);

    // Busy-wait until the microphone produces its first samples so the
    // latency is "0" samples before the audio starts to play.
    while (!(Microphone.GetPosition(deviceName) > 0))
    {
    }

    audioSource.clip = micClip;
    audioSource.loop = true;
    audioSource.Play();
}
/// <summary>
/// This is an internal method, please use the class eSenseFramework.
/// Picks a capture device (explicit name or the first one found) and starts
/// recording into a looping clip, preferring 44100 Hz when supported.
/// </summary>
/// <param name="targetMicrophone">Explicit device name; empty/null selects the first device.</param>
/// <param name="filterSpikes">Whether spike filtering is applied to the output.</param>
/// <returns>True when capture started; false when no microphone exists.</returns>
public static bool StartMeasurement(string targetMicrophone, bool filterSpikes = true)
{
    filterSpikesFromOutput = filterSpikes;

    //find device and connect/read it to an audio clip
    if (Microphone.devices.Length > 0)
    {
        if (!string.IsNullOrEmpty(targetMicrophone))
        {
            deviceName = targetMicrophone;
        }
        else
        {
            deviceName = Microphone.devices[0];
        }
        Debug.Log("Device picked: " + deviceName);

        int minimal;
        int maximal;
        Microphone.GetDeviceCaps(deviceName, out minimal, out maximal);
        //Debug.Log("Capmin: " + minimal + " Capmax: " + maximal);

        // FIX: when the device reports 0/0 ("any frequency supported") the
        // old code hit the `maximal < 44100` branch and started capture at
        // 0 Hz. Treat 0/0 as "44100 Hz is fine".
        if (minimal == 0 && maximal == 0)
        {
            micClip = Microphone.Start(deviceName, true, bufferLengthSeconds, 44100);
        }
        else if (minimal > 44100 || maximal < 44100)
        {
            Debug.LogError("Can't get 44100 frequency! Started at frequency " + maximal);
            micClip = Microphone.Start(deviceName, true, bufferLengthSeconds, maximal);//get best possible frequency
        }
        else
        {
            micClip = Microphone.Start(deviceName, true, bufferLengthSeconds, 44100);//get it at 44100
        }
    }
    else
    {
        Debug.LogError("No microphone found.");
        return false;
    }
    return true;
}
// Queries the default microphone's capabilities and caches the AudioSource.
// The actual Microphone.Start call is left commented out.
void Awake()
{
    // FIX: the original tested `Length >= 0`, which is always true, making
    // the "not connected" warning unreachable. Require at least one device.
    if (Microphone.devices.Length > 0)
    {
        micConnected = true;

        //Get the default microphone recording capabilities
        Microphone.GetDeviceCaps(null, out minFreq, out maxFreq);

        //According to the documentation, if minFreq and maxFreq are zero, the microphone supports any frequency...
        if (minFreq == 0 && maxFreq == 0)
        {
            maxFreq = 44100;
        }

        // NOTE(review): minFreq is forced to 2000 regardless of the device's
        // reported minimum — confirm this override is intentional.
        minFreq = 2000;

        audioSource = null;
        audioSource = GetComponent<AudioSource>();
        //audioSource.clip = Microphone.Start(null, true, (int)1000, maxFreq);
    }
    else
    {
        Debug.LogWarning("Microphone not connected!");
    }
}
/// <summary>
/// Coroutine that tracks the microphone loudness: a hysteresis counter goes
/// up on sustained noise and down on sustained silence, triggering audience
/// animations when either threshold is crossed.
/// </summary>
/// <returns></returns>
private IEnumerator StartRecording()
{
    string device = Microphone.devices[0];
    State state = State.None;

    int min, max;
    Microphone.GetDeviceCaps(device, out min, out max);

    // FIX: 0/0 caps mean any rate is supported — the old code would have
    // started capture at 0 Hz in that case. Fall back to 44.1 kHz.
    if (min == 0 && max == 0)
    {
        max = 44100;
    }

    _source.clip = Microphone.Start(device, true, 1200, max);

    // Wait until the microphone produces samples before starting playback.
    while (Microphone.GetPosition(device) < 1)
    {
        yield return null;
    }
    _source.Play();

    float[] clipSampleData = new float[256];
    int count = 0;

    // FIX: poll the device we actually started recording on — the original
    // passed null (default device), which may differ from devices[0].
    while (Microphone.IsRecording(device))
    {
        // Mean absolute amplitude over the current 256-sample window.
        float clipLoudness = 0f;
        _source.clip.GetData(clipSampleData, _source.timeSamples);
        foreach (float sample in clipSampleData)
        {
            clipLoudness += Math.Abs(sample);
        }
        clipLoudness /= 256;
        Debug.Log("Loudness: " + clipLoudness);

        // Hysteresis: loud frames push the counter up, quiet frames push it
        // down, and a direction reversal resets the state to None.
        if (clipLoudness >= 0.01f && !state.Equals(State.Decreasing))
        {
            count++;
            state = State.Increasing;
        }
        else if (clipLoudness < 0.01f && !state.Equals(State.Increasing))
        {
            count--;
            state = State.Decreasing;
        }
        else
        {
            state = State.None;
        }
        Debug.Log("Mic counter " + count);

        if (count > 8)
        {
            TheaterManager.Instance.PickAudienceAnimate();
            count = 0;
        }
        else if (count < -6)
        {
            TheaterManager.Instance.PickIndifferentAnimate();
            count = 0;
        }

        yield return new WaitForSeconds(1);
    }
}
// change the content of the question canvas according to the question of the given gameobject
//
// Coroutine: iterates every question attached to the target's ImageTarget.
// For "stt" questions it runs a speech-to-text flow — record for 5 seconds,
// transcribe, compare (case-insensitively) with the correct answer, and show
// green/red feedback — then finally shows the overall results screen.
private IEnumerator UpdateQuestion(GameObject target)
{
    foreach (Question question in target.GetComponent <ImageTarget>().questions)
    {
        // NOTE(review): target is destroyed while its ImageTarget.questions
        // collection is still being enumerated — confirm the managed list
        // stays valid after Destroy.
        if (target)
        {
            Destroy(target);
        }
        timer.gameObject.SetActive(true);
        if (question.answer_type == "stt")
        {
            if (Microphone.devices.Length <= 0)
            {
                // Mic-warning UI is currently disabled.
                /*SetVisibile(roomTimerText, false);
                 * Debug.Log("Microphone not connected!"); // Throw a warning message at the console if there isn't
                 * SetVisibile(micWarningText.gameObject, true);
                 * yield return new WaitForSeconds(3.0f);
                 * SetVisibile(micWarningText.gameObject, false);*/
            }
            else // At least one microphone is present
            {
                // Show the question text on the STT canvas.
                speech_to_text_canvas.SetActive(true);
                question_text.gameObject.SetActive(true);
                question_text.GetComponent <Text>().text = question.description;

                //Set 'micConnected' to true
                mic_connected = true;

                //Get the default microphone recording capabilities
                Microphone.GetDeviceCaps(null, out min_freq, out max_freq);

                // According to the documentation, if min_freq and max_freq are zero, the microphone supports any frequency...
                if (min_freq == 0 && max_freq == 0)
                {
                    //...Meaning 44100 Hz can be used as the recording sampling rate
                    max_freq = 44100;
                }

                //Get the attached AudioSource component
                go_audio_source = this.GetComponent <AudioSource>();

                // Display mic image & recording text
                stt_recording.SetActive(true);

                // Ready to record: capture 5 seconds of audio.
                Debug.Log("start record");
                yield return(StartCoroutine(StartRecording(5)));

                // mutex gates the asynchronous SpeechToText callback; poll it
                // every half second while showing the "processing" indicator.
                this.mutex = false;
                SpeechToText();
                stt_recording.SetActive(false);
                stt_processing.SetActive(true);
                while (!this.mutex)
                {
                    yield return(new WaitForSeconds(0.5f));
                }
                stt_processing.SetActive(false);
                question_text.gameObject.SetActive(false);

                // Display the user's answer
                answer_text.text = result;
                answer_text.gameObject.SetActive(true);

                // Check if the answer is correct or not
                // ...Change the color of the text accordingly
                Debug.Log(result.ToLower());
                if (result.ToLower() == question.correct_answer.ToLower())
                {
                    correct_answers += 1;
                    answer_text.color = Color.green;
                }
                else
                {
                    false_answers += 1;
                    answer_text.color = Color.red;
                    // Display the correct answer
                    correct_answer_text.gameObject.SetActive(true);
                    correct_answer_text.text = "Doğru cevap: " + question.correct_answer.ToLower();
                }

                // Leave the feedback visible for 5 seconds, then hide the UI.
                yield return(new WaitForSeconds(5.0f));
                correct_answer_text.gameObject.SetActive(false);
                answer_text.gameObject.SetActive(false);
                speech_to_text_canvas.SetActive(false);

                // Delete the .wav file used
                File.Delete(file_path);
            }
        }
        timer.gameObject.SetActive(false);
    }
    yield return(StartCoroutine(DisplayResults()));
}
// Sets up OVR lip sync, either from a pre-recorded ("canned") AudioSource
// clip or from live microphone capture. (Re)initializes the OVRLipSync
// runtime at the chosen sample frequency and allocates the staging buffer of
// 1024 samples per channel. Sets `active` once a source is wired up.
public void InitializeLipSync()
{
    active = false;
    if (catsData == null)
    {
        InitCatsData();
    }

    // Stop any live mic capture left over from a previous (non-canned) run.
    if (!isCanned && clip != null)
    {
        Microphone.End(lastMic);
    }

    partialAudio = null;
    partialPos = 0;

    audioSource = GetComponent <AudioSource>();

    // Canned mode: drive lip sync from the AudioSource's existing clip.
    if (useCanned && audioSource != null && audioSource.clip != null)
    {
        isCanned = true;
        clip = audioSource.clip;
        channels = clip.channels;
        partialAudio = new float[1024 * channels];
        freq = audioSource.clip.frequency;
        if (!inited)
        {
            // If the OVRLipSync runtime is already up (possibly at another
            // frequency), tear it down before re-initializing.
            if (OVRLipSync.IsInitialized() == OVRLipSync.Result.Success)
            {
                DestroyContext();
                OVRLipSync.Shutdown();
            }
            OVRLipSync.Initialize(freq, 1024);
            CreateContext();
            OVRLipSync.SendSignal(context, OVRLipSync.Signals.VisemeSmoothing, smoothAmount, 0);
            inited = true;
        }
        active = true;
        return;
    }

    // Live mode: capture from the configured mic.
    isCanned = false;

    int minFreq;
    int maxFreq = AudioSettings.outputSampleRate;
    freq = maxFreq;
    lastMic = mic;
    if (mic != null)
    {
        Microphone.GetDeviceCaps(lastMic, out minFreq, out maxFreq);
    }
    // maxFreq can be 0 when the device supports any rate; in that case keep
    // the previously chosen freq (the output sample rate).
    if (maxFreq > 0)
    {
        freq = maxFreq;
    }

    if (!inited)
    {
        // Same re-initialization dance as the canned path above.
        if (OVRLipSync.IsInitialized() == OVRLipSync.Result.Success)
        {
            DestroyContext();
            OVRLipSync.Shutdown();
        }
        OVRLipSync.Initialize(freq, 1024);
        CreateContext();
        OVRLipSync.SendSignal(context, OVRLipSync.Signals.VisemeSmoothing, smoothAmount, 0);
        inited = true;
    }

    // One-second looping capture clip at the chosen frequency.
    clip = Microphone.Start(lastMic, true, 1, freq);
    channels = clip.channels;
    partialAudio = new float[1024 * channels];
    lastPos = 0;
    active = true;
}
/// <inheritdoc />
/// Starts dictation: shuts down phrase recognition (the two systems cannot
/// run simultaneously), starts the DictationRecognizer with the given
/// timeouts, then begins a one-shot microphone recording at the default
/// device's maximum sampling rate. No-op outside Windows/WSA builds or when
/// already listening/transitioning.
public async Task StartRecordingAsync(GameObject listener = null, float initialSilenceTimeout = 5f, float autoSilenceTimeout = 20f, int recordingTime = 10, string micDeviceName = "")
{
    using (StartRecordingAsyncPerfMarker.Auto())
    {
#if UNITY_STANDALONE_WIN || UNITY_WSA || UNITY_EDITOR_WIN
        if (IsListening || isTransitioning || Service == null || !Application.isPlaying)
        {
            Debug.LogWarning("Unable to start recording");
            return;
        }

        // Lazily create the recognizer when auto-start is disabled.
        if (dictationRecognizer == null && InputSystemProfile.SpeechCommandsProfile.SpeechRecognizerStartBehavior == AutoStartBehavior.ManualStart)
        {
            InitializeDictationRecognizer();
        }

        hasFailed = false;
        IsListening = true;
        isTransitioning = true;

        // An optional modal listener receives the dictation input events.
        if (listener != null)
        {
            hasListener = true;
            Service.PushModalInputHandler(listener);
        }

        // Phrase recognition and dictation are mutually exclusive; stop the
        // former and wait for it to report Stopped.
        if (PhraseRecognitionSystem.Status == SpeechSystemStatus.Running)
        {
            PhraseRecognitionSystem.Shutdown();
        }
        await waitUntilPhraseRecognitionSystemHasStopped;
        Debug.Assert(PhraseRecognitionSystem.Status == SpeechSystemStatus.Stopped);

        // Query the maximum frequency of the default microphone.
        int minSamplingRate; // Not used.
        deviceName = micDeviceName;
        Microphone.GetDeviceCaps(deviceName, out minSamplingRate, out samplingRate);

        dictationRecognizer.InitialSilenceTimeoutSeconds = initialSilenceTimeout;
        dictationRecognizer.AutoSilenceTimeoutSeconds = autoSilenceTimeout;
        dictationRecognizer.Start();
        await waitUntilDictationRecognizerHasStarted;
        Debug.Assert(dictationRecognizer.Status == SpeechSystemStatus.Running);

        if (dictationRecognizer.Status == SpeechSystemStatus.Failed)
        {
            Service.RaiseDictationError(inputSource, "Dictation recognizer failed to start!");
            return;
        }

        // Start recording from the microphone.
        dictationAudioClip = Microphone.Start(deviceName, false, recordingTime, samplingRate);
        textSoFar = new StringBuilder();
        isTransitioning = false;
#else
        await Task.CompletedTask;
#endif
    }
}
// Begins microphone capture for voice transmission: picks a sample rate as
// close to 48 kHz as the device allows, opens a 10-second looping capture
// clip, sizes the 20 ms frame buffers and resets all subscribers.
// Returns the capture WaveFormat, or null when no microphone is attached or
// capture fails to start. Throws when capture is already running.
public virtual WaveFormat StartCapture(string inputMicName)
{
    //Sanity checks
    Log.AssertAndThrowPossibleBug(_clip == null, "1BAD3E74-B451-4B7D-A9B9-35225BE55364", "Attempted to Start microphone capture, but capture is already running");

    //Early exit if there are no microphones connected
    if (Log.AssertAndLogWarn(Microphone.devices.Length > 0, "No microphone detected; disabling voice capture"))
    {
        return(null);
    }

    //Check the micName and default to null if it's invalid (all whitespace or not a known device)
    _micName = ChooseMicName(inputMicName);

    //Get device capabilities and choose a sample rate as close to 48000 as possible.
    //If min and max are both zero that indicates we can use any sample rate
    int minFreq;
    int maxFreq;
    Microphone.GetDeviceCaps(_micName, out minFreq, out maxFreq);
    var sampleRate = minFreq == 0 && maxFreq == 0 ? 48000 : Mathf.Clamp(48000, minFreq, maxFreq);
    Log.Debug("GetDeviceCaps name=`{0}` min=`{1}` max=`{2}`", _micName, minFreq, maxFreq);

    //Get the audioclip from Unity for this microphone (with a fairly large internal buffer)
    _clip = Microphone.Start(_micName, true, 10, sampleRate);
    if (_clip == null)
    {
        Log.Error("Failed to start microphone capture");
        return(null);
    }

    //Setup buffers for capture
    _format = new WaveFormat(_clip.frequency, 1);
    // Read-buffer cap: the power of two covering ~100 ms of audio.
    _maxReadBufferPower = (byte)Math.Ceiling(Math.Log(0.1f * _clip.frequency, 2));

    // Create/resize the audio buffers to contain 20ms frames of data. Any frame size will work (the pipeline will buffer/split them as necessary) but 20ms is
    // optimal because that's native frame size the preprocessor works at so it has to do no extra work to assemble the frames at it's desired size.
    var frameSize = (int)(0.02 * _clip.frequency);
    if (_rawMicSamples == null || _rawMicSamples.WaveFormat != _format || _rawMicSamples.Capacity != frameSize || _rawMicFrames.FrameSize != frameSize)
    {
        _rawMicSamples = new BufferedSampleProvider(_format, frameSize * 4);
        _rawMicFrames = new SampleToFrameProvider(_rawMicSamples, (uint)frameSize);
    }

    if (_frame == null || _frame.Length != frameSize)
    {
        _frame = new float[frameSize];
    }

    //watch for device changes - we need to reset if the audio device changes
    AudioSettings.OnAudioConfigurationChanged += OnAudioDeviceChanged;
    _audioDeviceChanged = false;

    //Reset subscribers to prepare them for another stream of data
    for (var i = 0; i < _subscribers.Count; i++)
    {
        _subscribers[i].Reset();
    }

    // One frame's worth of latency at the capture sample rate.
    Latency = TimeSpan.FromSeconds(frameSize / (float)_format.SampleRate);
    Log.Info("Began mic capture (SampleRate:{0}Hz, FrameSize:{1}, Buffer Limit:2^{2}, Latency:{3}ms, Device:'{4}')", _clip.frequency, frameSize, _maxReadBufferPower, Latency.TotalMilliseconds, _micName);

    return(_format);
}
// Restarts default-microphone capture into a 5-second looping clip at the
// device's maximum supported frequency.
public static void Resume()
{
    Microphone.GetDeviceCaps(string.Empty, out _, out var maxFreq);

    // FIX: when the device reports 0/0 ("any frequency supported") the old
    // code started capture at 0 Hz; fall back to 44.1 kHz instead.
    Microphone.Start(string.Empty, true, 5, maxFreq > 0 ? maxFreq : 44100);
}
/// <inheritdoc />
/// Starts dictation: shuts down phrase recognition (the two systems cannot
/// run simultaneously), starts the DictationRecognizer with the given
/// timeouts, then begins a one-shot microphone recording at the default
/// device's maximum sampling rate. No-op outside Windows/WSA builds or when
/// already listening/transitioning.
public async Task StartRecordingAsync(GameObject listener = null, float initialSilenceTimeout = 5f, float autoSilenceTimeout = 20f, int recordingTime = 10, string micDeviceName = "")
{
#if UNITY_STANDALONE_WIN || UNITY_WSA || UNITY_EDITOR_WIN
    IMixedRealityInputSystem inputSystem = Service as IMixedRealityInputSystem;

    if (IsListening || isTransitioning || inputSystem == null || !Application.isPlaying)
    {
        Debug.LogWarning(string.Format("Unable to start recording. (IsListening={0};isTransitioning={1};isPlaying={2};null={3}", IsListening, isTransitioning, Application.isPlaying, inputSystem == null));
        // A wait-for-transition variant was tried and abandoned (it froze):
        //if (isTransitioning && Application.isPlaying)
        //{
        //    Debug.Log("Waiting for isTransitiong to go to false");
        //    await (new WaitUntil(() => (isTransitioning == false))); // this is going to freeze
        //} else
        //{
        return;
        //}
    }

    hasFailed = false;
    IsListening = true;
    isTransitioning = true;

    // An optional modal listener receives the dictation input events.
    if (listener != null)
    {
        hasListener = true;
        inputSystem.PushModalInputHandler(listener);
    }

    // Phrase recognition and dictation are mutually exclusive; stop the
    // former and wait for it to report Stopped.
    if (PhraseRecognitionSystem.Status == SpeechSystemStatus.Running)
    {
        PhraseRecognitionSystem.Shutdown();
    }
    await waitUntilPhraseRecognitionSystemHasStopped;
    Debug.Assert(PhraseRecognitionSystem.Status == SpeechSystemStatus.Stopped);

    // Query the maximum frequency of the default microphone.
    int minSamplingRate; // Not used.
    deviceName = micDeviceName;
    Microphone.GetDeviceCaps(deviceName, out minSamplingRate, out samplingRate);

    dictationRecognizer.InitialSilenceTimeoutSeconds = initialSilenceTimeout;
    dictationRecognizer.AutoSilenceTimeoutSeconds = autoSilenceTimeout;
    dictationRecognizer.Start();
    await waitUntilDictationRecognizerHasStarted;
    Debug.Assert(dictationRecognizer.Status == SpeechSystemStatus.Running);

    if (dictationRecognizer.Status == SpeechSystemStatus.Failed)
    {
        inputSystem.RaiseDictationError(inputSource, "Dictation recognizer failed to start!");
        return;
    }

    // Start recording from the microphone.
    dictationAudioClip = Microphone.Start(deviceName, false, recordingTime, samplingRate);
    textSoFar = new StringBuilder();
    isTransitioning = false;
#else
    await Task.CompletedTask;
#endif
}
// Scene bootstrap: starts a 10-second looping microphone capture at RATE,
// wires up captions, cursor, menu buttons and the depth mesh, and opens a
// TCP connection to the sound-recognition server. Throws when any required
// scene object is missing.
protected void Start()
{
    micPermissionGranted = true;

    // Continuous looping capture from the default device at RATE Hz.
    micClip = Microphone.Start("", true, 10, RATE);
    pos = 0;
    lastPos = 0;
    sample = new float[CHUNK];
    sampleClassification = new float[RATE];

    input.AddGlobalListener(gameObject);
    classString = "started";

    captions = new List <GameObject>();
    captions.Add(transform.Find("CaptionsDisplay").gameObject);
    StartContinuous();

    // Required scene objects — fail loudly if any are missing.
    CursorObject = GameObject.Find("DefaultCursor");
    if (CursorObject == null)
    {
        throw new Exception("Can't find DefaultCursor");
    }
    CursorVisual = GameObject.Find("CursorVisual");
    if (CursorVisual == null)
    {
        throw new Exception("Can't find CursorVisual");
    }
    GameObject menu = GameObject.Find("Menu");
    if (menu == null)
    {
        throw new Exception("Can't find Menu");
    }
    GameObject menubutton = GameObject.Find("MenuButton");
    if (menubutton == null)
    {
        throw new Exception("Can't find MenuButton");
    }

    // Clone the template button once per named entry, stacking the clones
    // 0.09 units apart along the menu's local Y axis.
    float yp = 0;
    buttons = new Dictionary <string, GameObject>();
    foreach (string name in button_names)
    {
        GameObject nextbutton = Instantiate(menubutton);
        nextbutton.transform.parent = menu.transform;
        nextbutton.transform.localPosition = new Vector3(0, yp, 0);
        nextbutton.transform.localRotation = Quaternion.identity;
        nextbutton.transform.localScale = Vector3.one;
        yp += 0.09f;
        buttons.Add(name, nextbutton);
    }
    // The template itself is no longer needed once all clones exist.
    Destroy(menubutton);

    DepthObject = GameObject.Find("WorldMesh(CRASHES EDITOR)");
    if (DepthObject == null)
    {
        throw new Exception("Can't find WorldMesh(CRASHES EDITOR)");
    }

    // Sound recognition connection
    tcpclient = new System.Net.Sockets.TcpClient();
    tcpclient.Connect(SERVER_URL, SERVER_PORT);
    outstream = tcpclient.GetStream();

    // NOTE(review): both outputs of this query are discarded — looks like
    // dead code; confirm nothing relies on the call and remove it.
    int unused;
    int samplingRate;
    Microphone.GetDeviceCaps("", out unused, out samplingRate);
}
// Custom inspector for the Photon Voice Recorder component.
// Two distinct paths: in play mode the controls read/write the live Recorder
// properties directly (so changes take effect immediately); in edit mode the
// same controls are drawn from the SerializedProperty fields instead.
// Statement order is significant throughout — this is immediate-mode GUI.
public override void OnInspectorGUI() {
    serializedObject.UpdateIfRequiredOrScript();
    //serializedObject.Update();
    // Surface restart/initialization warnings when inspecting a live scene object.
    if (PhotonVoiceEditorUtils.IsInTheSceneInPlayMode(recorder.gameObject)) {
        if (recorder.RequiresRestart) {
            EditorGUILayout.HelpBox("Recorder requires restart. Call Recorder.RestartRecording().", MessageType.Warning);
            if (GUILayout.Button("RestartRecording")) {
                recorder.RestartRecording();
            }
        } else if (!recorder.IsInitialized) {
            EditorGUILayout.HelpBox("Recorder requires initialization. Call Recorder.Init or VoiceConnection.InitRecorder.", MessageType.Warning);
        }
    }
    VoiceLogger.ExposeLogLevel(serializedObject, recorder);
    EditorGUI.BeginChangeCheck();
    if (Application.isPlaying) {
        // --- Play mode: drive the live Recorder instance directly. ---
        recorder.ReactOnSystemChanges = EditorGUILayout.Toggle(new GUIContent("React On System Changes", "If true, recording is restarted when Unity detects Audio Config. changes."), recorder.ReactOnSystemChanges);
        recorder.TransmitEnabled = EditorGUILayout.Toggle(new GUIContent("Transmit Enabled", "If true, audio transmission is enabled."), recorder.TransmitEnabled);
        if (recorder.IsInitialized) {
            recorder.IsRecording = EditorGUILayout.Toggle(new GUIContent("IsRecording", "If true, audio recording is on."), recorder.IsRecording);
        } else {
            EditorGUILayout.PropertyField(this.autoStartSp, new GUIContent("Auto Start", "If true, recording is started when Recorder is initialized."));
        }
        // Live level meter, only meaningful while recording and transmitting.
        if (recorder.IsRecording && recorder.TransmitEnabled) {
            float amplitude = 0f;
            if (recorder.IsCurrentlyTransmitting) {
                amplitude = recorder.LevelMeter.CurrentPeakAmp;
            }
            EditorGUILayout.Slider("Level", amplitude, 0, 1);
        }
        recorder.Encrypt = EditorGUILayout.Toggle(new GUIContent("Encrypt", "If true, voice stream is sent encrypted."), recorder.Encrypt);
        recorder.InterestGroup = (byte)EditorGUILayout.IntField(new GUIContent("Interest Group", "Target interest group that will receive transmitted audio."), recorder.InterestGroup);
        // Debug echo only works with interest group 0.
        if (recorder.InterestGroup == 0) {
            recorder.DebugEchoMode = EditorGUILayout.Toggle(new GUIContent("Debug Echo", "If true, outgoing stream routed back to client via server same way as for remote client's streams."), recorder.DebugEchoMode);
        }
        recorder.ReliableMode = EditorGUILayout.Toggle(new GUIContent("Reliable Mode", "If true, stream data sent in reliable mode."), recorder.ReliableMode);
        // Codec settings.
        EditorGUILayout.LabelField("Codec Parameters", EditorStyles.boldLabel);
        recorder.FrameDuration = (OpusCodec.FrameDuration)EditorGUILayout.EnumPopup(new GUIContent("Frame Duration", "Outgoing audio stream encoder delay."), recorder.FrameDuration);
        recorder.SamplingRate = (POpusCodec.Enums.SamplingRate)EditorGUILayout.EnumPopup(new GUIContent("Sampling Rate", "Outgoing audio stream sampling rate."), recorder.SamplingRate);
        recorder.Bitrate = EditorGUILayout.IntField(new GUIContent("Bitrate", "Outgoing audio stream bitrate."), recorder.Bitrate);
        // Input source selection.
        EditorGUILayout.LabelField("Audio Source Settings", EditorStyles.boldLabel);
        recorder.SourceType = (Recorder.InputSourceType)EditorGUILayout.EnumPopup(new GUIContent("Input Source Type", "Input audio data source type"), recorder.SourceType);
        switch (recorder.SourceType) {
            case Recorder.InputSourceType.Microphone:
                recorder.MicrophoneType = (Recorder.MicType)EditorGUILayout.EnumPopup(new GUIContent("Microphone Type", "Which microphone API to use when the Source is set to Microphone."), recorder.MicrophoneType);
                EditorGUILayout.HelpBox("Devices list and current selection is valid in Unity Editor only. In build, you need to set it via code preferably at runtime.", MessageType.Info);
                switch (recorder.MicrophoneType) {
                    case Recorder.MicType.Unity:
                        // Unity Microphone API: pick a device and show its supported frequency range.
                        if (Microphone.devices.Length == 0) {
                            EditorGUILayout.HelpBox("No microphone device found", MessageType.Error);
                        } else {
                            unityMicrophoneDeviceIndex = EditorGUILayout.Popup("Microphone Device", unityMicrophoneDeviceIndex, Microphone.devices);
                            recorder.UnityMicrophoneDevice = Microphone.devices[unityMicrophoneDeviceIndex];
                            int minFreq, maxFreq;
                            Microphone.GetDeviceCaps(Microphone.devices[unityMicrophoneDeviceIndex], out minFreq, out maxFreq);
                            // 0..0 Hz here means the device supports any frequency (Unity convention).
                            EditorGUILayout.LabelField("Microphone Device Caps", string.Format("{0}..{1} Hz", minFreq, maxFreq));
                        }
                        break;
                    case Recorder.MicType.Photon:
#if PHOTON_MICROPHONE_ENUMERATOR
                        // Photon's native enumerator uses integer device IDs rather than names.
                        if (Recorder.PhotonMicrophoneEnumerator.IsSupported) {
                            if (Recorder.PhotonMicrophoneEnumerator.Count == 0) {
                                EditorGUILayout.HelpBox("No microphone device found", MessageType.Error);
                            } else {
                                EditorGUILayout.BeginHorizontal();
                                photonDeviceIndex = EditorGUILayout.Popup("Microphone Device", photonDeviceIndex, photonDeviceNames);
                                recorder.PhotonMicrophoneDeviceId = photonDeviceIDs[photonDeviceIndex];
                                if (GUILayout.Button("Refresh", EditorStyles.miniButton, GUILayout.Width(70))) {
                                    this.RefreshPhotonMicrophoneDevices();
                                }
                                EditorGUILayout.EndHorizontal();
                            }
                        } else {
                            recorder.PhotonMicrophoneDeviceId = -1;
                            EditorGUILayout.HelpBox("PhotonMicrophoneEnumerator Not Supported", MessageType.Error);
                        }
#endif
#if UNITY_IOS
                        // iOS audio session: custom parameters or one of the bundled presets.
                        EditorGUILayout.LabelField("iOS Audio Session Parameters", EditorStyles.boldLabel);
                        EditorGUI.indentLevel++;
                        EditorGUILayout.PropertyField(useCustomAudioSessionParametersSp, new GUIContent("Use Custom"));
                        if (useCustomAudioSessionParametersSp.boolValue) {
                            EditorGUILayout.PropertyField(audioSessionParametersCategorySp);
                            EditorGUILayout.PropertyField(audioSessionParametersModeSp);
                            EditorGUILayout.PropertyField(audioSessionParametersCategoryOptionsSp, true);
                        } else {
                            int index = EditorGUILayout.Popup("Preset", audioSessionPresetIndexSp.intValue, iOSAudioSessionPresetsNames);
                            // Only rewrite the serialized values when the preset actually changed.
                            if (index != audioSessionPresetIndexSp.intValue) {
                                audioSessionPresetIndexSp.intValue = index;
                                AudioSessionParameters parameters = iOSAudioSessionPresetsValues[index];
                                this.SetEnumIndex(audioSessionParametersCategorySp, typeof(AudioSessionCategory), parameters.Category);
                                this.SetEnumIndex(audioSessionParametersModeSp, typeof(AudioSessionMode), parameters.Mode);
                                if (parameters.CategoryOptions != null) {
                                    audioSessionParametersCategoryOptionsSp.ClearArray();
                                    audioSessionParametersCategoryOptionsSp.arraySize = parameters.CategoryOptions.Length;
                                    // Preset 0: speaker + bluetooth; preset 1: bluetooth only.
                                    if (index == 0) {
                                        this.SetEnumIndex(audioSessionParametersCategoryOptionsSp.GetArrayElementAtIndex(0), typeof(AudioSessionCategoryOption), AudioSessionCategoryOption.DefaultToSpeaker);
                                        this.SetEnumIndex(audioSessionParametersCategoryOptionsSp.GetArrayElementAtIndex(1), typeof(AudioSessionCategoryOption), AudioSessionCategoryOption.AllowBluetooth);
                                    } else if (index == 1) {
                                        this.SetEnumIndex(audioSessionParametersCategoryOptionsSp.GetArrayElementAtIndex(0), typeof(AudioSessionCategoryOption), AudioSessionCategoryOption.AllowBluetooth);
                                    }
                                }
                            }
                        }
                        EditorGUI.indentLevel--;
#endif
                        break;
                    default:
                        throw new ArgumentOutOfRangeException();
                }
                break;
            case Recorder.InputSourceType.AudioClip:
                recorder.AudioClip = EditorGUILayout.ObjectField(new GUIContent("Audio Clip", "Source audio clip."), recorder.AudioClip, typeof(AudioClip), false) as AudioClip;
                recorder.LoopAudioClip = EditorGUILayout.Toggle(new GUIContent("Loop", "Loop playback for audio clip sources."), recorder.LoopAudioClip);
                break;
            case Recorder.InputSourceType.Factory:
                EditorGUILayout.HelpBox("Add a custom InputFactory method in code.", MessageType.Info);
                break;
            default:
                throw new ArgumentOutOfRangeException();
        }
        // Voice activity detection + calibration (calibration needs a live stream).
        EditorGUILayout.LabelField("Voice Activity Detection (VAD)", EditorStyles.boldLabel);
        recorder.VoiceDetection = EditorGUILayout.Toggle(new GUIContent("Detect", "If true, voice detection enabled."), recorder.VoiceDetection);
        if (recorder.VoiceDetection) {
            recorder.VoiceDetectionThreshold = EditorGUILayout.Slider(new GUIContent("Threshold", "Voice detection threshold (0..1, where 1 is full amplitude)."), recorder.VoiceDetectionThreshold, 0f, 1f);
            recorder.VoiceDetectionDelayMs = EditorGUILayout.IntField(new GUIContent("Delay (ms)", "Keep detected state during this time after signal level dropped below threshold. Default is 500ms"), recorder.VoiceDetectionDelayMs);
            EditorGUILayout.HelpBox("Do not speak and stay in a silent environment when calibrating.", MessageType.Info);
            if (recorder.VoiceDetectorCalibrating) {
                EditorGUILayout.LabelField(string.Format("Calibrating {0} ms", calibrationTime));
            } else {
                calibrationTime = EditorGUILayout.IntField("Calibration Time (ms)", calibrationTime);
                if (recorder.IsRecording && recorder.TransmitEnabled) {
                    if (GUILayout.Button("Calibrate")) {
                        recorder.VoiceDetectorCalibrate(calibrationTime);
                    }
                }
            }
        }
    } else {
        // --- Edit mode: draw the same controls from serialized properties. ---
        EditorGUILayout.PropertyField(this.reactOnSystemChangesSp, new GUIContent("React On System Changes", "If true, recording is restarted when Unity detects Audio Config. changes."));
        EditorGUILayout.PropertyField(this.transmitEnabledSp, new GUIContent("Transmit Enabled", "If true, audio transmission is enabled."));
        EditorGUILayout.PropertyField(this.autoStartSp, new GUIContent("Auto Start", "If true, recording is started when Recorder is initialized."));
        EditorGUILayout.PropertyField(this.encryptSp, new GUIContent("Encrypt", "If true, voice stream is sent encrypted."));
        EditorGUILayout.PropertyField(this.interestGroupSp, new GUIContent("Interest Group", "Target interest group that will receive transmitted audio."));
        if (this.interestGroupSp.intValue == 0) {
            EditorGUILayout.PropertyField(this.debugEchoModeSp, new GUIContent("Debug Echo", "If true, outgoing stream routed back to client via server same way as for remote client's streams."));
        } else if (this.debugEchoModeSp.boolValue) {
            // Debug echo is incompatible with non-zero interest groups; auto-disable with a warning.
            Debug.LogWarningFormat("DebugEchoMode disabled because InterestGroup changed to {0}. DebugEchoMode works only with Interest Group 0.", this.interestGroupSp.intValue);
            this.debugEchoModeSp.boolValue = false;
        }
        EditorGUILayout.PropertyField(this.reliableModeSp, new GUIContent("Reliable Mode", "If true, stream data sent in reliable mode."));
        EditorGUILayout.LabelField("Codec Parameters", EditorStyles.boldLabel);
        EditorGUILayout.PropertyField(this.frameDurationSp, new GUIContent("Frame Duration", "Outgoing audio stream encoder delay."));
        EditorGUILayout.PropertyField(this.samplingRateSp, new GUIContent("Sampling Rate", "Outgoing audio stream sampling rate."));
        EditorGUILayout.PropertyField(this.bitrateSp, new GUIContent("Bitrate", "Outgoing audio stream bitrate."));
        EditorGUILayout.LabelField("Audio Source Settings", EditorStyles.boldLabel);
        EditorGUILayout.PropertyField(this.sourceTypeSp, new GUIContent("Input Source Type", "Input audio data source type"));
        switch ((Recorder.InputSourceType) this.sourceTypeSp.enumValueIndex) {
            case Recorder.InputSourceType.Microphone:
                EditorGUILayout.PropertyField(this.microphoneTypeSp, new GUIContent("Microphone Type", "Which microphone API to use when the Source is set to Microphone."));
                EditorGUILayout.HelpBox("Devices list and current selection is valid in Unity Editor only. In build, you need to set it via code preferably at runtime.", MessageType.Info);
                // NOTE(review): this edit-mode branch switches on the live recorder.MicrophoneType
                // rather than microphoneTypeSp — confirm the two cannot disagree here.
                switch (recorder.MicrophoneType) {
                    case Recorder.MicType.Unity:
                        if (Microphone.devices.Length == 0) {
                            EditorGUILayout.HelpBox("No microphone device found", MessageType.Error);
                        } else {
                            unityMicrophoneDeviceIndex = EditorGUILayout.Popup("Microphone Device", unityMicrophoneDeviceIndex, Microphone.devices);
                            this.unityMicrophoneDeviceSp.stringValue = Microphone.devices[unityMicrophoneDeviceIndex];
                            int minFreq, maxFreq;
                            Microphone.GetDeviceCaps(Microphone.devices[unityMicrophoneDeviceIndex], out minFreq, out maxFreq);
                            EditorGUILayout.LabelField("Microphone Device Caps", string.Format("{0}..{1} Hz", minFreq, maxFreq));
                        }
                        break;
                    case Recorder.MicType.Photon:
#if PHOTON_MICROPHONE_ENUMERATOR
                        if (Recorder.PhotonMicrophoneEnumerator.IsSupported) {
                            if (Recorder.PhotonMicrophoneEnumerator.Count == 0) {
                                EditorGUILayout.HelpBox("No microphone device found", MessageType.Error);
                            } else {
                                EditorGUILayout.BeginHorizontal();
                                photonDeviceIndex = EditorGUILayout.Popup("Microphone Device", photonDeviceIndex, photonDeviceNames);
                                this.photonMicrophoneDeviceIdSp.intValue = photonDeviceIDs[photonDeviceIndex];
                                if (GUILayout.Button("Refresh", EditorStyles.miniButton, GUILayout.Width(70))) {
                                    this.RefreshPhotonMicrophoneDevices();
                                }
                                EditorGUILayout.EndHorizontal();
                            }
                        } else {
                            recorder.PhotonMicrophoneDeviceId = -1;
                            EditorGUILayout.HelpBox("PhotonMicrophoneEnumerator Not Supported", MessageType.Error);
                        }
#endif
#if UNITY_IOS
                        EditorGUILayout.LabelField("iOS Audio Session Parameters", EditorStyles.boldLabel);
                        EditorGUI.indentLevel++;
                        EditorGUILayout.PropertyField(useCustomAudioSessionParametersSp, new GUIContent("Use Custom"));
                        if (useCustomAudioSessionParametersSp.boolValue) {
                            EditorGUILayout.PropertyField(audioSessionParametersCategorySp);
                            EditorGUILayout.PropertyField(audioSessionParametersModeSp);
                            EditorGUILayout.PropertyField(audioSessionParametersCategoryOptionsSp, true);
                        } else {
                            int index = EditorGUILayout.Popup("Preset", audioSessionPresetIndexSp.intValue, iOSAudioSessionPresetsNames);
                            if (index != audioSessionPresetIndexSp.intValue) {
                                audioSessionPresetIndexSp.intValue = index;
                                AudioSessionParameters parameters = iOSAudioSessionPresetsValues[index];
                                this.SetEnumIndex(audioSessionParametersCategorySp, typeof(AudioSessionCategory), parameters.Category);
                                this.SetEnumIndex(audioSessionParametersModeSp, typeof(AudioSessionMode), parameters.Mode);
                                if (parameters.CategoryOptions != null) {
                                    audioSessionParametersCategoryOptionsSp.ClearArray();
                                    audioSessionParametersCategoryOptionsSp.arraySize = parameters.CategoryOptions.Length;
                                    if (index == 0) {
                                        this.SetEnumIndex(audioSessionParametersCategoryOptionsSp.GetArrayElementAtIndex(0), typeof(AudioSessionCategoryOption), AudioSessionCategoryOption.DefaultToSpeaker);
                                        this.SetEnumIndex(audioSessionParametersCategoryOptionsSp.GetArrayElementAtIndex(1), typeof(AudioSessionCategoryOption), AudioSessionCategoryOption.AllowBluetooth);
                                    } else if (index == 1) {
                                        this.SetEnumIndex(audioSessionParametersCategoryOptionsSp.GetArrayElementAtIndex(0), typeof(AudioSessionCategoryOption), AudioSessionCategoryOption.AllowBluetooth);
                                    }
                                }
                            }
                        }
                        EditorGUI.indentLevel--;
#endif
                        break;
                    default:
                        throw new ArgumentOutOfRangeException();
                }
                break;
            case Recorder.InputSourceType.AudioClip:
                EditorGUILayout.PropertyField(this.audioClipSp, new GUIContent("Audio Clip", "Source audio clip."));
                EditorGUILayout.PropertyField(this.loopAudioClipSp, new GUIContent("Loop", "Loop playback for audio clip sources."));
                break;
            case Recorder.InputSourceType.Factory:
                EditorGUILayout.HelpBox("Add a custom InputFactory method in code.", MessageType.Info);
                break;
            default:
                throw new ArgumentOutOfRangeException();
        }
        EditorGUILayout.LabelField("Voice Activity Detection (VAD)", EditorStyles.boldLabel);
        EditorGUILayout.PropertyField(this.voiceDetectionSp, new GUIContent("Detect", "If true, voice detection enabled."));
        if (this.voiceDetectionSp.boolValue) {
            this.voiceDetectionThresholdSp.floatValue = EditorGUILayout.Slider(new GUIContent("Threshold", "Voice detection threshold (0..1, where 1 is full amplitude)."), this.voiceDetectionThresholdSp.floatValue, 0f, 1f);
            this.voiceDetectionDelayMsSp.intValue = EditorGUILayout.IntField(new GUIContent("Delay (ms)", "Keep detected state during this time after signal level dropped below threshold. Default is 500ms"), this.voiceDetectionDelayMsSp.intValue);
        }
    }
    // Persist any edits made through the serialized properties.
    if (EditorGUI.EndChangeCheck()) {
        serializedObject.ApplyModifiedProperties();
    }
}
// Custom inspector for the (older) Photon Voice Recorder component.
// Unlike the v2 inspector, all controls write the live Recorder properties
// directly, in both play and edit mode. Immediate-mode GUI: order matters.
public override void OnInspectorGUI() {
    // serializedObject.UpdateIfRequiredOrScript();
    // Show (re)initialization warnings while playing.
    if (Application.isPlaying && recorder.RequiresInit) {
        if (recorder.IsInitialized) {
            EditorGUILayout.HelpBox("Recorder requires re-initialization. Call Recorder.ReInit().", MessageType.Warning);
            if (GUILayout.Button("ReInit")) {
                recorder.ReInit();
            }
        } else {
            EditorGUILayout.HelpBox("Recorder requires initialization. Call Recorder.Init(VoiceClient, Object).", MessageType.Warning);
        }
    }
    VoiceLogger.ExposeLogLevel(serializedObject, recorder);
    EditorGUI.BeginChangeCheck();
    recorder.ReactOnSystemChanges = EditorGUILayout.Toggle(new GUIContent("React On System Changes", "If true, ReInit when Unity detects Audio Config. changes."), recorder.ReactOnSystemChanges);
    recorder.TransmitEnabled = EditorGUILayout.Toggle(new GUIContent("Transmit Enabled", "If true, audio transmission is enabled."), recorder.TransmitEnabled);
    if (recorder.IsInitialized || !recorder.RequiresInit) {
        recorder.IsRecording = EditorGUILayout.Toggle(new GUIContent("IsRecording", "If true, audio recording is on."), recorder.IsRecording);
    }
    // Live level meter; peak amp > 1 is assumed to be raw 16-bit and is rescaled.
    if (recorder.IsRecording) {
        float amplitude = 0f;
        if (recorder.IsCurrentlyTransmitting) {
            amplitude = recorder.LevelMeter.CurrentPeakAmp;
            if (amplitude > 1f) {
                amplitude /= 32768;
            }
        }
        EditorGUILayout.Slider("Level", amplitude, 0, 1);
    }
    recorder.Encrypt = EditorGUILayout.Toggle(new GUIContent("Encrypt", "If true, voice stream is sent encrypted."), recorder.Encrypt);
    recorder.InterestGroup = (byte)EditorGUILayout.IntField(new GUIContent("Interest Group", "Target interest group that will receive transmitted audio."), recorder.InterestGroup);
    // Debug echo only works with interest group 0.
    if (recorder.InterestGroup == 0) {
        recorder.DebugEchoMode = EditorGUILayout.Toggle(new GUIContent("Debug Echo", "If true, outgoing stream routed back to client via server same way as for remote client's streams."), recorder.DebugEchoMode);
    }
    recorder.ReliableMode = EditorGUILayout.Toggle(new GUIContent("Reliable Mode", "If true, stream data sent in reliable mode."), recorder.ReliableMode);
    // Codec settings.
    EditorGUILayout.LabelField("Codec Parameters", EditorStyles.boldLabel);
    recorder.FrameDuration = (OpusCodec.FrameDuration)EditorGUILayout.EnumPopup(new GUIContent("Frame Duration", "Outgoing audio stream encoder delay."), recorder.FrameDuration);
    recorder.SamplingRate = (POpusCodec.Enums.SamplingRate)EditorGUILayout.EnumPopup(new GUIContent("Sampling Rate", "Outgoing audio stream sampling rate."), recorder.SamplingRate);
    recorder.Bitrate = EditorGUILayout.IntField(new GUIContent("Bitrate", "Outgoing audio stream bitrate."), recorder.Bitrate);
    // Input source selection.
    EditorGUILayout.LabelField("Audio Source Settings", EditorStyles.boldLabel);
    recorder.SourceType = (Recorder.InputSourceType)EditorGUILayout.EnumPopup(new GUIContent("Input Source Type", "Input audio data source type"), recorder.SourceType);
    switch (recorder.SourceType) {
        case Recorder.InputSourceType.Microphone:
            recorder.MicrophoneType = (Recorder.MicType)EditorGUILayout.EnumPopup(new GUIContent("Microphone Type", "Which microphone API to use when the Source is set to Microphone."), recorder.MicrophoneType);
            switch (recorder.MicrophoneType) {
                case Recorder.MicType.Unity:
                    // NOTE(review): no guard for Microphone.devices being empty here —
                    // the index into Microphone.devices would throw with no mic attached; verify.
                    unityMicrophoneDeviceIndex = EditorGUILayout.Popup("Microphone Device", unityMicrophoneDeviceIndex, Microphone.devices);
                    recorder.UnityMicrophoneDevice = Microphone.devices[unityMicrophoneDeviceIndex];
                    int minFreq, maxFreq;
                    Microphone.GetDeviceCaps(Microphone.devices[unityMicrophoneDeviceIndex], out minFreq, out maxFreq);
                    EditorGUILayout.LabelField("Microphone Device Caps", string.Format("{0}..{1} Hz", minFreq, maxFreq));
                    break;
                case Recorder.MicType.Photon:
#if UNITY_STANDALONE_WIN || UNITY_STANDALONE_OSX
                    if (Recorder.PhotonMicrophoneEnumerator.IsSupported) {
                        recorder.PhotonMicrophoneDeviceId = EditorGUILayout.Popup("Microphone Device", recorder.PhotonMicrophoneDeviceId, photonDeviceNames);
                    } else {
                        recorder.PhotonMicrophoneDeviceId = -1;
                        EditorGUILayout.HelpBox("PhotonMicrophoneEnumerator Not Supported", MessageType.Error);
                    }
#endif
                    break;
                default:
                    throw new ArgumentOutOfRangeException();
            }
            break;
        case Recorder.InputSourceType.AudioClip:
            recorder.AudioClip = EditorGUILayout.ObjectField(new GUIContent("Audio Clip", "Source audio clip."), recorder.AudioClip, typeof(AudioClip), false) as AudioClip;
            recorder.LoopAudioClip = EditorGUILayout.Toggle(new GUIContent("Loop", "Loop playback for audio clip sources."), recorder.LoopAudioClip);
            break;
        case Recorder.InputSourceType.Factory:
            EditorGUILayout.HelpBox("Add a custom InputFactory method in code.", MessageType.Info);
            break;
        default:
            throw new ArgumentOutOfRangeException();
    }
    recorder.TypeConvert = (Recorder.SampleTypeConv)EditorGUILayout.EnumPopup(new GUIContent("Type Convert", "Force creation of 'short' pipeline and convert audio data to short for 'float' audio sources."), recorder.TypeConvert);
    // Voice activity detection + calibration (calibration needs an initialized recorder).
    EditorGUILayout.LabelField("Voice Activity Detection (VAD)", EditorStyles.boldLabel);
    recorder.VoiceDetection = EditorGUILayout.Toggle(new GUIContent("Detect", "If true, voice detection enabled."), recorder.VoiceDetection);
    if (recorder.VoiceDetection) {
        recorder.VoiceDetectionThreshold = EditorGUILayout.FloatField(new GUIContent("Threshold", "Voice detection threshold (0..1, where 1 is full amplitude)."), recorder.VoiceDetectionThreshold);
        recorder.VoiceDetectionDelayMs = EditorGUILayout.IntField(new GUIContent("Delay (ms)", "Keep detected state during this time after signal level dropped below threshold. Default is 500ms"), recorder.VoiceDetectionDelayMs);
        if (recorder.VoiceDetectorCalibrating) {
            EditorGUILayout.LabelField(string.Format("Calibrating {0} ms", calibrationTime));
        } else {
            calibrationTime = EditorGUILayout.IntField("Calibration Time (ms)", calibrationTime);
            if (recorder.IsInitialized) {
                if (GUILayout.Button("Calibrate")) {
                    recorder.VoiceDetectorCalibrate(calibrationTime);
                }
            }
        }
    }
    // Persist any edits made through serialized properties.
    if (EditorGUI.EndChangeCheck()) {
        serializedObject.ApplyModifiedProperties();
    }
}
private IEnumerator DoResetMicrophone(int newSampleRate, bool isAutomatic) { //invalidate sensor. StopMicrophone(); int fmin = -1; int fmax = -1; for (int i = 0; i < Microphone.devices.Length; i++) { string device = Microphone.devices[i]; Microphone.GetDeviceCaps(device, out fmin, out fmax); Debug.Log(i + ":Name: " + device + " min:" + fmin + " max:" + fmax); } if (Microphone.devices.Length == 0) { yield return(new WaitForSeconds(maxInitTime)); if (MicrophoneStartFailed != null) { MicrophoneStartFailed.Invoke(this, isAutomatic); } yield break; } //initialize audio. microphone = GetComponent <AudioSource>(); microphone.loop = true; microphone.mute = false; int micRecordTime = Mathf.CeilToInt(beforeGap + sensorOn + maxRecordTime + sensorOff + afterGap + 1); Debug.Log("micRecordTime:" + micRecordTime); microphone.clip = Microphone.Start(micDeviceName, true, micRecordTime, newSampleRate); yield return(null); float wtime = 0; while (Microphone.GetPosition(micDeviceName) <= 0) { wtime += Time.deltaTime; if (wtime > this.maxInitTime) { if (MicrophoneStartFailed != null) { MicrophoneStartFailed.Invoke(this, isAutomatic); } yield break; } //wait for maicrophone is ready. yield return(null); } microphone.Play(); yield return(null); micSampleRate = newSampleRate; //reset sensor. sensorTime = 0; lastSamplePos = 0; sensorBuf = new float[micSampleRate / 100]; // samples 1/100 sec. if (MicrophoneStarted != null) { MicrophoneStarted.Invoke(this, isAutomatic); } }
/// <summary>
/// Coroutine that configures the FFT sample window around fTarget, draws the
/// SSVEP marker lines, then starts audio input (microphone or demo tone) pitched
/// up by pitchMultiplier. Sample reading is suspended (_readSamplesOn = false)
/// until everything is playing.
/// </summary>
IEnumerator InitializeAudio() {
    _readSamplesOn = false;
    yield return(null);

    //Determine the output sample rate
    sampleRate = Mathf.FloorToInt(AudioSettings.outputSampleRate);
    Debug.Log("outputSampleRate: " + sampleRate.ToString());

    //finding the best whole number pitchMultiplier we can use to scale up the signal to fix the available frequencies
    //(sampleRate / 2) gives us the max potential frequency and the rest gives us the maximum frequency we care about
    if (autoPitchMultiplier) {
        pitchMultiplier = Mathf.Floor((sampleRate / 2) / (fTarget + (fRangeWidth / 2)));
    }

    // Map frequencies (scaled by pitchMultiplier) onto FFT bin indices, and pick
    // the bin range covering fTarget +/- fRangeWidth/2.
    fMax = AudioSettings.outputSampleRate / 2;
    fToSampleRange = pitchMultiplier * numSamples / fMax;
    sampleRangeStart = Mathf.FloorToInt((fTarget - (fRangeWidth / 2)) * fToSampleRange);
    sampleRangeEnd = Mathf.FloorToInt((fTarget + (fRangeWidth / 2)) * fToSampleRange);
    sampleSetSize = sampleRangeEnd - sampleRangeStart;

    //Set sample location for SSVEP samples
    //only low and high values [1], those to the right of 1kHz are used at the moment but those on the left are still stored
    ssvepLowValues = new int[2];
    ssvepLowValues[0] = Mathf.FloorToInt((fTarget - ssvepLowF) * fToSampleRange) - sampleRangeStart;
    ssvepLowValues[1] = Mathf.FloorToInt((fTarget + ssvepLowF) * fToSampleRange) - sampleRangeStart;
    ssvepHighValues = new int[2];
    ssvepHighValues[0] = Mathf.FloorToInt((fTarget - ssvepHighF) * fToSampleRange) - sampleRangeStart;
    ssvepHighValues[1] = Mathf.FloorToInt((fTarget + ssvepHighF) * fToSampleRange) - sampleRangeStart;

    //Add visual guides for marker lines
    _chartLineDataUI.RemoveVerticalMarkerLines();
    _chartLineDataUI.SetVerticalMarkerLine((float)ssvepLowValues[0] / (float)sampleSetSize, ssvepLowF.ToString());
    _chartLineDataUI.SetVerticalMarkerLine((float)ssvepHighValues[0] / (float)sampleSetSize, ssvepHighF.ToString());
    _chartLineDataUI.SetVerticalMarkerLine((float)ssvepLowValues[1] / (float)sampleSetSize, ssvepLowF.ToString());
    _chartLineDataUI.SetVerticalMarkerLine((float)ssvepHighValues[1] / (float)sampleSetSize, ssvepHighF.ToString());

    ResetSamples();

    if (useMicrophone) {
        // Log every device's supported frequency range (diagnostics only;
        // 0-0 means any frequency per Unity's convention).
        foreach (string s in Microphone.devices) {
            Microphone.GetDeviceCaps(s, out deviceSampleRateMin, out deviceSampleRateMax);
            Debug.Log("Device Name: " + s + " [" + deviceSampleRateMin + "-" + deviceSampleRateMax + "]");
        }
        _audio.clip = Microphone.Start(null, true, 5, sampleRate);
        // Wait for the mic to produce its first samples before playing so the
        // AudioSource tracks the recording closely. Fixed: the original spun in a
        // tight `while { }` loop, which hard-froze the main thread if the
        // microphone never became ready; yielding a frame per check keeps the
        // same "wait until ready" semantics without blocking.
        while (Microphone.GetPosition(null) <= 0) {
            yield return(null);
        }
    } else {
        _audio.clip = _demoTone;
    }

    // Pitch the input up so the target band lands inside the analyzable spectrum.
    _audio.pitch = pitchMultiplier;
    _audio.loop = true;
    _audio.Play();
    yield return(new WaitForSeconds(0.2f));
    _readSamplesOn = true;
}