// Picks a capture device (preferring one whose name contains "Webcam"),
// starts a looping 1-second microphone recording into the AudioSource,
// and blocks until the recorder has produced its first samples.
void Start()
{
    string microphone = "Built-in Microphone";
    foreach (var device in Microphone.devices)
    {
        print(device);
        if (device.Contains("Webcam")) // for testing, to use with HTC Vive change search string to HTC
        {
            print("Found microphone");
            microphone = device;
        }
    }
    AudioSource audioSource = GetComponent<AudioSource>();
    print("Starting recording");
    // Loops every 1 second, capturing the audio and overwriting the previous clip.
    audioSource.clip = Microphone.Start(microphone, true, 1, 44100);
    audioSource.loop = false; // playback loop is set to false so the same clip isn't looped
    // FIX: poll the device we actually started. The original polled the
    // default device (null), which can busy-wait forever when a non-default
    // device (e.g. the webcam mic) was selected above.
    while (!(Microphone.GetPosition(microphone) > 0)) { }
    //audioSource.Play();
}
// Update is called once per frame.
// Q starts a one-shot (non-looping, up to 999 s) microphone recording into
// the AudioSource; A stops the recording and persists it via WriteAudioData().
private void Update()
{
    if (Input.GetKeyDown(KeyCode.Q))
    {
        samplingData.Clear();
        //set microphone.
        audio = GetComponent<AudioSource>();
        audio.clip = Microphone.Start(deviceName, false, 999, AudioSettings.outputSampleRate);
        audio.loop = true;
        // Busy-wait until the device has actually begun writing samples.
        while (!(Microphone.GetPosition(deviceName) > 0)) { }
        //recording start.
        audio.Play();
        _isStart = true;
    }
    else if (Input.GetKeyDown(KeyCode.A))
    {
        _isStart = false;
        audio.Stop();
        Microphone.End(deviceName);
        // Persist the captured samples — presumably writes a file; confirm in WriteAudioData().
        WriteAudioData();
    }
}
// Toggles a 5-second microphone recording: one call starts the mic, the next
// stops it, saves the clip as WAV and uploads it.
// NOTE(review): the flag semantics look inverted — after the toggle,
// isRecording == false takes the "start recording" branch, and the log prints
// "off" while recording begins. The stop branch toggles isRecording a second
// time, so the flag ends up back where it started after a stop. Confirm this
// is the intended state machine before changing it.
void spech()
{
    isRecording = !isRecording;
    Debug.Log(isRecording == true ? "off" : "recording");
    if (isRecording == false)
    {
        // Start a 5-second recording on the default device.
        audioSource.clip = Microphone.Start(null, true, 5, 44100);
    }
    else
    {
        // NOTE(review): length is captured but never used — TODO confirm whether
        // the clip was meant to be trimmed to this position before saving.
        int length = Microphone.GetPosition(null);
        Microphone.End(null);
        // clip = SavWav.TrimSilence(audioSource.clip, 10);
        var file = SavWav.Save("myfile", audioSource.clip);
        //clip.Play();
        //Debug.Log(file);
        byte[] bytes = wav.EncodeToWAV(audioSource.clip);
        StartCoroutine(Upload(file, bytes));
        isRecording = !isRecording;
    }
}
// Per-frame transmit pump: reads newly recorded microphone samples in fixed
// chunks and forwards them (via ReadSample) while push-to-talk or the
// transmit toggle is active. Handles the ring-buffer wrap by first draining
// the tail of the buffer, then resetting the read index.
void Update()
{
    if (!recording)
    {
        return;
    }
    forceTransmit -= Time.deltaTime;
    if (Input.GetKeyUp(toggleToTalkKey))
    {
        transmitToggled = !transmitToggled;
    }
    bool transmit = transmitToggled || Input.GetKey(pushToTalkKey);
    int currentPosition = Microphone.GetPosition(Device);
    // This means we wrapped around
    if (currentPosition < previousPosition)
    {
        // Drain everything between the read index and the end of the buffer
        // before starting over from sample 0.
        while (sampleIndex < recordFrequency)
        {
            ReadSample(transmit);
        }
        sampleIndex = 0;
    }
    // Read non-wrapped samples
    previousPosition = currentPosition;
    // Consume full chunks up to (but not past) the mic's current write position.
    while (sampleIndex + recordSampleSize <= currentPosition)
    {
        ReadSample(transmit);
    }
}
//get data from microphone into audioclip
// Returns the peak squared amplitude over the most recent _sampleWindow
// samples of the recording clip (0 if the mic hasn't buffered enough yet).
float LevelMax()
{
    float levelMax = 0;
    float[] waveData = new float[_sampleWindow];
    int micPosition = Microphone.GetPosition(null) - (_sampleWindow + 1); // null means the first microphone
    if (micPosition < 0)
    {
        // Not enough samples recorded yet to fill a whole window.
        return (0);
    }
    _clipRecord.GetData(waveData, micPosition);
    // Getting a peak on the last 128 samples
    for (int i = 0; i < _sampleWindow; i++)
    {
        float wavePeak = waveData[i] * waveData[i];
        if (levelMax <= wavePeak)
        {
            levelMax = wavePeak;
        }
    }
    // FIX: log once per call instead of inside the per-sample loop, which
    // flooded the console (up to _sampleWindow log entries per call) and
    // caused noticeable per-frame overhead.
    Debug.Log("LEVEL MAX: " + levelMax);
    return (levelMax);
}
// Unity Update Function.
// Each frame: (1) sends any microphone samples recorded since the last frame
// over the network, and (2) polls for one pending UDP packet and plays it.
// NOTE(review): the send path does not handle the mic ring buffer wrapping —
// when pos < lastSample, diff <= 0 and that frame's tail samples are skipped;
// confirm this is acceptable.
void Update()
{
    // audio: push newly recorded samples.
    int pos = Microphone.GetPosition(null);
    int diff = pos - lastSample;
    if (diff > 0)
    {
        float[] samples = new float[diff * c.channels];
        c.GetData(samples, lastSample);
        byte[] ba = ToByteArray(samples, c.channels);
        SendData(ba);
    }
    lastSample = pos;
    // Debug.Log("Enter ReceiveKinect's Update");
    // Declare the hashtable reference.
    try
    {
        // receive bytes (client appears to be a non-blocking UDP socket — see
        // the WouldBlock handling below)
        IPEndPoint anyIP = new IPEndPoint(IPAddress.Any, listenport);
        byte[] data = client.Receive(ref anyIP);
        PlayAudio(data);
    }
    catch (SocketException e)
    {
        // WouldBlock is the normal "nothing to read yet" case for a
        // non-blocking socket; anything else is a real error.
        if (e.SocketErrorCode != SocketError.WouldBlock)
        {
            throw;
        }
    }
    catch (Exception err)
    {
        Debug.LogError(err.ToString());
    }
}
// Starts (or restarts) a looping 10-second microphone capture into the
// AudioSource and begins playback once the device is delivering samples.
void StartMicrophone() // start the microphone
{
    audioSource.Stop();
    // Start recording: (mic name, loop flag, length in seconds, sample rate).
    audioSource.clip = Microphone.Start(microphone, true, 10, 44100);
    audioSource.loop = true;
    Debug.Log(Microphone.IsRecording(microphone).ToString());
    if (Microphone.IsRecording(microphone))
    {
        // Confirm the mic is really producing data (busy-wait until it is).
        while (!(Microphone.GetPosition(microphone) > 0)) { }
        // Log message reads "recording started: <device>".
        Debug.Log("녹음 시작 : " + microphone);
        audioSource.Play();
    }
    else
    {
        // Log message reads "recording failed: <device>".
        Debug.Log("녹음 실패 : " + microphone);
    }
}
// Returns all microphone samples recorded since the previous call, handling
// the case where the circular recording buffer wrapped around since then.
// Returns null when no new samples are available.
private float[] GetSamples()
{
    int pos = Microphone.GetPosition(DEVICE_NAME);
    float[] samples;
    if (pos - lastSample > 0)
    {
        // Simple case: one contiguous range [lastSample, pos).
        int start = lastSample;
        int end = pos;
        samples = new float[(end - start) * micRecording.channels];
        micRecording.GetData(samples, start);
    }
    else if (pos - lastSample != 0)
    {
        // Wrapped: read the tail of the buffer, then the head up to pos.
        int start1 = lastSample;
        // FIX: read to the full buffer length. The original used
        // micRecording.samples - 1, silently dropping the final sample of the
        // buffer on every wrap-around.
        int end1 = micRecording.samples;
        float[] samples1 = new float[(end1 - start1) * micRecording.channels];
        micRecording.GetData(samples1, start1);
        int start2 = 0;
        int end2 = pos;
        float[] samples2 = new float[(end2 - start2) * micRecording.channels];
        micRecording.GetData(samples2, start2);
        // Stitch the two ranges into one chronological array.
        samples = new float[samples1.Length + samples2.Length];
        samples1.CopyTo(samples, 0);
        samples2.CopyTo(samples, samples1.Length);
    }
    else
    {
        return (null);
    }
    lastSample = pos;
    return (samples);
}
private const int SampleFrequency = 44100; /// Common sampling rate for recording analog audio.

// Start on awake so that it's set up before any other scripts that are dependent on this one.
void Awake()
{
    // Add an audio source Unity component to our game object and keep the
    // reference AddComponent returns.
    // FIX: the original discarded this reference and called
    // GetComponent<AudioSource>() three separate times.
    AudioSource source = gameObject.AddComponent<AudioSource>();
    // Throw an error message if no microphone is detected and exit out of the script.
    if (Microphone.devices.Length == 0)
    {
        Debug.Log("No microphone detected.");
        return;
    }
    // Start recording from the microphone (default device, up to one hour, no loop).
    source.clip = Microphone.Start(null, false, 3600, SampleFrequency);
    // Set the audio mixer to our custom made silent mixer (this prevents audio playback).
    source.outputAudioMixerGroup = audioOutput;
    // Check to make sure microphone is recording ("" also addresses the default device).
    if (Microphone.IsRecording(""))
    {
        // Wait until recording actually starts.
        while (Microphone.GetPosition(null) == 0) { ; }
        // Play our audio clip (plays the microphone recording in real-time).
        source.Play();
    }
    // If microphone isn't recording, throw an error message.
    else
    {
        Debug.Log("Problem with microphone: " + Microphone.devices[0]);
    }
}
// Saves the current AudioSource clip to StreamingAssets/<fileName>.wav.
// NOTE(review): blocks until the default microphone reports recorded data —
// presumably callers invoke this while a recording is active; confirm,
// otherwise this busy-wait never exits.
public static void Save(string fileName = "test")
{
    while (!(Microphone.GetPosition(null) > 0)) { }
    // Copy the full clip (all channels, interleaved) into a managed buffer.
    samplesData = new float[audioSource.clip.samples * audioSource.clip.channels];
    audioSource.clip.GetData(samplesData, 0);
    string filePath = Path.Combine(Application.streamingAssetsPath, fileName + ".wav");
    // Delete the file if it exists.
    if (File.Exists(filePath))
    {
        File.Delete(filePath);
    }
    try
    {
        WriteWAVFile(audioSource.clip, filePath);
        Debug.Log("File Saved Successfully at StreamingAssets/" + fileName + ".wav");
    }
    catch (DirectoryNotFoundException)
    {
        // StreamingAssets is not created automatically by Unity.
        Debug.LogError("Please, Create a StreamingAssets Directory in the Assets Folder");
    }
}
// Reads the most recent sampleWindow samples from the microphone clip and
// returns the largest squared amplitude found, or 0 when the mic has not yet
// buffered a full window.
float RecordedVolume()
{
    // null selects the first (default) microphone device.
    int readOffset = Microphone.GetPosition(null) - (sampleWindow + 1);
    if (readOffset < 0)
    {
        return 0;
    }
    float[] window = new float[sampleWindow];
    clipRecord.GetData(window, readOffset);
    // Track the peak of the squared samples across the window.
    float peak = 0;
    foreach (float sample in window)
    {
        float squared = sample * sample;
        if (peak < squared)
        {
            peak = squared;
        }
    }
    return peak;
}
// Peak detector over the last sample_ microphone samples: returns the maximum
// squared amplitude, or 0 when the recording hasn't buffered enough data yet.
float LevelMax()
{
    float[] buffer = new float[sample_];
    // null is the first microphone
    int position = Microphone.GetPosition(null) - (sample_ + 1);
    if (position < 0)
    {
        return 0;
    }
    clipRecord_.GetData(buffer, position);
    // Scan the window for the largest squared sample.
    float max = 0;
    for (int index = 0; index < sample_; ++index)
    {
        float energy = buffer[index] * buffer[index];
        max = (max < energy) ? energy : max;
    }
    return max;
}
// Stops the microphone and trims the recorded AudioClip down to the samples
// actually captured (Microphone.Start allocates the full requested duration
// up front, so the raw clip length never reflects when we stopped).
public void StopRecord()
{
    // Grab the mic's current write position (in samples) before ending it.
    int position = Microphone.GetPosition(deviceName: micName);
    // Forcibly end the microphone recording.
    Microphone.End(deviceName: micName);
    // The clip still reports maxDuration as its length no matter when we
    // stopped — the remainder is presumably silence, hence the trim below.
    Debug.Log("修正前の録音時間: " + audioClip.length);
    // Stash the clip's raw sample data in a temporary buffer.
    float[] soundData = new float[audioClip.samples * audioClip.channels];
    audioClip.GetData(soundData, 0);
    // Allocate a new buffer sized to just the recorded portion.
    float[] newData = new float[position * audioClip.channels];
    // Copy only the first `position` samples (times channel count).
    for (int i = 0; i < newData.Length; i++)
    {
        newData[i] = soundData[i];
    }
    // Create a fresh AudioClip of the trimmed length and load the data into it.
    AudioClip newClip = AudioClip.Create(audioClip.name, position, audioClip.channels, audioClip.frequency, false);
    newClip.SetData(newData, 0);
    // Swap the trimmed clip in and destroy the old full-length one.
    AudioClip.Destroy(audioClip);
    audioClip = newClip;
    // Report the corrected recording length.
    Debug.Log("修正後の録音時間: " + audioClip.length);
}
// Put the microphone recording into the audiosource.
// Restarts capture on the default device whenever it is not already
// recording, throttled to at most once every half second.
void MicrophoneIntoAudioSource()
{
    // A little pause is needed to reduce the amount of lag in the recording
    if (Time.time - timeSinceRestart > 0.5f && !Microphone.IsRecording(null))
    {
        // Start the recording of the audioclip
        // We use null to make use of the default microphone
        // We use loop for when the recording stops, that it will use whatever it had recorded
        // 300 seconds to record should be enough to demonstrate the shader that we use this for
        src.clip = Microphone.Start(null, true, 300, AudioSettings.outputSampleRate);
        // This is used to control latency
        // It checks how many samples that the microphone
        // has been recording into the audioclip
        // We will use 0 because we want to have an immediate effect
        while (!(Microphone.GetPosition(null) > 0)) { }
        // Play back the audio that is in the clip, we will use this playback
        // to determine the volume and pitch
        src.Play();
    }
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//for the above control the mic start or stop
// Begins looped microphone capture into the audio source, yields one frame at
// a time until the device reports recorded samples, then starts playback.
public IEnumerator StartMicrophone()
{
    //Starts recording
    audioSource.clip = Microphone.Start(selectedDevice, true, bufferTime, setFrequency);
    // Wait if a device is detected and only then start the recording.
    while (Microphone.GetPosition(selectedDevice) <= 0)
    {
        if (debug)
        {
            Debug.Log("Waiting for connection:" + Time.deltaTime);
        }
        yield return (0);
    }
    if (debug)
    {
        Debug.Log("started" + ", Freq (Hz): " + setFrequency + ", samples: " + amountSamples + ", sensitivity: " + sensitivity);
    }
    // Play the audio source!
    audioSource.Play();
    if (debug)
    {
        Debug.Log("Receiving data");
    }
}
// Start is called before the first frame update.
// Initializes the (initially all-zero/transparent) gradient and animation
// curves, caches ParticleSystem components for this object and the eight fire
// children, and begins a muted, looping microphone capture whose signal
// drives the visual effects.
void Start()
{
    grad_used.SetKeys(new GradientColorKey[] { new GradientColorKey(new Color(0.0f, 0.0f, 0.0f), 0.0f), new GradientColorKey(new Color(0.0f, 0.01f, 0.0f), 1.0f) }, new GradientAlphaKey[] { new GradientAlphaKey(0.0f, 0.0f), new GradientAlphaKey(0.0f, 0.0f), new GradientAlphaKey(0.0f, 0.53f), new GradientAlphaKey(0.0f, 1.0f) });
    used = new AnimationCurve(new Keyframe[] { new Keyframe(0f, 0.0f), new Keyframe(0.5f, 0.0f), new Keyframe(1f, 0.0f) });
    used2 = new AnimationCurve(new Keyframe[] { new Keyframe(0f, 0.0f), new Keyframe(0.5f, 0.0f), new Keyframe(1f, 0.0f) });
    // Cache the particle systems (this object plus the eight fire objects).
    ps = this.GetComponent <ParticleSystem>();
    ps1 = fire1.GetComponent <ParticleSystem>();
    ps2 = fire2.GetComponent <ParticleSystem>();
    ps3 = fire3.GetComponent <ParticleSystem>();
    ps4 = fire4.GetComponent <ParticleSystem>();
    ps5 = fire5.GetComponent <ParticleSystem>();
    ps6 = fire6.GetComponent <ParticleSystem>();
    ps7 = fire7.GetComponent <ParticleSystem>();
    ps8 = fire8.GetComponent <ParticleSystem>();
    _audio = GetComponent <AudioSource>();
    _audio.clip = Microphone.Start(null, true, 10, 44100); //deviceName, loop bool, secounds, frequency
    _audio.loop = true;
    // Muted: the mic feed only drives the visuals, it is never heard.
    _audio.mute = true;
    // Busy-wait until the microphone has begun producing samples.
    while (!(Microphone.GetPosition(null) > 0)) { }
    _audio.Play();
}
// Starts low-latency looping microphone playback through the AudioSource
// when this component is enabled.
protected override void OnEnable()
{
    base.OnEnable();
    if (Microphone.devices.Length == 0)
    {
        Debug.LogFormat("Microphone device not found");
        return;
    }
    var deviceName = Microphone.devices[deviceIndex];
    Microphone.GetDeviceCaps(deviceName, out int minFreq, out int maxFreq);
    // FIX: honor the device's reported capabilities instead of always
    // requesting 48 kHz — the original queried GetDeviceCaps and then ignored
    // the result. Unity reports min == max == 0 when the device supports any
    // frequency, in which case 48 kHz is fine.
    int frequency = (minFreq == 0 && maxFreq == 0) ? 48000 : Mathf.Clamp(48000, minFreq, maxFreq);
    var micClip = Microphone.Start(deviceName, true, 1, frequency);
    // set the latency to "0" samples before the audio starts to play.
    while (!(Microphone.GetPosition(deviceName) > 0)) { }
    audioSource.clip = micClip;
    audioSource.loop = true;
    audioSource.Play();
}
// Restarts the microphone at a new sample rate. Invokes MicrophoneStartFailed
// when no device exists or the device does not begin delivering samples
// within maxInitTime; invokes MicrophoneStarted once audio is flowing.
// isAutomatic is passed through to those callbacks unchanged.
private IEnumerator DoResetMicrophone(int newSampleRate, bool isAutomatic)
{
    //invalidate sensor.
    StopMicrophone();
    int fmin = -1;
    int fmax = -1;
    // Log the capabilities of every attached capture device (diagnostics only).
    for (int i = 0; i < Microphone.devices.Length; i++)
    {
        string device = Microphone.devices[i];
        Microphone.GetDeviceCaps(device, out fmin, out fmax);
        Debug.Log(i + ":Name: " + device + " min:" + fmin + " max:" + fmax);
    }
    if (Microphone.devices.Length == 0)
    {
        // No device at all: wait out the init window, then report failure.
        yield return (new WaitForSeconds(maxInitTime));
        if (MicrophoneStartFailed != null)
        {
            MicrophoneStartFailed.Invoke(this, isAutomatic);
        }
        yield break;
    }
    //initialize audio.
    microphone = GetComponent <AudioSource>();
    microphone.loop = true;
    microphone.mute = false;
    // Buffer long enough for the whole sense window plus one second padding.
    int micRecordTime = Mathf.CeilToInt(beforeGap + sensorOn + maxRecordTime + sensorOff + afterGap + 1);
    Debug.Log("micRecordTime:" + micRecordTime);
    microphone.clip = Microphone.Start(micDeviceName, true, micRecordTime, newSampleRate);
    yield return (null);
    float wtime = 0;
    // Wait (up to maxInitTime) for the device to start delivering samples.
    while (Microphone.GetPosition(micDeviceName) <= 0)
    {
        wtime += Time.deltaTime;
        if (wtime > this.maxInitTime)
        {
            if (MicrophoneStartFailed != null)
            {
                MicrophoneStartFailed.Invoke(this, isAutomatic);
            }
            yield break;
        }
        //wait for microphone to be ready.
        yield return (null);
    }
    microphone.Play();
    yield return (null);
    micSampleRate = newSampleRate;
    //reset sensor.
    sensorTime = 0;
    lastSamplePos = 0;
    sensorBuf = new float[micSampleRate / 100]; // samples 1/100 sec.
    if (MicrophoneStarted != null)
    {
        MicrophoneStarted.Invoke(this, isAutomatic);
    }
}
// Shows the user a numbered list of microphones, waits for them to type a
// 1-based selection and press Return, then starts looped recording on the
// chosen device into the AudioSource and hands control to GetReady().
private IEnumerator SetUp()
{
    var length = Microphone.devices.Length;
    if (!(m_source != null && length > 0)) // need both an AudioSource and at least one mic
    {
        Debug.LogError("AudioSourceかマイクが見つからない");
        yield break;
    }
    // Build the 1-based device menu shown to the user.
    var text = "Microphone List";
    for (int i = 0; i < length; i++)
    {
        text += "\n" + (i + 1).ToString() + "." + Microphone.devices[i];
    }
    setText.text = text;
    var devName = "";
    var num = -1;
    setField.ActivateInputField();
    while (true)
    {
        if (Input.GetKeyDown(KeyCode.Return))
        {
            if (int.TryParse(setField.text, out num))
            {
                // FIX: the menu is 1-based. The original accepted num == 0 and
                // then indexed devices[-1], throwing IndexOutOfRangeException.
                if (1 <= num && num <= length)
                {
                    devName = Microphone.devices[num - 1];
                    break;
                }
                else
                {
                    // "A number outside the list range was entered."
                    Debug.LogError("リストの範囲外の数字が入力されている");
                }
            }
            else
            {
                // "Something other than an integer was entered."
                Debug.LogError("整数以外が入力されている");
            }
        }
        yield return (null);
    }
    m_source.loop = true; // keep looping the mic clip
    m_source.clip = Microphone.Start(devName, true, recordingSec, samplingFrequency); // record into the clip
    // Wait until the device has produced samples so playback starts with real data.
    // FIX: also removed a stray Microphone.GetPosition(null) call here whose
    // result was discarded (dead code).
    while (!(Microphone.GetPosition(devName) > 0)) { }
    m_source.Play();
    API = GetComponent <JsonTest>();
    setText.gameObject.SetActive(false);
    setField.gameObject.SetActive(false);
    GetReady();
}
// Stops the current voice recording, strips silent (zero) samples, saves the
// result as a WAV named <timestamp>_<roleId>_<roomId>.wav, uploads it, and
// notifies the Lua layer via successFuncName/failFuncName callbacks.
public void EndRecord(int roomId, int roleId, string url, string successFuncName, string failFuncName)
{
    if (Microphone.devices.Length == 0)
    {
        UnityEngine.Debug.LogError("Microphone.devices is null");
        return;
    }
    int lastPos = Microphone.GetPosition(Microphone.devices[0]);
    float length = audioLength;
    if (Microphone.IsRecording(Microphone.devices[0])) // still recording => under the max duration
    {
        // Recording duration in seconds.
        // NOTE(review): if lastPos and SamplingRate are both ints this is
        // integer division and truncates to whole seconds before the
        // `length < 1.0f` check below — confirm float division was intended.
        length = lastPos / SamplingRate;
    }
    Microphone.End(Microphone.devices[0]);
    LuaState lua = LuaInstance.instance.Get();
    if (length < 1.0f) // recordings under 1 second are discarded
    {
        lua.LuaFuncCall(PlatformSDKController.mSelf.luaPlatformHanderRef, failFuncName, "");
        return;
    }
    // Build a sortable timestamp (yyyyMMddHHmmss) for the file name.
    DateTime now = DateTime.Now;
    string str = now.ToString("s");
    str = str.Replace("-", "");
    str = str.Replace(":", "");
    str = str.Replace("T", "");
    this.mfilename = str + "_" + roleId + "_" + roomId;
    this.voice.name = this.mfilename;
    // NOTE(review): fixed-size 200000-sample buffer — assumes the recording
    // never exceeds this; anything beyond is silently dropped.
    this.data = new float[200000];
    this.voice.GetData(this.data, 0);
    this.data2.Clear();
    // Drop exact-zero samples (crude trim of unrecorded buffer space —
    // NOTE(review): this also removes legitimate zero crossings).
    for (int index = 0; index < this.data.Length; ++index)
    {
        if ((double)this.data[index] != 0.0)
        {
            this.data2.Add(this.data[index]);
        }
    }
    //UnityEngine.Debug.Log("count: " + data2.Count + ", length: " + length + ", cliplength: " + voice.length);
    // Repack the surviving samples into a mono 10 kHz clip.
    AudioClip clip = AudioClip.Create(this.mfilename, this.data2.Count, 1, 10000, false);
    clip.SetData(this.data2.ToArray(), 0);
    string fileName = mfilename + ".wav";
    if (RecordVoice.instance.Save(fileName, clip))
    {
        byte[] data = File.ReadAllBytes(Application.persistentDataPath + "/" + fileName);
        WWWPortraitLoader.UploadVoice(url, fileName, data, successFuncName, failFuncName);
    }
    lua.LuaFuncCall(PlatformSDKController.mSelf.luaPlatformHanderRef, successFuncName, fileName, length);
    if (isPlay)
    {
        NGUITools.PlaySound(clip);
    }
}
// Coroutine that records from the microphone into a looping buffer and
// streams it to the speech service in fixed-size segments: whenever the mic
// write position has moved past (or wrapped before) the current segment, that
// segment is copied out, wrapped in an AudioData, and sent via OnListen.
private IEnumerator RecordingHandler()
{
    Log.Debug("ExampleStreamingSplitSamples.RecordingHandler()", "devices: {0}", Microphone.devices);
    // Start recording
    _recording = Microphone.Start(_microphoneID, true, _recordingBufferSize, _recordingHZ);
    yield return (null);
    if (_recording == null)
    {
        StopRecording();
        yield break;
    }
#if ENABLE_TIME_LOGGING
    // Set a reference to now to check timing
    DateTime now = DateTime.Now;
#endif
    // Current sample segment number
    int sampleSegmentNum = 0;
    // Size of the sample segment in samples
    int sampleSegmentSize = _recording.samples / _sampleSegments;
    // Init samples
    float[] samples = null;
    while (_recordingRoutine != 0 && _recording != null)
    {
        // Get the mic position
        int microphonePosition = Microphone.GetPosition(_microphoneID);
        if (microphonePosition > _recording.samples || !Microphone.IsRecording(_microphoneID))
        {
            Log.Error("ExampleStreamingSplitSamples.RecordingHandler()", "Microphone disconnected.");
            StopRecording();
            yield break;
        }
        int sampleStart = sampleSegmentSize * sampleSegmentNum;
        int sampleEnd = sampleSegmentSize * (sampleSegmentNum + 1);
#if ENABLE_DEBUGGING
        Log.Debug("ExampleStreamingSplitSamples.RecordinHandler", "microphonePosition: {0} | sampleStart: {1} | sampleEnd: {2} | sampleSegmentNum: {3}", microphonePosition.ToString(), sampleStart.ToString(), sampleEnd.ToString(), sampleSegmentNum.ToString());
#endif
        //If the write position is past the end of the sample segment or if write position is before the start of the sample segment
        while (microphonePosition > sampleEnd || microphonePosition < sampleStart)
        {
            // Init samples
            samples = new float[sampleSegmentSize];
            // Write data from recording into samples starting from the sampleSegmentStart
            _recording.GetData(samples, sampleStart);
            // Create AudioData and use the samples we just created
            AudioData record = new AudioData();
            // MaxLevel is the largest absolute amplitude in the segment.
            record.MaxLevel = Mathf.Max(Mathf.Abs(Mathf.Min(samples)), Mathf.Max(samples));
            record.Clip = AudioClip.Create("Recording", sampleSegmentSize, _recording.channels, _recordingHZ, false);
            record.Clip.SetData(samples, 0);
            // Send the newly created AudioData to the service
            _service.OnListen(record);
            // Iterate or reset sampleSegmentNum
            if (sampleSegmentNum < _sampleSegments - 1)
            {
                sampleSegmentNum++;
#if ENABLE_DEBUGGING
                Log.Debug("ExampleStreamingSplitSamples.RecordingHandler()", "Iterating sampleSegmentNum: {0}", sampleSegmentNum);
#endif
            }
            else
            {
                sampleSegmentNum = 0;
#if ENABLE_DEBUGGING
                Log.Debug("ExampleStreamingSplitSamples.RecordingHandler()", "Resetting sampleSegmentNum: {0}", sampleSegmentNum);
#endif
            }
#if ENABLE_TIME_LOGGING
            Log.Debug("ExampleStreamingSplitSamples.RecordingHandler", "Sending data - time since last transmission: {0} ms", Mathf.Floor((float)(DateTime.Now - now).TotalMilliseconds));
            now = DateTime.Now;
#endif
            // Advance the window to the (possibly reset) next segment.
            sampleStart = sampleSegmentSize * sampleSegmentNum;
            sampleEnd = sampleSegmentSize * (sampleSegmentNum + 1);
        }
        yield return (0);
    }
    yield break;
}
// Per-frame audio visualizer driver. Pulls waveform and FFT data from the
// AudioSource, folds/normalizes the spectrum, smooths the waveform into a
// 1-pixel-high texture row consumed by shaders, and runs the music-loading /
// mic-mode state machine.
void Update()
{
    if (m_Active)
    {
        //get fft and wave form data from unity/fmod
        m_AudioSource.GetOutputData(m_WaveFormFloats, 0);
        m_AudioSource.GetSpectrumData(m_SpectrumFloats, 0, FFTWindow.BlackmanHarris);
        m_SpectrumFloats.CopyTo(m_SpectrumFloatsTempArray, 0);
        // Average adjacent FFT bins into the lower half, then mirror into the
        // upper half so the spectrum array stays full-size.
        int iHalfCapture = m_CaptureSize / 2;
        float fHalfFFTScale = 0.5f;
        for (int i = 0; i < iHalfCapture; ++i)
        {
            float fNormalized = (m_SpectrumFloatsTempArray[i * 2] + m_SpectrumFloatsTempArray[i * 2 + 1]) * fHalfFFTScale;
            m_SpectrumFloats[i] = fNormalized;
            m_SpectrumFloats[i + iHalfCapture] = fNormalized;
        }
        for (int i = 0; i < m_CaptureSize; ++i)
        {
            // Maybe the envelope should be a lookup function
            float fEnvelope = 1.0f - Mathf.Pow(Mathf.Abs(((float)i / (float)m_CaptureSize) - 0.5f) * 2, 3.0f);
            m_WaveFormFloatsSmooth[i] = Mathf.Lerp(m_WaveFormFloatsSmooth[i], m_WaveFormFloats[i], m_SmoothLerp) * fEnvelope;
            // Bias into [0,1]-ish range for storage in the texture's alpha channel.
            m_WaveFormRow[i].a = m_WaveFormFloatsSmooth[i] + 0.5f;
        }
        UpdateShaders();
        // Music/mic state machine: Playing -> (song ends) -> load next;
        // WaitingForWWW -> WaitingForAudio -> WaitingForStart -> Playing;
        // MicMode periodically restarts the microphone.
        if (m_CurrentLoadMusicState == LoadMusicState.Playing)
        {
            if (!m_AudioSource.isPlaying)
            {
                //loop music
                LoadNextSong();
            }
        }
        else if (m_CurrentLoadMusicState == LoadMusicState.WaitingForWWW)
        {
            if (m_LoadMusicWWW.isDone)
            {
                if (m_LoadMusicWWW.GetAudioClip() == null)
                {
                    LoadNextSong();
                }
                else
                {
                    m_AudioSource.clip = m_LoadMusicWWW.GetAudioClip(false);
                    m_CurrentLoadMusicState = LoadMusicState.WaitingForAudio;
                }
            }
        }
        else if (m_CurrentLoadMusicState == LoadMusicState.WaitingForAudio)
        {
            if (m_AudioSource.clip.loadState == AudioDataLoadState.Loaded)
            {
                if (AudioManager.Enabled)
                {
                    m_AudioSource.Play();
                }
                m_CurrentLoadMusicState = LoadMusicState.WaitingForStart;
            }
        }
        else if (m_CurrentLoadMusicState == LoadMusicState.WaitingForStart)
        {
            if (m_AudioSource.isPlaying)
            {
                m_CurrentLoadMusicState = LoadMusicState.Playing;
            }
        }
        else if (m_CurrentLoadMusicState == LoadMusicState.MicMode)
        {
            // Periodically bounce the mic to keep latency in check.
            m_MicResetCountdown -= Time.deltaTime;
            if (m_MicResetCountdown <= 0.0f)
            {
                EnableMic(false);
                EnableMic(true);
            }
#if !UNITY_ANDROID && !UNITY_WEBGL
            //start playing on audio source when mic is ready to go
            if (Microphone.IsRecording("") && (Microphone.GetPosition("") > 0) && !m_AudioSource.isPlaying)
            {
                m_AudioSource.Play();
            }
#endif
        }
        // Publish the smoothed waveform row to the shared shader texture.
        m_WaveFormTexture.SetPixels(0, 0, m_WaveFormTextureWidth, 1, m_WaveFormRow);
        m_WaveFormTexture.Apply();
        Shader.SetGlobalTexture("_WaveFormTex", m_WaveFormTexture);
    }
}
// Toggles loop recording for this looper. On start: stops any playback,
// begins a 40-second mic capture and computes when the first down beat will
// land (honoring the configured count-in). On stop: trims the captured audio
// to whole measures between the first and last down beats and loads the
// trimmed clip into two alternating AudioSources for gapless loop playback.
public void Record()
{
    if (Microphone.devices.Length <= 0)
    {
        Station.instance.ShowErrorMessage("No microphone detected");
        return;
    }
    bIsRecording = !bIsRecording;
    if (bIsRecording == true)
    {
        //don't record if another looper is recording
        if (Station.instance.bRecordingALoop)
        {
            bIsRecording = false;
            return;
        }
        recordButtonText.text = "Stop";
        // Snapshot tempo/meter so later trimming uses the values in effect now.
        recordedBPM = Station.instance.bpm;
        recordedSignatureHi = Station.instance.signatureHi;
        Station.instance.bRecordingALoop = true;
        //stop all recording processes before we start a new recording
        audioSource0.Stop();
        audioSource1.Stop();
        Microphone.End(null);
        audioSource0.clip = null;
        audioSource1.clip = null;
        //start recording
        audioSource0.clip = Microphone.Start(null, true, 40, frequency);
        recordingStartTime = AudioSettings.dspTime;
        //set the time of the first down beat
        if (Station.instance.recordingCountIn == 1)
        {
            recordingFirstBeatTime = Station.instance.nextDownBeatTime;
        }
        else if (Station.instance.recordingCountIn == 2)
        {
            recordingFirstBeatTime = Station.instance.nextDownBeatTime + Station.instance.timeBetweenDownBeats;
        }
        else
        {
            recordingFirstBeatTime = Station.instance.nextDownBeatTime + 2 * Station.instance.timeBetweenDownBeats;
        }
        //once the first down beat has been reached, change the looper color to red
        StartCoroutine(ChangeLooperBackgroundRoutine(recordingFirstBeatTime - recordingStartTime));
    }
    else
    {
        looperBackgroundImage.color = new Color(173f / 255f, 173f / 255f, 173f / 255f, 222f / 255f);
        recordButtonText.text = "Record";
        //stop recording and store recorded data into audio clip
        int uncutLength = Microphone.GetPosition(null);
        Microphone.End(null);
        float[] uncutClipData = new float[uncutLength];
        audioSource0.clip.GetData(uncutClipData, 0);
        double endTime = AudioSettings.dspTime;
        //cut down the recorded clip to match the correct start and end beat
        double beginningTimeToDiscard = recordingFirstBeatTime - recordingStartTime;
        int beginningSamplesToDiscard = Mathf.RoundToInt((float)(frequency * beginningTimeToDiscard));
        double endTimeToDiscard;
        // Count-out determines which past down beat marks the loop's end.
        if (Station.instance.recordingCountOut == 1)
        {
            endTimeToDiscard = endTime - Station.instance.previousDownBeatTime;
        }
        else if (Station.instance.recordingCountOut == 2)
        {
            endTimeToDiscard = endTime - (Station.instance.previousDownBeatTime - Station.instance.timeBetweenDownBeats);
        }
        else
        {
            endTimeToDiscard = endTime - (Station.instance.previousDownBeatTime - 2 * Station.instance.timeBetweenDownBeats);
        }
        if (endTimeToDiscard < 0)
        {
            //we didn't wait long enough to stop the recording
            Station.instance.ShowErrorMessage("Failed to create clip: Did not record long enough.");
            Station.instance.bRecordingALoop = false;
            looperBackgroundImage.color = new Color(173f / 255f, 173f / 255f, 173f / 255f, 180f / 255f);
            return;
        }
        // NOTE(review): the -6000 sample fudge presumably compensates for
        // input latency — confirm against the recording hardware.
        int endSamplesToDiscard = Mathf.Max(0, Mathf.RoundToInt((float)(frequency * endTimeToDiscard)) - 6000); ;
        int clipLength = uncutLength - beginningSamplesToDiscard - endSamplesToDiscard;
        if (clipLength <= 0)
        {
            //this shouldn't happen
            Station.instance.ShowErrorMessage("Failed to create clip: clip length is less than or equal to zero.");
            looperBackgroundImage.color = new Color(173f / 255f, 173f / 255f, 173f / 255f, 180f / 255f);
            Debug.Log($"SAMPLES: Uncutlength {uncutLength}" + $" beginningSamplesToDiscard {beginningSamplesToDiscard} endSamplesToDiscard {endSamplesToDiscard} " + $"CurrentTime {AudioSettings.dspTime} PreviousDownBeatTime {Station.instance.previousDownBeatTime}");
            Station.instance.bRecordingALoop = false;
            return;
        }
        //create the new trimmed clip
        float[] clipData = new float[clipLength];
        for (int i = 0; i < clipLength; i++)
        {
            if (beginningSamplesToDiscard + i < uncutLength)
            {
                clipData[i] = uncutClipData[beginningSamplesToDiscard + i];
            }
        }
        //use the same clip in two audio sources which we will play back to back
        audioSource0.clip = AudioClip.Create("clip0", clipData.Length, 1, frequency, false);
        audioSource0.clip.SetData(clipData, 0);
        audioSource1.clip = AudioClip.Create("clip1", clipData.Length, 1, frequency, false);
        audioSource1.clip.SetData(clipData, 0);
        numberOfMeasures = Mathf.Ceil((float)((clipData.Length / frequency) / Station.instance.timeBetweenDownBeats));
        bHasClip = true;
        Station.instance.bRecordingALoop = false;
        playButton.enabled = true;
        looperBackgroundImage.color = new Color(173f / 255f, 173f / 255f, 173f / 255f, 222f / 255f);
    }
}
// Coroutine that streams microphone audio to the speech-to-text service in
// half-buffer blocks: while one half of the looping recording buffer is being
// written, the other (completed) half is copied out and sent.
private IEnumerator RecordingHandler()
{
    //temp.text += " Recording handler called ";
    m_Recording = Microphone.Start(m_MicrophoneID, true, m_RecordingBufferSize, m_RecordingHZ);
    yield return (null); // let m_RecordingRoutine get set..
    if (m_Recording == null)
    {
        //temp.text += " m_Recording is null ";
        StopRecording();
        yield break;
    }
    else
    {
        //temp.text += " mike is not null";
    }
    bool bFirstBlock = true;
    int midPoint = m_Recording.samples / 2;
    float[] samples = null;
    while (m_RecordingRoutine != 0 && m_Recording != null)
    {
        int writePos = Microphone.GetPosition(m_MicrophoneID);
        //temp.text += " writePos is " + writePos.ToString ();
        if (writePos > m_Recording.samples || !Microphone.IsRecording(m_MicrophoneID))
        {
            Log.Error("MicrophoneWidget", "Microphone disconnected.");
            temp.text += " Problem with the mike ";
            StopRecording();
            yield break;
        }
        // When the writer has crossed into the other half, the half we were
        // waiting on is complete and can be shipped.
        if ((bFirstBlock && writePos >= midPoint) || (!bFirstBlock && writePos < midPoint))
        {
            // front block is recorded, make a RecordClip and pass it onto our callback.
            samples = new float[midPoint];
            m_Recording.GetData(samples, bFirstBlock ? 0 : midPoint);
            //temp.text += " Passing to callback ";
            AudioData record = new AudioData();
            record.MaxLevel = Mathf.Max(samples);
            record.Clip = AudioClip.Create("Recording", midPoint, m_Recording.channels, m_RecordingHZ, false);
            record.Clip.SetData(samples, 0);
            m_SpeechToText.OnListen(record);
            bFirstBlock = !bFirstBlock;
        }
        else
        {
            // calculate the number of samples remaining until we ready for a block of audio,
            // and wait that amount of time it will take to record.
            int remaining = bFirstBlock ? (midPoint - writePos) : (m_Recording.samples - writePos);
            float timeRemaining = (float)remaining / (float)m_RecordingHZ;
            //temp.text += " Waiting for audio sample to finish ";
            yield return (new WaitForSeconds(timeRemaining));
        }
    }
    yield break;
}
// Coroutine that streams microphone audio to the speech-to-text service in
// half-buffer blocks, and drives an on-screen level meter plus a
// "user:isSpeaking" state flag from each block's peak level.
private IEnumerator RecordingHandler()
{
    Log.Debug("ExampleStreaming.RecordingHandler()", "devices: {0}", Microphone.devices);
    _recording = Microphone.Start(_microphoneID, true, _recordingBufferSize, _recordingHZ);
    yield return (null); // let _recordingRoutine get set..
    if (_recording == null)
    {
        StopRecording();
        yield break;
    }
    bool bFirstBlock = true;
    int midPoint = _recording.samples / 2;
    float[] samples = null;
    while (_recordingRoutine != 0 && _recording != null)
    {
        int writePos = Microphone.GetPosition(_microphoneID);
        if (writePos > _recording.samples || !Microphone.IsRecording(_microphoneID))
        {
            Log.Error("ExampleStreaming.RecordingHandler()", "Microphone disconnected.");
            StopRecording();
            yield break;
        }
        // When the writer has crossed into the other half of the looping
        // buffer, the half we were waiting on is complete and can be shipped.
        if ((bFirstBlock && writePos >= midPoint) || (!bFirstBlock && writePos < midPoint))
        {
            // front block is recorded, make a RecordClip and pass it onto our callback.
            samples = new float[midPoint];
            _recording.GetData(samples, bFirstBlock ? 0 : midPoint);
            AudioData record = new AudioData();
            // Peak absolute amplitude of the block.
            record.MaxLevel = Mathf.Max(Mathf.Abs(Mathf.Min(samples)), Mathf.Max(samples));
            record.Clip = AudioClip.Create("Recording", midPoint, _recording.channels, _recordingHZ, false);
            record.Clip.SetData(samples, 0);
            // Drive the UI meter and the speaking flag from the block's level.
            if (levelMeter != null)
            {
                levelMeter.fillAmount = record.MaxLevel;
                if (record.MaxLevel > _speechToText.SilenceThreshold)
                {
                    levelMeter.color = aboveThresholdMeterColor;
                    SetValue("user:isSpeaking", true, "input level above threshold");
                }
                else
                {
                    levelMeter.color = belowThresholdMeterColor;
                    SetValue("user:isSpeaking", false, "input level below threshold");
                }
            }
            _speechToText.OnListen(record);
            bFirstBlock = !bFirstBlock;
        }
        else
        {
            // calculate the number of samples remaining until we ready for a block of audio,
            // and wait that amount of time it will take to record.
            int remaining = bFirstBlock ? (midPoint - writePos) : (_recording.samples - writePos);
            float timeRemaining = (float)remaining / (float)_recordingHZ;
            yield return (new WaitForSeconds(timeRemaining));
        }
    }
    yield break;
}
// Update is called once per frame.
// Drives VRM lip sync: reads the current OVRLipSync viseme frame, maps the
// five Japanese-vowel visemes onto VRM blend shape presets (with optional
// thresholding / emphasis / winner-take-all), then keeps the microphone
// running and, in low-latency mode, feeds raw mic samples straight into
// OVRLipSync.ProcessFrame from a circular buffer.
void Update()
{
    if (EnableLipSync)
    {
        if (Context != 0)
        {
            if (proxy == null)
            {
                // Lazily resolve the blend shape proxy once a model is assigned.
                if (VRMmodel != null)
                {
                    proxy = VRMmodel.GetComponent <VRMBlendShapeProxy>();
                }
            }
            else
            {
                // get the current viseme frame
                OVRLipSync.Frame frame = GetCurrentPhonemeFrame();
                if (frame != null)
                {
                    // Viseme-to-preset mapping ("a/i/u/e/o" vowels):
                    //あ OVRLipSync.Viseme.aa; BlendShapePreset.A;
                    //い OVRLipSync.Viseme.ih; BlendShapePreset.I;
                    //う OVRLipSync.Viseme.ou; BlendShapePreset.U;
                    //え OVRLipSync.Viseme.E; BlendShapePreset.E;
                    //お OVRLipSync.Viseme.oh; BlendShapePreset.O;
                    var presets = new BlendShapePreset[] { BlendShapePreset.A, BlendShapePreset.I, BlendShapePreset.U, BlendShapePreset.E, BlendShapePreset.O, };
                    var visemes = new float[] { frame.Visemes[(int)OVRLipSync.Viseme.aa], frame.Visemes[(int)OVRLipSync.Viseme.ih], frame.Visemes[(int)OVRLipSync.Viseme.ou], frame.Visemes[(int)OVRLipSync.Viseme.E], frame.Visemes[(int)OVRLipSync.Viseme.oh], };
                    // Zero out sub-threshold visemes and find the dominant one.
                    int maxindex = 0;
                    float maxvisemes = 0;
                    for (int i = 0; i < presets.Length; i++)
                    {
                        if (visemes[i] < WeightThreashold)
                        {
                            visemes[i] = 0;
                        }
                        if (maxvisemes < visemes[i])
                        {
                            maxindex = i;
                            maxvisemes = visemes[i];
                        }
                    }
                    // Optionally boost the dominant viseme toward full weight.
                    if (MaxWeightEmphasis)
                    {
                        visemes[maxindex] = Mathf.Clamp(visemes[maxindex] * 3, 0.0f, 1.0f);
                    }
                    // Optionally suppress everything but the dominant viseme.
                    if (MaxWeightEnable)
                    {
                        for (int i = 0; i < presets.Length; i++)
                        {
                            if (i != maxindex)
                            {
                                visemes[i] = 0.0f;
                            }
                        }
                    }
                    // Apply the final weights to the model's blend shapes.
                    for (int i = 0; i < presets.Length; i++)
                    {
                        visemes[i] *= MaxLevel;
                        proxy.SetValue(presets[i], visemes[i]);
                    }
                    //Debug.Log("Visemes:" + string.Join(",", frame.Visemes.Select(d => d.ToString())));
                }
            }
        }
        if (string.IsNullOrEmpty(selectedDevice) == false)
        {
            audioSource.volume = (sourceVolume / 100);
            // Keep the mic alive; restart capture if it stopped.
            if (!Microphone.IsRecording(selectedDevice))
            {
                StartMicrophone();
            }
            if (EnableLowLatency)
            {
                var position = Microphone.GetPosition(selectedDevice);
                if (position < 0 || head == position)
                {
                    return;
                }
                audioSource.clip.GetData(microphoneBuffer, 0);
                // Consume full processBuffer-sized chunks from the circular
                // mic buffer, handling wrap-around with a two-part copy.
                while (GetDataLength(microphoneBuffer.Length, head, position) > processBuffer.Length)
                {
                    var remain = microphoneBuffer.Length - head;
                    if (remain < processBuffer.Length)
                    {
                        Array.Copy(microphoneBuffer, head, processBuffer, 0, remain);
                        Array.Copy(microphoneBuffer, 0, processBuffer, remain, processBuffer.Length - remain);
                    }
                    else
                    {
                        Array.Copy(microphoneBuffer, head, processBuffer, 0, processBuffer.Length);
                    }
                    OVRLipSync.ProcessFrame(Context, processBuffer, Frame);
                    head += processBuffer.Length;
                    if (head > microphoneBuffer.Length)
                    {
                        head -= microphoneBuffer.Length;
                    }
                }
            }
        }
    }
}
/// <summary>
/// Coroutine that tracks the microphone loudness once per second and, after a
/// sustained loud (count > 8) or quiet (count < -6) streak, triggers the
/// matching TheaterManager audience reaction.
/// </summary>
/// <returns>Coroutine enumerator; yields once per second while recording.</returns>
private IEnumerator StartRecording()
{
    // Number of samples read per loudness probe.
    const int sampleWindow = 256;

    // Guard: Microphone.devices[0] throws on machines with no microphone.
    if (Microphone.devices.Length == 0)
    {
        Debug.LogWarning("StartRecording: no microphone devices available.");
        yield break;
    }

    string device = Microphone.devices[0];
    State state = State.None;
    int min, max;
    Microphone.GetDeviceCaps(device, out min, out max);
    // GetDeviceCaps reports (0, 0) when the device supports any frequency;
    // fall back to 44.1 kHz instead of asking Microphone.Start for 0 Hz.
    if (max == 0)
    {
        max = 44100;
    }

    _source.clip = Microphone.Start(device, true, 1200, max);
    // Wait for the first recorded sample before starting playback.
    while (Microphone.GetPosition(device) < 1)
    {
        yield return(null);
    }
    _source.Play();

    float[] clipSampleData = new float[sampleWindow];
    int count = 0;
    // BUG FIX: poll the device we actually started; the original passed null
    // (the default device), which may differ from Microphone.devices[0].
    while (Microphone.IsRecording(device))
    {
        // Mean absolute amplitude of a window starting at the playhead.
        float clipLoudness = 0f;
        _source.clip.GetData(clipSampleData, _source.timeSamples);
        foreach (float sample in clipSampleData)
        {
            clipLoudness += Math.Abs(sample);
        }
        clipLoudness /= clipSampleData.Length;
        Debug.Log("Loudness: " + clipLoudness);

        // Trend counter with hysteresis: consecutive loud seconds push the
        // count up, consecutive quiet seconds push it down; a direction
        // change passes through State.None first.
        if (clipLoudness >= 0.01f && !state.Equals(State.Decreasing))
        {
            count++;
            state = State.Increasing;
        }
        else if (clipLoudness < 0.01f && !state.Equals(State.Increasing))
        {
            count--;
            state = State.Decreasing;
        }
        else
        {
            state = State.None;
        }
        Debug.Log("Mic counter " + count);

        if (count > 8)
        {
            TheaterManager.Instance.PickAudienceAnimate();
            count = 0;
        }
        else if (count < -6)
        {
            TheaterManager.Instance.PickIndifferentAnimate();
            count = 0;
        }
        yield return(new WaitForSeconds(1));
    }
}
/// <summary>
/// Recording-loop coroutine: starts the microphone into a looping AudioClip,
/// then alternately (a) hands completed half-buffer blocks of audio to
/// m_AudioOutput / the local playback list and (b) reports peak level data to
/// m_LevelOutput. Sets Failure and stops on any microphone error.
/// </summary>
private IEnumerator RecordingHandler()
{
    Failure = false;
#if UNITY_WEBPLAYER
    // Web player requires explicit user consent before mic access.
    yield return(Application.RequestUserAuthorization(UserAuthorization.Microphone));
#endif
    m_Recording = Microphone.Start(m_MicrophoneID, true, m_RecordingBufferSize, m_RecordingHZ);
    yield return(null);      // let m_RecordingRoutine get set..

    // Microphone.Start returns null when the device could not be opened.
    if (m_Recording == null)
    {
        Failure = true;
        StopRecording();
        yield break;
    }

    // The looping clip is treated as two halves ("blocks"): while the mic is
    // writing into one half, the other (already-filled) half is read out.
    bool bFirstBlock = true;
    int midPoint = m_Recording.samples / 2;
    bool bOutputLevelData = m_LevelOutput.IsConnected;
    bool bOutputAudio = m_AudioOutput.IsConnected || m_PlaybackRecording;

    int lastReadPos = 0;      // read cursor for level-data extraction
    float[] samples = null;

    // m_RecordingRoutine == 0 signals that StopRecording was requested.
    while (m_RecordingRoutine != 0 && m_Recording != null)
    {
        int writePos = Microphone.GetPosition(m_MicrophoneID);
        // A write position past the clip, or a stopped device, means the
        // microphone was disconnected mid-recording.
        if (writePos > m_Recording.samples || !Microphone.IsRecording(m_MicrophoneID))
        {
            Log.Error("MicrophoneWidget", "Microphone disconnected.");
            Failure = true;
            StopRecording();
            yield break;
        }

        if (bOutputAudio)
        {
            // The write cursor has crossed into the other half, so the half we
            // were waiting on is now complete and safe to read.
            if ((bFirstBlock && writePos >= midPoint) || (!bFirstBlock && writePos < midPoint))
            {
                // front block is recorded, make a RecordClip and pass it onto our callback.
                samples = new float[midPoint];
                m_Recording.GetData(samples, bFirstBlock ? 0 : midPoint);

                AudioData record = new AudioData();
                record.MaxLevel = Mathf.Max(samples);
                record.Clip = AudioClip.Create("Recording", midPoint, m_Recording.channels, m_RecordingHZ, false);
                record.Clip.SetData(samples, 0);

                if (m_PlaybackRecording)
                {
                    m_Playback.Add(record.Clip);
                }
                if (m_AudioOutput.IsConnected && !m_AudioOutput.SendData(record))
                {
                    StopRecording();          // automatically stop recording if the callback goes away.
                }
                bFirstBlock = !bFirstBlock;   // switch to waiting on the other half
            }
            else
            {
                // calculate the number of samples remaining until we ready for a block of audio,
                // and wait that amount of time it will take to record.
                int remaining = bFirstBlock ? (midPoint - writePos) : (m_Recording.samples - writePos);
                float timeRemaining = (float)remaining / (float)m_RecordingHZ;
                // Never sleep past the next level-data report, if one is due.
                if (bOutputLevelData && timeRemaining > m_LevelOutputInterval)
                {
                    timeRemaining = m_LevelOutputInterval;
                }
                yield return(new WaitForSeconds(timeRemaining));
            }
        }
        else
        {
            // Audio output disabled: just pace the loop at the level interval.
            yield return(new WaitForSeconds(m_LevelOutputInterval));
        }

        if (m_Recording != null && bOutputLevelData)
        {
            // Peak level over everything recorded since the last report.
            float fLevel = 0.0f;
            if (writePos < lastReadPos)
            {
                // write has wrapped, grab the last bit from the buffer..
                samples = new float[m_Recording.samples - lastReadPos];
                m_Recording.GetData(samples, lastReadPos);
                fLevel = Mathf.Max(fLevel, Mathf.Max(samples));
                lastReadPos = 0;
            }
            if (lastReadPos < writePos)
            {
                samples = new float[writePos - lastReadPos];
                m_Recording.GetData(samples, lastReadPos);
                fLevel = Mathf.Max(fLevel, Mathf.Max(samples));
                lastReadPos = writePos;
            }
            m_LevelOutput.SendData(new LevelData(fLevel * m_LevelOutputModifier, m_LevelOutputModifier));
        }
    }
    yield break;
}
/// <summary>
/// Initializes the 88-key state tables, hooks MIDI note on/off handlers (both
/// for devices hot-plugged later and for an already-connected device), and —
/// when no MIDI device is in use — starts looping microphone capture.
/// </summary>
void Start()
{
    key_Status = new List <bool>(new bool[88]);
    key_judg = new List <bool>(new bool[88]);

    // Map a MIDI note number to an index into the 88-key tables. A piano
    // spans note numbers 21 (A0) .. 108 (C8), hence the "- 21" offset.
    // Returns -1 for notes outside that range so callers can ignore them.
    int KeyIndex(int noteNumber)
    {
        int idx = noteNumber - 21;
        return (idx >= 0 && idx < 88) ? idx : -1;
    }

    InputSystem.onDeviceChange += (device, change) =>
    {
        if (change != InputDeviceChange.Added)
        {
            return;
        }
        var midiDevice = device as Minis.MidiDevice;
        keydevice = midiDevice;
        if (midiDevice == null)
        {
            return;
        }
        midiDevice.onWillNoteOn += (note, velocity) =>
        {
            // Note that you can't use note.velocity because the state
            // hasn't been updated yet (as this is "will" event). Use the
            // velocity argument as the input note velocity instead.
            int idx = KeyIndex(note.noteNumber);
            if (idx >= 0)
            {
                key_Status[idx] = true;
            }
            Debug.Log(string.Format(
                "Note On #{0} ({1}) vel:{2:0.00} ch:{3} dev:'{4}'",
                note.noteNumber, note.shortDisplayName, velocity,
                (note.device as Minis.MidiDevice)?.channel,
                note.device.description.product));
        };
        midiDevice.onWillNoteOff += (note) =>
        {
            int idx = KeyIndex(note.noteNumber);
            if (idx >= 0)
            {
                key_Status[idx] = false;
            }
            Debug.Log(string.Format(
                "Note Off #{0} ({1}) ch:{2} dev:'{3}'",
                note.noteNumber, note.shortDisplayName,
                (note.device as Minis.MidiDevice)?.channel,
                note.device.description.product));
        };
    };

    // Same handlers for a device that was already connected at startup.
    // BUG FIX: this path previously indexed with noteNumber + 3, which both
    // disagreed with the hot-plug path above (noteNumber - 21) and overran
    // the 88-entry lists for notes above #84. Both paths now share KeyIndex.
    if (keydevice != null)
    {
        keydevice.onWillNoteOn += (note, velocity) =>
        {
            // See the "will" event caveat above: use the velocity argument.
            int idx = KeyIndex(note.noteNumber);
            if (idx >= 0)
            {
                key_Status[idx] = true;
            }
            Debug.Log(string.Format(
                "Note On #{0} ({1}) vel:{2:0.00} ch:{3} dev:'{4}'",
                note.noteNumber, note.shortDisplayName, velocity,
                (note.device as Minis.MidiDevice)?.channel,
                note.device.description.product));
        };
        keydevice.onWillNoteOff += (note) =>
        {
            int idx = KeyIndex(note.noteNumber);
            if (idx >= 0)
            {
                key_Status[idx] = false;
            }
            Debug.Log(string.Format(
                "Note Off #{0} ({1}) ch:{2} dev:'{3}'",
                note.noteNumber, note.shortDisplayName,
                (note.device as Minis.MidiDevice)?.channel,
                note.device.description.product));
        };
    }

    // No MIDI device: fall back to looping microphone capture for input.
    if (MIDIStatus != true)
    {
        Debug.Log(AudioSettings.outputSampleRate);
        Audiosource.loop = true;
        Audiosource.clip = Microphone.Start(null, true, lengthSec, 44100);
        // Wait until the mic has produced at least one sample before playback.
        while (!(Microphone.GetPosition(null) > 0)) { }
        Audiosource.Play();
    }
}
/// <summary>
/// Drives the audio-reactive visual each frame: ensures the looping
/// microphone capture is running, then recomputes the spectrum / frequency
/// band / amplitude data and scales the target image by the amplitude.
/// </summary>
void Update()
{
    var audio = GetComponent <AudioSource>();

    // BUG FIX: the original called Microphone.Start and then busy-waited for
    // the first sample on EVERY frame, constantly restarting the recording
    // and stalling each frame. Start the mic once and reuse it afterwards.
    if (!Microphone.IsRecording(null))
    {
        audio.clip = Microphone.Start(null, true, 10, 44100);
        audio.loop = true;
        // Block until the mic has produced at least one sample so playback
        // (which feeds the spectrum analysis) starts on valid data.
        while (!(Microphone.GetPosition(null) > 0)) { }
        audio.Play();
    }

    // Recompute the analysis pipeline, then scale the image by amplitude
    // (0.1 base size plus half the measured amplitude).
    GetSpectrumData();
    MakeFrequencyBands();
    BandBuffer();
    CreateAudioBands();
    GetAmplitude();
    float scale = (amplitude * 0.5f) + 0.1f;
    image.transform.localScale = new Vector3(scale, scale, scale);
}