/// <summary>
/// Sets the laughter to morph target.
/// </summary>
void SetLaughterToMorphTarget(OVRLipSync.Frame frame)
{
    if (laughterBlendTarget != -1)
    {
        // Laughter score will be raw classifier output in [0,1]
        float laughterScore = frame.laughterScore;

        // Threshold then re-map to [0,1]
        laughterScore = laughterScore < laughterThreshold ? 0.0f : laughterScore - laughterThreshold;
        laughterScore = Mathf.Min(laughterScore * laughterMultiplier, 1.0f);
        laughterScore *= 1.0f / laughterThreshold;

        skinnedMeshRenderer.SetBlendShapeWeight(
            laughterBlendTarget,
            laughterScore * 100.0f);
    }
}
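For reference, a worked example of the threshold-and-remap math above, with illustrative values (the threshold and multiplier here are assumptions, not part of the snippet):

// Illustrative values only; the real values come from the component's serialized fields.
float laughterThreshold  = 0.5f;
float laughterMultiplier = 1.0f;
float raw = 0.75f;                                                   // raw classifier output in [0,1]
float s = raw < laughterThreshold ? 0.0f : raw - laughterThreshold;  // 0.25
s = Mathf.Min(s * laughterMultiplier, 1.0f);                         // 0.25
s *= 1.0f / laughterThreshold;                                       // 0.5
float blendWeight = s * 100.0f;                                      // 50, passed to SetBlendShapeWeight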
/// <summary>
/// Update this instance.
/// </summary>
void Update()
{
    if ((lipsyncContext != null) && (skinnedMeshRenderer != null))
    {
        // trap inputs and send signals to phoneme engine for testing purposes
        // get the current viseme frame
        OVRLipSync.Frame frame = lipsyncContext.GetCurrentPhonemeFrame();
        if (frame != null)
        {
            SetVisemeToMorphTarget(frame);
        }

        // TEST visemes by capturing key inputs and sending a signal
        CheckForKeys();
    }
}
/// <summary>
/// Sets the viseme to morph target.
/// </summary>
void SetVisemeToMorphTarget(OVRLipSync.Frame frame)
{
    var combinepose = CubismPose.CombinePoses(visemePoses, frame.Visemes);
    if (combinepose != null)
    {
        combinepose.SetPosing(target);
    }

    //animation.Stop();
    //for (int i = 0; i < blendMotions.Length; i++)
    //{
    //    animation.Blend("ovrlip_" + i.ToString(), frame.Visemes[i]);
    //}
}
void OnAudioFilterRead(float[] data, int channels)
{
    if (EnableLowLatency == false)
    {
        // Do not spatialize if we are not initialized, or if there is no
        // audio source attached to game object
        if ((OVRLipSync.IsInitialized() != OVRLipSync.Result.Success) || audioSource == null)
        {
            return;
        }

        // increase the gain of the input to get a better signal input
        for (int i = 0; i < data.Length; ++i)
        {
            data[i] = data[i] * Gain;
        }

        // Send data into Phoneme context for processing (if context is not 0)
        lock (this)
        {
            if (Context != 0)
            {
                OVRLipSync.Flags flags = 0;

                // Set flags to feed into process
                if (DelayCompensate == true)
                {
                    flags |= OVRLipSync.Flags.DelayCompensateAudio;
                }

                OVRLipSync.Frame frame = this.Frame;
                OVRLipSync.ProcessFrameInterleaved(Context, data, flags, frame);
            }
        }
    }

    // Turn off output (so that we don't get feedback from mics too close to speakers)
    if (AudioMute == true)
    {
        for (int i = 0; i < data.Length; ++i)
        {
            data[i] = data[i] * 0.0f;
        }
    }
}
// Update is called once per frame
void Update()
{
    if ((lipsyncContext != null) && (BlendShapeProxy != null))
    {
        // get the current viseme frame
        OVRLipSync.Frame frame = lipsyncContext.GetCurrentPhonemeFrame();
        if (frame != null)
        {
            SetVisemeToVRMTarget(frame);
        }

        // Update smoothing value
        if (smoothAmount != lipsyncContext.Smoothing)
        {
            lipsyncContext.Smoothing = smoothAmount;
        }
    }
}
private void Update()
{
    if (takeToSpeech.clipFlag)
    {
        takeToSpeech.clipFlag = false;

        float[] rawData = new float[takeToSpeech.www.bytes.Length / 2 * takeToSpeech.clip.channels];
        takeToSpeech.clip.GetData(rawData, 0);

        audioSource.clip = takeToSpeech.clip;
        audioSource.Play();

        int i = Random.Range(0, 3);
        animatior.SetBool("action" + i, true);
    }

    OVRLipSync.Frame frame = lipsyncContext.GetCurrentPhonemeFrame();
    if (frame != null)
    {
        SetVisemeToMorphTarget(frame);
    }
}
/// <summary>
/// Update this instance.
/// </summary>
void Update()
{
    if (LipSync)
    {
        if (lipsyncContext != null)
        {
            // get the current viseme frame
            OVRLipSync.Frame frame = lipsyncContext.GetCurrentPhonemeFrame();
            if (frame != null)
            {
                SetVisemeToMorphTarget(frame);
            }

            // Update smoothing value
            if (smoothAmount != lipsyncContext.Smoothing)
            {
                lipsyncContext.Smoothing = smoothAmount;
            }
        }
    }

    if (AutoBlink)
    {
        // Blink
        if (!timerStarted)
        {
            eyeStatus = AutoBlinkStatus.Close;
            timerStarted = true;
        }

        if (timerStarted)
        {
            timeRemining -= Time.deltaTime;
            if (timeRemining <= 0.0f)
            {
                eyeStatus = AutoBlinkStatus.Open;
                ResetTimer();
            }
            else if (timeRemining <= timeBlink * 0.3f)
            {
                eyeStatus = AutoBlinkStatus.HalfClose;
            }
        }
    }
}
public static void NewUpdateBlendShape(ChaControl __instance)
{
    var enabled = LipsyncConfig.Instance.enabled;
    if (!enabled)
    {
        return;
    }

    var voice = AccessTools.PropertyGetter(typeof(ChaControl), "fbsaaVoice")
        .Invoke(__instance, new object[] { }) as LipDataCreator;
    AudioSource asVoice = __instance.asVoice;

    if (asVoice != null && asVoice.isPlaying && asVoice.time <= asVoice.clip.length && voice != null)
    {
        if (!LipsyncConfig.Instance.frameStore.TryGetValue(__instance.fbsCtrl.MouthCtrl.GetHashCode(), out var frame))
        {
            frame = new OVRLipSync.Frame();
        }
        frame = voice.GetLipData(asVoice, frame);

        //! This method relies on the fact that GetHashCode() is _not_ overridden.
        // Thus it returns the same value for every run, and we can safely use this value
        // to separate between different objects
        LipsyncConfig.Instance.frameStore[__instance.fbsCtrl.MouthCtrl.GetHashCode()] = frame;
        LipsyncConfig.Instance.cleaned = false;
    }
    else if (asVoice == null || voice == null)
    {
    }
    else
    {
        LipsyncConfig.Instance.frameStore.Remove(__instance.fbsCtrl.MouthCtrl.GetHashCode());
    }

    if (voice == null)
    {
        LipsyncConfig.Instance.logger.LogWarning("LipDataCreator is null");
    }

    return;
}
void Update()
{
    if ((lipsyncContext == null) || (blendShapeProxy == null))
    {
        return;
    }

    // get the current viseme frame
    OVRLipSync.Frame frame = lipsyncContext.GetCurrentPhonemeFrame();
    if (frame != null)
    {
        SetVisemeToMorphTarget(frame);
    }

    // Update smoothing value
    // if (smoothAmount != lipsyncContext.Smoothing)
    // {
    //     lipsyncContext.Smoothing = smoothAmount;
    // }
}
private void UpdateVRMMorph(OVRLipSync.Frame frame)
{
    if (!proxy)
    {
        // Find the VRMBlendShapeProxy
        proxy = FindObjectOfType<VRMBlendShapeProxy>();
    }
    if (!proxy)
    {
        return;
    }

    for (int i = 1; i < VisemeToBlendTargets.Length; i++)
    {
        if (VisemeToBlendTargets[i] != -1)
        {
            proxy.SetValue((BlendShapePreset)VisemeToBlendTargets[i],
                Mathf.Min(1.0f, frame.Visemes[i] * LipSyncSensitivity));
        }
    }
}
void Update()
{
    if ((lipsyncContext != null) && currentFace != null)
    {
        OVRLipSync.Frame frame = lipsyncContext.GetCurrentPhonemeFrame();
        if (frame != null)
        {
            for (int i = 0; i < currentFace.v.Length; i++)
            {
                currentFace.v[i] = frame.Visemes[i];
            }
        }

        if (smoothAmount != lipsyncContext.Smoothing)
        {
            lipsyncContext.Smoothing = smoothAmount;
        }
    }
}
/// <summary>
/// Update this instance.
/// </summary>
void Update()
{
    if ((lipsyncContext != null) && (skinnedMeshRenderer != null))
    {
        // get the current viseme frame
        OVRLipSync.Frame frame = lipsyncContext.GetCurrentPhonemeFrame();
        if (frame != null)
        {
            SetVisemeToMorphTarget(frame);
        }

        // TEST visemes by capturing key inputs and sending a signal
        CheckForKeys();

        // Update smoothing value
        if (smoothAmount != lipsyncContext.Smoothing)
        {
            lipsyncContext.Smoothing = smoothAmount;
        }
    }
}
/// <summary>
/// Update this instance.
/// </summary>
void Update()
{
    if ((lipsyncContext != null) && (material != null))
    {
        // trap inputs and send signals to phoneme engine for testing purposes
        // get the current viseme frame
        OVRLipSync.Frame frame = lipsyncContext.GetCurrentPhonemeFrame();
        if (frame != null)
        {
            // Go through the current and old
            for (int i = 0; i < frame.Visemes.Length; i++)
            {
                oldFrame.Visemes[i] = oldFrame.Visemes[i] * smoothing + frame.Visemes[i] * (1.0f - smoothing);
            }

            SetVisemeToTexture();
        }
    }
}
[Inject]
void Init(LipDataSendModel lipDataSendModel)
{
    var lipSyncContext = GetComponent<OVRLipSyncContextBase>();

    this.UpdateAsObservable().Subscribe(_ =>
    {
        if (lipSyncContext != null)
        {
            // get the current viseme frame
            OVRLipSync.Frame frame = lipSyncContext.GetCurrentPhonemeFrame();
            if (frame != null)
            {
                lipDataSendModel.SendData(frame);
            }

            // Update smoothing value
            if (smoothAmount != lipSyncContext.Smoothing)
            {
                lipSyncContext.Smoothing = smoothAmount;
            }
        }
    });
}
/// <summary>
/// Update this instance.
/// </summary>
void Update()
{
    if ((lipsyncContext != null) && (material != null))
    {
        // trap inputs and send signals to phoneme engine for testing purposes
        // get the current viseme frame
        OVRLipSync.Frame frame = lipsyncContext.GetCurrentPhonemeFrame();
        if (frame != null)
        {
            // Perform smoothing here if on original provider
            if (lipsyncContext.provider == OVRLipSync.ContextProviders.Original)
            {
                // Go through the current and old
                for (int i = 0; i < frame.Visemes.Length; i++)
                {
                    // Convert 1-100 to old * (0.00 - 0.99)
                    float smoothing = ((smoothAmount - 1) / 100.0f);
                    oldFrame.Visemes[i] = oldFrame.Visemes[i] * smoothing + frame.Visemes[i] * (1.0f - smoothing);
                }
            }
            else
            {
                oldFrame.Visemes = frame.Visemes;
            }

            SetVisemeToTexture();
        }
    }

    // Update smoothing value in context
    if (smoothAmount != lipsyncContext.Smoothing)
    {
        lipsyncContext.Smoothing = smoothAmount;
    }
}
void SetVisemeToMorphTarget(OVRLipSync.Frame frame)
{
    foreach (OVRLipSync.Viseme viseme in visemes)
    {
        int index = (int)viseme;
        switch (viseme)
        {
            case OVRLipSync.Viseme.aa:
                blendShapeProxy.SetValue(BlendShapePreset.A, Mathf.Clamp01(frame.Visemes[index] * gain));
                break;
            case OVRLipSync.Viseme.E:
                blendShapeProxy.SetValue(BlendShapePreset.E, Mathf.Clamp01(frame.Visemes[index] * gain));
                break;
            case OVRLipSync.Viseme.ih:
                blendShapeProxy.SetValue(BlendShapePreset.I, Mathf.Clamp01(frame.Visemes[index] * gain));
                break;
            case OVRLipSync.Viseme.oh:
                blendShapeProxy.SetValue(BlendShapePreset.O, Mathf.Clamp01(frame.Visemes[index] * gain));
                break;
            case OVRLipSync.Viseme.ou:
                blendShapeProxy.SetValue(BlendShapePreset.U, Mathf.Clamp01(frame.Visemes[index] * gain));
                break;
            default:
                blendShapeProxy.SetValue(BlendShapePreset.A, 0);
                blendShapeProxy.SetValue(BlendShapePreset.I, 0);
                blendShapeProxy.SetValue(BlendShapePreset.U, 0);
                blendShapeProxy.SetValue(BlendShapePreset.E, 0);
                blendShapeProxy.SetValue(BlendShapePreset.O, 0);
                break;
        }
    }
}
static void OnDefaultBlend(OVRLipSync.Frame frame, VRMBlendShapeProxy vrmBlendShapeProxy)
{
    var visemes = new float[visemeToBlendTargets.Length];
    Array.Copy(frame.Visemes, (int)OVRLipSync.Viseme.aa, visemes, 1, visemes.Length - 1);
    visemes[0] = frame.Visemes[(int)OVRLipSync.Viseme.sil];

    var sum = visemes.Sum();
    if (sum <= float.Epsilon)
    {
        visemes[0] = 1;
    }
    else
    {
        for (int i = 0; i < visemes.Length; ++i)
        {
            visemes[i] /= sum;
        }
    }

    for (int i = 0; i < visemes.Length; ++i)
    {
        vrmBlendShapeProxy.AccumulateValue(BlendShapeKey.CreateFromPreset(visemeToBlendTargets[i]), visemes[i]);
    }
}
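Note that the snippet above stages weights with AccumulateValue; in UniVRM these typically only take effect once Apply() is called on the proxy. A minimal sketch of a caller, assuming the context and proxy fields are already assigned (the names here are illustrative):

// Hypothetical driver: run the blend each frame, then commit the accumulated values.
void LateUpdate()
{
    var frame = lipsyncContext.GetCurrentPhonemeFrame();
    if (frame != null)
    {
        OnDefaultBlend(frame, vrmBlendShapeProxy);
    }
    vrmBlendShapeProxy.Apply(); // commits everything staged via AccumulateValue
}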
public void receiveOculusLipsynMessage(string message)
{
    OVRLipSync.Frame frame = JsonUtility.FromJson<OVRLipSync.Frame>(message);
    if (frame != null)
    {
        if (currentType == EHeadType.Human)
        {
            morphTarget.SetVisemeToMorphTarget(frame);
            morphTarget.SetLaughterToMorphTarget(frame);
        }
        else
        {
            // Perform smoothing here if on original provider
            if (context.provider == OVRLipSync.ContextProviders.Original)
            {
                // Go through the current and old
                for (int i = 0; i < frame.Visemes.Length; i++)
                {
                    // Convert 1-100 to old * (0.00 - 0.99)
                    float smoothing = ((smoothAmount - 1) / 100.0f);
                    textureFlip.oldFrame.Visemes[i] = textureFlip.oldFrame.Visemes[i] * smoothing + frame.Visemes[i] * (1.0f - smoothing);
                }
            }
            else
            {
                textureFlip.oldFrame.Visemes = frame.Visemes;
            }

            textureFlip.SetVisemeToTexture();
        }
    }
}
public static OVRLipSyncSequence CreateSequenceFromAudioClip(
    AudioClip clip, bool useOfflineModel = false)
{
    OVRLipSyncSequence sequence = null;

    if (clip.channels > 2)
    {
        Debug.LogError(clip.name +
            ": Cannot process phonemes from an audio clip with " +
            "more than 2 channels");
        return null;
    }

    if (clip.loadType != AudioClipLoadType.DecompressOnLoad)
    {
        Debug.LogError(clip.name +
            ": Cannot process phonemes from an audio clip unless " +
            "its load type is set to DecompressOnLoad.");
        return null;
    }

    if (OVRLipSync.Initialize(clip.frequency, sSampleSize) != OVRLipSync.Result.Success)
    {
        Debug.LogError("Could not create Lip Sync engine.");
        return null;
    }

    if (clip.loadState != AudioDataLoadState.Loaded)
    {
        Debug.LogError("Clip is not loaded!");
        return null;
    }

    uint context = 0;

    OVRLipSync.Result result = useOfflineModel
        ? OVRLipSync.CreateContextWithModelFile(
            ref context,
            OVRLipSync.ContextProviders.Enhanced,
            Path.Combine(Application.dataPath, "Oculus/LipSync/Assets/OfflineModel/ovrlipsync_offline_model.pb"))
        : OVRLipSync.CreateContext(ref context, OVRLipSync.ContextProviders.Enhanced);

    if (result != OVRLipSync.Result.Success)
    {
        Debug.LogError("Could not create Phoneme context. (" + result + ")");
        OVRLipSync.Shutdown();
        return null;
    }

    List<OVRLipSync.Frame> frames = new List<OVRLipSync.Frame>();
    float[] samples = new float[sSampleSize * clip.channels];

    OVRLipSync.Frame dummyFrame = new OVRLipSync.Frame();
    OVRLipSync.ProcessFrame(
        context,
        samples,
        dummyFrame,
        (clip.channels == 2) ? true : false
    );

    // frame delay in ms
    float frameDelayInMs = dummyFrame.frameDelay;
    int frameOffset = (int)(frameDelayInMs * clip.frequency / 1000);

    int totalSamples = clip.samples;
    for (int x = 0; x < totalSamples + frameOffset; x += sSampleSize)
    {
        int remainingSamples = totalSamples - x;
        if (remainingSamples >= sSampleSize)
        {
            clip.GetData(samples, x);
        }
        else if (remainingSamples > 0)
        {
            float[] samples_clip = new float[remainingSamples * clip.channels];
            clip.GetData(samples_clip, x);
            Array.Copy(samples_clip, samples, samples_clip.Length);
            Array.Clear(samples, samples_clip.Length, samples.Length - samples_clip.Length);
        }
        else
        {
            Array.Clear(samples, 0, samples.Length);
        }

        OVRLipSync.Frame frame = new OVRLipSync.Frame();
        if (clip.channels == 2)
        {
            // interleaved = stereo data, alternating floats
            OVRLipSync.ProcessFrame(context, samples, frame);
        }
        else
        {
            // mono
            OVRLipSync.ProcessFrame(context, samples, frame, false);
        }

        if (x < frameOffset)
        {
            continue;
        }

        frames.Add(frame);
    }

    Debug.Log(clip.name + " produced " + frames.Count +
        " viseme frames, playback rate is " + (frames.Count / clip.length) + " fps");

    OVRLipSync.DestroyContext(context);
    OVRLipSync.Shutdown();

    sequence = ScriptableObject.CreateInstance<OVRLipSyncSequence>();
    sequence.entries = frames;
    sequence.length = clip.length;

    return sequence;
}
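A sketch of how such a baking helper might be driven from the Unity editor, assuming the method above lives on OVRLipSyncSequence as in the Oculus sample; the menu path and output asset path below are assumptions, not part of the snippet:

#if UNITY_EDITOR
using UnityEditor;
using UnityEngine;

public static class LipSyncSequenceBaker
{
    // Hypothetical menu item: bake the selected AudioClip into an OVRLipSyncSequence asset.
    [MenuItem("Tools/LipSync/Bake Selected AudioClip")]
    static void BakeSelectedClip()
    {
        var clip = Selection.activeObject as AudioClip;
        if (clip == null)
        {
            Debug.LogError("Select an AudioClip in the Project window first.");
            return;
        }

        var sequence = OVRLipSyncSequence.CreateSequenceFromAudioClip(clip);
        if (sequence != null)
        {
            AssetDatabase.CreateAsset(sequence, "Assets/" + clip.name + "_lipsync.asset");
            AssetDatabase.SaveAssets();
        }
    }
}
#endif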
// Update is called once per frame
void Update()
{
    if (EnableLipSync)
    {
        if (Context != 0)
        {
            if (proxy == null)
            {
                if (VRMmodel != null)
                {
                    proxy = VRMmodel.GetComponent<VRMBlendShapeProxy>();
                }
            }
            else
            {
                // get the current viseme frame
                OVRLipSync.Frame frame = GetCurrentPhonemeFrame();
                if (frame != null)
                {
                    // あ (a): OVRLipSync.Viseme.aa -> BlendShapePreset.A
                    // い (i): OVRLipSync.Viseme.ih -> BlendShapePreset.I
                    // う (u): OVRLipSync.Viseme.ou -> BlendShapePreset.U
                    // え (e): OVRLipSync.Viseme.E  -> BlendShapePreset.E
                    // お (o): OVRLipSync.Viseme.oh -> BlendShapePreset.O
                    var presets = new BlendShapePreset[]
                    {
                        BlendShapePreset.A,
                        BlendShapePreset.I,
                        BlendShapePreset.U,
                        BlendShapePreset.E,
                        BlendShapePreset.O,
                    };
                    var visemes = new float[]
                    {
                        frame.Visemes[(int)OVRLipSync.Viseme.aa],
                        frame.Visemes[(int)OVRLipSync.Viseme.ih],
                        frame.Visemes[(int)OVRLipSync.Viseme.ou],
                        frame.Visemes[(int)OVRLipSync.Viseme.E],
                        frame.Visemes[(int)OVRLipSync.Viseme.oh],
                    };

                    int maxindex = 0;
                    float maxvisemes = 0;
                    for (int i = 0; i < presets.Length; i++)
                    {
                        if (visemes[i] < WeightThreashold)
                        {
                            visemes[i] = 0;
                        }
                        if (maxvisemes < visemes[i])
                        {
                            maxindex = i;
                            maxvisemes = visemes[i];
                        }
                    }

                    if (MaxWeightEmphasis)
                    {
                        visemes[maxindex] = Mathf.Clamp(visemes[maxindex] * 3, 0.0f, 1.0f);
                    }

                    if (MaxWeightEnable)
                    {
                        for (int i = 0; i < presets.Length; i++)
                        {
                            if (i != maxindex)
                            {
                                visemes[i] = 0.0f;
                            }
                        }
                    }

                    for (int i = 0; i < presets.Length; i++)
                    {
                        visemes[i] *= MaxLevel;
                        proxy.SetValue(presets[i], visemes[i]);
                    }

                    //Debug.Log("Visemes:" + string.Join(",", frame.Visemes.Select(d => d.ToString())));
                }
            }
        }

        if (string.IsNullOrEmpty(selectedDevice) == false)
        {
            audioSource.volume = (sourceVolume / 100);

            if (!Microphone.IsRecording(selectedDevice))
            {
                StartMicrophone();
            }

            if (EnableLowLatency)
            {
                var position = Microphone.GetPosition(selectedDevice);
                if (position < 0 || head == position)
                {
                    return;
                }

                audioSource.clip.GetData(microphoneBuffer, 0);
                while (GetDataLength(microphoneBuffer.Length, head, position) > processBuffer.Length)
                {
                    var remain = microphoneBuffer.Length - head;
                    if (remain < processBuffer.Length)
                    {
                        Array.Copy(microphoneBuffer, head, processBuffer, 0, remain);
                        Array.Copy(microphoneBuffer, 0, processBuffer, remain, processBuffer.Length - remain);
                    }
                    else
                    {
                        Array.Copy(microphoneBuffer, head, processBuffer, 0, processBuffer.Length);
                    }

                    OVRLipSync.ProcessFrame(Context, processBuffer, Frame);

                    head += processBuffer.Length;
                    if (head > microphoneBuffer.Length)
                    {
                        head -= microphoneBuffer.Length;
                    }
                }
            }
        }
    }
}
public static OVRLipSyncSequence CreateSequenceFromAudioClip(AudioClip clip)
{
    OVRLipSyncSequence sequence = null;

    if (clip.loadType != AudioClipLoadType.DecompressOnLoad || clip.channels > 2)
    {
        // todo: just fix the clip
        Debug.LogError("Cannot process phonemes from an audio clip unless its load type is set to DecompressOnLoad.");
    }
    else
    {
        if (OVRLipSync.Initialize(clip.frequency, sSampleSize) != OVRLipSync.Result.Success)
        {
            Debug.LogError("Could not create Lip Sync engine.");
        }
        else
        {
            uint context = 0;
            OVRLipSync.Result result = OVRLipSync.CreateContext(ref context, OVRLipSync.ContextProviders.Main);
            if (result != OVRLipSync.Result.Success)
            {
                Debug.LogError("Could not create Phoneme context. (" + result + ")");
                OVRLipSync.Shutdown();
            }
            else
            {
                List<OVRLipSync.Frame> frames = new List<OVRLipSync.Frame>();
                float[] samples = new float[sSampleSize * clip.channels];

                int totalSamples = clip.samples;
                for (int x = 0; x < totalSamples; x += sSampleSize)
                {
                    // GetData loops at the end of the read. Prevent that when it happens.
                    if (x + samples.Length > totalSamples)
                    {
                        samples = new float[(totalSamples - x) * clip.channels];
                    }
                    clip.GetData(samples, x);

                    OVRLipSync.Frame frame = new OVRLipSync.Frame();
                    if (clip.channels == 2)
                    {
                        // interleaved = stereo data, alternating floats
                        OVRLipSync.ProcessFrameInterleaved(context, samples, 0, frame);
                    }
                    else
                    {
                        // mono
                        OVRLipSync.ProcessFrame(context, samples, 0, frame);
                    }
                    frames.Add(frame);
                }

                Debug.Log(clip.name + " produced " + frames.Count +
                    " viseme frames, playback rate is " + (frames.Count / clip.length) + " fps");

                OVRLipSync.DestroyContext(context);
                OVRLipSync.Shutdown();

                sequence = ScriptableObject.CreateInstance<OVRLipSyncSequence>();
                sequence.entries = frames;
                sequence.length = clip.length;
            }
        }
    }

    return sequence;
}
void ProcessBuffer(float[] buffer)
{
    if (buffer == null)
    {
        return;
    }

    audioVolume = 0f;
    foreach (float v in buffer)
    {
        audioVolume += Mathf.Abs(v);
    }
    audioVolume /= buffer.Length;

    int totalLen = partialPos + buffer.Length;
    int bufferPos = 0;
    if (totalLen >= 1024 * channels)
    {
        volume = 0f;
        while (totalLen >= 1024 * channels)
        {
            int sliceLen = 1024 - partialPos;
            Array.Copy(buffer, bufferPos, partialAudio, partialPos, sliceLen * channels);
            totalLen -= 1024 * channels;
            if (totalLen < 1024 * channels)
            {
                for (int i = 0; i < partialAudio.Length; i++)
                {
                    partialAudio[i] = partialAudio[i] * gain; //Mathf.Clamp(partialAudio[i] * gain, 0f, 1f);
                    volume += Mathf.Abs(partialAudio[i]);
                }
                lock (this)
                {
                    if (context != 0)
                    {
                        OVRLipSync.Frame frame = this.visemeData;
                        if (channels == 2)
                        {
                            OVRLipSync.ProcessFrameInterleaved(context, partialAudio, frame);
                        }
                        else
                        {
                            OVRLipSync.ProcessFrame(context, partialAudio, frame);
                        }
                    }
                    else
                    {
                        Debug.Log("OVRLipSync context is 0");
                    }
                }
            }
            bufferPos += sliceLen;
            partialPos = 0;
        }
        volume /= (float)buffer.Length;
    }

    if (totalLen > 0)
    {
        Array.Copy(buffer, bufferPos, partialAudio, partialPos, buffer.Length - bufferPos);
        partialPos += (buffer.Length - bufferPos) / channels;
    }
}
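For context, a minimal sketch of how a buffer like this could be fed from Unity's audio thread, assuming the component sits on the same GameObject as the playing AudioSource (the channels field is the one used in the slicing above):

// Unity invokes this on the audio thread for the attached AudioSource.
void OnAudioFilterRead(float[] data, int channels)
{
    this.channels = channels; // keep the channel count in sync with the 1024-sample slicing
    ProcessBuffer(data);      // accumulate into partialAudio and run OVRLipSync when a chunk is full
}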
public void Update()
{
    var lipdata = new OVRLipSync.Frame();
    lipDataSendModel.SendData(lipdata);
}
private void oculusLipSyncSolution(PhotonStream stream, PhotonMessageInfo info)
{
    if (stream.IsWriting)
    {
        stream.SendNext((int)currentType);
        stream.SendNext(morphTarget.smoothAmount);
        stream.SendNext(JsonUtility.ToJson(context.GetCurrentPhonemeFrame()));
    }
    else
    {
        object agoraId = photonView.Owner.CustomProperties["agoraId"];
        if (agoraId != null)
        {
            UDID = uint.Parse(agoraId.ToString());
        }

        // synced head
        EHeadType networkHead = (EHeadType)((int)stream.ReceiveNext());
        if (networkHead != currentType)
        {
            currentType = networkHead;
            switchHead.switchHead(currentType);
        }

        // sync smoothing
        smoothAmount = (int)stream.ReceiveNext();
        context.Smoothing = smoothAmount;

        // sync lipsync data
        frameData = (string)stream.ReceiveNext();
        OVRLipSync.Frame frame = JsonUtility.FromJson<OVRLipSync.Frame>(frameData);
        if (frame != null)
        {
            if (currentType == EHeadType.Human)
            {
                morphTarget.SetVisemeToMorphTarget(frame);
                morphTarget.SetLaughterToMorphTarget(frame);
            }
            else
            {
                // Perform smoothing here if on original provider
                if (context.provider == OVRLipSync.ContextProviders.Original)
                {
                    // Go through the current and old
                    for (int i = 0; i < frame.Visemes.Length; i++)
                    {
                        // Convert 1-100 to old * (0.00 - 0.99)
                        float smoothing = ((smoothAmount - 1) / 100.0f);
                        textureFlip.oldFrame.Visemes[i] = textureFlip.oldFrame.Visemes[i] * smoothing + frame.Visemes[i] * (1.0f - smoothing);
                    }
                }
                else
                {
                    textureFlip.oldFrame.Visemes = frame.Visemes;
                }

                textureFlip.SetVisemeToTexture();
            }
        }

        double pan = GameState.Instance.getPanFromUser(UDID);
        double gain = GameState.Instance.getGainFromUser(UDID);

        // push pan and gain to agora
        if (GameState.Instance.mRtcEngine != null)
        {
            GameState.Instance.mRtcEngine.GetAudioEffectManager().SetRemoteVoicePosition(UDID, pan, gain);
        }
    }
}
public static OVRLipSyncSequence CreateSequenceFromAudioClip(
    AudioClip clip, bool useOfflineModel = false)
{
    OVRLipSyncSequence sequence = null;

    if (clip.channels > 2)
    {
        Debug.LogError(clip.name +
            ": Cannot process phonemes from an audio clip with " +
            "more than 2 channels");
        return null;
    }

    if (clip.loadType != AudioClipLoadType.DecompressOnLoad)
    {
        Debug.LogError(clip.name +
            ": Cannot process phonemes from an audio clip unless " +
            "its load type is set to DecompressOnLoad.");
        return null;
    }

    if (OVRLipSync.Initialize(clip.frequency, sSampleSize) != OVRLipSync.Result.Success)
    {
        Debug.LogError("Could not create Lip Sync engine.");
        return null;
    }

    if (clip.loadState != AudioDataLoadState.Loaded)
    {
        Debug.LogError("Clip is not loaded!");
        return null;
    }

    uint context = 0;

    OVRLipSync.Result result = useOfflineModel
        ? OVRLipSync.CreateContextWithModelFile(
            ref context,
            OVRLipSync.ContextProviders.Enhanced,
            Path.Combine(Application.dataPath, "Oculus/LipSync/Assets/OfflineModel/ovrlipsync_offline_model.pb"))
        : OVRLipSync.CreateContext(ref context, OVRLipSync.ContextProviders.Enhanced);

    if (result != OVRLipSync.Result.Success)
    {
        Debug.LogError("Could not create Phoneme context. (" + result + ")");
        OVRLipSync.Shutdown();
        return null;
    }

    List<OVRLipSync.Frame> frames = new List<OVRLipSync.Frame>();
    float[] samples = new float[sSampleSize * clip.channels];

    int totalSamples = clip.samples;
    for (int x = 0; x < totalSamples; x += sSampleSize)
    {
        // GetData loops at the end of the read. Prevent that when it happens.
        if (x + samples.Length > totalSamples)
        {
            samples = new float[(totalSamples - x) * clip.channels];
        }
        clip.GetData(samples, x);

        OVRLipSync.Frame frame = new OVRLipSync.Frame();
        if (clip.channels == 2)
        {
            // interleaved = stereo data, alternating floats
            OVRLipSync.ProcessFrame(context, samples, frame);
        }
        else
        {
            // mono
            OVRLipSync.ProcessFrame(context, samples, frame, false);
        }

        frames.Add(frame);
    }

    Debug.Log(clip.name + " produced " + frames.Count +
        " viseme frames, playback rate is " + (frames.Count / clip.length) + " fps");

    OVRLipSync.DestroyContext(context);
    OVRLipSync.Shutdown();

    sequence = ScriptableObject.CreateInstance<OVRLipSyncSequence>();
    sequence.entries = frames;
    sequence.length = clip.length;

    return sequence;
}
/// <summary>
/// Sets the viseme to VRM target.
/// </summary>
void SetVisemeToVRMTarget(OVRLipSync.Frame frame)
{
    var values = lipsyncClips.Select(_ =>
        new KeyValuePair<BlendShapeKey, float>(new BlendShapeKey(_.Key), frame.Visemes[_.Value]));
    BlendShapeProxy.SetValues(values);
}
public void BlendFunc(OVRLipSync.Frame frame, VRMBlendShapeProxy vrmBlendShapeProxy)
{
    onBlendShape(frame, vrmBlendShapeProxy);
}