private void UpdateFacewave()
{
    if (lipsyncContext != null && (micInput != null || CanOwnMicrophone == false))
    {
        // Get the current viseme frame
        currentFrame = lipsyncContext.GetCurrentPhonemeFrame();

        // Verify length (-1 for laughter)
        if (currentFrame.Visemes.Length != (VISEME_COUNT - 1))
        {
            Debug.LogError("Unexpected number of visemes " + currentFrame.Visemes.Length);
            return;
        }

        // Copy to viseme array
        currentFrame.Visemes.CopyTo(visemes, 0);

        // Copy laughter as final element
        visemes[VISEME_COUNT - 1] = EnableLaughter ? currentFrame.laughterScore : 0.0f;

        // Send visemes to native implementation.
        for (int i = 0; i < VISEME_COUNT; i++)
        {
            RuntimeVisemes.visemeParams[i] = visemes[i];
        }
        CAPI.ovrAvatar_SetVisemes(sdkAvatar, RuntimeVisemes);
    }
}
private void UpdateFacewave()
{
    if (lipsyncContext != null && (micInput != null || CanOwnMicrophone == false))
    {
        // Get the current viseme frame
        currentFrame = lipsyncContext.GetCurrentPhonemeFrame();

        // Verify length (OVRLipSync produces 15 visemes)
        if (currentFrame.Visemes.Length != 15)
        {
            Debug.LogError("Unexpected number of visemes " + currentFrame.Visemes.Length);
            return;
        }

        // Copy to viseme array
        currentFrame.Visemes.CopyTo(visemes, 0);

        // Send visemes to native implementation.
        for (int i = 0; i < 15; i++)
        {
            RuntimeVisemes.visemeParams[i] = visemes[i];
        }
        CAPI.ovrAvatar_SetVisemes(sdkAvatar, RuntimeVisemes);
    }
}
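Both UpdateFacewave() variants above rely on fields declared elsewhere in the class. A minimal sketch of what they might look like, following the laughter-aware version; every declaration here is an assumption for illustration, not the SDK's actual source:

// Assumed supporting fields for the UpdateFacewave() snippets above.
// 15 OVRLipSync visemes plus one extra slot for laughter.
private const int VISEME_COUNT = 16;

public bool EnableLaughter = true;                   // gates use of currentFrame.laughterScore
private float[] visemes = new float[VISEME_COUNT];   // buffer copied into the avatar viseme params

private OVRLipSync.Frame currentFrame;               // filled by GetCurrentPhonemeFrame()
private OVRLipSyncContextBase lipsyncContext;        // the lip-sync context component on this object
// RuntimeVisemes is assumed to be the CAPI viseme struct (exposing visemeParams)
// consumed by ovrAvatar_SetVisemes(); its exact type depends on the Avatar SDK version.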
/// <summary>
/// Update this instance.
/// </summary>
void LateUpdate() // Changed from Update to LateUpdate
{
    _timer += Time.deltaTime;
    if (_timer >= 0.1f)
    {
        _timer = 0;
        _canSetValues = true;
    }

    if ((lipsyncContext != null) && (skinnedMeshRenderer != null))
    {
        // Trap inputs and send signals to the phoneme engine for testing purposes.

        // Get the current viseme frame
        if (lipsyncContext.GetCurrentPhonemeFrame(ref frame) == OVRLipSync.ovrLipSyncSuccess)
        {
            SetVisemeToMorphTarget();
        }

        // Record and playback sequences
        ControlSequencer();

        // TEST visemes by capturing key inputs and sending a signal
        SendSignals();
    }
}
/// <summary>
/// Lip sync
/// </summary>
private void SetLipSync()
{
    this.UpdateAsObservable()
        .Subscribe(_ =>
        {
            var currentFrame = _lipSyncContext.GetCurrentPhonemeFrame();
            _blendShapeProxy.SetValue(BlendShapePreset.A, currentFrame.Visemes[(int)OVRLipSync.Viseme.aa], false);
            _blendShapeProxy.SetValue(BlendShapePreset.I, currentFrame.Visemes[(int)OVRLipSync.Viseme.ih], false);
            _blendShapeProxy.SetValue(BlendShapePreset.U, currentFrame.Visemes[(int)OVRLipSync.Viseme.ou], false);
            _blendShapeProxy.SetValue(BlendShapePreset.E, currentFrame.Visemes[(int)OVRLipSync.Viseme.E], false);
            _blendShapeProxy.SetValue(BlendShapePreset.O, currentFrame.Visemes[(int)OVRLipSync.Viseme.oh], false);
        });
}
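In UniVRM, passing false as the last argument to SetValue only buffers the blend-shape weight; the proxy is expected to be flushed once per frame. A minimal sketch of that flush, assuming the same _blendShapeProxy field as above (the LateUpdate placement is an assumption, not part of the original):

// Commits all values buffered with SetValue(..., false) during this frame.
private void LateUpdate()
{
    _blendShapeProxy.Apply();
}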
void Update()
{
    // Get the current viseme frame
    OVRLipSync.Frame frame = lipsyncContext.GetCurrentPhonemeFrame();
    if (frame != null)
    {
        SetVisemeToMorphTarget(frame);
    }

    // Update smoothing value
    //if (smoothAmount != lipsyncContext.Smoothing)
    //{
    //    lipsyncContext.Smoothing = smoothAmount;
    //}
}
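All of these Update variants assume the lip-sync context has already been resolved. A minimal sketch of the usual initialization, assuming the context component lives on the same GameObject (the Start() body is illustrative, not taken from the original):

private OVRLipSyncContextBase lipsyncContext;

void Start()
{
    // Any context derived from OVRLipSyncContextBase exposes GetCurrentPhonemeFrame().
    lipsyncContext = GetComponent<OVRLipSyncContextBase>();
    if (lipsyncContext == null)
    {
        Debug.LogError("No OVRLipSyncContextBase component found on this GameObject.");
    }
}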
/// <summary>
/// Update this instance.
/// </summary>
void Update()
{
    if ((lipsyncContext != null) && (skinnedMeshRenderer != null))
    {
        // Trap inputs and send signals to the phoneme engine for testing purposes.

        // Get the current viseme frame
        if (lipsyncContext.GetCurrentPhonemeFrame(ref frame) == OVRLipSync.ovrLipSyncSuccess)
        {
            SetVisemeToMorphTarget();
        }

        // Record and playback sequences
        ControlSequencer();

        // TEST visemes by capturing key inputs and sending a signal
        SendSignals();
    }
}
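SetVisemeToMorphTarget() itself is not shown in these snippets. In the OVRLipSync morph-target demo it maps each viseme weight onto a blend shape of the SkinnedMeshRenderer; a sketch under that assumption, reusing the frame and skinnedMeshRenderer fields from the snippet above (the visemeToBlendTargets mapping array is assumed):

// Maps viseme index to blend-shape index on skinnedMeshRenderer; -1 means "no blend shape".
public int[] visemeToBlendTargets = new int[15];   // 15 visemes defined by OVRLipSync

void SetVisemeToMorphTarget()
{
    for (int i = 0; i < visemeToBlendTargets.Length; i++)
    {
        if (visemeToBlendTargets[i] != -1)
        {
            // Viseme weights arrive in 0..1; SetBlendShapeWeight expects 0..100.
            skinnedMeshRenderer.SetBlendShapeWeight(visemeToBlendTargets[i], frame.Visemes[i] * 100.0f);
        }
    }
}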
/// <summary>
/// Update this instance.
/// </summary>
void Update()
{
    if ((phonemeContext != null) && (material != null))
    {
        // Trap inputs and send signals to the phoneme engine for testing purposes.

        // Get the current viseme frame
        if (phonemeContext.GetCurrentPhonemeFrame(ref frame) == OVRLipSync.ovrLipSyncSuccess)
        {
            // Blend the current frame into the old frame for smoothing
            for (int i = 0; i < frame.Visemes.Length; i++)
            {
                oldFrame.Visemes[i] = oldFrame.Visemes[i] * smoothing + frame.Visemes[i] * (1.0f - smoothing);
            }

            SetVisemeToTexture();
        }
    }
}
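SetVisemeToTexture() is likewise not shown. In the OVRLipSync texture-flip demo it picks the strongest smoothed viseme and swaps the material's main texture accordingly; a sketch under that assumption, reusing the oldFrame and material fields from the snippet above (the Textures array is assumed):

// One mouth texture per viseme, assigned in the inspector (assumed field).
public Texture[] Textures = new Texture[15];

void SetVisemeToTexture()
{
    // Find the viseme with the highest smoothed weight.
    int bestViseme = -1;
    float bestWeight = 0.0f;
    for (int i = 0; i < oldFrame.Visemes.Length; i++)
    {
        if (oldFrame.Visemes[i] > bestWeight)
        {
            bestWeight = oldFrame.Visemes[i];
            bestViseme = i;
        }
    }

    // Swap the texture only if a viseme was found and a texture exists for it.
    if (bestViseme != -1 && bestViseme < Textures.Length && Textures[bestViseme] != null)
    {
        material.mainTexture = Textures[bestViseme];
    }
}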