Example #1
    public FacialAnimationClip ConstructFacialAnimationClip(string type, string actionID, string stance, InteractionAnimationType interactionType = InteractionAnimationType.Null)
    {
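        //Look up matching animation data in the shared database; returns null when no entry exists.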
        FacialAnimationData data = FacialAnimationDatabase.Instance.GetFacialAnimation(type, actionID, interactionType, AnimationUtilities.StanceStringToEnum(stance));

        if (data != null)
        {
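            //Convert the raw channel data into a playable clip (FillAnimationClip is shown in Example #6).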
            FacialAnimationClip clip = FillAnimationClip(data);

            return clip;
        }

        return null;
    }
Example #2
    public VoiceRecordingAnimation(FacialAnimationClip clip, FacialAnimation animationTarget, float estimatedSpeakTime, string phonemesPath)
    {
        this.streamedAnimationClip = clip;
        this.animationTarget       = animationTarget;
        this.phonemesPath          = phonemesPath;
        this.lipSyncer             = new LipSyncData();

        //Used to size our animation channels so the internal array in each keyframe list is big enough.
        this.estimatedSpeakFrames = (int)(FacialAnimation.FRAMERATE * estimatedSpeakTime);

        //Create all our streamed lipsync channels.
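        //One rotation channel (presumably the jaw joint) plus six morph channels covering the mouth shapes.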
        this.streamedLipsyncChannels = new List<IAnimationChannel>(7);
        this.streamedLipsyncChannels.Add(new RotationChannel("Mid_Head_Jnt_03", new List<Vector3>(50), false, false, true, true, FacialAnimation.FRAMERATE));
        this.streamedLipsyncChannels.Add(new MorphChannel("Corner_In", new List<float>(50), false, false, true, true, FacialAnimation.FRAMERATE));
        this.streamedLipsyncChannels.Add(new MorphChannel("I", new List<float>(50), false, false, true, true, FacialAnimation.FRAMERATE));
        this.streamedLipsyncChannels.Add(new MorphChannel("Lip_LowerUp", new List<float>(50), false, false, true, true, FacialAnimation.FRAMERATE));
        this.streamedLipsyncChannels.Add(new MorphChannel("Lip_LowerDown", new List<float>(50), false, false, true, true, FacialAnimation.FRAMERATE));
        this.streamedLipsyncChannels.Add(new MorphChannel("Lip_UpperUp", new List<float>(50), false, false, true, true, FacialAnimation.FRAMERATE));
        this.streamedLipsyncChannels.Add(new MorphChannel("Lip_UpperDown", new List<float>(50), false, false, true, true, FacialAnimation.FRAMERATE));

        //Add our streamed channels. This will also remove any previous channel with the same name.
        for (int i = 0; i < this.streamedLipsyncChannels.Count; i++)
        {
            IAnimationChannel channel = this.streamedLipsyncChannels[i];
            this.streamedAnimationClip.AddAnimationChannel(channel);
        }

        //Stream data into these animation channels.
        this.lipSyncer.SetLipSyncChannels(this.streamedLipsyncChannels);

        this.hasBegunAnimating   = false;
        this.hasStartedStreaming = false;

        #if UNITY_EDITOR && DEBUG_MODE
        //Close previous instance of window.
        if (visemeVisualizer != null)
        {
            visemeVisualizer.Close();
        }

        EditorApplication.ExecuteMenuItem("Plotagon/VisemeVisualizer");
        visemeVisualizer = EditorWindow.focusedWindow;

        //Allows us to see results in Unity's editor window.
        for (int i = 0; i < this.streamedLipsyncChannels.Count; i++)
        {
            IAnimationChannel channel = this.streamedLipsyncChannels[i];
            channel.AddDebugWindow(visemeVisualizer);
        }
        #endif
    }
Example #3
    void StartRecording()
    {
        bool available = this.device.IsMicrophoneAvailible();

        //Make sure we have a microphone.
        if (!available)
        {
            return;
        }

        //Estimate how long the recording should run.
        float estimatedSpeakTime = TextAnalysisTools.EsimateTextLength(this.text, this.syllablesPerMinute) + RecordingMachineBase.RECORD_PADDING;

        UnityEngine.Debug.Log("Speak Time: " + estimatedSpeakTime);

        //Clear states between each run.
        ClearRecordingState();

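        //The startup-relative timestamp keeps each recording file name unique within a session.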
        RecordingMachineBase.SetRecordingFilePath(Application.persistentDataPath + "/" + Time.realtimeSinceStartup + ".wav");
        RecordingMachineBase.SetCalibrationFilePath(Application.persistentDataPath + "/microphonecalibration.json");

        //Setup initial state where our record state will begin.
        RecordingMachineBase begin = new BeginRecordingMachine(estimatedSpeakTime);

        //Starting here.
        SetRecordingState(begin);

        FacialAnimationClip clip = this.animationTarget.ConstructFacialAnimationClip("dialogue", "neutral", "StandingNormal");

        //Make sure we call this after record state is setup.
        this.voiceAnimation = new VoiceRecordingAnimation(clip, this.animationTarget, estimatedSpeakTime, null);

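        //Keep the generated lipsync channels once streaming finishes so they can be replayed later.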
        this.voiceAnimation.SubscribeOnFinished((List<IAnimationChannel> lipsyncChannels, List<PhonemeContainer> phonemes, string phonemePath) => {
            this.lipsyncChannels = lipsyncChannels;
        });

        //Couple callbacks from microphone to the animation component.
        this.device.AddHandlerToBufferChanged(voiceAnimation.UpdateAnimation);

        //Let animation component know when we are done.
        this.device.AddHandlerToBufferFinished(voiceAnimation.FinalizeAnimation);
        StartCoroutine(ProcessRecordingState());
    }
Example #4
    public void PreviewFacialAnimation(FacialAnimationClip animationClip, float previewTime)
    {
        if (animationClip != null)
        {
            this.CurrentClipName = animationClip.AnimationName;

            //Make sure the channels we added are bound to their transforms. (the actual thing they are to animate)
            animationClip.BindAllChannels(this.animationTransforms);

            //Set clip state to playing, it now awaits update calls.
            animationClip.PlayAnimation(1f, 0f, previewTime);

            //Update all transforms to show the frame specified in previewTime.
            animationClip.UpdateFrame();
        }
        else
        {
            Debug.LogError("Cannot preview animation clip");
        }
    }
Example #5
    public void PlayFacialAnimation(FacialAnimationClip animationClip, bool doTransition, float transitionTime, float playTime)
    {
        if (animationClip != null)
        {
            this.currentClip     = animationClip;
            this.CurrentClipName = this.currentClip.AnimationName;

            //Make sure the channels we added are bound to their transforms. (the actual thing they are to animate)
            this.currentClip.BindAllChannels(this.animationTransforms);

            //Set clip to currently active clip, it now awaits update calls.
            this.currentClip.PlayAnimation(doTransition ? 0f : 1f, transitionTime, playTime);
            this.currentClip.UpdateFrame();

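            //Remember the clip so a later call can transition away from it (assumption based on the doTransition parameter).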
            this.previousClip = this.currentClip;
        }
        else
        {
            Debug.LogError("Cannot play animation clip");
        }
    }
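A minimal usage sketch contrasting the two entry points above; the facialAnimation reference is an assumption, while the clip arguments mirror the call sites in Examples #3 and #7:

    //Hypothetical caller; 'facialAnimation' is assumed to be the component exposing these methods.
    FacialAnimationClip clip = facialAnimation.ConstructFacialAnimationClip("dialogue", "neutral", "StandingNormal");

    //Scrub a single frame half a second into the clip, e.g. from an editor tool.
    facialAnimation.PreviewFacialAnimation(clip, 0.5f);

    //Or play it back at runtime, blending in over one second.
    facialAnimation.PlayFacialAnimation(clip, true, 1.0f, 0f);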
Example #6
    //Fill a clip with animation channels built from the database's animation data.
    FacialAnimationClip FillAnimationClip(FacialAnimationData data)
    {
        //Create animation clip to fill.
        FacialAnimationClip clip = new FacialAnimationClip(data.ID, this.baseChannels, FRAMERATE);

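        //Copy each rotation channel from the database record into the clip.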
        for (int i = 0; i < data.RotationChannels.Count; i++)
        {
            RotationFacialAnimationChannel channelData = data.RotationChannels[i];

            RotationChannel rotationChannel = new RotationChannel(channelData.ChannelName, channelData.Keys, channelData.CanLoop, channelData.CanMirror, false, false, FRAMERATE);
            clip.AddAnimationChannel(rotationChannel);
        }

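        //Then copy each morph (blend shape) channel the same way.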
        for (int i = 0; i < data.MorphChannels.Count; i++)
        {
            MorphFacialAnimationChannel channelData = data.MorphChannels[i];
            MorphChannel morphChannel = new MorphChannel(channelData.ChannelName, channelData.Keys, channelData.CanLoop, channelData.CanMirror, false, false, FRAMERATE);
            clip.AddAnimationChannel(morphChannel);
        }

        return clip;
    }
Example #7
    // voice recording
    IEnumerator PlayRecordedAudio(string path, List<IAnimationChannel> animationChannels)
    {
        if (File.Exists(path))
        {
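            //WWW is Unity's legacy loader; newer Unity versions replace it with UnityWebRequestMultimedia.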
            WWW audioLoader = new WWW("file://" + path);

            while (!audioLoader.isDone)
            {
                yield return null;
            }

            //As frames are generated they are appended to this clip. (will lag behind by bufferLength * 30)
            this.recordedClip = this.animationTarget.ConstructFacialAnimationClip("dialogue", "neutral", "StandingNormal");

            foreach (IAnimationChannel channel in animationChannels)
            {
                this.recordedClip.AddAnimationChannel(channel);
            }

            this.animationTarget.PlayFacialAnimation(this.recordedClip, true, 1.0f, 0f);

            AudioClip clip = audioLoader.audioClip;
            AudioSource.PlayClipAtPoint(clip, Vector3.zero);

            /*
             * float timer = 0f;
             *
             * while (timer < clip.length)
             * {
             *      timer += Time.deltaTime;
             *      yield return null;
             * }
             *
             * this.recordedClip.StopAnimation();
             */
        }
    }