Example #1
    List<PhonemeContainer> CreateMergeBuffer(PhonemeBuffer.InternalBuffer frontBuffer, out float mergeBufferLength)
    {
        List<PhonemeContainer> mergeBuffer = new List<PhonemeContainer>();

        //Start with the buffer furthest back in time (oldest).
        PhonemeBuffer.InternalBuffer current = frontBuffer.GetNextBuffer();

        //Retime all buffers so they use this start value.
        float startTime = current.GetBufferData()[0].Start;

        mergeBufferLength = 0f;

        //Step through all buffers until we loop back to the front buffer.
        while (!System.Object.ReferenceEquals(current, frontBuffer))
        {
            RetimeBufferData(current.GetBufferData(), startTime, mergeBuffer);

            //Track total length of merge buffer.
            mergeBufferLength += current.GetBufferLength();

            //Advance forward in time; the while condition ends the loop once we reach the front buffer again.
            current = current.GetNextBuffer();
        }

        //Finally add the length of the front buffer; this gives us the total length of the merge buffer in seconds.
        mergeBufferLength += frontBuffer.GetBufferLength();

        //Append the front buffer's data at the end of the merged buffer.
        RetimeBufferData(frontBuffer.GetBufferData(), startTime, mergeBuffer);

        return mergeBuffer;
    }
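Example #1 calls a RetimeBufferData helper that the snippet doesn't include. A minimal sketch of its likely role, assuming PhonemeContainer exposes writable Start and End floats; the name and signature come from the call site above, but the body is an assumption:

    //Sketch only: rebases each phoneme against startTime and appends it to the merge buffer.
    void RetimeBufferData(List<PhonemeContainer> bufferData, float startTime, List<PhonemeContainer> mergeBuffer)
    {
        for (int i = 0; i < bufferData.Count; i++)
        {
            PhonemeContainer phoneme = bufferData[i];

            //Shift timings so the oldest buffer starts at time zero (assumes Start/End are writable).
            phoneme.Start -= startTime;
            phoneme.End   -= startTime;

            mergeBuffer.Add(phoneme);
        }
    }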
Example #2
    public void FinalizeAnimation(PhonemeBuffer.InternalBuffer completeBuffer)
    {
        //DebugBuffer(completeBuffer);

        //Contains all phonemes for entire recording.
        List<PhonemeContainer> phonemes = completeBuffer.GetBufferData();

        //Create all our streamed lipsync channels.
        List<IAnimationChannel> lipsyncChannels = new List<IAnimationChannel>(7);

        lipsyncChannels.Add(new RotationChannel("Mid_Head_Jnt_03", new List<Vector3>(50), false, false, true, true, FacialAnimation.FRAMERATE));
        lipsyncChannels.Add(new MorphChannel("Corner_In", new List<float>(50), false, false, true, true, FacialAnimation.FRAMERATE));
        lipsyncChannels.Add(new MorphChannel("I", new List<float>(50), false, false, true, true, FacialAnimation.FRAMERATE));
        lipsyncChannels.Add(new MorphChannel("Lip_LowerUp", new List<float>(50), false, false, true, true, FacialAnimation.FRAMERATE));
        lipsyncChannels.Add(new MorphChannel("Lip_LowerDown", new List<float>(50), false, false, true, true, FacialAnimation.FRAMERATE));
        lipsyncChannels.Add(new MorphChannel("Lip_UpperUp", new List<float>(50), false, false, true, true, FacialAnimation.FRAMERATE));
        lipsyncChannels.Add(new MorphChannel("Lip_UpperDown", new List<float>(50), false, false, true, true, FacialAnimation.FRAMERATE));

        this.lipSyncer = new LipSyncData();

        //Stream data into these animation channels.
        this.lipSyncer.SetLipSyncChannels(lipsyncChannels);

        //Calculate lipsync data.
        this.lipSyncer.StreamedTextToSpeech(phonemes, 0f);

        if (this.OnFinished != null)
        {
            this.OnFinished(lipsyncChannels, phonemes, this.phonemesPath);
        }
    }
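The OnFinished callback hands the finished channels, phonemes, and phoneme file path to whoever subscribed. A sketch of a plausible wiring, assuming OnFinished is a delegate field matching the three-argument invocation in FinalizeAnimation; the delegate type name is hypothetical:

    //Hypothetical delegate shape inferred from the invocation in FinalizeAnimation above.
    public delegate void FinishedHandler(List<IAnimationChannel> channels, List<PhonemeContainer> phonemes, string phonemesPath);
    public FinishedHandler OnFinished;

    //Illustrative subscription from the owning system:
    void SubscribeToFinished()
    {
        this.OnFinished = (channels, phonemes, path) =>
        {
            Debug.Log("Lipsync ready: " + channels.Count + " channels, " + phonemes.Count + " phonemes (" + path + ")");
        };
    }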
Example #3
    public void UpdateAnimation(PhonemeBuffer.InternalBuffer frontBuffer)
    {
        if (!this.hasStartedStreaming)
        {
            this.hasStartedStreaming = true;
            this.startupTime         = Time.realtimeSinceStartup;
        }

        //Keep track of time when animation data comes in; this lets us track which frame we should be on.
        this.timeSinceStart = Time.realtimeSinceStartup - this.startupTime;

        float mergeBufferLength;
        List<PhonemeContainer> mergeBuffer = CreateMergeBuffer(frontBuffer, out mergeBufferLength);

        //Determine how much of the merge buffer to keep after calculating the animation.
        float offsetStream = mergeBufferLength - frontBuffer.GetBufferLength();

        this.lipSyncer.StreamedTextToSpeech(mergeBuffer, offsetStream);

        //Only triggered once on the first callback.
        if (!this.hasBegunAnimating)
        {
            this.animationTarget.PlayFacialAnimation(this.streamedAnimationClip, true, 0.5f, 0f);
            this.hasBegunAnimating = true;
        }

        #if UNITY_EDITOR && DEBUG_MODE
        for (int i = 0; i < this.streamedLipsyncChannels.Count; i++)
        {
            IAnimationChannel channel = this.streamedLipsyncChannels[i];
            channel.UpdateDebugWindow();
        }
        #endif
    }
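Both CreateMergeBuffer and UpdateAnimation assume that PhonemeBuffer.InternalBuffer instances form a circular linked list: following GetNextBuffer() from the front buffer reaches the oldest buffer first and eventually wraps back to the front, which is what terminates the merge loop. A minimal sketch of that shape; the field names and GetBufferLength body are assumptions:

    public class InternalBuffer
    {
        InternalBuffer next;                                          //Next buffer in the ring; the chain wraps back to the front buffer.
        List<PhonemeContainer> data = new List<PhonemeContainer>();

        public InternalBuffer GetNextBuffer() { return next; }
        public List<PhonemeContainer> GetBufferData() { return data; }
        public int NumberOfItems() { return data.Count; }

        public float GetBufferLength()
        {
            //Length in seconds covered by this buffer; zero when empty.
            return data.Count == 0 ? 0f : data[data.Count - 1].End - data[0].Start;
        }
    }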
Example #4
    void DebugBuffer(PhonemeBuffer.InternalBuffer buffer)
    {
        #if UNITY_EDITOR
        List<PhonemeContainer> phonemes = buffer.GetBufferData();

        Debug.Log("-----Buffer Start----");
        for (int i = 0; i < phonemes.Count; i++)
        {
            PhonemeContainer phoneme = phonemes[i];

            Debug.Log("Phoneme: " + phoneme.Phoneme);
            Debug.Log("Start: " + phoneme.Start);
            Debug.Log("End: " + phoneme.End);
        }
        #endif
    }

    public bool ProcessRecording(string filePath)
    {
        //This invokes the finalize handler on the phoneme buffer instance, which in turn notifies the animation system.
        PhonemeBuffer.InternalBuffer completeBuffer = this.phonemeBuffer.Finalize();

        //An empty buffer means no sound was recorded; return false.
        if (completeBuffer.NumberOfItems() == 0)
        {
            return false;
        }

        //Trim audio file data.
        Bridge_TrimRecording(filePath, 0f, completeBuffer.GetBufferLength());

        //Make this buffer eligible for GC.
        this.phonemeBuffer = null;

        return true;
    }
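A plausible end-to-end call site once recording stops; the 'processor' variable and the file path are illustrative, not part of the original API:

    //Illustrative usage; 'processor' holds the class the methods above belong to.
    string recordingPath = Application.persistentDataPath + "/recording.wav";

    if (processor.ProcessRecording(recordingPath))
    {
        //Finalize() already notified the animation system; the trimmed audio is ready for playback.
        Debug.Log("Recording processed: " + recordingPath);
    }
    else
    {
        Debug.Log("Nothing recorded; skipping lipsync.");
    }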