/// <summary>
/// Updates which matrix-related properties are visible in the property grid.
/// Orientation, ScaleToGrid, ScalePercent and IntensityLevel are only shown
/// when the currently selected phoneme mapping is a matrix map; ScalePercent
/// is additionally hidden while ScaleToGrid is enabled.
/// </summary>
protected void SetMatrixBrowesables()
{
	LipSyncMapData mapData;
	// Matrix properties are only meaningful when the selected mapping exists
	// and is a matrix-style map.
	bool isMatrixMap = _library.Library.TryGetValue(_data.PhonemeMapping, out mapData)
	                   && mapData.IsMatrix;

	var browsableStates = new Dictionary<string, bool>(3)
	{
		{"Orientation", isMatrixMap},
		{"ScaleToGrid", isMatrixMap},
		// Scaling by percent is redundant when scaling to the grid.
		{"ScalePercent", isMatrixMap && !ScaleToGrid},
		{"IntensityLevel", isMatrixMap}
	};

	SetBrowsable(browsableStates);
	// Force the property grid to pick up the new browsable attributes.
	TypeDescriptor.Refresh(this);
}
// Renders phoneme intents into the internal ElementData dictionary.
// NOTE(review): despite the parameter, this method never uses 'node' -- it
// always renders across all TargetNodes. The parameter is kept for interface
// compatibility with existing callers; confirm whether per-node rendering was
// intended.
private void RenderNode(ElementNode node)
{
	// Nothing to render without a configured phoneme mapping.
	if (_data.PhonemeMapping == null)
	{
		return;
	}

	// Fall back to the default mapping when the configured one no longer exists.
	if (!_library.Library.ContainsKey(_data.PhonemeMapping))
	{
		_data.PhonemeMapping = _library.DefaultMappingName;
	}

	LipSyncMapData mapData;
	if (!_library.Library.TryGetValue(_data.PhonemeMapping, out mapData))
	{
		return;
	}

	PhonemeType phoneme = _data.StaticPhoneme;

	// Materialize the target element list only once we know we have a usable
	// mapping (the original built it unconditionally, even when nothing would
	// be rendered).
	List<ElementNode> renderNodes = TargetNodes.SelectMany(x => x.GetNodeEnumerator()).ToList();

	renderNodes.ForEach(delegate(ElementNode element)
	{
		LipSyncMapItem item = mapData.FindMapItem(element.Name);
		if (item == null)
		{
			return;
		}

		// Only elements whose map entry is active for the static phoneme
		// produce a SetLevel intent.
		if (mapData.PhonemeState(element.Name, _data.StaticPhoneme.ToString(), item))
		{
			var level = new SetLevel.SetLevel();
			level.TargetNodes = new ElementNode[] { element };
			level.Color = mapData.ConfiguredColor(element.Name, phoneme, item);
			level.IntensityLevel = mapData.ConfiguredIntensity(element.Name, phoneme, item);
			level.TimeSpan = TimeSpan;
			EffectIntents result = level.Render();
			_elementData.Add(result);
		}
	});
}
/// <summary>
/// Migrates serialized LipSync effect data from version 4 to version 5 inside a
/// sequence document: matrix-mapped effects get default Level/ScalePercent values,
/// an Orientation derived from the map, and ScaleToGrid enabled. Each old element
/// is removed and a re-serialized replacement is inserted in its place.
/// </summary>
private void MigrateLipSyncFrom4To5(XElement content)
{
	//This migration deals with changing the LipSync matrix elements from version 4 to 5
	//Get the standard namespaces that are needed in the sequence
	var namespaces = GetStandardNamespaces();
	//Add in the ones for this effect
	XNamespace d2p1 = "http://schemas.datacontract.org/2004/07/VixenModules.Effect.LipSync";
	namespaces.AddNamespace("d2p1", d2p1.NamespaceName);
	//Find the LipSync effect data elements.
	IEnumerable<XElement> lipSyncElements = content.XPathSelectElements(
		"_dataModels/d1p1:anyType[@i:type = 'd2p1:LipSyncData']",
		namespaces);
	var datamodel = content.XPathSelectElement("_dataModels", namespaces);
	// NOTE(review): the 'as' cast can yield null if the module instance is not a
	// LipSyncMapLibrary; _library.Library below would then throw NullReferenceException.
	LipSyncMapLibrary _library =
		ApplicationServices.Get<IAppModuleInstance>(LipSyncMapDescriptor.ModuleID) as LipSyncMapLibrary;
	// ToList() so we can remove elements from the tree while iterating.
	foreach (var lipSyncElement in lipSyncElements.ToList())
	{
		LipSyncMapData mapData = null;
		var lipSyncData = DeSerializer<LipSyncData>(lipSyncElement);
		if (_library.Library.TryGetValue(lipSyncData.PhonemeMapping, out mapData))
		{
			// Only matrix maps need the new scale/orientation defaults.
			if ((null != mapData) && (mapData.IsMatrix))
			{
				// Zero means "never set" in v4 data; promote to the v5 defaults.
				if (lipSyncData.Level == 0)
				{
					lipSyncData.Level = 100;
				}
				if (lipSyncData.ScalePercent == 0)
				{
					lipSyncData.ScalePercent = 100;
				}
				lipSyncData.Orientation = (mapData.StringsAreRows)
					? StringOrientation.Horizontal
					: StringOrientation.Vertical;
				lipSyncData.ScaleToGrid = true;
				mapData.UsingDefaults = false;
			}
		}
		//Remove the old version
		lipSyncElement.Remove();
		//Build up a temporary container similar to the way sequences are stored to
		//make all the namespace prefixes line up.
		IModuleDataModel[] dm = { lipSyncData };
		DataContainer dc = new DataContainer { _dataModels = dm };
		//Serialize the object into a xelement
		XElement glp = Serializer(dc, new[]
		{
			typeof(LipSyncData), typeof(IModuleDataModel[]), typeof(DataContainer)
		});
		//Extract the new data model that we want and insert it in the tree
		datamodel.Add(glp.XPathSelectElement("//*[local-name()='anyType']", namespaces));
	}
}
/// <summary>
/// Renders the matrix (picture) form of the effect. In mark-collection mode each
/// mark renders its phoneme image for the mark's duration, optionally filling
/// gaps between marks (and a trailing gap) with the REST image; otherwise the
/// single static phoneme image is rendered for the whole effect duration.
/// </summary>
private void RenderMapMatrix(LipSyncMapData mapData, PhonemeType phoneme)
{
	SetupPictureEffect();
	if (LipSyncMode == LipSyncMode.MarkCollection)
	{
		// Tracks the end of the previous mark so gaps can be detected.
		TimeSpan lastMarkTime = StartTime;
		foreach (var mark in _marks)
		{
			if (!AllowMarkGaps)
			{
				var gapDuration = mark.StartTime - lastMarkTime;
				// Gaps shorter than 10ms are ignored as noise.
				if (gapDuration.TotalMilliseconds > 10)
				{
					//Fill the gap with a rest
					var restImage = mapData.ImageForPhoneme("REST");
					if (restImage != null)
					{
						// mark.StartTime - StartTime - gapDuration == lastMarkTime - StartTime,
						// i.e. the rest starts where the previous mark ended.
						_thePic.StartTime = mark.StartTime - StartTime - gapDuration;
						_thePic.Image = restImage;
						_thePic.TimeSpan = gapDuration;
						_thePic.MarkDirty();
						var result = _thePic.Render();
						result.OffsetAllCommandsByTime(lastMarkTime - StartTime);
						_elementData.Add(result);
					}
				}
				lastMarkTime = mark.EndTime;
			}
			// Render the mark's own phoneme image (mark text names the phoneme).
			var image = mapData.ImageForPhoneme(mark.Text);
			if (image != null)
			{
				_thePic.StartTime = mark.StartTime - StartTime;
				_thePic.Image = image;
				_thePic.TimeSpan = mark.Duration;
				_thePic.MarkDirty();
				var result = _thePic.Render();
				result.OffsetAllCommandsByTime(mark.StartTime - StartTime);
				_elementData.Add(result);
			}
		}
		if (!AllowMarkGaps)
		{
			// NOTE(review): StartTime is assigned before the duration/null checks,
			// unlike the in-loop gap fill above -- harmless if the checks fail, but
			// confirm this asymmetry is intentional.
			_thePic.StartTime = lastMarkTime - StartTime;
			// Trailing gap between the last mark and the end of the effect.
			var gapDuration = StartTime + TimeSpan - lastMarkTime;
			if (gapDuration.TotalMilliseconds > 10)
			{
				//Fill the gap with a rest
				var restImage = mapData.ImageForPhoneme("REST");
				if (restImage != null)
				{
					_thePic.Image = restImage;
					_thePic.TimeSpan = gapDuration;
					_thePic.MarkDirty();
					var result = _thePic.Render();
					result.OffsetAllCommandsByTime(lastMarkTime - StartTime);
					_elementData.Add(result);
				}
			}
		}
	}
	else
	{
		// Static phoneme mode: one image for the entire effect span.
		var image = mapData.ImageForPhoneme(phoneme);
		if (image != null)
		{
			_thePic.Image = image;
			_thePic.TimeSpan = TimeSpan;
			_thePic.MarkDirty();
			var result = _thePic.Render();
			_elementData.Add(result);
		}
	}
	TearDownPictureEffect();
}
/// <summary>
/// Renders phoneme intents for a single mapped element. Outline and eyes-open
/// face components are lit steadily for the full effect duration; all other
/// components are lit only while their phoneme is active (per mark in
/// mark-collection mode, or for the static phoneme otherwise). When mark gaps
/// are not allowed, gaps longer than 10ms are filled with the REST phoneme.
/// </summary>
private void RenderMapElements(LipSyncMapData mapData, IElementNode element, PhonemeType phoneme)
{
	LipSyncMapItem item = mapData.FindMapItem(element.Id);
	if (item == null)
	{
		return;
	}

	// Outlines and open eyes behave identically: steady-on for the whole effect.
	// (These were two duplicated branches in the original; consolidated with ||,
	// which preserves the same checks and short-circuit order.)
	if (mapData.IsFaceComponentType(FaceComponent.Outlines, item) ||
	    mapData.IsFaceComponentType(FaceComponent.EyesOpen, item))
	{
		var colorVal = mapData.ConfiguredColorAndIntensity(item);
		var result = CreateIntentsForElement(element, colorVal.Item1, colorVal.Item2, TimeSpan);
		_elementData.Add(result);
		return;
	}

	if (LipSyncMode == LipSyncMode.MarkCollection && _marks != null)
	{
		// Tracks the end of the previous mark so gaps can be detected.
		TimeSpan lastMarkTime = StartTime;
		foreach (var mark in _marks)
		{
			if (!AllowMarkGaps)
			{
				//Fill the gap with a rest
				var gapDuration = mark.StartTime - lastMarkTime;
				// Gaps shorter than 10ms are ignored as noise.
				if (gapDuration.TotalMilliseconds > 10 && mapData.PhonemeState("REST", item))
				{
					var colorVal = mapData.ConfiguredColorAndIntensity(item);
					var result = CreateIntentsForElement(element, colorVal.Item1, colorVal.Item2, gapDuration);
					result.OffsetAllCommandsByTime(lastMarkTime - StartTime);
					_elementData.Add(result);
				}
				lastMarkTime = mark.EndTime;
			}
			// Render the mark's own phoneme (mark text names the phoneme).
			if (mapData.PhonemeState(mark.Text, item))
			{
				var colorVal = mapData.ConfiguredColorAndIntensity(item);
				var result = CreateIntentsForElement(element, colorVal.Item1, colorVal.Item2, mark.Duration);
				result.OffsetAllCommandsByTime(mark.StartTime - StartTime);
				_elementData.Add(result);
			}
		}
		if (!AllowMarkGaps)
		{
			//Fill the gap with a rest
			// Trailing gap between the last mark and the end of the effect.
			var gapDuration = StartTime + TimeSpan - lastMarkTime;
			if (gapDuration.TotalMilliseconds > 10 && mapData.PhonemeState("REST", item))
			{
				var colorVal = mapData.ConfiguredColorAndIntensity(item);
				var result = CreateIntentsForElement(element, colorVal.Item1, colorVal.Item2, gapDuration);
				result.OffsetAllCommandsByTime(lastMarkTime - StartTime);
				_elementData.Add(result);
			}
		}
	}
	else if (mapData.PhonemeState(phoneme.ToString(), item))
	{
		// Static phoneme mode: one intent for the entire effect span.
		var colorVal = mapData.ConfiguredColorAndIntensity(item);
		var result = CreateIntentsForElement(element, colorVal.Item1, colorVal.Item2, TimeSpan);
		_elementData.Add(result);
	}
}
// Renders all target nodes to the internal ElementData dictionary. Matrix maps
// are rendered as picture effects (one image per mark, or a single static
// phoneme image); non-matrix maps emit per-element color/intensity intents.
private void RenderNodes()
{
	EffectIntents result;
	LipSyncMapData mapData = null;
	List<ElementNode> renderNodes = TargetNodes.SelectMany(x => x.GetNodeEnumerator()).ToList();
	if (LipSyncMode == LipSyncMode.MarkCollection)
	{
		SetupMarks();
	}
	if (_data.PhonemeMapping != null)
	{
		// Fall back to the default mapping when the configured one no longer exists.
		if (!_library.Library.ContainsKey(_data.PhonemeMapping))
		{
			_data.PhonemeMapping = _library.DefaultMappingName;
		}
		PhonemeType phoneme = _data.StaticPhoneme;
		if (_library.Library.TryGetValue(_data.PhonemeMapping, out mapData))
		{
			if (mapData.IsMatrix)
			{
				SetupPictureEffect();
				if (LipSyncMode == LipSyncMode.MarkCollection)
				{
					// One image render per mark, offset to the mark's start.
					// NOTE(review): unlike the static branch below, there is no
					// File.Exists() guard before LoadImage -- confirm missing
					// phoneme pictures cannot occur here.
					foreach (var mark in _marks)
					{
						var file = mapData.PictureFileName(mark.Text.ToUpper());
						_thePic.Image = LoadImage(file);
						_thePic.TimeSpan = mark.Duration;
						_thePic.MarkDirty();
						result = _thePic.Render();
						result.OffsetAllCommandsByTime(mark.StartTime - StartTime);
						_elementData.Add(result);
					}
				}
				else
				{
					// Static phoneme mode: render the single phoneme picture.
					var file = mapData.PictureFileName(phoneme);
					if (File.Exists(file))
					{
						_thePic.Image = LoadImage(file);
						result = _thePic.Render();
						_elementData.Add(result);
					}
				}
				// NOTE(review): this renders and adds _thePic output a SECOND
				// time (with whatever image/timespan was last set), on top of
				// the renders already added above -- looks like leftover debug
				// or a merge artifact; confirm it is intentional.
				if (null != _thePic)
				{
					result = _thePic.Render();
					_elementData.Add(result);
				}
				// NOTE(review): no TearDownPictureEffect() on this matrix path,
				// even though SetupPictureEffect() was called above -- the
				// teardown only happens in the non-matrix else branch below.
			}
			else
			{
				renderNodes.ForEach(delegate(ElementNode element)
				{
					LipSyncMapItem item = mapData.FindMapItem(element.Name);
					if (item != null)
					{
						if (LipSyncMode == LipSyncMode.MarkCollection && _marks != null)
						{
							// One intent per mark whose phoneme is active for
							// this element, offset to the mark's start.
							foreach (var mark in _marks)
							{
								if (mapData.PhonemeState(element.Name, mark.Text.ToUpper(), item))
								{
									var colorVal = mapData.ConfiguredColorAndIntensity(element.Name, mark.Text.ToUpper(), item);
									result = CreateIntentsForPhoneme(element, colorVal.Item1, colorVal.Item2, mark.Duration);
									result.OffsetAllCommandsByTime(mark.StartTime - StartTime);
									_elementData.Add(result);
								}
							}
						}
						else
						{
							// Static phoneme mode: one intent for the whole span.
							if (mapData.PhonemeState(element.Name, phoneme.ToString(), item))
							{
								var colorVal = mapData.ConfiguredColorAndIntensity(element.Name, phoneme.ToString(), item);
								result = CreateIntentsForPhoneme(element, colorVal.Item1, colorVal.Item2, TimeSpan);
								_elementData.Add(result);
							}
						}
					}
				});
				// NOTE(review): teardown without a matching SetupPictureEffect()
				// on this path -- presumably a no-op when nothing was set up;
				// confirm.
				TearDownPictureEffect();
			}
		}
	}
}