public static void AddActionFunction(this IActionProvider actionProvider, string action, ActionFunction func)
{
    actionProvider.Functions.Add("action_" + action, (settings) => {
        return func(settings as ActionSettings);
    });
}
public static uint RegisterAction(ActionFunction function, string name, string help)
{
    if (function == null) {
        throw new ArgumentNullException("function");
    }
    if (_actionFunctions.ContainsValue(function)) {
        throw new ArgumentOutOfRangeException("function", "Action already registered");
    }
    if (name == null) {
        throw new ArgumentNullException("name");
    }
    if (String.IsNullOrEmpty(name)) {
        throw new ArgumentOutOfRangeException("name");
    }
    if (_actionInfos.Values.FirstOrDefault(a => a.Name == name) != null) {
        throw new ArgumentOutOfRangeException("name", "Action with name " + name + " already registered");
    }

    uint actionId = ++_id;
    string actionHelp = (help == null) ? String.Empty : help;

    _actionFunctions.Add(actionId, function);
    _actionInfos.Add(actionId, new ActionInfo(actionId, name, actionHelp, true, String.Empty));

    return actionId;
}
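// A hedged call-site sketch for the registry above. "ActionRegistry" is only a
// placeholder for whatever static class hosts RegisterAction, and ReloadConfig
// stands for any method whose signature matches the ActionFunction delegate.
uint reloadId = ActionRegistry.RegisterAction(ReloadConfig, "reload", "Reloads the configuration");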
public Action(ActionFunction action) { if (action == null) throw new System.ArgumentNullException("action", "ActionFunction supplied to Action cannot be null"); this.CachedFunc = action; }
/// <summary>
/// Query actions' probabilities based on current states. The first dimension of the array must be the batch dimension.
/// Note that the returned values are normalized log probabilities.
/// </summary>
public virtual float[,] EvaluateProbability(float[,] vectorObservation, float[,] actions, List<float[,,,]> visualObservation, List<float[,]> actionsMask = null)
{
    Debug.Assert(mode == Mode.PPO, "This method is for PPO mode only");
    Debug.Assert(TrainingEnabled == true, "The model needs to be initialized with training enabled to use EvaluateProbability()");

    List<Array> inputLists = new List<Array>();
    if (HasVectorObservation) {
        Debug.Assert(vectorObservation != null, "Must Have vector observation inputs!");
        inputLists.Add(vectorObservation);
    }
    if (HasVisualObservation) {
        Debug.Assert(visualObservation != null, "Must Have visual observation inputs!");
        inputLists.AddRange(visualObservation);
    }

    var actionProbs = new float[actions.GetLength(0), ActionSpace == SpaceType.continuous ? actions.GetLength(1) : 1];

    if (ActionSpace == SpaceType.continuous) {
        inputLists.Add(actions);
        var result = ActionProbabilityFunction.Call(inputLists);
        actionProbs = (float[,])result[0].eval();
    } else if (ActionSpace == SpaceType.discrete) {
        List<float[,]> masks = actionsMask;
        int batchSize = vectorObservation.GetLength(0);
        int branchSize = ActionSizes.Length;
        //create an all-ones mask if the input mask is null
        if (masks == null) {
            masks = CreateDummyMasks(ActionSizes, batchSize);
        }
        inputLists.AddRange(masks);

        var result = ActionFunction.Call(inputLists);

        //get the log probabilities
        actionProbs = new float[batchSize, branchSize];
        for (int b = 0; b < branchSize; ++b) {
            var tempProbs = (float[,])result[b + 1].eval();
            int actSize = ActionSizes[b];
            for (int i = 0; i < batchSize; ++i) {
                actionProbs[i, b] = tempProbs[i, Mathf.RoundToInt(actions[i, b])];
            }
        }
    }

    return actionProbs;
}
public void WatcherEngineExecution(RuleManager.EngineMessage msg, ActionFunction del, string targetDel, Entity watcher, string valueDel)
{
    for (int i = 0; i < RuleManager.EngineAction.Count; i++) {
        var tmp = RuleManager.EngineAction.ElementAt(i);
        if (tmp.Equals(msg)) {
            del(targetDel, watcher, valueDel);
            return;
        }
    }
}
public Problem(
    TState initalState,
    ActionFunction<TState, TAction> actionFunction,
    ResultFunction<TState, TAction> resultFunction,
    GoalTest<TState> goalTest,
    StepCost<TState, TAction> stepCost)
{
    InitalState = initalState;
    ActionFunction = actionFunction;
    ResultFunction = resultFunction;
    GoalTest = goalTest;
    StepCost = stepCost;
}
/// <summary>
/// This is implemented for ISupervisedLearningModel so that this model can also be used by TrainerMimic.
/// </summary>
/// <param name="vectorObservation"></param>
/// <param name="visualObservation"></param>
/// <returns>(mean, var); var will be null for discrete action space</returns>
public ValueTuple<float[,], float[,]> EvaluateAction(float[,] vectorObservation, List<float[,,,]> visualObservation, List<float[,]> actionsMask)
{
    Debug.Assert(mode == Mode.SupervisedLearning, "This method is for supervised learning mode only");

    List<Array> inputLists = new List<Array>();
    if (HasVectorObservation) {
        Debug.Assert(vectorObservation != null, "Must Have vector observation inputs!");
        inputLists.Add(vectorObservation);
    }
    if (HasVisualObservation) {
        Debug.Assert(visualObservation != null, "Must Have visual observation inputs!");
        inputLists.AddRange(visualObservation);
    }

    if (ActionSpace == SpaceType.discrete) {
        int batchSize = vectorObservation != null ? vectorObservation.GetLength(0) : visualObservation[0].GetLength(0);
        int branchSize = ActionSizes.Length;
        List<float[,]> masks = actionsMask;
        //create an all-ones mask if the input mask is null
        if (masks == null) {
            masks = CreateDummyMasks(ActionSizes, batchSize);
        }
        inputLists.AddRange(masks);
    }

    var result = ActionFunction.Call(inputLists);
    float[,] actions = (float[,])result[0].eval();
    float[,] outputVar = null;
    if (SLHasVar) {
        outputVar = (float[,])result[1].eval();
    }

    //normalize the input observations on every call of EvaluateAction
    if (useInputNormalization && HasVectorObservation) {
        UpdateNormalizerFunction.Call(new List<Array>() { vectorObservation });
    }

    return ValueTuple.Create(actions, outputVar);
}
public void WatcherActionEngine(RuleManager.EngineMessage msg, ActionFunction del, string targetDel, Entity watcher, string valueDel)
{
    Entity.GetComponent<CollisionComponent>().AddOnCollisionHandler(delegate(Fixture sender, Fixture other, Contact contact) {
        var tmp = new RuleManager.EngineMessage();
        tmp.Action = RuleManager.ActionEngine.Collision;
        tmp.entitybase = EntityManager.GetAllEntities().Find(i => i.Guid == (Guid)sender.Body.Tag);
        tmp.entityFocus = EntityManager.GetAllEntities().Find(i => i.Guid == (Guid)other.Body.Tag);
        RuleManager.EngineAction.Enqueue(tmp);
        return true;
    });

    listWatcherEngine.Add(msg, new Object[4] { del, targetDel, watcher, valueDel });
    WatcherSet = true;
}
public override void DrawNode()
{
    GUILayout.BeginHorizontal();
    GUILayoutOption op = GUILayout.MinWidth(300);
    Action = (Controller_Actions)EditorGUILayout.EnumPopup("Action : ", Action, op);

    if (Event.current.type == EventType.Repaint) {
        if (Linkers[(int)Node.LinkType.To]) {
            Linkers[(int)Node.LinkType.To].SetRect(new Rect(rect.x + rect.width / 2, rect.y + rect.height, 16, 16));
        }
        if (Linkers[(int)Node.LinkType.From]) {
            Linkers[(int)Node.LinkType.From].SetRect(new Rect(rect.x + rect.width / 2, rect.y - 16, 16, 16));
        }
    }
    GUILayout.EndHorizontal();

    ActionFunction.DrawGUI();

    if (GUI.changed) {
        Node_Editor.editor.RecalculateFrom(this);
        if (oldAction != Action) {
            var type = System.Type.GetType(Controller_ActionsDico[(int)Action]);
            ActionFunction = (ActionNodeFunctions)System.Activator.CreateInstance(type);
            oldAction = Action;
        }
    }
}
public Task Initialize(ActionFunction action, float priority, string name, bool bLooping, int counter)
{
    if (action == null)
        throw new System.ArgumentNullException("action", "ActionFunction supplied to Task cannot be null");

    this.Action = new Action(action);
    this.Condition = null;
    this.Priority = priority;
    if (!string.IsNullOrEmpty(name))
        this.BTName = name;
    this.Looping = bLooping;
    this.Counter = counter;
    return this;
}
/// <summary>
/// Query actions based on current states. The first dimension of the array must be the batch dimension.
/// </summary>
/// <param name="vectorObservation"></param>
/// <param name="visualObservation"></param>
/// <returns>(actions, variance); variance is null if the model has no variance output</returns>
public virtual ValueTuple<float[,], float[,]> EvaluateAction(float[,] vectorObservation, List<float[,,,]> visualObservation)
{
    List<Array> inputLists = new List<Array>();
    if (HasVectorObservation) {
        Debug.Assert(vectorObservation != null, "Must Have vector observation inputs!");
        inputLists.Add(vectorObservation);
    }
    if (HasVisualObservation) {
        Debug.Assert(visualObservation != null, "Must Have visual observation inputs!");
        inputLists.AddRange(visualObservation);
    }

    var result = ActionFunction.Call(inputLists);
    var outputAction = (float[,])result[0].eval();

    float[,] actions = new float[outputAction.GetLength(0), ActionSpace == SpaceType.continuous ? outputAction.GetLength(1) : 1];
    if (ActionSpace == SpaceType.continuous) {
        for (int j = 0; j < outputAction.GetLength(0); ++j) {
            for (int i = 0; i < outputAction.GetLength(1); ++i) {
                actions[j, i] = outputAction[j, i];
            }
        }
    } else if (ActionSpace == SpaceType.discrete) {
        for (int j = 0; j < outputAction.GetLength(0); ++j) {
            actions[j, 0] = outputAction.GetRow(j).ArgMax();
        }
    }

    float[,] outputVar = null;
    if (hasVariance) {
        outputVar = (float[,])result[1].eval();
    }

    return ValueTuple.Create(actions, outputVar);
}
/// <summary>
/// Query actions based on current states. The first dimension of the array must be the batch dimension.
/// </summary>
/// <param name="vectorObservation">current vector states. Can be batch input</param>
/// <returns></returns>
public virtual float[,] EvaluateAction(float[,] vectorObservation, List<float[,,,]> visualObservation)
{
    List<Array> inputLists = new List<Array>();
    if (HasVectorObservation) {
        Debug.Assert(vectorObservation != null, "Must Have vector observation inputs!");
        inputLists.Add(vectorObservation);
    }
    if (HasVisualObservation) {
        Debug.Assert(visualObservation != null, "Must Have visual observation inputs!");
        inputLists.AddRange(visualObservation);
    }

    var result = ActionFunction.Call(inputLists);
    float[,] actions = (float[,])result[0].eval();

    //normalize the input observations on every call of EvaluateAction
    if (useInputNormalization && HasVectorObservation) {
        UpdateNormalizerFunction.Call(new List<Array>() { vectorObservation });
    }

    /*for (int i = 0; i < actions.GetLength(0); ++i) {
        for (int j = 0; j < actions.GetLength(1); ++j) {
            if (float.IsNaN(actions[i, j])) {
                Debug.LogError("error");
            }
        }
    }*/

    return actions;
}
/// <summary>
/// Query actions based on current states. The first dimension of the array must be the batch dimension.
/// </summary>
/// <param name="vectorObservation"></param>
/// <param name="visualObservation"></param>
/// <returns>(actions, variance); variance is null if the model has no variance output</returns>
public virtual ValueTuple<float[,], float[,]> EvaluateAction(float[,] vectorObservation, List<float[,,,]> visualObservation, List<float[,]> actionsMask = null)
{
    List<Array> inputLists = new List<Array>();
    if (HasVectorObservation) {
        Debug.Assert(vectorObservation != null, "Must Have vector observation inputs!");
        inputLists.Add(vectorObservation);
    }
    if (HasVisualObservation) {
        Debug.Assert(visualObservation != null, "Must Have visual observation inputs!");
        inputLists.AddRange(visualObservation);
    }

    if (ActionSpace == SpaceType.discrete) {
        int batchSize = vectorObservation != null ? vectorObservation.GetLength(0) : visualObservation[0].GetLength(0);
        int branchSize = ActionSizes.Length;
        List<float[,]> masks = actionsMask;
        //create an all-ones mask if the input mask is null
        if (masks == null) {
            masks = CreateDummyMasks(ActionSizes, batchSize);
        }
        inputLists.AddRange(masks);
    }

    var result = ActionFunction.Call(inputLists);
    float[,] actions = (float[,])result[0].eval();
    float[,] outputVar = null;
    if (hasVariance) {
        outputVar = (float[,])result[1].eval();
    }

    return ValueTuple.Create(actions, outputVar);
}
public double ComputeOutput(double[] inputs)
{
    if (inputs.Length != Weight.Length - 1) {
        throw new ArgumentException("inputs are not the same length as the weights");
    }

    _lastInputs = inputs;
    double sum = 0;
    for (int i = 0; i < inputs.Length; i++) {
        sum += inputs[i] * Weight[i];
    }
    sum += Weight[Weight.Length - 1]; //don't forget the bias/threshold term
    _lastSum = sum;

    LastOutput = ActionFunction.ComputeValue(_lastSum);
    return LastOutput;
}
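// A minimal, self-contained sketch of the activation contract the neuron code above
// relies on (ComputeValue for the forward pass, ComputeDerivateValue for the error
// term used by ComputeError further down). The real ActionFunction type in the source
// project may be an interface or delegate with a different shape; this sigmoid is
// only an illustrative stand-in.
public sealed class SigmoidActivation
{
    // f(x) = 1 / (1 + e^-x)
    public double ComputeValue(double x)
    {
        return 1.0 / (1.0 + System.Math.Exp(-x));
    }

    // f'(x) = f(x) * (1 - f(x)), expressed via the forward value
    public double ComputeDerivateValue(double x)
    {
        double s = ComputeValue(x);
        return s * (1.0 - s);
    }
}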
/// <summary>
/// Query actions' probabilities based on current states. The first dimension of the array must be the batch dimension.
/// </summary>
public virtual float[,] EvaluateProbability(float[,] vectorObservation, float[,] actions, List<float[,,,]> visualObservation)
{
    Debug.Assert(mode == Mode.PPO, "This method is for PPO mode only");
    Debug.Assert(TrainingEnabled == true, "The model needs to be initialized with training enabled to use EvaluateProbability()");

    List<Array> inputLists = new List<Array>();
    if (HasVectorObservation) {
        Debug.Assert(vectorObservation != null, "Must Have vector observation inputs!");
        inputLists.Add(vectorObservation);
    }
    if (HasVisualObservation) {
        Debug.Assert(visualObservation != null, "Must Have visual observation inputs!");
        inputLists.AddRange(visualObservation);
    }

    var actionProbs = new float[actions.GetLength(0), ActionSpace == SpaceType.continuous ? actions.GetLength(1) : 1];

    if (ActionSpace == SpaceType.continuous) {
        inputLists.Add(actions);
        var result = ActionProbabilityFunction.Call(inputLists);
        actionProbs = (float[,])result[0].eval();
    } else if (ActionSpace == SpaceType.discrete) {
        var result = ActionFunction.Call(inputLists);
        var outputAction = (float[,])result[0].eval();
        for (int j = 0; j < outputAction.GetLength(0); ++j) {
            actionProbs[j, 0] = outputAction.GetRow(j)[Mathf.RoundToInt(actions[j, 0])];
        }
    }

    return actionProbs;
}
private static void SolveProblem<TState, TAction>(
    ActionFunction<TState, TAction> actionFunction,
    ResultFunction<TState, TAction> resultFunction,
    GoalTest<TState> goalTest,
    StepCost<TState, TAction> stepCost,
    TState initialState,
    IGraphSearch<TState, TAction> searchAlgorithm)
{
    var problem = new Problem<TState, TAction>(initialState, actionFunction, resultFunction, goalTest, stepCost);
    var solution = searchAlgorithm.Search(problem);

    Console.WriteLine("Solution:");
    Console.WriteLine("=========");
    foreach (var node in solution) {
        Console.WriteLine(node.State);
    }
    Console.ReadKey();
}
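// A self-contained sketch of how the four delegates above can describe a toy search
// problem (walking along a number line from 0 to 5). The delegate declarations here
// are stand-ins that mirror the Problem<TState, TAction> constructor shown earlier;
// the real definitions in the source repository may differ.
using System;
using System.Collections.Generic;

public delegate IEnumerable<TAction> ActionFunction<TState, TAction>(TState state);
public delegate TState ResultFunction<TState, TAction>(TState state, TAction action);
public delegate bool GoalTest<TState>(TState state);
public delegate double StepCost<TState, TAction>(TState state, TAction action);

public static class NumberLineDemo
{
    public static void Main()
    {
        // From any state, the available actions are integer steps of -1 or +1.
        ActionFunction<int, int> actions = state => new[] { -1, +1 };
        ResultFunction<int, int> result = (state, action) => state + action;
        GoalTest<int> goalTest = state => state == 5;
        StepCost<int, int> stepCost = (state, action) => 1.0;

        // These delegates would then be handed to Problem<int, int> / SolveProblem.
        Console.WriteLine(result(0, +1));   // 1
        Console.WriteLine(goalTest(5));     // True
    }
}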
/// <summary>
/// This is implemented for ISupervisedLearningModel so that this model can also be used by TrainerMimic.
/// </summary>
/// <param name="vectorObservation"></param>
/// <param name="visualObservation"></param>
/// <returns>(mean, var); var will be null for discrete action space</returns>
ValueTuple<float[,], float[,]> ISupervisedLearningModel.EvaluateAction(float[,] vectorObservation, List<float[,,,]> visualObservation)
{
    Debug.Assert(mode == Mode.SupervisedLearning, "This method is for SupervisedLearning mode only. Please set the mode of RLModePPO to SupervisedLearning in the editor.");

    List<Array> inputLists = new List<Array>();
    if (HasVectorObservation) {
        Debug.Assert(vectorObservation != null, "Must Have vector observation inputs!");
        inputLists.Add(vectorObservation);
    }
    if (HasVisualObservation) {
        Debug.Assert(visualObservation != null, "Must Have visual observation inputs!");
        inputLists.AddRange(visualObservation);
    }

    var result = ActionFunction.Call(inputLists);
    var outputAction = (float[,])result[0].eval();

    float[,] actions = new float[outputAction.GetLength(0), ActionSpace == SpaceType.continuous ? outputAction.GetLength(1) : 1];
    float[,] outputVar = null;
    if (ActionSpace == SpaceType.continuous) {
        actions = outputAction;
        outputVar = (float[,])result[1].eval();
    } else if (ActionSpace == SpaceType.discrete) {
        for (int j = 0; j < outputAction.GetLength(0); ++j) {
            actions[j, 0] = outputAction.GetRow(j).ArgMax();
        }
    }

    return ValueTuple.Create(actions, outputVar);
}
//Function that continuously watches a variable on an entity and applies a function when it matches.
public void WatcherVarEntity(string variableName, object targetvalue, string entityName, ActionFunction del, string targetDel, Entity watcher, string valueDel)
{
    listWatcherVariable.Add(del, new Object[7] { variableName, targetvalue, entityName, del, targetDel, watcher, valueDel });
    WatcherSet = true;
}
public void RemoveWatcher(ActionFunction del) { RuleManager.RemoveWatcher(this, del); }
/// <summary>
/// Will display a composite message with a delegate function
/// </summary>
/// <param name="before">The message to display before the delegate execution</param>
/// <param name="after">The message to display after the delegate execution</param>
/// <param name="write">The formatting function to use</param>
/// <param name="fct">The delegate to execute</param>
public static void Composite(string before, string after, WriteFunction write, ActionFunction fct)
{
    write.DynamicInvoke(before);
    fct.DynamicInvoke();
    write.DynamicInvoke(after);
    Write("\n");
}
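// A hedged call-site sketch for Composite above. The exact WriteFunction and
// ActionFunction delegate signatures are not shown in this listing, so the lambdas
// below only assume a string-writing delegate and a parameterless action;
// "ConsoleHelper" is a placeholder for whatever static class hosts Composite, and
// ExportReport is a placeholder method.
WriteFunction write = msg => Console.Write(msg);
ActionFunction work = () => ExportReport();

ConsoleHelper.Composite("Exporting report... ", "done.", write, work);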
public Task Initialize(ActionFunction action, float priority, string name) { return Initialize(action, priority, name, false, 0); }
private void Pb_IDCO_Click(object sender, EventArgs e) { Form ActionFunction = new ActionFunction(); ActionFunction.ShowDialog(); }
public void Register(CharacterState state, string action, ActionFunction method) { actions[state][action] = method; }
/// <summary>
/// Query actions based on current states. The first dimension of the array must be the batch dimension.
/// </summary>
/// <param name="vectorObservation">current vector states. Can be batch input</param>
/// <param name="actionProbs">output actions' probabilities; note that these are normalized log probabilities</param>
/// <param name="actionsMask">action mask for discrete actions</param>
/// <returns></returns>
public virtual float[,] EvaluateAction(float[,] vectorObservation, out float[,] actionProbs, List<float[,,,]> visualObservation, List<float[,]> actionsMask = null)
{
    Debug.Assert(mode == Mode.PPO, "This method is for PPO mode only");

    List<Array> inputLists = new List<Array>();
    if (HasVectorObservation) {
        Debug.Assert(vectorObservation != null, "Must Have vector observation inputs!");
        inputLists.Add(vectorObservation);
    }
    if (HasVisualObservation) {
        Debug.Assert(visualObservation != null, "Must Have visual observation inputs!");
        inputLists.AddRange(visualObservation);
    }

    float[,] actions = null;
    actionProbs = null;

    if (ActionSpace == SpaceType.continuous) {
        var result = ActionFunction.Call(inputLists);
        actions = (float[,])result[0].eval();
        actionProbs = (float[,])result[1].eval();
    } else if (ActionSpace == SpaceType.discrete) {
        int batchSize = vectorObservation != null ? vectorObservation.GetLength(0) : visualObservation[0].GetLength(0);
        int branchSize = ActionSizes.Length;
        List<float[,]> masks = actionsMask;
        //create an all-ones mask if the input mask is null
        if (masks == null) {
            masks = CreateDummyMasks(ActionSizes, batchSize);
        }
        inputLists.AddRange(masks);

        var result = ActionFunction.Call(inputLists);
        actions = (float[,])result[0].eval();

        //get the log probabilities
        actionProbs = new float[batchSize, branchSize];
        for (int b = 0; b < branchSize; ++b) {
            var tempProbs = (float[,])result[b + 1].eval();
            int actSize = ActionSizes[b];
            for (int i = 0; i < batchSize; ++i) {
                actionProbs[i, b] = tempProbs[i, Mathf.RoundToInt(actions[i, b])];
            }
        }
    }

    //normalize the input observations on every call of EvaluateAction
    if (useInputNormalization && HasVectorObservation) {
        UpdateNormalizerFunction.Call(new List<Array>() { vectorObservation });
    }

    return actions;
}
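// A hedged call-site sketch for the PPO EvaluateAction overload above, for a model
// with a discrete action space. "model" stands for an initialized instance of the
// model class shown in this listing; the observation sizes are illustrative only.
float[,] vectorObs = new float[1, 8];      // batch of 1, with 8 vector observations
List<float[,,,]> visualObs = null;         // no visual observations in this sketch
List<float[,]> masks = null;               // null -> all-ones dummy masks are created internally

float[,] logProbs;
float[,] chosenActions = model.EvaluateAction(vectorObs, out logProbs, visualObs, masks);
// chosenActions[0, b] is the sampled action for branch b; logProbs[0, b] is its log probability.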
public void SetAction(ActionFunction action) { _action = action; }
public void addActionDefinition(EntityAction p_Action, ActionFunction p_Func) { m_ActionDefinition.Add(p_Action, p_Func); }
public double ComputeError(double error) { Error = ActionFunction.ComputeDerivateValue(_lastSum) * (error); return(Error); }
/// <summary>
/// Initializes a new instance of the <see cref="Crafting_Parser.CraftingParser.FailableAction"/> class.
/// </summary>
/// <param name='name'>
/// Name.
/// </param>
/// <param name='signerature'>
/// Signature.
/// </param>
/// <param name='uses'>
/// Use counter.
/// </param>
/// <param name='successes'>
/// Success counter.
/// </param>
/// <param name='func'>
/// Func.
/// </param>
public FailableAction(string name, string signerature, ActionCounters uses, ActionCounters successes, ActionFunction func)
    : base(name, signerature, uses, func)
{
    successCounter = successes;
}
public void WatcherExecution(string variableName, object targetvalue, string entityName, ActionFunction del, string targetDel, Entity watcher, string valueDel)
{
    //try catch exception + exception name
    GetValueEntity(entityName);

    object valueVariable = null;
    foreach (KeyValuePair<string, Tuple<Type, object, FieldInfo>> kvp in EntityVarTable) {
        if (kvp.Key.Equals(variableName)) {
            valueVariable = kvp.Value.Item2;
        }
    }

    if (valueVariable.Equals(targetvalue)) {
        del(targetDel, watcher, valueDel);
    }
}
public static void AddActionFunction(this IActionProvider actionProvider, ActionFunction func) { actionProvider.AddActionFunction("default", func); }
public Task Initialize(ActionFunction action) { return Initialize(action, 0.5f); }
public InspectorButton(string label, ActionFunction onPress) { Label = label; OnPress = onPress; }
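// Hypothetical usage, assuming the ActionFunction expected by InspectorButton is
// compatible with a parameterless lambda (its exact signature is not shown in this
// listing); ResetStats is a placeholder method.
var resetButton = new InspectorButton("Reset Stats", () => ResetStats());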
public Task Initialize(ActionFunction action, float priority) { return Initialize(action, priority, ""); }
/// <summary>
/// Query actions based on current states. The first dimension of the array must be the batch dimension.
/// </summary>
/// <param name="vectorObservation">current vector states. Can be batch input</param>
/// <param name="actionProbs">output actions' probabilities</param>
/// <param name="useProbability">when true, the output actions are sampled based on the output mean and variance. Otherwise the mean is used directly.</param>
/// <returns></returns>
public virtual float[,] EvaluateAction(float[,] vectorObservation, out float[,] actionProbs, List<float[,,,]> visualObservation, bool useProbability = true)
{
    Debug.Assert(mode == Mode.PPO, "This method is for PPO mode only");

    List<Array> inputLists = new List<Array>();
    if (HasVectorObservation) {
        Debug.Assert(vectorObservation != null, "Must Have vector observation inputs!");
        inputLists.Add(vectorObservation);
    }
    if (HasVisualObservation) {
        Debug.Assert(visualObservation != null, "Must Have visual observation inputs!");
        inputLists.AddRange(visualObservation);
    }

    var result = ActionFunction.Call(inputLists);
    var outputAction = (float[,])result[0].eval();

    float[,] actions = new float[outputAction.GetLength(0), ActionSpace == SpaceType.continuous ? outputAction.GetLength(1) : 1];
    actionProbs = new float[outputAction.GetLength(0), ActionSpace == SpaceType.continuous ? outputAction.GetLength(1) : 1];

    if (ActionSpace == SpaceType.continuous) {
        actions = outputAction;
        actionProbs = (float[,])result[1].eval();
        //var actionsMean = (float[,])(result[2].eval());
        //var actionsVars = (float[])(result[3].eval());
        //print("actual vars" + actions.GetColumn(0).Variance() + "," + actions.GetColumn(1).Variance() + "," + actions.GetColumn(2).Variance() + "," + actions.GetColumn(3).Variance());
    } else if (ActionSpace == SpaceType.discrete) {
        for (int j = 0; j < outputAction.GetLength(0); ++j) {
            if (useProbability) {
                actions[j, 0] = MathUtils.IndexByChance(outputAction.GetRow(j));
            } else {
                actions[j, 0] = outputAction.GetRow(j).ArgMax();
            }
            actionProbs[j, 0] = outputAction.GetRow(j)[Mathf.RoundToInt(actions[j, 0])];
        }
    }

    if (useInputNormalization && HasVectorObservation) {
        UpdateNormalizerFunction.Call(new List<Array>() { vectorObservation });
        //var runningMean = (float[])runningData[0].eval();
        //var runningVar = (float[])runningData[1].eval();
        //var steps = (float)runningData[2].eval();
        //var normalized = (float[,])runningData[3].eval();
    }

    return actions;
}
/// <summary>
/// Initializes a new instance of the <see cref="Crafting_Parser.CraftingParser.Action"/> class.
/// </summary>
/// <param name='name'>
/// Name.
/// </param>
/// <param name='signerature'>
/// Signature.
/// </param>
/// <param name='uses'>
/// Use counter.
/// </param>
/// <param name='func'>
/// Func.
/// </param>
public Action(string name, string signerature, ActionCounters uses, ActionFunction func)
{
    actionName = name;
    usesCounter = uses;
    logSignerature = signerature;
    actionFunction = func;
}