public Clarion_AGC_Model(Agent MAKaey)
{
    World.LoggingLevel = System.Diagnostics.TraceLevel.Off;
    this.MAKaey = MAKaey;
    NeuralNetwork = AgentInitializer.InitializeImplicitDecisionNetwork(MAKaey, SimplifiedQBPNetwork.Factory);
}
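The constructor above only creates the agent's implicit decision network. Before the network can drive action selection it still needs inputs, outputs, and a commit; the following is a minimal sketch of that follow-up step, using only API calls that appear in the other snippets here (the dimension-value pair and action chunk are hypothetical placeholders):

// Hypothetical follow-up to the constructor above; the pair and chunk are placeholders.
DimensionValuePair state = World.NewDimensionValuePair("State", 1);
ExternalActionChunk act = World.NewExternalActionChunk("Act");

NeuralNetwork.Input.Add(state);
NeuralNetwork.Output.Add(act);

// Illustrative learning parameters; tune for the task at hand.
NeuralNetwork.Parameters.LEARNING_RATE = 1;
NeuralNetwork.Parameters.MOMENTUM = .02;

// Committing registers the network with the agent's ACS.
MAKaey.Commit(NeuralNetwork);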
static void SerializeDriveComponent()
{
    World.LoggingLevel = TraceLevel.Warning;
    Agent John = World.NewAgent("John");
    BPNetwork net;
    FoodDrive foodDr = AgentInitializer.InitializeDrive(John, FoodDrive.Factory, .5);

    if (load && File.Exists(componentLoadFile))
    {
        Console.WriteLine("Deserializing the drive component");
        SerializationPlugin.DeserializeDriveComponent(foodDr, componentLoadFile, out net);
    }
    else
    {
        Console.WriteLine("Initializing the drive component");
        net = AgentInitializer.InitializeDriveComponent(foodDr, BPNetwork.Factory);
        net.Input.AddRange(Drive.GenerateTypicalInputs(foodDr));
        net.Parameters.LEARNING_RATE = .2;
        net.Parameters.MOMENTUM = .05;
        foodDr.Commit(net);
    }

    John.Commit(foodDr);

    DoTraining(net, foodDr);

    Console.WriteLine("Serializing the drive component");
    SerializationPlugin.Serialize(net, componentLoadFile);

    John.Die();
}
public static void Initialize()
{
    World.LoggingLevel = TraceLevel.Off;

    John = World.NewAgent("John");
    SimplifiedQBPNetwork net = AgentInitializer.InitializeImplicitDecisionNetwork(John, SimplifiedQBPNetwork.Factory);

    net.Input.Add(World.NewDimensionValuePair("Boolean 1", true));
    net.Input.Add(World.NewDimensionValuePair("Boolean 1", false));
    net.Input.Add(World.NewDimensionValuePair("Boolean 2", true));
    net.Input.Add(World.NewDimensionValuePair("Boolean 2", false));
    net.Output.Add(World.NewExternalActionChunk(true));
    net.Output.Add(World.NewExternalActionChunk(false));

    John.ACS.Parameters.PERFORM_RER_REFINEMENT = false;
    John.ACS.Parameters.SELECTION_TEMPERATURE = .01;
    John.ACS.Parameters.LEVEL_SELECTION_OPTION = ActionCenteredSubsystem.LevelSelectionOptions.FIXED;
    John.ACS.Parameters.FIXED_IRL_LEVEL_SELECTION_MEASURE = 0;
    John.ACS.Parameters.FIXED_FR_LEVEL_SELECTION_MEASURE = 0;

    //Tweak these parameters to see the impact each has on accuracy and learning
    net.Parameters.LEARNING_RATE = 1;
    net.Parameters.MOMENTUM = .02;
    John.ACS.Parameters.FIXED_BL_LEVEL_SELECTION_MEASURE = .5;
    John.ACS.Parameters.FIXED_RER_LEVEL_SELECTION_MEASURE = .5;

    John.Commit(net);
}
/// <summary>
/// Setup the ACS subsystem
/// </summary>
private void SetupACS()
{
    // Create Rule to avoid collision with wall
    SupportCalculator avoidCollisionWallSupportCalculator = FixedRuleToAvoidCollisionWall;
    FixedRule ruleAvoidCollisionWall = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputRotateClockwise, avoidCollisionWallSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleAvoidCollisionWall);

    // Create Rule to go to an item
    SupportCalculator goItemSupportCalculator = FixedRuleToGoItem;
    FixedRule ruleGoItem = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputGoItem, goItemSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleGoItem);

    // Create Rule to sack an item
    SupportCalculator sackItItemSupportCalculator = FixedRuleToSackItItem;
    FixedRule ruleSackItItem = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputSackItItem, sackItItemSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleSackItItem);

    // Create Rule to eat an item
    SupportCalculator eatItemSupportCalculator = FixedRuleToEatItem;
    FixedRule ruleEatItem = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputEatItem, eatItemSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleEatItem);

    // Create Rule to stop the creature
    SupportCalculator stopCreatureSupportCalculator = FixedRuleToStopCreature;
    FixedRule ruleStopCreature = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputStopCreature, stopCreatureSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleStopCreature);

    // Disable Rule Refinement
    CurrentAgent.ACS.Parameters.PERFORM_RER_REFINEMENT = false;
    // Level selection will be stochastic (probabilistic)
    CurrentAgent.ACS.Parameters.LEVEL_SELECTION_METHOD = ActionCenteredSubsystem.LevelSelectionMethods.STOCHASTIC;
    // The level selection measures are fixed (not variable), i.e. only the values defined below
    CurrentAgent.ACS.Parameters.LEVEL_SELECTION_OPTION = ActionCenteredSubsystem.LevelSelectionOptions.FIXED;
    // Define the fixed selection probabilities
    CurrentAgent.ACS.Parameters.FIXED_FR_LEVEL_SELECTION_MEASURE = 1;
    CurrentAgent.ACS.Parameters.FIXED_IRL_LEVEL_SELECTION_MEASURE = 0;
    CurrentAgent.ACS.Parameters.FIXED_BL_LEVEL_SELECTION_MEASURE = 0;
    CurrentAgent.ACS.Parameters.FIXED_RER_LEVEL_SELECTION_MEASURE = 0;
}
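Each FixedRule above is driven by a SupportCalculator delegate such as FixedRuleToAvoidCollisionWall, which is referenced but not shown in this snippet. A plausible sketch of one follows, assuming Clarion's SupportCalculator signature of (ActivationCollection, Rule) returning a support value in [0, 1]; the "Sensor"/"WallAhead" dimension-value pair is a hypothetical placeholder, not part of the original code:

// Hypothetical support calculator: full support when the wall sensor fires, none otherwise.
private double FixedRuleToAvoidCollisionWall(ActivationCollection currentInput, Rule target)
{
    // The "WallAhead" pair is a placeholder for whatever sensory encoding the simulation uses.
    return (currentInput[World.GetDimensionValuePair("Sensor", "WallAhead")] == 1) ? 1.0 : 0.0;
}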
/// <summary>
/// Setup the ACS subsystem
/// </summary>
private void SetupACS()
{
    // Create Rule to rotate
    SupportCalculator rotateSupportCalculator = FixedRuleToRotate;
    FixedRule ruleRotate = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputRotateClockwise, rotateSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleRotate);

    // Create Rule to Go Ahead
    SupportCalculator goAheadSupportCalculator = FixedRuleToGoAhead;
    FixedRule ruleGoAhead = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputGoAhead, goAheadSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleGoAhead);

    // Create Rule to Eat
    SupportCalculator eatSupportCalculator = FixedRuleToEat;
    FixedRule ruleEat = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputEat, eatSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleEat);

    // Create Rule to Sack
    SupportCalculator sackSupportCalculator = FixedRuleToSack;
    FixedRule ruleSack = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputSack, sackSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleSack);

    // Create Rule to Hide
    SupportCalculator hideSupportCalculator = FixedRuleToHide;
    FixedRule ruleHide = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputHide, hideSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleHide);

    // Create Rule to Stop
    SupportCalculator stopSupportCalculator = FixedRuleToStop;
    FixedRule ruleStop = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputStop, stopSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleStop);

    // Disable Rule Refinement
    CurrentAgent.ACS.Parameters.PERFORM_RER_REFINEMENT = false;
    // Level selection will be stochastic (probabilistic)
    CurrentAgent.ACS.Parameters.LEVEL_SELECTION_METHOD = ActionCenteredSubsystem.LevelSelectionMethods.STOCHASTIC;
    // The level selection measures are fixed (not variable), i.e. only the values defined below
    CurrentAgent.ACS.Parameters.LEVEL_SELECTION_OPTION = ActionCenteredSubsystem.LevelSelectionOptions.FIXED;
    // Define the fixed selection probabilities
    CurrentAgent.ACS.Parameters.FIXED_FR_LEVEL_SELECTION_MEASURE = 1;
    CurrentAgent.ACS.Parameters.FIXED_IRL_LEVEL_SELECTION_MEASURE = 0;
    CurrentAgent.ACS.Parameters.FIXED_BL_LEVEL_SELECTION_MEASURE = 0;
    CurrentAgent.ACS.Parameters.FIXED_RER_LEVEL_SELECTION_MEASURE = 0;
}
static void SetupBPNetwork(Agent reasoner)
{
    //Chunks for the whales, tuna, and bears
    DeclarativeChunk TunaChunk = World.NewDeclarativeChunk("Tuna");
    DeclarativeChunk WhaleChunk = World.NewDeclarativeChunk("Whale");
    DeclarativeChunk BearChunk = World.NewDeclarativeChunk("Bear");

    //The 2 properties (as DV pairs)
    DimensionValuePair livesinwater = World.NewDimensionValuePair("lives in", "water");
    DimensionValuePair eatsfish = World.NewDimensionValuePair("eats", "fish");

    //The BP network to be used in the bottom level of the NACS
    BPNetwork net = AgentInitializer.InitializeAssociativeMemoryNetwork(reasoner, BPNetwork.Factory);

    //Adds the properties (as inputs) and chunks (as outputs) to the BP network
    net.Input.Add(livesinwater);
    net.Input.Add(eatsfish);
    net.Output.Add(TunaChunk);
    net.Output.Add(WhaleChunk);
    net.Output.Add(BearChunk);

    reasoner.Commit(net);

    //Adds the chunks to the GKS
    reasoner.AddKnowledge(TunaChunk);
    reasoner.AddKnowledge(WhaleChunk);
    reasoner.AddKnowledge(BearChunk);

    //Initializes a trainer to use to train the BP network
    GenericEquation trainer = ImplicitComponentInitializer.InitializeTrainer(GenericEquation.Factory, (Equation)trainerEQ);

    //Adds the properties (as inputs) and chunks (as outputs) to the trainer
    trainer.Input.Add(livesinwater);
    trainer.Input.Add(eatsfish);
    trainer.Output.Add(TunaChunk);
    trainer.Output.Add(WhaleChunk);
    trainer.Output.Add(BearChunk);
    trainer.Commit();

    //Sets up data sets for each of the 2 properties
    List<ActivationCollection> sis = new List<ActivationCollection>();
    ActivationCollection si = ImplicitComponentInitializer.NewDataSet();
    si.Add(livesinwater, 1);
    sis.Add(si);

    si = ImplicitComponentInitializer.NewDataSet();
    si.Add(eatsfish, 1);
    sis.Add(si);

    Console.Write("Training AMN...");

    //Trains the BP network to report associative knowledge between the properties and the chunks
    ImplicitComponentInitializer.Train(net, trainer, sis, ImplicitComponentInitializer.TrainingTerminationConditions.SUM_SQ_ERROR);

    Console.WriteLine("Finished!");
}
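The trainerEQ delegate cast to (Equation) above is referenced but not shown in this snippet. The following is a plausible sketch, assuming Clarion's Equation delegate receives an input and an output ActivationCollection and that indexed assignment is supported on the output collection (otherwise use Add); the property-to-animal mapping is illustrative:

//Hypothetical sketch of trainerEQ: activates the chunks consistent with the activated properties.
static void trainerEQ(ActivationCollection input, ActivationCollection output)
{
    //Tuna and whales live in water
    if (input[World.GetDimensionValuePair("lives in", "water")] == 1)
    {
        output[World.GetDeclarativeChunk("Tuna")] = 1;
        output[World.GetDeclarativeChunk("Whale")] = 1;
    }
    //Whales and bears eat fish
    if (input[World.GetDimensionValuePair("eats", "fish")] == 1)
    {
        output[World.GetDeclarativeChunk("Whale")] = 1;
        output[World.GetDeclarativeChunk("Bear")] = 1;
    }
}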
public void GenerateIRLRuleSet(IRL_Rule_Sets ruleSet)
{
    switch (ruleSet)
    {
        case IRL_Rule_Sets.ONE:
            var i = (from a in As
                     select from b in Bs
                            select new
                            {
                                A = World.GetDimensionValuePair("A", a),
                                B = World.GetDimensionValuePair("B", b)
                            }).SelectMany(t => t);
            for (double w = 0; w < 12; w++)
            {
                foreach (var cond in i)
                {
                    IRLRule ir = AgentInitializer.InitializeActionRule(John, IRLRule.Factory, World.GetActionChunk(w), IRLSet1_SupportCalculator, IRL_DeletionChecker);
                    ir.GeneralizedCondition.Add(cond.A, true);
                    ir.GeneralizedCondition.Add(cond.B, true);
                    John.Commit(ir);
                }
            }
            break;
        case IRL_Rule_Sets.TWO:
            var i2 = (from a in As
                      select from b in Bs
                             select from c in Cs
                                    select new
                                    {
                                        A = World.GetDimensionValuePair("A", a),
                                        B = World.GetDimensionValuePair("B", b),
                                        C = World.GetDimensionValuePair("C", c)
                                    }).SelectMany(t => t).SelectMany(t => t);
            for (double w = 0; w < 12; w++)
            {
                foreach (var cond in i2)
                {
                    IRLRule ir = AgentInitializer.InitializeActionRule(John, IRLRule.Factory, World.GetActionChunk(w), IRLSet2_SupportCalculator, IRL_DeletionChecker);
                    ir.GeneralizedCondition.Add(cond.A, true);
                    ir.GeneralizedCondition.Add(cond.B, true);
                    ir.GeneralizedCondition.Add(cond.C, true);
                    John.Commit(ir);
                }
            }
            break;
    }
}
private void Initialize()
{
    World.LoggingLevel = TraceLevel.Off;

    p1 = World.NewDimensionValuePair("Peg", 1);
    p2 = World.NewDimensionValuePair("Peg", 2);
    p3 = World.NewDimensionValuePair("Peg", 3);
    p4 = World.NewDimensionValuePair("Peg", 4);
    p5 = World.NewDimensionValuePair("Peg", 5);

    mp1 = World.NewExternalActionChunk();
    mp2 = World.NewExternalActionChunk();
    mp3 = World.NewExternalActionChunk();
    mp4 = World.NewExternalActionChunk();
    mp5 = World.NewExternalActionChunk();

    mp1 += p1;
    mp2 += p2;
    mp3 += p3;
    mp4 += p4;
    mp5 += p5;

    John = World.NewAgent();
    net = AgentInitializer.InitializeImplicitDecisionNetwork(John, SimplifiedQBPNetwork.Factory);

    net.Input.Add(p1);
    net.Input.Add(p2);
    net.Input.Add(p3);
    net.Input.Add(p4);
    net.Input.Add(p5);
    net.Output.Add(mp1);
    net.Output.Add(mp2);
    net.Output.Add(mp3);
    net.Output.Add(mp4);
    net.Output.Add(mp5);

    net.Parameters.LEARNING_RATE = 1;
    net.Parameters.MOMENTUM = .01;

    John.Commit(net);

    RefineableActionRule.GlobalParameters.GENERALIZATION_THRESHOLD_1 = -.01;
    RefineableActionRule.GlobalParameters.SPECIALIZATION_THRESHOLD_1 = -.4;
}
/// <summary>
/// Sets up the associative rules in the top level of the NACS
/// </summary>
/// <param name="reasoner">The agent in whose NACS the rules are being placed</param>
static void SetupRules(Agent reasoner)
{
    //Iterates through each of the chunks (except the last one, which has no successor to serve as a
    //conclusion) and creates an associative rule using that chunk as the condition, and the next
    //chunk in the chunks list as the conclusion.
    for (int i = 0; i < chunks.Count - 1; i++)
    {
        //Initializes the rule
        RefineableAssociativeRule ar = AgentInitializer.InitializeAssociativeRule(reasoner, RefineableAssociativeRule.Factory, chunks[i + 1]);
        //Specifies that the current chunk must be activated as part of the condition for the rule
        ar.GeneralizedCondition.Add(chunks[i], true);
        //Commits the rule
        reasoner.Commit(ar);
    }
}
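The chunks list that SetupRules iterates over is assumed to be populated elsewhere. The following is a minimal sketch of such a setup, using the chunk and dimension-value-pair calls seen in the other snippets; the chunk count and contents are placeholders:

//Hypothetical setup for the chunks list used by SetupRules; contents are placeholders.
static List<DeclarativeChunk> chunks = new List<DeclarativeChunk>();

static void InitializeChunks()
{
    for (int i = 0; i < 5; i++)
    {
        DeclarativeChunk dc = World.NewDeclarativeChunk(i);
        //Attach a dimension-value pair describing this chunk's pattern
        dc += World.NewDimensionValuePair("Slot", i);
        chunks.Add(dc);
    }
}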
public static void Main()
{
    Agent reasoner = World.NewAgent();
    InitializeWorld(reasoner);

    //Adds all of the declarative chunks to the GKS
    foreach (DeclarativeChunk dc in chunks)
    {
        reasoner.AddKnowledge(dc);
    }

    //Initializes the Hopfield network in the bottom level of the NACS
    HopfieldNetwork net = AgentInitializer.InitializeAssociativeMemoryNetwork(reasoner, HopfieldNetwork.Factory);

    //Specifies all of the dimension-value pairs as nodes for the Hopfield network
    net.Nodes.AddRange(dvs);

    //Commits the Hopfield network
    reasoner.Commit(net);

    //Encodes the patterns into the Hopfield network
    EncodeHopfieldNetwork(net);

    //Sets up the rules in the top level of the NACS
    SetupRules(reasoner);

    //Specifies that the NACS should perform 2 reasoning iterations
    reasoner.NACS.Parameters.REASONING_ITERATION_COUNT = 2;
    //Sets the conclusion threshold to 1
    //(indicating that only fully matched conclusions should be returned)
    reasoner.NACS.Parameters.CONCLUSION_THRESHOLD = 1;

    //Initiates reasoning and outputs the results
    DoReasoning(reasoner);

    //Kills the reasoning agent
    reasoner.Die();

    Console.WriteLine("Press any key to exit");
    Console.ReadKey();
}
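EncodeHopfieldNetwork is referenced but not shown. The following is a plausible sketch under the assumption that a Hopfield network can be trained directly on pattern data sets via ImplicitComponentInitializer.Train (the trainer-less overload is an assumption; the patterns are built from the stored chunks' dimension-value pairs):

//Hypothetical sketch of EncodeHopfieldNetwork: presents each chunk's pattern to the network.
static void EncodeHopfieldNetwork(HopfieldNetwork net)
{
    List<ActivationCollection> patterns = new List<ActivationCollection>();
    foreach (DeclarativeChunk dc in chunks)
    {
        ActivationCollection p = ImplicitComponentInitializer.NewDataSet();
        foreach (DimensionValuePair dv in dc)
        {
            p.Add(dv, 1);
        }
        patterns.Add(p);
    }
    //Assumed trainer-less overload for auto-associative encoding
    ImplicitComponentInitializer.Train(net, patterns);
}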
static void SerializeWorld()
{
    Agent John;
    BPNetwork net;
    FoodDrive foodDr;

    if (load && File.Exists(worldLoadFile))
    {
        Console.WriteLine("Deserializing the world");
        SerializationPlugin.DeserializeWorld(worldLoadFile);
        John = World.GetAgent("John");
        foodDr = (FoodDrive)John.GetInternals(Agent.InternalContainers.DRIVES).First();
        net = (BPNetwork)foodDr.DriveComponent;
    }
    else
    {
        Console.WriteLine("Initializing the world");
        World.LoggingLevel = TraceLevel.Warning;
        John = World.NewAgent("John");
        foodDr = AgentInitializer.InitializeDrive(John, FoodDrive.Factory, .5);
        net = AgentInitializer.InitializeDriveComponent(foodDr, BPNetwork.Factory);
        net.Input.AddRange(Drive.GenerateTypicalInputs(foodDr));
        net.Parameters.LEARNING_RATE = .2;
        net.Parameters.MOMENTUM = .05;
        foodDr.Commit(net);
        John.Commit(foodDr);
    }

    DoTraining(net, foodDr);

    Console.WriteLine("Serializing the world");
    SerializationPlugin.SerializeWorld(worldLoadFile);

    John.Die();
}
public void Init()
{
    MyAgent = World.NewAgent(MyHero.Name);

    // set up the network and learning parameters for the agent
    SimplifiedQBPNetwork net = AgentInitializer.InitializeImplicitDecisionNetwork(MyAgent, SimplifiedQBPNetwork.Factory);

    for (int i = 0; i < HealthInputs.GetLength(0); i++)
    {
        // net.Input.Add(HealthInputs[i]);
        // net.Input.Add(ManaInputs[i]);
    }

    for (int i = 0; i < InventoryInputs.GetLength(0); i++)
    {
        for (int j = 0; j < InventoryInputs.GetLength(1); j++)
        {
            net.Input.Add(InventoryInputs[i, j]);
        }
        net.Output.Add(PurchaseActions[i]);
    }

    FixedRule fr;
    for (int i = 0; i < PurchaseActions.Length; i++)
    {
        fr = AgentInitializer.InitializeActionRule(MyAgent, FixedRule.Factory, PurchaseActions[i], ImitativeSupportDelegate);
        // fr.GeneralizedCondition.Add(Inputs[i], true);
        MyAgent.Commit(fr);
    }
    // AddRules();

    //net.Parameters.LEARNING_RATE = 0.5;
    net.Parameters.LEARNING_RATE = 2.0;
    // net.Parameters.MOMENTUM = 0.02;
    MyAgent.Commit(net);

    MyAgent.ACS.Parameters.SELECTION_TEMPERATURE = 0.05;
    MyAgent.ACS.Parameters.DELETION_FREQUENCY = 100;
    MyAgent.ACS.Parameters.LEVEL_SELECTION_METHOD = ActionCenteredSubsystem.LevelSelectionMethods.COMBINED;
    MyAgent.ACS.Parameters.LEVEL_SELECTION_OPTION = ActionCenteredSubsystem.LevelSelectionOptions.FIXED;

    // set up the probabilities used to select which system will be chosen to select an action
    // at each step (should total 1.0):
    // BL - bottom level (reinforcement learning neural net)
    // RER - rule extraction and refinement - extracts rules from the bottom level
    // IRL - independent rule learning - does not use the bottom level for learning rules
    // FR - fixed rules - Clarion cannot change these (though they can be added/removed externally)
    // We are currently using fixed rules when we want the agent to imitate the human player and train
    // the bottom level
    MyAgent.ACS.Parameters.FIXED_BL_LEVEL_SELECTION_MEASURE = 0.33;
    MyAgent.ACS.Parameters.FIXED_RER_LEVEL_SELECTION_MEASURE = 0.33;
    MyAgent.ACS.Parameters.FIXED_IRL_LEVEL_SELECTION_MEASURE = 0;
    MyAgent.ACS.Parameters.FIXED_FR_LEVEL_SELECTION_MEASURE = 0.33;

    /*
     * MyAgent.ACS.Parameters.FIXED_BL_LEVEL_SELECTION_MEASURE = 0.75;
     * MyAgent.ACS.Parameters.FIXED_RER_LEVEL_SELECTION_MEASURE = 0.25;
     * MyAgent.ACS.Parameters.FIXED_IRL_LEVEL_SELECTION_MEASURE = 0;
     * MyAgent.ACS.Parameters.FIXED_FR_LEVEL_SELECTION_MEASURE = 0;
     */

    //MyAgent.ACS.Parameters.VARIABLE_BL_BETA = 0.5;
    //MyAgent.ACS.Parameters.VARIABLE_RER_BETA = 0.5;
    //MyAgent.ACS.Parameters.VARIABLE_IRL_BETA = 0;
    //MyAgent.ACS.Parameters.VARIABLE_FR_BETA = 0;
}
static void Main(string[] args)
{
    //Initialize the task
    Console.WriteLine("Initializing the Simple Hello World Task");
    int CorrectCounter = 0;
    int NumberTrials = 10000;
    int progress = 0;
    World.LoggingLevel = TraceLevel.Off;
    TextWriter orig = Console.Out;
    StreamWriter sw = File.CreateText("HelloWorldSimple.txt");

    DimensionValuePair hi = World.NewDimensionValuePair("Salutation", "Hello");
    DimensionValuePair bye = World.NewDimensionValuePair("Salutation", "Goodbye");
    ExternalActionChunk sayHi = World.NewExternalActionChunk("Hello");
    ExternalActionChunk sayBye = World.NewExternalActionChunk("Goodbye");

    //Initialize the Agent
    Agent John = World.NewAgent("John");
    SimplifiedQBPNetwork net = AgentInitializer.InitializeImplicitDecisionNetwork(John, SimplifiedQBPNetwork.Factory);

    net.Input.Add(hi);
    net.Input.Add(bye);
    net.Output.Add(sayHi);
    net.Output.Add(sayBye);

    John.Commit(net);

    net.Parameters.LEARNING_RATE = 1;
    John.ACS.Parameters.PERFORM_RER_REFINEMENT = false;

    //Run the task
    Console.WriteLine("Running the Simple Hello World Task");
    Console.SetOut(sw);

    Random rand = new Random();
    SensoryInformation si;
    ExternalActionChunk chosen;

    for (int i = 0; i < NumberTrials; i++)
    {
        si = World.NewSensoryInformation(John);

        //Randomly choose an input to perceive.
        if (rand.NextDouble() < .5)
        {
            //Say "Hello"
            si.Add(hi, John.Parameters.MAX_ACTIVATION);
            si.Add(bye, John.Parameters.MIN_ACTIVATION);
        }
        else
        {
            //Say "Goodbye"
            si.Add(hi, John.Parameters.MIN_ACTIVATION);
            si.Add(bye, John.Parameters.MAX_ACTIVATION);
        }

        //Perceive the sensory information
        John.Perceive(si);
        //Choose an action
        chosen = John.GetChosenExternalAction(si);

        //Deliver appropriate feedback to the agent
        if (chosen == sayHi)
        {
            //The agent said "Hello".
            if (si[hi] == John.Parameters.MAX_ACTIVATION)
            {
                //The agent responded correctly
                Trace.WriteLineIf(World.LoggingSwitch.TraceWarning, "John was correct");
                //Record the agent's success.
                CorrectCounter++;
                //Give positive feedback.
                John.ReceiveFeedback(si, 1.0);
            }
            else
            {
                //The agent responded incorrectly
                Trace.WriteLineIf(World.LoggingSwitch.TraceWarning, "John was incorrect");
                //Give negative feedback.
                John.ReceiveFeedback(si, 0.0);
            }
        }
        else
        {
            //The agent said "Goodbye".
            if (si[bye] == John.Parameters.MAX_ACTIVATION)
            {
                //The agent responded correctly
                Trace.WriteLineIf(World.LoggingSwitch.TraceWarning, "John was correct");
                //Record the agent's success.
                CorrectCounter++;
                //Give positive feedback.
                John.ReceiveFeedback(si, 1.0);
            }
            else
            {
                //The agent responded incorrectly
                Trace.WriteLineIf(World.LoggingSwitch.TraceWarning, "John was incorrect");
                //Give negative feedback.
                John.ReceiveFeedback(si, 0.0);
            }
        }

        Console.SetOut(orig);
        progress = (int)(((double)(i + 1) / (double)NumberTrials) * 100);
        Console.CursorLeft = 0;
        Console.Write(progress + "% Complete..");
        Console.SetOut(sw);
    }

    //Report Results
    Console.WriteLine("Reporting Results for the Simple Hello World Task");
    Console.WriteLine("John got " + CorrectCounter + " correct out of " + NumberTrials + " trials (" +
        (int)Math.Round(((double)CorrectCounter / (double)NumberTrials) * 100) + "%)");
    Console.WriteLine("At the end of the task, John had learned the following rules:");
    foreach (var i in John.GetInternals(Agent.InternalContainers.ACTION_RULES))
    {
        Console.WriteLine(i);
    }

    sw.Close();
    Console.SetOut(orig);
    Console.CursorLeft = 0;
    Console.WriteLine("100% Complete..");

    //Kill the agent to end the task
    Console.WriteLine("Killing John to end the program");
    John.Die();
    Console.WriteLine("John is Dead");
    Console.WriteLine("The Simple Hello World Task has finished");
    Console.WriteLine("The results have been saved to \"HelloWorldSimple.txt\"");
    Console.Write("Press any key to exit");
    Console.ReadKey(true);
}
public static void Main()
{
    Agent reasoner = World.NewAgent();
    InitializeWorld(reasoner);

    //Adds all of the declarative chunks to the GKS
    foreach (DeclarativeChunk dc in chunks)
    {
        reasoner.AddKnowledge(dc);
    }

    //Initializes the Hopfield network in the bottom level of the NACS
    HopfieldNetwork net = AgentInitializer.InitializeAssociativeMemoryNetwork(reasoner, HopfieldNetwork.Factory);

    //Specifies all of the dimension-value pairs as nodes for the Hopfield network
    net.Nodes.AddRange(dvs);

    //Commits the Hopfield network
    reasoner.Commit(net);

    //Encodes the patterns into the Hopfield network
    EncodeHopfieldNetwork(net);

    //Specifies that the NACS should perform 1 reasoning iteration
    reasoner.NACS.Parameters.REASONING_ITERATION_COUNT = 1;
    //Sets the conclusion threshold to 1
    //(indicating that only fully matched conclusions should be returned)
    reasoner.NACS.Parameters.CONCLUSION_THRESHOLD = 1;

    // Add Some Action Chunks for the ACS
    World.NewExternalActionChunk("Yes");
    World.NewExternalActionChunk("No");

    ReasoningRequestActionChunk think = World.NewReasoningRequestActionChunk("DoReasoning");
    think.Add(NonActionCenteredSubsystem.RecognizedReasoningActions.NEW, 1, false);

    World.NewDimensionValuePair("state", 1);
    World.NewDimensionValuePair("state", 2);
    World.NewDimensionValuePair("state", 3);

    // Add ACS Rules to use the chunks
    RefineableActionRule yes = AgentInitializer.InitializeActionRule(reasoner, RefineableActionRule.Factory, World.GetActionChunk("Yes"));
    yes.GeneralizedCondition.Add(World.GetDeclarativeChunk(1), true);
    reasoner.Commit(yes);

    RefineableActionRule no = AgentInitializer.InitializeActionRule(reasoner, RefineableActionRule.Factory, World.GetActionChunk("No"));
    no.GeneralizedCondition.Add(World.GetDeclarativeChunk(0), true, "altdim");
    no.GeneralizedCondition.Add(World.GetDeclarativeChunk(2), true, "altdim");
    no.GeneralizedCondition.Add(World.GetDeclarativeChunk(3), true, "altdim");
    no.GeneralizedCondition.Add(World.GetDeclarativeChunk(4), true, "altdim");
    reasoner.Commit(no);

    RefineableActionRule doReasoning = AgentInitializer.InitializeActionRule(reasoner, RefineableActionRule.Factory, World.GetActionChunk("DoReasoning"));
    doReasoning.GeneralizedCondition.Add(World.GetDimensionValuePair("state", 1));
    reasoner.Commit(doReasoning);

    RefineableActionRule doNothing = AgentInitializer.InitializeActionRule(reasoner, RefineableActionRule.Factory, ExternalActionChunk.DO_NOTHING);
    doNothing.GeneralizedCondition.Add(World.GetDimensionValuePair("state", 2));
    reasoner.Commit(doNothing);

    reasoner.ACS.Parameters.PERFORM_RER_REFINEMENT = false;
    reasoner.ACS.Parameters.FIXED_BL_LEVEL_SELECTION_MEASURE = 0;
    reasoner.ACS.Parameters.FIXED_RER_LEVEL_SELECTION_MEASURE = 1;
    reasoner.ACS.Parameters.FIXED_IRL_LEVEL_SELECTION_MEASURE = 0;
    reasoner.ACS.Parameters.FIXED_FR_LEVEL_SELECTION_MEASURE = 0;
    reasoner.ACS.Parameters.LEVEL_SELECTION_METHOD = ActionCenteredSubsystem.LevelSelectionMethods.STOCHASTIC;
    reasoner.ACS.Parameters.LEVEL_SELECTION_OPTION = ActionCenteredSubsystem.LevelSelectionOptions.FIXED;
    reasoner.ACS.Parameters.NACS_REASONING_ACTION_PROBABILITY = 1;
    reasoner.ACS.Parameters.EXTERNAL_ACTION_PROBABILITY = 1;
    reasoner.NACS.Parameters.REASONING_ITERATION_TIME = 3000;

    //Initiates the simulation and outputs the results
    Run(reasoner);

    //Kills the reasoning agent
    reasoner.Die();

    Console.WriteLine("Press any key to exit");
    Console.ReadKey();
}
public void Initialize(Groups group)
{
    World.Initialize();
    John = World.NewAgent();
    QBPNetwork idn = AgentInitializer.InitializeImplicitDecisionNetwork(John, QBPNetwork.Factory);

    World.NewDimensionValuePair("Target P", target);
    World.NewDimensionValuePair("Current P", target);
    World.NewExternalActionChunk(target);

    for (double i = 0; i < 12; i++)
    {
        if (World.GetDimensionValuePair("Target P", i) == null)
        {
            idn.Input.Add(World.NewDimensionValuePair("Target P", i));
            idn.Input.Add(World.NewDimensionValuePair("Current P", i));
            idn.Input.Add(World.NewExternalActionChunk(i));
            idn.Output.Add(World.GetActionChunk(i));
        }
        else
        {
            idn.Input.Add(World.GetDimensionValuePair("Target P", i));
            idn.Input.Add(World.GetDimensionValuePair("Current P", i));
            idn.Input.Add(World.GetActionChunk(i));
            idn.Output.Add(World.GetActionChunk(i));
        }
    }

    foreach (double i in As)
    {
        World.NewDimensionValuePair("A", i);
    }
    foreach (double i in Bs)
    {
        World.NewDimensionValuePair("B", i);
    }
    foreach (double i in Cs)
    {
        World.NewDimensionValuePair("C", i);
    }

    switch (group)
    {
        case Groups.VERBALIZATION:
            idn.Parameters.POSITIVE_MATCH_THRESHOLD = 1;
            RefineableActionRule.GlobalParameters.POSITIVE_MATCH_THRESHOLD = 1;
            RefineableActionRule.GlobalParameters.GENERALIZATION_THRESHOLD_1 = 1;
            RefineableActionRule.GlobalParameters.SPECIALIZATION_THRESHOLD_1 = .5;
            threshold_4 = .5;
            break;
        case Groups.MEMORY:
            for (double i = 0; i < 12; i++)
            {
                ExternalActionChunk w = (ExternalActionChunk)World.GetActionChunk((double)rand.Next(12));
                var p = World.GetDimensionValuePair("Current P", FactoryOutput(i, (double)w.LabelAsIComparable));
                ExternalActionChunk w1 = (ExternalActionChunk)World.GetActionChunk(Math.Round((target + p.Value + NoiseOptions[rand.Next(3)]) / 2));
                FixedRule mfr = AgentInitializer.InitializeActionRule(John, FixedRule.Factory, w1, MemoryGroup_SupportCalculator);
                mfr.GeneralizedCondition.Add(p, true);
                mfr.GeneralizedCondition.Add(w, true);
                John.Commit(mfr);
            }
            goto default;
        case Groups.SIMPLE_RULE:
            for (double i = 0; i < 12; i++)
            {
                FixedRule sfr = AgentInitializer.InitializeActionRule(John, FixedRule.Factory, World.GetActionChunk(i), SimpleRule_SupportCalculator);
                John.Commit(sfr);
            }
            goto default;
        default:
            idn.Parameters.LEARNING_RATE = .05;
            idn.Parameters.DISCOUNT = .95;
            John.ACS.Parameters.SELECTION_TEMPERATURE = .09;
            idn.Parameters.POSITIVE_MATCH_THRESHOLD = 1;
            RefineableActionRule.GlobalParameters.GENERALIZATION_THRESHOLD_1 = 2;
            RefineableActionRule.GlobalParameters.SPECIALIZATION_THRESHOLD_1 = 1.2;
            RefineableActionRule.GlobalParameters.POSITIVE_MATCH_THRESHOLD = 1;
            threshold_4 = .2;
            break;
    }

    RefineableActionRule.GlobalParameters.INFORMATION_GAIN_OPTION = RefineableActionRule.IGOptions.PERFECT;

    John.Commit(idn);
}
public static void InitializeAgent(Groups gr)
{
    Participant = World.NewAgent();
    BPNetwork idn = AgentInitializer.InitializeImplicitDecisionNetwork(Participant, BPNetwork.Factory);

    idn.Input.AddRange(dvs);
    idn.Output.AddRange(acts);
    Participant.Commit(idn);

    foreach (DeclarativeChunk t in tools)
    {
        RefineableActionRule a = AgentInitializer.InitializeActionRule(Participant, RefineableActionRule.Factory, World.GetActionChunk("Tool"));
        foreach (DimensionValuePair dv in t)
        {
            a.GeneralizedCondition.Add(dv, true);
        }
        Participant.Commit(a);
    }

    foreach (DeclarativeChunk g in guns)
    {
        RefineableActionRule a = AgentInitializer.InitializeActionRule(Participant, RefineableActionRule.Factory, World.GetActionChunk("Gun"));
        foreach (DimensionValuePair dv in g)
        {
            a.GeneralizedCondition.Add(dv, true);
        }
        Participant.Commit(a);
    }

    Participant.ACS.Parameters.PERFORM_RER_REFINEMENT = false;
    Participant.ACS.Parameters.PERFORM_DELETION_BY_DENSITY = false;
    Participant.ACS.Parameters.FIXED_BL_LEVEL_SELECTION_MEASURE = 1;
    Participant.ACS.Parameters.FIXED_RER_LEVEL_SELECTION_MEASURE = 1;
    Participant.ACS.Parameters.FIXED_IRL_LEVEL_SELECTION_MEASURE = 0;
    Participant.ACS.Parameters.FIXED_FR_LEVEL_SELECTION_MEASURE = 0;
    Participant.ACS.Parameters.B = 1;

    HonorDrive honor = AgentInitializer.InitializeDrive(Participant, HonorDrive.Factory, r.NextDouble());
    GenericEquation hd = AgentInitializer.InitializeDriveComponent(honor, GenericEquation.Factory, (Equation)TangentEquation);
    var ins = Drive.GenerateTypicalInputs(honor);

    ParameterChangeActionChunk pac = World.NewParameterChangeActionChunk();
    pac.Add(Participant.ACS, "MCS_RER_SELECTION_MEASURE", .5);

    hd.Input.AddRange(ins);
    hd.Parameters.MAX_ACTIVATION = 5;
    honor.Commit(hd);
    honor.Parameters.DRIVE_GAIN = (gr == Groups.PRIVATE) ? .1 / 5 : .2 / 5;
    Participant.Commit(honor);

    ParameterSettingModule lpm = AgentInitializer.InitializeMetaCognitiveModule(Participant, ParameterSettingModule.Factory);
    ACSLevelProbabilitySettingEquation lpe = AgentInitializer.InitializeMetaCognitiveDecisionNetwork(lpm, ACSLevelProbabilitySettingEquation.Factory, Participant);
    lpe.Input.Add(honor.GetDriveStrength());
    lpm.Commit(lpe);
    Participant.Commit(lpm);
    lpm.Parameters.FIXED_BL_LEVEL_SELECTION_MEASURE = 1;
    lpm.Parameters.FIXED_RER_LEVEL_SELECTION_MEASURE = 0;

    //Pre-train the IDN in the ACS
    PreTrainACS(idn);
}
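PreTrainACS is referenced but not defined in this snippet. The following is a plausible sketch that reuses the trainer pattern from the SetupBPNetwork example above; the PreTrainEQ equation delegate, the use of only the tools exemplars, and the target activations are all hypothetical:

//Hypothetical sketch of PreTrainACS: pre-trains the ACS decision network on the exemplars.
static void PreTrainACS(BPNetwork idn)
{
    //PreTrainEQ is a hypothetical Equation delegate mapping features to the correct action
    GenericEquation trainer = ImplicitComponentInitializer.InitializeTrainer(GenericEquation.Factory, (Equation)PreTrainEQ);
    trainer.Input.AddRange(dvs);
    trainer.Output.AddRange(acts);
    trainer.Commit();

    //One data set per exemplar chunk, activating that chunk's features
    List<ActivationCollection> sets = new List<ActivationCollection>();
    foreach (DeclarativeChunk t in tools)
    {
        ActivationCollection ds = ImplicitComponentInitializer.NewDataSet();
        foreach (DimensionValuePair dv in t)
        {
            ds.Add(dv, 1);
        }
        sets.Add(ds);
    }

    ImplicitComponentInitializer.Train(idn, trainer, sets, ImplicitComponentInitializer.TrainingTerminationConditions.SUM_SQ_ERROR);
}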
/// <summary>
/// Setup the ACS subsystem
/// </summary>
private void SetupACS()
{
    // Create Rule to avoid collision with wall
    SupportCalculator avoidCollisionWallSupportCalculator = FixedRuleToAvoidCollisionWall;
    FixedRule ruleAvoidCollisionWall = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputRotateClockwise, avoidCollisionWallSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleAvoidCollisionWall);

    // Create Rule To Go Ahead
    SupportCalculator goAheadSupportCalculator = FixedRuleToGoAhead;
    FixedRule ruleGoAhead = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputGoAhead, goAheadSupportCalculator);
    // Commit this rule to Agent (in the ACS) - FMT commenting to avoid conflict
    //CurrentAgent.Commit(ruleGoAhead); // FMT 29/04/2017

    // FMT Create Rule to Eat
    SupportCalculator eatSupportCalculator = FixedRuleToEat;
    FixedRule ruleEat = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputEat, eatSupportCalculator);

    CurrentAgent.ACS.Parameters.PERFORM_RER_REFINEMENT = true;
    CurrentAgent.ACS.Parameters.LEVEL_SELECTION_METHOD = ActionCenteredSubsystem.LevelSelectionMethods.COMBINED;
    CurrentAgent.ACS.Parameters.LEVEL_SELECTION_OPTION = ActionCenteredSubsystem.LevelSelectionOptions.FIXED;
    CurrentAgent.ACS.Parameters.FIXED_FR_LEVEL_SELECTION_MEASURE = 1;
    CurrentAgent.ACS.Parameters.FIXED_BL_LEVEL_SELECTION_MEASURE = 1;
    CurrentAgent.ACS.Parameters.FIXED_RER_LEVEL_SELECTION_MEASURE = 1;
    CurrentAgent.ACS.Parameters.WM_UPDATE_ACTION_PROBABILITY = 1;

    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleEat);

    // FMT Create Rule to Get
    SupportCalculator getSupportCalculator = FixedRuleToGet;
    FixedRule ruleGet = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputGet, getSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleGet);

    // FMT Create Rule to Hide
    SupportCalculator hideSupportCalculator = FixedRuleToHide;
    FixedRule ruleHide = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputHide, hideSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleHide);

    // FMT Create Rule to Go To
    SupportCalculator gotoSupportCalculator = FixedRuleToGoTo;
    FixedRule ruleGoto = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputGoTo, gotoSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleGoto);

    // FMT Create Rule to Stop
    SupportCalculator stopSupportCalculator = FixedRuleToStop;
    FixedRule ruleStop = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputStop, stopSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleStop);

    // Disable Rule Refinement
    CurrentAgent.ACS.Parameters.PERFORM_RER_REFINEMENT = false;
    // Level selection will be stochastic (probabilistic)
    CurrentAgent.ACS.Parameters.LEVEL_SELECTION_METHOD = ActionCenteredSubsystem.LevelSelectionMethods.STOCHASTIC;
    // The level selection measures are fixed (not variable), i.e. only the values defined below
    CurrentAgent.ACS.Parameters.LEVEL_SELECTION_OPTION = ActionCenteredSubsystem.LevelSelectionOptions.FIXED;
    // Define the fixed selection probabilities
    CurrentAgent.ACS.Parameters.FIXED_FR_LEVEL_SELECTION_MEASURE = 1;
    CurrentAgent.ACS.Parameters.FIXED_IRL_LEVEL_SELECTION_MEASURE = 0;
    CurrentAgent.ACS.Parameters.FIXED_BL_LEVEL_SELECTION_MEASURE = 0;
    CurrentAgent.ACS.Parameters.FIXED_RER_LEVEL_SELECTION_MEASURE = 0;

    // FMT 13/05/2017 additional setting for network
    SimplifiedQBPNetwork net = AgentInitializer.InitializeImplicitDecisionNetwork(CurrentAgent, SimplifiedQBPNetwork.Factory);
    net.Parameters.LEARNING_RATE = 1;
    CurrentAgent.ACS.Parameters.PERFORM_RER_REFINEMENT = false;
}
/// <summary>
/// Setup the ACS subsystem
/// </summary>
private void SetupACS()
{
    // Create Rule to avoid collision with wall
    SupportCalculator avoidCollisionWallSupportCalculator = FixedRuleToAvoidCollisionWall;
    FixedRule ruleAvoidCollisionWall = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputRotateClockwise, avoidCollisionWallSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleAvoidCollisionWall);

    // Create Rule to Wander
    SupportCalculator wanderSupportCalculator = FixedRuleToWander;
    FixedRule ruleWander = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputWander, wanderSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleWander);

    // Create Rule To Get Jewel
    SupportCalculator getJewelSupportCalculator = FixedRuleToGetJewel;
    FixedRule ruleGetJewel = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputGetJewel, getJewelSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleGetJewel);

    // Create Rule To Get Food
    SupportCalculator getFoodSupportCalculator = FixedRuleToGetFood;
    FixedRule ruleGetFood = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputGetFood, getFoodSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleGetFood);

    // Create Rule To Go To Jewel
    SupportCalculator goToJewelSupportCalculator = FixedRuleToGoToJewel;
    FixedRule ruleGoToJewel = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputGoToJewel, goToJewelSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleGoToJewel);

    // Create Rule To Go To Food
    SupportCalculator goToFoodSupportCalculator = FixedRuleToGoToFood;
    FixedRule ruleGoToFood = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputGoToFood, goToFoodSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleGoToFood);

    // Create Rule To Go To the Delivery Spot
    SupportCalculator goToDeliverySpotSupportCalculator = FixedRuleToGoToDeliverySpot;
    FixedRule ruleGoToDeliverySpot = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputGoToDeliverySpot, goToDeliverySpotSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleGoToDeliverySpot);

    // Create Rule To Deliver Jewels
    SupportCalculator deliverSupportCalculator = FixedRuleToDeliver;
    FixedRule ruleDeliver = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputDoDelivery, deliverSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleDeliver);

    // Disable Rule Refinement
    CurrentAgent.ACS.Parameters.PERFORM_RER_REFINEMENT = false;
    // Level selection will be stochastic (probabilistic)
    CurrentAgent.ACS.Parameters.LEVEL_SELECTION_METHOD = ActionCenteredSubsystem.LevelSelectionMethods.STOCHASTIC;
    // The level selection measures are fixed (not variable), i.e. only the values defined below
    CurrentAgent.ACS.Parameters.LEVEL_SELECTION_OPTION = ActionCenteredSubsystem.LevelSelectionOptions.FIXED;
    // Define the fixed selection probabilities
    CurrentAgent.ACS.Parameters.FIXED_FR_LEVEL_SELECTION_MEASURE = 1;
    CurrentAgent.ACS.Parameters.FIXED_IRL_LEVEL_SELECTION_MEASURE = 0;
    CurrentAgent.ACS.Parameters.FIXED_BL_LEVEL_SELECTION_MEASURE = 0;
    CurrentAgent.ACS.Parameters.FIXED_RER_LEVEL_SELECTION_MEASURE = 0;
}
/// <summary>
/// Setup the ACS subsystem
/// </summary>
private void SetupACS()
{
    // Create Rule To Stop when all leaflets have been delivered (success)
    SupportCalculator stopWhenFinishedSupportCalculator = FixedRuleToStopWhenFinished;
    FixedRule ruleStopWhenFinished = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputStop, stopWhenFinishedSupportCalculator);
    ruleStopWhenFinished.Parameters.WEIGHT = 1;
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleStopWhenFinished);

    // Create Rule to Eat Food
    SupportCalculator eatFoodSupportCalculator = FixedRuleToEatFood;
    FixedRule ruleEatFood = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputEatFood, eatFoodSupportCalculator);
    ruleEatFood.Parameters.WEIGHT = 0.9;
    ruleEatFood.Parameters.PARTIAL_MATCH_ON = true;
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleEatFood);

    // Create Rule to Collect (Sack) Jewel
    SupportCalculator sackJewelSupportCalculator = FixedRuleToSackJewel;
    FixedRule ruleSackJewel = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputSackJewel, sackJewelSupportCalculator);
    ruleSackJewel.Parameters.WEIGHT = 0.9;
    ruleSackJewel.Parameters.PARTIAL_MATCH_ON = true;
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleSackJewel);

    // Create Rule to avoid collision with wall
    SupportCalculator avoidCollisionWallSupportCalculator = FixedRuleToAvoidCollisionWall;
    FixedRule ruleAvoidCollisionWall = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputRotateClockwise, avoidCollisionWallSupportCalculator);
    ruleAvoidCollisionWall.Parameters.WEIGHT = 0.8;
    ruleAvoidCollisionWall.Parameters.PARTIAL_MATCH_ON = true;
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleAvoidCollisionWall);

    // Create Rule To Go Ahead
    //SupportCalculator goAheadSupportCalculator = FixedRuleToGoAhead;
    //FixedRule ruleGoAhead = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputGoAhead, goAheadSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    //CurrentAgent.Commit(ruleGoAhead);

    // Create Rule To Go To Closest Jewel
    SupportCalculator goToClosestJewelSupportCalculator = FixedRuleToGoToClosestJewel;
    FixedRule ruleGoToClosestJewel = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputGoToClosestJewel, goToClosestJewelSupportCalculator);
    ruleGoToClosestJewel.Parameters.WEIGHT = 0.7;
    ruleGoToClosestJewel.Parameters.PARTIAL_MATCH_ON = true;
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleGoToClosestJewel);

    // Create Rule To Go To Closest Food
    SupportCalculator goToClosestFoodSupportCalculator = FixedRuleToGoToClosestFood;
    FixedRule ruleGoToClosestFood = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputGoToClosestFood, goToClosestFoodSupportCalculator);
    ruleGoToClosestFood.Parameters.WEIGHT = 0.6;
    ruleGoToClosestFood.Parameters.PARTIAL_MATCH_ON = true;
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleGoToClosestFood);

    // Create Rule to Deliver a Leaflet
    SupportCalculator deliverLeafletSupportCalculator = FixedRuleToDeliverLeaflet;
    FixedRule ruleDeliverLeaflet = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputDeliverLeaflet, deliverLeafletSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleDeliverLeaflet);

    // Disable Rule Refinement
    CurrentAgent.ACS.Parameters.PERFORM_RER_REFINEMENT = false;
    // Level selection will be stochastic (probabilistic)
    CurrentAgent.ACS.Parameters.LEVEL_SELECTION_METHOD = ActionCenteredSubsystem.LevelSelectionMethods.STOCHASTIC;
    // The level selection measures are fixed (not variable), i.e. only the values defined below
    CurrentAgent.ACS.Parameters.LEVEL_SELECTION_OPTION = ActionCenteredSubsystem.LevelSelectionOptions.FIXED;
    // Define the fixed selection probabilities
    CurrentAgent.ACS.Parameters.FIXED_FR_LEVEL_SELECTION_MEASURE = 1;
    CurrentAgent.ACS.Parameters.FIXED_IRL_LEVEL_SELECTION_MEASURE = 0;
    CurrentAgent.ACS.Parameters.FIXED_BL_LEVEL_SELECTION_MEASURE = 0;
    CurrentAgent.ACS.Parameters.FIXED_RER_LEVEL_SELECTION_MEASURE = 0;
}
public void Initialize()
{
    // Dimension Value Pairs:
    sayWhat = World.NewDimensionValuePair("YourAction", "What do you want to do?");

    // External Action Chunks:
    sayCooperate = World.NewExternalActionChunk("Cooperate");
    sayDefect = World.NewExternalActionChunk("Defect");

    // placeholder
    // GoalChunk salute = World.NewGoalChunk("Salute");
    // GoalChunk bidFarewell = World.NewGoalChunk("Bid Farewell");

    // WM Actions:
    wmuacC = World.NewWorkingMemoryUpdateActionChunk("Remember my opponent cooperated");
    wmuacD = World.NewWorkingMemoryUpdateActionChunk("Remember my opponent defected");

    DeclarativeChunk dcoc = World.NewDeclarativeChunk("My opponent cooperated");
    DeclarativeChunk dcod = World.NewDeclarativeChunk("My opponent defected");

    wmuacC.Add(WorkingMemory.RecognizedActions.SET_RESET, dcoc);
    wmuacD.Add(WorkingMemory.RecognizedActions.SET_RESET, dcod);

    // Set up a two agent model (meaning two agents with the same setup, playing against each other)
    Alice = World.NewAgent("Alice");
    Bob = World.NewAgent("Bob");

    // Simulating environment will determine inputs to each agent based on what each agent does..
    // Feedback is determined by payoff matrix..
    payoff = new int[2, 2, 2];

    // Doing this the hard way. Could set this up all in-line above, but this makes the table
    // more explicit in terms of how we want to use it.
    // The payoff matrix here is called "Friend or Foe", about the simplest case
    // indices mean: FOR-WHICH-AGENT, WHAT-ALICE-DOES, WHAT-BOB-DOES
    payoff[_ALICE, _COOPERATE, _COOPERATE] = 1;
    payoff[_ALICE, _COOPERATE, _DEFECT] = 0;
    payoff[_ALICE, _DEFECT, _COOPERATE] = 2;
    payoff[_ALICE, _DEFECT, _DEFECT] = 0;
    payoff[_BOB, _COOPERATE, _COOPERATE] = 1;
    payoff[_BOB, _COOPERATE, _DEFECT] = 2;
    payoff[_BOB, _DEFECT, _COOPERATE] = 0;
    payoff[_BOB, _DEFECT, _DEFECT] = 0;
    maxpay = 2;

    results = new int[_TRIALS, 2, 2];

    // Set up a Q-learning Net =
    // -- Eligibility Condition = True if "What do you want to do?" is in input, otherwise False
    // -- Input = "My opponent cooperated", "My opponent defected", "What do you want to do?"
    // -- Output = "I want to defect", "I want to cooperate"
    //
    // Also, RER is turned ON
    QBPNetwork net_A = AgentInitializer.InitializeImplicitDecisionNetwork(Alice, QBPNetwork.Factory, QNetEC);

    net_A.Input.Add(sayWhat);
    net_A.Input.Add(sayCooperate);
    net_A.Input.Add(sayDefect);
    net_A.Output.Add(sayCooperate);
    net_A.Output.Add(sayDefect);
    Alice.Commit(net_A);

    net_A.Parameters.LEARNING_RATE = 1;
    Alice.ACS.Parameters.PERFORM_RER_REFINEMENT = true; // it's true by default anyway
    Alice.ACS.Parameters.LEVEL_SELECTION_METHOD = ActionCenteredSubsystem.LevelSelectionMethods.COMBINED;
    Alice.ACS.Parameters.LEVEL_SELECTION_OPTION = ActionCenteredSubsystem.LevelSelectionOptions.FIXED;
    Alice.ACS.Parameters.FIXED_FR_LEVEL_SELECTION_MEASURE = 1;
    Alice.ACS.Parameters.FIXED_BL_LEVEL_SELECTION_MEASURE = 1;
    Alice.ACS.Parameters.FIXED_RER_LEVEL_SELECTION_MEASURE = 1;
    Alice.ACS.Parameters.WM_UPDATE_ACTION_PROBABILITY = 1;

    // Rules (2 rules) =
    // Rule 1:
    // -- Condition = "Your opponent cooperated"
    // -- Action = Set "My opponent cooperated" in WM
    // Rule 2:
    // -- Condition = "Your opponent defected"
    // -- Action = Set "My opponent defected" in WM
    FixedRule ruleA1 = AgentInitializer.InitializeActionRule(Alice, FixedRule.Factory, wmuacC, FRSC);
    FixedRule ruleA2 = AgentInitializer.InitializeActionRule(Alice, FixedRule.Factory, wmuacD, FRSC);
    Alice.Commit(ruleA1);
    Alice.Commit(ruleA2);

    QBPNetwork net_B = AgentInitializer.InitializeImplicitDecisionNetwork(Bob, QBPNetwork.Factory, QNetEC);

    net_B.Input.Add(sayWhat);
    net_B.Input.Add(sayCooperate);
    net_B.Input.Add(sayDefect);
    net_B.Output.Add(sayCooperate);
    net_B.Output.Add(sayDefect);
    Bob.Commit(net_B);

    // Use Weighted Combination
    // NO partial match on TL
    net_B.Parameters.LEARNING_RATE = 1;
    Bob.ACS.Parameters.PERFORM_RER_REFINEMENT = true;
    Bob.ACS.Parameters.LEVEL_SELECTION_METHOD = ActionCenteredSubsystem.LevelSelectionMethods.COMBINED;
    Bob.ACS.Parameters.LEVEL_SELECTION_OPTION = ActionCenteredSubsystem.LevelSelectionOptions.FIXED;
    Bob.ACS.Parameters.FIXED_FR_LEVEL_SELECTION_MEASURE = 1;
    Bob.ACS.Parameters.FIXED_BL_LEVEL_SELECTION_MEASURE = 1;
    Bob.ACS.Parameters.FIXED_RER_LEVEL_SELECTION_MEASURE = 1;
    Bob.ACS.Parameters.WM_UPDATE_ACTION_PROBABILITY = 1;

    FixedRule ruleB1 = AgentInitializer.InitializeActionRule(Bob, FixedRule.Factory, wmuacC, FRSC);
    FixedRule ruleB2 = AgentInitializer.InitializeActionRule(Bob, FixedRule.Factory, wmuacD, FRSC);
    Bob.Commit(ruleB1);
    Bob.Commit(ruleB2);

    // Initially using the same parameters for RER as Full Hello World
    RefineableActionRule.GlobalParameters.SPECIALIZATION_THRESHOLD_1 = -.6;
    RefineableActionRule.GlobalParameters.GENERALIZATION_THRESHOLD_1 = -.1;
    RefineableActionRule.GlobalParameters.INFORMATION_GAIN_OPTION = RefineableActionRule.IGOptions.PERFECT;

    /*
     * Note -- What should be seen is that when you pass in "Your opponent…",
     * the agent should return the "Do Nothing" external action
     * (since it performed an internal WM action)..
     * However, you can just ignore this either way..
     */
}
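The QNetEC delegate passed to both networks above implements the eligibility condition described in the comments. The following is a plausible sketch, assuming the eligibility-checker delegate receives the current input collection and returns a bool:

// Hypothetical sketch of QNetEC, per the comment above: the Q-net is only
// eligible when "What do you want to do?" is present in the input.
public bool QNetEC(ActivationCollection currentInput)
{
    return currentInput[sayWhat] == Alice.Parameters.MAX_ACTIVATION;
}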
/// <summary>
/// Setup the ACS subsystem
/// </summary>
private void SetupACS()
{
    // Create Rule to avoid collision with wall
    SupportCalculator avoidCollisionWallSupportCalculator = FixedRuleToAvoidCollisionWall;
    FixedRule ruleAvoidCollisionWall = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputRotateClockwise, avoidCollisionWallSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleAvoidCollisionWall);

    // Create Rule to Go Ahead
    SupportCalculator goAheadSupportCalculator = FixedRuleToGoAhead;
    FixedRule ruleGoAhead = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputGoAhead, goAheadSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleGoAhead);

    // Create Rule to Go to Jewel
    SupportCalculator goToJewelSupportCalculator = FixedRuleToGoToJewel;
    FixedRule ruleGoToJewel = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputGoToJewelInVision, goToJewelSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleGoToJewel);

    // Create Rule to Eat Food
    SupportCalculator eatFoodSupportCalculator = FixedRuleToEatFood;
    FixedRule ruleEatFood = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputEatFood, eatFoodSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleEatFood);

    // Create Rule to Get Desired Jewel
    SupportCalculator getDesiredJewelSupportCalculator = FixedRuleToGetDesiredJewel;
    FixedRule ruleGetDesiredJewel = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputGetJewel, getDesiredJewelSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleGetDesiredJewel);

    // Create Rule to Hide Jewel
    SupportCalculator hideJewelSupportCalculator = FixedRuleToHideJewel;
    FixedRule ruleHideJewel = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputHideJewel, hideJewelSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleHideJewel);

    // Create Rule for Goal Achieved
    SupportCalculator goalAchievedSupportCalculator = FixedRuleGoalAchieved;
    FixedRule ruleGoalAchieved = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputGoalAchieved, goalAchievedSupportCalculator);

    // Create Rule for Fuel Low
    SupportCalculator fuelLowSupportCalculator = FixedRuleFuelLow;
    FixedRule ruleFuelLow = AgentInitializer.InitializeActionRule(CurrentAgent, FixedRule.Factory, outputFuelLow, fuelLowSupportCalculator);
    // Commit this rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleFuelLow);

    // Commit the goal achieved rule to Agent (in the ACS)
    CurrentAgent.Commit(ruleGoalAchieved);

    // Disable Rule Refinement
    CurrentAgent.ACS.Parameters.PERFORM_RER_REFINEMENT = false;
    // Level selection will be stochastic (probabilistic)
    CurrentAgent.ACS.Parameters.LEVEL_SELECTION_METHOD = ActionCenteredSubsystem.LevelSelectionMethods.STOCHASTIC;
    // The level selection measures are fixed (not variable), i.e. only the values defined below
    CurrentAgent.ACS.Parameters.LEVEL_SELECTION_OPTION = ActionCenteredSubsystem.LevelSelectionOptions.FIXED;
    // Define the fixed selection probabilities
    CurrentAgent.ACS.Parameters.FIXED_FR_LEVEL_SELECTION_MEASURE = 1;
    CurrentAgent.ACS.Parameters.FIXED_IRL_LEVEL_SELECTION_MEASURE = 0;
    CurrentAgent.ACS.Parameters.FIXED_BL_LEVEL_SELECTION_MEASURE = 0;
    CurrentAgent.ACS.Parameters.FIXED_RER_LEVEL_SELECTION_MEASURE = 0;
}
static void Main(string[] args)
{
    //Initialize the task
    Console.WriteLine("Initializing the Full Hello World Task");
    int CorrectCounter = 0;
    int NumberTrials = 20000;
    Random rand = new Random();
    World.LoggingLevel = TraceLevel.Off;
    int progress = 0;
    TextWriter orig = Console.Out;
    StreamWriter sw = File.CreateText("HelloWorldFull.txt");

    DimensionValuePair hi = World.NewDimensionValuePair("Salutation", "Hello");
    DimensionValuePair bye = World.NewDimensionValuePair("Salutation", "Goodbye");
    ExternalActionChunk sayHi = World.NewExternalActionChunk("Hello");
    ExternalActionChunk sayBye = World.NewExternalActionChunk("Goodbye");
    GoalChunk salute = World.NewGoalChunk("Salute");
    GoalChunk bidFarewell = World.NewGoalChunk("Bid Farewell");

    //Initialize the Agent
    Agent John = World.NewAgent("John");
    SimplifiedQBPNetwork net = AgentInitializer.InitializeImplicitDecisionNetwork(John, SimplifiedQBPNetwork.Factory);

    net.Input.Add(salute, "goals");
    net.Input.Add(bidFarewell, "goals");
    net.Input.Add(hi);
    net.Input.Add(bye);
    net.Output.Add(sayHi);
    net.Output.Add(sayBye);
    net.Parameters.LEARNING_RATE = 1;
    John.Commit(net);

    John.ACS.Parameters.VARIABLE_BL_BETA = .5;
    John.ACS.Parameters.VARIABLE_RER_BETA = .5;
    John.ACS.Parameters.VARIABLE_IRL_BETA = 0;
    John.ACS.Parameters.VARIABLE_FR_BETA = 0;

    RefineableActionRule.GlobalParameters.SPECIALIZATION_THRESHOLD_1 = -.6;
    RefineableActionRule.GlobalParameters.GENERALIZATION_THRESHOLD_1 = -.1;
    RefineableActionRule.GlobalParameters.INFORMATION_GAIN_OPTION = RefineableActionRule.IGOptions.PERFECT;

    AffiliationBelongingnessDrive ab = AgentInitializer.InitializeDrive(John, AffiliationBelongingnessDrive.Factory, rand.NextDouble(), (DeficitChangeProcessor)HelloWorldFull_DeficitChange);
    DriveEquation abd = AgentInitializer.InitializeDriveComponent(ab, DriveEquation.Factory);
    ab.Commit(abd);
    John.Commit(ab);

    AutonomyDrive aut = AgentInitializer.InitializeDrive(John, AutonomyDrive.Factory, rand.NextDouble(), (DeficitChangeProcessor)HelloWorldFull_DeficitChange);
    DriveEquation autd = AgentInitializer.InitializeDriveComponent(aut, DriveEquation.Factory);
    aut.Commit(autd);
    John.Commit(aut);

    GoalSelectionModule gsm = AgentInitializer.InitializeMetaCognitiveModule(John, GoalSelectionModule.Factory);
    GoalSelectionEquation gse = AgentInitializer.InitializeMetaCognitiveDecisionNetwork(gsm, GoalSelectionEquation.Factory);

    gse.Input.Add(ab.GetDriveStrength());
    gse.Input.Add(aut.GetDriveStrength());

    GoalStructureUpdateActionChunk su = World.NewGoalStructureUpdateActionChunk();
    GoalStructureUpdateActionChunk bu = World.NewGoalStructureUpdateActionChunk();
    su.Add(GoalStructure.RecognizedActions.SET_RESET, salute);
    bu.Add(GoalStructure.RecognizedActions.SET_RESET, bidFarewell);

    gse.Output.Add(su);
    gse.Output.Add(bu);

    gsm.SetRelevance(su, ab, 1);
    gsm.SetRelevance(bu, aut, 1);

    gsm.Commit(gse);
    John.Commit(gsm);

    John.MS.Parameters.CURRENT_GOAL_ACTIVATION_OPTION = MotivationalSubsystem.CurrentGoalActivationOptions.FULL;

    //Run the task
    Console.WriteLine("Running the Full Hello World Task");
    Console.SetOut(sw);

    SensoryInformation si;
    ExternalActionChunk chosen;

    for (int i = 0; i < NumberTrials; i++)
    {
        si = World.NewSensoryInformation(John);
        si[AffiliationBelongingnessDrive.MetaInfoReservations.STIMULUS, typeof(AffiliationBelongingnessDrive).Name] = 1;
        si[AutonomyDrive.MetaInfoReservations.STIMULUS, typeof(AutonomyDrive).Name] = 1;

        //Randomly choose an input to perceive.
        if (rand.NextDouble() < .5)
        {
            //Say "Hello"
            si.Add(hi, John.Parameters.MAX_ACTIVATION);
            si.Add(bye, John.Parameters.MIN_ACTIVATION);
        }
        else
        {
            //Say "Goodbye"
            si.Add(hi, John.Parameters.MIN_ACTIVATION);
            si.Add(bye, John.Parameters.MAX_ACTIVATION);
        }

        //Perceive the sensory information
        John.Perceive(si);
        //Choose an action
        chosen = John.GetChosenExternalAction(si);

        //Deliver appropriate feedback to the agent
        if (chosen == sayHi)
        {
            //The agent said "Hello".
            if (si[hi] == John.Parameters.MAX_ACTIVATION)
            {
                //The agent responded correctly
                Trace.WriteLineIf(World.LoggingSwitch.TraceWarning, "John was correct");
                //Record the agent's success.
                CorrectCounter++;
                //Give positive feedback.
                John.ReceiveFeedback(si, 1.0);
            }
            else
            {
                //The agent responded incorrectly
                Trace.WriteLineIf(World.LoggingSwitch.TraceWarning, "John was incorrect");
                //Give negative feedback.
                John.ReceiveFeedback(si, 0.0);
            }
        }
        else
        {
            //The agent said "Goodbye".
            if (si[bye] == John.Parameters.MAX_ACTIVATION)
            {
                //The agent responded correctly
                Trace.WriteLineIf(World.LoggingSwitch.TraceWarning, "John was correct");
                //Record the agent's success.
                CorrectCounter++;
                //Give positive feedback.
                John.ReceiveFeedback(si, 1.0);
            }
            else
            {
                //The agent responded incorrectly
                Trace.WriteLineIf(World.LoggingSwitch.TraceWarning, "John was incorrect");
                //Give negative feedback.
                John.ReceiveFeedback(si, 0.0);
            }
        }

        Console.SetOut(orig);
        progress = (int)(((double)(i + 1) / (double)NumberTrials) * 100);
        Console.CursorLeft = 0;
        Console.Write(progress + "% Complete..");
        Console.SetOut(sw);
    }

    //Report Results
    Console.WriteLine("Reporting Results for the Full Hello World Task");
    Console.WriteLine("John got " + CorrectCounter + " correct out of " + NumberTrials + " trials (" +
        (int)Math.Round(((double)CorrectCounter / (double)NumberTrials) * 100) + "%)");
    Console.WriteLine("At the end of the task, John had learned the following rules:");
    foreach (var i in John.GetInternals(Agent.InternalContainers.ACTION_RULES))
    {
        Console.WriteLine(i);
    }

    sw.Close();
    Console.SetOut(orig);
    Console.CursorLeft = 0;
    Console.WriteLine("100% Complete..");

    //Kill the agent to end the task
    Console.WriteLine("Killing John to end the program");
    John.Die();
    Console.WriteLine("John is Dead");
    Console.WriteLine("The Full Hello World Task has finished");
    Console.WriteLine("The results have been saved to \"HelloWorldFull.txt\"");
    Console.Write("Press any key to exit");
    Console.ReadKey(true);
}