public static void Main()
{
    Agent reasoner = World.NewAgent();
    InitializeWorld(reasoner);

    // Load every declarative chunk into the agent's GKS (top level of the NACS).
    foreach (DeclarativeChunk dc in chunks)
    {
        reasoner.AddKnowledge(dc);
    }

    // Build the Hopfield network that forms the bottom level of the NACS.
    HopfieldNetwork hopfield = AgentInitializer.InitializeAssociativeMemoryNetwork(reasoner, HopfieldNetwork.Factory);

    // Every dimension-value pair becomes a node of the associative memory.
    hopfield.Nodes.AddRange(dvs);
    reasoner.Commit(hopfield);

    // Train the committed network on the target patterns.
    EncodeHopfieldNetwork(hopfield);

    // NACS reasoning configuration.
    // NOTE(review): the original comment claimed 2 reasoning iterations but the value set is 1 — confirm which was intended.
    reasoner.NACS.Parameters.REASONING_ITERATION_COUNT = 1;
    // Conclusion threshold of 1: only fully matched conclusions are returned.
    reasoner.NACS.Parameters.CONCLUSION_THRESHOLD = 1;

    // External actions available to the ACS.
    World.NewExternalActionChunk("Yes");
    World.NewExternalActionChunk("No");

    // Internal action that asks the NACS to start a new round of reasoning.
    ReasoningRequestActionChunk reasoningRequest = World.NewReasoningRequestActionChunk("DoReasoning");
    reasoningRequest.Add(NonActionCenteredSubsystem.RecognizedReasoningActions.NEW, 1, false);

    // Simulation state markers (state 1, 2, 3).
    for (int state = 1; state <= 3; state++)
    {
        World.NewDimensionValuePair("state", state);
    }

    // "Yes" fires when declarative chunk 1 is present.
    RefineableActionRule yesRule = AgentInitializer.InitializeActionRule(reasoner, RefineableActionRule.Factory, World.GetActionChunk("Yes"));
    yesRule.GeneralizedCondition.Add(World.GetDeclarativeChunk(1), true);
    reasoner.Commit(yesRule);

    // "No" fires for chunks 0, 2, 3, or 4, grouped on the shared "altdim" dimension.
    RefineableActionRule noRule = AgentInitializer.InitializeActionRule(reasoner, RefineableActionRule.Factory, World.GetActionChunk("No"));
    foreach (int chunkId in new[] { 0, 2, 3, 4 })
    {
        noRule.GeneralizedCondition.Add(World.GetDeclarativeChunk(chunkId), true, "altdim");
    }
    reasoner.Commit(noRule);

    // In state 1 the agent requests NACS reasoning.
    RefineableActionRule reasoningRule = AgentInitializer.InitializeActionRule(reasoner, RefineableActionRule.Factory, World.GetActionChunk("DoReasoning"));
    reasoningRule.GeneralizedCondition.Add(World.GetDimensionValuePair("state", 1));
    reasoner.Commit(reasoningRule);

    // In state 2 the agent performs the built-in do-nothing action.
    RefineableActionRule idleRule = AgentInitializer.InitializeActionRule(reasoner, RefineableActionRule.Factory, ExternalActionChunk.DO_NOTHING);
    idleRule.GeneralizedCondition.Add(World.GetDimensionValuePair("state", 2));
    reasoner.Commit(idleRule);

    // ACS level selection: fixed selection with all weight on the rule (RER) level.
    reasoner.ACS.Parameters.PERFORM_RER_REFINEMENT = false;
    reasoner.ACS.Parameters.FIXED_BL_LEVEL_SELECTION_MEASURE = 0;
    reasoner.ACS.Parameters.FIXED_RER_LEVEL_SELECTION_MEASURE = 1;
    reasoner.ACS.Parameters.FIXED_IRL_LEVEL_SELECTION_MEASURE = 0;
    reasoner.ACS.Parameters.FIXED_FR_LEVEL_SELECTION_MEASURE = 0;
    reasoner.ACS.Parameters.LEVEL_SELECTION_METHOD = ActionCenteredSubsystem.LevelSelectionMethods.STOCHASTIC;
    reasoner.ACS.Parameters.LEVEL_SELECTION_OPTION = ActionCenteredSubsystem.LevelSelectionOptions.FIXED;
    reasoner.ACS.Parameters.NACS_REASONING_ACTION_PROBABILITY = 1;
    reasoner.ACS.Parameters.EXTERNAL_ACTION_PROBABILITY = 1;
    reasoner.NACS.Parameters.REASONING_ITERATION_TIME = 3000;

    // Run the simulation (outputs its own results), then shut the agent down.
    Run(reasoner);
    reasoner.Die();

    Console.WriteLine("Press any key to exit");
    Console.ReadKey();
}
public static void InitializeAgent(Groups gr)
{
    Participant = World.NewAgent();

    // Implicit decision network (bottom level of the ACS), trained by backpropagation.
    BPNetwork decisionNet = AgentInitializer.InitializeImplicitDecisionNetwork(Participant, BPNetwork.Factory);
    decisionNet.Input.AddRange(dvs);
    decisionNet.Output.AddRange(acts);
    Participant.Commit(decisionNet);

    // One explicit rule per tool chunk: fire the "Tool" action when all of the chunk's features hold.
    foreach (DeclarativeChunk toolChunk in tools)
    {
        RefineableActionRule toolRule = AgentInitializer.InitializeActionRule(Participant, RefineableActionRule.Factory, World.GetActionChunk("Tool"));
        foreach (DimensionValuePair feature in toolChunk)
        {
            toolRule.GeneralizedCondition.Add(feature, true);
        }
        Participant.Commit(toolRule);
    }

    // Same construction for the gun chunks and the "Gun" action.
    foreach (DeclarativeChunk gunChunk in guns)
    {
        RefineableActionRule gunRule = AgentInitializer.InitializeActionRule(Participant, RefineableActionRule.Factory, World.GetActionChunk("Gun"));
        foreach (DimensionValuePair feature in gunChunk)
        {
            gunRule.GeneralizedCondition.Add(feature, true);
        }
        Participant.Commit(gunRule);
    }

    // ACS configuration: refinement and density-based deletion off; equal fixed weight
    // on the bottom (BL) and rule (RER) levels.
    Participant.ACS.Parameters.PERFORM_RER_REFINEMENT = false;
    Participant.ACS.Parameters.PERFORM_DELETION_BY_DENSITY = false;
    Participant.ACS.Parameters.FIXED_BL_LEVEL_SELECTION_MEASURE = 1;
    Participant.ACS.Parameters.FIXED_RER_LEVEL_SELECTION_MEASURE = 1;
    Participant.ACS.Parameters.FIXED_IRL_LEVEL_SELECTION_MEASURE = 0;
    Participant.ACS.Parameters.FIXED_FR_LEVEL_SELECTION_MEASURE = 0;
    Participant.ACS.Parameters.B = 1;

    // Honor drive with a random initial input, using TangentEquation as its strength component.
    HonorDrive honor = AgentInitializer.InitializeDrive(Participant, HonorDrive.Factory, r.NextDouble());
    GenericEquation strengthEq = AgentInitializer.InitializeDriveComponent(honor, GenericEquation.Factory, (Equation)TangentEquation);
    var typicalInputs = Drive.GenerateTypicalInputs(honor);

    // Construction registers the chunk in the World; it records a change of the ACS
    // "MCS_RER_SELECTION_MEASURE" parameter to .5 — presumably applied when the action
    // is performed (NOTE(review): the local is not otherwise used here; confirm intent).
    ParameterChangeActionChunk paramChange = World.NewParameterChangeActionChunk();
    paramChange.Add(Participant.ACS, "MCS_RER_SELECTION_MEASURE", .5);

    strengthEq.Input.AddRange(typicalInputs);
    strengthEq.Parameters.MAX_ACTIVATION = 5;
    honor.Commit(strengthEq);

    // The PRIVATE group gets half the drive gain of the other group(s).
    if (gr == Groups.PRIVATE)
    {
        honor.Parameters.DRIVE_GAIN = .1 / 5;
    }
    else
    {
        honor.Parameters.DRIVE_GAIN = .2 / 5;
    }
    Participant.Commit(honor);

    // Meta-cognitive module: the honor drive strength feeds the equation that sets
    // the ACS level-selection probabilities.
    ParameterSettingModule settingModule = AgentInitializer.InitializeMetaCognitiveModule(Participant, ParameterSettingModule.Factory);
    ACSLevelProbabilitySettingEquation probabilityEq = AgentInitializer.InitializeMetaCognitiveDecisionNetwork(settingModule, ACSLevelProbabilitySettingEquation.Factory, Participant);
    probabilityEq.Input.Add(honor.GetDriveStrength());
    settingModule.Commit(probabilityEq);
    Participant.Commit(settingModule);
    settingModule.Parameters.FIXED_BL_LEVEL_SELECTION_MEASURE = 1;
    settingModule.Parameters.FIXED_RER_LEVEL_SELECTION_MEASURE = 0;

    // Pre-train the implicit decision network before the simulation starts.
    PreTrainACS(decisionNet);
}