public static void PreTrainACS(BPNetwork idn)
{
    Console.Write("Pre-training ACS...");

    // Jitter the baseline "gun" probabilities by a uniform value in [-abeMaxTemp, +abeMaxTemp].
    pABEBlackGun = abeBlackGun + (((r.NextDouble() * 2) - 1) * abeMaxTemp);
    pABEWhiteGun = abeWhiteGun + (((r.NextDouble() * 2) - 1) * abeMaxTemp);

    // Primes are all face chunks; targets are all gun and tool chunks.
    List<DeclarativeChunk> primeChunks = new List<DeclarativeChunk>();
    primeChunks.AddRange(white_faces);
    primeChunks.AddRange(black_faces);

    List<DeclarativeChunk> targetChunks = new List<DeclarativeChunk>();
    targetChunks.AddRange(guns);
    targetChunks.AddRange(tools);

    // Build one fully-activated data set per (prime, target) pairing.
    List<ActivationCollection> trainingSets = new List<ActivationCollection>();
    foreach (DeclarativeChunk prime in primeChunks)
    {
        foreach (DeclarativeChunk tgt in targetChunks)
        {
            ActivationCollection set = ImplicitComponentInitializer.NewDataSet();
            set.AddRange(prime, 1);
            set.AddRange(tgt, 1);
            trainingSets.Add(set);
        }
    }

    ImplicitComponentInitializer.Train(idn, trainer, numIterations: numTrainingTrials, randomTraversal: true, dataSets: trainingSets.ToArray());
    Console.WriteLine("Finished");
}
/// <summary>
/// Training equation: given the prime's skin color, stochastically labels the
/// output as "Gun" or "Tool" using the race-conditional gun probability.
/// </summary>
/// <param name="input">the input activation pattern (read for the SkinColor dimension)</param>
/// <param name="output">the output pattern; exactly one of Gun/Tool is set to 1</param>
public static void PreTrainingEquation(ActivationCollection input, ActivationCollection output)
{
    // The two original branches were duplicates except for the probability used;
    // collapse them into a single draw against the appropriate threshold.
    double pGun = (input["SkinColor", "Black"] == 1) ? pABEBlackGun : pABEWhiteGun;
    if (r.NextDouble() <= pGun)
    {
        output[World.GetActionChunk("Gun")] = 1;
    }
    else
    {
        output[World.GetActionChunk("Tool")] = 1;
    }
}
// Computes support (1 or 0) for a simple rule: derives a value from the active
// "Target P" and "Current P" dimensions and compares it against the rule's action label.
// NOTE(review): r defaults to null but is cast and dereferenced at the end — callers
// must always supply a rule; confirm whether a null guard is needed.
public double CalculateSupport_SimpleRule(ActivationCollection si, Rule r = null)
{
    // Value of the fully-activated "Target P" pair (activation == MAX within double.Epsilon).
    // .First() throws if no such pair is active — presumably guaranteed by the caller.
    double t = (from i in si
                where i.WORLD_OBJECT.AsDimensionValuePair.Dimension.ToString() == "Target P" &&
                      Math.Abs(i.ACTIVATION - John.Parameters.MAX_ACTIVATION) < double.Epsilon
                select(double) i.WORLD_OBJECT.AsDimensionValuePair.Value.AsIComparable).First();

    // Value of the fully-activated "Current P" pair, same selection criteria.
    double c = (from i in si
                where i.WORLD_OBJECT.AsDimensionValuePair.Dimension.ToString() == "Current P" &&
                      Math.Abs(i.ACTIVATION - John.Parameters.MAX_ACTIVATION) < double.Epsilon
                select(double) i.WORLD_OBJECT.AsDimensionValuePair.Value.AsIComparable).First();

    // Half the distance between current and target, rounded.
    double result = Math.Round((Math.Abs(t - c) / 2));
    if (c < t)
    {
        result += c; // step halfway up from c toward t
    }
    else if (c > t)
    {
        // NOTE(review): this yields round(|t-c|/2) - c, which is negative for c > t.
        // If the intent was the halfway point stepping DOWN toward t, it would be
        // c - round(...). Confirm against the rule set before changing.
        result -= c;
    }
    // Support is 1 iff the computed value matches the rule's action label (within epsilon).
    return((Math.Abs(result - (double)((ActionRule)r).Action.LabelAsIComparable) < double.Epsilon) ? 1 : 0);
}
// FMT 15/06/2017
private double FixedRuleToGoTo(ActivationCollection currentInput, Rule target)
{
    // Support going ahead whenever any collectible (food, leaflet jewel,
    // or plain jewel) is fully activated as being directly ahead.
    bool somethingAhead =
        currentInput.Contains(inputFoodAhead, CurrentAgent.Parameters.MAX_ACTIVATION) ||
        currentInput.Contains(inputLeafletJewelAhead, CurrentAgent.Parameters.MAX_ACTIVATION) ||
        currentInput.Contains(inputJewelAhead, CurrentAgent.Parameters.MAX_ACTIVATION);
    return somethingAhead ? 1.0 : 0.0;
}
// Support going to food when fuel is low: the path ahead must be clear
// (no wall/creature, no food or jewel directly ahead), the fuelLow flag set,
// and no jewel exchange in progress.
// (Removed a stale commented-out earlier version of this predicate.)
private double FixedRuleFuelLow(ActivationCollection currentInput, Rule target)
{
    if ((currentInput.Contains(inputWallCreatureAhead, CurrentAgent.Parameters.MIN_ACTIVATION)) &&
        (currentInput.Contains(inputFoodAhead, CurrentAgent.Parameters.MIN_ACTIVATION)) &&
        (currentInput.Contains(inputJewelAhead, CurrentAgent.Parameters.MIN_ACTIVATION)) &&
        (fuelLow) &&
        (!exchangeJewels))
    {
        //Console.WriteLine ("Fixed Rule Support Fuel Low");
        return(1.0);
    }
    else
    {
        return(0.0);
    }
}
private double FixedRuleToAvoidCollisionWall(ActivationCollection currentInput, Rule target)
{
    // See partial match threshold to verify which rules are available for action selection.
    // Support avoidance only when all of these inputs sit at minimum activation.
    double minAct = CurrentAgent.Parameters.MIN_ACTIVATION;
    if (currentInput.Contains(inputCloseObject, minAct) &&
        currentInput.Contains(inputFoodAhead, minAct) &&
        currentInput.Contains(inputLeafletJewelAhead, minAct) &&
        currentInput.Contains(inputJewelAhead, minAct))
    {
        return 1.0;
    }
    return 0.0;
}
// Fixed-rule support for imitative learning: the rule gets full support
// exactly when its proposed action matches the player's current choice.
// (No graded support value is needed here — 1 when applicable, 0 otherwise.)
public double ReturnFixedSupport(ActivationCollection currentInput, Clarion.Framework.Core.Rule r)
{
    bool matchesPlayerChoice = PlayerChoice != null && r.OutputChunk == PlayerChoice;
    return matchesPlayerChoice ? 1 : 0;
}
// Builds and trains the BP network in the bottom level of the reasoner's NACS:
// three animal chunks as outputs, two property DV pairs as inputs.
static void SetupBPNetwork(Agent reasoner)
{
    // Chunks for the whales, tuna, and bears.
    DeclarativeChunk tunaChunk = World.NewDeclarativeChunk("Tuna");
    DeclarativeChunk whaleChunk = World.NewDeclarativeChunk("Whale");
    DeclarativeChunk bearChunk = World.NewDeclarativeChunk("Bear");

    // The two properties, expressed as dimension-value pairs.
    DimensionValuePair livesInWater = World.NewDimensionValuePair("lives in", "water");
    DimensionValuePair eatsFish = World.NewDimensionValuePair("eats", "fish");

    // The BP network used in the bottom level of the NACS.
    BPNetwork net = AgentInitializer.InitializeAssociativeMemoryNetwork(reasoner, BPNetwork.Factory);

    // Properties are inputs; chunks are outputs.
    net.Input.Add(livesInWater);
    net.Input.Add(eatsFish);
    net.Output.Add(tunaChunk);
    net.Output.Add(whaleChunk);
    net.Output.Add(bearChunk);
    reasoner.Commit(net);

    // The chunks also go into the GKS.
    reasoner.AddKnowledge(tunaChunk);
    reasoner.AddKnowledge(whaleChunk);
    reasoner.AddKnowledge(bearChunk);

    // Trainer mirroring the network's input/output layout, driven by trainerEQ.
    GenericEquation trainer = ImplicitComponentInitializer.InitializeTrainer(GenericEquation.Factory, (Equation)trainerEQ);
    trainer.Input.Add(livesInWater);
    trainer.Input.Add(eatsFish);
    trainer.Output.Add(tunaChunk);
    trainer.Output.Add(whaleChunk);
    trainer.Output.Add(bearChunk);
    trainer.Commit();

    // One training data set per property, each with that property fully activated.
    List<ActivationCollection> sis = new List<ActivationCollection>();
    ActivationCollection si = ImplicitComponentInitializer.NewDataSet();
    si.Add(livesInWater, 1);
    sis.Add(si);
    si = ImplicitComponentInitializer.NewDataSet();
    si.Add(eatsFish, 1);
    sis.Add(si);

    Console.Write("Training AMN...");
    // Trains the BP network to associate the properties with the chunks.
    ImplicitComponentInitializer.Train(net, trainer, sis, ImplicitComponentInitializer.TrainingTerminationConditions.SUM_SQ_ERROR);
    Console.WriteLine("Finished!");
}
/// <summary>
/// Performs reasoning using a "noisy" input based on each pattern
/// </summary>
/// <param name="reasoner">The reasoner who is performing the reasoning</param>
static void DoReasoning(Agent reasoner)
{
    // Number of patterns whose own chunk appeared among the reasoning conclusions.
    int correct = 0;
    //Iterates through each pattern
    foreach (DeclarativeChunk dc in chunks)
    {
        //Gets an input to use for reasoning. Note that the World.GetSensoryInformation method can also be used here
        ActivationCollection si = ImplicitComponentInitializer.NewDataSet();
        // Count of the chunk's features activated so far; drives the noise cutoff below.
        int count = 0;
        //Sets up the input
        foreach (DimensionValuePair dv in dvs)
        {
            // Activate features belonging to dc only while the activated fraction
            // (count / dc.Count) stays below (1 - noise); the remainder are zeroed.
            // NOTE(review): order-sensitive — once the fraction crosses the cutoff,
            // all later features are zeroed regardless of membership in dc.
            if (((double)count / (double)dc.Count < (1 - noise)))
            {
                if (dc.Contains(dv))
                {
                    si.Add(dv, 1);
                    ++count;
                }
                else
                {
                    si.Add(dv, 0);
                }
            }
            else
            {
                si.Add(dv, 0); //Zeros out the dimension-value pair if "above the noise level"
            }
        }
        Console.WriteLine("Input to reasoner:\r\n" + si);
        Console.WriteLine("Output from reasoner:");
        //Performs reasoning based on the input. The conclusions returned from this method will be in the form of a
        //collection of "Chunk Tuples." A chunk tuple is simply just a chunk combined with its associated activation.
        var o = reasoner.NACS.PerformReasoning(si);
        //Iterates through the conclusions from reasoning
        foreach (var i in o)
        {
            Console.WriteLine(i.CHUNK);
            // A retrieval is "correct" when the original pattern's chunk is among the conclusions.
            if (i.CHUNK == dc)
            {
                correct++;
            }
        }
    }
    Console.WriteLine("Retrieval Accuracy: " + (int)(((double)correct / (double)chunks.Count) * 100) + "%");
}
private double FixedRuleGoalAchieved(ActivationCollection currentInput, Rule target)
{
    // Full support exactly when the jewel total has reached the goal.
    return (getJewelRemainingTotal() == jewelGoal) ? 1.0 : 0.0;
}
private double FixedRuleToEatFood(ActivationCollection currentInput, Rule target)
{
    // Eat only when food is directly ahead, no wall/creature or jewel distracts,
    // and the jewel goal has not yet been reached.
    bool pathClear =
        currentInput.Contains(inputWallCreatureAhead, CurrentAgent.Parameters.MIN_ACTIVATION) &&
        currentInput.Contains(inputJewelAhead, CurrentAgent.Parameters.MIN_ACTIVATION) &&
        currentInput.Contains(inputJewelInVision, CurrentAgent.Parameters.MIN_ACTIVATION);
    bool foodAhead = currentInput.Contains(inputFoodAhead, CurrentAgent.Parameters.MAX_ACTIVATION);
    if (pathClear && foodAhead && getJewelRemainingTotal() != jewelGoal)
    {
        return 1.0;
    }
    return 0.0;
}
// Support (1 or 0) for an IRL rule of the form "answer = round((t - B) / A)":
// extracts t from the active "Target P" input, and A and B from the rule's
// generalized condition, then checks the rule's action label against the formula.
public double CalculateSupport_IRLSet1(ActivationCollection si, Rule r = null)
{
    // Value of the fully-activated "Target P" dimension-value pair.
    double t = si
        .Where(i => i.WORLD_OBJECT.AsDimensionValuePair.Dimension.ToString() == "Target P" &&
                    Math.Abs(i.ACTIVATION - John.Parameters.MAX_ACTIVATION) < double.Epsilon)
        .Select(i => (double)i.WORLD_OBJECT.AsDimensionValuePair.Value.AsIComparable)
        .First();

    // Coefficient A from the rule's condition (only entries marked true in the condition).
    double A = (double)r.GeneralizedCondition
        .Where(a => a.Dimension.ToString() == "A" && r.GeneralizedCondition[a])
        .Select(a => a.AsDimensionValuePair.Value.AsIComparable)
        .First();

    // Offset B from the rule's condition.
    double B = (double)r.GeneralizedCondition
        .Where(b => b.AsDimensionValuePair.Dimension.ToString() == "B" && r.GeneralizedCondition[b])
        .Select(b => b.AsDimensionValuePair.Value.AsIComparable)
        .First();

    // Support is 1 iff round((t - B) / A) equals the rule's action label (within epsilon).
    return (Math.Abs((Math.Round((t - B) / A) - (double)((ActionRule)r).Action.LabelAsIComparable)) < double.Epsilon) ? 1 : 0;
}
private double FixedRuleToWander(ActivationCollection currentInput, Rule target)
{
    // Wander only when every sensory input sits at its minimum activation
    // (nothing ahead, nothing in the distance).
    bool allQuiet =
        currentInput.Contains(inputWallAhead, MIN_ACT_VAL) &&
        currentInput.Contains(inputJewelAhead, MIN_ACT_VAL) &&
        currentInput.Contains(inputFoodAhead, MIN_ACT_VAL) &&
        currentInput.Contains(inputJewelAway, MIN_ACT_VAL) &&
        currentInput.Contains(inputFoodAway, MIN_ACT_VAL);
    return allQuiet ? 1.0 : 0.0;
}
/// <summary>
/// The training equation that is used to train the BP network in the bottom level of the NACS.
/// Maps each property input to target activations for the three animal chunks.
/// </summary>
/// <param name="input">the property pattern being presented</param>
/// <param name="output">the desired chunk activations for that pattern</param>
static void trainerEQ(ActivationCollection input, ActivationCollection output)
{
    // "lives in water" property: strong for tuna/whale, partial for bear.
    if (input.Contains(World.GetDimensionValuePair("lives in", "water"), 1))
    {
        output[World.GetDeclarativeChunk("Tuna")] = 1;    // tunas live in water
        output[World.GetDeclarativeChunk("Whale")] = 1;   // whales live in water
        output[World.GetDeclarativeChunk("Bear")] = 0.5;  // bears only spend some time in it
    }
    else
    {
        // "eats fish" property: strongest for bear, then whale, weakest for tuna.
        if (input.Contains(World.GetDimensionValuePair("eats", "fish"), 1))
        {
            output[World.GetDeclarativeChunk("Tuna")] = 0.2;   // tuna rarely eat other fish
            output[World.GetDeclarativeChunk("Whale")] = 0.75; // whales eat fish, though not exclusively
            output[World.GetDeclarativeChunk("Bear")] = 1;     // bears love salmon
        }
    }
}
// Adjusts the drive's deficit-change rate based on the agent's current goal,
// then returns the updated deficit for that drive.
public static double HelloWorldFull_DeficitChange(ActivationCollection si, Drive target)
{
    var goal = ((SensoryInformation)si).AffiliatedAgent.CurrentGoal;
    if (goal != null)
    {
        // A goal that matches the drive slowly shrinks its deficit; any other
        // goal slowly grows it.
        bool goalMatchesDrive =
            (goal == World.GetGoalChunk("Salute") && target is AffiliationBelongingnessDrive) ||
            (goal == World.GetGoalChunk("Bid Farewell") && target is AutonomyDrive);
        target.Parameters.DEFICIT_CHANGE_RATE = goalMatchesDrive ? .999 : 1.001;
    }
    return target.Deficit * target.Parameters.DEFICIT_CHANGE_RATE;
}
private double FixedRuleToGoToJewel(ActivationCollection currentInput, Rule target)
{
    // Head toward a jewel only when: nothing is directly ahead (wall/creature,
    // food, jewel), a jewel is in vision, fuel is not low, the jewel's color is
    // desired, and the jewel goal has not yet been reached.
    double minAct = CurrentAgent.Parameters.MIN_ACTIVATION;
    if (currentInput.Contains(inputWallCreatureAhead, minAct) &&
        currentInput.Contains(inputFoodAhead, minAct) &&
        currentInput.Contains(inputJewelAhead, minAct) &&
        currentInput.Contains(inputJewelInVision, CurrentAgent.Parameters.MAX_ACTIVATION) &&
        currentInput.Contains(inputFuelLow, minAct) &&
        isDesiredJewel(currentJewel.Material.Color) &&
        getJewelRemainingTotal() != jewelGoal)
    {
        // (!exchangeJewels)) {
        return 1.0;
    }
    return 0.0;
}
// Support (1 or 0) for a memory-group rule: both the rule-relevant "Current P"
// pair and the rule-relevant previous action chunk must be fully activated in si.
// Returns 0 when either is absent from the rule's generalized condition.
public double CalculateSupport_MemoryGroup(ActivationCollection si, Rule r = null)
{
    // The "Current P" pair that also appears (as true) in the rule's condition.
    DimensionValuePair currentP = (from t in si
                                   where t.WORLD_OBJECT.AsDimensionValuePair.Dimension.ToString() == "Current P" &&
                                         r.GeneralizedCondition.Contains(t.WORLD_OBJECT, true)
                                   select t.WORLD_OBJECT.AsDimensionValuePair).FirstOrDefault();

    // The previous external action chunk that also appears in the rule's condition.
    DimensionValuePair previousW = (from t in si
                                    where t.WORLD_OBJECT is ExternalActionChunk &&
                                          r.GeneralizedCondition.Contains(t.WORLD_OBJECT, true)
                                    select t.WORLD_OBJECT.AsDimensionValuePair).FirstOrDefault();

    if (currentP == null || previousW == null)
    {
        return(0);
    }
    // FIX: use an epsilon tolerance instead of exact == on doubles, consistent
    // with the other support calculators in this file.
    bool bothAtMax =
        Math.Abs(si[currentP] - John.Parameters.MAX_ACTIVATION) < double.Epsilon &&
        Math.Abs(si[previousW] - John.Parameters.MAX_ACTIVATION) < double.Epsilon;
    return(bothAtMax ? 1 : 0);
}
/// <summary>
/// Encodes the patterns into the specified Hopfield network and then tests to make sure they have been successfully encoded
/// </summary>
/// <remarks>
/// <note type="implementnotes">Most of the work that is done by this method is actually also performed by the implicit component initializer's
/// <see cref="ImplicitComponentInitializer.Encode{T}(T, ImplicitComponentInitializer.EncodeTerminationConditions, int, ActivationCollection[])">
/// Encode</see> method. However, we must separate the "encode" and "recall" phases in this example since we are using a different
/// <see cref="HopfieldNetwork.TransmissionOptions">transmission option</see> between these encoding process.</note>
/// </remarks>
/// <param name="net">the network where the patterns are to be encoded</param>
static void EncodeHopfieldNetwork(HopfieldNetwork net)
{
    //Tracks the accuracy of correctly encoded patterns (fraction in [0, 1])
    double accuracy = 0;
    //Continue encoding until all of the patterns are successfully recalled
    do
    {
        //Specifies to use the "N spins" transmission option during the encoding phase
        net.Parameters.TRANSMISSION_OPTION = HopfieldNetwork.TransmissionOptions.N_SPINS;
        List<ActivationCollection> sis = new List<ActivationCollection>();
        foreach (DeclarativeChunk dc in chunks)
        {
            //Gets a new "data set" object (to be used by the Encode method to encode the pattern)
            ActivationCollection si = ImplicitComponentInitializer.NewDataSet();
            //Sets up the pattern
            si.AddRange(dc, 1);
            sis.Add(si);
        }
        //Encodes the pattern into the Hopfield network
        ImplicitComponentInitializer.Encode(net, sis);
        //Specifies to use the "let settle" transmission option during the testing phase
        net.Parameters.TRANSMISSION_OPTION = HopfieldNetwork.TransmissionOptions.LET_SETTLE;
        //Tests the net to see if it has learned the patterns
        accuracy = ImplicitComponentInitializer.Encode(net, sis, testOnly: true);
        // BUG FIX: was ((int)accuracy * 100) — the cast truncated accuracy to 0
        // (or 1) BEFORE scaling, so partial recall always printed as 0%.
        Console.WriteLine(((int)(accuracy * 100)) + "% of the patterns were successfully recalled.");
    } while (accuracy < 1);
}
private double FixedRuleToAvoidCollisionWall(ActivationCollection currentInput, Rule target)
{
    // See partial match threshold to verify which rules are available for action selection.
    // Support avoidance whenever a wall or creature is fully sensed ahead.
    if (currentInput.Contains(inputWallCreatureAhead, CurrentAgent.Parameters.MAX_ACTIVATION))
    {
        return 1.0;
    }
    return 0.0;
}
private double FixedRuleToDeliverLeaflet(ActivationCollection currentInput, Rule target)
{
    // Support delivering a leaflet when the deliver-leaflet input is fully active.
    bool deliverSignal = currentInput.Contains(inputDeliverLeaflet, CurrentAgent.Parameters.MAX_ACTIVATION);
    return deliverSignal ? 1.0 : 0.0;
}
private double FixedRuleToSackJewel(ActivationCollection currentInput, Rule target)
{
    // Support sacking a jewel when one is fully sensed directly ahead.
    if (currentInput.Contains(inputJewelAhead, CurrentAgent.Parameters.MAX_ACTIVATION))
    {
        return 1.0;
    }
    return 0.0;
}
private double FixedRuleToEatFood(ActivationCollection currentInput, Rule target)
{
    // Support eating when food is fully sensed directly ahead.
    bool foodAhead = currentInput.Contains(inputFoodAhead, CurrentAgent.Parameters.MAX_ACTIVATION);
    return foodAhead ? 1.0 : 0.0;
}
private double FixedRuleToGoToClosestFood(ActivationCollection currentInput, Rule target)
{
    // Support moving toward food when distant food is fully sensed.
    if (currentInput.Contains(inputDistantFood, CurrentAgent.Parameters.MAX_ACTIVATION))
    {
        return 1.0;
    }
    return 0.0;
}
private double FixedRuleToStopWhenFinished(ActivationCollection currentInput, Rule target)
{
    // See partial match threshold to verify which rules are available for action selection.
    // Support stopping once all three leaflets are ready and we have not stopped yet.
    bool readyToStop = checkThreeLeafletsReady() && !stopped;
    return readyToStop ? 1.0 : 0.0;
}
private double FixedRuleToGoAhead(ActivationCollection currentInput, Rule target)
{
    // Support going ahead whenever the need-jewel input is at minimum activation.
    if (currentInput.Contains(inputNeedJewel, CurrentAgent.Parameters.MIN_ACTIVATION))
    {
        return 1.0;
    }
    return 0.0;
}
private double FixedRuleToSackItItem(ActivationCollection currentInput, Rule target)
{
    // See partial match threshold to verify which rules are available for action selection.
    bool sackable = currentInput.Contains(inputSackItItem, CurrentAgent.Parameters.MAX_ACTIVATION);
    return sackable ? 1.0 : 0.0;
}
// Support (1 or 0) for a working-memory rule: the rule is supported when its
// proposed action chunk matches what was just said (cooperate or defect).
public double RuleSupport(ActivationCollection currentInput, Rule target = null)
{
    // FIX: target defaults to null but was dereferenced unconditionally;
    // treat a missing rule as "no support" instead of throwing.
    if (target == null)
    {
        return 0.0;
    }
    bool supported =
        (currentInput.Contains(sayCooperate, 1.0) && target.OutputChunk == wmuacC) ||
        (currentInput.Contains(sayDefect, 1.0) && target.OutputChunk == wmuacD);
    return supported ? 1.0 : 0.0;
}
// Eligibility check for the Q-net: eligible only when the "say what" input
// is fully active (activation 1.0).
public bool QNetEligibility(ActivationCollection currentInput = null, ClarionComponent target = null)
{
    // FIX: currentInput defaults to null but was dereferenced unconditionally —
    // a null input is now simply ineligible. Also removed the redundant
    // "? true : false" around a boolean expression.
    return currentInput != null && currentInput.Contains(sayWhat, 1.0);
}
// Uses the "robin" pattern (chunks[0]) as input to the reasoner and reports
// which of the other two chunks (sparrow = chunks[1], goose = otherwise) is
// more strongly activated, i.e. more similar to a robin.
static void DoReasoning(Agent reasoner)
{
    //Gets an input to use for reasoning. Note that the World.GetSensoryInformation method can also be used here
    ActivationCollection si = ImplicitComponentInitializer.NewDataSet();
    //activation values: act1 = sparrow chunk, act2 = goose chunk
    double act1 = 0;
    double act2 = 0;
    //Sets up the input: activate exactly the features belonging to the robin chunk
    foreach (DimensionValuePair dv in dvs)
    {
        if (chunks[0].Contains(dv))
        {
            si.Add(dv, 1);
        }
        else
        {
            si.Add(dv, 0);
        }
    }
    Console.WriteLine("Using the features for \"robin\" as input to reasoner:\r\n" + si);
    Console.WriteLine();
    Console.WriteLine("Output from reasoner:");
    //Performs reasoning based on the input
    var o = reasoner.NACS.PerformReasoning(si);
    //Iterates through the conclusions from reasoning
    foreach (var i in o)
    {
        //If it is the robin chunk itself, skip over
        if (i.CHUNK == chunks[0])
        {
            continue;
        }
        else
        {
            Console.WriteLine(i.CHUNK);
            //If it is the sparrow chunk, record its activation in act1
            if (i.CHUNK == chunks[1])
            {
                act1 = i.ACTIVATION;
            }
            //Otherwise it is the goose chunk — record in act2
            else
            {
                act2 = i.ACTIVATION;
            }
            Console.WriteLine("Activation of \"" + i.CHUNK.LabelAsIComparable + "\" chunk based on \"robin\" input: " + Math.Round(i.ACTIVATION, 2));
        }
        Console.WriteLine();
    }
    // The more strongly activated chunk is judged more similar to a robin.
    // NOTE(review): ties report "goose" because of the strict > comparison.
    Console.WriteLine("Which animal is most similar to a robin?");
    if (act1 > act2)
    {
        Console.WriteLine("A sparrow because its chunk activation is higher (" + Math.Round(act1, 2) + " vs. " + Math.Round(act2, 2) + ")");
    }
    else
    {
        Console.WriteLine("A goose because its chunk activation is higher (" + Math.Round(act2, 2) + " vs. " + Math.Round(act1, 2) + ")");
    }
}
// Compares how strongly two animal combinations activate the "mammal" chunk
// (chunks[0]): hippo+rhino (chunks[1]+chunks[2]) vs. hippo+hamster
// (chunks[1]+chunks[3]).
static void DoReasoning(Agent reasoner)
{
    // act1 = mammal activation from hippo+rhino; act2 = from hippo+hamster.
    double act1 = 0;
    double act2 = 0;
    //Gets an input to use for reasoning. Note that the World.GetSensoryInformation method can also be used here
    ActivationCollection hiprhi = ImplicitComponentInitializer.NewDataSet();
    ActivationCollection hipham = ImplicitComponentInitializer.NewDataSet();
    //Sets up the inputs: each data set activates the union of its two animals' features,
    //and zeros features belonging to none of the three animals.
    foreach (DimensionValuePair dv in dvs)
    {
        if (chunks[1].Contains(dv) || chunks[2].Contains(dv))
        {
            hiprhi.Add(dv, 1);
        }
        if (chunks[1].Contains(dv) || chunks[3].Contains(dv))
        {
            hipham.Add(dv, 1);
        }
        if (!chunks[1].Contains(dv) && !chunks[2].Contains(dv) && !chunks[3].Contains(dv))
        {
            hiprhi.Add(dv, 0);
            hipham.Add(dv, 0);
        }
    }
    Console.WriteLine("Using the features for \"hippo\" and \"rhino\" as input into reasoner:\r\n" + hiprhi);
    Console.WriteLine();
    Console.WriteLine("Output from reasoner:");
    //Performs reasoning based on the first input
    var o = reasoner.NACS.PerformReasoning(hiprhi);
    //Iterates through the conclusions, looking for the mammal chunk
    foreach (var i in o)
    {
        if (i.CHUNK == chunks[0])
        {
            Console.WriteLine(i.CHUNK);
            Console.WriteLine("The activation of the \"mammal\" chunk based on \"hippo\" and \"rhino\" is: " + Math.Round(i.ACTIVATION, 2));
            act1 = i.ACTIVATION;
        }
        else
        {
            continue;
        }
        Console.WriteLine();
    }
    // BUG FIX: this previously printed hiprhi — the hippo+hamster data set is hipham.
    Console.WriteLine("Using the features for \"hippo\" and \"hamster\" as input into reasoner:\r\n" + hipham);
    Console.WriteLine();
    Console.WriteLine("Output from reasoner:");
    //Performs reasoning based on the second input. The conclusions returned from this method will be in the form of a
    //collection of "Chunk Tuples." A chunk tuple is simply just a chunk combined with its associated activation.
    var k = reasoner.NACS.PerformReasoning(hipham);
    //Iterates through the conclusions, looking for the mammal chunk
    foreach (var i in k)
    {
        if (i.CHUNK == chunks[0])
        {
            Console.WriteLine(i.CHUNK);
            Console.WriteLine("The activation of the \"mammal\" chunk based on \"hippo\" and \"hamster\" is: " + Math.Round(i.ACTIVATION, 2));
            act2 = i.ACTIVATION;
        }
        else
        {
            continue;
        }
        Console.WriteLine();
    }
    Console.WriteLine("Which animal combination is a stronger representation of a mammal?");
    if (act1 > act2)
    {
        Console.WriteLine("A hippo and rhino, because they activate the mammal chunk more");
    }
    else
    {
        Console.WriteLine("A hippo and hamster, because they activate the mammal chunk more");
    }
}