Example 1
        /// <summary>
        /// Executes counterfactual thinking about the agent's most important goal for a specific site
        /// </summary>
        /// <param name="agent">The agent.</param>
        /// <param name="lastIteration">The last iteration.</param>
        /// <param name="goal">The goal.</param>
        /// <param name="matched">The matched decision options.</param>
        /// <param name="layer">The layer.</param>
        /// <param name="site">The site.</param>
        /// <returns>The agent's confidence in the selected goal after counterfactual thinking.</returns>
        public bool Execute(IAgent agent, LinkedListNode<Dictionary<IAgent, AgentState<TSite>>> lastIteration, Goal goal,
                            DecisionOption[] matched, DecisionOptionLayer layer, TSite site)
        {
            AgentState<TSite> priorIterationAgentState = lastIteration.Previous.Value[agent];

            selectedGoal = goal;

            selectedGoalState            = lastIteration.Value[agent].GoalsState[selectedGoal];
            selectedGoalState.Confidence = false;

            DecisionOptionsHistory history = priorIterationAgentState.DecisionOptionsHistories[site];


            activatedDecisionOption = history.Activated.FirstOrDefault(r => r.Layer == layer);

            anticipatedInfluences = agent.AnticipationInfluence;

            matchedDecisionOptions = matched;

            SpecificLogic(selectedGoal.Tendency);


            return selectedGoalState.Confidence;
        }
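
A minimal sketch of a call site for the method above; the process instance counterfactualThinking and the surrounding variables are illustrative assumptions, not part of the snippet:

            // hypothetical wiring: run counterfactual thinking for the agent's most
            // important goal on this site and branch on the returned confidence
            bool confidence = counterfactualThinking.Execute(agent, lastIteration, goal, matched, layer, site);

            if (!confidence)
            {
                // confidence was not restored; a caller would typically continue
                // with another process (e.g. innovation), which is assumed here
            }
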
        /// <summary>
        /// Executes the second part of action selection for a specific site
        /// </summary>
        /// <param name="agent">The agent.</param>
        /// <param name="lastIteration">The last iteration.</param>
        /// <param name="rankedGoals">The goals ranked per agent.</param>
        /// <param name="processedDecisionOptions">The decision options processed on this layer.</param>
        /// <param name="site">The site.</param>
        public void ExecutePartII(IAgent agent, LinkedListNode<Dictionary<IAgent, AgentState>> lastIteration, Dictionary<IAgent, Goal[]> rankedGoals, DecisionOption[] processedDecisionOptions, Site site)
        {
            AgentState agentState = lastIteration.Value[agent];

            DecisionOptionsHistory history = agentState.DecisionOptionsHistories[site];

            DecisionOptionLayer layer = processedDecisionOptions.First().Layer;


            DecisionOption selectedDecisionOptions = history.Activated.SingleOrDefault(r => r.Layer == layer);

            if (selectedDecisionOptions == null)
            {
                return;
            }

            if (selectedDecisionOptions.IsCollectiveAction)
            {
                var scope = selectedDecisionOptions.Scope;

                //count the connected agents that also activated this decision option;
                //the null check must short-circuit before scope is used as an index
                int numberOfInvolvedAgents = agent.ConnectedAgents.Where(connected => scope == null || agent[scope] == connected[scope])
                                             .Count(a => lastIteration.Value[a].DecisionOptionsHistories[site].Activated.Any(decisionOption => decisionOption == selectedDecisionOptions));

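                //the current agent presumably counts as one participant itself, hence the -1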
                int requiredParticipants = selectedDecisionOptions.RequiredParticipants - 1;

                //not enough participants: block this decision option and re-run action selection
                if (numberOfInvolvedAgents < requiredParticipants)
                {
                    history.Blocked.Add(selectedDecisionOptions);

                    history.Activated.Remove(selectedDecisionOptions);

                    ExecutePartI(agent, lastIteration, rankedGoals, processedDecisionOptions, site);

                    ExecutePartII(agent, lastIteration, rankedGoals, processedDecisionOptions, site);
                }
            }
        }
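
A sketch of how the two parts of action selection might be driven together; the actionSelection instance is assumed, and ExecutePartI's signature is inferred from the recursive calls above:

            // hypothetical wiring: part I selects decision options, part II checks
            // collective actions and may block an option and re-run both parts
            actionSelection.ExecutePartI(agent, lastIteration, rankedGoals, processedDecisionOptions, site);
            actionSelection.ExecutePartII(agent, lastIteration, rankedGoals, processedDecisionOptions, site);
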
Example 3
        /// <summary>
        /// Executes the agent innovation process for a specific site
        /// </summary>
        /// <param name="agent">The agent.</param>
        /// <param name="lastIteration">The last iteration.</param>
        /// <param name="goal">The goal.</param>
        /// <param name="layer">The layer.</param>
        /// <param name="site">The site.</param>
        /// <param name="probabilities">The probabilities.</param>
        /// <exception cref="Exception">Not implemented for AnticipatedDirection == 'stay'</exception>
        public void Execute(IAgent agent, LinkedListNode<Dictionary<IAgent, AgentState<TSite>>> lastIteration, Goal goal,
                            DecisionOptionLayer layer, TSite site, Probabilities probabilities)
        {
            Dictionary<IAgent, AgentState<TSite>> currentIteration = lastIteration.Value;
            Dictionary<IAgent, AgentState<TSite>> priorIteration   = lastIteration.Previous.Value;

            //get the decision option activated on this layer in the prior period
            DecisionOptionsHistory history = priorIteration[agent].DecisionOptionsHistories[site];
            DecisionOption protDecisionOption = history.Activated.FirstOrDefault(r => r.Layer == layer);

            LinkedListNode<Dictionary<IAgent, AgentState<TSite>>> tempNode = lastIteration.Previous;

            //if no decision option was activated in the prior period, look back through earlier iterations for one
            while (protDecisionOption == null && tempNode.Previous != null)
            {
                tempNode = tempNode.Previous;

                history = tempNode.Value[agent].DecisionOptionsHistories[site];

                protDecisionOption = history.Activated.FirstOrDefault(r => r.Layer == layer);
            }

            //if no activated decision option was found, or it is not among the agent's assigned options, select a random assigned option from this layer
            if (protDecisionOption == null || !agent.AssignedDecisionOptions.Contains(protDecisionOption))
            {
                protDecisionOption = agent.AssignedDecisionOptions.Where(a => a.Layer == layer)
                                     .RandomizeOne();
            }

            //if the layer is modifiable, or the prior period decision option itself is modifiable, generate a new decision option
            if (layer.LayerConfiguration.Modifiable || protDecisionOption.IsModifiable)
            {
                DecisionOptionLayerConfiguration parameters = layer.LayerConfiguration;

                Goal selectedGoal = goal;

                GoalState selectedGoalState = lastIteration.Value[agent].GoalsState[selectedGoal];

                #region Generating consequent
                int min = parameters.MinValue(agent);
                int max = parameters.MaxValue(agent);

                double consequentValue = string.IsNullOrEmpty(protDecisionOption.Consequent.VariableValue)
                    ? protDecisionOption.Consequent.Value
                    : agent[protDecisionOption.Consequent.VariableValue];

                double newConsequent = consequentValue;

                ExtendedProbabilityTable<int> probabilityTable =
                    probabilities.GetExtendedProbabilityTable<int>(SosielProbabilityTables.GeneralProbabilityTable);

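                //the smallest step expressible at the configured precision (e.g. 0.01 for two digits)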
                double minStep = Math.Pow(0.1d, parameters.ConsequentPrecisionDigitsAfterDecimalPoint);

                switch (selectedGoalState.AnticipatedDirection)
                {
                case AnticipatedDirection.Up:
                {
                    if (DecisionOptionLayerConfiguration.ConvertSign(parameters.ConsequentRelationshipSign[goal.Name]) == ConsequentRelationship.Positive)
                    {
                        if (consequentValue == max)
                        {
                            return;
                        }

                        newConsequent = probabilityTable.GetRandomValue(consequentValue + minStep, max, false);
                    }
                    if (DecisionOptionLayerConfiguration.ConvertSign(parameters.ConsequentRelationshipSign[goal.Name]) == ConsequentRelationship.Negative)
                    {
                        if (consequentValue == min)
                        {
                            return;
                        }

                        newConsequent = probabilityTable.GetRandomValue(min, consequentValue - minStep, true);
                    }

                    break;
                }

                case AnticipatedDirection.Down:
                {
                    if (DecisionOptionLayerConfiguration.ConvertSign(parameters.ConsequentRelationshipSign[goal.Name]) == ConsequentRelationship.Positive)
                    {
                        if (consequentValue == min)
                        {
                            return;
                        }

                        newConsequent = probabilityTable.GetRandomValue(min, consequentValue - minStep, true);
                    }
                    if (DecisionOptionLayerConfiguration.ConvertSign(parameters.ConsequentRelationshipSign[goal.Name]) == ConsequentRelationship.Negative)
                    {
                        if (consequentValue == max)
                        {
                            return;
                        }

                        newConsequent = probabilityTable.GetRandomValue(consequentValue + minStep, max, false);
                    }

                    break;
                }

                default:
                {
                    throw new Exception("Not implemented for AnticipatedDirection == 'stay'");
                }
                }

                newConsequent = Math.Round(newConsequent, parameters.ConsequentPrecisionDigitsAfterDecimalPoint);

                DecisionOptionConsequent consequent = DecisionOptionConsequent.Renew(protDecisionOption.Consequent, newConsequent);
                #endregion


                #region Generating antecedent
                List<DecisionOptionAntecedentPart> antecedentList = new List<DecisionOptionAntecedentPart>(protDecisionOption.Antecedent.Length);

                bool isTopLevelDO = protDecisionOption.Layer.PositionNumber == 1;

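                //top-level decision options keep their original antecedent constants, while deeper layers copy the agent's current variable values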
                foreach (DecisionOptionAntecedentPart antecedent in protDecisionOption.Antecedent)
                {
                    dynamic newConst = isTopLevelDO ? antecedent.Value : agent[antecedent.Param];

                    DecisionOptionAntecedentPart newAntecedent = DecisionOptionAntecedentPart.Renew(antecedent, newConst);

                    antecedentList.Add(newAntecedent);
                }
                #endregion

                DecisionOption newDecisionOption = DecisionOption.Renew(protDecisionOption, antecedentList.ToArray(), consequent);


                //adjust the base anticipated influence (AI) values for the new decision option
                double consequentChangeProportion;
                if (consequentValue == 0)
                {
                    consequentChangeProportion = 0;
                }
                else
                {
                    consequentChangeProportion = Math.Abs(newDecisionOption.Consequent.Value - consequentValue) / consequentValue;
                }

                Dictionary<Goal, double> baseAI = agent.AnticipationInfluence[protDecisionOption];

                Dictionary<Goal, double> proportionalAI = new Dictionary<Goal, double>();

                agent.AssignedGoals.ForEach(g =>
                {
                    double ai = baseAI[g];

                    double difference = ai * consequentChangeProportion;

                    switch (selectedGoalState.AnticipatedDirection)
                    {
                    case AnticipatedDirection.Up:
                        {
                            if (ai >= 0)
                            {
                                ai += difference;
                            }
                            else
                            {
                                ai -= difference;
                            }

                            break;
                        }

                    case AnticipatedDirection.Down:
                        {
                            if (ai >= 0)
                            {
                                ai -= difference;
                            }
                            else
                            {
                                ai += difference;
                            }

                            break;
                        }
                    }

                    proportionalAI.Add(g, ai);
                });


                //add the generated decision option to the prototype's mental model and assign it to the agent's mental model
                if (agent.Prototype.IsSimilarDecisionOptionExists(newDecisionOption) == false)
                {
                    //add to the prototype and assign to current agent
                    agent.AddDecisionOption(newDecisionOption, layer, proportionalAI);
                }
                else if (agent.AssignedDecisionOptions.Any(decisionOption => decisionOption == newDecisionOption) == false)
                {
                    var existingDecisionOption = agent.Prototype.DecisionOptions.FirstOrDefault(h => h == newDecisionOption);

                    //assign to the current agent only
                    agent.AssignNewDecisionOption(existingDecisionOption, proportionalAI);
                }


                if (layer.Set.Layers.Count > 1)
                {
                    //propagate the consequent to the agent's variables for use by subsequent layers
                    newDecisionOption.Apply(agent);
                }
            }
        }
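
To make the consequent generation concrete: with ConsequentPrecisionDigitsAfterDecimalPoint = 2, minStep is 0.01, so for AnticipatedDirection.Up with a positive consequent relationship and bounds [0, 10], a current consequent of 5.0 is replaced by a draw from [5.01, 10]. A usage sketch, assuming a hypothetical innovation process instance:

            // hypothetical call: perturb the prior period's activated decision option
            // on this layer and add or assign the resulting new option
            innovation.Execute(agent, lastIteration, goal, layer, site, probabilities);
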
Example 4
        /// <summary>
        /// Executes the social learning process of the current agent for a specific decision option layer
        /// </summary>
        /// <param name="agent">The agent.</param>
        /// <param name="lastIteration">The last iteration.</param>
        /// <param name="layer">The layer.</param>
        public void ExecuteLearning(IAgent agent, LinkedListNode<Dictionary<IAgent, AgentState<TSite>>> lastIteration, DecisionOptionLayer layer)
        {
            Dictionary<IAgent, AgentState<TSite>> priorIterationState = lastIteration.Previous.Value;

            agent.ConnectedAgents.Randomize().ForEach(neighbour =>
            {
                AgentState<TSite> priorIteration;
                if (!priorIterationState.TryGetValue(neighbour, out priorIteration))
                {
                    return;
                }

                IEnumerable<DecisionOption> activatedDecisionOptions = priorIteration.DecisionOptionsHistories
                                                                       .SelectMany(rh => rh.Value.Activated).Where(r => r.Layer == layer);

                activatedDecisionOptions.ForEach(decisionOption =>
                {
                    if (agent.AssignedDecisionOptions.Contains(decisionOption) == false)
                    {
                        agent.AssignNewDecisionOption(decisionOption, neighbour.AnticipationInfluence[decisionOption]);
                    }
                });
            });
        }
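
A usage sketch, assuming a hypothetical socialLearning process instance run once per agent and layer inside an iteration loop:

            // hypothetical call: adopt the decision options that neighbours activated
            // on this layer in the prior iteration and that this agent lacks
            socialLearning.ExecuteLearning(agent, lastIteration, layer);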