/// <summary>
/// Builds the reward-display payload for a granted reward and dispatches the AddReward event.
/// </summary>
/// <param name="way">Source/way code of the grant.</param>
/// <param name="rewarID">Reward config id.</param>
/// <param name="num">Grant multiplier applied to item and currency amounts.</param>
/// <param name="randomItemDatas">Already-rolled random items, or null when none; reused as the display list.</param>
public virtual void onAddReward(int way, int rewarID, int num, SList<ItemData> randomItemDatas)
{
    RewardShowData showData = new RewardShowData();
    showData.config = RewardConfig.get(rewarID);

    // Reuse the caller's random-item list when present; otherwise start from an empty one.
    SList<ItemData> shownItems = randomItemDatas != null ? randomItemDatas : new SList<ItemData>();
    showData.items = shownItems;

    // Append the reward's fixed items, scaled by the grant count.
    foreach (DIntData fixedItem in showData.config.items)
    {
        shownItems.add(createItem(fixedItem.key, fixedItem.value * num));
    }

    // Accumulate the currency portion, also scaled by the grant count.
    showData.currency = new IntIntMap();
    foreach (var cur in showData.config.currency)
    {
        showData.currency.addValue(cur.key, cur.value * num);
    }

    me.dispatch(GameEventType.AddReward, showData);
}
/// <summary>
/// Checks whether the bag has enough free grid slots to hold the given reward.
/// </summary>
/// <param name="rewardID">Reward config id.</param>
/// <param name="num">Number of times the reward is granted (default 1).</param>
/// <returns>True when enough free grids exist for the whole reward.</returns>
public bool hasRewardPlace(int rewardID, int num = 1)
{
    RewardConfig config = RewardConfig.get(rewardID);

    // No random part: sum the grids needed by each fixed item.
    if (config.randomListID <= 0)
    {
        int n = 0;
        int sMax;
        foreach (DIntData v in config.items)
        {
            if ((sMax = ItemConfig.get(v.key).singlePlusMax) > 0)
            {
                // Ceiling division: grids needed to stack v.value * num pieces
                // at sMax pieces per grid.
                n += (v.value * num + sMax - 1) / sMax;
            }
            else
            {
                // No per-grid stack limit: a single grid suffices.
                n += 1;
            }
        }
        return hasFreeGrid(n);
    }
    else
    {
        // Random rewards declare their grid requirement directly.
        // Fix: reuse the already-fetched config instead of a redundant
        // second RewardConfig.get(rewardID) lookup.
        return hasFreeGrid(config.needGrid * num);
    }
}
/// <summary>
/// Applies a refreshed reward configuration and/or ad-unit id,
/// ignoring an invalid config and an empty ad-unit string.
/// </summary>
/// <param name="respConfig">Replacement reward config; applied only when non-null and valid.</param>
/// <param name="newRewardAdUnit">Replacement ad-unit id; applied only when non-empty.</param>
public void UpdateReward(RewardConfig respConfig, string newRewardAdUnit)
{
    bool configUsable = respConfig != null && respConfig.IsValid();
    if (configUsable)
    {
        this.rewardConfig = respConfig;
    }

    bool adUnitUsable = !string.IsNullOrEmpty(newRewardAdUnit);
    if (adUnitUsable)
    {
        this.rewardAdUnit = newRewardAdUnit;
    }
}
/// <summary>
/// Adds the reward adapted to the given level, failing when the bag lacks space.
/// </summary>
/// <param name="rewardID">Reward config id.</param>
/// <param name="level">Level used for level-adapted amounts.</param>
/// <param name="num">Grant multiplier.</param>
/// <param name="way">Source/way code of the grant.</param>
/// <returns>False when there is no room for the reward; otherwise the add result.</returns>
public bool addRewardLevel(int rewardID, int level, int num, int way)
{
    // No room in the bag -> reject without touching anything.
    if (!hasRewardPlace(rewardID, num))
    {
        return false;
    }

    return doAddRewardLevel(RewardConfig.get(rewardID), level, num, false, way);
}
/// <summary>
/// Evaluates a single role condition used by item actions.
/// </summary>
/// <param name="args">Condition arguments; args[0] is the action type, args[1] a reward id for AddReward.</param>
/// <param name="num">Multiplier for the action.</param>
/// <returns>False only when an AddReward action lacks free bag grids; true otherwise.</returns>
public virtual bool checkOneRoleConditionForAction(int[] args, int num)
{
    if (args[0] == RoleActionType.AddReward)
    {
        // Only the free-grid requirement is checked for reward actions.
        RewardConfig rewardConfig = RewardConfig.get(args[1]);
        return me.bag.hasFreeGrid(rewardConfig.needGrid * num);
    }

    // Unrecognized action types pass by default.
    return true;
}
/// <summary>
/// Adds the reward unconditionally (overflow goes to mail when the bag is full).
/// </summary>
/// <param name="rewardID">Reward config id.</param>
/// <param name="level">Level used for level-adapted amounts.</param>
/// <param name="num">Grant multiplier.</param>
/// <param name="way">Source/way code of the grant.</param>
public void addRewardLevelAbs(int rewardID, int level, int num, int way)
{
    doAddRewardLevel(RewardConfig.get(rewardID), level, num, true, way);
}
/// <summary>
/// Executes adding a reward: fixed items, rolled random items, currency, and role exp.
/// </summary>
/// <param name="config">Reward configuration to grant.</param>
/// <param name="level">Role level used when the reward is level-adapted.</param>
/// <param name="num">Multiplier applied to item/currency/exp amounts (see note on the level-adapted branches).</param>
/// <param name="isAbs">True: add unconditionally; false: item adds may fail (space is assumed pre-checked).</param>
/// <param name="way">Source/way code recorded with the grant.</param>
/// <returns>True on success; false only in the non-abs path when adding items unexpectedly fails.</returns>
protected bool doAddRewardLevel(RewardConfig config, int level, int num, bool isAbs, int way)
{
    if (isAbs)
    {
        addItemsAbs(config.items, num, way);
    }
    else
    {
        // Add the fixed items; the caller pre-checked space, so failure here is abnormal.
        if (!addItems(config.items, num, way))
        {
            me.throwError("严重错误,已经判定格子够后,还出现添加物品失败");
            return(false);
        }
    }

    // Random reward part: roll the random list into the shared temp buffer.
    // NOTE(review): _tempItems is a shared field; this assumes no reentrancy — confirm.
    if (config.randomListID > 0)
    {
        randomItemList(_tempItems, config.randomListID, num);

        if (isAbs)
        {
            addItemsAbs(_tempItems, way);
        }
        else
        {
            if (!addItems(_tempItems, way))
            {
                // Failure on the rolled items: discard the roll before erroring out.
                _tempItems.clear();
                me.throwError("严重错误,已经判定格子够后,还出现添加物品失败");
                return(false);
            }
        }
    }

    // Currency part.
    if (config.currency.Length > 0)
    {
        if (config.needLevelAdapter)
        {
            int len = config.currency.Length;
            for (int i = 0; i < len; i++)
            {
                // Scale each currency amount by level using its per-entry formula.
                // NOTE(review): the num multiplier is not applied in this branch — confirm intended.
                int v = BaseC.logic.calculateRewardLevel(config.currency[i].value, level, config.currencyLevelFormula[i]);
                me.role.addCurrencyAbs(config.currency[i].key, v, way);
            }
        }
        else
        {
            me.role.addCurrenciesAbs(config.currency, num, way);
        }
    }

    // Role experience part, optionally level-adapted.
    if (config.roleExp > 0)
    {
        if (config.needLevelAdapter)
        {
            long v = BaseC.logic.calculateRewardLevel(config.roleExp, level, config.roleExpLevelFormula);
            me.role.addExp(v, way);
        }
        else
        {
            me.role.addExp(config.roleExp, way);
        }
    }

    // Notify listeners, passing the rolled random items (null when none were rolled),
    // then release the shared buffer.
    onAddReward(way, config.id, num, _tempItems.isEmpty() ? null : _tempItems);
    _tempItems.clear();
    return(true);
}
/// <summary>
/// Value iteration computing the minimal expected accumulated reward from the
/// initial state to the target states of this MDP.
/// </summary>
/// <param name="verificationOutput">Collects statistics (MDP iteration count).</param>
/// <param name="reward">Supplies per-event reward values via EventToRewardMapping.</param>
/// <returns>
/// Rounded minimal expected reward, or PositiveInfinity when the initial state
/// does not reach the targets with (maximal) probability one.
/// </returns>
public double MinRewardWork(VerificationOutput verificationOutput, RewardConfig reward)
{
    //note here should calculate MaxProbNotZero instead of MinProbNotZero
    //HashSet<MDPState> maxProbNotZero = new HashSet<MDPState>(MaxProbNotZero());

    // States reaching the targets with maximal probability 1; expected reward
    // is finite only over these.
    HashSet<MDPState> maxProbOne = new HashSet<MDPState>(this.maxProbOne());
    if (!maxProbOne.Contains(InitState))
        return double.PositiveInfinity;

    // Backward iteration seeded from the target states.
    HashSet<MDPState> working = new HashSet<MDPState>(TargetStates);
    HashSet<MDPState> visited = new HashSet<MDPState>(TargetStates);
    double maxDifference = 1;

    // Iterate until the relative change is below MAX_DIFFERENCE and every
    // relevant state has been visited at least once.
    while (maxDifference > MAX_DIFFERENCE || visited.Count < maxProbOne.Count)
    {
        verificationOutput.MDPIterationNumber++;
        maxDifference = 0;

        //get the nodes which should be re-calculated.
        HashSet<MDPState> newWorking = new HashSet<MDPState>();
        foreach (MDPState state in working)
        {
            foreach (MDPState mdpState in state.Pre)
            {
                //if (nonSafe.Contains(mdpState)) //note changed here
                //{
                newWorking.Add(mdpState);
                //}
            }
        }

        visited.UnionWith(newWorking);
        List<MDPState> toRemove = new List<MDPState>();

        foreach (MDPState node in newWorking)
        {
            // Minimum over the node's distributions (nondeterministic choices)
            // of the probability-weighted one-step reward plus successor value.
            double newMinReward = double.PositiveInfinity;

            foreach (Distribution distribution in node.Distributions)
            {
                double result = 0;
                //bool hasNewValues = false;
                foreach (KeyValuePair<double, MDPState> pair in distribution.States)
                {
                    if (!maxProbOne.Contains(pair.Value))
                    {
                        // Successor may miss the targets: this choice's
                        // expected reward is infinite.
                        result = double.PositiveInfinity;
                    }
                    else
                    {
                        KeyValuePair<Expression, double> value;
                        if (reward.EventToRewardMapping.TryGetValue(distribution.Event, out value))
                        {
                            // Event carries a reward: weight (event reward + successor value) by probability.
                            result += (value.Value + pair.Value.CurrentReward) * pair.Key;
                        }
                        else
                        {
                            result += pair.Value.CurrentReward * pair.Key;
                        }
                    }
                    //hasNewValues = true;
                }
                //if (hasNewValues)
                //{
                newMinReward = Math.Min(newMinReward, result);
                //}
            }

            if (node.CurrentReward < newMinReward) //+ node.StateReward
            {
                // NOTE(review): relative difference divides by CurrentReward,
                // which yields Infinity while CurrentReward is still 0,
                // forcing another sweep — confirm this is intended.
                maxDifference = Math.Max(maxDifference, (newMinReward - node.CurrentReward) / node.CurrentReward);///relative difference + node.StateReward
                node.CurrentReward = newMinReward; ;// + node.StateReward
            }
            else if (node.CurrentReward != 0)
            {
                // Converged non-zero node: stop propagating from it this sweep.
                toRemove.Add(node);
            }
        }

        foreach (MDPState i in toRemove)
        {
            newWorking.Remove(i);
        }

        working = newWorking;
    }

    //return Ultility.Ultility.RoundProbWithPrecision(InitState.CurrentProb, Precision);
    return Ultility.Ultility.RoundProbWithPrecision(InitState.CurrentReward, Precision);
}
/// <summary>
/// Minimal expected accumulated reward to reach a target state:
/// infinite when no targets exist, zero when the initial state is already a target,
/// otherwise delegated to the value-iteration worker.
/// </summary>
/// <param name="VerificationOutput">Collects verification statistics.</param>
/// <param name="reward">Event-to-reward mapping.</param>
/// <returns>The minimal expected reward from the initial state.</returns>
public double MinReward(VerificationOutput VerificationOutput, RewardConfig reward)
{
    if (TargetStates.Count == 0)
    {
        // Nothing to reach: the reward to a target is unbounded.
        return double.PositiveInfinity;
    }

    return TargetStates.Contains(InitState)
        ? 0
        : MinRewardWork(VerificationOutput, reward);
}
/// <summary>
/// Value iteration computing the maximal expected accumulated reward from the
/// initial state to the target states of this MDP.
/// </summary>
/// <param name="verificationOutput">Collects statistics (MDP iteration count).</param>
/// <param name="reward">Supplies per-event reward values via EventToRewardMapping.</param>
/// <returns>
/// Rounded maximal expected reward, or PositiveInfinity when any reachable
/// relevant state fails to reach the targets with minimal probability one.
/// </returns>
public double MaxRewardWork(VerificationOutput verificationOutput, RewardConfig reward)
{
    //note here should calculate MinProbNotZero instead of MaxProbNotZero
    //note to test the efficiency of MaxPorbNotZero and MaxProbNotZero
    //HashSet<MDPState> minProbNotZero = new HashSet<MDPState>(MinProbNotZero());

    //note here calculate the states whose minimal prob to targets are 1;
    HashSet<MDPState> minProbOne = new HashSet<MDPState>(this.minProbOne());

    //if Initial state is not in MinProbNotZero, then return Rmax = infinity
    if (!minProbOne.Contains(InitState))
    {
        return double.PositiveInfinity;
    }

    // Backward iteration seeded from the target states.
    HashSet<MDPState> working = new HashSet<MDPState>(TargetStates);
    HashSet<MDPState> visited = new HashSet<MDPState>(TargetStates);
    double maxDifference = 1;
    //int counter = 0;

    // Iterate until the relative change is below MAX_DIFFERENCE and every
    // relevant state has been visited at least once.
    while (maxDifference > MAX_DIFFERENCE || visited.Count < minProbOne.Count)
    {
        //counter++;
        verificationOutput.MDPIterationNumber++;
        maxDifference = 0;

        //get the nodes which should be re-calculated.
        HashSet<MDPState> newWorking = new HashSet<MDPState>();
        foreach (MDPState state in working)
        {
            foreach (MDPState mdpState in state.Pre)
            {
                //if a pre-state is in minProbZero, then Rmax must be infinity because from initial state there is a finite trace to this state.
                if (!minProbOne.Contains(mdpState))
                {
                    return double.PositiveInfinity;
                }

                newWorking.Add(mdpState);
            }
        }

        visited.UnionWith(newWorking);
        List<MDPState> toRemove = new List<MDPState>();

        foreach (MDPState node in newWorking)
        {
            // Maximum over the node's distributions (nondeterministic choices)
            // of the probability-weighted one-step reward plus successor value.
            double newMaxReward = 0;

            foreach (Distribution distribution in node.Distributions)
            {
                double result = 0;
                bool hasNewValues = false;
                foreach (KeyValuePair<double, MDPState> pair in distribution.States)
                {
                    //if there is a state which is not in nonsafe, which means from that state we cannot arrive the target states, then the result should be infinity.
                    if (!minProbOne.Contains(pair.Value))
                    {
                        return double.PositiveInfinity;
                    }

                    KeyValuePair<Expression, double> value;
                    if (reward.EventToRewardMapping.TryGetValue(distribution.Event, out value))
                    {
                        // Event carries a reward: weight (event reward + successor value) by probability.
                        result += (value.Value + pair.Value.CurrentReward) * pair.Key;
                    }
                    else
                    {
                        result += pair.Value.CurrentReward * pair.Key;
                    }

                    hasNewValues = true;
                }

                if (hasNewValues)
                {
                    newMaxReward = Math.Max(newMaxReward, result);
                }
            }

            if (node.CurrentReward < newMaxReward)
            {
                // NOTE(review): relative difference divides by CurrentReward,
                // which yields Infinity while CurrentReward is still 0,
                // forcing another sweep — confirm this is intended.
                maxDifference = Math.Max(maxDifference, (newMaxReward - node.CurrentReward) / node.CurrentReward);///relative difference
                node.CurrentReward = newMaxReward;
            }
            else if (node.CurrentReward != 0)
            {
                // Converged non-zero node: stop propagating from it this sweep.
                toRemove.Add(node);
            }
        }

        foreach (MDPState i in toRemove)
        {
            newWorking.Remove(i);
        }

        working = newWorking;
    }

    return Ultility.Ultility.RoundProbWithPrecision(InitState.CurrentReward, Precision);
}