/// <summary>
///   Writes the Markov decision process to <paramref name="sb" /> in Graphviz "dot" format:
///   an invisible point for the initial distributions, one box per state (labeled with the
///   state number and its formula labels), plus the distributions of every state.
/// </summary>
public static void ExportToGv(this MarkovDecisionProcess mdp, TextWriter sb)
{
	sb.WriteLine("digraph S {");
	//sb.WriteLine("size = \"8,5\"");
	sb.WriteLine("node [shape=box];");

	// Artificial initial state: rendered as an invisible point with its outgoing distributions.
	var initialEnumerator = mdp.GetEnumerator();
	initialEnumerator.SelectInitialDistributions();
	var initialStateName = "initialState";
	sb.WriteLine($" {initialStateName} [shape=point,width=0.0,height=0.0,label=\"\"];");
	ExportDistributionsOfEnumerator(initialEnumerator, initialStateName, sb);

	// Real states: node label is "<state>\n(<label0>,<label1>,...)".
	var stateEnumerator = mdp.GetEnumerator();
	while (stateEnumerator.MoveNextState())
	{
		var state = stateEnumerator.CurrentState;
		sb.Write($" {state} [label=\"{state}\\n(");
		for (var labelIndex = 0; labelIndex < mdp.StateFormulaLabels.Length; labelIndex++)
		{
			if (labelIndex > 0)
				sb.Write(",");
			sb.Write(mdp.StateLabeling[state][labelIndex]);
		}
		sb.WriteLine(")\"];");
		ExportDistributionsOfEnumerator(stateEnumerator, state.ToString(), sb);
	}
	sb.WriteLine("}");
}
/// <summary>
///   Computes the minimum, over all initial distributions, of the expected value of
///   <paramref name="initialStateProbabilities" /> — i.e. the minimal final probability
///   over all schedulers' choices of initial distribution.
/// </summary>
/// <param name="initialStateProbabilities">Per-state probabilities, indexed by state/column.</param>
public double CalculateMinimumFinalProbability(double[] initialStateProbabilities)
{
	var enumerator = MarkovDecisionProcess.GetEnumerator();
	enumerator.SelectInitialDistributions();

	// The first distribution is evaluated unconditionally (exactly as the previous
	// implementation did); every later distribution may only lower the result.
	enumerator.MoveNextDistribution();
	var isFirstDistribution = true;
	var finalProbability = 0.0;
	do
	{
		var weightedSum = 0.0;
		while (enumerator.MoveNextTransition())
		{
			var transition = enumerator.CurrentTransition;
			weightedSum += transition.Value * initialStateProbabilities[transition.Column];
		}
		if (isFirstDistribution || weightedSum < finalProbability)
		{
			finalProbability = weightedSum;
			isFirstDistribution = false;
		}
	} while (enumerator.MoveNextDistribution());
	return finalProbability;
}
/// <summary>
///   Serializes the Markov decision process in prism's input format to a fresh temporary
///   file whose path is kept in <c>_filePrism</c> for the external prism process.
/// </summary>
private void WriteMarkovChainToDisk()
{
	_filePrism = new TemporaryFile("prism");

	// Bug fix: the writer was never disposed, so buffered output could remain unflushed
	// and the file handle stayed open when the external prism process tried to read the
	// file. Disposing flushes and closes the stream deterministically.
	using (var streamPrism = new StreamWriter(_filePrism.FilePath) { NewLine = "\n" })
	{
		MarkovDecisionProcess.ExportToPrism(streamPrism);
	}
}
/// <summary>
///   Computes the minimal probability over all schedulers of reaching a state satisfying
///   <paramref name="psi" /> within <paramref name="steps" /> steps.
/// </summary>
/// <param name="psi">The state formula to reach.</param>
/// <param name="steps">The step bound for the value iteration.</param>
internal double CalculateMinimumProbabilityToReachStateFormulaInBoundedSteps(Formula psi, int steps)
{
	var psiEvaluator = MarkovDecisionProcess.CreateFormulaEvaluator(psi);
	var directlySatisfiedStates = CalculateSatisfiedStates(psiEvaluator);
	var excludedStates = new Dictionary<int, bool>(); // change for \phi Until \psi
	var stateProbabilities = MinimumIterator(directlySatisfiedStates, excludedStates, steps);
	return CalculateMinimumFinalProbability(stateProbabilities);
}
/// <summary>
///   Approximates the minimal probability over all schedulers of eventually reaching a state
///   satisfying <paramref name="psi" />, using prob0e/prob1 precomputation followed by a
///   bounded value iteration.
/// </summary>
/// <param name="psi">The state formula to reach.</param>
internal double CalculateMinimumProbabilityToReachStateFormula(Formula psi)
{
	// Same algorithm as CalculateMinimumProbabilityToReachStateFormulaInBoundedSteps, but the
	// iteration is seeded with the precomputed exactly-zero / exactly-one state sets instead of
	// the raw directlySatisfiedStates and excludedStates.
	var maxSteps = AdjustNumberOfStepsForFactor(50);
	var psiEvaluator = MarkovDecisionProcess.CreateFormulaEvaluator(psi);
	var directlySatisfiedStates = CalculateSatisfiedStates(psiEvaluator);
	var excludedStates = new Dictionary<long, bool>(); // change for \phi Until \psi
	var exactlyZeroStates = StatesReachableWithProbabilityExactlyZeroForAtLeastOneScheduler(directlySatisfiedStates, excludedStates);
	var exactlyOneStates = SubsetOfStatesReachableWithProbabilityExactlyOneWithAllSchedulers(directlySatisfiedStates, excludedStates);
	// The bounded iteration is only an approximation of the unbounded reachability probability.
	var stateProbabilities = MinimumIterator(exactlyOneStates, exactlyZeroStates, maxSteps);
	return CalculateMinimumFinalProbability(stateProbabilities);
}
/// <summary>
///   Approximates the maximal probability over all schedulers of eventually reaching a state
///   satisfying <paramref name="psi" />, using prob0a/prob1e precomputation followed by a
///   bounded value iteration.
/// </summary>
/// <param name="psi">The state formula to reach.</param>
private double CalculateMaximumProbabilityToReachStateFormula(Formula psi)
{
	// same algorithm as CalculateMaximumProbabilityToReachStateFormulaInBoundedSteps with different
	// directlySatisfiedStates and excludedStates
	// Consistency fix: derive the iteration bound via AdjustNumberOfStepsForFactor(50), as the
	// minimum variant (CalculateMinimumProbabilityToReachStateFormula) does, instead of a raw 50 —
	// both approximations should scale their iteration count the same way.
	var maxSteps = AdjustNumberOfStepsForFactor(50);
	var psiEvaluator = MarkovDecisionProcess.CreateFormulaEvaluator(psi);
	var directlySatisfiedStates = CalculateSatisfiedStates(psiEvaluator);
	var excludedStates = new Dictionary <int, bool>(); // change for \phi Until \psi
	var exactlyZeroStates = StatesReachableWithProbabilityExactlyZeroWithAllSchedulers(directlySatisfiedStates, excludedStates);
	var exactlyOneStates = StatesReachableWithProbabilityExactlyOneForAtLeastOneScheduler(directlySatisfiedStates, excludedStates);
	//cannot perform a better pre calculation
	var xnew = MaximumIterator(exactlyOneStates, exactlyZeroStates, maxSteps);
	var finalProbability = CalculateMaximumFinalProbability(xnew);
	return (finalProbability);
}
/// <summary>
///   Builds the underlying digraph of <paramref name="mdp" />: one edge per transition with a
///   probability greater than zero, annotated with the row of the distribution it belongs to.
/// </summary>
public UnderlyingDigraph(MarkovDecisionProcess mdp)
{
	// Assumption "every node is reachable" is fulfilled due to the construction.
	BaseGraph = new BidirectionalGraph<EdgeData>();

	var enumerator = mdp.GetEnumerator();
	while (enumerator.MoveNextState())
	{
		while (enumerator.MoveNextDistribution())
		{
			// Find the targets of this distribution and create the union; some
			// possibleSuccessors may be added more than once.
			while (enumerator.MoveNextTransition())
			{
				var transition = enumerator.CurrentTransition;
				if (!(transition.Value > 0.0))
					continue;
				var edgeData = new EdgeData(enumerator.RowOfCurrentDistribution);
				BaseGraph.AddVerticesAndEdge(new Edge<EdgeData>(enumerator.CurrentState, transition.Column, edgeData));
			}
		}
	}
}
// Note: Should be used with using(var modelchecker = new ...), otherwise the disposed method may be
// executed by the .net framework directly after using _filePrism.FilePath the last time and the
// file deleted before it could be used by the prism process
/// <summary>
///   Creates a model checker that delegates to the external prism tool. The MDP is
///   immediately serialized to a temporary file for prism to consume.
/// </summary>
/// <param name="mdp">The Markov decision process to check.</param>
/// <param name="output">Optional writer for diagnostic output; may be null.</param>
public ExternalMdpModelCheckerPrism(MarkovDecisionProcess mdp, TextWriter output = null)
	: base(mdp, output)
{
	WriteMarkovChainToDisk();
}
/// <summary>
///   Creates a converter that writes the given Markov decision process in prism's input format
///   (see <c>WriteMarkovDecisionProcessToStream</c>).
/// </summary>
/// <param name="mdp">The Markov decision process to convert.</param>
internal MdpToPrism(MarkovDecisionProcess mdp)
{
	_mdp = mdp;
}
/// <summary>
///   Exports <paramref name="mdp" /> in prism's input format to <paramref name="sb" />.
/// </summary>
internal static void ExportToPrism(this MarkovDecisionProcess mdp, TextWriter sb)
{
	new MdpToPrism(mdp).WriteMarkovDecisionProcessToStream(sb);
}
// Note: Should be used with using(var modelchecker = new ...)
/// <summary>
///   Creates a built-in model checker; eagerly precomputes the underlying digraph of the MDP,
///   which the reachability precomputations traverse backwards via its in-edges.
/// </summary>
/// <param name="mdp">The Markov decision process to check.</param>
/// <param name="output">Optional writer for progress output; may be null.</param>
public BuiltinMdpModelChecker(MarkovDecisionProcess mdp, TextWriter output = null)
	: base(mdp, output)
{
	_underlyingDigraph = MarkovDecisionProcess.CreateUnderlyingDigraph();
}
/// <summary>
///   Calculates prob1e: the set of states from which at least one scheduler reaches
///   <paramref name="directlySatisfiedStates" /> with probability exactly 1. The result may be
///   different for another scheduler, but at least one such scheduler exists. This is exact.
/// </summary>
/// <param name="directlySatisfiedStates">States that directly satisfy the formula.</param>
/// <param name="excludedStates">States that are ignored during the backward search.</param>
public Dictionary<long, bool> StatesReachableWithProbabilityExactlyOneForAtLeastOneScheduler(Dictionary<long, bool> directlySatisfiedStates,
																							 Dictionary<long, bool> excludedStates)
{
	// The algorithm works this way: It looks at a set of states probabilityMightBeExactlyOne which initially
	// contains all states. Then it iterates until a fixpoint is found. In each iteration states are removed
	// from probabilityMightBeExactlyOne for which a scheduler _must_ switch to a state where the probability is < 1.
	// The removal process works this way: In each iteration a backwards search is started.
	// A distribution from a predecessor is removed, if not every transition of the distribution leads to a
	// state in probabilityMightBeExactlyOne (Reason: It is possible from there to go to a state where probability < 1).
	// The fixpoint is the result.
	Func<long, bool> nodesToIgnore = excludedStates.ContainsKey;
	var probabilityMightBeExactlyOne = CreateComplement(new Dictionary<long, bool>()); // all states
	var _isDistributionIncludedCache = new Dictionary<long, bool>();
	var mdpEnumerator = MarkovDecisionProcess.GetEnumerator();

	Action resetDistributionIncludedCacheForNewIteration = () =>
	{
		// Only true entries would strictly need to be invalidated, because the set of eligible
		// distributions shrinks monotonically from iteration to iteration. Clearing the whole
		// cache keeps the dictionary small and lookups fast.
		_isDistributionIncludedCache.Clear();
	};

	Func<long, bool> isDistributionIncluded = rowOfDistribution =>
	{
		bool cachedResult;
		if (_isDistributionIncludedCache.TryGetValue(rowOfDistribution, out cachedResult))
			return cachedResult;

		mdpEnumerator.MoveToDistribution(rowOfDistribution);
		var includeDistribution = true;
		while (includeDistribution && mdpEnumerator.MoveNextTransition())
		{
			var targetState = mdpEnumerator.CurrentTransition.Column;
			// If the target state is not in probabilityMightBeExactlyOne, the complete distribution must be removed.
			if (!probabilityMightBeExactlyOne.ContainsKey(targetState))
				includeDistribution = false;
		}
		// Bug fix: the original computed the result but never stored it, so the cache stayed
		// permanently empty and every query re-walked the distribution. Caching is sound within
		// one iteration because probabilityMightBeExactlyOne only changes between iterations,
		// and the cache is cleared at the start of each iteration.
		_isDistributionIncludedCache[rowOfDistribution] = includeDistribution;
		return includeDistribution;
	};

	var fixpointReached = false;
	while (!fixpointReached)
	{
		resetDistributionIncludedCacheForNewIteration();

		// Backward DFS from the directly satisfied states; only in-edges whose complete
		// distribution stays inside probabilityMightBeExactlyOne are followed.
		// Based on DFS: https://en.wikipedia.org/wiki/Depth-first_search
		var ancestorsFound = new Dictionary<long, bool>(); // Note: ancestorsFound must not be reused
		var nodesToTraverse = new Stack<long>();
		foreach (var node in directlySatisfiedStates)
			nodesToTraverse.Push(node.Key);

		while (nodesToTraverse.Count > 0)
		{
			var currentNode = nodesToTraverse.Pop();
			var isIgnored = nodesToIgnore(currentNode);
			var alreadyDiscovered = ancestorsFound.ContainsKey(currentNode);
			if (!(isIgnored || alreadyDiscovered))
			{
				ancestorsFound.Add(currentNode, true);
				foreach (var inEdge in _underlyingDigraph.BaseGraph.InEdges(currentNode))
				{
					if (isDistributionIncluded(inEdge.Data.RowOfDistribution))
						nodesToTraverse.Push(inEdge.Source);
				}
			}
		}

		if (probabilityMightBeExactlyOne.Count == ancestorsFound.Count)
			fixpointReached = true;
		Assert.That(probabilityMightBeExactlyOne.Count >= ancestorsFound.Count, "bug!");
		probabilityMightBeExactlyOne = ancestorsFound;
	}
	return probabilityMightBeExactlyOne;
}
/// <summary>
///   Calculates prob0e: the set of states for which a scheduler exists under which the
///   probability of reaching <paramref name="directlySatisfiedStates" /> is exactly 0.
///   The result may be different for another scheduler. This is exact.
/// </summary>
/// <param name="directlySatisfiedStates">States that directly satisfy the formula.</param>
/// <param name="excludedStates">States that must never be ignored by the search (see nodesToIgnore).</param>
public Dictionary <long, bool> StatesReachableWithProbabilityExactlyZeroForAtLeastOneScheduler(
	Dictionary <long, bool> directlySatisfiedStates, Dictionary <long, bool> excludedStates)
{
	// calculate probabilityExactlyZero (prob0e). There exists a scheduler, for which the probability of
	// the resulting states is zero. The result may be different for another scheduler, but at least there exists one.
	// This is exact
	Dictionary <long, bool> ancestorsFound = null;
	var probabilityGreaterThanZero = directlySatisfiedStates; //we know initially this is satisfied
	var mdpEnumerator = MarkovDecisionProcess.GetEnumerator();
	// The idea of the algorithm is to calculate probabilityGreaterThanZero:
	// all states where a directlySatisfiedState is reached with a probability > 0
	// no matter which scheduler is selected (valid for _all_ adversaries).
	// The complement of probabilityGreaterThanZero is the set of states where a scheduler _exists_ for
	// which the probability to reach a directlySatisfiedState is exactly 0.
	Func <long, bool> nodesToIgnore = source =>
	{
		//nodes found by UpdateAncestors are always SourceNodes of a edge to an ancestor in ancestorsFound
		if (excludedStates.ContainsKey(source))
		{
			return(false); //source must not be ignored
		}
		if (directlySatisfiedStates.ContainsKey(source))
		{
			return(false); //source must not be ignored
		}
		// must not be cached (, because ancestorsFound might change, even in the same iteration)!!!
		// check if _all_ distributions of source contain at least transition to a ancestor in ancestorsFound
		mdpEnumerator.SelectSourceState(source);
		while (mdpEnumerator.MoveNextDistribution())
		{
			var foundInDistribution = false;
			while (mdpEnumerator.MoveNextTransition() && !foundInDistribution)
			{
				if (ancestorsFound.ContainsKey(mdpEnumerator.CurrentTransition.Column))
				{
					foundInDistribution = true;
				}
			}
			if (!foundInDistribution)
			{
				return(true); // the distribution does not have a targetState in ancestorsFound, so source must be ignored
			}
		}
		return(false); //source must not be ignored
	};
	// initialize probabilityGreaterThanZero to the states where we initially know the probability is greater than zero
	var fixpointReached = false;
	while (!fixpointReached)
	{
		// Calculate fix point of probabilityGreaterThanZero
		// Should be finished in one iteration, but I have not proved it yet, so repeat it until fixpoint is reached for sure.
		// (The proof relies on details of the algorithm GetAncestors. Intuition: When a state s was not added to the set of
		// ancestors it is because one distribution d' has no target state in the ancestors found yet. If the state is in the
		// final set of ancestors, the reason is that the state s' of the distribution d', which was responsible for declining
		// s has not yet been added to ancestors. When s' is added all its ancestors are traversed again and s is found.)
		// Note:
		// UpdateAncestors must be used, because nodesToIgnore requires access to the current information about the ancestors
		// (ancestorsFound), if it should work in one iteration.
		ancestorsFound = new Dictionary <long, bool>();
		//Note: We reuse ancestorsFound, which is also known and used by nodesToIgnore. The side effects are on purpose.
		// based on DFS https://en.wikipedia.org/wiki/Depth-first_search
		var nodesToTraverse = new Stack <long>();
		foreach (var node in probabilityGreaterThanZero)
		{
			nodesToTraverse.Push(node.Key);
		}
		while (nodesToTraverse.Count > 0)
		{
			var currentNode = nodesToTraverse.Pop();
			// nodesToIgnore is evaluated against the *current* ancestorsFound on purpose —
			// this is the intra-iteration side effect the notes above describe.
			var isIgnored = nodesToIgnore(currentNode);
			var alreadyDiscovered = ancestorsFound.ContainsKey(currentNode);
			if (!(isIgnored || alreadyDiscovered))
			{
				ancestorsFound.Add(currentNode, true);
				foreach (var inEdge in _underlyingDigraph.BaseGraph.InEdges(currentNode))
				{
					nodesToTraverse.Push(inEdge.Source);
				}
			}
		}
		if (probabilityGreaterThanZero.Count == ancestorsFound.Count)
		{
			fixpointReached = true;
		}
		probabilityGreaterThanZero = ancestorsFound;
	}
	// Everything not reachable-with-positive-probability under all schedulers is prob0e.
	var probabilityExactlyZero = CreateComplement(probabilityGreaterThanZero);
	return(probabilityExactlyZero);
}
/// <summary>
///   Bounded value iteration computing, per state, the maximal probability over all
///   distributions (schedulers) of reaching an exactly-one state within
///   <paramref name="steps" /> steps. Precomputed exactly-one/exactly-zero states keep
///   their fixed values of 1.0 and 0.0 throughout.
/// </summary>
/// <param name="exactlyOneStates">States whose probability is pinned to 1.0.</param>
/// <param name="exactlyZeroStates">States whose probability is pinned to 0.0.</param>
/// <param name="steps">Number of iterations to run.</param>
/// <returns>The per-state probability vector after the final iteration.</returns>
internal double[] MaximumIterator(Dictionary <long, bool> exactlyOneStates, Dictionary <long, bool> exactlyZeroStates, int steps)
{
	var stopwatch = new Stopwatch();
	stopwatch.Start();
	var stateCount = MarkovDecisionProcess.States;
	var enumerator = MarkovDecisionProcess.GetEnumerator();
	var xold = new double[stateCount];
	var xnew = CreateDerivedVector(exactlyOneStates);
	var loops = 0;
	while (loops < steps)
	{
		// switch xold and xnew
		var xtemp = xold;
		xold = xnew;
		xnew = xtemp;
		loops++;
		for (var i = 0; i < stateCount; i++)
		{
			if (exactlyOneStates.ContainsKey(i))
			{
				//we could remove this line, because already set by CreateDerivedVector and never changed when we initialize xold with CreateDerivedVector(directlySatisfiedStates)
				xnew[i] = 1.0;
			}
			else if (exactlyZeroStates.ContainsKey(i))
			{
				//we could remove this line, because already set by CreateDerivedVector and never changed when we initialize xold with CreateDerivedVector(directlySatisfiedStates)
				xnew[i] = 0.0;
			}
			else
			{
				// Bellman backup: take the maximum weighted sum over all distributions of state i.
				enumerator.SelectSourceState(i);
				//select sum of first distribution
				enumerator.MoveNextDistribution();
				var sum = 0.0;
				while (enumerator.MoveNextTransition())
				{
					var entry = enumerator.CurrentTransition;
					sum += entry.Value * xold[entry.Column];
				}
				xnew[i] = sum;
				//now find a larger one
				while (enumerator.MoveNextDistribution())
				{
					sum = 0.0;
					while (enumerator.MoveNextTransition())
					{
						var entry = enumerator.CurrentTransition;
						sum += entry.Value * xold[entry.Column];
					}
					if (sum > xnew[i])
					{
						xnew[i] = sum;
					}
				}
			}
		}
		if (loops % 10 == 0)
		{
			// Progress report every 10 iterations; the stopwatch is paused so the
			// reporting overhead is not counted as iteration time.
			stopwatch.Stop();
			var currentProbability = CalculateMaximumFinalProbability(xnew);
			_output?.WriteLine(
				$"{loops} Bounded Until iterations in {stopwatch.Elapsed}. Current probability={currentProbability.ToString(CultureInfo.InvariantCulture)}");
			stopwatch.Start();
		}
	}
	stopwatch.Stop();
	return(xnew);
}
/// <summary>
///   Creates an enumerator over the states, distributions, and transitions of
///   <paramref name="mdp" />, positioned before the first element via <c>Reset</c>.
/// </summary>
/// <param name="mdp">The Markov decision process to enumerate.</param>
public MarkovDecisionProcessEnumerator(MarkovDecisionProcess mdp)
{
	_mdp = mdp;
	_matrixEnumerator = mdp.RowsWithDistributions.GetEnumerator();
	Reset();
}
// Note: Should be used with using(var modelchecker = new ...)
/// <summary>
///   Base-class initialization for MDP model checkers: stores the process to check and the
///   optional output writer used for diagnostics.
/// </summary>
/// <param name="mdp">The Markov decision process to check.</param>
/// <param name="output">Optional writer for diagnostic output; may be null.</param>
protected MdpModelChecker(MarkovDecisionProcess mdp, TextWriter output = null)
{
	MarkovDecisionProcess = mdp;
	_output = output;
}