internal static DiscreteTimeMarkovChain Create()
{
	// Just a simple DTMC
	// 0⟶1⟲
	var markovChain = new DiscreteTimeMarkovChain(ModelCapacityByMemorySize.Tiny);
	markovChain.StateFormulaLabels = new string[] { Label1Formula.Label, Label2Formula.Label };
	markovChain.StateRewardRetrieverLabels = new string[] { };
	markovChain.StartWithInitialDistribution();
	markovChain.AddInitialTransition(0, 1.0);
	markovChain.FinishInitialDistribution();
	markovChain.SetStateLabeling(1, new StateFormulaSet(new[] { true, false }));
	markovChain.StartWithNewDistribution(1);
	markovChain.AddTransition(1, 1.0);
	markovChain.FinishDistribution();
	markovChain.SetStateLabeling(0, new StateFormulaSet(new[] { false, true }));
	markovChain.StartWithNewDistribution(0);
	markovChain.AddTransition(1, 1.0);
	markovChain.FinishDistribution();
	//markovChain.ProbabilityMatrix.OptimizeAndSeal();
	return markovChain;
}
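// A minimal, self-contained sketch (not part of the original examples; the method name is
// hypothetical). It steps the initial distribution of the simple chain above through its
// transition matrix using plain arrays, illustrating that all probability mass is absorbed in
// state 1 (the Label1 state) after one step, i.e. P(eventually Label1) = 1.
internal static double[] TransientDistributionSketch(int steps)
{
	var transitions = new double[2, 2];
	transitions[0, 1] = 1.0; // state 0 moves to state 1 with probability 1
	transitions[1, 1] = 1.0; // state 1 is absorbing

	var distribution = new[] { 1.0, 0.0 }; // initial distribution: all mass in state 0
	for (var step = 0; step < steps; step++)
	{
		var next = new double[2];
		for (var state = 0; state < 2; state++)
			for (var successor = 0; successor < 2; successor++)
				next[successor] += distribution[state] * transitions[state, successor];
		distribution = next;
	}

	return distribution; // { 0.0, 1.0 } for any steps >= 1
}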
internal static DiscreteTimeMarkovChain Create()
{
	// A DTMC for \phi Until \psi (or in this case Label1 U Label2)
	// 0⟶0.1⟼1⟲
	//   0.2⟼2⟼3⟲
	//   0.7⟼4↗
	// \psi in 3. \phi in 0,2
	var markovChain = new DiscreteTimeMarkovChain(ModelCapacityByMemorySize.Tiny);
	markovChain.StateFormulaLabels = new string[] { Label1Formula.Label, Label2Formula.Label };
	markovChain.StateRewardRetrieverLabels = new string[] { };
	markovChain.StartWithInitialDistribution();
	markovChain.AddInitialTransition(0, 1.0);
	markovChain.FinishInitialDistribution();
	markovChain.SetStateLabeling(0, new StateFormulaSet(new[] { true, false }));
	markovChain.StartWithNewDistribution(0);
	markovChain.AddTransition(1, 0.1);
	markovChain.AddTransition(2, 0.2);
	markovChain.AddTransition(4, 0.7);
	markovChain.FinishDistribution();
	markovChain.SetStateLabeling(1, new StateFormulaSet(new[] { false, false }));
	markovChain.StartWithNewDistribution(1);
	markovChain.AddTransition(1, 1.0);
	markovChain.FinishDistribution();
	markovChain.SetStateLabeling(2, new StateFormulaSet(new[] { true, false }));
	markovChain.StartWithNewDistribution(2);
	markovChain.AddTransition(3, 1.0);
	markovChain.FinishDistribution();
	markovChain.SetStateLabeling(3, new StateFormulaSet(new[] { false, true }));
	markovChain.StartWithNewDistribution(3);
	markovChain.AddTransition(3, 1.0);
	markovChain.FinishDistribution();
	markovChain.SetStateLabeling(4, new StateFormulaSet(new[] { false, false }));
	markovChain.StartWithNewDistribution(4);
	markovChain.AddTransition(3, 1.0);
	markovChain.FinishDistribution();
	//markovChain.ProbabilityMatrix.OptimizeAndSeal();
	return markovChain;
}
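// A minimal, self-contained sketch (not part of the original examples; the method name is
// hypothetical). It cross-checks the expected value of P(Label1 U Label2) for the chain above
// by plain value iteration on arrays, without any model-checker API: state 3 satisfies Label2
// (\psi), states 0 and 2 satisfy Label1 (\phi), and states 1 and 4 satisfy neither, so entering
// them before state 3 violates the Until formula.
internal static double UntilProbabilitySketch()
{
	var transitions = new double[5, 5];
	transitions[0, 1] = 0.1; transitions[0, 2] = 0.2; transitions[0, 4] = 0.7;
	transitions[1, 1] = 1.0;
	transitions[2, 3] = 1.0;
	transitions[3, 3] = 1.0;
	transitions[4, 3] = 1.0;

	var phi = new[] { true, false, true, false, false };
	var psi = new[] { false, false, false, true, false };

	// Standard fixed-point iteration for Until probabilities, starting from the zero vector.
	var probability = new double[5];
	for (var iteration = 0; iteration < 100; iteration++)
	{
		var next = new double[5];
		for (var state = 0; state < 5; state++)
		{
			if (psi[state])
				next[state] = 1.0;
			else if (!phi[state])
				next[state] = 0.0;
			else
				for (var successor = 0; successor < 5; successor++)
					next[state] += transitions[state, successor] * probability[successor];
		}
		probability = next;
	}

	return probability[0]; // converges to 0.2 (only the 0⟼2⟼3 branch satisfies the Until)
}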
internal static DiscreteTimeMarkovChain Create()
{
	// Transformed LabeledTransitionMarkovChain.Example4
	// 0⟶0.6⟼1⟼0.9⟲
	//          0.01⇢3
	//          0.09⇢4
	//   0.3⟼2⟼0.9⇢1
	//          0.01⇢3
	//          0.09⇢4
	//   0.1⟼4⟼3⟲
	var markovChain = new DiscreteTimeMarkovChain(ModelCapacityByMemorySize.Tiny);
	markovChain.StateFormulaLabels = new string[] { Label1Formula.Label, Label2Formula.Label };
	markovChain.StateRewardRetrieverLabels = new string[] { };
	markovChain.StartWithInitialDistribution();
	markovChain.AddInitialTransition(0, 1.0);
	markovChain.FinishInitialDistribution();
	markovChain.SetStateLabeling(0, new StateFormulaSet(new[] { false, false })); // state 1(-) of LabeledTransitionMarkovChainExamples.Example4
	markovChain.StartWithNewDistribution(0);
	markovChain.AddTransition(1, 0.6);
	markovChain.AddTransition(2, 0.3);
	markovChain.AddTransition(4, 0.1);
	markovChain.FinishDistribution();
	markovChain.SetStateLabeling(1, new StateFormulaSet(new[] { false, false })); // state 2(-) of LabeledTransitionMarkovChainExamples.Example4
	markovChain.StartWithNewDistribution(1);
	markovChain.AddTransition(1, 0.9);
	markovChain.AddTransition(3, 0.01);
	markovChain.AddTransition(4, 0.09);
	markovChain.FinishDistribution();
	markovChain.SetStateLabeling(2, new StateFormulaSet(new[] { true, false })); // state 2(lab1) of LabeledTransitionMarkovChainExamples.Example4
	markovChain.StartWithNewDistribution(2);
	markovChain.AddTransition(1, 0.9);
	markovChain.AddTransition(3, 0.01);
	markovChain.AddTransition(4, 0.09);
	markovChain.FinishDistribution();
	markovChain.SetStateLabeling(3, new StateFormulaSet(new[] { false, false })); // state 3(-) of LabeledTransitionMarkovChainExamples.Example4
	markovChain.StartWithNewDistribution(3);
	markovChain.AddTransition(3, 1.0);
	markovChain.FinishDistribution();
	markovChain.SetStateLabeling(4, new StateFormulaSet(new[] { false, true })); // state 3(lab2) of LabeledTransitionMarkovChainExamples.Example4
	markovChain.StartWithNewDistribution(4);
	markovChain.AddTransition(3, 1.0);
	markovChain.FinishDistribution();
	//markovChain.ProbabilityMatrix.OptimizeAndSeal();
	return markovChain;
}
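// A minimal, self-contained sketch (not part of the original examples; the method name is
// hypothetical). It cross-checks P(eventually Label2) for the transformed chain above, i.e. the
// probability of ever visiting state 4, by reachability value iteration on plain arrays. State 3
// never leads to state 4, while the self-loop on state 1 makes the fixed point non-trivial.
internal static double ReachLabel2ProbabilitySketch()
{
	var transitions = new double[5, 5];
	transitions[0, 1] = 0.6; transitions[0, 2] = 0.3;  transitions[0, 4] = 0.1;
	transitions[1, 1] = 0.9; transitions[1, 3] = 0.01; transitions[1, 4] = 0.09;
	transitions[2, 1] = 0.9; transitions[2, 3] = 0.01; transitions[2, 4] = 0.09;
	transitions[3, 3] = 1.0;
	transitions[4, 3] = 1.0;

	// Standard fixed-point iteration for reachability probabilities with target state 4.
	var probability = new double[5];
	for (var iteration = 0; iteration < 200; iteration++)
	{
		var next = new double[5];
		for (var state = 0; state < 5; state++)
		{
			if (state == 4) // the Label2 state is the reachability target
				next[state] = 1.0;
			else
				for (var successor = 0; successor < 5; successor++)
					next[state] += transitions[state, successor] * probability[successor];
		}
		probability = next;
	}

	return probability[0]; // converges to 0.6*0.9 + 0.3*0.9 + 0.1 = 0.91
}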