Example #1
 public static Uncertain<TResult> SelectMany<TSource, TCollection, TResult>(
     this Uncertain<TSource> first,
     Func<TSource, Uncertain<TCollection>> collectionSelector,
     Func<TSource, TCollection, Weighted<TResult>> resultSelector)
 {
     return new SelectMany<TSource, TCollection, TResult>(first, collectionSelector, resultSelector);
 }
Example #2
 internal SelectMany(
     Uncertain<TSource> source,
     Func<TSource, Uncertain<TCollection>> collectionSelector,
     Func<TSource, TCollection, Weighted<TResult>> resultSelector)
 {
     this.source             = source;
     this.CollectionSelector = collectionSelector;
     this.ResultSelector     = resultSelector;
 }
Example #3
 private IEnumerable<Weighted<T1>> GetEnumerator<T1>(Uncertain<T1> fromsource)
 {
     while (true)
     {
         this.generation++;
         fromsource.Accept(this);
         yield return (Weighted<T1>)this.sample;
     }
 }
Example #4
        public MarkovChainMonteCarloSampler(Uncertain<T> source)
        {
            this.source     = source;
            this.generation = 1;
            this.stack      = Tuple.Create(0, 0, 0);
            this.trace      = new List<TraceEntry>();
            this.oldTrace   = new List<TraceEntry>();

            this.cache = new Dictionary<object[], int>(new ObjectArrayComparer());
        }
Example #5
 public static Uncertain<TResult> Select<TSource, TResult>(
     this Uncertain<TSource> first,
     Func<TSource, TResult> projection)
 {
     return new Select<TSource, TResult>(
         first,
         i => new Weighted<TResult>()
         {
             Value       = projection(i),
             Probability = 1.0
         });
 }
Example #6
 public static Uncertain<TResult> SelectMany<TSource, TCollection, TResult>(
     this Uncertain<TSource> first,
     Func<TSource, Uncertain<TCollection>> collectionSelector,
     Func<TSource, TCollection, TResult> resultSelector)
 {
     return new SelectMany<TSource, TCollection, TResult>(
         first,
         collectionSelector,
         (a, b) => new Weighted<TResult>()
         {
             Value       = resultSelector(a, b),
             Probability = 1.0
         });
 }
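These Select/SelectMany overloads are what let C# query syntax compose Uncertain<T> values lazily: the compiler rewrites a query into calls to the extension methods above, building a SelectMany/Select node rather than sampling immediately. A minimal usage sketch follows; the Gaussian type and its (mean, stddev) constructor are assumed for illustration and are not part of the snippets shown here.

    // Usage sketch: only Select/SelectMany above are taken from the source; Gaussian is assumed.
    Uncertain<double> speed = new Gaussian(4.0, 1.0);   // assumed constructor: (mean, stddev)
    Uncertain<double> noise = new Gaussian(0.0, 0.5);

    // Desugars to speed.SelectMany(s => noise, (s, n) => s + n),
    // which constructs the SelectMany node defined above.
    Uncertain<double> total = from s in speed
                              from n in noise
                              select s + n;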
Example #7
        public static MeanAndConfidenceInterval ExpectedValueWithConfidence<T>(this Uncertain<T> source, int SampleSize = EXPECTED_VALUE_SAMPLE_SIZE)
        //  where T : IConvertible
        {
            const double CI_AT_95 = 1.96;
            var sampler = Sampler.Create(source);
            var data = (from k in sampler.Take(SampleSize)
                        select new Weighted<double>()
                        {
                            Value       = Convert.ToDouble(k.Value),
                            Probability = k.Probability
                        }).ToList();

            // http://en.wikipedia.org/wiki/Weighted_arithmetic_mean
            var N            = (double)data.Count();
            var WeightSum    = (from k in data select k.Probability).Sum();
            var SumOfSquares = data.Select(x => Math.Pow(x.Probability, 2)).Sum();

            // weighted mean
            var Xbar = data.Select(x => x.Value * x.Probability).Sum() / WeightSum;
            // unbiased estimate of sigma
            var NormalizationFactor = WeightSum - (SumOfSquares / WeightSum);
            // population weighted StdDev
            var SigmaBar = Math.Sqrt(
                data.Select(
                    x => x.Probability * Math.Pow(x.Value - Xbar, 2)).Sum()
                / NormalizationFactor);
            // TODO: defaults to a 95% confidence interval;
            // we should really use the T distribution.
            // The result is biased for small numbers of samples.
            var C = CI_AT_95 * SigmaBar / Math.Sqrt(N);

            return new MeanAndConfidenceInterval
            {
                Mean = Xbar,
                CI   = C,
            };
        }
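A usage sketch for the estimator above; Gaussian is again an assumed stand-in for any Uncertain<double> source, while ExpectedValueWithConfidence, Mean, and CI come from the snippet itself.

    // Usage sketch (Gaussian is an assumed Uncertain<double> source).
    Uncertain<double> temperature = new Gaussian(21.0, 2.0);

    var estimate = temperature.ExpectedValueWithConfidence(SampleSize: 1000);
    Console.WriteLine($"mean = {estimate.Mean}, 95% CI = +/- {estimate.CI}");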
Example #8
        private IEnumerable<Weighted<T1>> GetEnumerator<T1>(Uncertain<T1> uncertain)
        {
            // TODO: I am still building a caching mechanism!
            var regenFrom = 0;

            RandomPrimitive sampled   = null;
            var             initstack = this.stack;
            var             sampler   = new SingleSampler();

            do
            {
                this.stack = initstack;

                // Run the program.
                uncertain.Accept(this);

                //var samples = (from e in this.trace select e.Sample).ToArray();
                //int count;
                //if (!this.cache.TryGetValue(samples, out count))
                //{
                //    count = 0;
                //}
                //else
                //{
                //    count = count;
                //}
                //this.cache[samples] = count + 1;

                // TODO: should we return 0 mass samples?
                //       0 IS a reasonable weight if all
                //       one wants to do is know about a
                //       posterior - need to compute the score
                //       from the Weighted<T1> object rather
                //       than the trace.
                var returnval = (Weighted<T1>)this.sample;

                //if (returnval.Probability > 0)
                //    yield return returnval;
                yield return returnval;

                var roll       = Extensions.NextRandom();
                var acceptance = Accept(trace, oldTrace, regenFrom);
                if (this.generation > 1 && !(Math.Log(roll) < acceptance))
                {
                    // rollback proposal
                    trace.Clear();
                    trace.AddRange(oldTrace);
                }

                Swap(ref trace, ref oldTrace);
                if (oldTrace.Count > 0)
                {
                    // A programmer induced dependence implies the trace can have
                    // more than one copy of any given Random Primitive
                    // only sample from the set of distinct RandomPrimitives
                    // to avoid oversampling any particular RandomPrimitive.
                    //var dict = new Dictionary<RandomPrimitive, IList<int>>();
                    //var pos = 0;
                    //foreach(var e in oldTrace)
                    //{
                    //    IList<int> lst;
                    //    if (! dict.TryGetValue(e.Erp, out lst))
                    //    {
                    //        lst = new List<int>();
                    //        dict[e.Erp] = lst;
                    //    }
                    //    lst.Add(pos++);
                    //}
                    //regenFrom = (int)Math.Floor(Extensions.NextRandom() * dict.Keys.Count);
                    //sampled = dict.Keys.ElementAt(regenFrom);
                    var distinct = oldTrace.Select(e => e.Erp).Distinct().ToList();
                    regenFrom = (int)Math.Floor(Extensions.NextRandom() * distinct.Count);
                    sampled   = distinct[regenFrom];
                    // force regeneration of this random primitive
                    // note we reset this to false in the
                    // Visit(RandomPrimitive) method
                    sampled.ForceRegen = true;


                    sampled.Accept(sampler);
                    //var key = (from e in this.trace select e.Sample).ToArray();
                }
                trace.Clear();

                this.generation++;
            } while (true);
        }
Example #9
 internal static ISampler<T> Create<T>(Uncertain<T> source)
 {
     // return new ForwardSampler<T>(source);
     return new MarkovChainMonteCarloSampler<T>(source);
 }
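Note that Create is internal, so enumeration of a sampler happens inside the library, as in ExpectedValueWithConfidence and Pr above. A sketch of that internal usage pattern, with Bernoulli as an assumed Uncertain<bool> source:

    // Sketch of how the library consumes a sampler (Bernoulli is assumed).
    Uncertain<bool> coin = new Bernoulli(0.5);

    foreach (var sample in Sampler.Create(coin).Take(10))
    {
        // Each element is a Weighted<bool>: a sampled value plus its weight.
        Console.WriteLine($"{sample.Value} (probability {sample.Probability})");
    }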
Example #10
 internal Select(Uncertain<TSource> source, Func<TSource, Weighted<TResult>> projection)
 {
     this.source     = source;
     this.Projection = projection;
 }
Example #11
 public static Uncertain<TResult> Select<TSource, TResult>(
     this Uncertain<TSource> first,
     Func<TSource, Weighted<TResult>> projection)
 {
     return new Select<TSource, TResult>(first, projection);
 }
Example #12
        /// <summary>
        /// Decide if this Bernoulli is true with probability at least <paramref name="Prob"/>.
        /// </summary>
        /// <remarks>
        /// This method implements Wald's Sequential Probability Ratio Test (SPRT) for the special
        /// case where the distribution is a Bernoulli. The log-likelihood is therefore simply a
        /// function of the number of Trues sampled.
        /// </remarks>
        /// <param name="Prob">The probability threshold to compare against</param>
        /// <param name="Alpha">The confidence level of the hypothesis test</param>
        /// <param name="Epsilon">The indifference region for the hypothesis test</param>
        /// <param name="MaxSampleSize">Maximum number of samples to draw before giving up</param>
        /// <param name="InitSampleSize">Initial number of samples to draw</param>
        /// <param name="SampleSizeStep">Number of samples to draw between each hypothesis test</param>
        /// <returns>True if this Bernoulli is true with probability at least <paramref name="Prob"/></returns>
        public static bool Pr(this Uncertain<bool> source, double Prob = 0.5,
                              double Alpha = 0.05, double Epsilon = 0.03,
                              int MaxSampleSize = MAX_SAMPLE_SIZE,
                              int InitSampleSize = INITIAL_SAMPLE_SIZE,
                              int SampleSizeStep = SAMPLE_SIZE_STEP)
        {
            // Number of samples drawn so far
            int num_samples;

            // The hypotheses being compared
            double H_0 = Prob - Epsilon;  // H_0 : p <= prob - epsilon
            double H_1 = Prob + Epsilon;  // H_1 : p >= prob + epsilon

            // Decide the log-likelihood thresholds for the test
            double Beta = Alpha;                        // We are symmetric w.r.t. false positives/negatives
            double A    = Math.Log(Beta / (1 - Alpha)); // Accept H_0 if the log-likelihood is <= a
            double B    = Math.Log((1 - Beta) / Alpha); // Accept H_1 if the log-likelihood is >= b

            // Draw the initial samples
            int K = 0;  // number of successes in n trials

            double WSum     = 0.0;
            double WSumTrue = 0.0;

            IEnumerator<Weighted<bool>> enumerator = Sampler.Create(source).GetEnumerator();
            Func<Weighted<bool>> FuncSampler = () =>
            {
                // Advance first: Current is undefined until MoveNext() has been called.
                if (!enumerator.MoveNext())
                {
                    throw new Exception("Ran out of data!");
                }
                return enumerator.Current;
            };

            for (num_samples = 0; num_samples < InitSampleSize; num_samples++)
            {
                var sample = FuncSampler();
                if (sample.Value)
                {
                    K        += 1;
                    WSumTrue += sample.Probability;
                }
                WSum += sample.Probability;
            }

            while (num_samples <= MaxSampleSize)
            {
                // Calculate the log-likelihood of the data seen so far
                double LogLikelihood = WSumTrue * Math.Log(H_1 / H_0) + (WSum - WSumTrue) * Math.Log((1 - H_1) / (1 - H_0));

                // If we can accept H_1 then P > Prob, so return true
                if (LogLikelihood >= B)
                {
                    return true;
                }
                // If we can accept H_0 then P < Prob, so return false
                if (LogLikelihood <= A)
                {
                    return false;
                }

                // Otherwise, continue sampling
                for (int i = 0; i < SampleSizeStep; i++)
                {
                    var sample = FuncSampler();
                    if (sample.Value)
                    {
                        K        += 1;
                        WSumTrue += sample.Probability;
                    }
                    WSum += sample.Probability;
                }

                num_samples += SampleSizeStep;
            }

            // If the maximum sample size is reached, assume the answer is false. This is a
            // (mostly unjustified) assumption that false positives are more damaging.
            return false;
        }
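A usage sketch of this sequential test; Gaussian is an assumed Uncertain<double> source, and the Select overload from Example #5 lifts the comparison into the uncertain domain before Pr is applied.

    // Usage sketch (Gaussian is assumed; Select and Pr are from the snippets above).
    Uncertain<double> reading = new Gaussian(0.4, 0.2);

    // Lift the comparison with Select, then ask whether it holds
    // with probability at least 0.9 via the SPRT above.
    Uncertain<bool> aboveZero = reading.Select(x => x > 0.0);
    if (aboveZero.Pr(Prob: 0.9))
    {
        Console.WriteLine("reading > 0 with probability at least 0.9");
    }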
Example #13
 internal Where(Uncertain<T> source, Predicate<T> predicate)
 {
     this.source    = source;
     this.Predicate = predicate;
 }
Example #14
 internal ForwardSampler(Uncertain<T> source)
 {
     this.source     = source;
     this.generation = 1;
     this.cache      = new Dictionary<object, Tuple<int, object>>();
 }