/// <summary>
/// Systematic resampling: divides [0, 1) into N equal subdivisions and places one
/// sample position in each, all sharing a single random offset, then maps every
/// position into the cumulative weight distribution of the particles.
/// </summary>
/// <param name="sampleCount">Unused; the output size always equals Particles.Count.</param>
/// <param name="Particles">Current particle set; weights are assumed to sum to ~1.</param>
/// <returns>A new particle list drawn according to the particle weights.</returns>
public List<Particle> Sample(int sampleCount, List<Particle> Particles)
{
    int n = Particles.Count;

    // BUG FIX: the random offset must be drawn ONCE and shared by every
    // subdivision ("a consistent random offset", per the original comment).
    // The original called NextDouble() inside the Select lambda, drawing a
    // fresh offset per element — that is stratified, not systematic, resampling.
    double offset = SingleRandom.Instance.NextDouble();
    var positions = Enumerable.Range(0, n)
                              .Select(k => (k + offset) / n)
                              .ToArray();

    var cumulativeSum = Particles.Select(p => p.Weight).CumulativeSum().ToArray();

    // Robustness: if floating-point round-off leaves the total slightly below 1,
    // the largest position could walk j past the end of cumulativeSum. Clamp the
    // final cumulative value so every position is covered.
    if (n > 0 && cumulativeSum[n - 1] < 1.0)
    {
        cumulativeSum[n - 1] = 1.0;
    }

    int[] indexes = new int[n];
    int i = 0, j = 0;
    while (i < n)
    {
        if (positions[i] < cumulativeSum[j])
        {
            // Position i falls inside particle j's weight interval.
            indexes[i] = j;
            i++;
        }
        else
        {
            j++;
        }
    }
    return ReSampleHelper.ReSampleFromIndex(Particles, indexes);
}
/// <summary>
/// Low-variance (stochastic universal) resampling from Thrun, Burgard and Fox,
/// "Probabilistic Robotics": a single random start r in [0, 1/N) defines N pointers
/// spaced exactly 1/N apart, which are swept once through the cumulative weight
/// distribution. The number of samples is kept constant.
/// </summary>
/// <param name="sampleCount">Unused; the output size always equals Particles.Count.</param>
/// <param name="Particles">Current particle set; weights are assumed to sum to ~1.</param>
/// <returns>A new particle list drawn according to the particle weights.</returns>
public List<Particle> Sample(int sampleCount, List<Particle> Particles)
{
    int n = Particles.Count;
    int[] indexes = new int[n];
    if (n == 0)
    {
        // Nothing to resample; preserve the original's behavior on an empty set.
        return ReSampleHelper.ReSampleFromIndex(Particles, indexes);
    }

    // Single random start; every subsequent pointer is offset by 1/N.
    double r = SingleRandom.Instance.NextDouble() / n;

    // j / c persist across pointers: u is strictly increasing, so the sweep is
    // O(N) overall. (The original re-initialized both inside the loop.)
    int j = 0;
    double c = Particles[0].Weight; // cumulative weight up through particle j

    for (int i = 0; i < n; i++)
    {
        // BUG FIX: the original computed r + (i - 1) / Particles.Count with
        // INTEGER division, which is 0 for every i in [1, N), so all pointers
        // collapsed onto r. Cast to double and drop the off-by-one.
        double u = r + (double)i / n;

        // BUG FIX: the original wrote "c = +Particles[i].Weight" — assignment
        // with a unary plus instead of "+=", and indexed by i instead of j —
        // so the cumulative sum was never actually accumulated.
        while (u > c)
        {
            j++;
            c += Particles[j].Weight;
        }
        indexes[i] = j;
    }
    return ReSampleHelper.ReSampleFromIndex(Particles, indexes);
}