private static void Main()
{
    // Demonstrates RunningStats: accumulate samples of sinc(x) for x in (0, PI]
    // and report the first four moments of the accumulated data.
    using (var rs = new RunningStats<double>())
    {
        // We first generate the data and add it sequentially to our running_stats
        // object. We then print every fifth data point.
        for (var x = 1; x <= 100; x++)
        {
            // Declared inside the loop (the originals lived outside for no reason).
            var tp1 = x / 100.0;
            var tp2 = Sinc(Math.PI * x / 100.0);
            rs.Add(tp2);

            if (x % 5 == 0)
            {
                Console.WriteLine($" x = {tp1} sinc(x) = {tp2}");
            }
        }

        // Finally, we compute and print the mean, variance, skewness, and excess
        // kurtosis of our data.
        Console.WriteLine();
        Console.WriteLine($"Mean: {rs.Mean}");
        Console.WriteLine($"Variance: {rs.Variance}");
        Console.WriteLine($"Skewness: {rs.Skewness}");
        // Fix: label previously read "Excess Kurtosis " with no colon,
        // inconsistent with the three lines above.
        Console.WriteLine($"Excess Kurtosis: {rs.ExcessKurtosis}");
    }
}
private static IList<long> AlignPoints(IList<DPoint> from,
                                       IList<DPoint> to,
                                       double minAngle = -90 * Math.PI / 180.0,
                                       double maxAngle = 90 * Math.PI / 180.0,
                                       long numAngles = 181)
{
    /*!
     * ensures
     *     - Figures out how to align the points in from with the points in to. Returns an
     *       assignment array A that indicates that from[i] matches with to[A[i]].
     *
     *       We use the Hungarian algorithm with a search over reasonable angles. This method
     *       works because we just need to account for a translation and a mild rotation and
     *       nothing else. If there is any other more complex mapping then you probably don't
     *       have landmarks that make sense to flip.
     * !*/
    if (from == null)
        throw new ArgumentNullException(nameof(from));
    if (to == null)
        throw new ArgumentNullException(nameof(to));
    if (from.Count != to.Count)
        throw new ArgumentException("from and to must contain the same number of points.");
    // Fix: with numAngles < 1 the angle loop would never run, leaving
    // bestAssignment null and surfacing later as a NullReferenceException.
    if (numAngles < 1)
        throw new ArgumentOutOfRangeException(nameof(numAngles), "At least one candidate angle is required.");
    // Nothing to align: the empty assignment is trivially correct, and it avoids
    // handing a 0x0 matrix to the native max routine below.
    if (from.Count == 0)
        return new long[0];

    long[] bestAssignment = null;
    var bestAssignmentCost = double.PositiveInfinity;

    using (var dists = new Matrix<double>(from.Count, to.Count))
    {
        foreach (var angle in Dlib.Linspace(minAngle, maxAngle, (int)numAngles))
        {
            // Build the pairwise squared-distance matrix between the rotated
            // "from" points and the "to" points for this candidate angle.
            using (var rot = Dlib.RotationMatrix(angle))
            {
                for (int r = 0, rows = dists.Rows; r < rows; ++r)
                {
                    using (var tmp = rot * from[r])
                    {
                        for (int c = 0, columns = dists.Columns; c < columns; ++c)
                        {
                            using (var tmp2 = tmp - to[c])
                                dists[r, c] = Dlib.LengthSquared(tmp2);
                        }
                    }
                }
            }

            // MaxCostAssignment needs integer costs and maximizes, so normalize the
            // distances to [0,1], scale into long range, round, and negate to turn
            // the problem into a minimum-cost assignment.
            using (var tmp = dists / Dlib.Max(dists))
            using (var tmp2 = long.MaxValue * tmp)
            using (var tmp3 = Dlib.Round(tmp2))
            using (var idists = Dlib.MatrixCast<long>(-tmp3))
            {
                // ToArray() already yields a fresh array, so no second copy is
                // needed when recording the best assignment (the original made one).
                var assignment = Dlib.MaxCostAssignment(idists).ToArray();
                var cost = Dlib.AssignmentCost(dists, assignment);
                if (cost < bestAssignmentCost)
                {
                    bestAssignmentCost = cost;
                    bestAssignment = assignment;
                }
            }
        }

        // Now compute the alignment error in terms of average distance moved by each
        // part. We do this so we can give the user a warning if it's impossible to
        // make a good alignment.
        using (var rs = new RunningStats<double>())
        {
            // Permute "to" so that reorderedTo[bestAssignment[i]] holds to[i]; the
            // similarity transform is then fit between matched pairs.
            var reorderedTo = new List<DPoint>(Enumerable.Range(0, to.Count).Select(i => new DPoint()));
            for (var i = 0; i < to.Count; ++i)
                reorderedTo[(int)bestAssignment[i]] = to[i];

            using (var tform = Dlib.FindSimilarityTransform(from, reorderedTo))
            {
                for (var i = 0; i < from.Count; ++i)
                {
                    var p = tform.Operator(from[i]) - reorderedTo[i];
                    rs.Add(Dlib.Length(p));
                }
            }

            if (rs.Mean > 0.05)
            {
                Console.WriteLine("WARNING, your dataset has object part annotations and you asked imglab to ");
                Console.WriteLine("flip the data. Imglab tried to adjust the part labels so that the average");
                Console.WriteLine("part layout in the flipped dataset is the same as the source dataset. ");
                Console.WriteLine("However, the part annotation scheme doesn't seem to be left-right symmetric.");
                Console.WriteLine("You should manually review the output to make sure the part annotations are ");
                Console.WriteLine("labeled as you expect.");
            }

            return bestAssignment;
        }
    }
}
private static void Main()
{
    // Anomaly detection with a kcentroid: train the centroid on points sampled
    // from the sinc curve, then compare centroid distances for points that are
    // on the curve against points that are not.
    //
    // Our samples are 2 dimensional column vectors. (If you don't know the
    // dimensionality of your vectors at compile time you can change the 2 to a 0
    // and set the size at runtime.) The radial basis kernel is used because it
    // only has one parameter and generally gives good results without much
    // fiddling.
    using (var rbk = new RadialBasisKernel<double, Matrix<double>>(0.1d, 2, 1))
    // The kcentroid takes 3 constructor arguments: the kernel to use, a
    // numerical-accuracy parameter for the centroid estimation (smaller values
    // give better results but use more dictionary vectors, hence more time and
    // memory), and the maximum number of dictionary vectors allowed, which
    // bounds the runtime complexity.
    using (var test = new KCentroid<double, RadialBasisKernel<double, Matrix<double>>>(rbk, 0.01, 15))
    using (var m = Matrix<double>.CreateTemplateParameterizeMatrix(2, 1))
    {
        // Train our object on a few samples of the sinc function.
        for (double x = -15; x <= 8; x += 1)
        {
            m[0] = x;
            m[1] = Sinc(x);
            test.Train(m);
        }

        using (var rs = new RunningStats<double>())
        {
            // Distances from the centroid to points that ARE on the sinc curve.
            // These should all be similar. Accumulate them into rs so the mean
            // and standard deviation are available for the comparison below.
            double[] onCurveXs = { -1.5, -1.5, -0, -0.5, -4.1, -1.5, -0.5 };

            Console.WriteLine("Points that are on the sinc function:");
            foreach (var x in onCurveXs)
            {
                m[0] = x;
                m[1] = Sinc(m[0]);
                Console.WriteLine($" {test.Operator(m)}");
                rs.Add(test.Operator(m));
            }

            Console.WriteLine();

            // Distances for points that are NOT on the sinc curve. These should
            // all be significantly bigger. rs.Scale reports how many standard
            // deviations each distance lies from the mean of the on-curve
            // distances; "significantly bigger" here means > 3 or 4.
            double[] offCurveXs = { -1.5, -1.5, -0, -0.5, -4.1, -1.5, -0.5 };
            double[] offCurveYs =
            {
                Sinc(-1.5) + 4,
                Sinc(-1.5) + 3,
                -Sinc(-0),
                -Sinc(-0.5),
                Sinc(-4.1) + 2,
                Sinc(-1.5) + 0.9,
                Sinc(-0.5) + 1
            };

            Console.WriteLine("Points that are NOT on the sinc function:");
            for (var i = 0; i < offCurveXs.Length; i++)
            {
                m[0] = offCurveXs[i];
                m[1] = offCurveYs[i];
                Console.WriteLine($" {test.Operator(m)} is {rs.Scale(test.Operator(m))} standard deviations from sinc.");
            }

            // Finally print the mean and standard deviation of the points that
            // actually are from sinc().
            Console.WriteLine($"\nmean: {rs.Mean}");
            Console.WriteLine($"standard deviation: {rs.StdDev}");

            // The output is as follows:
            /*
             * Points that are on the sinc function:
             * 0.869913
             * 0.869913
             * 0.873408
             * 0.872807
             * 0.870432
             * 0.869913
             * 0.872807
             *
             * Points that are NOT on the sinc function:
             * 1.06366 is 119.65 standard deviations from sinc.
             * 1.02212 is 93.8106 standard deviations from sinc.
             * 0.921382 is 31.1458 standard deviations from sinc.
             * 0.918439 is 29.3147 standard deviations from sinc.
             * 0.931428 is 37.3949 standard deviations from sinc.
             * 0.898018 is 16.6121 standard deviations from sinc.
             * 0.914425 is 26.8183 standard deviations from sinc.
             *
             * mean: 0.871313
             * standard deviation: 0.00160756
             */
            // So the kcentroid object correctly indicates that the non-sinc
            // points are definitely not points from the sinc function.
        }
    }
}