// =================================================
/// <summary>
/// Builds an Algorithm instance: stores the problem definition and the PSO
/// parameter set, allocates working buffers sized to the problem dimension,
/// clamps the run count to R_max and caches sqrt(D) for later use.
/// </summary>
/// <param name="pb">Problem definition (search space, objective, eval budget).</param>
/// <param name="param">PSO control parameters (S, K, p, w, c, vMax, ...).</param>
public Algorithm(Problem pb, Parameters param)
{
    this.pb = pb;
    this.param = param;

    // Working storage, all sized to the problem dimension D.
    bestBest = new Position(pb.SS.D);       // best position over all runs
    PX = new Velocity(pb.SS.D);
    R = new SPSO_2007.Result(pb.SS.D);

    //f_run = File.OpenWrite("f_run.txt");
    //f_synth = File.OpenWrite("f_synth.txt");

    // ----------------------------------------------- PROBLEM
    // Never perform more than R_max runs.
    runMax = Math.Min(100, R_max);

    this.vMax = param.vMax;

    Utils.Logger.Log("\n c = {0}, w = {1}", param.c, param.w);

    // Cached for the rotation-invariant velocity update.
    sqrtD = Math.Sqrt(pb.SS.D);

    // NOTE(review): the per-run loop and the statistics/file output were
    // already commented out in the original source; kept here as a marker.
    /* for (run = 0; run < runMax; run++) { } // End loop on "run" */
    //TODO: Fix up writing out to files (f_synth: errorMean, variance,
    //      successRate, evalMean, bestBest.x[d])
}
// ===============================================================
// PSO
/// <summary>
/// Performs one run of Standard PSO 2007: initialises a swarm of param.S
/// particles inside pb's search space, then iterates the velocity/position
/// update with a random information topology until the stop criterion
/// (param.stop) is met or pb.evalMax evaluations are spent.
/// </summary>
/// <param name="param">Swarm size, topology probability, w, c, clamping,
/// rotation and stop-criterion options.</param>
/// <param name="pb">Problem definition: search space, objective, epsilon,
/// evaluation budget and (for stop == 2) the known solution.</param>
/// <returns>A Result holding the final swarm, the evaluation count and the
/// best error found.</returns>
static Result PSO(Parameters param, Problem pb)
{
    Velocity aleaV = new Velocity();
    int d;                                    // current dimension
    int g;                                    // best informant
    int[] index = new int[S_max];             // particle processing order
    int[,] LINKS = new int[S_max, S_max];     // Information links
    int m;
    double normPX = 0.0, normGX = 0.0;
    int noStop;
    int outside;
    double p;
    Velocity PX = new Velocity();
    Result R = new Result();
    Matrix RotatePX = new Matrix();
    Matrix RotateGX = new Matrix();
    int s0, s, s1;
    double zz;

    aleaV.size = pb.SS.D;
    RotatePX.size = pb.SS.D;
    RotateGX.size = pb.SS.D;

    // ----------------------------------------------------- INITIALISATION
    p = param.p;          // Probability threshold for random topology
    R.SW.S = param.S;     // Size of the current swarm

    // Position and velocity
    for (s = 0; s < R.SW.S; s++)
    {
        R.SW.X[s].size = pb.SS.D;
        R.SW.V[s].size = pb.SS.D;
        for (d = 0; d < pb.SS.D; d++)
        {
            R.SW.X[s].x[d] = rand.NextDouble(pb.SS.minInit[d], pb.SS.maxInit[d]);
            // "Half-diff" initialisation: v = (U(min,max) - x) / 2
            R.SW.V[s].v[d] = (rand.NextDouble(pb.SS.min[d], pb.SS.max[d]) - R.SW.X[s].x[d]) / 2;
        }
        // Take quantisation into account
        Position.quantis(R.SW.X[s], pb.SS);
        // First evaluations
        R.SW.X[s].f = Problem.perf(R.SW.X[s], pb.function, pb.objective);
        R.SW.P[s] = R.SW.X[s].Clone();   // Best position = current one
        R.SW.P[s].improved = 0;          // No improvement
    }

    // If the number max of evaluations is smaller than
    // the swarm size, just keep evalMax particles, and finish
    if (R.SW.S > pb.evalMax) R.SW.S = pb.evalMax;
    R.nEval = R.SW.S;

    // Find the best
    R.SW.best = 0;
    double errorPrev;
    switch (param.stop)
    {
        default:
            // "distance" to the wanted f value (objective)
            errorPrev = R.SW.P[R.SW.best].f;
            break;
        case 2:
            // Distance to the wanted solution
            errorPrev = Position.distanceL(R.SW.P[R.SW.best], pb.solution, 2);
            break;
    }

    for (s = 1; s < R.SW.S; s++)
    {
        switch (param.stop)
        {
            default:
                zz = R.SW.P[s].f;
                if (zz < errorPrev)
                {
                    R.SW.best = s;
                    errorPrev = zz;
                }
                break;
            case 2:
                // BUG FIX: measure the distance of the CANDIDATE particle s,
                // not of the current best. The original code passed
                // R.SW.P[R.SW.best], comparing the incumbent against itself,
                // so the best index never moved under stop criterion 2.
                // (Matches the reference SPSO-2007 C code: distanceL(R.SW.P[s], ...).)
                zz = Position.distanceL(R.SW.P[s], pb.solution, 2);
                if (zz < errorPrev)
                {
                    R.SW.best = s;
                    errorPrev = zz;
                }
                break;
        }
    }

    // Display the best
    Console.Write(" Best value after init. {0} ", errorPrev);

    int initLinks = 1;   // So that information links will be initialized
                         // Note: It is also a flag saying "No improvement"
    noStop = 0;
    double error = errorPrev;

    // ---------------------------------------------- ITERATIONS
    int iter = 0;
    while (noStop == 0)
    {
        iter++;

        if (initLinks == 1)   // Random topology
        {
            // Who informs who, at random
            for (s = 0; s < R.SW.S; s++)
            {
                for (m = 0; m < R.SW.S; m++)
                {
                    if (rand.NextDouble() < p) LINKS[m, s] = 1;   // Probabilistic method
                    else LINKS[m, s] = 0;
                }
                LINKS[s, s] = 1;   // Each particle always informs itself
            }
        }

        // The swarm MOVES
        for (int i = 0; i < R.SW.S; i++) index[i] = i;

        // Permutate the index order
        if (param.randOrder == 1)
        {
            index.Shuffle(7, R.SW.S);
        }

        Velocity GX = new Velocity();
        for (s0 = 0; s0 < R.SW.S; s0++)   // For each particle ...
        {
            s = index[s0];

            // ... find the first informant
            s1 = 0;
            while (LINKS[s1, s] == 0) s1++;
            if (s1 >= R.SW.S) s1 = s;

            // Find the best informant
            g = s1;
            for (m = s1; m < R.SW.S; m++)
            {
                if (LINKS[m, s] == 1 && R.SW.P[m].f < R.SW.P[g].f) g = m;
            }

            // ... compute the new velocity, and move
            // Exploration tendency
            for (d = 0; d < pb.SS.D; d++)
            {
                R.SW.V[s].v[d] = param.w * R.SW.V[s].v[d];
                // Prepare Exploitation tendency p-x
                PX.v[d] = R.SW.P[s].x[d] - R.SW.X[s].x[d];
                if (g != s) GX.v[d] = R.SW.P[g].x[d] - R.SW.X[s].x[d];   // g-x
            }
            PX.size = pb.SS.D;
            GX.size = pb.SS.D;

            // Option "non sensitivity to rotation"
            if (param.rotation > 0)
            {
                normPX = Velocity.normL(PX, 2);
                if (g != s) normGX = Velocity.normL(GX, 2);
                if (normPX > 0)
                {
                    RotatePX = Matrix.MatrixRotation(PX);
                }
                if (g != s && normGX > 0)
                {
                    RotateGX = Matrix.MatrixRotation(GX);
                }
            }

            // Exploitation tendencies
            switch (param.rotation)
            {
                default:
                    for (d = 0; d < pb.SS.D; d++)
                    {
                        R.SW.V[s].v[d] = R.SW.V[s].v[d] + rand.NextDouble(0.0, param.c) * PX.v[d];
                        if (g != s)
                            R.SW.V[s].v[d] = R.SW.V[s].v[d] + rand.NextDouble(0.0, param.c) * GX.v[d];
                    }
                    break;
                case 1:
                    // First exploitation tendency
                    if (normPX > 0)
                    {
                        zz = param.c * normPX / sqrtD;
                        aleaV = rand.NextVector(pb.SS.D, zz);
                        Velocity expt1 = RotatePX.VectorProduct(aleaV);
                        for (d = 0; d < pb.SS.D; d++)
                        {
                            R.SW.V[s].v[d] = R.SW.V[s].v[d] + expt1.v[d];
                        }
                    }
                    // Second exploitation tendency
                    if (g != s && normGX > 0)
                    {
                        zz = param.c * normGX / sqrtD;
                        aleaV = rand.NextVector(pb.SS.D, zz);
                        Velocity expt2 = RotateGX.VectorProduct(aleaV);
                        for (d = 0; d < pb.SS.D; d++)
                        {
                            R.SW.V[s].v[d] = R.SW.V[s].v[d] + expt2.v[d];
                        }
                    }
                    break;
            }

            // Update the position
            for (d = 0; d < pb.SS.D; d++)
            {
                R.SW.X[s].x[d] = R.SW.X[s].x[d] + R.SW.V[s].v[d];
            }

            // Budget exhausted: skip straight to the stop-criterion check.
            if (R.nEval >= pb.evalMax)
            {
                goto end;
            }

            // Quantisation
            Position.quantis(R.SW.X[s], pb.SS);

            switch (param.clamping)
            {
                case 0:   // No clamping AND no evaluation
                    outside = 0;
                    for (d = 0; d < pb.SS.D; d++)
                    {
                        if (R.SW.X[s].x[d] < pb.SS.min[d] || R.SW.X[s].x[d] > pb.SS.max[d])
                            outside++;
                    }
                    if (outside == 0)   // If inside, the position is evaluated
                    {
                        R.SW.X[s].f = Problem.perf(R.SW.X[s], pb.function, pb.objective);
                        R.nEval = R.nEval + 1;
                    }
                    break;
                case 1:   // Set to the bounds, and v to zero
                    for (d = 0; d < pb.SS.D; d++)
                    {
                        if (R.SW.X[s].x[d] < pb.SS.min[d])
                        {
                            R.SW.X[s].x[d] = pb.SS.min[d];
                            R.SW.V[s].v[d] = 0;
                        }
                        if (R.SW.X[s].x[d] > pb.SS.max[d])
                        {
                            R.SW.X[s].x[d] = pb.SS.max[d];
                            R.SW.V[s].v[d] = 0;
                        }
                    }
                    R.SW.X[s].f = Problem.perf(R.SW.X[s], pb.function, pb.objective);
                    R.nEval = R.nEval + 1;
                    break;
            }

            // ... update the best previous position
            if (R.SW.X[s].f < R.SW.P[s].f)   // Improvement
            {
                R.SW.P[s] = R.SW.X[s].Clone();
                // ... update the best of the bests
                if (R.SW.P[s].f < R.SW.P[R.SW.best].f)
                {
                    R.SW.best = s;
                }
            }
        }   // End of "for (s0=0 ..."

        // Check if finished
        switch (param.stop)
        {
            default:
                error = R.SW.P[R.SW.best].f;
                break;
            case 2:
                error = Position.distanceL(R.SW.P[R.SW.best], pb.solution, 2);
                break;
        }

        if (error < errorPrev)   // Improvement
        {
            initLinks = 0;
        }
        else                     // No improvement
        {
            initLinks = 1;       // Information links will be reinitialized
        }
        if (param.initLink == 1) initLinks = 1 - initLinks;
        errorPrev = error;

    end:
        switch (param.stop)
        {
            case 0:
            case 2:
                if (error > pb.epsilon && R.nEval < pb.evalMax)
                {
                    noStop = 0;   // Won't stop
                }
                else
                {
                    noStop = 1;   // Will stop
                }
                break;
            case 1:
                if (R.nEval < pb.evalMax) noStop = 0;   // Won't stop
                else noStop = 1;                        // Will stop
                break;
        }
    }   // End of "while nostop ..."

    R.error = error;
    return R;
}
/// <summary>
/// Builds an Optimiser from a saved parameter store: configures the image
/// preprocessor, selects BP or PSO training (mutually exclusive), constructs
/// the corresponding NeuronDotNet network and loads the training set.
/// </summary>
/// <param name="filename">Path of the parameter/data store read via DataAccess.</param>
public Optimiser(string filename)
{
    Utils.Logger.Log("Loading stopwatch... ");
    stopWatch = new Stopwatch();
    ResultCounter = new Stopwatch();
    this.filename = filename;

    Utils.Logger.Log("Loading preprocessor parameters from " + filename);
    dataAccess = new DataAccess(filename);

    // Image preprocessing pipeline, fully driven by stored parameters.
    preprocessor = new Preprocessor();
    preprocessor.ImageSize = new Size(Convert.ToInt32(dataAccess.GetParameter("Master_Width")), Convert.ToInt32(dataAccess.GetParameter("Master_Height")));
    preprocessor.KeepAspectRatio = Convert.ToBoolean(dataAccess.GetParameter("Master_Aspect"));
    preprocessor.ScalingMethod = (ScalingMethods)Convert.ToInt32(dataAccess.GetParameter("Master_Resize"));
    preprocessor.ContrastStretch = Convert.ToBoolean(dataAccess.GetParameter("Filter_Stretch"));
    preprocessor.Histogram = Convert.ToBoolean(dataAccess.GetParameter("Filter_Histo"));
    preprocessor.Gaussian = Convert.ToBoolean(dataAccess.GetParameter("Filter_Gaussian"));
    preprocessor.GaussianStrength = Convert.ToInt32(dataAccess.GetParameter("Filter_BlurStr"));
    preprocessor.ContrastAdjustment = Convert.ToBoolean(dataAccess.GetParameter("Filter_Contrast"));
    preprocessor.ContrastStrength = Convert.ToDecimal(dataAccess.GetParameter("Filter_ContrastStr"));
    preprocessor.Greyscale = Convert.ToBoolean(dataAccess.GetParameter("Filter_Greyscale"));
    preprocessor.Bradley = Convert.ToBoolean(dataAccess.GetParameter("Filter_Bradley"));
    preprocessor.Threshold = Convert.ToBoolean(dataAccess.GetParameter("Filter_Threshold"));
    preprocessor.ThresholdStrength = Convert.ToDecimal(dataAccess.GetParameter("Filter_ThresholdStr"));

    /* dataAccess.SetParameter("Opt_Bp_LearningType", cmbLearningRateType.SelectedItem.ToString());
       dataAccess.SetParameter("Opt_Bp_InitialLearnRate", txtInitialRate.Text);
       dataAccess.SetParameter("Opt_Bp_FinalLearnRate", txtFinalRate.Text);
       dataAccess.SetParameter("Opt_Bp_JitterEpoch", txtJitterEpoch.Text);
       dataAccess.SetParameter("Opt_Bp_JitterNoiseLimit", txtJitterNoiseLimit.Text);
       dataAccess.SetParameter("Opt_Bp_MaxIterations", txtMaxIterations.Text);
       dataAccess.SetParameter("Opt_Bp_MinError", txtMinimumError.Text); */

    // Which trainer is enabled; a missing/unreadable parameter defaults to
    // disabled rather than aborting construction.
    bool usePSO = false;
    bool useBP = false;
    try
    {
        useBP = Convert.ToBoolean(dataAccess.GetParameter("Opt_Bp_Enabled"));
    }
    catch (Exception)
    {
        Utils.Logger.Log("Warning unable to read BP params");
    }
    try
    {
        usePSO = Convert.ToBoolean(dataAccess.GetParameter("Opt_Pso_Enabled"));
    }
    catch (Exception)
    {
        Utils.Logger.Log("Warning unable to read PSO params");
    }
    // The two trainers are mutually exclusive.
    if (usePSO && useBP)
    {
        throw new NotImplementedException("At this current time you cannot use both BP and PSO");
    }

    InputGroup[] inputGroups = dataAccess.GetInputGroups();
    SourceItem[] sourceItems = dataAccess.GetSourceItems();

    /* Utils.Logger.Log("Preprocessing images...");
       foreach (SourceItem item in sourceItems)
       {
           Utils.Logger.Log("Preprocessing item {0} ", item.Filename);
           item.InternalImage = preprocessor.Process((Bitmap)item.InternalImage);
       } */

    // total = hidden-layer neuron count: one per segment
    // (grid groups contribute Segments^2).
    int total = 0;
    foreach (InputGroup inputGroup in inputGroups)
    {
        if (inputGroup.InputGroupType == InputGroupType.Grid)
        {
            total += (inputGroup.Segments) * (inputGroup.Segments);
        }
        else
        {
            total += inputGroup.Segments;
        }
    }

    // Global stopping conditions and the rolling result buffer.
    maxIterations = Convert.ToInt32(dataAccess.GetParameter("Opt_Global_MaxIterations"));
    minError = Convert.ToDouble(dataAccess.GetParameter("Opt_Global_MinError"));
    maxTime = Convert.ToInt32(dataAccess.GetParameter("Opt_Global_MaxTime"));
    results = new float[Convert.ToInt32(dataAccess.GetParameter("Opt_Global_BufferSize"))];

    if (useBP)
    {
        // ----- Backpropagation-trained network ---------------------------
        int learningRateFunction = Convert.ToInt32(dataAccess.GetParameter("Opt_Bp_LearningType"));
        double initialLR = Convert.ToDouble(dataAccess.GetParameter("Opt_Bp_InitialLearnRate"));
        double finalLR = Convert.ToDouble(dataAccess.GetParameter("Opt_Bp_FinalLearnRate"));
        int jitterEpoch = Convert.ToInt32(dataAccess.GetParameter("Opt_Bp_JitterEpoch"));
        double jitterNoiseLimit = Convert.ToDouble(dataAccess.GetParameter("Opt_Bp_JitterNoiseLimit"));

        // Input layer: one neuron per pixel of the preprocessed image.
        NeuronDotNet.Core.Backpropagation.LinearLayer inputLayer = new NeuronDotNet.Core.Backpropagation.LinearLayer(preprocessor.ImageSize.Width * preprocessor.ImageSize.Height);
        NeuronDotNet.Core.Backpropagation.SigmoidLayer hiddenLayer = new NeuronDotNet.Core.Backpropagation.SigmoidLayer(total);
        hiddenLayer.InputGroups = inputGroups.Length;
        NeuronDotNet.Core.Backpropagation.SigmoidLayer outputLayer = new NeuronDotNet.Core.Backpropagation.SigmoidLayer(1);
        hiddenLayer.Initializer = new NguyenWidrowFunction();

        // Connectors register themselves with the layers on construction;
        // the connector instances themselves do not need to be retained.
        new BackpropagationConnector(
            inputLayer,
            hiddenLayer,
            inputGroups,
            preprocessor.ImageSize.Width,
            preprocessor.ImageSize.Height
            );
        new BackpropagationConnector(hiddenLayer, outputLayer);

        network = new BackpropagationNetwork(inputLayer, outputLayer);

        // Learning-rate schedule selection.
        switch (learningRateFunction)
        {
            case 0:
                network.SetLearningRate(initialLR);
                break;
            case 1:
                network.SetLearningRate(new NeuronDotNet.Core.LearningRateFunctions.ExponentialFunction(initialLR, finalLR));//exp
                break;
            case 2:
                network.SetLearningRate(new NeuronDotNet.Core.LearningRateFunctions.HyperbolicFunction(initialLR, finalLR));//hyp
                break;
            case 3:
                network.SetLearningRate(new NeuronDotNet.Core.LearningRateFunctions.LinearFunction(initialLR, finalLR));//lin
                break;
            default:
                throw new ArgumentOutOfRangeException("The learning rate index is out of range.\n");
        }
        network.JitterEpoch = jitterEpoch;
        network.JitterNoiseLimit = jitterNoiseLimit;
    }

    if (usePSO)
    {
        // ----- PSO-trained network ---------------------------------------
        double minP = Convert.ToDouble(dataAccess.GetParameter("Opt_Pso_MinP"));
        double maxP = Convert.ToDouble(dataAccess.GetParameter("Opt_Pso_MaxP"));
        double minI = Convert.ToDouble(dataAccess.GetParameter("Opt_Pso_MinI"));
        double maxI = Convert.ToDouble(dataAccess.GetParameter("Opt_Pso_MaxI"));
        double quant = Convert.ToDouble(dataAccess.GetParameter("Opt_Pso_Quant"));
        double vMax = Convert.ToDouble(dataAccess.GetParameter("Opt_Pso_vMax"));
        int clamping = Convert.ToInt32(dataAccess.GetParameter("Opt_Pso_Clamping"));
        int initLinks = Convert.ToInt32(dataAccess.GetParameter("Opt_Pso_InitLinks"));
        int randomness = Convert.ToInt32(dataAccess.GetParameter("Opt_Pso_Randomness"));
        int randOrder = Convert.ToInt32(dataAccess.GetParameter("Opt_Pso_ParticleOrder"));
        int rotation = Convert.ToInt32(dataAccess.GetParameter("Opt_Pso_Rotation"));
        int dimensions = Convert.ToInt32(dataAccess.GetParameter("Opt_Pso_Dimensions"));
        int swarmSize = Convert.ToInt32(dataAccess.GetParameter("Opt_Pso_Particles"));
        double k = Convert.ToDouble(dataAccess.GetParameter("Opt_Pso_k"));
        double p = Convert.ToDouble(dataAccess.GetParameter("Opt_Pso_p"));
        double w = Convert.ToDouble(dataAccess.GetParameter("Opt_Pso_w"));
        double c = Convert.ToDouble(dataAccess.GetParameter("Opt_Pso_c"));

        Parameters param = new Parameters();
        param.vMax = vMax;
        param.clamping = clamping;
        // 0 => no clamping AND no evaluation. WARNING: the program
        //      may NEVER stop (in particular with option move 20 (jumps))
        // *1 => classical. Set to bounds, and velocity to zero
        param.initLink = initLinks;
        // 0 => re-init links after each unsuccessful iteration
        // 1 => re-init links after each successful iteration
        param.rand = randomness;
        // 0 => Use KISS as random number generator.
        //      Any other value => use the system one
        param.randOrder = randOrder;
        // 0 => at each iteration, particles are modified
        //      always according to the same order 0..S-1
        // *1 => at each iteration, particles numbers are randomly permutated
        param.rotation = rotation;
        // WARNING. Experimental code, completely valid only for dimension 2
        // 0 => sensitive to rotation of the system of coordinates
        // 1 => non sensitive (except side effects), by using a rotated
        //      hypercube for the probability distribution
        // WARNING. Quite time consuming!
        param.stop = 0;
        // Stop criterion
        // 0 => error < pb.epsilon
        // 1 => eval >= pb.evalMax
        // 2 => ||x-solution|| < pb.epsilon

        // =========================================================== RUNs
        // You may "manipulate" S, p, w and c, but here are the suggested values
        param.S = swarmSize;
        if (param.S > 910) param.S = 910;   // hard cap on the swarm size
        param.K = (int)k;
        param.p = p;
        // (to simulate the global best PSO, set param.p=1)
        //param.p=1;
        param.w = w;
        param.c = c;

        // Same topology as the BP branch, but with the PSO layer types.
        NeuronDotNet.Core.PSO.LinearLayer inputLayer = new NeuronDotNet.Core.PSO.LinearLayer(preprocessor.ImageSize.Width * preprocessor.ImageSize.Height);
        NeuronDotNet.Core.PSO.SigmoidLayer hiddenLayer = new NeuronDotNet.Core.PSO.SigmoidLayer(total);
        hiddenLayer.InputGroups = inputGroups.Length;
        NeuronDotNet.Core.PSO.SigmoidLayer outputLayer = new NeuronDotNet.Core.PSO.SigmoidLayer(1);
        hiddenLayer.Initializer = new NguyenWidrowFunction();

        new PSOConnector(
            inputLayer,
            hiddenLayer,
            inputGroups,
            preprocessor.ImageSize.Width,
            preprocessor.ImageSize.Height
            );
        new PSOConnector(hiddenLayer, outputLayer);

        PSONetwork n = new PSONetwork(inputLayer, outputLayer);
        n.PsoParameters = param;
        n.PsoProblem.MaxI = maxI;
        n.PsoProblem.MinI = minI;
        n.PsoProblem.MaxP = maxP;
        n.PsoProblem.MinP = minP;
        n.PsoProblem.Quantisation = quant;

        network = n;
    }

    // Build the training set: one sample per source image — pixel weights
    // in, sample type out.
    set = new TrainingSet(preprocessor.ImageSize.Width * preprocessor.ImageSize.Height, 1);
    foreach (SourceItem item in sourceItems)
    {
        double[] weights = Utils.getImageWeights(item.InternalImage, inputGroups);
        set.Add(new TrainingSample(weights, new double[] { (double)item.SampleType }));
    }

    network.EndEpochEvent += new TrainingEpochEventHandler(network_EndEpochEvent);
    network.Initialize();
}
// =================================================
/// <summary>
/// Entry point. Runs Standard PSO 2007 up to runMax times on the benchmark
/// function selected by functionCode, then prints per-run results and
/// aggregate statistics (mean/min error, std. dev., success rate, mean
/// evaluation count) plus the best position found over all runs.
/// </summary>
static void Main(string[] args)
{
    Position bestBest = new Position();     // Best position over all runs
    double errorMean = 0;                   // Average error
    double errorMin = double.MaxValue;      // Best result over all runs
    double[] errorMeanBest = new double[R_max];
    double evalMean = 0;                    // Mean number of evaluations
    int nFailure = 0;                       // Number of unsuccessful runs
    double logProgressMean = 0.0;
    int run;

    f_run = File.OpenWrite("f_run.txt");
    f_synth = File.OpenWrite("f_synth.txt");

    // ----------------------------------------------- PROBLEM
    int functionCode = 18;
    /* (see problemDef() for precise definitions)
        0  Parabola (Sphere)
        1  Griewank
        2  Rosenbrock (Banana)
        3  Rastrigin
        4  Tripod (dimension 2)
        5  Ackley
        6  Schwefel
        7  Schwefel 1.2
        8  Schwefel 2.22
        9  Neumaier 3
       10  G3
       11  Network optimisation (Warning: see problemDef() and also perf()
           for problem elements (number of BTS and BSC))
       12  Schwefel
       13  2D Goldstein-Price
       14  Schaffer f6
       15  Step
       16  Schwefel 2.21
       17  Lennard-Jones
       18  Gear Train
       CEC 2005 benchmark (no more than 30D. See cec2005data.c)
       100 F1 (shifted Parabola/Sphere)
       102 F6 (shifted Rosenbrock)
       103 F9 (shifted Rastrigin)
       104 F2 Schwefel
       105 F7 Griewank (NOT rotated)
       106 F8 Ackley (NOT rotated)
       99  Test */

    int runMax = 100;
    if (runMax > R_max) runMax = R_max;

    // ----------------------------------------------------- PARAMETERS
    // * means "suggested value"
    Parameters param = new Parameters();
    param.clamping = 1;
    // 0 => no clamping AND no evaluation. WARNING: the program
    //      may NEVER stop (in particular with option move 20 (jumps))
    // *1 => classical. Set to bounds, and velocity to zero
    param.initLink = 0;
    // 0 => re-init links after each unsuccessful iteration
    // 1 => re-init links after each successful iteration
    param.rand = 1;
    // 0 => Use KISS as random number generator.
    //      Any other value => use the system one
    param.randOrder = 0;
    // 0 => at each iteration, particles are modified
    //      always according to the same order 0..S-1
    // *1 => at each iteration, particles numbers are randomly permutated
    param.rotation = 0;
    // WARNING. Experimental code, completely valid only for dimension 2
    // 0 => sensitive to rotation of the system of coordinates
    // 1 => non sensitive (except side effects), by using a rotated
    //      hypercube for the probability distribution
    // WARNING. Quite time consuming!
    param.stop = 0;
    // Stop criterion
    // 0 => error < pb.epsilon
    // 1 => eval >= pb.evalMax
    // 2 => ||x-solution|| < pb.epsilon

    // ------------------------------------------------------- Some information
    Console.Write(String.Format("\n Function {0} ", functionCode));
    Console.Write("\n (clamping, randOrder, rotation, stop_criterion) = ({0}, {1}, {2}, {3})",
        param.clamping, param.randOrder, param.rotation, param.stop);
    //if (param.rand == 0) Console.Write("\n WARNING, I am using the RNG KISS");
    //Now just System.Random

    // =========================================================== RUNs
    // Initialize some objects
    Problem pb = Problem.problemDef(functionCode);

    // You may "manipulate" S, p, w and c, but here are the suggested values
    param.S = (int)(10 + 2 * Math.Sqrt(pb.SS.D));   // Swarm size
    if (param.S > S_max) param.S = S_max;
    Console.Write("\n Swarm size {0}", param.S);

    param.K = 3;
    param.p = 1.0 - Math.Pow(1.0 - (1.0 / (param.S)), param.K);
    // (to simulate the global best PSO, set param.p=1)
    //param.p=1;

    // According to Clerc's Stagnation Analysis
    param.w = 1.0 / (2.0 * Math.Log(2.0));   // 0.721
    param.c = 0.5 + Math.Log(2.0);           // 1.193
    Console.Write("\n c = {0}, w = {1}", param.c, param.w);

    //---------------
    sqrtD = Math.Sqrt(pb.SS.D);

    //------------------------------------- RUNS
    for (run = 0; run < runMax; run++)
    {
        //srand (clock () / 100); // May improve pseudo-randomness
        Result result = PSO(param, pb);

        if (result.error > pb.epsilon)   // Failure
        {
            nFailure = nFailure + 1;
        }

        // Memorize the best (useful if more than one run).
        // BUG FIX: bestBest starts as an empty Position whose f defaults
        // to 0, so the original test (result.error < bestBest.f) could
        // never succeed for non-negative errors and bestBest was never
        // seeded (crashing the final bestBest.x[d] print). The first run
        // must seed it unconditionally.
        if (run == 0 || result.error < bestBest.f)
            bestBest = result.SW.P[result.SW.best].Clone();

        // Result display
        Console.Write("\nRun {0}. Eval {1}. Error {2} \n", run + 1, result.nEval, result.error);
        //for (d=0;d<pb.SS.D;d++) Console.Write(" %f",result.SW.P[result.SW.best].x[d]);

        //TODO: Fix up writing out per-run results to f_run

        // Compute/store some statistical information
        if (run == 0) errorMin = result.error;
        else if (result.error < errorMin) errorMin = result.error;
        evalMean = evalMean + result.nEval;
        errorMean = errorMean + result.error;
        errorMeanBest[run] = result.error;
        logProgressMean = logProgressMean - Math.Log(result.error);
    }   // End loop on "run"

    // ---------------------END
    // Display some statistical information
    evalMean /= runMax;
    errorMean /= runMax;
    logProgressMean /= runMax;
    Console.Write("\n Eval. (mean)= {0}", evalMean);
    Console.Write("\n Error (mean) = {0}", errorMean);

    // "variance" here is actually the standard deviation of per-run errors.
    double variance = 0;
    for (run = 0; run < runMax; run++)
    {
        variance += Math.Pow(errorMeanBest[run] - errorMean, 2);
    }
    variance = Math.Sqrt(variance / runMax);
    Console.Write("\n Std. dev. {0}", variance);
    Console.Write("\n Log_progress (mean) = {0}", logProgressMean);

    // Success rate and minimum value
    Console.Write("\n Failure(s) {0}", nFailure);
    Console.Write("\n Success rate = {0}%", 100 * (1 - nFailure / (double)runMax));
    Console.Write("\n Best min value = {0}", errorMin);
    Console.Write("\nPosition of the optimum: ");
    for (int d = 0; d < pb.SS.D; d++)
    {
        Console.Write(" {0}", bestBest.x[d]);
    }

    //TODO: Fix up writing out the summary to f_synth
    // (errorMean, variance, successRate, evalMean, bestBest.x[d])

    // FIX: the output streams were opened but never released.
    f_run.Close();
    f_synth.Close();

    Console.ReadLine();
    return;   // End of main program
}