// L-norm of a vector
public static double normL(Velocity v, double L)
{
    double n = 0;
    for (int d = 0; d < v.size; d++)
        n = n + Math.Pow(Math.Abs(v.v[d]), L);
    n = Math.Pow(n, 1 / L);
    return n;
}
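// Added usage sketch (not part of the original source): it assumes it sits next to
// normL in the Velocity class, with the public fields `size` and `v` used above.
// With L = 2 the call returns the ordinary Euclidean norm.
private static double NormExample()
{
    var u = new Velocity { size = 3 };        // hypothetical 3-dimensional vector
    u.v[0] = 3.0; u.v[1] = 0.0; u.v[2] = 4.0;
    return normL(u, 2);                       // 5.0 = sqrt(3^2 + 0^2 + 4^2)
}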
public Swarm(int maxSwarmSize)
{
    P = new Position[maxSwarmSize];
    X = new Position[maxSwarmSize];
    V = new Velocity[maxSwarmSize];
    for (int i = 0; i < maxSwarmSize; i++)
    {
        P[i] = new Position();
        X[i] = new Position();
        V[i] = new Velocity();
    }
    // Note: the Enumerable.Repeat alternatives below would fill every slot with the
    // same instance, hence the explicit loop above.
    //P = Enumerable.Repeat(new Position(), maxSwarmSize).ToArray();
    //X = Enumerable.Repeat(new Position(), maxSwarmSize).ToArray();
    //V = Enumerable.Repeat(new Velocity(), maxSwarmSize).ToArray();
}
public static Velocity NextVector(this Random rand, int dimensions, double coeff)
{
    Velocity velocity = new Velocity();
    int K = 2; // 1 => uniform distribution in a hypercube
               // 2 => "triangle" distribution
    velocity.size = dimensions;
    for (int d = 0; d < dimensions; d++)
    {
        double rnd = 0.0;
        for (int i = 0; i < K; i++)
        {
            rnd += rand.NextDouble();
        }
        velocity.v[d] = rnd * coeff / K;
    }
    return velocity;
}
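// Added note (not in the original source): averaging K uniform draws keeps each
// component in [0, coeff]; with K = 2 the density is triangular with its mode at
// coeff / 2, concentrating random steps around mid-range values. Hedged usage
// sketch, assuming it is placed in the same static class as the extension method:
private static Velocity NextVectorExample(Random rng)
{
    // 10-dimensional random step; each component lies in [0, 0.5] and is
    // triangular-distributed around 0.25 because K = 2 above.
    return rng.NextVector(10, 0.5);
}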
public int NextIteration()
{
    double lAve = 0.0;
    int vNum = 0;
    int numClamped = 0;
    iter++;

    if (initLinks == 1) // Random topology
    {
        // Who informs who, at random
        for (s = 0; s < PSOResult.SW.S; s++)
        {
            for (m = 0; m < PSOResult.SW.S; m++)
            {
                if (rand.NextDouble() < p) LINKS[m, s] = 1; // Probabilistic method
                else LINKS[m, s] = 0;
            }
            LINKS[s, s] = 1;
        }
    }

    // The swarm MOVES
    //Utils.Logger.Log("\nIteration %i",iter);
    for (int i = 0; i < PSOResult.SW.S; i++) index[i] = i;

    // Permute the index order
    if (param.randOrder == 1)
    {
        index.Shuffle(7, PSOResult.SW.S);
    }

    Velocity GX = new Velocity(pb.SS.D);
    for (s0 = 0; s0 < PSOResult.SW.S; s0++) // For each particle ...
    {
        s = index[s0];

        // ... find the first informant
        s1 = 0;
        while (LINKS[s1, s] == 0) s1++;
        if (s1 >= PSOResult.SW.S) s1 = s;

        // Find the best informant
        g = s1;
        for (m = s1; m < PSOResult.SW.S; m++)
        {
            if (LINKS[m, s] == 1 && PSOResult.SW.P[m].f < PSOResult.SW.P[g].f)
                g = m;
        }

        // ... compute the new velocity, and move
        // Exploration tendency
        for (d = 0; d < pb.SS.D; d++)
        {
            PSOResult.SW.V[s].v[d] = param.w * PSOResult.SW.V[s].v[d];
            // Prepare exploitation tendency p-x
            PX.v[d] = PSOResult.SW.P[s].x[d] - PSOResult.SW.X[s].x[d];
            if (g != s)
                GX.v[d] = PSOResult.SW.P[g].x[d] - PSOResult.SW.X[s].x[d]; // g-x
        }
        PX.size = pb.SS.D;
        GX.size = pb.SS.D;

        // Option: "non-sensitivity to rotation"
        if (param.rotation > 0)
        {
            normPX = Velocity.normL(PX, 2);
            if (g != s) normGX = Velocity.normL(GX, 2);
            if (normPX > 0)
            {
                RotatePX = Matrix.MatrixRotation(PX);
            }
            if (g != s && normGX > 0)
            {
                RotateGX = Matrix.MatrixRotation(GX);
            }
        }

        // Exploitation tendencies
        switch (param.rotation)
        {
            default:
                for (d = 0; d < pb.SS.D; d++)
                {
                    PSOResult.SW.V[s].v[d] = PSOResult.SW.V[s].v[d] + rand.NextDouble(0.0, param.c) * PX.v[d];
                    if (g != s)
                        PSOResult.SW.V[s].v[d] = PSOResult.SW.V[s].v[d] + rand.NextDouble(0.0, param.c) * GX.v[d];
                }
                break;

            case 1:
                // First exploitation tendency
                if (normPX > 0)
                {
                    zz = param.c * normPX / sqrtD;
                    aleaV = rand.NextVector(pb.SS.D, zz);
                    Velocity expt1 = RotatePX.VectorProduct(aleaV);
                    for (d = 0; d < pb.SS.D; d++)
                    {
                        PSOResult.SW.V[s].v[d] = PSOResult.SW.V[s].v[d] + expt1.v[d];
                    }
                }
                // Second exploitation tendency
                if (g != s && normGX > 0)
                {
                    zz = param.c * normGX / sqrtD;
                    aleaV = rand.NextVector(pb.SS.D, zz);
                    Velocity expt2 = RotateGX.VectorProduct(aleaV);
                    for (d = 0; d < pb.SS.D; d++)
                    {
                        PSOResult.SW.V[s].v[d] = PSOResult.SW.V[s].v[d] + expt2.v[d];
                    }
                }
                break;
        }

        // Update the position
        for (d = 0; d < pb.SS.D; d++)
        {
            lAve += Math.Abs(PSOResult.SW.V[s].v[d]);
            vNum++;
            if (iter > 1)
            {
                // Clamp runaway velocity components to the running average magnitude
                if (PSOResult.SW.V[s].v[d] > vMax)
                {
                    PSOResult.SW.V[s].v[d] = vAve;
                }
                else if (PSOResult.SW.V[s].v[d] < -vMax)
                {
                    PSOResult.SW.V[s].v[d] = -vAve;
                }
            }
            PSOResult.SW.X[s].x[d] = PSOResult.SW.X[s].x[d] + PSOResult.SW.V[s].v[d];
        }

        // --------------------------
        //noEval = 1;

        // Quantisation
        //Position.quantis(PSOResult.SW.X[s], pb.SS);

        switch (param.clamping)
        {
            case 0: // No clamping AND no evaluation
                outside = 0;
                for (d = 0; d < pb.SS.D; d++)
                {
                    if (PSOResult.SW.X[s].x[d] < pb.SS.min[d] || PSOResult.SW.X[s].x[d] > pb.SS.max[d])
                        outside++;
                }
                //if (outside == 0) // If inside, the position is evaluated
                {
                    PSOResult.SW.X[s].f = pb.perf(PSOResult.SW.X[s], pb.function, pb.objective);
                    PSOResult.nEval = PSOResult.nEval + 1;
                }
                break;

            case 1: // Set to the bounds, and v to zero
                for (d = 0; d < pb.SS.D; d++)
                {
                    if (PSOResult.SW.X[s].x[d] < pb.SS.min[d])
                    {
                        PSOResult.SW.X[s].x[d] = pb.SS.min[d];
                        PSOResult.SW.V[s].v[d] = 0;
                        numClamped++;
                    }
                    if (PSOResult.SW.X[s].x[d] > pb.SS.max[d])
                    {
                        PSOResult.SW.X[s].x[d] = pb.SS.max[d];
                        PSOResult.SW.V[s].v[d] = 0;
                        numClamped++;
                    }
                }
                PSOResult.SW.X[s].f = pb.perf(PSOResult.SW.X[s], pb.function, pb.objective);
                PSOResult.nEval = PSOResult.nEval + 1;
                break;
        }

        // ... update the best previous position
        if (PSOResult.SW.X[s].f < PSOResult.SW.P[s].f) // Improvement
        {
            PSOResult.SW.P[s] = PSOResult.SW.X[s].Clone();

            // ... update the best of the bests
            if (PSOResult.SW.P[s].f < PSOResult.SW.P[PSOResult.SW.best].f)
            {
                PSOResult.SW.best = s;
            }
        }
    } // End of "for (s0 = 0 ..."

    vAve = lAve / (double)vNum;

    // Check if finished
    switch (param.stop)
    {
        default:
            Error = PSOResult.SW.P[PSOResult.SW.best].f;
            break;
        case 2:
            Error = Position.distanceL(PSOResult.SW.P[PSOResult.SW.best], pb.solution, 2);
            break;
    }
    //error= fabs(error - pb.epsilon);

    if (Error < errorPrev) // Improvement
    {
        initLinks = 0;
    }
    else // No improvement
    {
        initLinks = 1; // Information links will be reinitialized
    }

    if (param.initLink == 1) initLinks = 1 - initLinks;
    errorPrev = Error;

    //return (int)PSOResult.nEval;
    return numClamped;
}
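// Hedged driver sketch (not part of the original source): NextIteration is meant to
// be called repeatedly after constructing an Algorithm (see the constructor below).
// Whether Error, epsilon and the other members are accessible from outside the class
// is an assumption here, so the loop is shown as a comment only:
//     var alg = new Algorithm(pb, param);
//     while (alg.Error > pb.epsilon)
//     {
//         int clamped = alg.NextIteration();   // returns how many components were clamped
//     }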
// =================================================
public Algorithm(Problem pb, Parameters param)
{
    this.pb = pb;
    bestBest = new Position(pb.SS.D);
    PX = new Velocity(pb.SS.D);
    R = new SPSO_2007.Result(pb.SS.D);
    //f_run = File.OpenWrite("f_run.txt");
    //f_synth = File.OpenWrite("f_synth.txt");

    // ----------------------------------------------- PROBLEM
    this.param = param;
    runMax = 100;
    if (runMax > R_max) runMax = R_max;
    this.vMax = param.vMax;

    Utils.Logger.Log("\n c = {0}, w = {1}", param.c, param.w);

    //---------------
    sqrtD = Math.Sqrt(pb.SS.D);

    //------------------------------------- RUNS
    /*
    for (run = 0; run < runMax; run++)
    {
    } // End loop on "run"
    */

    // --------------------- END
    // Save
    //TODO: Fix up writing out to files
    /*
    fUtils.Logger.Log(f_synth, "%f %f %.0f%% %f ", errorMean, variance, successRate, evalMean);
    for (d = 0; d < pb.SS.D; d++) fUtils.Logger.Log(f_synth, " %f", bestBest.x[d]);
    fUtils.Logger.Log(f_synth, "\n");
    */

    return; // End of constructor (adapted from the original C main program)
}
public static Matrix MatrixRotation(Velocity V)
{
    /* Define the matrix of the rotation V' => V
       where V' = (1, 1, ..., 1) * normV / Math.Sqrt(D)
       (i.e. norm(V') = norm(V))
    */
    int D = V.size;
    double normV = Velocity.normL(V, 2);

    var reflex1 = new Matrix { size = D };
    var reflex2 = new Matrix { size = D };

    // Reflection relative to the vector V' = (1, 1, ..., 1) / Math.Sqrt(D)
    // norm(V') = 1
    for (int i = 0; i < D; i++)
    {
        for (int j = 0; j < D; j++)
        {
            reflex1.v[i, j] = -2.0 / D;
        }
    }
    for (int d = 0; d < D; d++)
    {
        reflex1.v[d, d] = 1 + reflex1.v[d, d];
    }

    // Define the "bisectrix" B of (V', V) as a unit vector
    var B = new Velocity { size = D };
    for (int d = 0; d < D; d++)
    {
        B.v[d] = V.v[d] + normV / Math.Sqrt(D);
    }

    double normB = Velocity.normL(B, 2);
    if (normB > 0)
    {
        for (int d = 0; d < D; d++)
        {
            B.v[d] = B.v[d] / normB;
        }
    }

    // Reflection relative to B
    for (int i = 0; i < D; i++)
    {
        for (int j = 0; j < D; j++)
        {
            reflex2.v[i, j] = -2 * B.v[i] * B.v[j];
        }
    }
    for (int d = 0; d < D; d++)
    {
        reflex2.v[d, d] = 1 + reflex2.v[d, d];
    }

    // Multiply the two reflections => rotation
    return MatrixProduct(reflex2, reflex1);
}
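// Added sanity-check sketch (not part of the original source): the two Householder
// reflections above compose into a rotation that maps V' = (1, ..., 1) * |V| / sqrt(D)
// onto V. It assumes this helper sits in the same Matrix class, with the Velocity and
// Matrix fields used above.
private static bool RotationMapsDiagonalOntoV(Velocity V, double tol)
{
    int D = V.size;
    Matrix rot = MatrixRotation(V);
    double normV = Velocity.normL(V, 2);

    var diag = new Velocity { size = D };
    for (int d = 0; d < D; d++) diag.v[d] = normV / Math.Sqrt(D);   // V'

    Velocity mapped = rot.VectorProduct(diag);                      // should equal V
    for (int d = 0; d < D; d++)
        if (Math.Abs(mapped.v[d] - V.v[d]) > tol) return false;
    return true;
}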
// Matrix-vector product: applies this (size x size) matrix to V
public Velocity VectorProduct(Velocity V)
{
    var velocity = new Velocity();
    velocity.size = V.size;
    for (int d = 0; d < V.size; d++)
    {
        double sum = 0;
        for (int j = 0; j < V.size; j++)
        {
            sum = sum + this.v[d, j] * V.v[j];
        }
        velocity.v[d] = sum;
    }
    return velocity;
}
// ===============================================================
// PSO
static Result PSO(Parameters param, Problem pb)
{
    Velocity aleaV = new Velocity();
    int d;
    int g;
    int[] index = new int[S_max];
    int[] indexTemp = new int[S_max];
    // Iteration number (time step)
    int iterBegin;
    int[,] LINKS = new int[S_max, S_max]; // Information links
    int m;
    int noEval;
    double normPX = 0.0, normGX = 0.0;
    int noStop;
    int outside;
    double p;
    Velocity PX = new Velocity();
    Result R = new Result();
    Matrix RotatePX = new Matrix();
    Matrix RotateGX = new Matrix();
    int s0, s, s1;
    double zz;

    aleaV.size = pb.SS.D;
    RotatePX.size = pb.SS.D;
    RotateGX.size = pb.SS.D;

    // -----------------------------------------------------
    // INITIALISATION
    p = param.p;      // Probability threshold for random topology
    R.SW.S = param.S; // Size of the current swarm

    // Position and velocity
    for (s = 0; s < R.SW.S; s++)
    {
        R.SW.X[s].size = pb.SS.D;
        R.SW.V[s].size = pb.SS.D;
        for (d = 0; d < pb.SS.D; d++)
        {
            R.SW.X[s].x[d] = rand.NextDouble(pb.SS.minInit[d], pb.SS.maxInit[d]);
            R.SW.V[s].v[d] = (rand.NextDouble(pb.SS.min[d], pb.SS.max[d]) - R.SW.X[s].x[d]) / 2;
        }

        // Take quantisation into account
        Position.quantis(R.SW.X[s], pb.SS);

        // First evaluations
        R.SW.X[s].f = Problem.perf(R.SW.X[s], pb.function, pb.objective);
        R.SW.P[s] = R.SW.X[s].Clone(); // Best position = current one
        R.SW.P[s].improved = 0;        // No improvement
    }

    // If the maximum number of evaluations is smaller than
    // the swarm size, just keep evalMax particles, and finish
    if (R.SW.S > pb.evalMax) R.SW.S = pb.evalMax;
    R.nEval = R.SW.S;

    // Find the best
    R.SW.best = 0;
    double errorPrev;
    switch (param.stop)
    {
        default:
            errorPrev = R.SW.P[R.SW.best].f; // "Distance" to the wanted f value (objective)
            break;
        case 2:
            errorPrev = Position.distanceL(R.SW.P[R.SW.best], pb.solution, 2); // Distance to the wanted solution
            break;
    }

    for (s = 1; s < R.SW.S; s++)
    {
        switch (param.stop)
        {
            default:
                zz = R.SW.P[s].f;
                if (zz < errorPrev)
                {
                    R.SW.best = s;
                    errorPrev = zz;
                }
                break;
            case 2:
                zz = Position.distanceL(R.SW.P[R.SW.best], pb.solution, 2);
                if (zz < errorPrev)
                {
                    R.SW.best = s;
                    errorPrev = zz;
                }
                break;
        }
    }

    // Display the best
    Console.Write(" Best value after init. {0} ", errorPrev);
    // Console.Write( "\n Position :\n" );
    // for ( d = 0; d < SS.D; d++ ) Console.Write( " %f", R.SW.P[R.SW.best].x[d] );

    int initLinks = 1; // So that information links will be initialized
                       // Note: it is also a flag saying "No improvement"
    noStop = 0;
    double error = errorPrev;

    // ---------------------------------------------- ITERATIONS
    int iter = 0;
    while (noStop == 0)
    {
        iter++;

        if (initLinks == 1) // Random topology
        {
            // Who informs who, at random
            for (s = 0; s < R.SW.S; s++)
            {
                for (m = 0; m < R.SW.S; m++)
                {
                    if (rand.NextDouble() < p) LINKS[m, s] = 1; // Probabilistic method
                    else LINKS[m, s] = 0;
                }
                LINKS[s, s] = 1;
            }
        }

        // The swarm MOVES
        //Console.Write("\nIteration %i",iter);
        for (int i = 0; i < R.SW.S; i++) index[i] = i;

        // Permute the index order
        if (param.randOrder == 1)
        {
            index.Shuffle(7, R.SW.S);
        }

        Velocity GX = new Velocity();
        for (s0 = 0; s0 < R.SW.S; s0++) // For each particle ...
        {
            s = index[s0];

            // ... find the first informant
            s1 = 0;
            while (LINKS[s1, s] == 0) s1++;
            if (s1 >= R.SW.S) s1 = s;

            // Find the best informant
            g = s1;
            for (m = s1; m < R.SW.S; m++)
            {
                if (LINKS[m, s] == 1 && R.SW.P[m].f < R.SW.P[g].f)
                    g = m;
            }

            // ... compute the new velocity, and move
            // Exploration tendency
            for (d = 0; d < pb.SS.D; d++)
            {
                R.SW.V[s].v[d] = param.w * R.SW.V[s].v[d];
                // Prepare exploitation tendency p-x
                PX.v[d] = R.SW.P[s].x[d] - R.SW.X[s].x[d];
                if (g != s)
                    GX.v[d] = R.SW.P[g].x[d] - R.SW.X[s].x[d]; // g-x
            }
            PX.size = pb.SS.D;
            GX.size = pb.SS.D;

            // Option: "non-sensitivity to rotation"
            if (param.rotation > 0)
            {
                normPX = Velocity.normL(PX, 2);
                if (g != s) normGX = Velocity.normL(GX, 2);
                if (normPX > 0)
                {
                    RotatePX = Matrix.MatrixRotation(PX);
                }
                if (g != s && normGX > 0)
                {
                    RotateGX = Matrix.MatrixRotation(GX);
                }
            }

            // Exploitation tendencies
            switch (param.rotation)
            {
                default:
                    for (d = 0; d < pb.SS.D; d++)
                    {
                        R.SW.V[s].v[d] = R.SW.V[s].v[d] + rand.NextDouble(0.0, param.c) * PX.v[d];
                        if (g != s)
                            R.SW.V[s].v[d] = R.SW.V[s].v[d] + rand.NextDouble(0.0, param.c) * GX.v[d];
                    }
                    break;

                case 1:
                    // First exploitation tendency
                    if (normPX > 0)
                    {
                        zz = param.c * normPX / sqrtD;
                        aleaV = rand.NextVector(pb.SS.D, zz);
                        Velocity expt1 = RotatePX.VectorProduct(aleaV);
                        for (d = 0; d < pb.SS.D; d++)
                        {
                            R.SW.V[s].v[d] = R.SW.V[s].v[d] + expt1.v[d];
                        }
                    }
                    // Second exploitation tendency
                    if (g != s && normGX > 0)
                    {
                        zz = param.c * normGX / sqrtD;
                        aleaV = rand.NextVector(pb.SS.D, zz);
                        Velocity expt2 = RotateGX.VectorProduct(aleaV);
                        for (d = 0; d < pb.SS.D; d++)
                        {
                            R.SW.V[s].v[d] = R.SW.V[s].v[d] + expt2.v[d];
                        }
                    }
                    break;
            }

            // Update the position
            for (d = 0; d < pb.SS.D; d++)
            {
                R.SW.X[s].x[d] = R.SW.X[s].x[d] + R.SW.V[s].v[d];
            }

            if (R.nEval >= pb.evalMax)
            {
                //error= fabs(error - pb.objective);
                goto end;
            }

            // --------------------------
            noEval = 1;

            // Quantisation
            Position.quantis(R.SW.X[s], pb.SS);

            switch (param.clamping)
            {
                case 0: // No clamping AND no evaluation
                    outside = 0;
                    for (d = 0; d < pb.SS.D; d++)
                    {
                        if (R.SW.X[s].x[d] < pb.SS.min[d] || R.SW.X[s].x[d] > pb.SS.max[d])
                            outside++;
                    }
                    if (outside == 0) // If inside, the position is evaluated
                    {
                        R.SW.X[s].f = Problem.perf(R.SW.X[s], pb.function, pb.objective);
                        R.nEval = R.nEval + 1;
                    }
                    break;

                case 1: // Set to the bounds, and v to zero
                    for (d = 0; d < pb.SS.D; d++)
                    {
                        if (R.SW.X[s].x[d] < pb.SS.min[d])
                        {
                            R.SW.X[s].x[d] = pb.SS.min[d];
                            R.SW.V[s].v[d] = 0;
                        }
                        if (R.SW.X[s].x[d] > pb.SS.max[d])
                        {
                            R.SW.X[s].x[d] = pb.SS.max[d];
                            R.SW.V[s].v[d] = 0;
                        }
                    }
                    R.SW.X[s].f = Problem.perf(R.SW.X[s], pb.function, pb.objective);
                    R.nEval = R.nEval + 1;
                    break;
            }

            // ... update the best previous position
            if (R.SW.X[s].f < R.SW.P[s].f) // Improvement
            {
                R.SW.P[s] = R.SW.X[s].Clone();

                // ... update the best of the bests
                if (R.SW.P[s].f < R.SW.P[R.SW.best].f)
                {
                    R.SW.best = s;
                }
            }
        } // End of "for (s0 = 0 ..."

        // Check if finished
        switch (param.stop)
        {
            default:
                error = R.SW.P[R.SW.best].f;
                break;
            case 2:
                error = Position.distanceL(R.SW.P[R.SW.best], pb.solution, 2);
                break;
        }
        //error= fabs(error - pb.epsilon);

        if (error < errorPrev) // Improvement
        {
            initLinks = 0;
        }
        else // No improvement
        {
            initLinks = 1; // Information links will be reinitialized
        }

        if (param.initLink == 1) initLinks = 1 - initLinks;
        errorPrev = error;

    end:
        switch (param.stop)
        {
            case 0:
            case 2:
                if (error > pb.epsilon && R.nEval < pb.evalMax)
                {
                    noStop = 0; // Won't stop
                }
                else
                {
                    noStop = 1; // Will stop
                }
                break;
            case 1:
                if (R.nEval < pb.evalMax)
                    noStop = 0; // Won't stop
                else
                    noStop = 1; // Will stop
                break;
        }
    } // End of "while (noStop == 0)"

    // Console.Write( "\n and the winner is ... %i", R.SW.best );
    // fConsole.Write( f_stag, "\nEND" );
    R.error = error;
    return R;
}
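// Hedged usage sketch (not part of the original source): the field names below are
// taken from the code above, but how Parameters and Problem are constructed depends
// on the rest of the library, so the example is shown as a comment only:
//     Parameters param = new Parameters { S = 40, p = 0.5, w = 0.7, c = 1.5,
//                                         clamping = 1, stop = 0, randOrder = 1,
//                                         rotation = 0, initLink = 0 };
//     Problem pb = ...;                            // problem definition built elsewhere
//     Result r = PSO(param, pb);
//     Console.WriteLine("best error {0} after {1} evaluations", r.error, r.nEval);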