/// <inheritdoc />
public void Iteration()
{
    int len = _algorithm.LongTermMemory.Length;

    // For each dimension of long-term memory, tentatively try every candidate
    // step and commit only the best-scoring one.
    for (int i = 0; i < len; i++)
    {
        int best = -1;
        // Seed the comparison so any real score beats it, in either direction.
        double bestScore = _shouldMinimize ? double.PositiveInfinity : double.NegativeInfinity;

        for (int j = 0; j < _candidate.Length; j++)
        {
            // Apply candidate step j in place, score, then revert the change.
            _algorithm.LongTermMemory[i] += _stepSize[i] * _candidate[j];
            double temp = _score.CalculateScore(_algorithm);
            _algorithm.LongTermMemory[i] -= _stepSize[i] * _candidate[j];

            // Improvement test honors minimize vs. maximize.
            // NOTE(review): a tie (temp == bestScore) counts as an improvement
            // when maximizing but not when minimizing — confirm this asymmetry
            // is intended.
            if ((temp < bestScore) ? _shouldMinimize : !_shouldMinimize)
            {
                bestScore = temp;
                _lastError = bestScore;
                best = j;
            }
        }

        if (best != -1)
        {
            // Commit the winning step, then scale this dimension's step size
            // by the winning candidate factor for the next iteration.
            _algorithm.LongTermMemory[i] += _stepSize[i] * _candidate[best];
            _stepSize[i] = _stepSize[i] * _candidate[best];
        }
    }
}
/// <summary>
/// Recompute the score for every ant in the population.
/// </summary>
private void UpdateScore()
{
    foreach (ContinuousAnt ant in _population)
    {
        // Load the ant's parameters into the algorithm, then score it.
        Array.Copy(ant.Params, _algorithm.LongTermMemory, _paramCount);
        ant.Score = _score.CalculateScore(_algorithm);
    }
}
/// <inheritdoc />
public void Iteration()
{
    var len = _algorithm.LongTermMemory.Length;
    _k++;
    _currentTemperature = CoolingSchedule();

    for (var cycle = 0; cycle < _cycles; cycle++)
    {
        // Back up the current state so a rejected move can be rolled back.
        var oldState = new double[len];
        Array.Copy(_algorithm.LongTermMemory, 0, oldState, 0, len);

        // Randomize (perturb) the method's long-term memory.
        PerformRandomize(_algorithm.LongTermMemory);

        var trialError = _score.CalculateScore(_algorithm);

        // Always keep an improvement; otherwise accept a worse state with a
        // probability determined by the annealing schedule.
        var keep = false;
        if (trialError < _currentError)
        {
            keep = true;
        }
        else
        {
            _lastProbability = CalcProbability(_currentError, trialError, _currentTemperature);
            if (_lastProbability > _rnd.NextDouble())
            {
                keep = true;
            }
        }

        if (keep)
        {
            _currentError = trialError;

            // Track the best state seen anywhere during the search.
            if (trialError < _globalBestError)
            {
                _globalBestError = trialError;
                // FIX: removed a redundant copy of the current state into
                // oldState here — oldState is re-created at the top of every
                // cycle and never read after a kept move, so that copy was a
                // dead store.
                Array.Copy(_algorithm.LongTermMemory, 0, _globalBest, 0, len);
            }
        }
        else
        {
            // Rejected: restore the previous state.
            Array.Copy(oldState, 0, _algorithm.LongTermMemory, 0, len);
        }
    }
}
/// <summary>
/// Construct the simulated annealing trainer.
/// </summary>
/// <param name="theAlgorithm">The algorithm to optimize.</param>
/// <param name="theScore">The score function.</param>
/// <param name="theKMax">The max number of iterations.</param>
/// <param name="theStartingTemperature">The starting temperature.</param>
/// <param name="theEndingTemperature">The ending temperature.</param>
public TrainAnneal(IMLMethod theAlgorithm, IScoreFunction theScore, int theKMax,
    double theStartingTemperature, double theEndingTemperature)
{
    _algorithm = theAlgorithm;
    _score = theScore;
    _kMax = theKMax;
    _startingTemperature = theStartingTemperature;
    _endingTemperature = theEndingTemperature;

    // Score the starting state and record it as the best seen so far.
    _currentError = _score.CalculateScore(_algorithm);
    var memoryLength = theAlgorithm.LongTermMemory.Length;
    _globalBest = new double[memoryLength];
    Array.Copy(_algorithm.LongTermMemory, 0, _globalBest, 0, memoryLength);
}
/// <summary>
/// Update the personal best position of a particle.
/// </summary>
/// <param name="particleIndex">Index of the particle in the swarm.</param>
/// <param name="particlePosition">The particle's current position vector.</param>
protected void UpdatePersonalBestPosition(int particleIndex, double[] particlePosition)
{
    // Score the particle's current state.
    double score = _score.CalculateScore(_particles[particleIndex]);

    // NOTE(review): a stored best score of exactly 0 is treated as "never
    // scored"; a legitimate score of 0 would be overwritten on the next call —
    // confirm this sentinel is intended.
    bool neverScored = _bestScores[particleIndex] == 0;
    if (neverScored || IsScoreBetter(score, _bestScores[particleIndex]))
    {
        _bestScores[particleIndex] = score;
        VectorAlgebra.Copy(_bestVectors[particleIndex], particlePosition);
    }
}
/// <summary>
/// Perform the task: decode the genome into a phenotype, score it, and apply
/// any score adjusters.
/// </summary>
public void PerformTask()
{
    IMLMethod phenotype = owner.CODEC.Decode(genome);
    if (phenotype != null)
    {
        double score;
        try
        {
            score = scoreFunction.CalculateScore(phenotype);
        }
        catch (AIFHError)
        {
            // FIX: the exception variable was declared but never used (compiler
            // warning CS0168); a scoring failure is deliberately recorded as NaN.
            score = double.NaN;
        }

        genome.Score = score;
        genome.AdjustedScore = score;
        BasicEA.CalculateScoreAdjustment(genome, adjusters);
    }
}
/// <summary>
/// Perform one iteration of greedy random search.
/// </summary>
public void Iteration()
{
    var len = _algorithm.LongTermMemory.Length;

    // Snapshot the current state so a worse move can be rolled back.
    var backup = new double[len];
    Array.Copy(_algorithm.LongTermMemory, 0, backup, 0, len);

    // Randomize (perturb) the method.
    PerformRandomize(_algorithm.LongTermMemory);

    // Greedy acceptance: keep the new state only if the score improved, where
    // "improved" depends on whether we are minimizing or maximizing.
    double currentError = _score.CalculateScore(_algorithm);
    bool improved = currentError < _lastError ? _shouldMinimize : !_shouldMinimize;
    if (improved)
    {
        _lastError = currentError;
    }
    else
    {
        Array.Copy(backup, 0, _algorithm.LongTermMemory, 0, len);
    }
}
/// <summary>
/// Construct the simulated annealing trainer.
/// </summary>
/// <param name="theAlgorithm">The algorithm to optimize.</param>
/// <param name="theScore">The score function.</param>
/// <param name="theKMax">The max number of iterations.</param>
/// <param name="theStartingTemperature">The starting temperature.</param>
/// <param name="theEndingTemperature">The ending temperature.</param>
public TrainAnneal(IMachineLearningAlgorithm theAlgorithm, IScoreFunction theScore,
    int theKMax, double theStartingTemperature, double theEndingTemperature)
{
    _algorithm = theAlgorithm;
    _score = theScore;
    _kMax = theKMax;
    _startingTemperature = theStartingTemperature;
    _endingTemperature = theEndingTemperature;

    // Evaluate the initial state and seed the global best from it.
    _currentError = _score.CalculateScore(_algorithm);
    int n = theAlgorithm.LongTermMemory.Length;
    _globalBest = new double[n];
    Array.Copy(_algorithm.LongTermMemory, 0, _globalBest, 0, n);
}
/// <summary>
/// Calculate the error for the neural network with a given set of weights.
/// </summary>
/// <param name="weights">The weights to use.</param>
/// <returns>The current error.</returns>
public double Fn(double[] weights)
{
    // Install the weights into long-term memory, then score the algorithm.
    Array.Copy(weights, 0, _algorithm.LongTermMemory, 0, weights.Length);
    return _score.CalculateScore(_algorithm);
}