/// <summary>
/// Creates the sigmoid-backed Encog activation wrapper.
/// The sigmoid output range drives the level/min/max settings: [0, 1].
/// </summary>
public EncogSigActivation()
    : base()
{
    activationFunction = new ActivationSigmoid();

    // Mirror the sigmoid's output range in the wrapper's metadata.
    activationMax = 1.0;
    activationMin = 0.0;
    activationLevel = 1.0;
}
/// <summary>
/// Verifies ActivationSigmoid: it reports a derivative, can be cloned,
/// maps 0 to 0.5, and its derivative at output 0.5 is 0.25.
/// </summary>
public void TestSigmoid()
{
    var activation = new ActivationSigmoid();
    Assert.IsTrue(activation.HasDerivative);

    var clone = (ActivationSigmoid) activation.Clone();
    Assert.IsNotNull(clone);

    double[] input = {0.0};
    activation.ActivationFunction(input, 0, 1);
    Assert.AreEqual(0.5, input[0], 0.1); // sigmoid(0) == 0.5

    // Derivative expressed via the activation output a: a * (1 - a),
    // which is 0.25 at a = 0.5.  (The old comment claimed this call
    // "should throw an error" -- it never does; HasDerivative is true
    // and the result is asserted below.)
    input[0] = activation.DerivativeFunction(input[0], input[0]);
    Assert.AreEqual(0.25, input[0], 0.1);
}
/// <summary>
/// Exercises ActivationSigmoid: derivative support, clone type,
/// sigmoid(0) == 0.5, and a derivative of 0.25 at that output.
/// </summary>
public void TestSigmoid()
{
    var sigmoid = new ActivationSigmoid();

    Assert.IsTrue(sigmoid.HasDerivative);

    var copy = sigmoid.Clone();
    Assert.IsInstanceOfType(copy, typeof(ActivationSigmoid));

    var data = new double[] { 0.0 };
    sigmoid.ActivationFunction(data, 0, 1);
    Assert.AreEqual(0.5, data[0], 0.1);

    data[0] = sigmoid.DerivativeFunction(0, data[0]);
    Assert.AreEqual(0.25, data[0], 0.1);
}
/// <summary>
/// Verifies ActivationSigmoid: it reports a derivative, can be cloned,
/// maps 0 to 0.5, and its derivative at output 0.5 is 0.25.
/// </summary>
public void TestSigmoid()
{
    var activation = new ActivationSigmoid();
    Assert.IsTrue(activation.HasDerivative);

    var clone = (ActivationSigmoid)activation.Clone();
    Assert.IsNotNull(clone);

    double[] input = { 0.0 };
    activation.ActivationFunction(input, 0, 1);
    Assert.AreEqual(0.5, input[0], 0.1); // sigmoid(0) == 0.5

    // The sigmoid derivative via its output a is a * (1 - a) = 0.25 at
    // a = 0.5.  (The previous comment said this "should throw an error";
    // it does not -- HasDerivative is true and the value is asserted.)
    input[0] = activation.DerivativeFunction(input[0], input[0]);
    Assert.AreEqual(0.25, input[0], 0.1);
}
/// <summary>
/// Constructs a flat feed-forward network with up to two hidden layers.
/// The input layer is linear with bias activation 1.0; hidden and output
/// layers use TANH or Sigmoid depending on the <paramref name="tanh"/> flag;
/// the output layer has no bias activation (0.0).
/// </summary>
/// <param name="input">Neuron count of the input layer.</param>
/// <param name="hidden1">Neuron count of the first hidden layer (0 = none).</param>
/// <param name="hidden2">Neuron count of the second hidden layer (0 = none).</param>
/// <param name="output">Neuron count of the output layer.</param>
/// <param name="tanh">True for TANH activations, false for Sigmoid.</param>
public FlatNetwork(int input, int hidden1, int hidden2, int output, bool tanh)
{
    // NOTE(review): the original body was obfuscated/decompiled goto
    // spaghetti containing opaque predicates ("if (0 != 0)",
    // "(uint)x > uint.MaxValue") and a read of the unassigned local 'num';
    // it could not compile as written.  This is the structured equivalent
    // obtained by tracing the goto graph.
    IActivationFunction linearAct = new ActivationLinear();

    // The decompiled text created an ActivationSigmoid ('function3') that was
    // never wired in -- evidently a decompiler artifact.  The intent is to
    // select the hidden/output activation from the 'tanh' flag; confirm
    // against the original assembly if possible.
    IActivationFunction act = tanh
        ? (IActivationFunction) new ActivationTANH()
        : new ActivationSigmoid();

    FlatLayer[] layers;
    if (hidden1 == 0 && hidden2 == 0)
    {
        // No hidden layers: input -> output.
        layers = new FlatLayer[]
        {
            new FlatLayer(linearAct, input, 1.0),
            new FlatLayer(act, output, 0.0)
        };
    }
    else if (hidden1 == 0 || hidden2 == 0)
    {
        // Exactly one hidden layer: use whichever count is non-zero.
        int count = Math.Max(hidden1, hidden2);
        layers = new FlatLayer[]
        {
            new FlatLayer(linearAct, input, 1.0),
            new FlatLayer(act, count, 1.0),
            new FlatLayer(act, output, 0.0)
        };
    }
    else
    {
        // Both hidden layers present.
        layers = new FlatLayer[]
        {
            new FlatLayer(linearAct, input, 1.0),
            new FlatLayer(act, hidden1, 1.0),
            new FlatLayer(act, hidden2, 1.0),
            new FlatLayer(act, output, 0.0)
        };
    }

    // The decompiled flow always ended by clearing the connection limit
    // and initializing from the layer array.
    this._isLimited = false;
    this._connectionLimit = 0.0;
    this.Init(layers);
}
/// <summary>
/// Builds the small fixed NEAT test network: two inputs, one bias neuron,
/// one hidden neuron and one output, with input/bias linked to the hidden
/// neuron and the hidden neuron linked to the output.
/// </summary>
/// <returns>The assembled NEATNetwork (2 inputs, 1 output, depth 3).</returns>
private NEATNetwork Create()
{
    IActivationFunction afSigmoid = new ActivationSigmoid();
    IActivationFunction afStep = new ActivationStep();

    // Neurons, with the same ids and parameters as before.
    var inputA = new NEATNeuron(NEATNeuronType.Input, 1, 0.1, 0.2, 0.3);
    var inputB = new NEATNeuron(NEATNeuronType.Input, 2, 0.1, 0.2, 0.3);
    var biasNeuron = new NEATNeuron(NEATNeuronType.Bias, 3, 0.1, 0.2, 0.3);
    var hiddenNeuron = new NEATNeuron(NEATNeuronType.Hidden, 4, 0.1, 0.2, 0.3);
    var outputNeuron = new NEATNeuron(NEATNeuronType.Output, 5, 0.1, 0.2, 0.3);

    // Same insertion order as the original: inputs, hidden, bias, output.
    IList<NEATNeuron> neurons = new List<NEATNeuron>
    {
        inputA,
        inputB,
        hiddenNeuron,
        biasNeuron,
        outputNeuron
    };

    // Wire everything toward the hidden neuron, then hidden -> output.
    Link(0.01, inputA, hiddenNeuron, false);
    Link(0.01, inputB, hiddenNeuron, false);
    Link(0.01, biasNeuron, hiddenNeuron, false);
    Link(0.01, hiddenNeuron, outputNeuron, false);

    return new NEATNetwork(2, 1, neurons, afSigmoid, afStep, 3);
}
/// <summary>
/// Setup for training: seeds every NEAT parameter with its default value,
/// selects the activation functions, builds the innovation list from the
/// first genome, and primes the population (reset, sort, speciate).
/// </summary>
private void Init()
{
    // --- default evolution parameters -----------------------------------
    ParamActivationMutationRate = 0.1;
    ParamChanceAddLink = 0.07;
    ParamChanceAddNode = 0.04;
    ParamChanceAddRecurrentLink = 0.05;
    ParamCompatibilityThreshold = 0.26;
    ParamCrossoverRate = 0.7;
    ParamMaxActivationPerturbation = 0.1;
    ParamMaxNumberOfSpecies = 0;
    ParamMaxPermittedNeurons = 100;
    ParamMaxWeightPerturbation = 0.5;
    ParamMutationRate = 0.2;
    ParamNumAddLinkAttempts = 5;
    ParamNumGensAllowedNoImprovement = 15;
    ParamNumTrysToFindLoopedLink = 5;
    ParamNumTrysToFindOldLink = 5;
    ParamProbabilityWeightReplaced = 0.1;

    // --- activation functions -------------------------------------------
    NeatActivationFunction = new ActivationSigmoid();
    OutputActivationFunction = new ActivationLinear();

    // Seed the innovation list from the first genome's topology.
    NEATGenome genome = (NEATGenome) Population.Genomes[0];
    Population.Innovations =
        new NEATInnovationList(Population, genome.Links, genome.Neurons);

    splits = Split(null, 0, 1, 0);

    // Start the best-ever score at the worst possible value for the
    // direction we are optimizing in.
    bestEverScore = CalculateScore.ShouldMinimize
        ? double.MaxValue
        : double.MinValue;

    // --- prime the population -------------------------------------------
    ResetAndKill();
    SortAndRecord();
    SpeciateAndCalculateSpawnLevels();
}
/// <summary>
/// Initializes the neural network prior to the training. The actual
/// activation output for the output layer and its maximum and minimum
/// levels are also defined here: the last layer should produce outputs in
/// [0,1] or [-1,1], so a wider-ranged function is replaced by TANH or
/// Sigmoid as appropriate.
/// </summary>
/// <returns>A reset three-layer feed-forward network.</returns>
private BasicNetwork PrepareNetwork()
{
    IActivationFunction outputLayerActivation;
    EncogActivation encogActivation = (EncogActivation)base.activationFunction;
    _activationMin = base.activationFunction.activationMin;
    _activationMax = base.activationFunction.activationMax;

    // Checking if the selected activation function is adequate for the
    // output layer and changing it if needed.
    // NOTE(review): both comparisons are strict and joined with &&, so a
    // function whose range exceeds the bounds on only one side (e.g.
    // min = -2, max = 0.5) keeps its original activation -- confirm this
    // is intended.
    if (_activationMin < -1 && _activationMax > 1)
    {
        // Too wide on both sides: clamp with TANH to [-1, 1].
        outputLayerActivation = new ActivationTANH();
        _activationMin = -1;
        _activationMax = 1;
    }
    else if (_activationMin == 0 && _activationMax > 1)
    {
        // Non-negative but unbounded above: clamp with Sigmoid to [0, 1].
        outputLayerActivation = new ActivationSigmoid();
        _activationMin = 0;
        _activationMax = 1;
    }
    else
    {
        // Already adequate: use a fresh instance of the configured
        // function.  (Reuses the cast taken above; the original re-cast
        // base.activationFunction redundantly.)
        outputLayerActivation = encogActivation.CreateInstance();
    }

    BasicNetwork network = new BasicNetwork();

    if (autoHiddenLayerSize)
        _actualHiddenLayerSize = inputDim * hiddenLayerSizeFactor;
    else
        _actualHiddenLayerSize = hiddenLayerSize;

    network.AddLayer(new BasicLayer(null, true, inputDim));
    network.AddLayer(new BasicLayer(encogActivation.CreateInstance(), true, _actualHiddenLayerSize));
    network.AddLayer(new BasicLayer(outputLayerActivation, false, outputDim));
    network.Structure.FinalizeStructure();
    network.Reset();
    return network;
}
/// <summary>
/// Appends one BasicLayer per LayerConfig to the network, mapping the
/// numeric ActivationType code to an activation-function instance.
/// Unknown codes fall back to ActivationSoftMax.
/// </summary>
/// <param name="gen">Layer configurations, applied in order.</param>
void AddLayers(List<LayerConfig> gen)
{
    foreach (var g in gen)
    {
        // The original had a redundant pre-switch check assigning
        // ActivationBiPolar for type 0, immediately repeated by case 0
        // below; the switch alone covers every code.
        IActivationFunction act;
        switch (g.ActivationType)
        {
            case 0: act = new ActivationBiPolar(); break;
            case 1: act = new ActivationBipolarSteepenedSigmoid(); break;
            case 2: act = new ActivationClippedLinear(); break;
            case 3: act = new ActivationCompetitive(); break;
            case 4: act = new ActivationElliott(); break;
            case 5: act = new ActivationElliottSymmetric(); break;
            case 6: act = new ActivationGaussian(); break;
            case 7: act = new ActivationLinear(); break;
            case 8: act = new ActivationLOG(); break;
            case 9: act = new ActivationRamp(); break;
            // NOTE(review): 10 maps to ActivationRamp exactly like 9 --
            // possibly a copy/paste slip; confirm which function code 10
            // is meant to select before changing it.
            case 10: act = new ActivationRamp(); break;
            case 11: act = new ActivationSigmoid(); break;
            case 12: act = new ActivationSIN(); break;
            case 13: act = new ActivationSoftMax(); break;
            case 14: act = new ActivationSteepenedSigmoid(); break;
            case 15: act = new ActivationStep(); break;
            case 16: act = new ActivationTANH(); break;
            default: act = new ActivationSoftMax(); break;
        }
        network.AddLayer(new BasicLayer(act, g.hasBias, g.neurons));
    }
}