ActivationLinear (Encog for .NET). Inheritance: IActivationFunction, ICloneable
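ActivationLinear implements the identity function: ActivationFunction leaves every value in the slice unchanged, and DerivativeFunction always returns 1. That is why the tests below expect the inputs back unmodified and a constant derivative of 1.0. A minimal standalone sketch, assuming the Encog.Engine.Network.Activation namespace from Encog for .NET:

    using System;
    using Encog.Engine.Network.Activation;

    internal static class LinearDemo
    {
        private static void Main()
        {
            var linear = new ActivationLinear();

            double[] values = { -1.5, 0.0, 2.5 };
            // Identity activation: the array slice is unchanged.
            linear.ActivationFunction(values, 0, values.Length);
            Console.WriteLine(string.Join(", ", values)); // -1.5, 0, 2.5

            // The derivative of f(x) = x is 1 everywhere.
            Console.WriteLine(linear.DerivativeFunction(2.5, 2.5)); // 1
        }
    }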
Code Example #1
        public void TestLinear()
        {
            var activation = new ActivationLinear();

            Assert.IsTrue(activation.HasDerivative);

            var clone = activation.Clone();

            Assert.IsInstanceOfType(clone, typeof(ActivationLinear));

            double[] input = { 1, 2, 3 };

            activation.ActivationFunction(input, 0, 3);

            Assert.AreEqual(1.0, input[0], 0.0);
            Assert.AreEqual(2.0, input[1], 0.0);
            Assert.AreEqual(3.0, input[2], 0.0);


            // test derivative, should not throw an error
            input[0] = activation.DerivativeFunction(input[0], input[0]);
            input[1] = activation.DerivativeFunction(input[1], input[1]);
            input[2] = activation.DerivativeFunction(input[2], input[2]);
            Assert.AreEqual(1.0, input[0], 0.0);
            Assert.AreEqual(1.0, input[1], 0.0);
            Assert.AreEqual(1.0, input[2], 0.0);
        }
Code Example #2
        /// <summary>
        /// Create a feed forward network.
        /// </summary>
        ///
        /// <param name="architecture">The architecture string to use.</param>
        /// <param name="input">The input count.</param>
        /// <param name="output">The output count.</param>
        /// <returns>The feedforward network.</returns>
        public IMLMethod Create(String architecture, int input,
            int output)
        {
            var result = new BasicNetwork();
            IList<String> layers = ArchitectureParse.ParseLayers(architecture);
            IActivationFunction af = new ActivationLinear();

            int questionPhase = 0;

            foreach (String layerStr in layers)
            {
                // determine default
                int defaultCount = questionPhase == 0 ? input : output;

                ArchitectureLayer layer = ArchitectureParse.ParseLayer(
                    layerStr, defaultCount);
                bool bias = layer.Bias;

                String part = layer.Name;
                part = part != null ? part.Trim() : "";

                IActivationFunction lookup = _factory.Create(part);

                if (lookup != null)
                {
                    af = lookup;
                }
                else
                {
                    if (layer.UsedDefault)
                    {
                        questionPhase++;
                        if (questionPhase > 2)
                        {
                            throw new EncogError("Only two ?'s may be used.");
                        }
                    }

                    if (layer.Count == 0)
                    {
                        throw new EncogError("Unknown architecture element: "
                                             + architecture + ", can't parse: " + part);
                    }

                    result.AddLayer(new BasicLayer(af, bias, layer.Count));
                }
            }

            result.Structure.FinalizeStructure();
            result.Reset();

            return result;
        }
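The Create method above is what Encog's MLMethodFactory delegates to for feedforward networks: the architecture string lists layers separated by "->", ":B" requests a bias neuron, and "?" stands in for the input count on its first use and the output count on its second (a third "?" raises the EncogError shown above). A hedged usage sketch; the factory constant and architecture grammar are as documented for Encog 3:

    using Encog.ML.Factory;
    using Encog.Neural.Networks;

    internal static class FactoryDemo
    {
        private static void Main()
        {
            var factory = new MLMethodFactory();
            // "?" resolves to the input count (2) at the front and the
            // output count (1) at the end.
            var network = (BasicNetwork) factory.Create(
                MLMethodFactory.TypeFeedforward,
                "?:B->SIGMOID->4:B->SIGMOID->?",
                2, 1);
        }
    }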
Code Example #3
        public void TestLinear()
        {
            var activation = new ActivationLinear();
            Assert.IsTrue(activation.HasDerivative());

            var clone = (ActivationLinear) activation.Clone();
            Assert.IsNotNull(clone);

            double[] input = {1, 2, 3};

            // only element 0 is activated here, but the linear function
            // leaves values unchanged, so all three asserts still hold
            activation.ActivationFunction(input, 0, 1);

            Assert.AreEqual(1.0, input[0], 0.1);
            Assert.AreEqual(2.0, input[1], 0.1);
            Assert.AreEqual(3.0, input[2], 0.1);

            // test derivative, should not throw an error
            input[0] = activation.DerivativeFunction(input[0], input[0]);
        }
Code Example #4
        public void TestLinear()
        {
            var activation = new ActivationLinear();

            Assert.IsTrue(activation.HasDerivative);

            var clone = (ActivationLinear)activation.Clone();

            Assert.IsNotNull(clone);

            double[] input = { 1, 2, 3 };

            // as in Example #3, only element 0 is activated, but linear
            // activation leaves all values unchanged anyway
            activation.ActivationFunction(input, 0, 1);

            Assert.AreEqual(1.0, input[0], 0.1);
            Assert.AreEqual(2.0, input[1], 0.1);
            Assert.AreEqual(3.0, input[2], 0.1);


            // test derivative, should not throw an error
            input[0] = activation.DerivativeFunction(input[0], input[0]);
        }
Code Example #5
File: FlatNetwork.cs  Project: neismit/emds
 public FlatNetwork(int input, int hidden1, int hidden2, int output, bool tanh)
 {
     // De-obfuscated equivalent of the decompiler output that was here
     // (compare Code Example #6): a linear input layer feeds up to two
     // hidden layers and an output layer, all tanh or all sigmoid.
     IActivationFunction activation = new ActivationLinear();
     IActivationFunction act = tanh
         ? (IActivationFunction) new ActivationTANH()
         : new ActivationSigmoid();
     FlatLayer[] layerArray;

     if (hidden1 == 0 && hidden2 == 0)
     {
         layerArray = new[]
         {
             new FlatLayer(activation, input, 1.0),
             new FlatLayer(act, output, 0.0)
         };
     }
     else if (hidden1 == 0 || hidden2 == 0)
     {
         int num = Math.Max(hidden1, hidden2);
         layerArray = new[]
         {
             new FlatLayer(activation, input, 1.0),
             new FlatLayer(act, num, 1.0),
             new FlatLayer(act, output, 0.0)
         };
     }
     else
     {
         layerArray = new[]
         {
             new FlatLayer(activation, input, 1.0),
             new FlatLayer(act, hidden1, 1.0),
             new FlatLayer(act, hidden2, 1.0),
             new FlatLayer(act, output, 0.0)
         };
     }

     this._isLimited = false;
     this._connectionLimit = 0.0;
     this.Init(layerArray);
 }
Code Example #6
        /// <summary>
        /// Construct a flat neural network.
        /// </summary>
        ///
        /// <param name="input">Neurons in the input layer.</param>
        /// <param name="hidden1"></param>
        /// <param name="hidden2"></param>
        /// <param name="output">Neurons in the output layer.</param>
        /// <param name="tanh">True if this is a tanh activation, false for sigmoid.</param>
        public FlatNetwork(int input, int hidden1, int hidden2,
            int output, bool tanh)
        {
            IActivationFunction linearAct = new ActivationLinear();
            FlatLayer[] layers;
            IActivationFunction act = (tanh)
                                          ? (new ActivationTANH())
                                          : (IActivationFunction) (new ActivationSigmoid());

            if ((hidden1 == 0) && (hidden2 == 0))
            {
                layers = new FlatLayer[2];
                layers[0] = new FlatLayer(linearAct, input,
                                          DefaultBiasActivation);
                layers[1] = new FlatLayer(act, output,
                                          NoBiasActivation);
            }
            else if ((hidden1 == 0) || (hidden2 == 0))
            {
                int count = Math.Max(hidden1, hidden2);
                layers = new FlatLayer[3];
                layers[0] = new FlatLayer(linearAct, input,
                                          DefaultBiasActivation);
                layers[1] = new FlatLayer(act, count,
                                          DefaultBiasActivation);
                layers[2] = new FlatLayer(act, output,
                                          NoBiasActivation);
            }
            else
            {
                layers = new FlatLayer[4];
                layers[0] = new FlatLayer(linearAct, input,
                                          DefaultBiasActivation);
                layers[1] = new FlatLayer(act, hidden1,
                                          DefaultBiasActivation);
                layers[2] = new FlatLayer(act, hidden2,
                                          DefaultBiasActivation);
                layers[3] = new FlatLayer(act, output,
                                          NoBiasActivation);
            }

            _isLimited = false;
            _connectionLimit = 0.0d;

            Init(layers);
        }
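A quick usage sketch for this constructor (hedged: the Encog.Neural.Flat namespace and the Compute(double[], double[]) signature are as I recall them from Encog 3; the layer sizes are arbitrary):

    using Encog.Neural.Flat;

    internal static class FlatDemo
    {
        private static void Main()
        {
            // 2 inputs, one hidden layer of 4 (hidden2 = 0), 1 output,
            // sigmoid activations (tanh = false); the input layer gets
            // ActivationLinear, as in the constructor above.
            var flat = new FlatNetwork(2, 4, 0, 1, false);

            var output = new double[flat.OutputCount];
            flat.Compute(new[] { 0.0, 1.0 }, output);
        }
    }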
Code Example #7
        /// <summary>
        /// Setup for training.
        /// </summary>
        private void Init()
        {
            // default values
            ParamActivationMutationRate = 0.1;
            ParamChanceAddLink = 0.07;
            ParamChanceAddNode = 0.04;
            ParamChanceAddRecurrentLink = 0.05;
            ParamCompatibilityThreshold = 0.26;
            ParamCrossoverRate = 0.7;
            ParamMaxActivationPerturbation = 0.1;
            ParamMaxNumberOfSpecies = 0;
            ParamMaxPermittedNeurons = 100;
            ParamMaxWeightPerturbation = 0.5;
            ParamMutationRate = 0.2;
            ParamNumAddLinkAttempts = 5;
            ParamNumGensAllowedNoImprovement = 15;
            ParamNumTrysToFindLoopedLink = 5;
            ParamNumTrysToFindOldLink = 5;
            ParamProbabilityWeightReplaced = 0.1;

            NeatActivationFunction = new ActivationSigmoid();
            OutputActivationFunction = new ActivationLinear();


            NEATGenome genome = (NEATGenome)Population.Genomes[0];

            Population.Innovations =
                    new NEATInnovationList(Population, genome.Links,
                            genome.Neurons);

            splits = Split(null, 0, 1, 0);

            if (CalculateScore.ShouldMinimize)
            {
                bestEverScore = double.MaxValue;
            }
            else
            {
                bestEverScore = double.MinValue;
            }

            ResetAndKill();
            SortAndRecord();
            SpeciateAndCalculateSpawnLevels();
        }
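The Param* defaults assigned in Init() are settable from calling code, so they can be overridden after the trainer is constructed. A sketch under the assumption of the Encog 3.0/3.1-era NEATTraining constructor (score function, input count, output count, population size); later Encog versions build the trainer through NEATUtil instead:

    using Encog.ML.Data.Basic;
    using Encog.Neural.NEAT.Training;
    using Encog.Neural.Networks.Training;

    internal static class NeatDemo
    {
        private static void Main()
        {
            // XOR as a scoring problem.
            double[][] input = { new[] { 0.0, 0.0 }, new[] { 0.0, 1.0 },
                                 new[] { 1.0, 0.0 }, new[] { 1.0, 1.0 } };
            double[][] ideal = { new[] { 0.0 }, new[] { 1.0 },
                                 new[] { 1.0 }, new[] { 0.0 } };
            var score = new TrainingSetScore(new BasicMLDataSet(input, ideal));

            var train = new NEATTraining(score, 2, 1, 100);

            // Override two of the defaults assigned in Init() above.
            train.ParamMutationRate = 0.25;
            train.ParamMaxPermittedNeurons = 50;
        }
    }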
Code Example #8
File: Program.cs  Project: jorik041/NeuroLife
 void AddLayers(List<LayerConfig> gen)
 {
     foreach (var g in gen)
     {
         IActivationFunction act;
         switch (g.ActivationType)
         {
             case 0:
                 act = new ActivationBiPolar();
                 break;
             case 1:
                 act = new ActivationBipolarSteepenedSigmoid();
                 break;
             case 2:
                 act = new ActivationClippedLinear();
                 break;
             case 3:
                 act = new ActivationCompetitive();
                 break;
             case 4:
                 act = new ActivationElliott();
                 break;
             case 5:
                 act = new ActivationElliottSymmetric();
                 break;
             case 6:
                 act = new ActivationGaussian();
                 break;
             case 7:
                 act = new ActivationLinear();
                 break;
             case 8:
                 act = new ActivationLOG();
                 break;
             case 9:
             case 10:
                 // the original mapped both 9 and 10 to ActivationRamp
                 act = new ActivationRamp();
                 break;
             case 11:
                 act = new ActivationSigmoid();
                 break;
             case 12:
                 act = new ActivationSIN();
                 break;
             case 13:
                 act = new ActivationSoftMax();
                 break;
             case 14:
                 act = new ActivationSteepenedSigmoid();
                 break;
             case 15:
                 act = new ActivationStep();
                 break;
             case 16:
                 act = new ActivationTANH();
                 break;
             default:
                 act = new ActivationSoftMax();
                 break;
         }
         network.AddLayer(new BasicLayer(act, g.hasBias, g.neurons));
     }
 }
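LayerConfig is specific to the NeuroLife project; from the usage above it carries an ActivationType index, a hasBias flag, and a neuron count. A hypothetical driver for this helper, meant to sit in the same class (the object-initializer syntax assumes those members are publicly settable, which is an assumption):

    // Hypothetical genome for a 2-4-1 network. In the switch above,
    // index 7 maps to ActivationLinear and index 11 to ActivationSigmoid.
    var gen = new List<LayerConfig>
    {
        new LayerConfig { ActivationType = 7,  hasBias = true,  neurons = 2 },
        new LayerConfig { ActivationType = 11, hasBias = true,  neurons = 4 },
        new LayerConfig { ActivationType = 11, hasBias = false, neurons = 1 },
    };
    AddLayers(gen);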
Code Example #9
        /// <summary>
        /// Create the flat neural network.
        /// </summary>
        public void Flatten()
        {
            bool isRBF = false;
            IDictionary<ILayer, FlatLayer> regular2flat = new Dictionary<ILayer, FlatLayer>();
            IDictionary<FlatLayer, ILayer> flat2regular = new Dictionary<FlatLayer, ILayer>();            
            IList<ObjectPair<ILayer, ILayer>> contexts = new List<ObjectPair<ILayer, ILayer>>();            
            this.flat = null;

            ValidateForFlat val = new ValidateForFlat();

            if (val.IsValid(this.network) == null)
            {
                if (this.layers.Count == 3
                        && this.layers[1] is RadialBasisFunctionLayer)
                {
                    RadialBasisFunctionLayer rbf = (RadialBasisFunctionLayer)this.layers[1];
                    this.flat = new FlatNetworkRBF(this.network.InputCount,
                            rbf.NeuronCount, this.network.OutputCount,
                            rbf.RadialBasisFunction);
                    FlattenWeights();
                    this.flatUpdate = FlatUpdateNeeded.None;
                    return;
                }

                int flatLayerCount = CountNonContext();
                FlatLayer[] flatLayers = new FlatLayer[flatLayerCount];                

                int index = flatLayers.Length - 1;
                foreach (ILayer layer in this.layers)
                {

                    if (layer is ContextLayer)
                    {
                        ISynapse inboundSynapse = network.Structure
                                .FindPreviousSynapseByLayerType(layer,
                                        typeof(BasicLayer));
                        ISynapse outboundSynapse = network
                                .Structure
                                .FindNextSynapseByLayerType(layer, typeof(BasicLayer));

                        if (inboundSynapse == null)
                            throw new NeuralNetworkError(
                                    "Context layer must be connected to by one BasicLayer.");

                        if (outboundSynapse == null)
                            throw new NeuralNetworkError(
                                    "Context layer must connect to by one BasicLayer.");

                        ILayer inbound = inboundSynapse.FromLayer;
                        ILayer outbound = outboundSynapse.ToLayer;

                        contexts
                                .Add(new ObjectPair<ILayer, ILayer>(inbound, outbound));
                    }
                    else
                    {
                        double bias = this.FindNextBias(layer);

                        IActivationFunction activationType;
                        double[] param = new double[1];

                        if (layer.ActivationFunction == null)
                        {
                            activationType = new ActivationLinear();
                            param = new double[1];
                            param[0] = 1;
                        }
                        else
                        {
                            activationType = layer.ActivationFunction;
                            param = layer.ActivationFunction.Params;
                        }

                        FlatLayer flatLayer = new FlatLayer(activationType, layer
                                .NeuronCount, bias, param);

                        regular2flat[layer] = flatLayer;
                        flat2regular[flatLayer] = layer;
                        flatLayers[index--] = flatLayer;
                    }
                }

                // now link up the context layers
                foreach (ObjectPair<ILayer, ILayer> context in contexts)
                {
                    // link the context layer on the FlatLayer
                    ILayer layer = context.B;
                    ISynapse synapse = this.network
                            .Structure
                            .FindPreviousSynapseByLayerType(layer, typeof(BasicLayer));
                    FlatLayer from = regular2flat[context.A];
                    FlatLayer to = regular2flat[synapse.FromLayer];
                    to.ContextFedBy = from;
                }

                this.flat = new FlatNetwork(flatLayers);

                // update the context indexes on the non-flat network
                for (int i = 0; i < flatLayerCount; i++)
                {
                    FlatLayer fedBy = flatLayers[i].ContextFedBy;
                    if (fedBy != null)
                    {
                        ILayer fedBy2 = flat2regular[flatLayers[i + 1]];
                        ISynapse synapse = FindPreviousSynapseByLayerType(fedBy2, typeof(ContextLayer));
                        if (synapse == null)
                            throw new NeuralNetworkError("Can't find parent synapse to context layer.");
                        ContextLayer context = (ContextLayer)synapse.FromLayer;

                        // find fedby index
                        int fedByIndex = -1;
                        for (int j = 0; j < flatLayerCount; j++)
                        {
                            if (flatLayers[j] == fedBy)
                            {
                                fedByIndex = j;
                                break;
                            }
                        }

                        if (fedByIndex == -1)
                            throw new NeuralNetworkError("Can't find layer feeding context.");

                        context.FlatContextIndex = this.flat.ContextTargetOffset[fedByIndex];
                    }
                }

                // RBF networks will not train every layer
                if (isRBF)
                {
                    this.flat.EndTraining = flatLayers.Length - 1;
                }

                FlattenWeights();

                this.flatUpdate = FlatUpdateNeeded.None;
            }
            else
                this.flatUpdate = FlatUpdateNeeded.Never;
        }