Example #1
        //Given the starting layer, propagates the action potential to the next layer.
        private void propagate(int i, Activation act)
        {
            int    j, k;
            double sum;                                                                                //Accumulator.

            for (j = 0; j < NumPercept[i + 1]; j++)                                                    //Iterate over the perceptrons of the next layer.
            {
                sum = 0;                                                                               //Reset the accumulator.
                for (k = 0; k < NumPercept[i]; k++)                                                    //Iterate over the perceptrons of the starting layer.
                {
                    sum += layer[i].perceptron[k].getAction() * layer[i].perceptron[k].getSynapsys(j); /* Multiply the action potential
                                                                                                        * by the synaptic weight of the
                                                                                                        * connection between k and j.*/
                }
                sum += Bias;                                                                           //Add the bias term to the weighted sum.
                layer[i + 1].perceptron[j].setAction(act.LogisticSigmoid(sum));                        /* Apply the activation function and assign
                                                                                                        * the new action potential.*/
            }
            /*To apply the softmax, uncomment this segment.*/

            /*if(i == NumLayers - 2)
             *  act.Softmax(layer[i+1]);*/
        }
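For reference, each pass of propagate computes the weighted sum z_j = Σ_k a_k · w_kj + Bias for perceptron j of the next layer and squashes it with the logistic sigmoid σ(z) = 1 / (1 + e^(−z)). The Activation class itself is not included in these examples; a minimal sketch of the sigmoid it would expose, assuming `using System;` is in scope for Math.Exp, might look like this:

        // Minimal sketch, assuming Activation wraps the standard logistic sigmoid.
        public class Activation
        {
            // σ(x) = 1 / (1 + e^(-x)); squashes the weighted sum into (0, 1).
            public double LogisticSigmoid(double x)
            {
                return 1.0 / (1.0 + Math.Exp(-x));
            }
        }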
Example #2
        /// <summary>
        ///     This method calculates dC with respect to Z using the derivative of the
        ///     specified activation, then uses dC/dZ to calculate the remaining derivatives.
        /// </summary>
        /// <param name="dA">The derivative of the cost function with respect to the activations.</param>
        /// <param name="Z">The linear function of the weights, biases, and the previous layer's activations.</param>
        /// <param name="linearCache">A linear cache obtained from forward prop.</param>
        /// <param name="activation">The type of activation to use. Corresponds with the activation that was used for this layer during forward prop.</param>
        /// <param name="lambda">The L2 regularization hyper-parameter.</param>
        /// <returns>The derivatives provided by the <see cref="LinearBackward"/> function.</returns>
        private Tuple<MatrixVectors, MatrixVectors, MatrixVectors> ActivationsBackward(MatrixVectors dA, MatrixVectors Z, LinearCache linearCache, Activation activation, float lambda)
        {
            MatrixVectors dZ;

            switch (activation)
            {
            case Activation.Sigmoid:
                dZ = SigmoidPrime(dA, Z);
                break;

            case Activation.ReLu:
                dZ = ReLuPrime(dA, Z);
                break;

            default:
                throw new ArgumentOutOfRangeException(nameof(activation));
            }

            return LinearBackward(dZ, linearCache, lambda);
        }
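The two derivative helpers are not shown in these examples. Element-wise, SigmoidPrime computes dZ = dA ⊙ σ'(Z) with σ'(z) = σ(z)(1 − σ(z)), and ReLuPrime computes dZ = dA ⊙ 1[z > 0]. A rough sketch over plain arrays, since the MatrixVectors API is not part of these examples (assumes `using System;` for Math.Exp):

        // Hypothetical element-wise sketches; the real versions operate on MatrixVectors.
        private static double[] SigmoidPrimeSketch(double[] dA, double[] Z)
        {
            double[] dZ = new double[Z.Length];
            for (int i = 0; i < Z.Length; i++)
            {
                double s = 1.0 / (1.0 + Math.Exp(-Z[i])); // σ(z)
                dZ[i] = dA[i] * s * (1.0 - s);            // dC/dz = dC/da · σ'(z)
            }
            return dZ;
        }

        private static double[] ReLuPrimeSketch(double[] dA, double[] Z)
        {
            double[] dZ = new double[Z.Length];
            for (int i = 0; i < Z.Length; i++)
            {
                dZ[i] = Z[i] > 0 ? dA[i] : 0.0; // ReLU passes the gradient only where z > 0.
            }
            return dZ;
        }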
Example #3
        /// <summary>
        ///     This method runs the linear function and the specified activation function
        ///     to calculate the Z and A of the current layer.
        /// </summary>
        /// <param name="previousLayersActivations">Vector of the previous layer's activations.</param>
        /// <param name="weights">Matrix of the current layer's weights.</param>
        /// <param name="bias">Vector of the current layer's biases.</param>
        /// <param name="activation">The type of activation function to use.</param>
        /// <returns>
        ///     A tuple with the linear cache as the first item, Z as the second item, and the
        ///     final activations as the third item.
        /// </returns>
        private Tuple<LinearCache, MatrixVectors, MatrixVectors> ActivationsForward(MatrixVectors previousLayersActivations, MatrixVectors weights, MatrixVectors bias, Activation activation)
        {
            Tuple<LinearCache, MatrixVectors> cache = LinearForward(previousLayersActivations, weights, bias);
            MatrixVectors z = cache.Item2;
            MatrixVectors activationsVector;

            switch (activation)
            {
            case Activation.Sigmoid:
                activationsVector = Sigmoid(z);
                break;

            case Activation.ReLu:
                activationsVector = Relu(z);
                break;

            default:
                throw new ArgumentOutOfRangeException(nameof(activation));
            }
            LinearCache linearCache = cache.Item1;

            return new Tuple<LinearCache, MatrixVectors, MatrixVectors>(linearCache, z, activationsVector);
        }
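LinearForward itself is not shown; it computes the linear step Z = W · A_prev + b and caches its inputs for backprop. A sketch of that computation with plain arrays, since the MatrixVectors and LinearCache types are not part of these examples:

        // Hypothetical sketch of the linear step Z = W · A_prev + b over plain arrays.
        private static double[] LinearForwardSketch(double[] aPrev, double[,] W, double[] b)
        {
            int rows = W.GetLength(0); // one row per perceptron in the current layer
            int cols = W.GetLength(1); // one column per activation in the previous layer
            double[] z = new double[rows];
            for (int j = 0; j < rows; j++)
            {
                double sum = b[j];                 // start from the bias
                for (int k = 0; k < cols; k++)
                {
                    sum += W[j, k] * aPrev[k];     // weighted sum of previous activations
                }
                z[j] = sum;
            }
            return z;
        }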