Code example #1
        public INeuralNetworkRecurrentBackpropagation Execute(List <IMatrix> curr, bool backpropagate)
        {
            var input  = curr[0];
            var memory = curr[1];

            // Candidate (a), input (i), forget (f) and output (o) gates: each combines
            // the current input and the previous memory through its own weight matrices.
            var a = Combine(input, memory, _wc.Layer, _uc.Layer, m => _activation.Calculate(m));
            var i = Combine(input, memory, _wi.Layer, _ui.Layer, m => m.SigmoidActivation());
            var f = Combine(input, memory, _wf.Layer, _uf.Layer, m => m.SigmoidActivation());
            var o = Combine(input, memory, _wo.Layer, _uo.Layer, m => m.SigmoidActivation());

            using (var f2 = f.PointwiseMultiply(memory)) {
                var ct = a.PointwiseMultiply(i);
                ct.AddInPlace(f2);
                var cta = _activation.Calculate(ct);

                curr[0] = o.PointwiseMultiply(cta);
                curr[1] = ct;

                if (backpropagate)
                {
                    var ones = _lap.Create(memory.RowCount, memory.ColumnCount, (x, y) => 1f);
                    return(new Backpropagation(_activation, ones, ct, cta, memory, o, a, i, f, input, _uc, _wc, _ui, _wi, _uf, _wf, _uo, _wo));
                }
                //memory.Dispose();
                //input.Dispose();
                a.Dispose();
                i.Dispose();
                f.Dispose();
                o.Dispose();
                cta.Dispose();
                return(null);
            }
        }
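The Combine helper is not shown in this snippet; judging from the paired weight fields (_wc/_uc and so on), each gate is presumably computed as f(W·x + U·h). A minimal stand-alone sketch of that per-gate computation, using plain arrays instead of IMatrix (all names below are illustrative, not the library's API):

    using System;

    static class LstmGateSketch
    {
        // Presumed shape of Combine(x, h, W, U, f): apply f element-wise to W*x + U*h.
        public static double[] Gate(double[] x, double[] h, double[,] W, double[,] U,
                                    Func<double, double> f)
        {
            int rows = W.GetLength(0);
            var gate = new double[rows];
            for (int r = 0; r < rows; r++)
            {
                double sum = 0.0;
                for (int c = 0; c < x.Length; c++) sum += W[r, c] * x[c]; // input term
                for (int c = 0; c < h.Length; c++) sum += U[r, c] * h[c]; // recurrent term
                gate[r] = f(sum);
            }
            return gate;
        }
    }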
Code example #2
        public void Activate(IDisposableMatrixExecutionLine m)
        {
            // multiply weights
            m.Assign(m.Current.Multiply(_weight));

            // add bias
            m.Current.AddToEachRow(_bias);

            // activate output
            m.Assign(_activation.Calculate(m.Current));
        }
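The three steps above amount to output = activation(input · W + b). A minimal sketch of the same feed-forward layer for a single row vector, with plain arrays standing in for the matrix types (names are hypothetical):

    using System;

    static class DenseLayerSketch
    {
        // activation(x * W + b) for a single row vector x.
        public static double[] Forward(double[] x, double[,] W, double[] b,
                                       Func<double, double> activation)
        {
            int outputs = W.GetLength(1);
            var y = new double[outputs];
            for (int c = 0; c < outputs; c++)
            {
                double sum = b[c];                                        // add bias
                for (int r = 0; r < x.Length; r++) sum += x[r] * W[r, c]; // multiply weights
                y[c] = activation(sum);                                   // activate output
            }
            return y;
        }
    }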
Code example #3
        /// <summary>
        /// Activate the network. Activation reads input signals from InputSignalArray and writes output signals
        /// to OutputSignalArray.
        /// </summary>
        public virtual void Activate()
        {
            // Reset any state from a previous activation.
            for (int i = _inputAndBiasNodeCount; i < _activationArr.Length; i++)
            {
                _activationArr[i] = 0.0;
            }

            // Process all layers in turn.
            int conIdx = 0, nodeIdx = _inputAndBiasNodeCount;

            for (int layerIdx = 1; layerIdx < _layerInfoArr.Length; layerIdx++)
            {
                LayerInfo layerInfo = _layerInfoArr[layerIdx - 1];

                // Push signals through the previous layer's connections to the current layer's nodes.
                for (; conIdx < layerInfo._endConnectionIdx; conIdx++)
                {
                    _activationArr[_connectionArr[conIdx]._tgtNeuronIdx] += _activationArr[_connectionArr[conIdx]._srcNeuronIdx] * _connectionArr[conIdx]._weight;
                }

                // Activate current layer's nodes.
                layerInfo = _layerInfoArr[layerIdx];
                for (; nodeIdx < layerInfo._endNodeIdx; nodeIdx++)
                {
                    _activationArr[nodeIdx] = _nodeActivationFnArr[nodeIdx].Calculate(_activationArr[nodeIdx], _nodeAuxArgsArr[nodeIdx]);
                }
            }
        }
Code example #4
File: HopfieldNetwork.cs  Project: spzSource/IMGP
        private double CalculateNewState(int neuronIndex, Vector <double> patternVector)
        {
            Vector <double> weightsRow           = weights.Row(neuronIndex);
            Vector <double> newNeuronStateVector = weightsRow
                                                   .PointwiseMultiply(patternVector);

            return(activationFunction.Calculate(newNeuronStateVector.Sum()));
        }
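CalculateNewState returns f(Σ_j w_ij · s_j) for a single neuron. For context, here is a hedged sketch of the asynchronous Hopfield recall loop such a method is typically called from, using plain arrays and a sign threshold in place of the project's activationFunction (names are illustrative):

    static class HopfieldSketch
    {
        // Repeatedly recompute each neuron's state from its weight row until no state changes.
        public static void Recall(double[,] w, double[] state, int maxSweeps = 100)
        {
            for (int sweep = 0; sweep < maxSweeps; sweep++)
            {
                bool changed = false;
                for (int i = 0; i < state.Length; i++)
                {
                    double sum = 0.0;
                    for (int j = 0; j < state.Length; j++) sum += w[i, j] * state[j];
                    double updated = sum >= 0 ? 1.0 : -1.0; // sign threshold as the activation
                    if (updated != state[i]) { state[i] = updated; changed = true; }
                }
                if (!changed) break; // converged to a stable pattern
            }
        }
    }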
Code example #5
File: Neuron.cs  Project: MuleaneEve/bepuphysics2
        /// <summary>
        /// Recalculate this neuron's output value.
        /// </summary>
        public void Recalc()
        {
            // No recalculation required for input or bias nodes.
            if (neuronType == NeuronType.Input || neuronType == NeuronType.Bias)
            {
                return;
            }

            // Iterate the connections and total up the input signal from all of them.
            double accumulator = 0;
            int    loopBound   = connectionList.Count;

            for (int i = 0; i < loopBound; i++)
            {
                Connection connection = connectionList[i];
                accumulator += connection.SourceNeuron.outputValue * connection.Weight;
            }

            // Apply the activation function to the accumulated input signal.
            // A parameterised sigmoid from PEANNUT.
            //			outputRecalc = activationParams.a +
            //							activationParams.b *
            //							Math.Tanh(activationParams.c * accumulator - activationParams.d);

            // Functions from original NEAT code

            //RIGHT SHIFTED ---------------------------------------------------------
            //return (1/(1+(exp(-(slope*activesum-constant))))); //ave 3213 clean on 40 runs of p2m and 3468 on another 40
            //41394 with 1 failure on 8 runs

            //LEFT SHIFTED ----------------------------------------------------------
            //return (1/(1+(exp(-(slope*activesum+constant))))); //original setting ave 3423 on 40 runs of p2m, 3729 and 1 failure also

            //PLAIN SIGMOID ---------------------------------------------------------
            //outputRecalc = (1.0D/(1.0D+(Math.Exp(-accumulator))));  // good for x range of 0+-5

            //DOUBLE POLE EXPERIMENT. Lazy/sloping sigmoid from -1 to 1 with output range -1 to 1.
            //outputRecalc = 1.0/(1.0 + Math.Exp(-4.9*accumulator));

            // GWM - Bias subtracted from accumulated inputs
            accumulator -= neuronBias;

            // outputValue has the output from the previous time step
            outputRecalc = outputValue + (1 / timeConstant) * (-outputValue + activationFn.Calculate(accumulator)); // GWM - Leaky integrator equation - Time constant of 1 is no leak

            //outputRecalc = activationFn.Calculate(accumulator);

//			System.Diagnostics.Debug.WriteLine("out=" + outputRecalc);
            // range
//			outputRecalc = 1.0      *  Math.Tanh(0.2 * accumulator);

            //LEFT SHIFTED NON-STEEPENED---------------------------------------------
            //return (1/(1+(exp(-activesum-constant)))); //simple left shifted

            //NON-SHIFTED STEEPENED
            //return (1/(1+(exp(-(slope*activesum))))); //Compressed
        }
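The live line in Recalc is the leaky-integrator update y' = y + (1/τ)(−y + f(Σ w·x − bias)); with τ = 1 the previous output cancels and the neuron behaves like a plain activation-function neuron, matching the comment in the snippet. A minimal stand-alone sketch of that single step (names are hypothetical, not the project's classes):

    using System;

    static class LeakyIntegratorSketch
    {
        // One leaky-integrator step: newOutput = output + (1/tau) * (-output + f(sum - bias)).
        public static double Step(double previousOutput, double weightedInputSum, double bias,
                                  double timeConstant, Func<double, double> f)
        {
            double activated = f(weightedInputSum - bias);
            return previousOutput + (1.0 / timeConstant) * (-previousOutput + activated);
        }
    }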
Code example #6
            public IMatrix Activate(IMatrix input)
            {
                if (_activation == null)
                {
                    return(Execute(input));
                }

                using (var preActivation = Execute(input))
                    return(_activation.Calculate(preActivation));
            }
Code example #7
//		public FastConcurrentNetwork(NeatGenome.NeatGenome g, IActivationFunction activationFn)
//		{
//			this.activationFn = activationFn;
//
//		//----- Store/calculate some useful values.
//			outputNeuronCount = g.OutputNeuronCount;
//
//			int neuronGeneCount = g.NeuronGeneList.Count;
//
//			// Slightly inefficient - determine the number of bias nodes. Fortunately there is not actually
//			// any reason to ever have more than one bias node - although there may be 0.
//			int neuronGeneIdx=0;
//			for(; neuronGeneIdx<neuronGeneCount; neuronGeneIdx++)
//			{
//				if(g.NeuronGeneList[neuronGeneIdx].NeuronType != NeuronType.Bias)
//					break;
//			}
//			biasNeuronCount = neuronGeneIdx;
//			inputNeuronCount = g.InputNeuronCount;
//			totalInputNeuronCount = inputNeuronCount+biasNeuronCount;
//
//		//----- Allocate the arrays that make up the neural network.
//			// The neurons signals are initialised to 0 by default. Only bias nodes need setting to 1.
//			neuronSignalArray = new float[neuronGeneCount];
//			_neuronSignalArray = new float[neuronGeneCount];
//
//			for(int i=0; i<biasNeuronCount; i++)
//				neuronSignalArray[i] = 1.0F;
//
//			// ConnectionGenes point to a neuron ID. We need to map this ID to a 0 based index for
//			// efficiency. To do this we build a table of indexes (ints) keyed on neuron ID.
//			// TODO: An alternative here would be to forgo the building of a table and do a binary
//			search directly on the NeuronGeneList - probably a good idea to use a heuristic based upon
//			// neuroncount*connectioncount that decides on which technique to use. Small networks will
//			// likely be faster to decode using the binary search.
//
//			// Actually we can partly achieve the above optimization by using HybridDictionary instead of Hashtable.
//			// Although creating a table is a bit expensive.
//			HybridDictionary neuronIndexTable = new HybridDictionary(neuronGeneCount);
//			for(int i=0; i<neuronGeneCount; i++)
//				neuronIndexTable.Add(g.NeuronGeneList[i].InnovationId, i);
//
//			// Now we can build the connection array(s).
//			int connectionCount = g.ConnectionGeneList.Count;
//			connectionArray = new FastConnection[connectionCount];
//			for(int connectionIdx=0; connectionIdx<connectionCount; connectionIdx++)
//			{
//				ConnectionGene connectionGene = g.ConnectionGeneList[connectionIdx];
//
//				connectionArray[connectionIdx].sourceNeuronIdx = (int)neuronIndexTable[connectionGene.SourceNeuronId];
//				connectionArray[connectionIdx].targetNeuronIdx = (int)neuronIndexTable[connectionGene.TargetNeuronId];
//				connectionArray[connectionIdx].weight = (float)connectionGene.Weight;
//			}
//
//			// Now sort the connection array on sourceNeuronIdx, secondary sort on targetNeuronIdx.
//			// TODO: custom sort routine to prevent boxing/unboxing required by Array.Sort(ValueType[])
//			Array.Sort(connectionArray, fastConnectionComparer);
//		}

        #endregion

        #region INetwork Members

        public void SingleStep()
        {
            // Loop connections. Calculate each connection's output signal.
            for (int i = 0; i < connectionArray.Length; i++)
            {
                connectionArray[i].signal = neuronSignalArray[connectionArray[i].sourceNeuronIdx] * connectionArray[i].weight;
            }

            // Reset the pre-activation signals to 1.0 (the identity for the multiplicative
            // accumulation below); input and bias neurons are skipped.
            for (int i = totalInputNeuronCount; i < _neuronSignalArray.Length; i++)
            {
                _neuronSignalArray[i] = 1.0F;
            }

            // Loop the connections again. This time multiply the signals into the target
            // neurons (multiplicative network). This will largely require out-of-order
            // memory writes; this is the one loop where that happens.
            for (int i = 0; i < connectionArray.Length; i++)
            {
                _neuronSignalArray[connectionArray[i].targetNeuronIdx] *= connectionArray[i].signal;
                // Set flag to indicate this neuron has some inputs (required for multiplicative network).
                neuronSignalFlagArray[connectionArray[i].targetNeuronIdx] = true;
            }

            // Now loop _neuronSignalArray, pass the signals through the activation function
            // and store the result back to neuronSignalArray. Skip over input neurons - these
            // neurons should be untouched.
            for (int i = totalInputNeuronCount; i < _neuronSignalArray.Length; i++)
            {
                if (neuronSignalFlagArray[i])
                {
                    neuronSignalArray[i] = activationFn.Calculate(_neuronSignalArray[i]);
                }
                else
                {
                    neuronSignalArray[i] = activationFn.Calculate(0.0F);
                }

                // Take the opportunity to reset the pre-activation signal array.
                // Reset to 1.0 for multiplicative network.
                //_neuronSignalArray[i]=1.0F;
            }
        }
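SingleStep implements one synchronous step of a multiplicative network: pre-activation signals of non-input neurons are reset to 1.0, each incoming connection's signal is multiplied in, and neurons that received no input are activated on 0 instead. A compact sketch of the same idea with plain arrays (types and names here are illustrative only):

    using System;

    static class MultiplicativeStepSketch
    {
        // One synchronous step: pre-activations start at 1 (the multiplicative identity),
        // connection signals are multiplied in, and neurons with no inputs activate on 0.
        public static void Step(double[] signal, (int src, int tgt, double weight)[] connections,
                                int firstNonInputIdx, Func<double, double> f)
        {
            var pre = new double[signal.Length];
            var hasInput = new bool[signal.Length];
            for (int i = firstNonInputIdx; i < pre.Length; i++) pre[i] = 1.0;

            foreach (var (src, tgt, weight) in connections)
            {
                pre[tgt] *= signal[src] * weight;
                hasInput[tgt] = true;
            }

            for (int i = firstNonInputIdx; i < signal.Length; i++)
                signal[i] = f(hasInput[i] ? pre[i] : 0.0);
        }
    }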
Code example #8
        private double ApplyActivationFunction(double input, ActivationFunction activationFunction)
        {
            switch (activationFunction)
            {
            case ActivationFunction.Sigmoid:
                return(sigmoidActivationFunction.Calculate(input));

            default:
                return(input);
            }
        }
Code example #9
        public IMatrix Activate(IEnumerable <IMatrix> input)
        {
            var part = input.Zip(_part, (m, w) => w.Execute(m)).ToList();

            for (var i = 1; i < part.Count; i++)
            {
                part[0].AddInPlace(part[i]);
                part[i].Dispose();
            }
            using (var combined = part[0]) {
                return(_activation.Calculate(combined));
            }
        }
Code example #10
File: LSTM.cs  Project: fcmai/brightwire
        public void Activate(List <IDisposableMatrixExecutionLine> curr)
        {
            var curr2 = curr.Select(c => c.Current).ToList();

            using (var a = _c.Activate(curr2))
                using (var i = _i.Activate(curr2))
                    using (var f = _f.Activate(curr2))
                        using (var o = _o.Activate(curr2))
                            using (var f2 = f.PointwiseMultiply(curr[1].Current)) {
                                var ct = a.PointwiseMultiply(i);
                                ct.AddInPlace(f2);
                                using (var cta = _activation.Calculate(ct)) {
                                    curr[0].Assign(o.PointwiseMultiply(cta));
                                    curr[1].Assign(ct);
                                }
                            }
        }
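The cell update in this Activate is the standard LSTM recurrence: c_t = a ⊙ i + f ⊙ c_{t−1} and h_t = o ⊙ g(c_t). An element-wise sketch of just that recurrence, with plain arrays in place of IMatrix (names are hypothetical):

    using System;

    static class LstmCellSketch
    {
        // c' = a*i + f*c (element-wise), h' = o * g(c').
        public static (double[] hidden, double[] cell) Update(double[] a, double[] i, double[] f,
                                                              double[] o, double[] cell,
                                                              Func<double, double> g)
        {
            var newCell = new double[cell.Length];
            var hidden  = new double[cell.Length];
            for (int n = 0; n < cell.Length; n++)
            {
                newCell[n] = a[n] * i[n] + f[n] * cell[n];
                hidden[n]  = o[n] * g(newCell[n]);
            }
            return (hidden, newCell);
        }
    }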
Code example #11
        public INeuralNetworkRecurrentBackpropagation Execute(List <IMatrix> curr, bool backpropagate)
        {
            Debug.Assert(curr.Count == 2);
            var input  = curr[0];
            var memory = curr[1];
            var output = _Combine(input, memory, m => _activation.Calculate(m));

            curr[0] = output;
            curr[1] = output.Clone();

            if (backpropagate)
            {
                return(new Backpropagation(_activation, _input, _memory, input, memory, output));
            }
            //input.Dispose();
            //memory.Dispose();
            //output.Dispose();
            return(null);
        }
Code example #12
 public float Calculate(float value)
 {
     return(_activationFunction.Calculate(value));
 }
Code example #13
        /// <summary>
        /// Activate the network for a fixed number of iterations defined by
        /// the 'maxIterations' parameter at construction time. Activation
        /// reads input signals from InputSignalArray and writes output signals
        /// to OutputSignalArray.
        ///
        /// Input values are taken from the UnitController script.
        ///
        /// IMPROVEMENT? This function is too complex, so it should be broken
        /// into smaller pieces. Will the extra function calls be a problem?
        /// Test!
        /// </summary>
        public virtual void Activate()
        {
/*            foreach (FastConnection connect in _connectionArray)
 *          {
 *              UnityEngine.Debug.Log("Connection from " + connect._srcNeuronIdx +
 *                                    " to " + connect._tgtNeuronIdx);
 *          }*/

            // 1) Loops through connections from input and bias to local_input.
            // Copies the post-activation values directly.
            // This information is constant, so it is done outside of the
            // timesteps loop.
            for (int j = 0; j < _phenVars.localInFromBiasInCount; ++j)
            {
                _postActivationArray[_connectionArray[j]._tgtNeuronIdx] =
                    _postActivationArray[_connectionArray[j]._srcNeuronIdx];
            }

/*			UnityEngine.Debug.Log("CHECK after in to local in update CHECK CHECK CHECK");
 *          UnityEngine.Debug.Log("indices: " + 0 + " " + _phenVars.localInFromBiasInCount);
 *                      for (int h = 0; h < _phenVars.neuronCount; ++h)
 *                      {
 *                              UnityEngine.Debug.Log("Index: Pre/post " + h + ": " + _preActivationArray[h] +
 *                                      " " + _postActivationArray[h]);
 *                      }*/

            // Main loop:
            // Activate the network for a fixed number of timesteps.
            for (int i = 0; i < _phenVars.timestepsPerActivation; ++i)
            {
                // 2.1) Loops through connections from input and bias to
                // regulatory neurons. Copies the post-activation value of the
                // source to the pre-activation of the target.
                for (int j = _phenVars.localInFromBiasInCount; j < _inToRegEndIndex; ++j)
                {
                    _preActivationArray[_connectionArray[j]._tgtNeuronIdx] +=
                        _postActivationArray[_connectionArray[j]._srcNeuronIdx] *
                        _connectionArray[j]._weight;
                }

/*				UnityEngine.Debug.Log("CHECK after in to reg connection update CHECK CHECK CHECK");
 *                              UnityEngine.Debug.Log("indices: " + _phenVars.localInFromBiasInCount + " " + _inToRegEndIndex);
 *                              for (int h = 0; h < _phenVars.neuronCount; ++h)
 *                              {
 *                                      UnityEngine.Debug.Log("Index: Pre/post " + h + ": " + _preActivationArray[h] +
 *                                              " " + _postActivationArray[h]);
 *                              }*/

                // 2.2) Loops through non-protected connections.
                // Copies the post-activation value of the source to the
                // pre-activation of the target, applying the connection weight.
                for (int j = _inToRegEndIndex; j < _connectionArrayLength; ++j)
                {
                    _preActivationArray[_connectionArray[j]._tgtNeuronIdx] +=
                        _postActivationArray[_connectionArray[j]._srcNeuronIdx] *
                        _connectionArray[j]._weight;
                }

/*				UnityEngine.Debug.Log("CHECK after non protected connections update CHECK CHECK CHECK");
 *                              UnityEngine.Debug.Log("indices: " + _inToRegEndIndex + " " + _connectionArrayLength);
 *                              for (int h = 0; h < _phenVars.neuronCount; ++h)
 *                              {
 *                                      UnityEngine.Debug.Log("Index: Pre/post " + h + ": " + _preActivationArray[h] +
 *                                              " " + _postActivationArray[h]);
 *                              }*/

                // 3) Loops through connections from local output neurons to
                // regulatory or local input neurons, which have been grouped
                // by modules (there are regulatoryCount of them).
                // Applies the post-activation value for the regulatory neuron
                // in the module.
                // NOTICE! Module index i corresponds to module i + 1 (there are
                // never any connections of this type for module 0).
                // Note: until the last iteration we may forget about connections
                // to output neurons.
                for (int j = 0; j < _phenVars.regulatoryCount; ++j)
                {
                    // Gets the regulatory neuron post-activation value.
                    double moduleActivity = _postActivationArray[_firstRegIndex + j];
                    for (int k = 0; k < _localOutToRegLInModuleCount[j]; ++k)
                    {
/*                        UnityEngine.Debug.Log("Local out to reg or local in");
 *                      UnityEngine.Debug.Log("from " + _localOutToRegLInConnect[j][k]._srcNeuronIdx +
 *                                            " to " + _localOutToRegLInConnect[j][k]._tgtNeuronIdx);*/
                        _preActivationArray[_localOutToRegLInConnect[j][k]._tgtNeuronIdx] +=
                            _postActivationArray[_localOutToRegLInConnect[j][k]._srcNeuronIdx] *
                            _localOutToRegLInConnect[j][k]._weight * moduleActivity;
                    }
                }

/*				UnityEngine.Debug.Log("CHECK after local out to local in or reg connections MATRIX update CHECK CHECK CHECK");
 *                              for (int h = 0; h < _phenVars.neuronCount; ++h)
 *                              {
 *                                      UnityEngine.Debug.Log("Index: Pre/post " + h + ": " + _preActivationArray[h] +
 *                                              " " + _postActivationArray[h]);
 *                              }*/

                // 4) Loops through neurons (pre- and post-activation arrays).
                // Note: Local input neurons do not change their post-activation
                // values.
                // Note: Local_output-to-output neurons only need to be updated
                // in the last iteration.

                // 4.1) Loops through hidden neurons and local output neurons
                // with not only output neurons as target:
                // TODO: Consider updating here all hidden and local output neurons.
                // There will be very few local output neurons to ONLY output
                // neurons to make up for all the complications!
                for (int j = _inBiasOutRegEndIndex; j < _hiddenLocalOutNoOutEndIndex; ++j)
                {
                    // Updates the post-activation value
                    _postActivationArray[j] = _normalNeuronActivFn.Calculate(
                        _preActivationArray[j], null);
                    // Resets the pre-activation value
                    _preActivationArray[j] = 0.0;
                }

/*              UnityEngine.Debug.Log("CHECK after hidden and local out (no only to out) update CHECK CHECK CHECK");
 *                              UnityEngine.Debug.Log("indices: " + _inBiasOutRegEndIndex  + " " + _hiddenLocalOutNoOutEndIndex);
 *                              for (int h = 0; h < _phenVars.neuronCount; ++h)
 *                              {
 *                                      UnityEngine.Debug.Log("Index: Pre/post " + h + ": " + _preActivationArray[h] +
 *                                              " " + _postActivationArray[h]);
 *                              }*/

                // 4.2) Loops through local input neurons with local output
                // neurons as sources. Local input neurons with bias and input
                // neurons as sources have fixed input, therefore fixed output.
                // Local input neurons are special in that they copy their
                // input.
                for (int j = _localInFromBiasInEndIndex; j < _localOutToOutFirstIndex; ++j)
                {
                    // Updates the post-activation value
                    _postActivationArray[j] = _preActivationArray[j];
                    // Resets the pre-activation value
                    _preActivationArray[j] = 0.0;
                }

/*              UnityEngine.Debug.Log("CHECK after LIn with LO as sources update CHECK CHECK CHECK");
 *              UnityEngine.Debug.Log("indices: " + _localInFromBiasInEndIndex  + " " + _localOutToOutFirstIndex);
 *              for (int h = 0; h < _phenVars.neuronCount; ++h)
 *              {
 *                  UnityEngine.Debug.Log("Index: Pre/post " + h + ": " + _preActivationArray[h] +
 *                      " " + _postActivationArray[h]);
 *              }*/

                // 4.3) Loops through local_output-to-output neurons:
                // We will need the correct post-activation values during the
                // last iteration, but this will be done AFTER this loop, so it
                // is Ok to update these only in the last iteration.
                if (i == _phenVars.timestepsPerActivation - 1)
                {
                    // Updates and resets
                    for (int j = _localOutToOutFirstIndex; j < _phenVars.neuronCount; ++j)
                    {
                        // UnityEngine.Debug.Log("CHECK after local out to out intermediate update INDEX " + j);
                        _postActivationArray[j] = _normalNeuronActivFn.Calculate(
                            _preActivationArray[j], null);
                        _preActivationArray[j] = 0.0;
                    }
                }
                else
                {
                    // Only resets their pre-activation values.
                    for (int j = _localOutToOutFirstIndex; j < _phenVars.neuronCount; ++j)
                    {
                        _preActivationArray[j] = 0.0;
                    }
                }

/*				UnityEngine.Debug.Log("CHECK after local out to out intermediate update CHECK CHECK CHECK");
 *                              UnityEngine.Debug.Log("indices: " + _localOutToOutFirstIndex + " " + _phenVars.neuronCount);
 *                              for (int h = 0; h < _phenVars.neuronCount; ++h)
 *                              {
 *                                      UnityEngine.Debug.Log("Index: Pre/post " + h + ": " + _preActivationArray[h] +
 *                                              " " + _postActivationArray[h]);
 *                              }*/

                // 4.4) Regulatory neurons. For pandemonium group 0 we update
                // them as usual. For the other groups we find the regulatory
                // neuron with the highest pre-activation activity and set its
                // post-activation value to 1 and the rest to 0.
                // First we go through group 0.
                for (int k = 0; k < _phenVars.pandemoniumCounts[0]; ++k)
                {
                    // UnityEngine.Debug.Log("Regulatory in 0, index " + _pandemonium[0][k]);
                    // Updates the post-activation value
                    _postActivationArray[_pandemonium[0][k]] =
                        _regulatoryActivFn.Calculate(
                            _preActivationArray[_pandemonium[0][k]], null);
                    // Resets the pre-activation value
                    _preActivationArray[_pandemonium[0][k]] = 0.0;
                }
                // Rest of groups
                for (int j = 1; j < _phenVars.numberOfPandem; ++j)
                {
                    // We need to identify the neuron with maximum pre-activation
                    // activity.
                    int maxIndex = -1;
                    // We require at least a small activation threshold
                    double maxPreActivity = 0.05;
                    for (int k = 0; k < _phenVars.pandemoniumCounts[j]; ++k)
                    {
                        // UnityEngine.Debug.Log("Regulatory in " + j + ", index " + _pandemonium[j][k]);
                        if (_preActivationArray[_pandemonium[j][k]] > maxPreActivity)
                        {
                            maxIndex       = _pandemonium[j][k];
                            maxPreActivity = _preActivationArray[maxIndex];
                        }
                        // We take the chance to reset the pre-activation.
                        // We also set the post-activation to 0 (then we
                        // update this for the chosen regulatory neuron.
                        _preActivationArray[_pandemonium[j][k]]  = 0.0;
                        _postActivationArray[_pandemonium[j][k]] = 0.0;
                    }
                    // If there is at least one regulatory neuron above threshold:
                    if (maxIndex != -1)
                    {
                        _postActivationArray[maxIndex] = 1.0;
                    }
                }

/*				UnityEngine.Debug.Log("CHECK after pandemonium MATRIX update CHECK CHECK CHECK");
 *                              for (int h = 0; h < _phenVars.neuronCount; ++h)
 *                              {
 *                                      UnityEngine.Debug.Log("Index: Pre/post " + h + ": " + _preActivationArray[h] +
 *                                              " " + _postActivationArray[h]);
 *                              }*/
            }

            // 5) Loops through local_output-to-output connections, which are
            // grouped by module (there are regulatoryCount of them).
            // Applies the module regulation.
            for (int j = 0; j < _phenVars.regulatoryCount; ++j)
            {
                // Gets the regulatory neuron post-activation value.
                double moduleActivity = _postActivationArray[_firstRegIndex + j];
                for (int k = 0; k < _localOutToOutModuleCount[j]; ++k)
                {
/*                    UnityEngine.Debug.Log("Local out to out. from " + _localOutToOutConnect[j][k]._srcNeuronIdx +
 *                                        " to " + _localOutToOutConnect[j][k]._tgtNeuronIdx);*/
                    _preActivationArray[_localOutToOutConnect[j][k]._tgtNeuronIdx] +=
                        _postActivationArray[_localOutToOutConnect[j][k]._srcNeuronIdx] *
                        _localOutToOutConnect[j][k]._weight * moduleActivity;
                }
            }

/*          UnityEngine.Debug.Log("CHECK after local out to out MATRIX final update CHECK CHECK CHECK");
 *                      for (int h = 0; h < _phenVars.neuronCount; ++h)
 *                      {
 *                              UnityEngine.Debug.Log("Index: Pre/post " + h + ": " + _preActivationArray[h] +
 *                                      " " + _postActivationArray[h]);
 *                      }*/

            // 6) Loops through output neurons (with their own activation
            // function). Gets the final post-activation values.
            for (int j = _phenVars.inputBiasCount; j < _firstRegIndex; ++j)
            {
                _postActivationArray[j] = _outputNeuronActivFn.Calculate(
                    _preActivationArray[j], null);
                _preActivationArray[j] = 0.0;
            }

/*			UnityEngine.Debug.Log("CHECK after out final update CHECK CHECK CHECK");
 *                      UnityEngine.Debug.Log("indices: " + _phenVars.inputBiasCount + " " + _firstRegIndex);
 *          for (int h = 0; h < _phenVars.neuronCount; ++h)
 *          {
 *              UnityEngine.Debug.Log("Index: Pre/post " + h + ": " + _preActivationArray[h] +
 *                  " " + _postActivationArray[h]);
 *          }*/
        }
Code example #14
File: Neuron.cs  Project: cryingwhitecat/NNet
 public virtual double Calculate()
 {
     this.Output = _activationFunction.Calculate(GetWeightedInput());
     return(this.Output);
 }
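GetWeightedInput is not shown; it presumably sums each incoming connection's source output times its weight before the activation function is applied. A hedged sketch of that shape (the tuple-based signature below is illustrative, not the NNet project's API):

    using System.Collections.Generic;

    static class WeightedInputSketch
    {
        // Sum of (source output * connection weight) over all incoming connections.
        public static double WeightedInput(IEnumerable<(double sourceOutput, double weight)> inputs)
        {
            double sum = 0.0;
            foreach (var (sourceOutput, weight) in inputs)
                sum += sourceOutput * weight;
            return sum;
        }
    }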