Example #1
        /// <summary>
        /// Initializes the neural network.
        /// </summary>
        /// <param name="inputLayerNodes">Number of nodes in the input layer</param>
        /// <param name="outputLayerNodes">Number of nodes in the output layer</param>
        /// <param name="hiddenLayerNodes">Number of nodes in each hidden layer</param>
        /// <param name="hiddenLayers">Number of hidden layers</param>
        public void Init(int inputLayerNodes, int outputLayerNodes, int hiddenLayerNodes, int hiddenLayers)
        {
            // clear any previously built layers
            _allLayer.Clear();
            _inputLayer  = null;
            _outputLayer = null;

            // unless hidden layers are added below, the output layer connects
            // directly to the inputs
            int outputLayerInputs = inputLayerNodes;

            // input layer
            _inputLayer = AddLayer(inputLayerNodes, 1, ENeuralLayerType.Input);

            // hidden layers
            if (hiddenLayers > 0)
            {
                outputLayerInputs = hiddenLayerNodes;

                // the first hidden layer connects back to the inputs
                AddLayer(hiddenLayerNodes, inputLayerNodes, ENeuralLayerType.Hidden);

                for (int i = 0; i < hiddenLayers - 1; i++)
                {
                    AddLayer(hiddenLayerNodes, hiddenLayerNodes, ENeuralLayerType.Hidden);
                }
            }

            // output layer
            _outputLayer = AddLayer(outputLayerNodes, outputLayerInputs, ENeuralLayerType.Output);
        }
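A minimal usage sketch (the enclosing class is not named in these snippets; MoNeuralNet is assumed here):

        // Hypothetical usage; the class name MoNeuralNet is an assumption.
        // Builds a 2-4-1 network: 2 inputs, one hidden layer of 4 nodes, 1 output.
        var net = new MoNeuralNet();
        net.Init(inputLayerNodes: 2, outputLayerNodes: 1, hiddenLayerNodes: 4, hiddenLayers: 1);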
Example #2
        /// <summary>
        /// Adds a layer to the neural network.
        /// </summary>
        private MoNeuralLayer AddLayer(int neurons, int inputs, ENeuralLayerType layerType)
        {
            MoNeuralLayer layer = new MoNeuralLayer(neurons, inputs, layerType);

            _allLayer.Add(layer);
            return layer;
        }
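The MoNeuralLayer constructor itself is not included in these snippets. Judging from how Propagate and AdjustWeights index Weights, each neuron presumably carries inputs + 1 weights, with the extra slot paired to a fixed -1 bias input; a sketch under that assumption (the MoNeuron type and its constructor are also assumed):

        // Sketch, not the original: each neuron holds inputs + 1 weights,
        // the last of which is multiplied by a constant -1 bias input.
        public MoNeuralLayer(int neurons, int inputs, ENeuralLayerType layerType)
        {
            LayerType = layerType;
            Neurons   = new List<MoNeuron>(neurons);
            for (int i = 0; i < neurons; i++)
            {
                Neurons.Add(new MoNeuron(inputs + 1)); // +1 for the bias weight
            }
        }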
Example #3
        /// <summary>
        /// Forward propagation: feeds this layer's outputs into nextLayer.
        /// </summary>
        public void Propagate(ENeuralAct neuralACT, MoNeuralLayer nextLayer)
        {
            int numNeurons = nextLayer.Neurons.Count;

            for (int i = 0; i < numNeurons; ++i)
            {
                float value = 0.0f;

                int numWeights = Neurons.Count;
                for (int j = 0; j < numWeights; ++j)
                {
                    // sum the (weights * inputs); the inputs are the outputs
                    // of this layer, the one propagating forward
                    value += nextLayer.Neurons[i].Weights[j] * Neurons[j].Output;
                }

                // add in the bias (always has an input of -1)
                value += nextLayer.Neurons[i].Weights[numWeights] * -1.0f;

                // store the outputs, but run activation first
                switch (neuralACT)
                {
                case ENeuralAct.Step:
                    nextLayer.Neurons[i].Output = ActStep(value);
                    break;

                case ENeuralAct.Tanh:
                    nextLayer.Neurons[i].Output = ActTanh(value);
                    break;

                case ENeuralAct.Logistic:
                    nextLayer.Neurons[i].Output = ActLogistic(value);
                    break;

                case ENeuralAct.BipolarSigmoid:
                    nextLayer.Neurons[i].Output = ActBipolarSigmoid(value);
                    break;

                case ENeuralAct.Linear:
                    nextLayer.Neurons[i].Output = value;
                    break;

                default:
                    throw new Exception("Should never get here.");
                }
            }

            // If you wanted to run the Softmax activation function, you would
            // do it here, since it needs all the output values. If you pushed
            // all the outputs into a vector, you could...
            //     outputs = ActSoftmax(outputs);
            // ...and then put the outputs back into the correct slots.
        }
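The activation helpers Propagate dispatches to are not included in these snippets; the standard definitions would look like the following sketch (MathF assumes a .NET build; a Unity build would use UnityEngine.Mathf, as Example #5 does):

        // Sketch of the activation helpers, using their standard definitions;
        // the originals are not shown in these snippets.
        private static float ActStep(float x)           => x >= 0.0f ? 1.0f : 0.0f;
        private static float ActTanh(float x)           => MathF.Tanh(x);
        private static float ActLogistic(float x)       => 1.0f / (1.0f + MathF.Exp(-x));
        private static float ActBipolarSigmoid(float x) => 2.0f / (1.0f + MathF.Exp(-x)) - 1.0f;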
Example #4
        /// <summary>
        /// Adjusts the weight values.
        /// </summary>
        /// <param name="inputLayer">The layer feeding into this one</param>
        /// <param name="learningRate">Defaults to 0.1f</param>
        /// <param name="momentum">Defaults to 0.9f</param>
        public void AdjustWeights(MoNeuralLayer inputLayer, float learningRate, float momentum)
        {
            for (int i = 0; i < Neurons.Count; i++)
            {
                int numWeights = Neurons[i].Weights.Count;
                for (int j = 0; j < numWeights; ++j)
                {
                    // the bias weight always uses a -1 output value
                    float output = (j == (numWeights - 1)) ? -1.0f : inputLayer.Neurons[j].Output;
                    float error  = Neurons[i].Error;

                    // momentum-smoothed delta rule: blend the previous delta
                    // with the new gradient step
                    float delta  = momentum * Neurons[i].LastDelta[j] + (1.0f - momentum) * learningRate * error * output;
                    Neurons[i].Weights[j]  += delta;
                    Neurons[i].LastDelta[j] = delta;
                }
            }
        }
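AdjustWeights assumes each neuron's Error has already been seeded. For the output layer, a common pattern (not shown in these snippets; targets is a hypothetical array of expected outputs, and a logistic output activation is assumed) is the squared-error gradient scaled by the activation derivative:

        // Hypothetical output-layer error seeding; 'targets' is an assumed array.
        for (int i = 0; i < _outputLayer.Neurons.Count; i++)
        {
            float output = _outputLayer.Neurons[i].Output;
            _outputLayer.Neurons[i].Error = (targets[i] - output) * DerLogistic(output);
        }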
Example #5
        /// <summary>
        /// Softmax activation: normalizes the outputs so that they sum to 1.
        /// </summary>
        private void ActSoftmax(MoNeuralLayer outputs)
        {
            // the softmax denominator: the sum of the exponentials of all outputs
            float total = 0.0f;

            for (int i = 0; i < outputs.Neurons.Count; ++i)
            {
#if UNITY_ENGINE
                total += UnityEngine.Mathf.Exp(outputs.Neurons[i].Output);
#else
                total += MathF.Exp(outputs.Neurons[i].Output);
#endif
            }
            for (int i = 0; i < outputs.Neurons.Count; ++i)
            {
#if UNITY_ENGINE
                outputs.Neurons[i].Output = UnityEngine.Mathf.Exp(outputs.Neurons[i].Output) / total;
#else
                outputs.Neurons[i].Output = MathF.Exp(outputs.Neurons[i].Output) / total;
#endif
            }
        }
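Exponentials overflow single-precision floats quickly, so a common alternative (a sketch, not the author's code) subtracts the largest output before exponentiating, which leaves the softmax result unchanged:

        // Alternative sketch: max-subtracted softmax (shown with the MathF
        // branch; a Unity build would swap in UnityEngine.Mathf as above).
        float max = float.MinValue;
        for (int i = 0; i < outputs.Neurons.Count; ++i)
            max = MathF.Max(max, outputs.Neurons[i].Output);

        float total = 0.0f;
        for (int i = 0; i < outputs.Neurons.Count; ++i)
            total += MathF.Exp(outputs.Neurons[i].Output - max);

        for (int i = 0; i < outputs.Neurons.Count; ++i)
            outputs.Neurons[i].Output = MathF.Exp(outputs.Neurons[i].Output - max) / total;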
Example #6
        /// <summary>
        /// Backpropagation. Note that nextLayer here is the next layer in the
        /// backward direction, i.e. the previous layer of the forward pass.
        /// </summary>
        public void BackPropagate(ENeuralAct neuralACT, MoNeuralLayer nextLayer)
        {
            int numNeurons = nextLayer.Neurons.Count;

            for (int i = 0; i < numNeurons; ++i)
            {
                float outputVal = nextLayer.Neurons[i].Output;
                float error     = 0;

                // accumulate the error from this layer's neurons, weighted by
                // the connection from nextLayer's neuron i into each of them
                for (int j = 0; j < Neurons.Count; ++j)
                {
                    error += Neurons[j].Weights[i] * Neurons[j].Error;
                }

                switch (neuralACT)
                {
                case ENeuralAct.Tanh:
                    nextLayer.Neurons[i].Error = DerTanh(outputVal) * error;
                    break;

                case ENeuralAct.Logistic:
                    nextLayer.Neurons[i].Error = DerLogistic(outputVal) * error;
                    break;

                case ENeuralAct.BipolarSigmoid:
                    nextLayer.Neurons[i].Error = DerBipolarSigmoid(outputVal) * error;
                    break;

                case ENeuralAct.Linear:
                    // the forward pass treats Linear as the identity, whose
                    // derivative is 1, so the summed error passes through unchanged
                    nextLayer.Neurons[i].Error = error;
                    break;

                default:
                    throw new NotImplementedException();
                }
            }
        }
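The derivative helpers are likewise not shown. BackPropagate passes each one the neuron's output rather than its pre-activation input, so the standard forms expressed in terms of the output y would be (a sketch, not the original code):

        // Sketch of the derivative helpers; each takes the neuron's output y,
        // which is why the derivatives are written in terms of y.
        private static float DerTanh(float y)           => 1.0f - y * y;                   // tanh' = 1 - tanh^2
        private static float DerLogistic(float y)       => y * (1.0f - y);                 // logistic' = y(1 - y)
        private static float DerBipolarSigmoid(float y) => 0.5f * (1.0f + y) * (1.0f - y); // bipolar' = (1 - y^2)/2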