        /// <summary>
        /// Builds an artificial neuron model with the specified parameters.
        /// </summary>
        /// <param name="synapticConnectionAmount">The number of synaptic connections (the dimensionality of the input vector).</param>
        /// <param name="threshold">Threshold that increases the output value of the activation function.</param>
        /// <param name="learningSpeed">Learning-rate coefficient. A number in the half-open interval (0; double.MaxValue]. Recommended value: 1.</param>
        /// <returns>An artificial neuron model with a sigmoidal activation function (hyperbolic tangent).</returns>
        public INeuron Build(int synapticConnectionAmount, double threshold, double learningSpeed)
        {
            var neuron = new SigmoidNeuron(synapticConnectionAmount, threshold, learningSpeed);

            // Initialise each synaptic weight with a small random value scaled by 1 / sqrt(n),
            // where n is the number of synaptic connections.
            var dispersion = 1.0 / Math.Sqrt(synapticConnectionAmount);

            for (var i = 0; i < neuron.SynapticWeights.Count; i++)
            {
                neuron.SynapticWeights[i] = MathHelper.GetRandomValue(0, dispersion);
            }

            return neuron;
        }
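A minimal usage sketch of the builder above; the builder's class name (SigmoidNeuronBuilder) is an assumption, since it does not appear in this excerpt.

        // Hypothetical usage; the class name SigmoidNeuronBuilder is assumed.
        var builder = new SigmoidNeuronBuilder();
        INeuron neuron = builder.Build(
            synapticConnectionAmount: 4,   // four inputs, so initial weights are scaled by 1 / sqrt(4) = 0.5
            threshold: 0.0,
            learningSpeed: 1.0);           // recommended value per the documentation above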
Example #2
        public void TestSigmoidalFunction()
        {
            NeuronInput inputA = new NeuronInput(0.67, 1.5);
            NeuronInput inputB = new NeuronInput(0.5, 1.0);
            NeuronInput inputC = new NeuronInput(0.8, 0.8);

            NeuronInput[] inputs = new NeuronInput[3];

            inputs[0] = inputA;
            inputs[1] = inputB;
            inputs[2] = inputC;

            SigmoidNeuron neuron = new SigmoidNeuron(inputs, 3.0);

            double output   = neuron.SigmoidalFunction();
            double expected = 0.99420529989699;
            double delta    = 1e-9;   // fixed tolerance for the floating-point comparison

            Assert.AreEqual(expected, output, delta);
        }
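The expected value in the test is consistent with applying the logistic function to the weighted sum of the inputs plus the second constructor argument (3.0), assuming NeuronInput holds a value and a weight. A minimal sketch of that computation under those assumptions:

        // Sketch only: reproduces the expected value of the test above, assuming
        // SigmoidalFunction() computes 1 / (1 + e^-(sum of value*weight + 3.0)).
        double z      = 0.67 * 1.5 + 0.5 * 1.0 + 0.8 * 0.8 + 3.0;   // = 5.145
        double output = 1.0 / (1.0 + Math.Exp(-z));                  // ≈ 0.99420529989699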
Example #3
        public void Backprop()
        {
            FeedForward();

            // Go through the output layer.
            for (int i = 0; i < LayerNeuronCounts[NumLayers - 1]; ++i)
            {
                // For each neuron in the output layer, compute its error term from the cost derivative and the derivative of the activation.
                SigmoidNeuron tempNeuron = NetLayers[NumLayers - 1].Nodes[i];
                tempNeuron.Delta      = CostDerivertive(Outputs[i], ExpectedOutputs[i]) * SigmoidPrime(tempNeuron.z);
                tempNeuron.NablaBias += tempNeuron.Delta;

                // then go through each neuron connected to this neuron.
                for (int j = 0; j < tempNeuron.Inputs.Length; ++j)
                {
                    // Accumulate the required change in this weight.
                    tempNeuron.NablaWeights[j] += tempNeuron.Delta * tempNeuron.Inputs[j].Activation;
                    // And propagate the error term to the connected neuron.
                    tempNeuron.Inputs[j].Delta = (tempNeuron.Weights[j] * tempNeuron.Delta) * SigmoidPrime(tempNeuron.Inputs[j].z);
                }
            }

            // Go through the remaining layers and the nodes in those layers.
            for (int i = NumLayers - 2; i > 0; --i)
            {
                for (int j = 0; j < LayerNeuronCounts[i]; ++j)
                {
                    NetLayers[i].Nodes[j].NablaBias += NetLayers[i].Nodes[j].Delta;
                    // For each node connected to this node, set the required change in weight and its required change property.
                    for (int k = 0; k < NetLayers[i].Nodes[j].NablaWeights.Length; ++k)
                    {
                        NetLayers[i].Nodes[j].NablaWeights[k] += NetLayers[i].Nodes[j].Delta * NetLayers[i].Nodes[j].Inputs[k].Activation;
                        NetLayers[i].Nodes[j].Inputs[k].Delta  = (NetLayers[i].Nodes[j].Weights[k] * NetLayers[i].Nodes[j].Delta) * SigmoidPrime(NetLayers[i].Nodes[j].Inputs[k].z);
                    }
                }
            }
        }
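Backprop() relies on CostDerivertive and SigmoidPrime, which are not shown in this excerpt. A minimal sketch of what they could look like, assuming a quadratic cost and the logistic activation used by FeedForward():

        // Sketch of the helpers referenced above; both bodies are assumptions.
        // For the quadratic cost C = 1/2 * (a - y)^2, the derivative with respect to a is (a - y).
        private double CostDerivertive(double output, double expectedOutput)
        {
            return output - expectedOutput;
        }

        // Derivative of the logistic function: sigma'(z) = sigma(z) * (1 - sigma(z)).
        private double SigmoidPrime(double z)
        {
            double s = Sigmoid(z);
            return s * (1.0 - s);
        }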
Example #4
        public void FeedForward()
        {
            // Load the network inputs into the activations of the input layer.
            for (int i = 0; i < LayerNeuronCounts[0]; ++i)
            {
                NetLayers[0].Nodes[i].Activation = Inputs[i];
            }

            // Go through each layer that isn't the input layer.
            for (int i = 1; i < NumLayers; ++i)
            {
                // Go through each node in this layer and calculate its activation value.
                for (int j = 0; j < LayerNeuronCounts[i]; ++j)
                {
                    SigmoidNeuron tempNeuron = NetLayers[i].Nodes[j];
                    tempNeuron.Activation = Sigmoid(tempNeuron.GetZ());
                }
            }

            // Update the outputs from the output layer.
            for (int i = 0; i < LayerNeuronCounts[NumLayers - 1]; ++i)
            {
                Outputs[i] = NetLayers[NumLayers - 1].Nodes[i].Activation;
            }
        }
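FeedForward() in turn relies on Sigmoid and on SigmoidNeuron.GetZ(), which are also not shown. A minimal sketch, assuming GetZ() returns the weighted sum of the incoming activations plus the neuron's bias (the Bias field is an assumption; only NablaBias appears in this excerpt):

        // Sketch of the activation function used above.
        private double Sigmoid(double z)
        {
            return 1.0 / (1.0 + Math.Exp(-z));
        }

        // Sketch of GetZ() inside SigmoidNeuron; Bias is an assumed field.
        public double GetZ()
        {
            double z = Bias;
            for (int i = 0; i < Inputs.Length; ++i)
            {
                z += Weights[i] * Inputs[i].Activation;
            }
            return z;
        }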