Example #1
    //propagate the error backwards; this is the step that actually emulates learning
    public void BackProp(Perceptron[] outputs)
    {
        int i;

        //traverse the output layer and compute each output perceptron's error
        for (i = 0; i < outputPer.Length; i++)
        {
            Perceptron p     = outputPer[i];
            float      state = p.state;
            float      error = state * (1f - state);
            error *= outputs[i].state - state;
            //store the error on the output perceptron so the hidden layer
            //below can read it, then adjust the incoming weights
            p.error = error;
            p.AdjustWeights(error);
        }
        //traverse the hidden (internal) perceptron layer
        for (i = 0; i < hiddenPer.Length; i++)
        {
            Perceptron p     = hiddenPer[i];
            float      state = p.state;
            float      sum   = 0f;
            //sum the error coming back from the output layer, weighted by each
            //connection's incoming weight; outputs[] holds the expected values,
            //so the propagated error is read from outputPer
            for (int j = 0; j < outputPer.Length; j++)
            {
                float incomingW = outputPer[j].GetIncomingWeight();
                sum += incomingW * outputPer[j].error;
            }
            //delta for a hidden perceptron: state * (1 - state) * weighted sum
            float error = state * (1f - state) * sum;
            p.AdjustWeights(error);
        }
    }
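
The two loops above implement the standard delta rule: an output perceptron's error is state * (1 - state) * (expected - state), and a hidden perceptron's error is state * (1 - state) times the weighted sum of the output errors flowing back through its connections. As a rough illustration only, the sketch below shows how this BackProp step might be driven from a training loop. Everything outside BackProp itself is an assumption: MLPNetwork, SetInputs, FeedForward, and the idea of passing the expected values as Perceptron objects whose state holds the target are hypothetical names, not part of the listing above.

    //a minimal, hypothetical training sketch around BackProp; SetInputs,
    //FeedForward and the MLPNetwork/Perceptron usage shown here are assumed
    //helpers, not part of the listing above
    public void TrainSketch(MLPNetwork net, float[][] samples, float[][] expected, int epochs)
    {
        for (int epoch = 0; epoch < epochs; epoch++)
        {
            for (int s = 0; s < samples.Length; s++)
            {
                net.SetInputs(samples[s]); //assumed: load the input layer
                net.FeedForward();         //assumed: compute every perceptron's state

                //wrap the expected values so that outputs[i].state holds the target
                Perceptron[] targets = new Perceptron[expected[s].Length];
                for (int k = 0; k < targets.Length; k++)
                {
                    targets[k] = new Perceptron();
                    targets[k].state = expected[s][k]; //assumes state is publicly settable
                }

                net.BackProp(targets);
            }
        }
    }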