Example #1
File: Network.cs Project: ronin13/gneuron
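The snippet relies on type aliases and fields declared elsewhere in Network.cs. Below is a minimal sketch of what those declarations presumably look like, assuming the Microsoft Accelerator v2 API (Microsoft.ParallelArrays); the field types are inferred from how run() uses them, and anything not named in the snippet is a guess:

// Assumed aliases and fields (a sketch, not from the original file):
using FPA  = Microsoft.ParallelArrays.FloatParallelArray;
using DFPA = Microsoft.ParallelArrays.DisposableFloatParallelArray;
using PA   = Microsoft.ParallelArrays.ParallelArrays;

public partial class Network
{
    private DFPA dinput, doutput;   // training inputs and expected outputs
    private DFPA diwt, dowt;        // input->hidden and hidden->output weights
    private FPA dtheta, dtau;       // hidden- and output-layer thresholds
    private FPA derror;             // per-output error bound for early stopping
    private float[,] iwt, owt;      // host-side copies of the weight matrices
    private float betah, betao;     // hidden- and output-layer learning rates
    private int numpat, nh, no;     // pattern count, hidden units, output units
    private int traincycles;        // remaining training cycles
}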
        /*
         * Function which performs all the GPU operations
         */
        private void run()
        {
            /* Note: InnerProduct performs matrix multiplication;
             *       Multiply performs element-by-element multiplication. */

            /* Net input to the hidden layer: inputs x weights + thresholds */
            FPA t1 = PA.Add(PA.InnerProduct(dinput, diwt), dtheta);

            /* ohidden is the output of the hidden layer: sigmoid(t1) = 1 / (1 + e^-t1).
             * Only the sigmoid activation is used for the time being. */
            FPA ohidden = PA.Reciprocal(PA.Add(PA.Pow(new FPA(2.71828f, new int[] { numpat, nh }), PA.Negate(t1)), 1.0f));

            /* Net input to the output layer: hidden outputs x weights + thresholds */
            FPA t2 = PA.Add(PA.InnerProduct(ohidden, dowt), dtau);

            /* ooutput is the actual output of the output layer: sigmoid(t2).
             * Only the sigmoid activation is used for the time being. */
            FPA ooutput = PA.Reciprocal(PA.Add(PA.Pow(new FPA(2.71828f, new int[] { numpat, no }), PA.Negate(t2)), 1.0f));

            /* Error between expected and actual */
            FPA oerror = PA.Subtract(doutput, ooutput);

            /* Check whether every error has fallen below the 1% threshold
             * (derror); if so, terminate further training cycles. */
            BoolParallelArray b = PA.All(PA.CompareGreater(derror, PA.Abs(oerror)), 1);

            b = PA.All(b);
            bool[] bt;
            PA.ToArray(b, out bt);
            if (bt[0])
            {
                traincycles = 0;
            }

            /* herror is the error backpropagated to the hidden layer */
            FPA herror = PA.Transpose(PA.InnerProduct(dowt, PA.Transpose(oerror, new int[] { 1, 0 })), new int[] { 1, 0 });

            /* Scale by the sigmoid derivative, ohidden * (1 - ohidden) */
            herror = PA.Multiply(PA.Multiply(PA.Subtract(1.0f, ohidden), ohidden), herror);

            /* Update the weights between the hidden and output layers */
            FPA _owt = PA.Add(PA.Multiply(PA.InnerProduct(PA.Transpose(ohidden, new int[] { 1, 0 }), oerror), betao), dowt);

            /* Update the weights between the input and hidden layers */
            FPA _iwt = PA.Add(PA.Multiply(PA.InnerProduct(PA.Transpose(dinput, new int[] { 1, 0 }), herror), betah), diwt);

            /* Update the thresholds for the output layer */
            dtau = PA.Add(PA.Multiply(betao, oerror), dtau);

            /* Update the thresholds for the hidden layer */
            dtheta = PA.Add(PA.Multiply(betah, herror), dtheta);

            /* Copy the parallel arrays back to ordinary host arrays */
            PA.ToArray(_owt, out owt);
            PA.ToArray(_iwt, out iwt);

            /* Dispose of the old parallel arrays and rebuild them from the
             * newly computed weights */
            diwt.Dispose();
            dowt.Dispose();
            diwt = new DFPA(iwt);
            dowt = new DFPA(owt);
        }
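run() performs one full training pass over all patterns and zeroes traincycles once every error is within the threshold, so the caller presumably drives it in a loop. A hypothetical driver (train() and the loop below are an assumption, not part of the original file):

        /* Hypothetical driver (a sketch): repeat GPU training passes until
         * the cycle budget is spent, or until run() detects convergence and
         * sets traincycles to 0. */
        private void train()
        {
            while (traincycles > 0)
            {
                run();
                traincycles--;
            }
        }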
Example #2
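This override references members declared elsewhere in its class. Below is a minimal sketch of the assumed surrounding declarations; the types are inferred from how FindBMU() uses them, and every name that does not appear in the snippet is a hypothetical stand-in:

using Microsoft.ParallelArrays;

// Assumed surrounding declarations (a sketch, not from the original file):
public abstract class NeuronLayer
{
    protected FloatParallelArray m_GPUWeight; // neuron weights, one row per neuron
    protected FloatParallelArray m_GPUInput;  // input patterns, one row per pattern
    protected FloatParallelArray m_PWinner;   // output: 1 marks each winning neuron
    protected Som m_Parent;                   // owning map (hypothetical type)

    public abstract void FindBMU();
}

public class Som
{
    public DataSet DataSource;  // exposes the pattern length
    public float[,] NeuronMap;  // 2-D neuron grid (element type is a guess)
}

public class DataSet
{
    public int PatternLength;   // length of one input pattern
}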
        public override void FindBMU()
        {
            //Normalize the weight vectors
            FloatParallelArray transpose    = ParallelArrays.Transpose(m_GPUWeight, 1, 0);
            FloatParallelArray weightsq     = ParallelArrays.InnerProduct(m_GPUWeight, transpose);
            FloatParallelArray weightsum    = ParallelArrays.Sum(weightsq, 0);
            FloatParallelArray weightlength = ParallelArrays.Sqrt(weightsum);

            weightlength = ParallelArrays.Stretch(ParallelArrays.AddDimension(weightlength, 1), 1, m_Parent.DataSource.PatternLength);
            FloatParallelArray weightnorm = ParallelArrays.Divide(m_GPUWeight, weightlength);

            weightnorm = ParallelArrays.Transpose(weightnorm, 1, 0);

            //Normalize the input vectors
            FloatParallelArray inputsq     = ParallelArrays.InnerProduct(m_GPUInput, ParallelArrays.Transpose(m_GPUInput, 1, 0));
            FloatParallelArray inputsum    = ParallelArrays.Sum(inputsq, 0);
            FloatParallelArray inputlength = ParallelArrays.Sqrt(inputsum);

            inputlength = ParallelArrays.Stretch(ParallelArrays.AddDimension(inputlength, 1), 1, m_Parent.DataSource.PatternLength);
            FloatParallelArray inputnorm = ParallelArrays.Divide(m_GPUInput, inputlength);

            FloatParallelArray pacc = ParallelArrays.InnerProduct(inputnorm, weightnorm);

            //ParallelArrays.MaxVal has a replication bug here; forcing an
            //evaluation of the intermediate result works around it
            //(MSR Vivian Swelson workaround)
            FloatParallelArray bmxval = ParallelArrays.MaxVal(pacc, 1);
            DisposableFloatParallelArray bmxvalEvaluated = ParallelArrays.Evaluate(bmxval);

            bmxval = ParallelArrays.AddDimension(bmxvalEvaluated, 1);
            bmxval = ParallelArrays.Stretch(bmxval, 1, m_Parent.NeuronMap.GetLength(0));

            //Winner matrix (0 = winner)
            FloatParallelArray pwinner = ParallelArrays.Subtract(pacc, bmxval);

            //Convert to 1 = winner, 0 otherwise
            FloatParallelArray zero  = new FloatParallelArray(0.0f, pwinner.Shape);
            FloatParallelArray one   = new FloatParallelArray(1.0f, pwinner.Shape);
            BoolParallelArray  bmask = ParallelArrays.CompareEqual(pwinner, zero);

            m_PWinner = ParallelArrays.Cond(bmask, one, zero);
        }
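In scalar terms, FindBMU() normalizes each weight row and each input row to unit length, so the inner product pacc holds the cosine similarity of every (pattern, neuron) pair, and the best-matching unit for a pattern is the neuron that maximizes it. A CPU-side illustration of that per-pair value (a sketch, not part of the original class):

        //Cosine similarity between one input vector x and one weight vector w.
        //FindBMU() computes this for all pairs at once on the GPU and marks
        //each pattern's maximizing neuron with 1 in m_PWinner.
        private static float CosineSimilarity(float[] x, float[] w)
        {
            float dot = 0f, xx = 0f, ww = 0f;
            for (int i = 0; i < x.Length; i++)
            {
                dot += x[i] * w[i];
                xx  += x[i] * x[i];
                ww  += w[i] * w[i];
            }
            return dot / (float)(System.Math.Sqrt(xx) * System.Math.Sqrt(ww));
        }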