Example #1
        public static void CalculateAverageAndMaximumAccel(AcceleratorTarget acceleratorTarget, float[,] imageData, out float average, out float maximum)
        {
            var target = GetAcceleratorTarget(acceleratorTarget);

            int width  = imageData.GetLength(0);
            int height = imageData.GetLength(1);

            //Upload the image to the accelerator as a data-parallel array
            var fpInput = new FloatParallelArray(imageData);

            //Average = sum of all elements divided by the element count
            var fpAverage = ParallelArrays.Sum(fpInput);

            fpAverage = ParallelArrays.Divide(fpAverage, (float)(width * height));
            //Pad to [average, 0] so both results can be read back in a single transfer
            fpAverage = ParallelArrays.Pad(fpAverage, new int[] { 0 }, new int[] { 1 }, 0.0f);

            var fpMaximum = ParallelArrays.MaxVal(fpInput);

            //Pad to [0, maximum]
            fpMaximum = ParallelArrays.Pad(fpMaximum, new int[] { 1 }, new int[] { 0 }, 0.0f);

            //[average, 0] + [0, maximum] = [average, maximum]
            var fpOutput = ParallelArrays.Add(fpAverage, fpMaximum);

            //Evaluate on the target and copy the two values back to the CPU
            var output = target.ToArray1D(fpOutput);

            average = output[0];
            maximum = output[1];
        }
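A minimal call sketch for the method above. The AcceleratorTarget.DX9 value and the behaviour of GetAcceleratorTarget are assumptions, since neither the enum nor the helper appears in the snippet; the 4x4 input values are made up.

        //Hypothetical usage: the average of 0..15 is 7.5, the maximum is 15
        var data = new float[4, 4];
        for (int y = 0; y < 4; ++y)
            for (int x = 0; x < 4; ++x)
                data[y, x] = y * 4 + x;

        float average, maximum;
        CalculateAverageAndMaximumAccel(AcceleratorTarget.DX9, data, out average, out maximum);
        //average == 7.5f, maximum == 15.0f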
Example #2
        public override void DoEpoch(float t, float round_t)
        {
            this.FindBMU();

            //Slice m_PWinner row by row and apply the neighborhood update for each pattern
            m_PWinner = ParallelArrays.Evaluate(m_PWinner);

            Slice[] slices = new Slice[2];
            for (int i = 0; i < m_Parent.DataSource.PatternCount; ++i)
            {
                slices[1] = new Slice(0, m_Parent.NeuronMap.GetLength(0));
                slices[0] = new Slice(i, 1);

                FloatParallelArray s = ParallelArrays.Section(m_PWinner, slices);
                s = ParallelArrays.Evaluate(s);
                FloatParallelArray bmuw = ParallelArrays.DropDimension(ParallelArrays.InnerProduct(s, m_GPUWeight), 0);
                FloatParallelArray bmuc = ParallelArrays.InnerProduct(s, m_GPUCoord);

                //Compute distances to bmu
                DisposableFloatParallelArray bmucEvaluated = ParallelArrays.Evaluate(bmuc);     //Workaround
                bmuc = ParallelArrays.Stretch(bmucEvaluated, m_Parent.NeuronMap.GetLength(0), 1);
                FloatParallelArray diff = ParallelArrays.Subtract(m_GPUCoord, bmuc);
                FloatParallelArray dist = ParallelArrays.Multiply(diff, diff);
                dist = ParallelArrays.Sum(dist, 1);
                dist = ParallelArrays.Multiply(-1.0f, dist);

                //Apply update formula
                FloatParallelArray constE = new FloatParallelArray((float)(Math.E), m_Parent.NeuronMap.GetLength(0));
                FloatParallelArray sigma  = new FloatParallelArray((float)(Math.Pow(Neighborhood(t, round_t) * 0.85, 2)), m_Parent.NeuronMap.GetLength(0));
                FloatParallelArray lrate  = new FloatParallelArray((float)LearningRate(t, round_t), m_Parent.NeuronMap.GetLength(0), m_Parent.DataSource.PatternLength);
                FloatParallelArray omeg   = ParallelArrays.Divide(dist, sigma);

                //FloatParallelArray momeg = ParallelArrays.Pow(constE, omeg);
                FloatParallelArray           momeg = ParallelArrays.Pow2(ParallelArrays.Log2(constE) * omeg);
                DisposableFloatParallelArray domeg = ParallelArrays.Evaluate(momeg);         //Workaround
                omeg = ParallelArrays.AddDimension(domeg, 1);
                omeg = ParallelArrays.Stretch(omeg, 1, m_Parent.DataSource.PatternLength);
                FloatParallelArray sbmuw = ParallelArrays.AddDimension(bmuw, 0);
                sbmuw = ParallelArrays.Stretch(sbmuw, m_Parent.NeuronMap.GetLength(0), 1);

                m_GPUWeight = ((m_GPUWeight + ((sbmuw - m_GPUWeight) * omeg * lrate)));
            }
            m_GPUWeight = ParallelArrays.Evaluate(m_GPUWeight);
        }
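Reading the expressions in the loop above (and assuming the Accelerator operations behave element-wise as their names suggest), the update applied to each neuron i for the current pattern is

        w_i <- w_i + LearningRate(t, round_t) * exp(-|c_i - c_bmu|^2 / (0.85 * Neighborhood(t, round_t))^2) * (w_bmu - w_i)

where c_i is neuron i's map coordinate (a row of m_GPUCoord), and c_bmu, w_bmu are the winning neuron's coordinate and weight rows. Note that the weights are pulled toward the winner's weight row (bmuw = InnerProduct(s, m_GPUWeight)), not toward the raw input pattern, and that the Pow2/Log2 pair simply rewrites e^x as 2^(log2(e) * x) in place of the commented-out Pow call.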
Example #3
        public override void FindBMU()
        {
            //Normalize the weight vector
            FloatParallelArray transpose    = ParallelArrays.Transpose(m_GPUWeight, 1, 0);
            FloatParallelArray weightsq     = ParallelArrays.InnerProduct(m_GPUWeight, transpose);
            FloatParallelArray weightsum    = ParallelArrays.Sum(weightsq, 0);
            FloatParallelArray weightlength = ParallelArrays.Sqrt(weightsum);

            weightlength = ParallelArrays.Stretch(ParallelArrays.AddDimension(weightlength, 1), 1, m_Parent.DataSource.PatternLength);
            FloatParallelArray weightnorm = ParallelArrays.Divide(m_GPUWeight, weightlength);

            weightnorm = ParallelArrays.Transpose(weightnorm, 1, 0);

            //Normalize the input vector
            FloatParallelArray inputsq     = ParallelArrays.InnerProduct(m_GPUInput, ParallelArrays.Transpose(m_GPUInput, 1, 0));
            FloatParallelArray inputsum    = ParallelArrays.Sum(inputsq, 0);
            FloatParallelArray inputlength = ParallelArrays.Sqrt(inputsum);

            inputlength = ParallelArrays.Stretch(ParallelArrays.AddDimension(inputlength, 1), 1, m_Parent.DataSource.PatternLength);
            FloatParallelArray inputnorm = ParallelArrays.Divide(m_GPUInput, inputlength);

            FloatParallelArray pacc = ParallelArrays.InnerProduct(inputnorm, weightnorm);

            //Replication bug here...
            FloatParallelArray bmxval = ParallelArrays.MaxVal(pacc, 1);
            //MSR Vivian Swelson workaround
            DisposableFloatParallelArray bmxvalEvaluated = ParallelArrays.Evaluate(bmxval);

            bmxval = ParallelArrays.AddDimension(bmxvalEvaluated, 1);
            bmxval = ParallelArrays.Stretch(bmxval, 1, m_Parent.NeuronMap.GetLength(0));

            //Winner matrix (0 = winner)
            FloatParallelArray pwinner = ParallelArrays.Subtract(pacc, bmxval);

            //Convert to 1 = winner, 0 otherwise
            FloatParallelArray zero  = new FloatParallelArray(0.0f, pwinner.Shape);
            FloatParallelArray one   = new FloatParallelArray(1.0f, pwinner.Shape);
            BoolParallelArray  bmask = ParallelArrays.CompareEqual(pwinner, zero);

            m_PWinner = ParallelArrays.Cond(bmask, one, zero);
        }
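The tail of the method is the part that reads off directly: pacc holds one similarity score per (pattern, neuron) pair, MaxVal(pacc, 1) picks the best score in each pattern row, subtracting the stretched maximum leaves 0 exactly at the winners, and CompareEqual/Cond turn that into a one-hot winner matrix. A CPU-side sketch of that masking step, assuming pacc is laid out as [pattern, neuron]:

        //Hypothetical scalar equivalent of the MaxVal/CompareEqual/Cond sequence
        static float[,] WinnerMask(float[,] pacc)
        {
            int patterns = pacc.GetLength(0), neurons = pacc.GetLength(1);
            var pwinner = new float[patterns, neurons];
            for (int p = 0; p < patterns; ++p)
            {
                //Best score in this pattern row
                float best = float.MinValue;
                for (int n = 0; n < neurons; ++n)
                    best = Math.Max(best, pacc[p, n]);
                //1 at the winning neuron, 0 elsewhere
                for (int n = 0; n < neurons; ++n)
                    pwinner[p, n] = (pacc[p, n] == best) ? 1.0f : 0.0f;
            }
            return pwinner;
        }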