private void UpdateLayer(int PWhichLayer, bool PForward, AutoencoderWeights PWeights)
        {
            // Index of the layer whose states feed this update: the layer below when
            // propagating forward (recognition), the layer above when propagating backward.
            int beginlayer = PWhichLayer - 1;

            if (PForward)
            {
                Utility.WithinBounds("Cannot update this layer!!!", PWhichLayer, 1, numlayers);
            }
            else
            {
                Utility.WithinBounds("Cannot update this layer!!!", PWhichLayer, 0, numlayers - 1);
                beginlayer = PWhichLayer + 1;
            }

            RBMLayer thislayer     = layers[PWhichLayer];
            RBMLayer previouslayer = layers[beginlayer];
            double[] states        = previouslayer.GetStates();

            for (int i = 0; i < thislayer.Count; i++)
            {
                double input = 0;
                for (int j = 0; j < previouslayer.Count; j++)
                {
                    if (!PForward)
                    {
                        // Backward pass: the weight set sits between PWhichLayer and the layer
                        // above it, indexed (lower-layer unit, upper-layer unit).
                        input += PWeights.GetWeightSet(beginlayer - 1).GetWeight(i, j) * states[j];
                    }
                    else
                    {
                        // Forward pass: the weight set sits between the layer below and PWhichLayer.
                        input += PWeights.GetWeightSet(beginlayer).GetWeight(j, i) * states[j];
                    }
                }
                thislayer.SetState(i, input);
            }
        }
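For orientation, here is a minimal usage sketch, not part of the original source, of how UpdateLayer might be driven to run a full recognition pass from the input layer up to the code layer. The wrapper name EncodePassSketch and the assumption that layer 0 already holds the input vector are illustrative only:

        private void EncodePassSketch()
        {
            // Propagate states upward one layer at a time using the recognition weights.
            for (int layer = 1; layer < numlayers; layer++)
            {
                UpdateLayer(layer, true, recognitionweights);
            }
        }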
        private void PerformPreTraining(int PPreSynapticLayer)
        {
            // Bundle this layer's pre-training hyperparameters into a single learning-rate object.
            RBMLearningRate sentlearnrate = new RBMLearningRate(learnrate.prelrweights[PPreSynapticLayer],
                                                                learnrate.prelrbiases[PPreSynapticLayer],
                                                                learnrate.premomweights[PPreSynapticLayer],
                                                                learnrate.premombiases[PPreSynapticLayer]);

            // Train the RBM formed by this layer and the layer above it on this layer's
            // training data, updating the corresponding recognition weight set in place.
            RBMTrainer.Train(layers[PPreSynapticLayer], layers[PPreSynapticLayer + 1], trainingdata[PPreSynapticLayer],
                             sentlearnrate, recognitionweights.GetWeightSet(PPreSynapticLayer));
        }
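As a rough usage sketch, assumed rather than shown in the original, greedy layer-wise pre-training would typically call PerformPreTraining once per adjacent layer pair, from the bottom of the stack upward. The wrapper name below is hypothetical:

        private void PreTrainAllLayersSketch()
        {
            // Train each (layer, layer + 1) RBM in turn; there are numlayers - 1 weight sets.
            for (int layer = 0; layer < numlayers - 1; layer++)
            {
                PerformPreTraining(layer);
            }
        }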
Example #3
        ////////////////////////////////////////////////////////////////////////////////////////////////////
        /// <summary>   Performs the pre-training step for the given layer pair. </summary>
        ///
        /// <param name="PPreSynapticLayer">    Index of the pre-synaptic (lower) layer. </param>
        ////////////////////////////////////////////////////////////////////////////////////////////////////

        private void PerformPreTraining(int PPreSynapticLayer)
        {
            RestrictedBoltzmannMachineLearningRate sentlearnrate = new RestrictedBoltzmannMachineLearningRate(
                learnrate.preLearningRateWeights[PPreSynapticLayer],
                learnrate.preLearningRateBiases[PPreSynapticLayer],
                learnrate.preMomentumWeights[PPreSynapticLayer],
                learnrate.preMomentumBiases[PPreSynapticLayer]);

            RestrictedBoltzmannMachineTrainer.Train(layers[PPreSynapticLayer], layers[PPreSynapticLayer + 1],
                                                    trainingdata[PPreSynapticLayer], sentlearnrate, recognitionweights.GetWeightSet(PPreSynapticLayer));
        }
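The trainer itself is not part of this excerpt. As a heavily assumption-laden sketch, RBM pre-training of this kind is usually one step of contrastive divergence (CD-1) per training vector: a positive phase driven by the data, a reconstruction, a negative phase, and a weight update proportional to the difference of the two correlations. Nothing below reflects the actual RestrictedBoltzmannMachineTrainer implementation; every name is illustrative, and biases and momentum are omitted for brevity:

        // Assumption-based CD-1 sketch, NOT the library's Train implementation.
        private static void ContrastiveDivergenceSketch(double[][] data, double[,] weights, double learningRate)
        {
            int visibleCount = weights.GetLength(0);
            int hiddenCount  = weights.GetLength(1);

            foreach (double[] v0 in data)
            {
                double[] h0 = Propagate(v0, weights, up: true);   // positive phase
                double[] v1 = Propagate(h0, weights, up: false);  // reconstruction
                double[] h1 = Propagate(v1, weights, up: true);   // negative phase

                for (int i = 0; i < visibleCount; i++)
                    for (int j = 0; j < hiddenCount; j++)
                        weights[i, j] += learningRate * (v0[i] * h0[j] - v1[i] * h1[j]);
            }
        }

        // Logistic propagation through the weight matrix, upward (visible -> hidden)
        // or downward (hidden -> visible).
        private static double[] Propagate(double[] source, double[,] weights, bool up)
        {
            int targetCount = up ? weights.GetLength(1) : weights.GetLength(0);
            double[] target = new double[targetCount];

            for (int t = 0; t < targetCount; t++)
            {
                double input = 0;
                for (int s = 0; s < source.Length; s++)
                    input += (up ? weights[s, t] : weights[t, s]) * source[s];
                target[t] = 1.0 / (1.0 + Math.Exp(-input));
            }
            return target;
        }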
        private void WakePhase()
        {
            // Wake phase of wake-sleep fine-tuning: with the network driven by the
            // recognition weights, adjust the generative weights so each layer's actual
            // states are better reconstructed from the states of the layer above.
            for (int i = 0; i < numlayers - 1; i++)
            {
                double[] visstates    = layers[i].GetStates();
                double[] visact       = layers[i].GetActivities();
                double[] hidstates    = layers[i + 1].GetStates();
                double   curlearnrate = learnrate.finelrweights[i];

                for (int j = 0; j < layers[i].Count; j++)
                {
                    for (int k = 0; k < layers[i + 1].Count; k++)
                    {
                        // Nudge the generative weight between visible unit j and hidden unit k.
                        generativeweights.GetWeightSet(i).ModifyWeight(j, k, curlearnrate *
                                                                       CalculateFineTuneTrain(hidstates[k], visstates[j], visact[j]));
                    }
                }
            }
        }
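CalculateFineTuneTrain is not shown in this excerpt. Under the standard wake-sleep generative update it would reduce to the delta rule below; treat this body as an assumption rather than the original helper:

        private static double CalculateFineTuneTrain(double hiddenState, double visibleState, double visibleActivity)
        {
            // Hidden state times (actual visible state minus the generatively predicted activity).
            return hiddenState * (visibleState - visibleActivity);
        }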