Example #1
        //Constructor
        /// <summary>
        /// Instantiates the RPropTrainer.
        /// </summary>
        /// <param name="net">
        /// The feed forward network to be trained (its structure must already be finalized)
        /// </param>
        /// <param name="inputVectorCollection">
        /// Collection of the training input vectors
        /// </param>
        /// <param name="outputVectorCollection">
        /// Collection of the desired outputs
        /// </param>
        /// <param name="settings">
        /// Optional trainer parameters; default parameters are used when null
        /// </param>
        public RPropTrainer(FeedForwardNetwork net,
                            List<double[]> inputVectorCollection,
                            List<double[]> outputVectorCollection,
                            RPropTrainerSettings settings = null
                            )
        {
            if (!net.Finalized)
            {
                //Gradient buffers are sized from NumOfWeights, so the structure must be final
                throw new InvalidOperationException("Can't create trainer. Network structure was not finalized.");
            }
            //Use default parameters when none were supplied
            _settings = settings ?? new RPropTrainerSettings();
            _net = net;
            _inputVectorCollection  = inputVectorCollection;
            _outputVectorCollection = outputVectorCollection;
            //Per-weight accumulators: current gradients, previous gradients,
            //previous update step sizes (seeded with IniDelta) and previous weight changes
            _weigthsGradsAcc        = new double[_net.NumOfWeights];
            _weigthsGradsAcc.Populate(0);
            _weigthsPrevGradsAcc = new double[_net.NumOfWeights];
            _weigthsPrevGradsAcc.Populate(0);
            _weigthsPrevDeltas = new double[_net.NumOfWeights];
            _weigthsPrevDeltas.Populate(_settings.IniDelta);
            _weigthsPrevChanges = new double[_net.NumOfWeights];
            _weigthsPrevChanges.Populate(0);
            _prevMSE = 0;
            _lastMSE = 0;
            _epoch   = 0;
            //Parallel gradient workers / batch ranges preparation.
            //At most one worker per sample row, at least one worker overall.
            _workerRangeCollection = new List<WorkerRange>();
            int numOfWorkers = Math.Max(1, Math.Min(Environment.ProcessorCount, _inputVectorCollection.Count));
            int workerBatchSize = _inputVectorCollection.Count / numOfWorkers;

            for (int workerIdx = 0, fromRow = 0; workerIdx < numOfWorkers; workerIdx++, fromRow += workerBatchSize)
            {
                WorkerRange workerRange = new WorkerRange();
                workerRange.FromRow = fromRow;
                //The last worker also takes the rows remaining after the integer division
                workerRange.ToRow = (workerIdx == numOfWorkers - 1)
                                    ? (_inputVectorCollection.Count - 1)
                                    : (fromRow + workerBatchSize - 1);
                _workerRangeCollection.Add(workerRange);
            }
            return;
        }
Example #2
        //Constructor
        /// <summary>
        /// Constructs a parallel perceptron P-Delta rule trainer.
        /// </summary>
        /// <param name="net">PP to be trained</param>
        /// <param name="inputVectorCollection">Predictors (input)</param>
        /// <param name="outputVectorCollection">Ideal outputs (the same number of rows as predictors rows)</param>
        /// <param name="settings">Optional startup parameters of the trainer; defaults are used when null</param>
        public PDeltaRuleTrainer(ParallelPerceptron net,
                                 List<double[]> inputVectorCollection,
                                 List<double[]> outputVectorCollection,
                                 PDeltaRuleTrainerSettings settings = null
                                 )
        {
            //Parameters (default parameters are used when settings is null)
            _settings = settings ?? new PDeltaRuleTrainerSettings();
            _net = net;
            _inputVectorCollection  = inputVectorCollection;
            _outputVectorCollection = outputVectorCollection;
            _resSquashCoeff         = _net.ResSquashCoeff;
            //Acceptable error corresponds to half of the output resolution step
            _acceptableError        = 1d / (2d * _resSquashCoeff);
            _marginSignificance     = 1;
            _clearMargin            = 0.05;
            _minM         = _acceptableError * _resSquashCoeff;
            _maxM         = 4d * _minM;
            _learningRate = _settings.IniLR;
            _prevWeights  = _net.GetWeights();
            _prevMSE      = 0;
            _currMSE      = 0;
            _epoch        = 0;
            //Parallel workers / batch ranges preparation.
            //At most one worker per sample row, at least one worker overall.
            _workerRangeCollection = new List<WorkerRange>();
            int numOfWorkers = Math.Max(1, Math.Min(Environment.ProcessorCount, _inputVectorCollection.Count));
            int workerBatchSize = _inputVectorCollection.Count / numOfWorkers;

            for (int workerIdx = 0, fromRow = 0; workerIdx < numOfWorkers; workerIdx++, fromRow += workerBatchSize)
            {
                //The last worker also takes the rows remaining after the integer division
                int toRow = (workerIdx == numOfWorkers - 1)
                            ? (_inputVectorCollection.Count - 1)
                            : (fromRow + workerBatchSize - 1);
                _workerRangeCollection.Add(new WorkerRange(fromRow, toRow, _net.NumOfWeights));
            }
            return;
        }
Example #3
        //Constructor
        /// <summary>
        /// Creates an initialized instance and immediately starts the first training attempt.
        /// </summary>
        /// <param name="net">The PP to be trained.</param>
        /// <param name="inputVectorCollection">The input vectors (input).</param>
        /// <param name="outputVectorCollection">The output vectors (ideal).</param>
        /// <param name="cfg">The configuration of the trainer.</param>
        /// <param name="rand">The random object to be used.</param>
        public PDeltaRuleTrainer(ParallelPerceptron net,
                                 List<double[]> inputVectorCollection,
                                 List<double[]> outputVectorCollection,
                                 PDeltaRuleTrainerSettings cfg,
                                 Random rand
                                 )
        {
            //Keep a private deep copy so later changes to cfg can not affect the trainer
            _cfg = (PDeltaRuleTrainerSettings)cfg.DeepClone();
            MaxAttempt = _cfg.NumOfAttempts;
            MaxAttemptEpoch = _cfg.NumOfAttemptEpochs;
            _net = net;
            _rand = rand;
            _inputVectorCollection = inputVectorCollection;
            _outputVectorCollection = outputVectorCollection;
            _resSquashCoeff = _net.ResSquashCoeff;
            _acceptableError = 1d / (2d * _resSquashCoeff);
            _marginSignificance = 1;
            _clearMargin = 0.05;
            _minM = _acceptableError * _resSquashCoeff;
            _maxM = 4d * _minM;
            //Split the sample rows into contiguous batches, one batch per parallel worker
            _workerRangeCollection = new List<WorkerRange>();
            int sampleCount = _inputVectorCollection.Count;
            int workerCount = Math.Max(1, Math.Min(Environment.ProcessorCount, sampleCount));
            int batchSize = sampleCount / workerCount;
            int batchStart = 0;
            for (int i = 0; i < workerCount; i++)
            {
                //The last worker also covers the remainder rows of the integer division
                int batchEnd = (i == workerCount - 1) ? (sampleCount - 1) : (batchStart + batchSize - 1);
                _workerRangeCollection.Add(new WorkerRange(batchStart, batchEnd, _net.NumOfWeights));
                batchStart += batchSize;
            }
            InfoMessage = string.Empty;
            //Start the first training attempt
            Attempt = 0;
            NextAttempt();
            return;
        }