Example #1
0
        /// <summary>
        /// Creates a deep copy of this network.
        /// </summary>
        /// <returns>A new, independent <see cref="ParallelPerceptron"/> with the same structure and weights.</returns>
        public INonRecurrentNetwork DeepClone()
        {
            //New network with identical structure, then copy the current weights into it
            ParallelPerceptron clonedNet = new ParallelPerceptron(NumOfInputValues, NumOfGates, Resolution);
            clonedNet.SetWeights(_flatWeights);
            return clonedNet;
        }
Example #2
0
        //Constructor
        /// <summary>
        /// Constructs a parallel perceptron P-Delta rule trainer.
        /// </summary>
        /// <param name="net">PP to be trained</param>
        /// <param name="inputVectorCollection">Predictors (input)</param>
        /// <param name="outputVectorCollection">Ideal outputs (the same number of rows as predictors rows)</param>
        /// <param name="settings">Optional startup parameters of the trainer (defaults are used when null)</param>
        public PDeltaRuleTrainer(ParallelPerceptron net,
                                 List <double[]> inputVectorCollection,
                                 List <double[]> outputVectorCollection,
                                 PDeltaRuleTrainerSettings settings = null
                                 )
        {
            //Use the given settings or fall back to the defaults
            _settings = settings ?? new PDeltaRuleTrainerSettings();
            _net = net;
            _inputVectorCollection = inputVectorCollection;
            _outputVectorCollection = outputVectorCollection;
            //Constants derived from the network's resolution squash coefficient
            _resSquashCoeff = _net.ResSquashCoeff;
            _acceptableError = 1d / (2d * _resSquashCoeff);
            _marginSignificance = 1;
            _clearMargin = 0.05;
            _minM = _acceptableError * _resSquashCoeff;
            _maxM = 4d * _minM;
            //Initial training state
            _learningRate = _settings.IniLR;
            _prevWeights = _net.GetWeights();
            _prevMSE = 0;
            _currMSE = 0;
            _epoch = 0;
            //Split the sample rows into contiguous ranges, one per parallel worker
            _workerRangeCollection = new List<WorkerRange>();
            int workerCount = Math.Max(1, Math.Min(Environment.ProcessorCount, _inputVectorCollection.Count));
            int batchSize = _inputVectorCollection.Count / workerCount;
            int startRow = 0;
            for (int i = 0; i < workerCount; i++)
            {
                //The last worker also takes any remainder rows left by the integer division
                int endRow = (i == workerCount - 1) ? (_inputVectorCollection.Count - 1) : (startRow + batchSize - 1);
                _workerRangeCollection.Add(new WorkerRange(startRow, endRow, _net.NumOfWeights));
                startRow += batchSize;
            }
        }
Example #3
0
        //Constructor
        /// <summary>
        /// Constructs a parallel perceptron P-Delta rule trainer.
        /// </summary>
        /// <param name="net">PP to be trained</param>
        /// <param name="inputVectorCollection">Predictors (input)</param>
        /// <param name="outputVectorCollection">Ideal outputs (the same number of rows as predictors rows)</param>
        /// <param name="settings">Configuration of the trainer</param>
        /// <param name="rand">Random object to be used</param>
        /// <exception cref="ArgumentNullException">Thrown when a required argument is null.</exception>
        /// <exception cref="ArgumentException">Thrown when the input and output collections differ in length.</exception>
        public PDeltaRuleTrainer(ParallelPerceptron net,
                                 List <double[]> inputVectorCollection,
                                 List <double[]> outputVectorCollection,
                                 PDeltaRuleTrainerSettings settings,
                                 Random rand
                                 )
        {
            //Argument checks (previously a null settings/net surfaced only as an opaque NullReferenceException)
            if (net == null)
            {
                throw new ArgumentNullException(nameof(net));
            }
            if (inputVectorCollection == null)
            {
                throw new ArgumentNullException(nameof(inputVectorCollection));
            }
            if (outputVectorCollection == null)
            {
                throw new ArgumentNullException(nameof(outputVectorCollection));
            }
            if (settings == null)
            {
                throw new ArgumentNullException(nameof(settings));
            }
            if (rand == null)
            {
                throw new ArgumentNullException(nameof(rand));
            }
            if (inputVectorCollection.Count != outputVectorCollection.Count)
            {
                //Documented contract: ideal outputs must have the same number of rows as predictors
                throw new ArgumentException("Number of output vectors differs from number of input vectors.", nameof(outputVectorCollection));
            }
            //Parameters (own deep clone of the settings so later external changes cannot affect the trainer)
            _settings               = (PDeltaRuleTrainerSettings)settings.DeepClone();
            MaxAttempt              = _settings.NumOfAttempts;
            MaxAttemptEpoch         = _settings.NumOfAttemptEpochs;
            _net                    = net;
            _rand                   = rand;
            _inputVectorCollection  = inputVectorCollection;
            _outputVectorCollection = outputVectorCollection;
            //Constants derived from the network's resolution squash coefficient
            _resSquashCoeff         = _net.ResSquashCoeff;
            _acceptableError        = 1d / (2d * _resSquashCoeff);
            _marginSignificance     = 1;
            _clearMargin            = 0.05;
            _minM                   = _acceptableError * _resSquashCoeff;
            _maxM                   = 4d * _minM;
            //Parallel workers / batch ranges preparation
            _workerRangeCollection = new List <WorkerRange>();
            int numOfWorkers = Math.Min(Environment.ProcessorCount, _inputVectorCollection.Count);

            numOfWorkers = Math.Max(1, numOfWorkers);
            int workerBatchSize = _inputVectorCollection.Count / numOfWorkers;

            for (int workerIdx = 0, fromRow = 0; workerIdx < numOfWorkers; workerIdx++, fromRow += workerBatchSize)
            {
                //The last worker also takes any remainder rows left by the integer division
                int toRow;
                if (workerIdx == numOfWorkers - 1)
                {
                    toRow = _inputVectorCollection.Count - 1;
                }
                else
                {
                    toRow = (fromRow + workerBatchSize) - 1;
                }
                WorkerRange workerRange = new WorkerRange(fromRow, toRow, _net.NumOfWeights);
                _workerRangeCollection.Add(workerRange);
            }
            InfoMessage = string.Empty;
            //Start training attempt
            Attempt = 0;
            NextAttempt();
        }