Code example #1
        //Methods
        /// <inheritdoc/>
        protected override void Check()
        {
            if (!FeedForwardNetwork.IsAllowedOutputAF(OutputActivationCfg))
            {
                throw new ArgumentException($"Specified output activation function can't be used in FF network's output activation.", "OutputActivationCfg");
            }
            if (TrainerCfg == null)
            {
                throw new ArgumentNullException("TrainerCfg", "TrainerCfg can not be null.");
            }
            Type trainerType = TrainerCfg.GetType();

            if (trainerType != typeof(QRDRegrTrainerSettings) &&
                trainerType != typeof(RidgeRegrTrainerSettings) &&
                trainerType != typeof(ElasticRegrTrainerSettings) &&
                trainerType != typeof(RPropTrainerSettings)
                )
            {
                throw new ArgumentException($"Unsupported TrainerCfg {trainerType.Name}.", "TrainerCfg");
            }
            if ((HiddenLayersCfg.HiddenLayerCfgCollection.Count > 0 || OutputActivationCfg.GetType() != typeof(AFAnalogIdentitySettings)) &&
                trainerType != typeof(RPropTrainerSettings)
                )
            {
                throw new ArgumentException($"Improper type of trainer {trainerType.Name}. For FF having other than Identity output activation or containing hidden layers can be used only Resilient back propagation trainer.", "TrainerCfg");
            }
            return;
        }
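
The rule enforced above is that the regression trainers only handle a single linear output layer, so a FF network with hidden layers or a non-Identity output activation must use the Resilient back propagation trainer. Below is a minimal standalone sketch of the same whitelist-style check; the types and names are hypothetical placeholders, not the actual RCNet settings classes.

using System;

//Hypothetical placeholder settings types, used only to illustrate the rule
public class RPropCfg { }
public class RidgeCfg { }

public static class TrainerCompatibility
{
    //Mirrors the check above: a network with hidden layers or a non-identity
    //output activation may only use the RProp-style trainer
    public static void Check(object trainerCfg, bool hasHiddenLayers, bool identityOutput)
    {
        if (trainerCfg == null)
        {
            throw new ArgumentNullException(nameof(trainerCfg));
        }
        if ((hasHiddenLayers || !identityOutput) && trainerCfg.GetType() != typeof(RPropCfg))
        {
            throw new ArgumentException($"Improper type of trainer {trainerCfg.GetType().Name}.", nameof(trainerCfg));
        }
    }
}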
Code example #2
 //Methods
 /// <inheritdoc/>
 protected override void Check()
 {
     if (NumOfNeurons < 1)
     {
         throw new ArgumentException($"Invalid NumOfNeurons {NumOfNeurons.ToString(CultureInfo.InvariantCulture)}. NumOfNeurons must be GT 0.", "NumOfNeurons");
     }
     if (!FeedForwardNetwork.IsAllowedHiddenAF(ActivationCfg))
     {
         throw new ArgumentException($"Specified activation function can't be used in the hidden layer of a FF network.", "ActivationCfg");
     }
     return;
 }
Code example #3
File: FeedForwardNetwork.cs Project: okozelsk/NET
        /// <inheritdoc/>
        public INonRecurrentNetwork DeepClone()
        {
            FeedForwardNetwork clone = new FeedForwardNetwork(NumOfInputValues, NumOfOutputValues)
            {
                NumOfNeurons = NumOfNeurons
            };

            foreach (Layer layer in LayerCollection)
            {
                clone.LayerCollection.Add(layer.DeepClone());
            }
            clone._flatWeights = (double[])_flatWeights.Clone();
            clone._isAllowedNguyenWidrowRandomization = _isAllowedNguyenWidrowRandomization;
            return(clone);
        }
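
DeepClone copies the flat weight array via Array.Clone and deep-clones each layer, so the clone shares no mutable state with the original network. The following is a minimal standalone sketch of the same pattern, using hypothetical Layer/Network placeholder types rather than the RCNet classes.

using System;
using System.Collections.Generic;

//Hypothetical placeholder types illustrating the deep-copy pattern above
public class Layer
{
    public double[] Biases = Array.Empty<double>();
    //Clone the value array so the copy owns its own data
    public Layer DeepClone() => new Layer { Biases = (double[])Biases.Clone() };
}

public class Network
{
    public List<Layer> Layers = new List<Layer>();
    public double[] FlatWeights = Array.Empty<double>();

    public Network DeepClone()
    {
        Network clone = new Network { FlatWeights = (double[])FlatWeights.Clone() };
        //Deep-clone reference members element by element
        foreach (Layer layer in Layers)
        {
            clone.Layers.Add(layer.DeepClone());
        }
        return clone;
    }
}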
Code example #4
File: QRDRegrTrainer.cs Project: thild/NET
 //Constructor
 /// <summary>
 /// Constructs an initialized instance
 /// </summary>
 /// <param name="net">FF network to be trained</param>
 /// <param name="inputVectorCollection">Predictors (input)</param>
 /// <param name="outputVectorCollection">Ideal outputs (the same number of rows as number of inputs)</param>
 /// <param name="rand">Random object to be used for adding a white-noise to predictors</param>
 /// <param name="settings">Startup parameters of the trainer</param>
 public QRDRegrTrainer(FeedForwardNetwork net,
                       List <double[]> inputVectorCollection,
                       List <double[]> outputVectorCollection,
                       QRDRegrTrainerSettings settings,
                       Random rand
                       )
 {
     //Check network readiness
     if (!net.Finalized)
     {
         throw new InvalidOperationException($"Can't create trainer. Network structure was not finalized.");
     }
     //Check network conditions
     if (net.LayerCollection.Count != 1 || !(net.LayerCollection[0].Activation is Identity))
     {
         throw new InvalidOperationException($"Can't create trainer. Network structure is not compliant (a single layer having Identity activation is required).");
     }
     //Check samples conditions
     if (inputVectorCollection.Count < inputVectorCollection[0].Length + 1)
     {
         throw new InvalidOperationException($"Can't create trainer. Insufficient number of training samples {inputVectorCollection.Count}. Minimum is {(inputVectorCollection[0].Length + 1)}.");
     }
     }
     //Parameters
     _settings                        = settings;
     MaxAttempt                       = _settings.NumOfAttempts;
     MaxAttemptEpoch                  = _settings.NumOfAttemptEpochs;
     _net                             = net;
     _rand                            = rand;
     _inputVectorCollection           = inputVectorCollection;
     _outputVectorCollection          = outputVectorCollection;
     _outputSingleColMatrixCollection = new List <Matrix>(_net.NumOfOutputValues);
     for (int outputIdx = 0; outputIdx < _net.NumOfOutputValues; outputIdx++)
     {
         Matrix outputSingleColMatrix = new Matrix(_outputVectorCollection.Count, 1);
         for (int row = 0; row < _outputVectorCollection.Count; row++)
         {
             //Output
             outputSingleColMatrix.Data[row][0] = _outputVectorCollection[row][outputIdx];
         }
         _outputSingleColMatrixCollection.Add(outputSingleColMatrix);
     }
     //Start training attempt
     Attempt = 0;
     NextAttempt();
     return;
 }
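
The loop above reshapes the ideal outputs into one single-column matrix per output neuron, so that each output can later be solved as an independent least-squares problem. Here is a standalone sketch of that column split, using plain double[] columns instead of the library's Matrix type (an assumption made for illustration only).

using System;
using System.Collections.Generic;

public static class ColumnSplit
{
    //Splits row-oriented ideal outputs into one column per output index
    public static List<double[]> SplitByOutput(List<double[]> outputVectors, int numOfOutputs)
    {
        List<double[]> columns = new List<double[]>(numOfOutputs);
        for (int outputIdx = 0; outputIdx < numOfOutputs; outputIdx++)
        {
            double[] column = new double[outputVectors.Count];
            for (int row = 0; row < outputVectors.Count; row++)
            {
                column[row] = outputVectors[row][outputIdx];
            }
            columns.Add(column);
        }
        return columns;
    }
}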
Code example #5
        //Constructor
        /// <summary>
        /// Instantiates the RPropTrainer
        /// </summary>
        /// <param name="net">The FF network to be trained.</param>
        /// <param name="inputVectorCollection">The input vectors (input).</param>
        /// <param name="outputVectorCollection">The output vectors (ideal).</param>
        /// <param name="cfg">The configuration of the trainer.</param>
        /// <param name="rand">The random object to be used.</param>
        public RPropTrainer(FeedForwardNetwork net,
                            List <double[]> inputVectorCollection,
                            List <double[]> outputVectorCollection,
                            RPropTrainerSettings cfg,
                            Random rand
                            )
        {
            if (!net.Finalized)
            {
                throw new InvalidOperationException($"Can´t create trainer. Network structure was not finalized.");
            }
            _cfg                    = cfg;
            MaxAttempt              = _cfg.NumOfAttempts;
            MaxAttemptEpoch         = _cfg.NumOfAttemptEpochs;
            _net                    = net;
            _rand                   = rand;
            _inputVectorCollection  = inputVectorCollection;
            _outputVectorCollection = outputVectorCollection;
            _weigthsGradsAcc        = new double[_net.NumOfWeights];
            _weigthsPrevGradsAcc    = new double[_net.NumOfWeights];
            _weigthsPrevDeltas      = new double[_net.NumOfWeights];
            _weigthsPrevChanges     = new double[_net.NumOfWeights];
            //Parallel gradient workers (batch ranges) preparation
            int numOfWorkers = Math.Max(1, Math.Min(Environment.ProcessorCount - 1, _inputVectorCollection.Count));

            _gradientWorkerDataCollection = new GradientWorkerData[numOfWorkers];
            int workerBatchSize = _inputVectorCollection.Count / numOfWorkers;

            for (int workerIdx = 0, fromRow = 0; workerIdx < numOfWorkers; workerIdx++, fromRow += workerBatchSize)
            {
                GradientWorkerData gwd = new GradientWorkerData
                                         (
                    fromRow: fromRow,
                    toRow: (workerIdx == numOfWorkers - 1 ? _inputVectorCollection.Count - 1 : (fromRow + workerBatchSize) - 1),
                    numOfWeights: _net.NumOfWeights
                                         );
                _gradientWorkerDataCollection[workerIdx] = gwd;
            }
            InfoMessage = string.Empty;
            //Start training attempt
            Attempt = 0;
            NextAttempt();
            return;
        }
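
The constructor splits the training samples into contiguous row ranges, one per gradient worker, with the last worker absorbing any remainder rows. The partitioning logic in isolation looks like the sketch below (names are illustrative, not the RCNet API).

using System;
using System.Collections.Generic;

public static class RangeSplit
{
    //Returns one inclusive [from, to] row range per worker; the last worker
    //takes the remainder when the row count is not divisible by the workers
    public static List<(int FromRow, int ToRow)> Split(int numOfRows, int processorCount)
    {
        int numOfWorkers = Math.Max(1, Math.Min(processorCount - 1, numOfRows));
        int batchSize = numOfRows / numOfWorkers;
        List<(int FromRow, int ToRow)> ranges = new List<(int FromRow, int ToRow)>(numOfWorkers);
        for (int workerIdx = 0, fromRow = 0; workerIdx < numOfWorkers; workerIdx++, fromRow += batchSize)
        {
            int toRow = (workerIdx == numOfWorkers - 1) ? numOfRows - 1 : fromRow + batchSize - 1;
            ranges.Add((fromRow, toRow));
        }
        return ranges;
    }
}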
Code example #6
File: ElasticRegrTrainer.cs Project: okozelsk/NET
        //Constructor
        /// <summary>
        /// Creates an initialized instance.
        /// </summary>
        /// <param name="net">The FF network to be trained.</param>
        /// <param name="inputVectorCollection">The input vectors (input).</param>
        /// <param name="outputVectorCollection">The output vectors (ideal).</param>
        /// <param name="cfg">The configuration of the trainer.</param>
        public ElasticRegrTrainer(FeedForwardNetwork net,
                                  List <double[]> inputVectorCollection,
                                  List <double[]> outputVectorCollection,
                                  ElasticRegrTrainerSettings cfg
                                  )
        {
            //Check network readiness
            if (!net.Finalized)
            {
                throw new InvalidOperationException($"Can't create trainer. Network structure was not finalized.");
            }
            //Check network conditions
            if (net.LayerCollection.Count != 1 || !(net.LayerCollection[0].Activation is AFAnalogIdentity))
            {
                throw new InvalidOperationException($"Can't create trainer. Network structure is not compliant (a single layer having Identity activation is required).");
            }
            //Check samples conditions
            if (inputVectorCollection.Count == 0)
            {
                throw new InvalidOperationException($"Can't create trainer. Missing training samples.");
            }
            }
            //Collections
            _inputVectorCollection  = new List <double[]>(inputVectorCollection);
            _outputVectorCollection = new List <double[]>(outputVectorCollection);
            var rangePartitioner = Partitioner.Create(0, _inputVectorCollection.Count);

            _parallelRanges = new List <Tuple <int, int> >(rangePartitioner.GetDynamicPartitions());
            //Parameters
            _cfg            = cfg;
            MaxAttempt      = _cfg.NumOfAttempts;
            MaxAttemptEpoch = _cfg.NumOfAttemptEpochs;
            Attempt         = 1;
            AttemptEpoch    = 0;
            _net            = net;
            _gamma          = _cfg.Lambda * _cfg.Alpha;
            return;
        }
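
Unlike the RProp trainer, this constructor delegates the range splitting to Partitioner.Create, which yields Tuple<int, int> chunks (inclusive lower bound, exclusive upper bound) suitable for later parallel processing. The field _gamma is set to Lambda * Alpha, which in the usual elastic-net parameterization is presumably the coefficient of the L1 part of the penalty. The range preparation step in isolation:

using System;
using System.Collections.Concurrent;
using System.Collections.Generic;

public static class ParallelRangesDemo
{
    //Builds the same kind of range list as above: each tuple is
    //(from inclusive, to exclusive) over the sample indices
    public static List<Tuple<int, int>> Prepare(int numOfSamples)
    {
        var rangePartitioner = Partitioner.Create(0, numOfSamples);
        return new List<Tuple<int, int>>(rangePartitioner.GetDynamicPartitions());
    }
}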
Code example #7
File: RidgeRegrTrainer.cs Project: thild/NET
        //Constructor
        /// <summary>
        /// Constructs an initialized instance
        /// </summary>
        /// <param name="net">FF network to be trained</param>
        /// <param name="inputVectorCollection">Predictors (input)</param>
        /// <param name="outputVectorCollection">Ideal outputs (the same number of rows as number of inputs)</param>
        /// <param name="settings">Optional startup parameters of the trainer</param>
        public RidgeRegrTrainer(FeedForwardNetwork net,
                                List <double[]> inputVectorCollection,
                                List <double[]> outputVectorCollection,
                                RidgeRegrTrainerSettings settings
                                )
        {
            //Check network readiness
            if (!net.Finalized)
            {
                throw new InvalidOperationException($"Can't create trainer. Network structure was not finalized.");
            }
            //Check network conditions
            if (net.LayerCollection.Count != 1 || !(net.LayerCollection[0].Activation is Identity))
            {
                throw new InvalidOperationException($"Can't create trainer. Network structure is not compliant (a single layer having Identity activation is required).");
            }
            //Check samples conditions
            if (inputVectorCollection.Count == 0)
            {
                throw new InvalidOperationException($"Can't create trainer. Missing training samples.");
            }
            }
            //Collections
            _inputVectorCollection  = new List <double[]>(inputVectorCollection);
            _outputVectorCollection = new List <double[]>(outputVectorCollection);
            //Parameters
            _settings       = settings;
            MaxAttempt      = _settings.NumOfAttempts;
            MaxAttemptEpoch = _settings.NumOfAttemptEpochs;
            Attempt         = 1;
            AttemptEpoch    = 0;
            _net            = net;
            _outputSingleColVectorCollection = new List <Vector>(_net.NumOfOutputValues);
            for (int outputIdx = 0; outputIdx < _net.NumOfOutputValues; outputIdx++)
            {
                Vector outputSingleColVector = new Vector(outputVectorCollection.Count);
                for (int row = 0; row < outputVectorCollection.Count; row++)
                {
                    //Output
                    outputSingleColVector.Data[row] = outputVectorCollection[row][outputIdx];
                }
                _outputSingleColVectorCollection.Add(outputSingleColVector);
            }
            //Lambda seeker
            _lambdaSeeker = new ParamSeeker(_settings.LambdaSeekerCfg);
            _currLambda   = 0;
            //Matrix setup
            Matrix X = new Matrix(inputVectorCollection.Count, _net.NumOfInputValues + 1);

            for (int row = 0; row < inputVectorCollection.Count; row++)
            {
                //Add constant bias
                X.Data[row][0] = 1d;
                //Add predictors
                inputVectorCollection[row].CopyTo(X.Data[row], 1);
            }
            _XT     = X.Transpose();
            _XTdotX = _XT * X;
            _XTdotY = new Vector[_net.NumOfOutputValues];
            for (int outputIdx = 0; outputIdx < _net.NumOfOutputValues; outputIdx++)
            {
                _XTdotY[outputIdx] = _XT * _outputSingleColVectorCollection[outputIdx];
            }
            return;
        }
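
The constructor pre-computes everything in the ridge normal equations that does not depend on lambda: the design matrix X (a leading bias column of ones followed by the predictors), X^T, X^T*X, and X^T*y for each output column. The per-epoch work then reduces to solving w = (X^T*X + lambda*I)^-1 * X^T*y for the lambda currently proposed by the seeker. The design-matrix setup in isolation, using plain arrays instead of the library's Matrix type (an assumption for illustration only):

using System;
using System.Collections.Generic;

public static class DesignMatrix
{
    //Column 0 holds the constant bias term (1), columns 1..N the predictors
    public static double[][] Build(List<double[]> inputVectors)
    {
        int numOfPredictors = inputVectors[0].Length;
        double[][] X = new double[inputVectors.Count][];
        for (int row = 0; row < inputVectors.Count; row++)
        {
            X[row] = new double[numOfPredictors + 1];
            X[row][0] = 1d;
            inputVectors[row].CopyTo(X[row], 1);
        }
        return X;
    }
}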