Example #1
 /// <summary>
 /// The deep copy constructor
 /// </summary>
 /// <param name="source">Source instance</param>
 public FeedForwardNetworkSettings(FeedForwardNetworkSettings source)
 {
     OutputLayerActivation = null;
     OutputRange           = null;
     if (source.OutputLayerActivation != null)
     {
         OutputLayerActivation = ActivationFactory.DeepCloneActivationSettings(source.OutputLayerActivation);
         OutputRange           = source.OutputRange.DeepClone();
     }
     RegressionMethod      = source.RegressionMethod;
     HiddenLayerCollection = new List<HiddenLayerSettings>(source.HiddenLayerCollection.Count);
     foreach (HiddenLayerSettings shls in source.HiddenLayerCollection)
     {
         HiddenLayerCollection.Add(shls.DeepClone());
     }
     LinRegrTrainerCfg = null;
     RPropTrainerCfg   = null;
     if (source.LinRegrTrainerCfg != null)
     {
         LinRegrTrainerCfg = source.LinRegrTrainerCfg.DeepClone();
     }
     if (source.RPropTrainerCfg != null)
     {
         RPropTrainerCfg = source.RPropTrainerCfg.DeepClone();
     }
     return;
 }
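
A minimal usage sketch of the deep copy constructor above (illustrative only, not part of the library; 'originalSettings' stands for any already configured instance):

 //Hypothetical usage of the deep copy constructor
 FeedForwardNetworkSettings originalSettings = new FeedForwardNetworkSettings();
 FeedForwardNetworkSettings copiedSettings   = new FeedForwardNetworkSettings(originalSettings);
 //The copy holds its own deep-cloned sub-settings, so later changes to
 //originalSettings do not propagate into copiedSettings.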
Example #2
        /// <summary>
        /// Creates the instance and initializes it from the given xml element.
        /// This is the preferred way to instantiate the feed forward network settings.
        /// </summary>
        /// <param name="elem">
        /// Xml data containing the feed forward network settings.
        /// The content of the xml element is always validated against the xml schema.
        /// </param>
        public FeedForwardNetworkSettings(XElement elem)
        {
            //Validation
            ElemValidator validator     = new ElemValidator();
            Assembly      assemblyRCNet = Assembly.GetExecutingAssembly();

            validator.AddXsdFromResources(assemblyRCNet, "RCNet.Neural.Network.FF.FeedForwardNetworkSettings.xsd");
            validator.AddXsdFromResources(assemblyRCNet, "RCNet.RCNetTypes.xsd");
            XElement feedForwardNetworkSettingsElem = validator.Validate(elem, "rootElem");

            //Parsing
            OutputLayerActivation = ActivationFactory.LoadSettings(feedForwardNetworkSettingsElem.Descendants().First());
            if (!IsAllowedActivation(OutputLayerActivation, out Interval outputRange))
            {
                throw new ApplicationException("The specified activation can't be used in the FF network. The activation function has to be stateless and must support derivative calculation.");
            }
            OutputRange      = outputRange;
            RegressionMethod = ParseTrainingMethodType(feedForwardNetworkSettingsElem.Attribute("regressionMethod").Value);
            //Hidden layers
            HiddenLayerCollection = new List<HiddenLayerSettings>();
            XElement hiddenLayersElem = feedForwardNetworkSettingsElem.Descendants("hiddenLayers").FirstOrDefault();

            if (hiddenLayersElem != null)
            {
                foreach (XElement layerElem in hiddenLayersElem.Descendants("layer"))
                {
                    HiddenLayerCollection.Add(new HiddenLayerSettings(layerElem));
                }
            }
            //Trainers
            LinRegrTrainerCfg = null;
            RPropTrainerCfg   = null;
            switch (RegressionMethod)
            {
            case TrainingMethodType.Linear:
                XElement linRegrTrainerElem = feedForwardNetworkSettingsElem.Descendants("linRegrTrainer").FirstOrDefault();
                if (linRegrTrainerElem != null)
                {
                    LinRegrTrainerCfg = new LinRegrTrainerSettings(linRegrTrainerElem);
                }
                else
                {
                    LinRegrTrainerCfg = new LinRegrTrainerSettings();
                }
                break;

            case TrainingMethodType.Resilient:
                XElement resPropTrainerElem = feedForwardNetworkSettingsElem.Descendants("resPropTrainer").FirstOrDefault();
                if (resPropTrainerElem != null)
                {
                    RPropTrainerCfg = new RPropTrainerSettings(resPropTrainerElem);
                }
                else
                {
                    RPropTrainerCfg = new RPropTrainerSettings();
                }
                break;
            }
            return;
        }
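
A hypothetical sketch of creating the settings from an xml file (not part of the library; the file name is illustrative and its content must conform to the FeedForwardNetworkSettings.xsd schema):

        //Requires System.Xml.Linq (XDocument, XElement)
        XElement settingsElem = XDocument.Load("FeedForwardNetworkSettings.xml").Root;
        FeedForwardNetworkSettings settings = new FeedForwardNetworkSettings(settingsElem);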
Example #3
 //Constructors
 /// <summary>
 /// Creates an uninitialized instance
 /// </summary>
 public FeedForwardNetworkSettings()
 {
     OutputLayerActivation = null;
     RegressionMethod      = TrainingMethodType.Resilient;
     HiddenLayerCollection = new List<HiddenLayerSettings>();
     LinRegrTrainerCfg     = null;
     RPropTrainerCfg       = new RPropTrainerSettings();
     return;
 }
Example #4
 /// <summary>
 /// Deep copy constructor
 /// </summary>
 /// <param name="source">Source instance</param>
 public RPropTrainerSettings(RPropTrainerSettings source)
 {
     ZeroTolerance = source.ZeroTolerance;
     PositiveEta   = source.PositiveEta;
     NegativeEta   = source.NegativeEta;
     IniDelta      = source.IniDelta;
     MinDelta      = source.MinDelta;
     MaxDelta      = source.MaxDelta;
     return;
 }
Example #5
        //Constructor
        /// <summary>
        /// Instantiates the RPropTrainer
        /// </summary>
        /// <param name="net">
        /// The feed forward network to be trained
        /// </param>
        /// <param name="inputVectorCollection">
        /// Collection of the training input vectors
        /// </param>
        /// <param name="outputVectorCollection">
        /// Collection of the desired outputs
        /// </param>
        /// <param name="settings">
        /// Trainer parameters
        /// </param>
        public RPropTrainer(FeedForwardNetwork net,
                            List<double[]> inputVectorCollection,
                            List<double[]> outputVectorCollection,
                            RPropTrainerSettings settings = null
                            )
        {
            if (!net.Finalized)
            {
                throw new Exception("Can´t create trainer. Network structure was not finalized.");
            }
            _settings = settings;
            if (_settings == null)
            {
                //Default parameters
                _settings = new RPropTrainerSettings();
            }
            _net = net;
            _inputVectorCollection  = inputVectorCollection;
            _outputVectorCollection = outputVectorCollection;
            _weigthsGradsAcc        = new double[_net.NumOfWeights];
            _weigthsGradsAcc.Populate(0);
            _weigthsPrevGradsAcc = new double[_net.NumOfWeights];
            _weigthsPrevGradsAcc.Populate(0);
            _weigthsPrevDeltas = new double[_net.NumOfWeights];
            _weigthsPrevDeltas.Populate(_settings.IniDelta);
            _weigthsPrevChanges = new double[_net.NumOfWeights];
            _weigthsPrevChanges.Populate(0);
            _prevMSE = 0;
            _lastMSE = 0;
            _epoch   = 0;
            //Parallel gradient workers / batch ranges preparation
            _workerRangeCollection = new List <WorkerRange>();
            int numOfWorkers = Math.Min(Environment.ProcessorCount, _inputVectorCollection.Count);

            numOfWorkers = Math.Max(1, numOfWorkers);
            int workerBatchSize = _inputVectorCollection.Count / numOfWorkers;

            for (int workerIdx = 0, fromRow = 0; workerIdx < numOfWorkers; workerIdx++, fromRow += workerBatchSize)
            {
                WorkerRange workerRange = new WorkerRange();
                workerRange.FromRow = fromRow;
                if (workerIdx == numOfWorkers - 1)
                {
                    workerRange.ToRow = _inputVectorCollection.Count - 1;
                }
                else
                {
                    workerRange.ToRow = (fromRow + workerBatchSize) - 1;
                }
                _workerRangeCollection.Add(workerRange);
            }
            return;
        }
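
The loop above splits the training set into contiguous row ranges, one per worker, with the last worker absorbing the remainder rows. A standalone, illustrative sketch of the same arithmetic (not part of the library):

            //Illustrative partitioning of 10 sample rows across 3 workers
            int numOfSamples    = 10;
            int numOfWorkers    = 3;
            int workerBatchSize = numOfSamples / numOfWorkers; //3
            for (int workerIdx = 0, fromRow = 0; workerIdx < numOfWorkers; workerIdx++, fromRow += workerBatchSize)
            {
                int toRow = (workerIdx == numOfWorkers - 1) ? numOfSamples - 1 : (fromRow + workerBatchSize) - 1;
                //workerIdx 0 -> rows 0..2, workerIdx 1 -> rows 3..5, workerIdx 2 -> rows 6..9
            }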
Example #6
 /// <summary>
 /// Deep copy constructor
 /// </summary>
 /// <param name="source">Source instance</param>
 public RPropTrainerSettings(RPropTrainerSettings source)
 {
     NumOfAttempts      = source.NumOfAttempts;
     NumOfAttemptEpochs = source.NumOfAttemptEpochs;
     ZeroTolerance      = source.ZeroTolerance;
     PositiveEta        = source.PositiveEta;
     NegativeEta        = source.NegativeEta;
     IniDelta           = source.IniDelta;
     MinDelta           = source.MinDelta;
     MaxDelta           = source.MaxDelta;
     return;
 }
Example #7
        //Constructor
        /// <summary>
        /// Instantiates the RPropTrainer
        /// </summary>
        /// <param name="net">
        /// The feed forward network to be trained
        /// </param>
        /// <param name="inputVectorCollection">
        /// Collection of the training input vectors
        /// </param>
        /// <param name="outputVectorCollection">
        /// Collection of the desired outputs
        /// </param>
        /// <param name="settings">
        /// Trainer parameters
        /// </param>
        public RPropTrainer(FeedForwardNetwork net,
                            List<double[]> inputVectorCollection,
                            List<double[]> outputVectorCollection,
                            RPropTrainerSettings settings = null
                            )
        {
            if (!net.Finalized)
            {
                throw new Exception("Can´t create trainer. Network structure was not finalized.");
            }
            _settings = settings;
            if (_settings == null)
            {
                //Default parameters
                _settings = new RPropTrainerSettings();
            }
            _net = net;
            _inputVectorCollection  = inputVectorCollection;
            _outputVectorCollection = outputVectorCollection;
            _weigthsGradsAcc        = new double[_net.NumOfWeights];
            _weigthsGradsAcc.Populate(0);
            _weigthsPrevGradsAcc = new double[_net.NumOfWeights];
            _weigthsPrevGradsAcc.Populate(0);
            _weigthsPrevDeltas = new double[_net.NumOfWeights];
            _weigthsPrevDeltas.Populate(_settings.IniDelta);
            _weigthsPrevChanges = new double[_net.NumOfWeights];
            _weigthsPrevChanges.Populate(0);
            _prevMSE = 0;
            MSE      = 0;
            Epoch    = 0;
            //Parallel gradient workers (batch ranges) preparation
            int numOfWorkers = Math.Max(1, Math.Min(Environment.ProcessorCount - 1, _inputVectorCollection.Count));

            _gradientWorkerDataCollection = new GradientWorkerData[numOfWorkers];
            int workerBatchSize = _inputVectorCollection.Count / numOfWorkers;

            for (int workerIdx = 0, fromRow = 0; workerIdx < numOfWorkers; workerIdx++, fromRow += workerBatchSize)
            {
                GradientWorkerData gwd = new GradientWorkerData
                                         (
                    fromRow: fromRow,
                    toRow: (workerIdx == numOfWorkers - 1 ? _inputVectorCollection.Count - 1 : (fromRow + workerBatchSize) - 1),
                    numOfWeights: _net.NumOfWeights
                                         );
                _gradientWorkerDataCollection[workerIdx] = gwd;
            }
            return;
        }
Example #8
        //Constructor
        /// <summary>
        /// Instantiates the RPropTrainer
        /// </summary>
        /// <param name="net"> The feed forward network to be trained </param>
        /// <param name="inputVectorCollection"> Collection of the training input vectors </param>
        /// <param name="outputVectorCollection"> Collection of the desired outputs </param>
        /// <param name="settings"> Trainer parameters </param>
        /// <param name="rand">Random object to be used</param>
        public RPropTrainer(FeedForwardNetwork net,
                            List<double[]> inputVectorCollection,
                            List<double[]> outputVectorCollection,
                            RPropTrainerSettings settings,
                            Random rand
                            )
        {
            if (!net.Finalized)
            {
                throw new Exception("Can´t create trainer. Network structure was not finalized.");
            }
            _settings               = settings;
            MaxAttempt              = _settings.NumOfAttempts;
            MaxAttemptEpoch         = _settings.NumOfAttemptEpochs;
            _net                    = net;
            _rand                   = rand;
            _inputVectorCollection  = inputVectorCollection;
            _outputVectorCollection = outputVectorCollection;
            _weigthsGradsAcc        = new double[_net.NumOfWeights];
            _weigthsPrevGradsAcc    = new double[_net.NumOfWeights];
            _weigthsPrevDeltas      = new double[_net.NumOfWeights];
            _weigthsPrevChanges     = new double[_net.NumOfWeights];
            //Parallel gradient workers (batch ranges) preparation
            int numOfWorkers = Math.Max(1, Math.Min(Environment.ProcessorCount - 1, _inputVectorCollection.Count));

            _gradientWorkerDataCollection = new GradientWorkerData[numOfWorkers];
            int workerBatchSize = _inputVectorCollection.Count / numOfWorkers;

            for (int workerIdx = 0, fromRow = 0; workerIdx < numOfWorkers; workerIdx++, fromRow += workerBatchSize)
            {
                GradientWorkerData gwd = new GradientWorkerData
                                         (
                    fromRow: fromRow,
                    toRow: (workerIdx == numOfWorkers - 1 ? _inputVectorCollection.Count - 1 : (fromRow + workerBatchSize) - 1),
                    numOfWeights: _net.NumOfWeights
                                         );
                _gradientWorkerDataCollection[workerIdx] = gwd;
            }
            InfoMessage = string.Empty;
            //Start training attempt
            Attempt = 0;
            NextAttempt();
            return;
        }
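
A hypothetical usage sketch of this constructor (illustrative only; 'net' is assumed to be an already built and finalized FeedForwardNetwork, and 'trainInputs'/'trainOutputs' are the prepared training vectors):

            //Hypothetical usage of the RPropTrainer constructor
            RPropTrainerSettings trainerSettings = new RPropTrainerSettings();
            RPropTrainer trainer = new RPropTrainer(net,
                                                    trainInputs,
                                                    trainOutputs,
                                                    trainerSettings,
                                                    new Random(0)
                                                    );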
Example #9
        //Methods
        /// <summary>
        /// See the base.
        /// </summary>
        public override bool Equals(object obj)
        {
            if (obj == null)
            {
                return(false);
            }
            RPropTrainerSettings cmpSettings = obj as RPropTrainerSettings;
            //Fail fast when the compared object is not an RPropTrainerSettings instance
            if (cmpSettings == null)
            {
                return(false);
            }

            if (ZeroTolerance != cmpSettings.ZeroTolerance ||
                PositiveEta != cmpSettings.PositiveEta ||
                NegativeEta != cmpSettings.NegativeEta ||
                IniDelta != cmpSettings.IniDelta ||
                MinDelta != cmpSettings.MinDelta ||
                MaxDelta != cmpSettings.MaxDelta
                )
            {
                return(false);
            }
            return(true);
        }
Example #10
        /// <summary>
        /// Creates a deep copy of this instance
        /// </summary>
        public RPropTrainerSettings DeepClone()
        {
            RPropTrainerSettings clone = new RPropTrainerSettings(this);

            return(clone);
        }
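
An illustrative sketch of using DeepClone (not part of the library):

            //DeepClone produces an independent copy holding the same parameter values
            RPropTrainerSettings settings = new RPropTrainerSettings();
            RPropTrainerSettings clone    = settings.DeepClone();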