Inheritance: IDisposable
示例#1
0
        /* Method: TrainEpochSarpropParallel

            Runs one SARPROP training epoch over the data set using the
            parallel implementation, and returns the resulting MSE.

            Parameters:
                data - the data to train on
                threadNumb - the thread to do training on

        */
        public float TrainEpochSarpropParallel(TrainingData data, uint threadNumb)
        {
            float mse = fannfloat.train_epoch_sarprop_parallel(net.to_fann(), data.ToFannTrainData(), threadNumb);
            return mse;
        }
示例#2
0
        /* Method: TrainEpochSarpropParallel

            Runs one SARPROP training epoch over the data set using the
            parallel implementation, returning the MSE and filling
            predictedOutputs with the outputs produced during the epoch.

            Parameters:
                data - the data to train on
                threadNumb - the thread to do training on
                predictedOutputs - the predicted outputs

        */
        public float TrainEpochSarpropParallel(TrainingData data, uint threadNumb, List<List<float>> predictedOutputs)
        {
            int sampleCount = predictedOutputs.Count;
            using (floatVectorVector predicted_out = new floatVectorVector(sampleCount))
            {
                // Pre-size each inner native vector to match the caller's rows.
                int index = 0;
                while (index < sampleCount)
                {
                    predicted_out[index] = new floatVector(predictedOutputs[index].Count);
                    index++;
                }

                float mse = fannfloat.train_epoch_sarprop_parallel(net.to_fann(), data.ToFannTrainData(), threadNumb, predicted_out);

                // Copy the native output rows back into the caller's list.
                predictedOutputs.Clear();
                for (int i = 0; i < predicted_out.Count; i++)
                {
                    floatVector row = predicted_out[i]; // hoisted so the indexer is evaluated once per row
                    List<float> rowValues = new List<float>(row.Count);
                    for (int j = 0; j < row.Count; j++)
                    {
                        rowValues.Add(row[j]);
                    }
                    predictedOutputs.Add(rowValues);
                }
                return mse;
            }
        }
示例#3
0
        /* Method: TrainEpochIncrementalMod

            Runs one incremental (mod) training epoch over the data set and
            returns the resulting MSE.

            Parameters:
                data - the data to train on

        */
        public float TrainEpochIncrementalMod(TrainingData data)
        {
            float mse = fannfloat.train_epoch_incremental_mod(net.to_fann(), data.ToFannTrainData());
            return mse;
        }
示例#4
0
        /* Method: TrainEpochIncrementalMod

            Runs one incremental (mod) training epoch over the data set,
            returning the MSE and filling predictedOutputs with the outputs
            produced during the epoch.

            Parameters:
                data - the data to train on
                predictedOutputs - the predicted outputs

        */
        public float TrainEpochIncrementalMod(TrainingData data, List<List<float>> predictedOutputs)
        {
            int sampleCount = predictedOutputs.Count;
            using (floatVectorVector predicted_out = new floatVectorVector(sampleCount))
            {
                // Pre-size each inner native vector to match the caller's rows.
                int index = 0;
                while (index < sampleCount)
                {
                    predicted_out[index] = new floatVector(predictedOutputs[index].Count);
                    index++;
                }

                float mse = fannfloat.train_epoch_incremental_mod(net.to_fann(), data.ToFannTrainData(), predicted_out);

                // Copy the native output rows back into the caller's list.
                predictedOutputs.Clear();
                for (int i = 0; i < predicted_out.Count; i++)
                {
                    floatVector row = predicted_out[i]; // hoisted so the indexer is evaluated once per row
                    List<float> rowValues = new List<float>(row.Count);
                    for (int j = 0; j < row.Count; j++)
                    {
                        rowValues.Add(row[j]);
                    }
                    predictedOutputs.Add(rowValues);
                }
                return mse;
            }
        }
示例#5
0
        /* Method: TestDataParallel

            Tests the network against the data set using the parallel
            implementation and returns the resulting MSE.

            Parameters:
                data - the data to train on
                threadNumb - the thread to do training on

        */
        public float TestDataParallel(TrainingData data, uint threadNumb)
        {
            float mse = fannfloat.test_data_parallel(net.to_fann(), data.ToFannTrainData(), threadNumb);
            return mse;
        }
示例#6
0
        /* Method: TrainEpoch
            Train one epoch with a set of training data.

            One epoch is where all of the training data is considered exactly
            once. The training algorithm used is the one selected via
            <TrainingAlgorithm>.

            The returned value is the MSE as calculated before or during the
            epoch, not the MSE after the epoch completes; recomputing that
            exactly would require another full pass over the training set, so
            this value is normally adequate to monitor while training.

            See also:
                <TrainOnData>, <TestData>, <fann_train_epoch at http://libfann.github.io/fann/docs/files/fann_train-h.html#fann_train_epoch>

            This function appears in FANN >= 1.2.0.
         */
        public float TrainEpoch(TrainingData data)
        {
            float mse = net.train_epoch(data.InternalData);
            return mse;
        }
示例#7
0
        /* Method: SetScalingParams

           Calculate scaling parameters for future use based on training data.

           Parameters:
                data - the training data to derive the scaling parameters from
                newInputMin - the desired minimum of the scaled inputs
                newInputMax - the desired maximum of the scaled inputs
                newOutputMin - the desired minimum of the scaled outputs
                newOutputMax - the desired maximum of the scaled outputs

           See also:
           		        <ClearScalingParams>,
                <fann_set_scaling_params at http://libfann.github.io/fann/docs/files/fann_train-h.html#fann_set_scaling_params>

            This function appears in FANN >= 2.1.0.
         */
        public bool SetScalingParams(TrainingData data, float newInputMin, float newInputMax, float newOutputMin, float newOutputMax)
        {
            bool succeeded = net.set_scaling_params(data.InternalData, newInputMin, newInputMax, newOutputMin, newOutputMax);
            return succeeded;
        }
示例#8
0
 /* Constructor: TrainingData
  *
  *  Copy constructor: builds a duplicate of the given training data.
  *  Corresponds to the C API <fann_duplicate_train_data at http://libfann.github.io/fann/docs/files/fann_train-h.html#fann_duplicate_train_data> function.
  */
 public TrainingData(TrainingData data)
 {
     var duplicate = new FannWrapperFloat.training_data(data.InternalData);
     InternalData = duplicate;
 }
示例#9
0
        /* Method: InitWeights

            Initialize the weights using Widrow + Nguyen's algorithm.

            This behaves similarly to <fann_randomize_weights at http://libfann.github.io/fann/docs/files/fann-h.html#fann_randomize_weights>,
            but applies the Nguyen-Widrow initialization scheme, which sets the
            weights so as to speed up training. The technique is not always
            successful and in some cases can be less efficient than a purely
            random initialization.

            The algorithm needs the range of the input data (i.e. the largest
            and smallest input), which is why the training data that will be
            used to train the network is passed in.

            See also:
                <RandomizeWeights>, <TrainingData::ReadTrainFromFile>,
                <fann_init_weights at http://libfann.github.io/fann/docs/files/fann-h.html#fann_init_weights>

            This function appears in FANN >= 1.1.0.
        */
        public void InitWeights(TrainingData data)
        {
            var internalData = data.InternalData;
            net.init_weights(internalData);
        }
示例#10
0
        /*********************************************************************/
        /* Method: ScaleTrain

           Scale input and output data based on previously calculated parameters.

           See also:
           		        <DescaleTrain>, <fann_scale_train at http://libfann.github.io/fann/docs/files/fann_train-h.html#fann_scale_train>

            This function appears in FANN >= 2.1.0.
         */
        public void ScaleTrain(TrainingData data)
        {
            var internalData = data.InternalData;
            net.scale_train(internalData);
        }
示例#11
0
        /*********************************************************************/
        /* Method: CascadetrainOnData

           Trains on an entire dataset, for a period of time, using the Cascade2
           training algorithm. This algorithm adds neurons to the neural network
           while training, so it must start from an ANN without hidden layers.
           The network should also use shortcut connections, so it should be
           created like this:
           >NeuralNet net(NetworkType.SHORTCUT, ...);

           This training uses the parameters set using the Cascade..., but it
           also uses another training algorithm as its internal training
           algorithm. That algorithm can be set to either
           TrainingAlgorithm.TRAIN_RPROP or TrainingAlgorithm.TRAIN_QUICKPROP
           via <TrainingAlgorithm>, and the parameters set for those training
           algorithms will also affect the cascade training.

           Parameters:
           		        data - The data, which should be used during training
           		        maxNeurons - The maximum number of neurons to be added to neural network
           		        neuronsBetweenReports - The number of neurons between printing a status report to the console.
           			        A value of zero means no reports should be printed.
           		        desiredError - The desired <MSE> or <BitFail>, depending on which stop function
           			        is chosen by <TrainStopFunction>.

            Instead of printing out reports every neuronsBetweenReports, a callback function can be called
            (see <SetCallback>).

            See also:
                <TrainOnData>, <CascadetrainOnFile>, <fann_cascadetrain_on_data at http://libfann.github.io/fann/docs/files/fann_cpp-h.html#neural_net.cascadetrain_on_data>

            This function appears in FANN >= 2.0.0.
        */
        public void CascadetrainOnData(TrainingData data, uint maxNeurons, uint neuronsBetweenReports, float desiredError)
        {
            var internalData = data.InternalData;
            net.cascadetrain_on_data(internalData, maxNeurons, neuronsBetweenReports, desiredError);
        }
示例#12
0
        /* Method: MergeTrainData

           Merges the given data into the data already contained in this
           <TrainingData> instance.

           This function appears in FANN >= 1.1.0.
         */
        public void MergeTrainData(TrainingData data)
        {
            var otherData = data.InternalData;
            InternalData.merge_train_data(otherData);
        }
示例#13
0
        /* Constructor: TrainingData

            Copy constructor: builds a duplicate of the given training data.
            Corresponds to the C API <fann_duplicate_train_data at http://libfann.github.io/fann/docs/files/fann_train-h.html#fann_duplicate_train_data> function.
        */
        public TrainingData(TrainingData data)
        {
            var duplicate = new FannWrapperFloat.training_data(data.InternalData);
            InternalData = duplicate;
        }
示例#14
0
        /* Method: TrainOnData

           Trains on an entire dataset, for a period of time.

           The training uses the algorithm chosen by <TrainingAlgorithm> and
           the parameters set for that algorithm.

           Parameters:
           		        data - The data, which should be used during training
           		        maxEpochs - The maximum number of epochs the training should continue
           		        epochsBetweenReports - The number of epochs between printing a status report to the console.
           			        A value of zero means no reports should be printed.
           		        desiredError - The desired <MSE> or <BitFail>, depending on which stop function
           			        is chosen by <TrainStopFunction>.

            Instead of printing out reports every epochs_between_reports, a callback function can be called
            (see <SetCallback>).

            See also:
                <TrainOnFile>, <TrainEpoch>, <fann_train_on_data at http://libfann.github.io/fann/docs/files/fann_train-h.html#fann_train_on_data>

            This function appears in FANN >= 1.0.0.
        */
        public void TrainOnData(TrainingData data, uint maxEpochs, uint epochsBetweenReports, float desiredError)
        {
            var internalData = data.InternalData;
            net.train_on_data(internalData, maxEpochs, epochsBetweenReports, desiredError);
        }
示例#15
0
        /* Method: TestData

           Tests a set of training data and calculates the MSE for it.

           This function updates the MSE and the bit fail values.

           See also:
         	        <Test>, <MSE>, <BitFail>, <fann_test_data at http://libfann.github.io/fann/docs/files/fann_train-h.html#fann_test_data>

            This function appears in FANN >= 1.2.0.
         */
        public float TestData(TrainingData data)
        {
            float mse = net.test_data(data.InternalData);
            return mse;
        }
示例#16
0
 /* Method: InternalCallback
  *
  * Bridges the native training callback to the managed <Callback> delegate:
  * wraps the raw net/data pointers in managed proxies (the 'false' flag is
  * passed to the proxy constructors — presumably so ownership stays with the
  * native side; TODO confirm against the SWIG-generated wrappers), recovers
  * the user object from its GCHandle, and forwards everything to Callback.
  */
 private int InternalCallback(global::System.IntPtr netPtr, global::System.IntPtr dataPtr, uint max_epochs, uint epochs_between_reports, float desired_error, uint epochs, global::System.IntPtr user_data)
 {
     var callbackNet = new NeuralNet(new neural_net(netPtr, false));
     var callbackData = new TrainingData(new training_data(dataPtr, false));
     GCHandle userHandle = (GCHandle)user_data;
     object userObject = userHandle.Target;
     return Callback(callbackNet, callbackData, max_epochs, epochs_between_reports, desired_error, epochs, userObject);
 }
示例#17
0
 /* Method: MergeTrainData
  *
  * Merges the given data into the data already contained in this
  * <TrainingData> instance.
  *
  * This function appears in FANN >= 1.1.0.
  */
 public void MergeTrainData(TrainingData data)
 {
     var otherData = data.InternalData;
     InternalData.merge_train_data(otherData);
 }