/// <summary> /// Generates output predictions for the input samples. Computation is done in batches. /// </summary> /// <param name="x">The input data frame to run prediction.</param> /// <param name="batch_size">Size of the batch.</param> /// <returns></returns> public DataFrame Predict(DataFrame x, int batch_size) { DataFrameIter dataFrameIter = new DataFrameIter(x); List <float> predictions = new List <float>(); dataFrameIter.SetBatchSize(batch_size); while (dataFrameIter.Next()) { var data = dataFrameIter.GetBatchX(); SuperArray output = data; foreach (var layer in Layers) { if (layer.SkipPred) { continue; } layer.Forward(output); output = layer.Output; } predictions.AddRange(output.List <float>()); } DataFrame result = new DataFrame(); result.Load(predictions.ToArray()); return(result); }
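A minimal usage sketch of this batched Predict overload, assuming a trained Sequential model like the one built in the Main example below. DataFrame2D and its Load call mirror that example; the batch size of 2 is an arbitrary choice here.

// Hypothetical usage: predict two logic-gate samples in batches of 2.
DataFrame2D predX = new DataFrame2D(2);   // 2 features per row
predX.Load(0, 0, 0, 1);                   // rows: (0,0) and (0,1)

DataFrame predY = model.Predict(predX, 2);
// predY now holds one prediction per input row.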
static void Main(string[] args)
{
    // Setup Engine
    Global.UseEngine(SiaNet.Backend.MxNetLib.SiaNetBackend.Instance, DeviceType.CPU);

    // Prep Data
    var (x, y) = PrepDataset();
    x.Head();
    DataFrameIter trainSet = new DataFrameIter(x, y);

    // Build model with simple fully connected layers
    var model = new Sequential();
    model.EpochEnd += Model_EpochEnd;
    model.Add(new Dense(64, ActType.ReLU));
    model.Add(new Dense(1, ActType.Sigmoid));

    // Compile with Optimizer, Loss and Metric
    model.Compile(OptimizerType.SGD, LossType.MeanSquaredError, MetricType.BinaryAccurary);

    // Train for 1000 epochs with a batch size of 2
    model.Train(trainSet, 1000, 2);

    // Create prediction data to evaluate; the result should be 0 and 1
    DataFrame2D predX = new DataFrame2D(2);
    predX.Load(0, 0, 0, 1);

    var rawPred = model.Predict(predX);
    Console.ReadLine();
}
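PrepDataset is not shown above. A plausible sketch, assuming it builds an OR-style truth table with the same DataFrame2D.Load API used for predX; the frame types, column counts, and label values are assumptions, not the library's actual helper.

static (DataFrame2D, DataFrame2D) PrepDataset()
{
    // Hypothetical: 4 samples with 2 input features each (OR-gate truth table).
    DataFrame2D x = new DataFrame2D(2);
    x.Load(0, 0,
           0, 1,
           1, 0,
           1, 1);

    // One label per sample.
    DataFrame2D y = new DataFrame2D(1);
    y.Load(0, 1, 1, 1);

    return (x, y);
}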
/// <summary>
/// Generates output predictions for the input samples. Computation is done in batches.
/// </summary>
/// <param name="x">The input data frame to run prediction on.</param>
/// <param name="batch_size">Size of the batch.</param>
/// <returns>A tensor containing the predictions for all input samples.</returns>
public Tensor Predict(DataFrame x, int batch_size)
{
    DataFrameIter dataFrameIter = new DataFrameIter(x);
    List<float> predictions = new List<float>();
    dataFrameIter.SetBatchSize(batch_size);
    long[] outshape = null;

    while (dataFrameIter.Next())
    {
        var data = dataFrameIter.GetBatchX();
        Tensor output = data;

        // Forward pass through every layer that participates in prediction.
        foreach (var layer in Layers)
        {
            if (layer.SkipPred)
            {
                continue;
            }

            layer.Forward(output);
            output = layer.Output;
        }

        // Remember the per-batch output shape so the final tensor can be rebuilt
        // (assumes Tensor exposes its dimensions through a Shape property).
        outshape = output.Shape;
        predictions.AddRange(output.ToArray().Cast<float>());
    }

    // Adjust the leading dimension to cover every sample, not just the last batch.
    if (outshape != null && outshape.Length > 0)
    {
        long sampleSize = 1;
        for (int i = 1; i < outshape.Length; i++)
        {
            sampleSize *= outshape[i];
        }

        outshape[0] = predictions.Count / sampleSize;
    }

    return K.CreateVariable(predictions.ToArray(), outshape);
}
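A short usage sketch for the tensor-returning overload, assuming a trained model; testX is a hypothetical input DataFrame, the batch size is arbitrary, and Print is reused from the MNIST example further down.

// Hypothetical usage: run batched prediction and inspect the result tensor.
Tensor predictions = model.Predict(testX, 32);
predictions.Print();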
private static void ORGate()
{
    // OR-gate truth table: 4 samples with 2 inputs and 1 label each.
    DataFrame train_x = new DataFrame(4, 2);
    DataFrame train_y = new DataFrame(4, 1);

    train_x.AddData(0, 0);
    train_x.AddData(0, 1);
    train_x.AddData(1, 0);
    train_x.AddData(1, 1);

    train_y.AddData(0);
    train_y.AddData(1);
    train_y.AddData(1);
    train_y.AddData(1);

    DataFrameIter train = new DataFrameIter(train_x, train_y);

    // Model with 2 inputs, one hidden layer of 4 units and 1 output.
    Sequential model = new Sequential(new Shape(2), 1);
    model.AddHidden(new Dense(4, ActivationType.ReLU, new GlorotUniform()));
    model.Compile(OptimizerType.SGD, LossType.BinaryCrossEntropy, "accuracy");

    // Train for 100 epochs with a batch size of 2, then persist the model.
    model.Fit(train, 100, 2);
    model.SaveModel(@"C:\Users\bdkadmin\Desktop\SSHKeys\");
}
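For comparison, an AND gate would use exactly the same pipeline; only the labels change. A hypothetical label frame, reusing the AddData API above:

// Hypothetical AND-gate labels: only the (1,1) sample maps to 1.
DataFrame train_y_and = new DataFrame(4, 1);
train_y_and.AddData(0);
train_y_and.AddData(0);
train_y_and.AddData(0);
train_y_and.AddData(1);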
/// <summary>
/// Trains the model for a given number of epochs (iterations over the dataset).
/// </summary>
/// <param name="train">The training dataset, an instance of DataFrameIter.</param>
/// <param name="epochs">Number of epochs to train the model. An epoch is one iteration over the entire training data.</param>
/// <param name="batchSize">Number of samples per gradient update.</param>
/// <param name="val">An optional validation set used to evaluate the model at the end of every epoch.</param>
public void Train(DataFrameIter train, int epochs, int batchSize, DataFrameIter val = null)
{
    LearningHistory = new History();
    Stopwatch trainWatch = new Stopwatch();

    try
    {
        Stopwatch batchWatch = new Stopwatch();
        long n = train.DataSize;

        trainWatch.Start();
        train.SetBatchSize(batchSize);
        if (val != null)
        {
            val.SetBatchSize(batchSize);
        }

        for (int iteration = 1; iteration <= epochs; iteration++)
        {
            batchWatch.Restart();
            OnEpochStart(iteration);
            RunEpoch(iteration, train, val);
            batchWatch.Stop();

            // Approximate throughput in samples per second for this epoch.
            long samplesSeen = n * 1000 / (batchWatch.ElapsedMilliseconds + 1);

            if (val == null)
            {
                OnEpochEnd(iteration, samplesSeen, train_losses.Average(), 0, train_metrics.Average(), 0, batchWatch.ElapsedMilliseconds);
            }
            else
            {
                OnEpochEnd(iteration, samplesSeen, train_losses.Average(), val_losses.Average(), train_metrics.Average(), val_metrics.Average(), batchWatch.ElapsedMilliseconds);
            }

            LearningHistory.Add(train_losses, train_metrics, val_losses, val_metrics);
        }
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex.ToString());
    }

    trainWatch.Stop();
    OnTrainingEnd(LearningHistory, trainWatch.ElapsedMilliseconds);
}
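The typical call pattern, taken from the MNIST example below; the epoch count and batch size here are illustrative.

// Train for 10 epochs with mini-batches of 32, validating after every epoch.
DataFrameIter trainIter = new DataFrameIter(trainX, trainY);
DataFrameIter valIter = new DataFrameIter(valX, valY);
model.Train(trainIter, 10, 32, valIter);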
/// <summary>
/// Runs a single training epoch and, if provided, a validation pass.
/// </summary>
/// <param name="iteration">The current epoch index.</param>
/// <param name="train">The training set iterator.</param>
/// <param name="val">The optional validation set iterator.</param>
/// <returns>The epoch index that was run.</returns>
private int RunEpoch(int iteration, DataFrameIter train, DataFrameIter val = null)
{
    train_losses.Clear();
    train_metrics.Clear();
    val_losses.Clear();
    val_metrics.Clear();

    train.Reset();
    if (val != null)
    {
        val.Reset();
    }

    // Training pass: update the model batch by batch.
    while (train.Next())
    {
        var (x, y) = train.GetBatch();
        RunTrainOnBatch(iteration, x, y);
        x.Dispose();
        y.Dispose();
    }

    // Validation pass: forward only, collecting loss and metric averages.
    if (val != null)
    {
        while (val.Next())
        {
            var (x, y) = val.GetBatch();
            var pred = Forward(x);
            var lossVal = LossFn.Forward(pred, y);
            var metricVal = MetricFn.Calc(pred, y);

            val_losses.Add(Ops.Mean(lossVal));
            val_metrics.Add(Ops.Mean(metricVal));

            x.Dispose();
            y.Dispose();
            lossVal.Dispose();
            metricVal.Dispose();
        }
    }

    return iteration;
}
static void Main(string[] args)
{
    Global.UseGpu();

    // Quick sanity check of the tensor backend: build a 3x3 matrix and take its diagonal.
    Tensor x = Tensor.FromArray(Global.Device, new float[] { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
    x = x.Reshape(3, 3);
    var result = TOps.Diag(x);
    result.Print();

    // Load MNIST, either flattened for a dense model or as images for a conv model.
    string datasetFolder = @"C:\dataset\MNIST";
    bool useDenseModel = false;
    var ((trainX, trainY), (valX, valY)) = MNISTParser.LoadDataSet(datasetFolder, trainCount: 60000, testCount: 10000, flatten: useDenseModel);
    Console.WriteLine("Train and Test data loaded");

    DataFrameIter trainIter = new DataFrameIter(trainX, trainY);
    DataFrameIter valIter = new DataFrameIter(valX, valY);

    Sequential model = useDenseModel ? BuildFCModel() : BuildConvModel();

    model.Compile(OptimizerType.Adam, LossType.CategorialCrossEntropy, MetricType.Accuracy);
    Console.WriteLine("Model compiled.. initiating training");
    model.EpochEnd += Model_EpochEnd;

    // Train for 10 epochs with a batch size of 32, validating on the test split.
    model.Train(trainIter, 10, 32, valIter);
    Console.ReadLine();
}
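BuildFCModel and BuildConvModel are not shown here. A plausible sketch of the dense variant, assuming flattened 28x28 MNIST inputs, the Dense/ActType API used in the other examples, and that ActType includes a Softmax activation; the layer sizes are arbitrary.

static Sequential BuildFCModel()
{
    // Hypothetical fully connected classifier for flattened 28x28 MNIST images.
    var model = new Sequential();
    model.Add(new Dense(128, ActType.ReLU));
    model.Add(new Dense(64, ActType.ReLU));
    model.Add(new Dense(10, ActType.Softmax));   // 10 digit classes
    return model;
}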
static void Main(string[] args)
{
    // Setup Engine
    Global.UseEngine(SiaNet.Backend.ArrayFire.SiaNetBackend.Instance, DeviceType.CPU);

    // Prep Data
    var (x, y) = PrepDataset();
    DataFrameIter trainSet = new DataFrameIter(x, y);

    // Build model with simple fully connected layers
    var model = new Sequential();
    model.EpochEnd += Model_EpochEnd;
    model.Add(new Dense(100, ActType.ReLU));
    model.Add(new Dense(50, ActType.ReLU));
    model.Add(new Dense(1, ActType.Sigmoid));

    // Compile with Optimizer, Loss and Metric
    model.Compile(OptimizerType.Adam, LossType.MeanSquaredError, MetricType.MAE);

    // Train for 25 epochs with a batch size of 2
    model.Train(trainSet, 25, 2);
}
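Model_EpochEnd is wired up in each example but never shown. A hypothetical handler, assuming the event args expose the values passed to OnEpochEnd in Train (epoch index, samples per second, losses, metrics, duration); the exact type and property names are assumptions.

private static void Model_EpochEnd(object sender, EpochEndEventArgs e)
{
    // Print one progress line per epoch (type and property names assumed).
    Console.WriteLine($"Epoch {e.Epoch}: loss={e.Loss}, metric={e.Metric}, " +
                      $"val_loss={e.ValidationLoss}, {e.SamplesSeen} samples/sec, {e.Duration}ms");
}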