/// <summary>
/// Handles the "Recognize" button: saves the current drawing, runs the
/// perceptron on the rasterized image, asks the user to confirm the
/// recognized symbol, and then trains the perceptron with the confirmed
/// (or corrected) configuration before clearing the canvas.
/// </summary>
/// <remarks>
/// async void is acceptable here because this is a top-level UI event handler.
/// Fix: removed an <c>InMemoryRandomAccessStream</c> local that was allocated
/// but never used (and never disposed).
/// </remarks>
private async void OnRecognizedButtonClicked(object sender, RoutedEventArgs e)
{
    progressRing.Visibility = Visibility.Visible;
    var file = await SaveDrawing();
    var imageStream = await imageProcessor.Process(file);
    TrainConfig result = await perceptron.Activation(imageStream);
    progressRing.Visibility = Visibility.Collapsed;

    // Ask the user whether the recognized symbol is correct.
    var recognitionResult = await RecognizeDialog.ShowDialogAsync(result.Symbol);

    progressRing.Visibility = Visibility.Visible;
    if (recognitionResult == RecognitionResult.Right)
    {
        // Reinforce the configuration the perceptron chose.
        await perceptron.Calculate(imageStream.CloneStream(), result);
    }
    else
    {
        // Wrong guess: train with the opposite configuration instead.
        var rightConfig = await TrainSetConfigHelper.GetOppositTrainConfig(result);
        await perceptron.Calculate(imageStream.CloneStream(), rightConfig);
    }
    progressRing.Visibility = Visibility.Collapsed;
    ClearCanvas();
}
/// <summary>
/// Builds a parameterless factory that, when invoked, constructs the
/// training input dataset from the given train/input/model configurations.
/// </summary>
/// <param name="train_config">Training-run configuration forwarded to <c>train_input</c>.</param>
/// <param name="train_input_config">Input-reader configuration forwarded to <c>train_input</c>.</param>
/// <param name="model_config">Detection-model configuration forwarded to <c>train_input</c>.</param>
/// <returns>A delegate that lazily builds the training dataset.</returns>
public Func<DatasetV1Adapter> create_train_input_fn(TrainConfig train_config, InputReader train_input_config, DetectionModel model_config)
{
    // Dataset construction is deferred until the returned delegate is called.
    return () => train_input(train_config, train_input_config, model_config);
}
// Builds a 3-10-1 feed-forward network, trains it on the truth table for
// [A XOR B] (with a third input C acting as a distractor the network must
// learn to ignore), then prints every sample's prediction followed by the
// average regression error over the training set. With ~2000+ epochs the
// network reaches full accuracy on this data.
static void Main(string[] args)
{
    // 3 inputs, one hidden layer of 10 neurons, 1 output.
    var net = new Structure.NeuralNetwork(new[] { 3, 10, 1 });

    // BinaryLow/BinaryHigh are premade 0.1d/0.9d values, keeping targets
    // off the sigmoid asymptotes to avoid vanishing-gradient problems.
    var samples = new[]
    {
        new[] { BinaryLow, BinaryLow, BinaryLow },
        new[] { BinaryLow, BinaryHigh, BinaryLow },
        new[] { BinaryHigh, BinaryLow, BinaryLow },
        new[] { BinaryHigh, BinaryHigh, BinaryLow },
        new[] { BinaryLow, BinaryLow, BinaryHigh },
        new[] { BinaryLow, BinaryHigh, BinaryHigh },
        new[] { BinaryHigh, BinaryLow, BinaryHigh },
        new[] { BinaryHigh, BinaryHigh, BinaryHigh }
    };
    var targets = new[]
    {
        new[] { BinaryLow },
        new[] { BinaryHigh },
        new[] { BinaryHigh },
        new[] { BinaryLow },
        new[] { BinaryLow },
        new[] { BinaryHigh },
        new[] { BinaryHigh },
        new[] { BinaryLow }
    };

    // TrainConfig carries the training hyperparameters (here: epoch count).
    var myTrainConfig = new TrainConfig { Epochs = 5000 };

    // Run backpropagation over the data for the configured epochs.
    Tasks.Train(net, samples, targets, myTrainConfig);

    // Without further training, feed each sample through and print the output.
    foreach (var sample in samples)
    {
        net.InputLayer.FeedForward(sample);
        Console.Write(Helpers.DataToString(sample));
        Console.Write(": ");
        Console.WriteLine(Helpers.DataToString(net.OutputLayer.Read()));
    }

    // Use the prebuilt regression accuracy task to report an average error.
    Console.WriteLine("Avg error: " + Tasks.TestRegressionAccuracy(net, samples, targets));
    Console.ReadKey();
}
/// <summary>
/// Given one of the two known training configurations, returns the other one.
/// </summary>
/// <param name="trainConfig">The configuration to invert.</param>
/// <returns>
/// <c>Train2</c> when <paramref name="trainConfig"/> equals <c>Train1</c>;
/// otherwise <c>Train1</c>.
/// </returns>
public async static Task<TrainConfig> GetOppositTrainConfig(TrainConfig trainConfig)
{
    var config = await ParseConfigJson();
    // The parsed config holds exactly two entries; hand back whichever
    // one was not passed in.
    return config.Train1.Equals(trainConfig) ? config.Train2 : config.Train1;
}
/// <summary>
/// Returns `features` and `labels` tensor dictionaries for training.
/// </summary>
/// <param name="train_config">Training-run configuration (accepted but not read in this body — TODO confirm intended use).</param>
/// <param name="train_input_config">Input-reader configuration used to build the dataset.</param>
/// <param name="model_config">Detection-model configuration used to build the architecture.</param>
/// <returns>The dataset built from <paramref name="train_input_config"/>.</returns>
public DatasetV1Adapter train_input(TrainConfig train_config, InputReader train_input_config, DetectionModel model_config)
{
    // Build the detection architecture so its preprocess step is available.
    var arch = modelBuilder.build(model_config, true, true);
    Func <Tensor, (Tensor, Tensor)> model_preprocess_fn = arch.preprocess;
    // Splits a raw tensor dictionary into (features, labels) dictionaries.
    Func <Dictionary <string, Tensor>, (Dictionary <string, Tensor>, Dictionary <string, Tensor>)> transform_and_pad_input_data_fn = (tensor_dict) => {
        return(_get_features_dict(tensor_dict), _get_labels_dict(tensor_dict));
    };
    // NOTE(review): model_preprocess_fn and transform_and_pad_input_data_fn
    // are assigned but never applied to `dataset` below — this looks like an
    // incomplete port of TF Object Detection's train_input (which maps the
    // transform over the dataset). Confirm before relying on preprocessing.
    var dataset = datasetBuilder.build(train_input_config);
    return(dataset);
}
/// <summary>
/// Runs one perceptron learning step against the supplied image and target
/// configuration, then persists the updated weights.
/// </summary>
/// <param name="imageStream">Stream of the image to learn from.</param>
/// <param name="trainConfig">Supplies the target value for the delta rule.</param>
public async Task Calculate(IRandomAccessStream imageStream, TrainConfig trainConfig)
{
    // Presumably refreshes `cells` and `result` from the image before the
    // weight update — confirm against ProcessImageStream.
    await ProcessImageStream(imageStream);

    // Classic perceptron delta rule: error scaled by the learning rate.
    var error = trainConfig.Value - result;
    weights[0] += LEARN_SPEED * error; // bias weight
    for (int cell = 0; cell < cellsCount; cell++)
    {
        weights[cell + 1] += LEARN_SPEED * error * cells[cell];
    }

    await SaveModel();
}
/// <summary>
/// Loads both configuration sections (YOLO and training) rooted at the
/// given path. Construction order is preserved from the original.
/// </summary>
/// <param name="root">Root path handed to each section's constructor.</param>
public Config(string root)
{
    this.YOLO = new YoloConfig(root);
    this.TRAIN = new TrainConfig(root);
}
/// <summary>
/// Reports which optimizer variant is set on the training configuration's
/// optimizer oneof field.
/// </summary>
/// <param name="train_config">Configuration whose optimizer case is queried.</param>
/// <returns>The active <c>OptimizerOneofCase</c> value.</returns>
public static Protos.Optimizer.OptimizerOneofCase get_optimizer_type(TrainConfig train_config)
    => train_config.Optimizer.OptimizerCase;
/// <summary>
/// Trains <paramref name="network"/> by running feed-forward plus
/// backpropagation over the full (sample, label) set for the configured
/// number of epochs, logging progress roughly ten times over the run.
/// </summary>
/// <param name="network">Network whose input layer is fed and whose output layer is backpropagated.</param>
/// <param name="trainingData">Input samples; enumerated once and cached.</param>
/// <param name="trainingLabels">Target outputs, paired positionally with the samples (extras on either side are ignored, as with Zip).</param>
/// <param name="trainConfig">Supplies the epoch count.</param>
public static void Train(Structure.NeuralNetwork network, IEnumerable <IEnumerable <double> > trainingData, IEnumerable <IEnumerable <double> > trainingLabels, TrainConfig trainConfig)
{
    var epochs = trainConfig.Epochs;

    // Materialize the pairs once: the parameters are lazy IEnumerables, and
    // the original re-enumerated both sequences on every epoch, repeating
    // any generator/query work `epochs` times.
    var pairs = trainingData.Zip(trainingLabels, (data, labels) => (data, labels)).ToList();

    // Log roughly every 10% of the run; the interval is loop-invariant,
    // so compute it once instead of every epoch.
    var logInterval = Math.Ceiling(epochs / 10d);

    for (int epoch = 0; epoch < epochs; epoch++)
    {
        if (epoch % logInterval == 0)
        {
            Console.WriteLine($"Trained for {epoch} epochs");
        }

        foreach (var (data, labels) in pairs)
        {
            network.InputLayer.FeedForward(data);
            network.OutputLayer.Backpropagate(labels);
        }
    }
}