/// <summary>
/// Creates a rows x columns weight matrix with entries drawn uniformly from
/// [-epsilon, +epsilon) to break symmetry before training a neural network.
/// </summary>
/// <param name="rows">Number of rows of the weight matrix.</param>
/// <param name="columns">Number of columns of the weight matrix.</param>
/// <returns>A dense matrix of small random initial weights.</returns>
private static Matrix<double> RandInitializeWeights(int rows, int columns)
{
    // Range recommended by the exercise text (~ sqrt(6)/sqrt(L_in + L_out)).
    const double epsilon = 0.12;

    // Uniform samples in [0, 1), laid out column-major, then mapped to [-epsilon, +epsilon).
    // (The original also built an unused Normal distribution; removed as dead code.)
    var srs = new MathNet.Numerics.Random.SystemRandomSource();
    var seq = srs.NextDoubleSequence().Take(rows * columns);
    Matrix<double> w = Matrix<double>.Build.DenseOfColumnMajor(rows, columns, seq);
    return w * (2 * epsilon) - epsilon;
}
/// <summary>
/// Returns a uniformly distributed random double in [a, b), where a and b are
/// the two arguments converted to double (Excel RANDBETWEEN-style helper).
/// </summary>
/// <param name="arg1">Lower bound (any value convertible to double).</param>
/// <param name="arg2">Upper bound (any value convertible to double).</param>
/// <returns>A random value between the two bounds.</returns>
/// <exception cref="System.FormatException">An argument is not a valid number.</exception>
/// <exception cref="System.InvalidCastException">An argument cannot be converted.</exception>
public static double randbetween(object arg1, object arg2)
{
    // Let conversion failures propagate with their original stack trace.
    // The original wrapped these in `catch (Exception ex) { throw ex; }`,
    // which added nothing and reset the stack trace (`throw;` semantics lost).
    double a = Convert.ToDouble(arg1);
    double b = Convert.ToDouble(arg2);

    // NOTE(review): allocates a fresh random source per call; consider reusing
    // a shared/static source if this is invoked in a tight loop.
    return new MathNet.Numerics.Random.SystemRandomSource().NextDouble() * (b - a) + a;
}
/// <summary>
/// Entry point for exercise 3_nn: loads the handwritten-digit dataset and the
/// pre-trained network weights, reports the training-set accuracy of the
/// feedforward prediction, then interactively displays and classifies
/// randomly chosen examples until the user quits.
/// </summary>
static void Main(string[] args)
{
    if (!System.Console.IsOutputRedirected)
    {
        System.Console.Clear();
    }

    CultureInfo.CurrentCulture = CultureInfo.CreateSpecificCulture("en-US");

    System.Console.WriteLine("Neural Networks ex.3_nn");
    System.Console.WriteLine("==================================================\n");

    var M = Matrix<double>.Build;
    var V = Vector<double>.Build;

    // =========== Part 1: Loading and Visualizing Data =============
    // We start the exercise by first loading and visualizing the dataset.
    // You will be working with a dataset that contains handwritten digits.
    System.Console.WriteLine("Loading and Visualizing Data ...\n");

    // Read all matrices of the file by name into a dictionary.
    Dictionary<string, Matrix<double>> mr = MatlabReader.ReadAll<double>("data\\ex3data1.mat");
    Matrix<double> X = mr["X"];
    Vector<double> y = mr["y"].Column(0);
    int m = X.RowCount; // number of training examples (was stored in a Double and compared to int)

    // Randomly select 100 row indices in [0, 5000) to display
    // (NOTE(review): the sequence may repeat indices — not a true sample
    // without replacement).
    var srs = new MathNet.Numerics.Random.SystemRandomSource();
    var seq = srs.NextInt32Sequence(0, 5000).Take(100).ToList();

    Vector<double>[] sel = new Vector<double>[100];
    int idx = 0;
    foreach (int i in seq)
    {
        sel[idx++] = X.Row(i);
    }
    DisplayData(sel);
    Pause();

    // ================ Part 2: Loading Parameters ================
    // Load some pre-initialized neural network parameters.
    System.Console.WriteLine("\nLoading Saved Neural Network Parameters ...\n");
    mr = MatlabReader.ReadAll<double>("data\\ex3weights.mat");
    Matrix<double> theta1 = mr["Theta1"]; // 25 X 401
    Matrix<double> theta2 = mr["Theta2"]; // 10 X 26
    Pause();

    // ================= Part 3: Implement Predict =================
    // Use the trained network to predict labels for the whole training set
    // and compute the training-set accuracy.
    Vector<double> pred = Predict(theta1, theta2, X);
    Vector<double> comp = V.Dense(y.Count);
    for (int i = 0; i < y.Count; i++)
    {
        comp[i] = pred[i] == y[i] ? 1 : 0;
    }
    double accuracy = comp.Mean() * 100;
    System.Console.WriteLine("\nTraining Set Accuracy: {0:f5}\n", accuracy);

    // Pick m random example indices to walk through interactively
    // (NOTE(review): again not a true permutation — duplicates possible).
    seq = srs.NextInt32Sequence(0, 5000).Take(5000).ToList();
    for (int i = 0; i < m; i++)
    {
        // Show the example and the network's prediction for it.
        DisplayData(new[] { X.Row(seq[i]) });
        Matrix<double> x = M.DenseOfRowVectors(new[] { X.Row(seq[i]) });
        pred = Predict(theta1, theta2, x);
        double p = pred[0];
        // Label 10 encodes digit 0, hence the modulo for display.
        System.Console.WriteLine("\nNeural Network Prediction: {0:N0} (digit {1:N0})\n", p, p % 10);

        // Pause with quit option; ReadLine can return null when input is
        // redirected, so guard before calling ToLower (was a latent NRE).
        System.Console.WriteLine("Paused - press enter to continue, q to exit:");
        string s = Console.ReadLine();
        if (s != null && s.ToLower() == "q")
        {
            break;
        }
    }
    Pause();
}
/// <summary>
/// Re-seeds the shared random source <c>r</c> with the given seed.
/// The second constructor argument requests a thread-safe source.
/// </summary>
/// <param name="seed">Seed value for reproducible random sequences.</param>
public static void SetSeed(int seed)
{
    r = new MathNet.Numerics.Random.SystemRandomSource(seed, true);
}
/// <summary>
/// Entry point for exercise 3: tests the vectorized regularized
/// logistic-regression cost/gradient, trains one-vs-all classifiers on the
/// handwritten-digit dataset, and reports the training-set accuracy.
/// </summary>
static void Main(string[] args)
{
    if (!System.Console.IsOutputRedirected)
    {
        System.Console.Clear();
    }

    CultureInfo.CurrentCulture = CultureInfo.CreateSpecificCulture("en-US");

    System.Console.WriteLine("Multi-class Classification and Neural Networks ex.3");
    System.Console.WriteLine("================================================\n");

    var M = Matrix<double>.Build;
    var V = Vector<double>.Build;

    // Read all matrices of the file by name into a dictionary.
    Dictionary<string, Matrix<double>> ms = MatlabReader.ReadAll<double>("data\\ex3data1.mat");
    Matrix<double> X = ms["X"];
    Vector<double> y = ms["y"].Column(0);

    // Randomly select 100 row indices in [0, 5000) to display
    // (NOTE(review): duplicates possible — not a sample without replacement).
    var srs = new MathNet.Numerics.Random.SystemRandomSource();
    var seq = srs.NextInt32Sequence(0, 5000).Take(100).ToList();

    Vector<double>[] sel = new Vector<double>[100];
    int idx = 0;
    foreach (int i in seq)
    {
        sel[idx++] = X.Row(i);
    }
    DisplayData(sel);
    Pause();

    // ============ Part 2a: Vectorize Logistic Regression ============
    // Reuse the logistic regression code from the last exercise and make sure
    // the regularized implementation is vectorized, then do one-vs-all
    // classification for the handwritten digit dataset.
    //
    // Test case for the regularized cost function.
    System.Console.WriteLine("\nTesting Cost Function with regularization");
    Vector<double> theta_t = V.DenseOfArray(new[] { -2.0, -1, 1, 2 });
    Matrix<double> X_t = M.DenseOfArray(new[,]
    {
        { 1.0, 0.1, 0.6, 1.1 },
        { 1.0, 0.2, 0.7, 1.2 },
        { 1.0, 0.3, 0.8, 1.3 },
        { 1.0, 0.4, 0.9, 1.4 },
        { 1.0, 0.5, 1.0, 1.5 },
    });
    Vector<double> y_t = V.DenseOfArray(new[] { 1.0, 0, 1, 0, 1 });
    double lambda_t = 3; // regularization strength (was declared int)

    LogisticRegression lr = new LogisticRegression(X_t, y_t);
    lr.Lambda = lambda_t;
    double J = lr.Cost(theta_t);
    Vector<double> grad = lr.Gradient(theta_t);

    System.Console.WriteLine("\nCost: {0:f5}\n", J);
    System.Console.WriteLine("Expected cost: 2.534819\n");
    System.Console.WriteLine("Gradients:\n");
    System.Console.WriteLine(" {0:f5} \n", grad);
    System.Console.WriteLine("Expected gradients:\n");
    System.Console.WriteLine(" 0.146561\n -0.548558\n 0.724722\n 1.398003\n");
    Pause();

    // ============ Part 2b: One-vs-All Training ============
    System.Console.WriteLine("\nTraining One-vs-All Logistic Regression...\n");
    double lambda = 0.1;
    int num_labels = 10;
    Matrix<double> all_theta = OneVsAll(X, y, num_labels, lambda);
    Pause();

    // ================ Part 3: Predict for One-Vs-All ================
    Vector<double> pred = PredictOneVsAll(all_theta, X);
    Vector<double> comp = V.Dense(y.Count);
    for (int i = 0; i < y.Count; i++)
    {
        comp[i] = pred[i] == y[i] ? 1 : 0;
    }
    double accuracy = comp.Mean() * 100;
    System.Console.WriteLine("\nTraining Set Accuracy: {0:f5}\n", accuracy);
    Pause();
}
/// <summary>
/// Entry point for exercise 4: feedforward cost with and without
/// regularization, sigmoid gradient, random weight initialization,
/// backpropagation gradient checking, L-BFGS training of the two-layer
/// network, and final training-set accuracy.
/// </summary>
static void Main(string[] args)
{
    if (!System.Console.IsOutputRedirected)
    {
        System.Console.Clear();
    }

    CultureInfo.CurrentCulture = CultureInfo.CreateSpecificCulture("en-US");

    System.Console.WriteLine("Multi-class Classification and Neural Networks ex.4");
    System.Console.WriteLine("===================================================\n");

    var V = Vector<double>.Build;

    // Setup the parameters used for this exercise.
    int input_layer_size = 400;  // 20x20 input images of digits
    int hidden_layer_size = 25;  // 25 hidden units
    int num_labels = 10;         // 10 labels, from 1 to 10 ("0" is mapped to label 10)

    // =========== Part 1: Loading and Visualizing Data =============
    // Read all matrices of the file by name into a dictionary.
    Dictionary<string, Matrix<double>> ms = MatlabReader.ReadAll<double>("data\\ex3data1.mat");
    Matrix<double> X = ms["X"];
    Vector<double> y = ms["y"].Column(0);

    // Randomly select 100 row indices in [0, 5000) to display
    // (NOTE(review): duplicates possible — not a sample without replacement).
    var srs = new MathNet.Numerics.Random.SystemRandomSource();
    var seq = srs.NextInt32Sequence(0, 5000).Take(100).ToList();

    Vector<double>[] sel = new Vector<double>[100];
    int idx = 0;
    foreach (int i in seq)
    {
        sel[idx++] = X.Row(i);
    }
    DisplayData(sel);
    Pause();

    // ================ Part 2: Loading Parameters ================
    // Load some pre-initialized neural network parameters.
    System.Console.WriteLine("\nLoading Saved Neural Network Parameters ...\n");
    Dictionary<string, Matrix<double>> mr = MatlabReader.ReadAll<double>("data\\ex3weights.mat");
    Matrix<double> theta1 = mr["Theta1"]; // 25 X 401
    Matrix<double> theta2 = mr["Theta2"]; // 10 X 26

    // Unroll both weight matrices into a single parameter vector.
    Vector<double> nn_params = NeuralNetwork.UnrollParameters(theta1, theta2);
    Pause();

    // ================ Part 3: Compute Cost (Feedforward) ================
    // Feedforward cost *without* regularization first (lambda = 0), verified
    // against the known value for the fixed debugging parameters.
    System.Console.WriteLine("\nFeedforward Using Neural Network ...\n");
    NeuralNetwork nn = new NeuralNetwork(X, y, input_layer_size, hidden_layer_size, num_labels);
    nn.Lambda = 0.0;
    double J = nn.Cost(nn_params);
    System.Console.WriteLine("Cost at parameters (loaded from ex4weights): {0:f6}\n(this value should be about 0.287629)\n", J);
    Pause();

    // =============== Part 4: Implement Regularization ===============
    System.Console.WriteLine("\nChecking Cost Function (w/ Regularization) ... \n");
    nn.Lambda = 1.0;
    J = nn.Cost(nn_params);
    System.Console.WriteLine("Cost at parameters (loaded from ex4weights): {0:f6} \n(this value should be about 0.383770)\n", J);
    Pause();

    // ================ Part 5: Sigmoid Gradient ================
    System.Console.WriteLine("\nEvaluating sigmoid gradient...\n");
    var g = nn.SigmoidGradient(V.DenseOfArray(new[] { -1.0, -0.5, 0, 0.5, 1 }));
    System.Console.WriteLine("Sigmoid gradient evaluated at [-1 -0.5 0 0.5 1]:\n ");
    System.Console.WriteLine("{0:f5} ", g);
    System.Console.WriteLine("\n\n");
    Pause();

    // ================ Part 6: Initializing Parameters ================
    System.Console.WriteLine("\nInitializing Neural Network Parameters ...\n");
    // NOTE(review): these calls produce (L_in + 1) x L_out matrices — the
    // transpose of the Theta1/Theta2 shapes loaded above; confirm this matches
    // the reshape convention used by NeuralNetwork.UnrollParameters.
    Matrix<double> initial_Theta1 = RandInitializeWeights(input_layer_size + 1, hidden_layer_size);
    Matrix<double> initial_Theta2 = RandInitializeWeights(hidden_layer_size + 1, num_labels);
    Vector<double> initial_nn_params = NeuralNetwork.UnrollParameters(initial_Theta1, initial_Theta2);
    Pause();

    // =============== Part 7: Implement Backpropagation ===============
    System.Console.WriteLine("\nChecking Backpropagation... \n");
    CheckGradient(0);
    Pause();

    // =============== Part 8: Implement Regularization ===============
    System.Console.WriteLine("\nChecking Backpropagation (w/ Regularization) ... \n");
    double lambda = 3;
    CheckGradient(lambda);

    // Also output the cost-function debugging value at the fixed parameters.
    nn.Lambda = lambda;
    double debug_J = nn.Cost(nn_params);
    System.Console.WriteLine("\n\nCost at (fixed) debugging parameters (w/ lambda = {0:f1}): {1:f6} " +
        "\n(for lambda = 3, this value should be about 0.576051)\n\n", lambda, debug_J);
    Pause();

    // =================== Part 9: Training NN ===================
    // (the original comment repeated "Part 8" here)
    // Train the network with L-BFGS, an advanced optimizer that only needs
    // the cost and gradient functions.
    System.Console.WriteLine("\nTraining Neural Network... \n");

    // After completing the assignment, increase MaxIter to see how more
    // training helps; also try different values of lambda.
    int maxIter = 40;
    lambda = 1;
    nn.Lambda = lambda;

    var obj = ObjectiveFunction.Gradient(nn.Cost, nn.Gradient);
    var solver = new LimitedMemoryBfgsMinimizer(1e-5, 1e-5, 1e-5, 5, maxIter);
    var result = solver.FindMinimum(obj, initial_nn_params);

    System.Console.WriteLine("Reason For Exit: {0}", result.ReasonForExit);
    System.Console.WriteLine("Iterations: {0}", result.Iterations);
    System.Console.WriteLine("Cost: {0:e}", result.FunctionInfoAtMinimum.Value);
    Pause();

    // ================= Part 10: Implement Predict =================
    // Predict labels for the training set with the learned weights and
    // compute the training-set accuracy.
    Vector<double> pred = nn.Predict(result.MinimizingPoint, X);
    Vector<double> comp = V.Dense(y.Count);
    for (int i = 0; i < y.Count; i++)
    {
        comp[i] = pred[i] == y[i] ? 1 : 0;
    }
    double accuracy = comp.Mean() * 100;
    System.Console.WriteLine("\nTraining Set Accuracy: {0:f5}\n", accuracy);
    Pause();
}