// NOTE(review): truncated fragment — the body ends inside the node-distance
// loop (the non-parallel close of the "#if parallel" block and the rest of the
// epoch are not visible here), so only formatting and comments are restored.
//
// One SOM training epoch: repeatedly picks a random feature row, computes each
// map node's squared Euclidean distance to that row, and tracks the
// best-matching unit (bmu). The lock serializes the bmu comparison when the
// parallel build ("#if parallel") is active.
private void TrainEpoch(VMatrix features, VMatrix labels)
{
    double minDistance;
    Node bmu;
    object lo = new object();
    Console.Write("TrainEpoch ");
    int cl = Console.CursorLeft;
    // default: ten passes over the data when no iteration count was configured
    if (m_iterations < 1)
    {
        m_iterations = features.Rows() * 10;
    }
    double mapRadius = (double)m_gridSize / 2;
    // time constant for the (presumably exponential) neighborhood-radius decay
    // schedule — the decay itself is not visible in this fragment
    double timeConstant = (double)m_iterations / Math.Log(mapRadius);
    for (int iteration = 0; iteration < m_iterations; iteration++)
    {
        int row = m_rand.Next(features.Rows());
        minDistance = double.MaxValue;
        bmu = null;
        // in-place progress output every 100 iterations (and on the last one)
        if (((iteration % 100) == 0) || (iteration == (m_iterations - 1)))
        {
            Console.SetCursorPosition(cl, Console.CursorTop);
            Console.Write(iteration);
        }
        // calculate the distance
#if parallel
        Parallel.ForEach(m_layers[0], node =>
#else
        foreach (var node in m_layers[0])
#endif
        {
            node.distance = 0;
            // squared Euclidean distance between the row and this node's weights
            for (var w = 0; w < node.weights.Length; w++)
            {
                node.distance += (features.Get(row, w) - node.weights[w]) * (features.Get(row, w) - node.weights[w]);
            }
            // serialize the best-matching-unit update (shared across threads)
            lock (lo)
            {
                if (node.distance < minDistance)
                {
                    minDistance = node.distance;
                    bmu = node;
                }
            }
#if parallel
        });
// Computes the representative label of a cluster and stores it in labels[0].
// Continuous label (fewer than 2 declared values): the mean of the member
// labels. Nominal label: the majority class among the member labels.
// Rows whose label is Matrix.MISSING are ignored in both modes.
private void GetOutput(Cluster cluster, double[] labels)
{
    bool isContinuous = _labels.ValueCount(0) < 2;
    int[] count = new int[isContinuous ? 1 : _labels.ValueCount(0)];
    double result = 0;

    // tally the member labels
    for (var i = 0; i < cluster.Instances.Count; i++)
    {
        double value = _labels.Get(cluster.Instances[i], 0);
        if (value == Matrix.MISSING)
        {
            // skip missing labels (previously only the nominal branch did this,
            // so missing sentinels could corrupt the continuous mean)
            continue;
        }
        if (isContinuous)
        {
            result += value;
            count[0]++;
        }
        else
        {
            count[(int)value]++;
        }
    }

    if (isContinuous)
    {
        // BUG FIX: the mean was previously computed but never written to
        // labels[0], so the continuous output was silently discarded.
        // Guard against an empty cluster to avoid 0/0 = NaN.
        labels[0] = count[0] > 0 ? result / count[0] : Matrix.MISSING;
    }
    else
    {
        // majority vote; ties keep the lowest class index
        double max = count[0];
        labels[0] = 0;
        for (var c = 1; c < count.Length; c++)
        {
            if (count[c] > max)
            {
                labels[0] = c;
                max = count[c];
            }
        }
    }
}
// Creates a cluster seeded from a single feature row: the centroid starts as
// a copy of that row's values and the member-instance list starts out empty.
public Cluster(int number, VMatrix features, int row, List<int> ignore)
{
    Number = number;
    Features = features;
    Ignore = ignore;
    Instances = new List<int>();

    var cols = features.Cols();
    Centroid = new double[cols];
    for (var c = 0; c < cols; c++)
    {
        Centroid[c] = features.Get(row, c);
    }
}
// NOTE(review): truncated fragment — the body ends inside the forward pass
// (the non-parallel close and the remainder of the epoch are not visible), so
// only formatting and comments are restored. sse is declared but not yet used
// within the visible portion.
//
// One training epoch: for each row, run a forward pass where every node's net
// is the weighted sum of its inputs (features for layer 0, previous layer's
// outputs otherwise) plus a bias, then squashed by Activation().
private double TrainEpoch(int epoch, VMatrix features, VMatrix labels)
{
    double sse = 0;
    object lo = new object();
    Console.Write("TrainEpoch ");
    int cl = Console.CursorLeft;
    for (var row = 0; row < features.Rows(); row++)
    {
        // in-place progress output every 100 rows (and on the last one)
        if (((row % 100) == 0) || (row == (features.Rows() - 1)))
        {
            Console.SetCursorPosition(cl, Console.CursorTop);
            Console.Write(row);
        }
        // calculate the output
        for (var layer = 0; layer < m_layers.Count; layer++)
        {
#if parallel
            Parallel.ForEach(m_layers[layer], node =>
#else
            foreach (var node in m_layers[layer])
#endif
            {
                node.net = 0;
                // calculate the net value
                for (var w = 0; w < node.weights.Length - 1; w++)
                {
                    if (layer == 0)
                    {
                        node.net += node.weights[w] * features.Get(row, w);
                    }
                    else
                    {
                        node.net += node.weights[w] * m_layers[layer - 1][w].output;
                    }
                }
                // add the bias (stored in the last weight slot)
                node.net += node.weights[node.weights.Length - 1];
                node.output = Activation(node.net);
#if parallel
            });
// NOTE(review): truncated fragment — the body ends inside the forward pass;
// only formatting and comments are restored. currLayer presumably selects the
// layer being trained in the (unseen) remainder — TODO confirm downstream.
//
// Forward pass per row: layer-0 nodes pass the feature value straight through;
// deeper nodes compute a biased weighted sum and apply the activation selected
// by m_activation ("relu" here is a thresholded two-slope variant using the
// node's own threshold/alpha/beta; also "softsign", "softplus", default
// logistic sigmoid).
private void TrainEpoch(int epoch, VMatrix features, VMatrix labels, int currLayer)
{
    object lo = new object();
    Console.Write("TrainEpoch ");
    int cl = Console.CursorLeft;
    for (var row = 0; row < features.Rows(); row++)
    {
        // in-place progress output every 100 rows (and on the last one)
        if (((row % 100) == 0) || (row == (features.Rows() - 1)))
        {
            Console.SetCursorPosition(cl, Console.CursorTop);
            Console.Write(row);
        }
        // calculate the output
        for (var layer = 0; layer < m_layers.Count; layer++)
        {
#if parallel
            Parallel.ForEach(m_layers[layer], node =>
#else
            foreach (var node in m_layers[layer])
#endif
            {
                node.net = 0;
                node.output = 0;
                node.error = 0;
                if (layer == 0)
                {
                    // input node
                    node.net = features.Get(row, node.index);
                    node.output = node.net;
                }
                else
                {
                    // calculate the net value
                    for (var w = 0; w < node.weights.Length - 1; w++)
                    {
                        node.net += node.weights[w] * m_layers[layer - 1][w].output;
                    }
                    // add the bias (stored in the last weight slot)
                    node.net += node.weights[node.weights.Length - 1];
                    // calculate the output
                    if (m_activation == "relu")
                    {
                        // two-slope ramp around the node's threshold
                        if (node.net <= node.threshold)
                        {
                            node.output = (node.net - node.threshold) * node.alpha;
                        }
                        else
                        {
                            node.output = (node.net - node.threshold) * node.beta;
                        }
                    }
                    else if (m_activation == "softsign")
                    {
                        node.output = (node.net / (1.0 + Math.Abs(node.net)));
                    }
                    else if (m_activation == "softplus")
                    {
                        node.output = Math.Log(1.0 + Math.Exp(node.net));
                    }
                    else
                    {
                        // default: logistic sigmoid
                        node.output = 1.0 / (1.0 + Math.Exp(-node.net));
                    }
                }
#if parallel
            });
// NOTE(review): truncated fragment — the body ends inside the forward pass;
// only formatting and comments are restored.
//
// Forward pass over one mini-batch of `count` rows starting at startIdx.
// Input-layer nodes pass the feature value straight through; other nodes
// compute a biased weighted sum of the previous layer's outputs and apply the
// activation named by Parameters.Activation (here "relu" is leaky with slope
// 0.01 on the negative side).
private void TrainBatch(VMatrix features, VMatrix labels, int startIdx, int count)
{
    for (var idx = 0; idx < count; idx++)
    {
        var row = startIdx + idx;
        // clamp so a partial final batch re-uses the last row
        if (row > (features.Rows() - 1))
        {
            row = features.Rows() - 1;
        }
        // calculate the output
        foreach (var layer in Layers)
        {
#if parallel
            Parallel.ForEach(layer.Nodes, node =>
#else
            foreach (var node in layer.Nodes)
#endif
            {
                node.Net = 0;
                node.Output = 0;
                node.Error = 0;
                if (layer.Type == LayerType.Input)
                {
                    // input node
                    node.Net = features.Get(row, node.Index);
                    node.Output = node.Net;
                }
                else
                {
                    // calculate the net value
                    for (var w = 0; w < node.Weights.Length - 1; w++)
                    {
                        node.Net += node.Weights[w] * layer.Previous.Nodes[w].Output;
                    }
                    // add the bias (stored in the last weight slot)
                    node.Net += node.Weights[node.Weights.Length - 1];
                    // calculate the output
                    switch (Parameters.Activation)
                    {
                        case "relu":
                            // leaky ReLU, negative slope 0.01
                            node.Output = node.Net < 0 ? 0.01 * node.Net : node.Net;
                            break;
                        case "softsign":
                            node.Output = (node.Net / (1.0 + Math.Abs(node.Net)));
                            break;
                        case "softplus":
                            node.Output = Math.Log(1.0 + Math.Exp(node.Net));
                            break;
                        default:
                            // logistic sigmoid
                            node.Output = 1.0 / (1.0 + Math.Exp(-node.Net));
                            break;
                    }
                }
#if parallel
            });
// NOTE(review): truncated fragment — the body ends inside the forward pass;
// only formatting and comments are restored. sse and trainAll are not used in
// the visible portion; aFile is opened but its per-row writes/close are not
// visible here.
//
// One epoch of DAE/fine-tuning training. On the first non-DAE epoch an ARFF
// header for the encoded features is written to dbnTrain.arff. During a DAE
// pass, the encoding layer's outputs (layer Count - 3) are randomly zeroed
// with probability m_corruptLevel (denoising corruption).
private double TrainEpoch(int epoch, VMatrix features, VMatrix labels, bool isDAE, bool trainAll)
{
    double sse = 0;
    object lo = new object();
    Console.Write("TrainEpoch ");
    int cl = Console.CursorLeft;
    StreamWriter aFile = null;
    if (!isDAE && (epoch == 1))
    {
        aFile = File.CreateText("dbnTrain.arff");
        aFile.WriteLine("@RELATION DAE");
        aFile.WriteLine();
        // one attribute per node of the encoding layer
        for (var i = 1; i <= m_layers[m_layers.Count - 3].Count; i++)
        {
            aFile.WriteLine($"@ATTRIBUTE hn{i} real");
        }
        aFile.WriteLine("@ATTRIBUTE class {0,1,2,3,4,5,6,7,8,9}");
        aFile.WriteLine();
        aFile.WriteLine("@DATA");
    }
    for (var row = 0; row < features.Rows(); row++)
    {
        // in-place progress output every 100 rows (and on the last one)
        if (((row % 100) == 0) || (row == (features.Rows() - 1)))
        {
            Console.SetCursorPosition(cl, Console.CursorTop);
            Console.Write(row);
        }
        // calculate the output
        for (var layer = 0; layer < m_layers.Count; layer++)
        {
#if parallel
            Parallel.ForEach(m_layers[layer], node =>
#else
            foreach (var node in m_layers[layer])
#endif
            {
                node.net = 0;
                node.output = 0;
                node.output2 = 0;
                node.error = 0;
                if (layer == 0)
                {
                    // input node
                    node.output = features.Get(row, node.index);
                    node.output2 = node.output;
                }
                else
                {
                    // calculate the net value
                    for (var w = 0; w < node.weights.Length - 1; w++)
                    {
                        node.net += node.weights[w] * m_layers[layer - 1][w].output;
                    }
                    // add the bias (stored in the last weight slot)
                    node.net += node.weights[node.weights.Length - 1];
                    // calculate the output; output2 keeps the uncorrupted copy
                    node.output = Activation(node.net);
                    node.output2 = node.output;
                }
                // denoising: randomly zero the encoding layer's outputs
                // (lock because m_rand is shared and not thread-safe)
                if (isDAE && (layer == m_layers.Count - 3) && (node.output != 0))
                {
                    lock (lo)
                    {
                        // corrupt the output
                        if (m_rand.NextDouble() < m_corruptLevel)
                        {
                            node.output = 0;
                        }
                    }
                }
#if parallel
            });
// Calculate the MSE.
// Streams the rows into the recurrent inputs one at a time; the first
// m_k - 1 rows only prime the input history and contribute no error, so the
// denominator is Rows() - m_k + 1. Nominal targets map to 0.9 (match) /
// 0.1 (non-match); node outputs use the logistic sigmoid.
public override double VGetMSE(VMatrix features, VMatrix labels)
{
    double sumSquared = 0;
    var progressCol = 0;

    if (Parameters.Verbose)
    {
        Console.Write("VGetMSE ");
        progressCol = Console.CursorLeft;
    }

    for (var r = 0; r < features.Rows(); r++)
    {
        if (Parameters.Verbose)
        {
            Console.SetCursorPosition(progressCol, Console.CursorTop);
            Console.Write(r);
        }

        SetInputs(features, r);

        // skip rows until a full window of m_k inputs has been fed in
        if (r < m_k - 1)
        {
            continue;
        }

        // forward pass through the non-input layers
        for (var layer = 1; layer < m_layers.Count; layer++)
        {
            Parallel.ForEach(m_layers[layer], node =>
            {
                if (node is InputNode)
                {
                    return;
                }
                node.net = 0;
                node.output = 0;
                for (var w = 0; w < node.weights.Length - 1; w++)
                {
                    node.net += node.weights[w] * m_layers[layer - 1][w].output;
                }
                // bias weight is stored in the last slot
                node.net += node.weights[node.weights.Length - 1];
                node.output = 1.0 / (1.0 + Math.Exp(-node.net));
            });
        }

        // accumulate the squared error of the output layer
        foreach (OutputNode node in m_layers[m_layers.Count - 1])
        {
            var target = labels.Get(r, node.labelCol);
            if (!node.isContinuous)
            {
                // nominal target encoding
                target = target == node.labelVal ? 0.9 : 0.1;
            }
            var error = target - node.output;
            sumSquared += error * error;
        }
    }

    if (Parameters.Verbose)
    {
        Console.WriteLine();
    }

    return sumSquared / (features.Rows() - m_k + 1);
}
// One BPTT training epoch. Each step picks a random valid end row, primes the
// recurrent inputs with the preceding m_k rows, runs a forward pass, then
// backpropagates. Weight deltas for hidden nodes are applied to the layer-1
// "template" nodes (m_layers[1][idx]) rather than to the unrolled copies —
// this is the weight sharing of the unrolled network; CopyWeights()
// presumably propagates the templates back to the copies (not visible here —
// confirm at its definition). Code is unchanged; comments only, because the
// statement ordering (SetInputs priming, shared-sse lock, idx advance) is
// order-sensitive.
private double TrainEpoch(int epoch, VMatrix features, VMatrix labels)
{
    double sse = 0;
    var lo = new object();
    var cl = 0;
    if (Parameters.Verbose)
    {
        Console.Write("TrainEpoch ");
        cl = Console.CursorLeft;
    }
    for (var rowCount = 1; rowCount <= features.Rows(); rowCount++)
    {
        if (Parameters.Verbose)
        {
            Console.SetCursorPosition(cl, Console.CursorTop);
            Console.Write(rowCount);
        }
        // random end row, chosen so the m_k-row history window stays in range
        var row = m_rand.Next(features.Rows() - m_k + 1) + m_k - 1;
        // prime the recurrent inputs with the window ending at `row`
        for (var r = row - m_k + 1; r <= row; r++)
        {
            SetInputs(features, r);
        }
        // calculate the output
        for (var layer = 1; layer < m_layers.Count; layer++)
        {
            Parallel.ForEach(m_layers[layer], node =>
            {
                if (!(node is InputNode))
                {
                    node.net = 0;
                    node.output = 0;
                    node.error = 0;
                    // calculate the net value
                    for (var w = 0; w < node.weights.Length - 1; w++)
                    {
                        var nNode = m_layers[layer - 1][w];
                        node.net += node.weights[w] * nNode.output;
                    }
                    // add the bias (stored in the last weight slot)
                    node.net += node.weights[node.weights.Length - 1];
                    // calculate the output (logistic sigmoid)
                    node.output = 1.0 / (1.0 + Math.Exp(-node.net));
                }
            });
        }
        // calculate the error and weight changes, output layer first
        for (var layer = m_layers.Count - 1; layer > 0; layer--)
        {
            Parallel.ForEach(m_layers[layer], node =>
            {
                if (!(node is InputNode))
                {
                    // derivative of the sigmoid at this node's output
                    var fPrime = node.output * (1.0 - node.output);
                    if (node is OutputNode)
                    {
                        // output layer
                        var oNode = node as OutputNode;
                        var target = labels.Get(row, oNode.labelCol);
                        if (!oNode.isContinuous)
                        {
                            // nominal: 0.9 for the matching class, 0.1 otherwise
                            if (target == oNode.labelVal)
                            {
                                target = 0.9;
                            }
                            else
                            {
                                target = 0.1;
                            }
                        }
                        var error = target - node.output;
                        node.error = error * fPrime;
                        // sse is shared across the parallel loop
                        lock (lo)
                        {
                            sse += error * error;
                        }
                    }
                    else
                    {
                        // hidden layer: backpropagate from the layer above
                        double sum = 0;
                        foreach (var tn in m_layers[layer + 1])
                        {
                            if (!(tn is InputNode))
                            {
                                sum += tn.error * tn.weights[node.index];
                            }
                        }
                        node.error = sum * fPrime;
                    }
                    // calculate the weight changes (with momentum)
                    double delta;
                    for (var w = 0; w < node.weights.Length - 1; w++)
                    {
                        var dNode = m_layers[layer - 1][w];
                        delta = m_rate * node.error * dNode.output;
                        delta += m_momentum * node.deltas[w];
                        node.deltas[w] = delta;
                    }
                    // calculate the bias weight change
                    delta = m_rate * node.error;
                    delta += m_momentum * node.deltas[node.weights.Length - 1];
                    node.deltas[node.weights.Length - 1] = delta;
                }
            });
        }
        // update the weights
        for (var layer = 1; layer < m_layers.Count; layer++)
        {
            // idx walks the layer-1 template hidden nodes (after the inputs)
            var idx = m_inputs;
            foreach (var node in m_layers[layer])
            {
                if (node is OutputNode)
                {
                    for (var w = 0; w < node.weights.Length; w++)
                    {
                        node.weights[w] += node.deltas[w];
                    }
                }
                else if (node is HiddenNode)
                {
                    // shared weights: apply this copy's deltas to the template
                    var dNode = m_layers[1][idx++] as HiddenNode;
                    for (var w = 0; w < node.weights.Length; w++)
                    {
                        dNode.weights[w] += node.deltas[w];
                    }
                }
            }
        }
        CopyWeights();
    }
    if (Parameters.Verbose)
    {
        Console.WriteLine();
    }
    return(sse / features.Rows());
}
// NOTE(review): truncated fragment — the body ends inside the forward pass;
// only formatting and comments are restored. trainAll is not used in the
// visible portion.
//
// Forward pass per row. Activation is selected by m_activation; this variant's
// "relu" is a thresholded leak (slope m_actLeak below node.threshold, slope
// m_actSlope above). When `corrupt` is set, the encoding layer's outputs
// (layer Count - 3) are randomly zeroed with probability m_corruptLevel.
private void TrainEpoch(int epoch, VMatrix features, VMatrix labels, bool corrupt, bool trainAll)
{
    var lo = new object();
    Console.Write("TrainEpoch ");
    var cl = Console.CursorLeft;
    for (var row = 0; row < features.Rows(); row++)
    {
        // in-place progress output every 100 rows (and on the last one)
        if (((row % 100) == 0) || (row == (features.Rows() - 1)))
        {
            Console.SetCursorPosition(cl, Console.CursorTop);
            Console.Write(row);
        }
        // calculate the output
        for (var layer = 0; layer < m_layers.Count; layer++)
        {
#if parallel
            Parallel.ForEach(m_layers[layer], node =>
#else
            foreach (var node in m_layers[layer])
#endif
            {
                node.net = 0;
                node.output = 0;
                node.error = 0;
                if (layer == 0)
                {
                    // input node
                    node.output = features.Get(row, node.index);
                }
                else
                {
                    // calculate the net value
                    for (var w = 0; w < node.weights.Length - 1; w++)
                    {
                        node.net += node.weights[w] * m_layers[layer - 1][w].output;
                    }
                    // add the bias (stored in the last weight slot)
                    node.net += node.weights[node.weights.Length - 1];
                    // calculate the output
                    if (m_activation == "relu")
                    {
                        // leaky ramp around the node's threshold
                        node.output = (node.net < node.threshold ? ((node.net - node.threshold) * m_actLeak) + node.threshold : node.net * m_actSlope);
                    }
                    else if (m_activation == "softsign")
                    {
                        node.output = (node.net / (1.0 + Math.Abs(node.net)));
                    }
                    else if (m_activation == "softplus")
                    {
                        node.output = Math.Log(1.0 + Math.Exp(node.net));
                    }
                    else
                    {
                        // default: logistic sigmoid
                        node.output = 1.0 / (1.0 + Math.Exp(-node.net));
                    }
                }
                // denoising corruption of the encoding layer
                // (lock because m_rand is shared and not thread-safe)
                if (corrupt && (m_corruptLevel > 0) && (layer == m_layers.Count - 3) && (node.output != 0))
                {
                    lock (lo)
                    {
                        // corrupt the output
                        if (m_rand.NextDouble() < m_corruptLevel)
                        {
                            node.output = 0;
                        }
                    }
                }
#if parallel
            });
// Measures performance on the given data: root mean squared error when the
// label is continuous (ValueCount == 0), otherwise classification accuracy,
// optionally filling a confusion matrix. BPTT special case: the first
// m_k - 1 rows only prime the recurrent inputs — Predict is still called on
// them, but they are excluded from the score (count is reduced accordingly).
// Rows with a missing nominal label are skipped and removed from the
// denominator. Code unchanged; comments only — the Predict side effects and
// the two `count` adjustments are order-sensitive.
public double VMeasureAccuracy(VMatrix features, VMatrix labels, Matrix confusion)
{
    if (features.Rows() != labels.Rows())
    {
        throw (new Exception("Expected the features and labels to have the same number of rows"));
    }
    if (labels.Cols() != 1)
    {
        throw (new Exception("Sorry, this method currently only supports one-dimensional labels"));
    }
    if (features.Rows() == 0)
    {
        throw (new Exception("Expected at least one row"));
    }
    var cl = 0;
    if (Parameters.Verbose)
    {
        Console.Write("VMeasureAccuracy ");
        cl = Console.CursorLeft;
    }
    var count = features.Rows();
    var begRow = 0;
    if (this is BPTT)
    {
        // BPTT needs m_k rows of history before its first scored prediction
        var learner = this as BPTT;
        begRow = learner.m_k - 1;
        count -= begRow;
    }
    var labelValues = labels.ValueCount(0);
    if (labelValues == 0) // If the label is continuous...
    {
        // The label is continuous, so measure root mean squared error
        var pred = new double[1];
        var sse = 0.0;
        for (var i = 0; i < features.Rows(); i++)
        {
            if (Parameters.Verbose)
            {
                Console.SetCursorPosition(cl, Console.CursorTop);
                Console.Write(i);
            }
            var feat = features.Row(i);
            var targ = labels.Row(i);
            pred[0] = 0.0; // make sure the prediction is not biased by a previous prediction
            Predict(feat, pred);
            // rows before begRow only prime the recurrent state
            if (i >= begRow)
            {
                var delta = targ[0] - pred[0];
                sse += (delta * delta);
            }
        }
        if (Parameters.Verbose)
        {
            Console.WriteLine();
        }
        return(Math.Sqrt(sse / count));
    }
    else
    {
        // The label is nominal, so measure predictive accuracy
        if (confusion != null)
        {
            confusion.SetSize(labelValues, labelValues);
            for (var i = 0; i < labelValues; i++)
            {
                confusion.SetAttrName(i, labels.AttrValue(0, i));
            }
        }
        var correctCount = 0;
        var prediction = new double[1];
        for (var i = 0; i < features.Rows(); i++)
        {
            if (Parameters.Verbose)
            {
                Console.SetCursorPosition(cl, Console.CursorTop);
                Console.Write(i);
            }
            var feat = features.Row(i);
            var lab = labels.Get(i, 0);
            if (lab != Matrix.MISSING)
            {
                var targ = (int)lab;
                if (targ >= labelValues)
                {
                    throw new Exception("The label is out of range");
                }
                Predict(feat, prediction);
                // rows before begRow only prime the recurrent state
                if (i >= begRow)
                {
                    var pred = (int)prediction[0];
                    if (confusion != null)
                    {
                        // rows = target class, cols = predicted class
                        confusion.Set(targ, pred, confusion.Get(targ, pred) + 1);
                    }
                    if (pred == targ)
                    {
                        correctCount++;
                    }
                }
            }
            else
            {
                // missing label: drop the row from the denominator
                count--;
            }
        }
        if (Parameters.Verbose)
        {
            Console.WriteLine();
        }
        return((double)correctCount / count);
    }
}
// NOTE(review): truncated fragment — the body ends inside the upward
// (visible-to-hidden) pass; only formatting and comments are restored.
// sse/sseAccum and the fields net2/output2/sample2 are initialized but not yet
// used in the visible portion (presumably for the reconstruction phase of
// contrastive divergence — confirm in the unseen remainder). A large
// commented-out block of hand-seeded debug weights was removed.
//
// Trains hidden layer hLayer of a DBN. For each row, propagates up through
// layers 0..hLayer: each active node computes a sigmoid output from the
// previous layer's activations (stochastic binary samples when m_sample is
// set) and then draws its own binary sample.
private double TrainDBN(int hLayer, int epoch, VMatrix features, VMatrix labels)
{
    double sse;
    double sseAccum = 0;
    object lo = new object();
    Console.Write(string.Format("TrainDBN {0} - ", hLayer));
    int cl = Console.CursorLeft;
    for (var row = 0; row < features.Rows(); row++)
    {
        sse = 0;
        Console.SetCursorPosition(cl, Console.CursorTop);
        Console.Write(row);
        //DropNodes();
        // calculate the output
        for (var layer = 0; layer <= hLayer; layer++)
        {
#if parallel
            Parallel.ForEach(m_layers[layer], node =>
#else
            foreach (var node in m_layers[layer])
#endif
            {
                node.net = 0;
                node.output = 0;
                node.sample = 0;
                node.net2 = 0;
                node.output2 = 0;
                node.sample2 = 0;
                node.error = 0;
                if (node.isActive)
                {
                    if (layer == 0)
                    {
                        // input node: the feature value is its own "sample"
                        node.output = features.Get(row, node.index);
                        node.sample = node.output;
                    }
                    else
                    {
                        // calculate the net value from the active nodes below
                        int wCount = m_layers[layer - 1].Count;
                        for (var w = 0; w < wCount; w++)
                        {
                            var nNode = m_layers[layer - 1][w];
                            if (nNode.isActive)
                            {
                                if (m_sample)
                                {
                                    // use the stochastic binary sample
                                    node.net += node.weights[w] * nNode.sample;
                                }
                                else
                                {
                                    // use the real-valued activation
                                    node.net += node.weights[w] * nNode.output;
                                }
                            }
                        }
                        // add the bias (stored after the wCount weights)
                        node.net += node.weights[wCount];
                        // calculate the output (logistic sigmoid)
                        node.output = 1.0 / (1.0 + Math.Exp(-node.net));
                        // sample a binary state; lock because m_rand is shared
                        lock (lo)
                        {
                            node.sample = (m_rand.NextDouble() < node.output ? 1 : 0);
                        }
                    }
                }
#if parallel
            });
// Calculate the MSE.
// Runs a full forward pass per row with dropout's inference-time weight
// scaling: weights leaving the input layer are multiplied by m_pi and weights
// between deeper layers by m_ph (the bias, stored last, is never scaled).
// Nominal targets map to 0.9 (match) / 0.1 (non-match).
public override double VGetMSE(VMatrix features, VMatrix labels)
{
    double sumSquared = 0;

    Console.Write("VGetMSE ");
    var progressCol = Console.CursorLeft;

    for (var r = 0; r < features.Rows(); r++)
    {
        Console.SetCursorPosition(progressCol, Console.CursorTop);
        Console.Write(r);

        // forward pass
        for (var layer = 0; layer < m_layers.Count; layer++)
        {
            Parallel.ForEach(m_layers[layer], node =>
            {
                node.net = 0;
                node.output = 0;
                if (layer == 0)
                {
                    // input node: feature value passes straight through
                    node.output = features.Get(r, node.index);
                }
                else
                {
                    // retention probability for this layer's incoming weights
                    var retain = layer == 1 ? m_pi : m_ph;
                    for (var w = 0; w < node.weights.Length - 1; w++)
                    {
                        node.net += node.weights[w] * retain * m_layers[layer - 1][w].output;
                    }
                    // bias weight (last slot) is not subject to dropout scaling
                    node.net += node.weights[node.weights.Length - 1];
                    node.output = 1.0 / (1.0 + Math.Exp(-node.net));
                }
            });
        }

        // accumulate the squared error over the output layer
        var outputLayer = m_layers[m_layers.Count - 1];
        for (var n = 0; n < outputLayer.Count; n++)
        {
            var node = outputLayer[n] as OutputNode;
            var target = labels.Get(r, node.labelCol);
            if (!node.isContinuous)
            {
                // nominal target encoding
                target = target == node.labelVal ? 0.9 : 0.1;
            }
            var error = target - node.output;
            sumSquared += error * error;
        }
    }

    Console.WriteLine();
    return sumSquared / features.Rows();
}
// One dropout training epoch: for each row, drop a random subset of nodes,
// run a forward pass over the active nodes, backpropagate the error (nominal
// targets encoded as 0.9/0.1), compute momentum-based weight deltas, and
// apply them. Returns the mean squared error over the epoch. `epoch` is
// currently unused but kept for interface compatibility with the other
// learners' TrainEpoch signatures.
private double TrainEpoch(int epoch, VMatrix features, VMatrix labels)
{
    double sse = 0;
    var lo = new object();
    Console.Write("TrainEpoch ");
    var cl = Console.CursorLeft;
    for (var row = 0; row < features.Rows(); row++)
    {
        Console.SetCursorPosition(cl, Console.CursorTop);
        Console.Write(row);

        // choose a fresh random set of active nodes for this presentation
        DropNodes();

        // forward pass: compute every active node's output, layer by layer
        for (var layer = 0; layer < m_layers.Count; layer++)
        {
            Parallel.ForEach(m_layers[layer], node =>
            {
                node.net = 0;
                node.output = 0;
                node.error = 0;
                if (node.isActive)
                {
                    if (layer == 0)
                    {
                        // input node: feature value passes straight through
                        node.output = features.Get(row, node.index);
                    }
                    else
                    {
                        // net value from the active nodes of the layer below
                        for (var w = 0; w < node.weights.Length - 1; w++)
                        {
                            var nNode = m_layers[layer - 1][w];
                            if (nNode.isActive)
                            {
                                node.net += node.weights[w] * nNode.output;
                            }
                        }
                        // bias weight (last slot) is never dropped
                        node.net += node.weights[node.weights.Length - 1];
                        // logistic sigmoid
                        node.output = 1.0 / (1.0 + Math.Exp(-node.net));
                    }
                }
            });
        }

        // backward pass: error terms and weight deltas, output layer first
        for (var layer = m_layers.Count - 1; layer > 0; layer--)
        {
            Parallel.ForEach(m_layers[layer], node =>
            {
                if (node.isActive)
                {
                    // derivative of the sigmoid at this node's output
                    var fPrime = node.output * (1.0 - node.output);
                    if (layer == m_layers.Count - 1)
                    {
                        // output layer: nominal targets map to 0.9 / 0.1
                        var oNode = node as OutputNode;
                        var target = labels.Get(row, oNode.labelCol);
                        if (!oNode.isContinuous)
                        {
                            // nominal
                            target = target == oNode.labelVal ? 0.9 : 0.1;
                        }
                        var error = target - node.output;
                        node.error = error * fPrime;
                        // sse is shared across the parallel loop
                        lock (lo)
                        {
                            sse += error * error;
                        }
                    }
                    else
                    {
                        // hidden layer: backpropagate from the active nodes above
                        double sum = 0;
                        foreach (var tn in m_layers[layer + 1])
                        {
                            if (tn.isActive)
                            {
                                sum += tn.error * tn.weights[node.index];
                            }
                        }
                        node.error = sum * fPrime;
                    }

                    // weight changes (with momentum) for active incoming nodes
                    // (the redundant re-check of node.isActive inside this loop
                    // was removed — it is already guaranteed above)
                    double delta;
                    for (var w = 0; w < node.weights.Length - 1; w++)
                    {
                        var dNode = m_layers[layer - 1][w];
                        if (dNode.isActive)
                        {
                            delta = m_rate * node.error * dNode.output;
                            delta += m_momentum * node.deltas[w];
                            node.deltas[w] = delta;
                        }
                    }
                    // calculate the bias weight change
                    delta = m_rate * node.error;
                    delta += m_momentum * node.deltas[node.weights.Length - 1];
                    node.deltas[node.weights.Length - 1] = delta;
                }
            });
        }

        // apply the deltas
        for (var layer = 1; layer < m_layers.Count; layer++)
        {
            Parallel.ForEach(m_layers[layer], node =>
            {
                if (node.isActive)
                {
                    for (var w = 0; w < node.weights.Length - 1; w++)
                    {
                        var wNode = m_layers[layer - 1][w];
                        if (wNode.isActive)
                        {
                            node.weights[w] += node.deltas[w];
                        }
                    }
                    // BUG FIX: the bias update was previously inside the loop
                    // above, so the bias weight was incremented once per
                    // incoming weight (weights.Length - 1 times) instead of
                    // exactly once per row.
                    node.weights[node.weights.Length - 1] += node.deltas[node.weights.Length - 1];
                }
            });
        }
    }
    Console.WriteLine();
    return sse / features.Rows();
}