/// <inheritdoc/>
public Object Read(Stream istream)
{
    BayesianNetwork result = new BayesianNetwork();
    EncogReadHelper input = new EncogReadHelper(istream);
    EncogFileSection section;
    String queryType = "";
    String queryStr = "";
    String contentsStr = "";

    while ((section = input.ReadNextSection()) != null)
    {
        if (section.SectionName.Equals("BAYES-NETWORK")
            && section.SubSectionName.Equals("BAYES-PARAM"))
        {
            IDictionary<String, String> p = section.ParseParams();
            queryType = p["queryType"];
            queryStr = p["query"];
            contentsStr = p["contents"];
        }
        if (section.SectionName.Equals("BAYES-NETWORK")
            && section.SubSectionName.Equals("BAYES-TABLE"))
        {
            result.Contents = contentsStr;

            // First pass: define the relationships.
            foreach (String line in section.Lines)
            {
                result.DefineRelationship(line);
            }

            result.FinalizeStructure();

            // Second pass: define the probabilities.
            foreach (String line in section.Lines)
            {
                result.DefineProbability(line);
            }
        }
        if (section.SectionName.Equals("BAYES-NETWORK")
            && section.SubSectionName.Equals("BAYES-PROPERTIES"))
        {
            IDictionary<String, String> paras = section.ParseParams();
            EngineArray.PutAll(paras, result.Properties);
        }
    }

    // Define the query, if one was specified.
    if (queryType.Length > 0)
    {
        IBayesianQuery query;
        if (queryType.Equals("EnumerationQuery"))
        {
            query = new EnumerationQuery(result);
        }
        else
        {
            query = new SamplingQuery(result);
        }

        if (queryStr.Length > 0)
        {
            result.Query = query;
            result.DefineClassificationStructure(queryStr);
        }
    }

    return result;
}
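// Illustrative sketch only (not part of the reader above): the two-pass order
// the reader depends on. All relationships must be defined and
// FinalizeStructure() called before any probability line is parsed. The event
// names and the exact line grammar below are assumptions for illustration;
// real lines come from the BAYES-TABLE section of the EG file.
public static BayesianNetwork TwoPassExample()
{
    var network = new BayesianNetwork();
    network.DefineRelationship("P(b|a)");      // pass 1: structure only
    network.FinalizeStructure();
    network.DefineProbability("P(+b|+a)=0.5"); // pass 2: probabilities
    return network;
}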
/// <inheritdoc/>
public void Write(double[] input, double[] ideal, double significance)
{
    EngineArray.ArrayCopy(input, _input[_index]);
    EngineArray.ArrayCopy(ideal, _ideal[_index]);
    _index++;
}
/// <summary>
/// Derive the minimum, using a conjugate gradient method.
/// </summary>
/// <param name="maxIterations">The maximum number of iterations.</param>
/// <param name="maxError">Stop at this error rate.</param>
/// <param name="eps">The machine precision.</param>
/// <param name="tol">The convergence tolerance.</param>
/// <param name="network">The network to get the error from.</param>
/// <param name="n">The number of variables.</param>
/// <param name="x">The independent variable.</param>
/// <param name="ystart">The starting value for y.</param>
/// <param name="bs">Work vector, must have n elements.</param>
/// <param name="direc">Work vector, must have n elements.</param>
/// <param name="g">Work vector, must have n elements.</param>
/// <param name="h">Work vector, must have n elements.</param>
/// <param name="deriv2">Work vector, must have n elements.</param>
/// <returns>The best error found.</returns>
public double Calculate(int maxIterations, double maxError, double eps,
                        double tol, ICalculationCriteria network, int n,
                        double[] x, double ystart, double[] bs,
                        double[] direc, double[] g, double[] h,
                        double[] deriv2)
{
    var globalMinimum = new GlobalMinimumSearch();

    double fbest = network.CalcErrorWithMultipleSigma(x, direc, deriv2, true);
    double prevBest = 1.0e30d;
    for (int i = 0; i < n; i++)
    {
        direc[i] = -direc[i];
    }

    EngineArray.ArrayCopy(direc, g);
    EngineArray.ArrayCopy(direc, h);

    int convergenceCounter = 0;
    int poorCj = 0;

    // Main loop
    for (int iteration = 0; iteration < maxIterations; iteration++)
    {
        if (fbest < maxError)
        {
            break;
        }

        EncogLogging.Log(EncogLogging.LevelInfo,
                         "Beginning internal iteration #" + iteration
                         + ", currentError=" + fbest + ", target=" + maxError);

        // Check for convergence.
        double toler;
        if (prevBest <= 1.0d)
        {
            toler = tol;
        }
        else
        {
            toler = tol * prevBest;
        }

        // Stop if there is little improvement.
        if ((prevBest - fbest) <= toler)
        {
            if (++convergenceCounter >= 3)
            {
                break;
            }
        }
        else
        {
            convergenceCounter = 0;
        }

        double dot1 = 0, dot2 = 0, dlen = 0;
        double high = 1.0e-4d;
        for (int i = 0; i < n; i++)
        {
            bs[i] = x[i];
            if (deriv2[i] > high)
            {
                high = deriv2[i];
            }
            dot1 += direc[i] * g[i];                 // Directional first derivative
            dot2 += direc[i] * direc[i] * deriv2[i]; // ... and second
            dlen += direc[i] * direc[i];             // Length of search vector
        }

        double scale;
        if (Math.Abs(dot2) < EncogFramework.DefaultDoubleEqual)
        {
            scale = 0;
        }
        else
        {
            scale = dot1 / dot2;
        }
        high = 1.5d / high;
        if (high < 1.0e-4d)
        {
            high = 1.0e-4d;
        }

        if (scale < 0.0d)
        {
            scale = high;
        }
        else if (scale < 0.1d * high)
        {
            scale = 0.1d * high;
        }
        else if (scale > 10.0d * high)
        {
            scale = 10.0d * high;
        }

        prevBest = fbest;
        globalMinimum.Y2 = fbest;

        globalMinimum.FindBestRange(0.0d, 2.0d * scale, -3, false, maxError,
                                    network);

        if (globalMinimum.Y2 < maxError)
        {
            if (globalMinimum.Y2 < fbest)
            {
                for (int i = 0; i < n; i++)
                {
                    x[i] = bs[i] + globalMinimum.Y2 * direc[i];
                    if (x[i] < 1.0e-10d)
                    {
                        x[i] = 1.0e-10d;
                    }
                }
                fbest = globalMinimum.Y2;
            }
            else
            {
                for (int i = 0; i < n; i++)
                {
                    x[i] = bs[i];
                }
            }
            break;
        }

        if (convergenceCounter > 0)
        {
            fbest = globalMinimum.Brentmin(20, maxError, eps, 1.0e-7d,
                                           network, globalMinimum.Y2);
        }
        else
        {
            fbest = globalMinimum.Brentmin(10, maxError, 1.0e-6d, 1.0e-5d,
                                           network, globalMinimum.Y2);
        }

        for (int i = 0; i < n; i++)
        {
            x[i] = bs[i] + globalMinimum.X2 * direc[i];
            if (x[i] < 1.0e-10d)
            {
                x[i] = 1.0e-10d;
            }
        }

        double improvement = (prevBest - fbest) / prevBest;

        if (fbest < maxError)
        {
            break;
        }

        for (int i = 0; i < n; i++)
        {
            direc[i] = -direc[i]; // negative gradient
        }

        double gam = Gamma(n, g, direc);
        if (gam < 0.0d)
        {
            gam = 0.0d;
        }
        if (gam > 10.0d)
        {
            gam = 10.0d;
        }

        if (improvement < 0.001d)
        {
            ++poorCj;
        }
        else
        {
            poorCj = 0;
        }

        if (poorCj >= 2)
        {
            if (gam > 1.0d)
            {
                gam = 1.0d;
            }
        }

        if (poorCj >= 6)
        {
            poorCj = 0;
            gam = 0.0d;
        }

        FindNewDir(n, gam, g, h, direc);
    }

    return fbest;
}
/// <summary>
/// Construct a fold from the specified flat network.
/// </summary>
/// <param name="flat">The flat network.</param>
public NetworkFold(FlatNetwork flat)
{
    _weights = EngineArray.ArrayCopy(flat.Weights);
    _output = EngineArray.ArrayCopy(flat.LayerOutput);
}
/// <summary>
/// Copy the weights and output from the network.
/// </summary>
/// <param name="source">The network to copy from.</param>
public void CopyFromNetwork(FlatNetwork source)
{
    EngineArray.ArrayCopy(source.Weights, _weights);
    EngineArray.ArrayCopy(source.LayerOutput, _output);
}
/// <summary>
/// Wrapper around EngineArray.ArrayCopy that matches the argument order of
/// java.lang.System.arraycopy.
/// </summary>
internal static void arraycopy(float[] source, int sourceIndex,
                               float[] target, int targetIndex, int size)
{
    EngineArray.ArrayCopy(source, sourceIndex, target, targetIndex, size);
}
/// <summary>
/// Construct a truth table line.
/// </summary>
/// <param name="prob">The probability.</param>
/// <param name="result">The result.</param>
/// <param name="args">The arguments.</param>
public TableLine(double prob, int result, int[] args)
{
    Probability = prob;
    _result = result;
    _arguments = EngineArray.ArrayCopy(args);
}
/// <summary>
/// Set the current state.
/// </summary>
/// <param name="s">The new current state.</param>
public void SetCurrentState(double[] s)
{
    _currentState = new BiPolarMLData(s.Length);
    EngineArray.ArrayCopy(s, _currentState.Data);
}
private void ProcessCalc()
{
    AnalystField firstOutputField = null;
    int barsNeeded = Math.Abs(Analyst.DetermineMinTimeSlice());

    IndentLevel = 2;
    AddLine("if( _inputCount>0 && CurrentBar>=" + barsNeeded + " )");
    AddLine("{");
    IndentIn();
    AddLine("double[] input = new double[_inputCount];");
    AddLine("double[] output = new double[_outputCount];");

    int idx = 0;
    foreach (AnalystField field in Analyst.Script.Normalize.NormalizedFields)
    {
        if (field.Input)
        {
            String str;
            DataField df = Analyst.Script.FindDataField(field.Name);

            switch (field.Action)
            {
                case NormalizationAction.PassThrough:
                    str = EngineArray.Replace(df.Source, "##",
                                              "" + (-field.TimeSlice));
                    AddLine("input[" + idx + "]=" + str + ";");
                    idx++;
                    break;
                case NormalizationAction.Normalize:
                    str = EngineArray.Replace(df.Source, "##",
                                              "" + (-field.TimeSlice));
                    AddLine("input[" + idx + "]=Norm(" + str + ","
                            + field.NormalizedHigh + ","
                            + field.NormalizedLow + ","
                            + field.ActualHigh + ","
                            + field.ActualLow + ");");
                    idx++;
                    break;
                case NormalizationAction.Ignore:
                    break;
                default:
                    throw new AnalystCodeGenerationError(
                        "Can't generate Ninjascript code, unsupported normalization action: "
                        + field.Action.ToString());
            }
        }

        if (field.Output)
        {
            if (firstOutputField == null)
            {
                firstOutputField = field;
            }
        }
    }

    if (firstOutputField != null)
    {
        AddLine("Compute(input,output);");
        AddLine("Output.Set(DeNorm(output[0]" + ","
                + firstOutputField.NormalizedHigh + ","
                + firstOutputField.NormalizedLow + ","
                + firstOutputField.ActualHigh + ","
                + firstOutputField.ActualLow + "));");
        IndentOut();
    }
    AddLine("}");
    IndentLevel = 2;
}
/// <summary>
/// Reset the network: clear the current state and zero all weights. The
/// seed parameter is accepted for interface compatibility but is not used.
/// </summary>
public void Reset(int seed)
{
    CurrentState.Clear();
    EngineArray.Fill(_weights, 0.0d);
}
/// <summary>
/// Clear any connection weights.
/// </summary>
public void Clear()
{
    EngineArray.Fill(_weights, 0);
}
/// <summary>
/// Read an object.
/// </summary>
public Object Read(Stream mask0)
{
    var result = new BasicNetwork();
    var flat = new FlatNetwork();
    var ins0 = new EncogReadHelper(mask0);
    EncogFileSection section;

    while ((section = ins0.ReadNextSection()) != null)
    {
        if (section.SectionName.Equals("BASIC")
            && section.SubSectionName.Equals("PARAMS"))
        {
            IDictionary<String, String> paras = section.ParseParams();
            EngineArray.PutAll(paras, result.Properties);
        }
        if (section.SectionName.Equals("BASIC")
            && section.SubSectionName.Equals("NETWORK"))
        {
            IDictionary<String, String> p = section.ParseParams();

            flat.BeginTraining = EncogFileSection.ParseInt(p,
                BasicNetwork.TagBeginTraining);
            flat.ConnectionLimit = EncogFileSection.ParseDouble(p,
                BasicNetwork.TagConnectionLimit);
            flat.ContextTargetOffset = EncogFileSection.ParseIntArray(p,
                BasicNetwork.TagContextTargetOffset);
            flat.ContextTargetSize = EncogFileSection.ParseIntArray(p,
                BasicNetwork.TagContextTargetSize);
            flat.EndTraining = EncogFileSection.ParseInt(p,
                BasicNetwork.TagEndTraining);
            flat.HasContext = EncogFileSection.ParseBoolean(p,
                BasicNetwork.TagHasContext);
            flat.InputCount = EncogFileSection.ParseInt(p,
                PersistConst.InputCount);
            flat.LayerCounts = EncogFileSection.ParseIntArray(p,
                BasicNetwork.TagLayerCounts);
            flat.LayerFeedCounts = EncogFileSection.ParseIntArray(p,
                BasicNetwork.TagLayerFeedCounts);
            flat.LayerContextCount = EncogFileSection.ParseIntArray(p,
                BasicNetwork.TagLayerContextCount);
            flat.LayerIndex = EncogFileSection.ParseIntArray(p,
                BasicNetwork.TagLayerIndex);
            flat.LayerOutput = section.ParseDoubleArray(p, PersistConst.Output);
            flat.LayerSums = new double[flat.LayerOutput.Length];
            flat.OutputCount = EncogFileSection.ParseInt(p,
                PersistConst.OutputCount);
            flat.WeightIndex = EncogFileSection.ParseIntArray(p,
                BasicNetwork.TagWeightIndex);
            flat.Weights = section.ParseDoubleArray(p, PersistConst.Weights);
            flat.BiasActivation = section.ParseDoubleArray(p,
                BasicNetwork.TagBiasActivation);
        }
        else if (section.SectionName.Equals("BASIC")
                 && section.SubSectionName.Equals("ACTIVATION"))
        {
            int index = 0;
            flat.ActivationFunctions =
                new IActivationFunction[flat.LayerCounts.Length];

            foreach (String line in section.Lines)
            {
                IActivationFunction af;
                IList<String> cols = EncogFileSection.SplitColumns(line);
                String name = ReflectionUtil.AfPath + cols[0];

                try
                {
                    af = (IActivationFunction) ReflectionUtil.LoadObject(name);
                }
                catch (TypeLoadException e)
                {
                    throw new PersistError(e);
                }
                catch (TargetException e)
                {
                    throw new PersistError(e);
                }
                catch (MemberAccessException e)
                {
                    throw new PersistError(e);
                }

                for (int i = 0; i < af.ParamNames.Length; i++)
                {
                    af.Params[i] = CSVFormat.EgFormat.Parse(cols[i + 1]);
                }

                flat.ActivationFunctions[index++] = af;
            }
        }
    }

    result.Structure.Flat = flat;
    return result;
}
/// <summary>
/// Perform an SVD fit.
/// </summary>
/// <param name="x">The X matrix.</param>
/// <param name="y">The Y matrix.</param>
/// <param name="a">The A matrix; receives the fitted coefficients.</param>
/// <param name="funcs">The RBF functions.</param>
/// <returns>The RMS error of the fit.</returns>
public static double Svdfit(double[][] x, double[][] y, double[][] a,
                            IRadialBasisFunction[] funcs)
{
    int i, j, k;
    double wmax, tmp, thresh, sum, TOL = 1e-13d;

    // Allocate memory for the SVD matrices.
    double[][] u = EngineArray.AllocateDouble2D(x.Length, funcs.Length);
    double[][] v = EngineArray.AllocateDouble2D(funcs.Length, funcs.Length);
    var w = new double[funcs.Length];

    // Fill the input matrix with values based on the fitting functions
    // and input coordinates.
    for (i = 0; i < x.Length; i++)
    {
        for (j = 0; j < funcs.Length; j++)
        {
            u[i][j] = funcs[j].Calculate(x[i]);
        }
    }

    // Perform the decomposition.
    Svdcmp(u, w, v);

    // Check for w values that are close to zero and replace them with
    // zeros, so they are ignored during back substitution.
    wmax = 0;
    for (j = 0; j < funcs.Length; j++)
    {
        if (w[j] > wmax)
        {
            wmax = w[j];
        }
    }

    thresh = TOL * wmax;

    for (j = 0; j < funcs.Length; j++)
    {
        if (w[j] < thresh)
        {
            w[j] = 0;
        }
    }

    // Perform back substitution to get the result.
    Svdbksb(u, w, v, y, a);

    // Calculate chi-squared for the fit.
    double chisq = 0;
    for (k = 0; k < y[0].Length; k++)
    {
        for (i = 0; i < y.Length; i++)
        {
            sum = 0.0d;
            for (j = 0; j < funcs.Length; j++)
            {
                sum += a[j][k] * funcs[j].Calculate(x[i]);
            }
            tmp = (y[i][k] - sum);
            chisq += tmp * tmp;
        }
    }

    return Math.Sqrt(chisq / (y.Length * y[0].Length));
}
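// Hedged usage sketch for Svdfit above: fit RBF weights to sample points by
// least squares. The GaussianFunction type and its (peak, center, width)
// constructor are assumptions about the surrounding library, so treat this
// as illustrative rather than canonical; it is assumed to run inside the
// class that declares Svdfit.
double[][] x = { new[] { 0.0 }, new[] { 0.5 }, new[] { 1.0 } };
double[][] y = { new[] { 0.0 }, new[] { 1.0 }, new[] { 0.0 } };
IRadialBasisFunction[] funcs =
{
    new GaussianFunction(1.0, new[] { 0.25 }, 0.5),
    new GaussianFunction(1.0, new[] { 0.75 }, 0.5)
};
double[][] a = EngineArray.AllocateDouble2D(funcs.Length, y[0].Length);
double rms = Svdfit(x, y, a, funcs); // a now holds the fitted coefficients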
/// <inheritdoc/>
public void Write(double[] input, double[] ideal)
{
    EngineArray.ArrayCopy(input, this.input[index]);
    EngineArray.ArrayCopy(ideal, this.ideal[index]);
    index++;
}
/// <inheritdoc />
private void InternalCompute(int outputNeuron)
{
    int row = 0;
    var error = new ErrorCalculation();
    var derivative = new double[_weightCount];

    // Loop over every training element.
    foreach (IMLDataPair pair in _training)
    {
        EngineArray.Fill(derivative, 0);
        IMLData networkOutput = _network.Compute(pair.Input);
        double e = pair.Ideal[outputNeuron] - networkOutput[outputNeuron];
        error.UpdateError(networkOutput[outputNeuron], pair.Ideal[outputNeuron]);

        int currentWeight = 0;

        // Loop over the output weights.
        int outputFeedCount =
            _network.GetLayerTotalNeuronCount(_network.LayerCount - 2);
        for (int i = 0; i < _network.OutputCount; i++)
        {
            for (int j = 0; j < outputFeedCount; j++)
            {
                double jc;

                if (i == outputNeuron)
                {
                    jc = ComputeDerivative(pair.Input, outputNeuron,
                                           currentWeight, _dStep,
                                           networkOutput[outputNeuron], row);
                }
                else
                {
                    jc = 0;
                }

                _gradients[currentWeight] += jc * e;
                derivative[currentWeight] = jc;
                currentWeight++;
            }
        }

        // Loop over every remaining weight in the neural network.
        while (currentWeight < _network.Flat.Weights.Length)
        {
            double jc = ComputeDerivative(pair.Input, outputNeuron,
                                          currentWeight, _dStep,
                                          networkOutput[outputNeuron], row);
            derivative[currentWeight] = jc;
            _gradients[currentWeight] += jc * e;
            currentWeight++;
        }

        row++;
        UpdateHessian(derivative);
    }

    _sse += error.CalculateSSE();
}
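// Hedged sketch (not the library source): UpdateHessian above is assumed to
// accumulate the outer product of each Jacobian row, which yields the
// Gauss-Newton approximation H ~= J^T * J, while _gradients accumulates
// J^T * e. A minimal version of that accumulation would be:
private static void UpdateHessianSketch(double[][] hessian, double[] derivative)
{
    for (int i = 0; i < derivative.Length; i++)
    {
        for (int j = 0; j < derivative.Length; j++)
        {
            hessian[i][j] += derivative[i] * derivative[j];
        }
    }
}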
/// <summary>
/// Construct the singular value decomposition.
/// </summary>
/// <param name="Arg">Rectangular matrix.</param>
public SingularValueDecomposition(Matrix Arg)
{
    // Derived from LINPACK code.

    // Initialize.
    double[][] A = Arg.GetArrayCopy();
    m = Arg.Rows;
    n = Arg.Cols;

    /*
     * Apparently the failing cases are only a proper subset of (m<n), so
     * let's not throw an error. Correct fix to come later?
     * if (m<n) {
     *     throw new IllegalArgumentException("Jama SVD only works for m >= n");
     * }
     */
    int nu = Math.Min(m, n);
    s = new double[Math.Min(m + 1, n)];
    umatrix = EngineArray.AllocateDouble2D(m, nu);
    vmatrix = EngineArray.AllocateDouble2D(n, n);
    var e = new double[n];
    var work = new double[m];
    bool wantu = true;
    bool wantv = true;

    // Reduce A to bidiagonal form, storing the diagonal elements
    // in s and the super-diagonal elements in e.
    int nct = Math.Min(m - 1, n);
    int nrt = Math.Max(0, Math.Min(n - 2, m));
    for (int k = 0; k < Math.Max(nct, nrt); k++)
    {
        if (k < nct)
        {
            // Compute the transformation for the k-th column and
            // place the k-th diagonal in s[k].
            // Compute 2-norm of k-th column without under/overflow.
            s[k] = 0;
            for (int i = k; i < m; i++)
            {
                s[k] = EncogMath.Hypot(s[k], A[i][k]);
            }
            if (s[k] != 0.0)
            {
                if (A[k][k] < 0.0)
                {
                    s[k] = -s[k];
                }
                for (int i = k; i < m; i++)
                {
                    A[i][k] /= s[k];
                }
                A[k][k] += 1.0;
            }
            s[k] = -s[k];
        }
        for (int j = k + 1; j < n; j++)
        {
            if ((k < nct) & (s[k] != 0.0))
            {
                // Apply the transformation.
                double t = 0;
                for (int i = k; i < m; i++)
                {
                    t += A[i][k] * A[i][j];
                }
                t = -t / A[k][k];
                for (int i = k; i < m; i++)
                {
                    A[i][j] += t * A[i][k];
                }
            }

            // Place the k-th row of A into e for the
            // subsequent calculation of the row transformation.
            e[j] = A[k][j];
        }
        if (wantu & (k < nct))
        {
            // Place the transformation in U for subsequent back
            // multiplication.
            for (int i = k; i < m; i++)
            {
                umatrix[i][k] = A[i][k];
            }
        }
        if (k < nrt)
        {
            // Compute the k-th row transformation and place the
            // k-th super-diagonal in e[k].
            // Compute 2-norm without under/overflow.
            e[k] = 0;
            for (int i = k + 1; i < n; i++)
            {
                e[k] = EncogMath.Hypot(e[k], e[i]);
            }
            if (e[k] != 0.0)
            {
                if (e[k + 1] < 0.0)
                {
                    e[k] = -e[k];
                }
                for (int i = k + 1; i < n; i++)
                {
                    e[i] /= e[k];
                }
                e[k + 1] += 1.0;
            }
            e[k] = -e[k];
            if ((k + 1 < m) & (e[k] != 0.0))
            {
                // Apply the transformation.
                for (int i = k + 1; i < m; i++)
                {
                    work[i] = 0.0;
                }
                for (int j = k + 1; j < n; j++)
                {
                    for (int i = k + 1; i < m; i++)
                    {
                        work[i] += e[j] * A[i][j];
                    }
                }
                for (int j = k + 1; j < n; j++)
                {
                    double t = -e[j] / e[k + 1];
                    for (int i = k + 1; i < m; i++)
                    {
                        A[i][j] += t * work[i];
                    }
                }
            }
            if (wantv)
            {
                // Place the transformation in V for subsequent
                // back multiplication.
                for (int i = k + 1; i < n; i++)
                {
                    vmatrix[i][k] = e[i];
                }
            }
        }
    }

    // Set up the final bidiagonal matrix of order p.
    int p = Math.Min(n, m + 1);
    if (nct < n)
    {
        s[nct] = A[nct][nct];
    }
    if (m < p)
    {
        s[p - 1] = 0.0;
    }
    if (nrt + 1 < p)
    {
        e[nrt] = A[nrt][p - 1];
    }
    e[p - 1] = 0.0;

    // If required, generate U.
    if (wantu)
    {
        for (int j = nct; j < nu; j++)
        {
            for (int i = 0; i < m; i++)
            {
                umatrix[i][j] = 0.0;
            }
            umatrix[j][j] = 1.0;
        }
        for (int k = nct - 1; k >= 0; k--)
        {
            if (s[k] != 0.0)
            {
                for (int j = k + 1; j < nu; j++)
                {
                    double t = 0;
                    for (int i = k; i < m; i++)
                    {
                        t += umatrix[i][k] * umatrix[i][j];
                    }
                    t = -t / umatrix[k][k];
                    for (int i = k; i < m; i++)
                    {
                        umatrix[i][j] += t * umatrix[i][k];
                    }
                }
                for (int i = k; i < m; i++)
                {
                    umatrix[i][k] = -umatrix[i][k];
                }
                umatrix[k][k] = 1.0 + umatrix[k][k];
                for (int i = 0; i < k - 1; i++)
                {
                    umatrix[i][k] = 0.0;
                }
            }
            else
            {
                for (int i = 0; i < m; i++)
                {
                    umatrix[i][k] = 0.0;
                }
                umatrix[k][k] = 1.0;
            }
        }
    }

    // If required, generate V.
    if (wantv)
    {
        for (int k = n - 1; k >= 0; k--)
        {
            if ((k < nrt) & (e[k] != 0.0))
            {
                for (int j = k + 1; j < nu; j++)
                {
                    double t = 0;
                    for (int i = k + 1; i < n; i++)
                    {
                        t += vmatrix[i][k] * vmatrix[i][j];
                    }
                    t = -t / vmatrix[k + 1][k];
                    for (int i = k + 1; i < n; i++)
                    {
                        vmatrix[i][j] += t * vmatrix[i][k];
                    }
                }
            }
            for (int i = 0; i < n; i++)
            {
                vmatrix[i][k] = 0.0;
            }
            vmatrix[k][k] = 1.0;
        }
    }

    // Main iteration loop for the singular values.
    int pp = p - 1;
    int iter = 0;
    double eps = Math.Pow(2.0, -52.0);
    double tiny = Math.Pow(2.0, -966.0);
    while (p > 0)
    {
        int k, kase;

        // Here is where a test for too many iterations would go.

        // This section of the program inspects for negligible
        // elements in the s and e arrays. On completion the
        // variables kase and k are set as follows:
        // kase = 1 if s(p) and e[k-1] are negligible and k<p
        // kase = 2 if s(k) is negligible and k<p
        // kase = 3 if e[k-1] is negligible, k<p, and
        //          s(k), ..., s(p) are not negligible (QR step)
        // kase = 4 if e(p-1) is negligible (convergence)
        for (k = p - 2; k >= -1; k--)
        {
            if (k == -1)
            {
                break;
            }
            if (Math.Abs(e[k])
                <= tiny + eps * (Math.Abs(s[k]) + Math.Abs(s[k + 1])))
            {
                e[k] = 0.0;
                break;
            }
        }
        if (k == p - 2)
        {
            kase = 4;
        }
        else
        {
            int ks;
            for (ks = p - 1; ks >= k; ks--)
            {
                if (ks == k)
                {
                    break;
                }
                double t = (ks != p ? Math.Abs(e[ks]) : 0.0)
                           + (ks != k + 1 ? Math.Abs(e[ks - 1]) : 0.0);
                if (Math.Abs(s[ks]) <= tiny + eps * t)
                {
                    s[ks] = 0.0;
                    break;
                }
            }
            if (ks == k)
            {
                kase = 3;
            }
            else if (ks == p - 1)
            {
                kase = 1;
            }
            else
            {
                kase = 2;
                k = ks;
            }
        }
        k++;

        // Perform the task indicated by kase.
        switch (kase)
        {
            // Deflate negligible s(p).
            case 1:
            {
                double f = e[p - 2];
                e[p - 2] = 0.0;
                for (int j = p - 2; j >= k; j--)
                {
                    double t = EncogMath.Hypot(s[j], f);
                    double cs = s[j] / t;
                    double sn = f / t;
                    s[j] = t;
                    if (j != k)
                    {
                        f = -sn * e[j - 1];
                        e[j - 1] = cs * e[j - 1];
                    }
                    if (wantv)
                    {
                        for (int i = 0; i < n; i++)
                        {
                            t = cs * vmatrix[i][j] + sn * vmatrix[i][p - 1];
                            vmatrix[i][p - 1] = -sn * vmatrix[i][j]
                                                + cs * vmatrix[i][p - 1];
                            vmatrix[i][j] = t;
                        }
                    }
                }
            }
                break;

            // Split at negligible s(k).
            case 2:
            {
                double f = e[k - 1];
                e[k - 1] = 0.0;
                for (int j = k; j < p; j++)
                {
                    double t = EncogMath.Hypot(s[j], f);
                    double cs = s[j] / t;
                    double sn = f / t;
                    s[j] = t;
                    f = -sn * e[j];
                    e[j] = cs * e[j];
                    if (wantu)
                    {
                        for (int i = 0; i < m; i++)
                        {
                            t = cs * umatrix[i][j] + sn * umatrix[i][k - 1];
                            umatrix[i][k - 1] = -sn * umatrix[i][j]
                                                + cs * umatrix[i][k - 1];
                            umatrix[i][j] = t;
                        }
                    }
                }
            }
                break;

            // Perform one QR step.
            case 3:
            {
                // Calculate the shift.
                double scale = Math.Max(Math.Max(Math.Max(Math.Max(
                    Math.Abs(s[p - 1]), Math.Abs(s[p - 2])),
                    Math.Abs(e[p - 2])), Math.Abs(s[k])), Math.Abs(e[k]));
                double sp = s[p - 1] / scale;
                double spm1 = s[p - 2] / scale;
                double epm1 = e[p - 2] / scale;
                double sk = s[k] / scale;
                double ek = e[k] / scale;
                double b = ((spm1 + sp) * (spm1 - sp) + epm1 * epm1) / 2.0;
                double c = (sp * epm1) * (sp * epm1);
                double shift = 0.0;
                if ((b != 0.0) | (c != 0.0))
                {
                    shift = Math.Sqrt(b * b + c);
                    if (b < 0.0)
                    {
                        shift = -shift;
                    }
                    shift = c / (b + shift);
                }
                double f = (sk + sp) * (sk - sp) + shift;
                double g = sk * ek;

                // Chase zeros.
                for (int j = k; j < p - 1; j++)
                {
                    double t = EncogMath.Hypot(f, g);
                    double cs = f / t;
                    double sn = g / t;
                    if (j != k)
                    {
                        e[j - 1] = t;
                    }
                    f = cs * s[j] + sn * e[j];
                    e[j] = cs * e[j] - sn * s[j];
                    g = sn * s[j + 1];
                    s[j + 1] = cs * s[j + 1];
                    if (wantv)
                    {
                        for (int i = 0; i < n; i++)
                        {
                            t = cs * vmatrix[i][j] + sn * vmatrix[i][j + 1];
                            vmatrix[i][j + 1] = -sn * vmatrix[i][j]
                                                + cs * vmatrix[i][j + 1];
                            vmatrix[i][j] = t;
                        }
                    }
                    t = EncogMath.Hypot(f, g);
                    cs = f / t;
                    sn = g / t;
                    s[j] = t;
                    f = cs * e[j] + sn * s[j + 1];
                    s[j + 1] = -sn * e[j] + cs * s[j + 1];
                    g = sn * e[j + 1];
                    e[j + 1] = cs * e[j + 1];
                    if (wantu && (j < m - 1))
                    {
                        for (int i = 0; i < m; i++)
                        {
                            t = cs * umatrix[i][j] + sn * umatrix[i][j + 1];
                            umatrix[i][j + 1] = -sn * umatrix[i][j]
                                                + cs * umatrix[i][j + 1];
                            umatrix[i][j] = t;
                        }
                    }
                }
                e[p - 2] = f;
                iter = iter + 1;
            }
                break;

            // Convergence.
            case 4:
            {
                // Make the singular values positive.
                if (s[k] <= 0.0)
                {
                    s[k] = (s[k] < 0.0 ? -s[k] : 0.0);
                    if (wantv)
                    {
                        for (int i = 0; i <= pp; i++)
                        {
                            vmatrix[i][k] = -vmatrix[i][k];
                        }
                    }
                }

                // Order the singular values.
                while (k < pp)
                {
                    if (s[k] >= s[k + 1])
                    {
                        break;
                    }
                    double t = s[k];
                    s[k] = s[k + 1];
                    s[k + 1] = t;
                    if (wantv && (k < n - 1))
                    {
                        for (int i = 0; i < n; i++)
                        {
                            t = vmatrix[i][k + 1];
                            vmatrix[i][k + 1] = vmatrix[i][k];
                            vmatrix[i][k] = t;
                        }
                    }
                    if (wantu && (k < m - 1))
                    {
                        for (int i = 0; i < m; i++)
                        {
                            t = umatrix[i][k + 1];
                            umatrix[i][k + 1] = umatrix[i][k];
                            umatrix[i][k] = t;
                        }
                    }
                    k++;
                }
                iter = 0;
                p--;
            }
                break;
        }
    }
}
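// For reference: EncogMath.Hypot, used throughout the decomposition above,
// computes sqrt(a*a + b*b) without intermediate overflow or underflow. A
// minimal sketch of the standard formulation (the library's own
// implementation may differ in detail):
static double HypotSketch(double a, double b)
{
    double r;
    if (Math.Abs(a) > Math.Abs(b))
    {
        r = b / a;
        r = Math.Abs(a) * Math.Sqrt(1 + r * r);
    }
    else if (b != 0)
    {
        r = a / b;
        r = Math.Abs(b) * Math.Sqrt(1 + r * r);
    }
    else
    {
        r = 0.0;
    }
    return r;
}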
/// <inheritdoc/>
public int Classify(IMLData input)
{
    IMLData output = Compute(input);
    return EngineArray.MaxIndex(output);
}
/// <summary>
/// Construct a network analyze class. Analyze the specified network.
/// </summary>
/// <param name="network">The network to analyze.</param>
public AnalyzeNetwork(BasicNetwork network)
{
    int assignDisabled = 0;
    int assignedTotal = 0;
    IList<Double> biasList = new List<Double>();
    IList<Double> weightList = new List<Double>();
    IList<Double> allList = new List<Double>();

    for (int layerNumber = 0; layerNumber < network.LayerCount - 1;
         layerNumber++)
    {
        int fromCount = network.GetLayerNeuronCount(layerNumber);
        int fromBiasCount = network.GetLayerTotalNeuronCount(layerNumber);
        int toCount = network.GetLayerNeuronCount(layerNumber + 1);

        // Weights.
        for (int fromNeuron = 0; fromNeuron < fromCount; fromNeuron++)
        {
            for (int toNeuron = 0; toNeuron < toCount; toNeuron++)
            {
                double v = network.GetWeight(layerNumber, fromNeuron, toNeuron);

                if (network.Structure.ConnectionLimited)
                {
                    if (Math.Abs(v) < network.Structure.ConnectionLimit)
                    {
                        assignDisabled++;
                    }
                }

                weightList.Add(v);
                allList.Add(v);
                assignedTotal++;
            }
        }

        // Bias.
        if (fromCount != fromBiasCount)
        {
            int biasNeuron = fromCount;
            for (int toNeuron = 0; toNeuron < toCount; toNeuron++)
            {
                double v = network.GetWeight(layerNumber, biasNeuron, toNeuron);

                if (network.Structure.ConnectionLimited)
                {
                    if (Math.Abs(v) < network.Structure.ConnectionLimit)
                    {
                        assignDisabled++;
                    }
                }

                biasList.Add(v);
                allList.Add(v);
                assignedTotal++;
            }
        }
    }

    _disabledConnections = assignDisabled;
    _totalConnections = assignedTotal;
    _weights = new NumericRange(weightList);
    _bias = new NumericRange(biasList);
    _weightsAndBias = new NumericRange(allList);
    _weightValues = EngineArray.ListToDouble(weightList);
    _allValues = EngineArray.ListToDouble(allList);
    _biasValues = EngineArray.ListToDouble(biasList);
}
/// <inheritdoc/>
public void Clear()
{
    EngineArray.Fill(_gradients, 0);
    _hessianMatrix.Clear();
}
/// <summary>
/// Compute the output from this network.
/// </summary>
/// <param name="input">The input to the network.</param>
/// <returns>The output from the network.</returns>
public override sealed IMLData Compute(IMLData input)
{
    var xout = new double[OutputCount];

    double psum = 0.0d;

    int r = -1;
    foreach (IMLDataPair pair in _samples)
    {
        r++;

        if (r == Exclude)
        {
            continue;
        }

        double dist = 0.0d;
        for (int i = 0; i < InputCount; i++)
        {
            double diff = input[i] - pair.Input[i];
            diff /= _sigma[i];
            dist += diff * diff;
        }

        if (Kernel == PNNKernelType.Gaussian)
        {
            dist = Math.Exp(-dist);
        }
        else if (Kernel == PNNKernelType.Reciprocal)
        {
            dist = 1.0d / (1.0d + dist);
        }

        if (dist < 1.0e-40d)
        {
            dist = 1.0e-40d;
        }

        if (OutputMode == PNNOutputMode.Classification)
        {
            var pop = (int) pair.Ideal[0];
            xout[pop] += dist;
        }
        else if (OutputMode == PNNOutputMode.Unsupervised)
        {
            for (int i = 0; i < InputCount; i++)
            {
                xout[i] += dist * pair.Input[i];
            }
            psum += dist;
        }
        else if (OutputMode == PNNOutputMode.Regression)
        {
            for (int i = 0; i < OutputCount; i++)
            {
                xout[i] += dist * pair.Ideal[i];
            }
            psum += dist;
        }
    }

    if (OutputMode == PNNOutputMode.Classification)
    {
        psum = 0.0d;
        for (int i = 0; i < OutputCount; i++)
        {
            if (_priors[i] >= 0.0d)
            {
                xout[i] *= _priors[i] / _countPer[i];
            }
            psum += xout[i];
        }

        if (psum < 1.0e-40d)
        {
            psum = 1.0e-40d;
        }

        for (int i = 0; i < OutputCount; i++)
        {
            xout[i] /= psum;
        }

        IMLData result = new BasicMLData(1);
        result[0] = EngineArray.MaxIndex(xout);
        return result;
    }
    else if (OutputMode == PNNOutputMode.Unsupervised)
    {
        for (int i = 0; i < InputCount; i++)
        {
            xout[i] /= psum;
        }
    }
    else if (OutputMode == PNNOutputMode.Regression)
    {
        for (int i = 0; i < OutputCount; i++)
        {
            xout[i] /= psum;
        }
    }

    return new BasicMLData(xout);
}
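// Worked example of the Gaussian kernel evaluation in Compute above, for a
// single stored sample. The numbers are made up for illustration: the
// sigma-scaled squared distance is mapped through exp(-dist).
double[] inputVec = { 0.2, 0.4 };
double[] sampleVec = { 0.0, 0.0 };
double[] sigmaVec = { 1.0, 1.0 };
double dist = 0.0;
for (int i = 0; i < inputVec.Length; i++)
{
    double diff = (inputVec[i] - sampleVec[i]) / sigmaVec[i];
    dist += diff * diff; // 0.04 + 0.16 = 0.2
}
double activation = Math.Exp(-dist); // e^-0.2, roughly 0.819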
/// <summary>
/// Read an object.
/// </summary>
public Object Read(Stream mask0)
{
    var ins0 = new EncogReadHelper(mask0);
    EncogFileSection section;
    var samples = new BasicMLDataSet();
    IDictionary<String, String> networkParams = null;
    PNNKernelType kernel = default(PNNKernelType); /* was: null */
    PNNOutputMode outmodel = default(PNNOutputMode); /* was: null */
    int inputCount = 0;
    int outputCount = 0;
    double error = 0;
    double[] sigma = null;

    while ((section = ins0.ReadNextSection()) != null)
    {
        if (section.SectionName.Equals("PNN")
            && section.SubSectionName.Equals("PARAMS"))
        {
            networkParams = section.ParseParams();
        }
        if (section.SectionName.Equals("PNN")
            && section.SubSectionName.Equals("NETWORK"))
        {
            IDictionary<String, String> paras = section.ParseParams();
            inputCount = EncogFileSection.ParseInt(paras,
                PersistConst.InputCount);
            outputCount = EncogFileSection.ParseInt(paras,
                PersistConst.OutputCount);
            kernel = StringToKernel(paras[PersistConst.Kernel]);
            outmodel = StringToOutputMode(paras[PropertyOutputMode]);
            error = EncogFileSection.ParseDouble(paras, PersistConst.Error);
            sigma = section.ParseDoubleArray(paras, PersistConst.Sigma);
        }
        if (section.SectionName.Equals("PNN")
            && section.SubSectionName.Equals("SAMPLES"))
        {
            foreach (String line in section.Lines)
            {
                IList<String> cols = EncogFileSection.SplitColumns(line);
                int index = 0;

                var inputData = new BasicMLData(inputCount);
                for (int i = 0; i < inputCount; i++)
                {
                    inputData[i] = CSVFormat.EgFormat.Parse(cols[index++]);
                }

                var idealData = new BasicMLData(inputCount);
                idealData[0] = CSVFormat.EgFormat.Parse(cols[index++]);

                IMLDataPair pair = new BasicMLDataPair(inputData, idealData);
                samples.Add(pair);
            }
        }
    }

    var result = new BasicPNN(kernel, outmodel, inputCount, outputCount);
    if (networkParams != null)
    {
        EngineArray.PutAll(networkParams, result.Properties);
    }
    result.Samples = samples;
    result.Error = error;
    if (sigma != null)
    {
        EngineArray.ArrayCopy(sigma, result.Sigma);
    }

    return result;
}
/// <summary>
/// Perform a revert.
/// </summary>
/// <param name="revertedData">The source data to revert from.</param>
public void PerformRevert(IDictionary<String, String> revertedData)
{
    _data.Clear();
    EngineArray.PutAll(revertedData, _data);
}
/// <summary>
/// Copy weights and output to the network.
/// </summary>
/// <param name="target">The network to copy to.</param>
public void CopyToNetwork(FlatNetwork target)
{
    EngineArray.ArrayCopy(_weights, target.Weights);
    EngineArray.ArrayCopy(_output, target.LayerOutput);
}
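// Usage sketch tying the NetworkFold members above together: snapshot a
// network, let training mutate it, then restore the snapshot. The
// flatNetwork variable is hypothetical.
var fold = new NetworkFold(flatNetwork); // snapshot via the constructor
// ... training modifies flatNetwork ...
fold.CopyFromNetwork(flatNetwork);       // refresh the snapshot, or
fold.CopyToNetwork(flatNetwork);         // roll the network back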
/// <summary>
/// Perform one iteration.
/// </summary>
public override void Iteration()
{
    if (_mustInit)
    {
        Init();
    }

    int numWeights = _weights.Length;

    // Storage space for previous iteration values.
    if (_restart)
    {
        // First time through, set initial values for SCG parameters.
        _lambda = FirstLambda;
        _lambda2 = 0;
        _k = 1;
        _success = true;
        _restart = false;
    }

    // If an error reduction is possible, calculate 2nd order info.
    if (_success)
    {
        // If the search direction is small, stop.
        _magP = EngineArray.VectorProduct(_p, _p);

        double sigma = FirstSigma / Math.Sqrt(_magP);

        // In order to compute the new step, we need a new gradient.
        // First, save off the old data.
        EngineArray.ArrayCopy(Gradients, _oldGradient);
        EngineArray.ArrayCopy(_weights, _oldWeights);
        _oldError = Error;

        // Now we move to the new point in weight space.
        for (int i = 0; i < numWeights; ++i)
        {
            _weights[i] += sigma * _p[i];
        }

        EngineArray.ArrayCopy(_weights, Network.Weights);

        // And compute the new gradient.
        CalculateGradients();

        // Now we have the new gradient, and we continue the step
        // computation.
        _delta = 0;
        for (int i = 0; i < numWeights; ++i)
        {
            double step = (Gradients[i] - _oldGradient[i]) / sigma;
            _delta += _p[i] * step;
        }
    }

    // Scale delta.
    _delta += (_lambda - _lambda2) * _magP;

    // If delta <= 0, make Hessian positive definite.
    if (_delta <= 0)
    {
        _lambda2 = 2 * (_lambda - _delta / _magP);
        _delta = _lambda * _magP - _delta;
        _lambda = _lambda2;
    }

    // Calculate step size.
    double mu = EngineArray.VectorProduct(_p, _r);
    double alpha = mu / _delta;

    // Calculate the comparison parameter.
    // We must compute a new gradient, but this time we do not
    // want to keep the old values. They were useful only for
    // approximating the Hessian.
    for (int i = 0; i < numWeights; ++i)
    {
        _weights[i] = _oldWeights[i] + alpha * _p[i];
    }

    EngineArray.ArrayCopy(_weights, Network.Weights);

    CalculateGradients();

    double gdelta = 2 * _delta * (_oldError - Error) / (mu * mu);

    // If gdelta >= 0, a successful reduction in error is possible.
    if (gdelta >= 0)
    {
        // Product of r(k+1) by r(k)
        double rsum = 0;

        // Now r = r(k+1).
        for (int i = 0; i < numWeights; ++i)
        {
            double tmp = -Gradients[i];
            rsum += tmp * _r[i];
            _r[i] = tmp;
        }
        _lambda2 = 0;
        _success = true;

        // Do we need to restart?
        if (_k >= numWeights)
        {
            _restart = true;
            EngineArray.ArrayCopy(_r, _p);
        }
        else
        {
            // Compute new conjugate direction.
            double beta = (EngineArray.VectorProduct(_r, _r) - rsum) / mu;

            // Update direction vector.
            for (int i = 0; i < numWeights; ++i)
            {
                _p[i] = _r[i] + beta * _p[i];
            }

            _restart = false;
        }

        if (gdelta >= 0.75D)
        {
            _lambda *= 0.25D;
        }
    }
    else
    {
        // A reduction in error was not possible.
        // under_tolerance = false;

        // Go back to w(k) since w(k) + alpha*p(k) is not better.
        EngineArray.ArrayCopy(_oldWeights, _weights);
        CurrentError = _oldError;
        _lambda2 = _lambda;
        _success = false;
    }

    if (gdelta < 0.25D)
    {
        _lambda += _delta * (1 - gdelta) / _magP;
    }

    _lambda = BoundNumbers.Bound(_lambda);

    ++_k;

    EngineArray.ArrayCopy(_weights, Network.Weights);
}
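// Hedged usage sketch: Iteration() above is normally driven through Encog's
// ScaledConjugateGradient trainer. The constructor shown (network plus
// training set) is an assumption about the public API; adjust to the actual
// overloads in your Encog version.
var train = new ScaledConjugateGradient(network, trainingSet);
do
{
    train.Iteration();
} while (train.Error > 0.01);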
/// <inheritdoc/>
public Object Read(Stream istream)
{
    int states = 0;
    int[] items;
    double[] pi = null;
    Matrix transitionProbability = null;
    IDictionary<String, String> properties = null;
    IList<IStateDistribution> distributions = new List<IStateDistribution>();

    EncogReadHelper reader = new EncogReadHelper(istream);
    EncogFileSection section;

    while ((section = reader.ReadNextSection()) != null)
    {
        if (section.SectionName.Equals("HMM")
            && section.SubSectionName.Equals("PARAMS"))
        {
            properties = section.ParseParams();
        }
        if (section.SectionName.Equals("HMM")
            && section.SubSectionName.Equals("CONFIG"))
        {
            IDictionary<String, String> p = section.ParseParams();

            states = EncogFileSection.ParseInt(p, HiddenMarkovModel.TAG_STATES);

            if (p.ContainsKey(HiddenMarkovModel.TAG_ITEMS))
            {
                items = EncogFileSection.ParseIntArray(p,
                    HiddenMarkovModel.TAG_ITEMS);
            }
            pi = section.ParseDoubleArray(p, HiddenMarkovModel.TAG_PI);
            transitionProbability = EncogFileSection.ParseMatrix(p,
                HiddenMarkovModel.TAG_TRANSITION);
        }
        else if (section.SectionName.Equals("HMM")
                 && section.SubSectionName.StartsWith("DISTRIBUTION-"))
        {
            IDictionary<String, String> p = section.ParseParams();
            String t = p[HiddenMarkovModel.TAG_DIST_TYPE];

            if ("ContinousDistribution".Equals(t))
            {
                double[] mean = section.ParseDoubleArray(p,
                    HiddenMarkovModel.TAG_MEAN);
                Matrix cova = EncogFileSection.ParseMatrix(p,
                    HiddenMarkovModel.TAG_COVARIANCE);
                ContinousDistribution dist =
                    new ContinousDistribution(mean, cova.Data);
                distributions.Add(dist);
            }
            else if ("DiscreteDistribution".Equals(t))
            {
                Matrix prob = EncogFileSection.ParseMatrix(p,
                    HiddenMarkovModel.TAG_PROBABILITIES);
                DiscreteDistribution dist = new DiscreteDistribution(prob.Data);
                distributions.Add(dist);
            }
        }
    }

    HiddenMarkovModel result = new HiddenMarkovModel(states);
    EngineArray.PutAll(properties, result.Properties);
    result.TransitionProbability = transitionProbability.Data;
    result.Pi = pi;

    int index = 0;
    foreach (IStateDistribution dist in distributions)
    {
        result.StateDistributions[index++] = dist;
    }

    return result;
}
/// <summary>
/// Clear to zero.
/// </summary>
public void Clear()
{
    EngineArray.Fill(_data, 0);
}
/// <summary>
/// Determine the winner for the specified input. This is the number of the
/// winning neuron.
/// </summary>
/// <param name="input">The input pattern to present to the neural network.</param>
/// <returns>The winning neuron.</returns>
public int Winner(IMLData input)
{
    IMLData output = Compute(input);
    return EngineArray.MaxIndex(output);
}
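// Minimal illustration of the winner-take-all rule shared by Winner() and
// Classify() above: the winning neuron is simply the argmax of the output
// vector.
double[] outputVec = { 0.1, 0.7, 0.2 };
int winner = EngineArray.MaxIndex(outputVec); // returns 1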
/// <summary>
/// Copy this object's data into a target array, starting at the given
/// target index.
/// </summary>
public void CopyTo(double[] target, int targetIndex, int count)
{
    EngineArray.ArrayCopy(_data, 0, target, targetIndex, count);
}
/// <summary>
/// dst = src
///
/// Copy a vector.
/// </summary>
/// <param name="dst">An array of doubles.</param>
/// <param name="src">An array of doubles.</param>
public void Copy(double[] dst, double[] src)
{
    EngineArray.ArrayCopy(src, dst);
}
/// <summary>
/// Perform one iteration.
/// </summary>
public override void Iteration()
{
    if (shouldInit)
    {
        Init();
    }

    int numWeights = this.weights.Length;

    // Storage space for previous iteration values.
    if (this.restart)
    {
        // First time through, set initial values for SCG parameters.
        this.lambda = TrainFlatNetworkSCG.FIRST_LAMBDA;
        this.lambda2 = 0;
        this.k = 1;
        this.success = true;
        this.restart = false;
    }

    // If an error reduction is possible, calculate 2nd order info.
    if (this.success)
    {
        // If the search direction is small, stop.
        this.magP = EngineArray.VectorProduct(this.p, this.p);

        double sigma = TrainFlatNetworkSCG.FIRST_SIGMA / Math.Sqrt(this.magP);

        // In order to compute the new step, we need a new gradient.
        // First, save off the old data.
        EngineArray.ArrayCopy(this.gradients, this.oldGradient);
        EngineArray.ArrayCopy(this.weights, this.oldWeights);
        this.oldError = Error;

        // Now we move to the new point in weight space.
        for (int i = 0; i < numWeights; ++i)
        {
            this.weights[i] += sigma * this.p[i];
        }

        EngineArray.ArrayCopy(this.weights, this.network.Weights);

        // And compute the new gradient.
        CalculateGradients();

        // Now we have the new gradient, and we continue the step
        // computation.
        this.delta = 0;
        for (int i = 0; i < numWeights; ++i)
        {
            double step = (this.gradients[i] - this.oldGradient[i]) / sigma;
            this.delta += this.p[i] * step;
        }
    }

    // Scale delta.
    this.delta += (this.lambda - this.lambda2) * this.magP;

    // If delta <= 0, make Hessian positive definite.
    if (this.delta <= 0)
    {
        this.lambda2 = 2 * (this.lambda - this.delta / this.magP);
        this.delta = this.lambda * this.magP - this.delta;
        this.lambda = this.lambda2;
    }

    // Calculate step size.
    double mu = EngineArray.VectorProduct(this.p, this.r);
    double alpha = mu / this.delta;

    // Calculate the comparison parameter.
    // We must compute a new gradient, but this time we do not
    // want to keep the old values. They were useful only for
    // approximating the Hessian.
    for (int i = 0; i < numWeights; ++i)
    {
        this.weights[i] = this.oldWeights[i] + alpha * this.p[i];
    }

    EngineArray.ArrayCopy(this.weights, this.network.Weights);

    CalculateGradients();

    double gdelta = 2 * this.delta * (this.oldError - Error) / (mu * mu);

    // If gdelta >= 0, a successful reduction in error is possible.
    if (gdelta >= 0)
    {
        // Product of r(k+1) by r(k)
        double rsum = 0;

        // Now r = r(k+1).
        for (int i = 0; i < numWeights; ++i)
        {
            double tmp = -this.gradients[i];
            rsum += tmp * this.r[i];
            this.r[i] = tmp;
        }
        this.lambda2 = 0;
        this.success = true;

        // Do we need to restart?
        if (this.k >= numWeights)
        {
            this.restart = true;
            EngineArray.ArrayCopy(this.r, this.p);
        }
        else
        {
            // Compute new conjugate direction.
            double beta = (EngineArray.VectorProduct(this.r, this.r) - rsum)
                          / mu;

            // Update direction vector.
            for (int i = 0; i < numWeights; ++i)
            {
                this.p[i] = this.r[i] + beta * this.p[i];
            }

            this.restart = false;
        }

        if (gdelta >= 0.75D)
        {
            this.lambda *= 0.25D;
        }
    }
    else
    {
        // A reduction in error was not possible.
        // under_tolerance = false;

        // Go back to w(k) since w(k) + alpha*p(k) is not better.
        EngineArray.ArrayCopy(this.oldWeights, this.weights);
        this.currentError = this.oldError;
        this.lambda2 = this.lambda;
        this.success = false;
    }

    if (gdelta < 0.25D)
    {
        this.lambda += this.delta * (1 - gdelta) / this.magP;
    }

    this.lambda = BoundNumbers.Bound(this.lambda);

    ++this.k;

    EngineArray.ArrayCopy(this.weights, this.network.Weights);
}