/// <summary>
/// Converts the conic (implicit) representation of an ellipse, given its coefficient vector, to the parametric representation.
/// </summary>
/// <param name="conic">The implicit equation coefficients</param>
/// <returns>Parametric representation</returns>
private static EllipseParams Conic2Parametric(double[] conic)
{
    var A = new SymmetricMatrix(2);
    A[0, 0] = conic[0];                  // a
    A[1, 1] = conic[2];                  // c
    A[0, 1] = A[1, 0] = 0.5 * conic[1];  // half b

    var B = new ColumnVector(conic[3], conic[4]);
    var C = conic[5];

    var eig = A.Eigensystem();
    Debug.Assert(eig.Eigenvalue(0) * eig.Eigenvalue(1) > 0);

    var D = new double[] { eig.Eigenvalue(0), eig.Eigenvalue(1) };
    var Q = eig.Eigentransformation().Transpose();
    var t = -0.5 * A.Inverse() * B;

    var c_h = t.Transpose() * A * t + B.Transpose() * t + C;

    return new EllipseParams
    {
        Center = new Point(t[0], t[1]),
        XRadius = Math.Sqrt(-c_h / D[0]),
        YRadius = Math.Sqrt(-c_h / D[1]),
        Degrees = 180 * Math.Atan2(Q[0, 1], Q[0, 0]) / Math.PI,
    };
}
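A minimal usage sketch (not part of the original source): it feeds the coefficients of x^2/4 + y^2/9 - 1 = 0, i.e. {a, b, c, d, e, f} = {0.25, 0, 1/9, 0, 0, -1}, into Conic2Parametric. The expected result is a center at the origin, semi-axes of 2 and 3 (which of the two lands in XRadius may depend on the eigenvalue ordering returned by the library), and a rotation of 0 degrees. The helper name is hypothetical, and it assumes the Center point exposes X and Y.

private static void Conic2ParametricExample()
{
    // Hypothetical example, assuming the Conic2Parametric helper above and an
    // EllipseParams whose Center exposes X and Y.
    double[] conic = { 0.25, 0.0, 1.0 / 9.0, 0.0, 0.0, -1.0 };
    EllipseParams ellipse = Conic2Parametric(conic);
    Console.WriteLine("center = ({0}, {1}), radii = ({2}, {3}), angle = {4} deg",
        ellipse.Center.X, ellipse.Center.Y, ellipse.XRadius, ellipse.YRadius, ellipse.Degrees);
}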
public void SymmetricRandomMatrixInverse()
{
    for (int d = 1; d <= 100; d = d + 11)
    {
        SymmetricMatrix M = TestUtilities.CreateSymmetricRandomMatrix(d, 1);
        SymmetricMatrix MI = M.Inverse();
        Assert.IsTrue(TestUtilities.IsNearlyEqual(MI * M, UnitMatrix.OfDimension(d)));
    }
}
public void SymmetricHilbertMatrixInverse()
{
    for (int d = 1; d < 4; d++)
    {
        SymmetricMatrix H = TestUtilities.CreateSymmetricHilbertMatrix(d);
        SymmetricMatrix HI = H.Inverse();
        Assert.IsTrue(TestUtilities.IsNearlyEqual(HI * H, UnitMatrix.OfDimension(d)));
    }
    // Fails for d >= 4; look into this.
}
public void SymmetricRandomMatrixInverse()
{
    for (int d = 1; d <= 100; d = d + 11)
    {
        Console.WriteLine("d={0}", d);
        SquareMatrix I = TestUtilities.CreateSquareUnitMatrix(d);
        SymmetricMatrix M = TestUtilities.CreateSymmetricRandomMatrix(d, 1);
        SymmetricMatrix MI = M.Inverse();
        Assert.IsTrue(TestUtilities.IsNearlyEqual(M * MI, I));
    }
}
public void inverse()
{
    block(OriginA, DiagRowCount);
    //ParallelInverseOfSymmetric2 pppp = new ParallelInverseOfSymmetric2(SymmetricMatrix);
    //var aa = pppp.invA.Multiply(SymmetricMatrix);
    //invA = uniteBlockOfSymmetric(DiagonalMatrix, pppp.invA);
    DiagonalMatrix = DiagonalMatrix.Inverse();
    invA = uniteBlockOfSymmetric(DiagonalMatrix, SymmetricMatrix.Inverse());
    //var sss = OriginA.Multiply(invA);
}
public void SymmetricHilbertMatrixInverse()
{
    for (int d = 1; d <= 4; d++)
    {
        Console.WriteLine("d={0}", d);
        SquareMatrix I = TestUtilities.CreateSquareUnitMatrix(d);
        SymmetricMatrix H = TestUtilities.CreateSymmetricHilbertMatrix(d);
        SymmetricMatrix HI = H.Inverse();
        Assert.IsTrue(TestUtilities.IsNearlyEqual(H * HI, I));
    }
    // Fails for d > 4; look into this.
}
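The Hilbert-matrix tests above note failures once the dimension reaches about 4 or 5. The Hilbert matrix is a standard example of an ill-conditioned matrix, so the accuracy of the numerically computed inverse degrades quickly as the dimension grows. Below is a small diagnostic sketch, assuming the same Meta.Numerics types and the TestUtilities helper used above, that prints the worst deviation of H * H^{-1} from the identity for each dimension instead of asserting on it.

public void SymmetricHilbertMatrixInverseResiduals()
{
    for (int d = 1; d <= 8; d++)
    {
        SymmetricMatrix H = TestUtilities.CreateSymmetricHilbertMatrix(d);
        SymmetricMatrix HI = H.Inverse();
        var P = H * HI;  // should be (close to) the identity
        double worst = 0.0;
        for (int i = 0; i < d; i++)
        {
            for (int j = 0; j < d; j++)
            {
                worst = Math.Max(worst, Math.Abs(P[i, j] - (i == j ? 1.0 : 0.0)));
            }
        }
        // The residual grows roughly with the condition number of H,
        // which increases rapidly with d.
        Console.WriteLine("d={0} max |H*HI - I| = {1:E2}", d, worst);
    }
}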
public void inverse()
{
    DateTime start = DateTime.Now;
    var span = DateTime.Now - start;

    // First-level split
    cA = new cyMatrix(orignA, 2, 2);
    SymmetricMatrix q22 = new SymmetricMatrix(cA.blockMatrix1[2].Clone().Array);
    IMatrix q21 = cA.blockMatrix1[1].Clone();
    SymmetricMatrix Q = q22.Inverse();
    SymmetricMatrix q11 = new SymmetricMatrix(cA.blockMatrix1[0].Minus(q21.Transposition.Multiply(Q).Multiply(q21)).Array);
    Q11 = q11.Inverse(); //.GetInverse();

    var task0 = new Task(() => GetQ21(q21, Q));
    var task1 = new Task(() => GetQ22(q21, Q));
    task0.Start(); task1.Start();
    Task.WaitAll(task0, task1);

    cA.blockMatrix1[0] = Q11;
    cA.blockMatrix1[1] = Q21;
    cA.blockMatrix1[2] = Q22;
    invA = cA.uniteBlockOfSymmetric(cA, 1);
    var sssss0 = invA.Multiply(orignA);

    #region hide
    ////start = DateTime.Now;
    //// Second-level split
    //cA1 = new cyMatrix(cA.blockMatrix1[0], 2, 2);
    //cA2 = new cyMatrix(cA.blockMatrix1[2], 2, 2);
    //DiagM = new SymmetricMatrix[coreNum];
    //DiagM[0] = cA1.blockMatrix1[0].Clone();
    //DiagM[1] = cA1.blockMatrix1[2].Clone();
    //DiagM[2] = cA2.blockMatrix1[0].Clone();
    //DiagM[3] = cA2.blockMatrix1[2].Clone();
    ////var s11 = DiagM[0].Clone();
    //Parallel.For(0, coreNum, (int i) =>
    //{
    //    DiagM[i] = DiagM[i].GetInverse();//.Inverse;
    //});
    ////var sssss = s11.Multiply(DiagM[0]);

    //// Compute the diagonal block matrices in parallel
    //DiagQ = new SymmetricMatrix[coreNum];
    //// Parallelize the work with tasks
    //// Create the tasks
    //var t0 = new Task(() => GenerateDiagQ0());
    //var t1 = new Task(() => GenerateDiagQ1());
    //var t2 = new Task(() => GenerateDiagQ2());
    //var t3 = new Task(() => GenerateDiagQ3());
    //t0.Start(); t1.Start(); t2.Start(); t3.Start();
    //Task.WaitAll(t0, t1, t2, t3); // wait for all tasks to complete
    ////var q0 = DiagQ[0].Clone();
    ////var q1 = DiagQ[1].Clone();
    ////var q2 = DiagQ[2].Clone();
    ////var q3 = DiagQ[3].Clone();

    //// Invert
    //Parallel.For(0, coreNum, (int i) =>
    //{
    //    DiagQ[i] = DiagQ[i].GetInverse();//.Inverse;
    //});
    ////var r0 = q0.Multiply(DiagQ[0]);
    ////var r1 = q1.Multiply(DiagQ[1]);
    ////var r2 = q2.Multiply(DiagQ[2]);
    ////var r3 = q3.Multiply(DiagQ[3]);

    //#region Takes about 5 ms more than the version below, so omitted
    ////start = DateTime.Now; var b0 = new Task(() => GenenateBlock0());
    ////var b1 = new Task(() => GenenateBlock1());
    ////var b2 = new Task(() => GenenateBlock2());
    ////var b3 = new Task(() => GenenateBlock3());
    ////b0.Start(); b1.Start(); b2.Start(); b3.Start();
    ////Task.WaitAll(b0, b1, b2, b3); // wait for all tasks to complete
    ////span = DateTime.Now - start;
    ////Console.WriteLine(span.TotalMilliseconds + "ms merge 3----");
    //#endregion

    //var xxx = DiagM[1].Plus(DiagM[1].Multiply(cA1.blockMatrix1[1]).Multiply(DiagQ[0]).Multiply(cA1.blockMatrix1[1].Transposition).Multiply(DiagM[1]));
    //var xxx2 = DiagM[3].Plus(DiagM[3].Multiply(cA2.blockMatrix1[1]).Multiply(DiagQ[2]).Multiply(cA2.blockMatrix1[1].Transposition).Multiply(DiagM[3]));
    //var c0 = new Task(() => GenenateBlock0());
    //var c2 = new Task(() => GenenateBlock2());
    //c0.Start(); c2.Start(); Task.WaitAll(c0, c2);
    //cA1.blockMatrix1[0] = DiagQ[0];
    ////cA1.blockMatrix1[2] = DiagQ[1];
    //cA2.blockMatrix1[0] = DiagQ[2];
    ////cA2.blockMatrix1[2] = DiagQ[3];
    ////var ooo = cA1.blockMatrix1[2].Minus(xxx);
    ////DiagM[0] = cA1.uniteBlockOfSymmetric(cA1, 1);
    ////DiagM[1] = cA2.uniteBlockOfSymmetric(cA2, 1);
    ////var ss = DiagM[0].Multiply(cA.blockMatrix1[0]);
    ////var ss1 = DiagM[1].Multiply(cA.blockMatrix1[2]);
    //cA1.blockMatrix1[2] = xxx;
    //cA2.blockMatrix1[2] = xxx2;
    //DiagM[0] = cA1.uniteBlockOfSymmetric(cA1, 1);
    //DiagM[1] = cA2.uniteBlockOfSymmetric(cA2, 1);
    //var sss = DiagM[0].Multiply(cA.blockMatrix1[0]);

    //var nt0 = new Task(() => GeneratenewDiagQ0());
    //var nt1 = new Task(() => GeneratenewDiagQ1());
    //nt0.Start();
    //nt1.Start();
    //Task.WaitAll(nt0, nt1); // wait for all tasks to complete
    //Parallel.For(0, 2, (int i) =>
    //{
    //    DiagQ[i] = DiagQ[i].GetInverse();//.Inverse;
    //});

    //#region Takes about 15 ms more than just computing it sequentially below, so omitted
    ////start = DateTime.Now;
    ////var nb0 = new Task(() => GeneratenewBlock0());
    ////var nb1 = new Task(() => GeneratenewBlock1());
    ////nb0.Start(); nb1.Start();
    ////Task.WaitAll(nb0, nb1); // wait for all tasks to complete
    ////span = DateTime.Now - start;
    ////Console.WriteLine(span.TotalMilliseconds + "ms merge 30000");
    //#endregion

    //var xxxx = DiagM[1].Plus(DiagM[1].Multiply(cA.blockMatrix1[1]).Multiply(DiagQ[0]).Multiply(cA.blockMatrix1[1].Transposition).Multiply(DiagM[1]));
    //GeneratenewBlock0();
    //cA.blockMatrix1[0] = DiagQ[0];
    ////cA.blockMatrix1[2] = DiagQ[1];
    ////invA = cA.uniteBlockOfSymmetric(cA, 1);
    ////var ssss = invA.Multiply(orignA);
    ////var oooo = cA1.blockMatrix1[2].Minus(xxx);
    //cA.blockMatrix1[2] = xxxx;
    //invA = cA.uniteBlockOfSymmetric(cA, 1);
    ////var sssss0 = invA.Multiply(orignA);
    #endregion
}
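For reference, the active (uncommented) part of the method above appears to implement a 2x2 block inversion of the symmetric matrix via the Schur complement of the lower-right block: Q corresponds to A_{22}^{-1} and Q11 to S^{-1}, while GetQ21 and GetQ22 (not shown) presumably fill in the remaining blocks. A sketch of the identity being used, assuming A is symmetric with off-diagonal block A_{21}:

$$
A = \begin{pmatrix} A_{11} & A_{21}^{\mathsf T} \\ A_{21} & A_{22} \end{pmatrix},
\qquad
S = A_{11} - A_{21}^{\mathsf T} A_{22}^{-1} A_{21},
$$
$$
A^{-1} = \begin{pmatrix}
S^{-1} & -S^{-1} A_{21}^{\mathsf T} A_{22}^{-1} \\
-A_{22}^{-1} A_{21} S^{-1} & A_{22}^{-1} + A_{22}^{-1} A_{21} S^{-1} A_{21}^{\mathsf T} A_{22}^{-1}
\end{pmatrix}.
$$

Once S^{-1} is available, the off-diagonal and lower-right blocks can be computed independently, which matches the task0/task1 structure in the code.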
/// <summary>
/// Finds the Weibull distribution that best fits the given sample.
/// </summary>
/// <param name="sample">The sample to fit.</param>
/// <returns>The best fit parameters.</returns>
/// <exception cref="ArgumentNullException"><paramref name="sample"/> is null.</exception>
/// <exception cref="InvalidOperationException"><paramref name="sample"/> contains non-positive values.</exception>
/// <exception cref="InsufficientDataException"><paramref name="sample"/> contains fewer than three values.</exception>
public static WeibullFitResult FitToWeibull(this IReadOnlyList<double> sample)
{
    if (sample == null) { throw new ArgumentNullException(nameof(sample)); }
    if (sample.Count < 3) { throw new InsufficientDataException(); }
    foreach (double value in sample)
    {
        if (value <= 0.0) { throw new InvalidOperationException(); }
    }

    // The log likelihood function is
    //   \log L = N \log k + (k-1) \sum_i \log x_i - N k \log \lambda - \sum_i \left(\frac{x_i}{\lambda}\right)^k
    // Taking derivatives, we get
    //   \frac{\partial \log L}{\partial \lambda} = - \frac{N k}{\lambda} + \sum_i \frac{k}{\lambda} \left(\frac{x_i}{\lambda}\right)^k
    //   \frac{\partial \log L}{\partial k} = \frac{N}{k} + \sum_i \left[ 1 - \left(\frac{x_i}{\lambda}\right)^k \right] \log \left(\frac{x_i}{\lambda}\right)
    // Setting the first expression to zero and solving for \lambda gives
    //   \lambda = \left( N^{-1} \sum_i x_i^k \right)^{1/k} = ( < x^k > )^{1/k}
    // which allows us to reduce the problem from 2D to 1D.
    // By the way, using the expression for the moment < x^k > of the Weibull distribution, you can show there is
    // no bias to this result even for finite samples.
    // Setting the second expression to zero gives
    //   \frac{1}{k} = \frac{1}{N} \sum_i \left[ \left( \frac{x_i}{\lambda} \right)^k - 1 \right] \log \left(\frac{x_i}{\lambda}\right)
    // which, given the equation for \lambda as a function of k derived from the first expression, is an implicit
    // equation for k. It cannot be solved in closed form, but we have now reduced our problem to finding a root
    // in one dimension.

    // We need a starting guess for k.
    // The method of moments equations are not solvable for the parameters in closed form,
    // but the scale parameter drops out of the ratio of the 1/3 and 2/3 quantile points,
    // and the result is easily solved for the shape parameter:
    //   k = \frac{\log 2}{\log\left(\frac{x_{2/3}}{x_{1/3}}\right)}
    double x1 = sample.InverseLeftProbability(1.0 / 3.0);
    double x2 = sample.InverseLeftProbability(2.0 / 3.0);
    double k0 = Global.LogTwo / Math.Log(x2 / x1);
    // Given the shape parameter, we could invert the expression for the mean to get
    // the scale parameter, but since we have an expression for \lambda from k, we don't need it.
    //double s0 = sample.Mean / AdvancedMath.Gamma(1.0 + 1.0 / k0);

    // Simply handing our 1D function to a root-finder works fine until we start to encounter large k. For large k,
    // even just computing \lambda goes wrong because we are taking x_i^k which overflows. Horst Rinne, "The Weibull
    // Distribution: A Handbook" describes a way out. Basically, we first move to variables z_i = \log(x_i) and
    // then w_i = z_i - \bar{z}. Then lots of factors of e^{k \bar{z}} cancel out and, even though we still do
    // have some e^{k w_i}, the w_i are small and centered around 0 instead of large and centered around \lambda.

    //Sample transformedSample = sample.Copy();
    //transformedSample.Transform(x => Math.Log(x));
    double[] transformedSample = new double[sample.Count];
    for (int j = 0; j < sample.Count; j++)
    {
        transformedSample[j] = Math.Log(sample[j]);
    }
    double zbar = transformedSample.Mean();
    for (int j = 0; j < transformedSample.Length; j++)
    {
        transformedSample[j] -= zbar;
    }

    // After this change of variable the 1D function to zero becomes
    //   g(k) = \sum_i ( 1 - k w_i ) e^{k w_i}
    // It's easy to show that g(0) = N and g(\infty) = -\infty, so it must cross zero. It's also easy to take
    // a derivative
    //   g'(k) = - k \sum_i w_i^2 e^{k w_i}
    // so we can apply Newton's method.
    int i = 0;
    double k1 = k0;
    while (true)
    {
        i++;
        double g = 0.0;
        double gp = 0.0;
        foreach (double w in transformedSample)
        {
            double e = Math.Exp(k1 * w);
            g += (1.0 - k1 * w) * e;
            gp -= k1 * w * w * e;
        }
        double dk = -g / gp;
        k1 += dk;
        if (Math.Abs(dk) <= Global.Accuracy * Math.Abs(k1)) { break; }
        if (i >= Global.SeriesMax) { throw new NonconvergenceException(); }
    }

    // The corresponding lambda can also be expressed in terms of zbar and the w's.
    double t = 0.0;
    foreach (double w in transformedSample)
    {
        t += Math.Exp(k1 * w);
    }
    t /= transformedSample.Length;
    double lambda1 = Math.Exp(zbar) * Math.Pow(t, 1.0 / k1);

    // We need the curvature matrix at the minimum of our log likelihood function
    // to determine the covariance matrix. Taking more derivatives...
    //   \frac{\partial^2 \log L}{\partial \lambda^2} = \frac{N k}{\lambda^2} - \sum_i \frac{k(k+1) x_i^k}{\lambda^{k+2}}
    //                                                = - \frac{N k^2}{\lambda^2}
    // The second expression follows by inserting the first-derivative-equals-zero relation into the first.
    // For k = 1, this agrees with the variance formula for the mean of the best-fit exponential.
    // Derivatives involving k are less simple.
    // We end up needing the means < (x/\lambda)^k \log(x/\lambda) > and < (x/\lambda)^k \log^2(x/\lambda) >.
    double mpl = 0.0;
    double mpl2 = 0.0;
    foreach (double x in sample)
    {
        double r = x / lambda1;
        double p = Math.Pow(r, k1);
        double l = Math.Log(r);
        double pl = p * l;
        double pl2 = pl * l;
        mpl += pl;
        mpl2 += pl2;
    }
    mpl = mpl / sample.Count;
    mpl2 = mpl2 / sample.Count;

    // See if we can't do any better here. Transforming to zbar and w's looked ugly, but perhaps it
    // can be simplified? One interesting observation: if we take expectation values (which gives
    // the Fisher information matrix) the entries become simple:
    //   B_{\lambda \lambda} = \frac{N k^2}{\lambda^2}
    //   B_{\lambda k} = -\Gamma'(2) \frac{N}{\lambda}
    //   B_{k k} = [1 + \Gamma''(2)] \frac{N}{k^2}
    // Would it be bad to just use these directly?

    // Construct the curvature matrix and invert it.
    SymmetricMatrix C = new SymmetricMatrix(2);
    C[0, 0] = sample.Count * MoreMath.Sqr(k1 / lambda1);
    C[0, 1] = -sample.Count * k1 / lambda1 * mpl;
    C[1, 1] = sample.Count * (1.0 / MoreMath.Sqr(k1) + mpl2);
    CholeskyDecomposition CD = C.CholeskyDecomposition();
    if (CD == null) { throw new DivideByZeroException(); }
    C = C.Inverse();

    // Do a KS test to compare the sample to the best-fit distribution.
    WeibullDistribution distribution = new WeibullDistribution(lambda1, k1);
    TestResult test = sample.KolmogorovSmirnovTest(distribution);

    // Return the result.
    return new WeibullFitResult(lambda1, k1, C, distribution, test);
}
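A brief usage sketch, not from the original source: draw a synthetic sample from a known Weibull distribution and fit it back with FitToWeibull. GetRandomValue(Random) is assumed to be available on the distribution object (as on Meta.Numerics continuous distributions), and the Scale and Shape property names on WeibullFitResult are assumptions; adjust them to whatever the fit-result type actually exposes.

public static void FitToWeibullExample()
{
    Random rng = new Random(271828);
    WeibullDistribution source = new WeibullDistribution(2.0, 1.5);  // scale 2.0, shape 1.5

    List<double> sample = new List<double>();
    for (int i = 0; i < 1000; i++)
    {
        sample.Add(source.GetRandomValue(rng));
    }

    WeibullFitResult fit = sample.FitToWeibull();
    // With 1000 points the fitted scale and shape should land near 2.0 and 1.5.
    // Scale and Shape are assumed property names on the fit result.
    Console.WriteLine("scale = {0}, shape = {1}", fit.Scale, fit.Shape);
}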