/// <summary>
/// Uses nested Clenshaw-Curtis quadrature on the alternative form with an invariant to compute
/// the probability that each element is the minimum for a set of normal distributions, to within
/// a user-specified precision.
/// </summary>
/// <param name="distributions"> The set of normal distributions for which you want to compute P(X = min X) </param>
/// <param name="errorTolerance"> The maximum total error in P(X = min X) over all regions </param>
/// <param name="maxIterations"> The maximum number of times the quadrature rule will be used, doubling in order each time </param>
/// <returns> P(X_i = min X) for each i, in the order of <paramref name="distributions"/> </returns>
public static double[] ComplementsClenshawCurtisAutomatic(Normal[] distributions, double errorTolerance = 10E-14, int maxIterations = 10)
{
    // Change integral to alternative form by negating the distributions
    distributions = NegateDistributions(distributions); // This change is local to this method

    // Compute the interval of integration.
    // The product-of-CDFs factor vanishes below max_i(mean_i - 8sd_i), and every density
    // factor vanishes above max_i(mean_i + 8sd_i), so integrating over this interval
    // captures everything representable in double precision.
    double maxOfMeanMinus8Stddev = distributions[0].Mean - 8 * distributions[0].StdDev;
    double maxOfMeanPlus8Stddev = distributions[0].Mean + 8 * distributions[0].StdDev;
    for (int i = 1; i < distributions.Length; i++)
    {
        maxOfMeanMinus8Stddev = Math.Max(maxOfMeanMinus8Stddev, distributions[i].Mean - 8 * distributions[i].StdDev);
        maxOfMeanPlus8Stddev = Math.Max(maxOfMeanPlus8Stddev, distributions[i].Mean + 8 * distributions[i].StdDev);
    }
    // 8 standard deviations is just past the threshold beyond which normal PDFs are less than machine epsilon in double precision
    double intervalLowerLimit = maxOfMeanMinus8Stddev;
    double intervalUpperLimit = maxOfMeanPlus8Stddev;

    // Compute a linear transformation from [-1,1] to [iLL, iUL]: x = a*z + b
    double a = (intervalUpperLimit - intervalLowerLimit) / 2.0;
    double b = intervalLowerLimit + a;
    double xOfz(double z) => z * a + b; // As z ranges over [-1,1], x will range over [iLL,iUL]

    // --- Initialize the Vectors ---
    int order = 32; // Start with a 33-point CC rule
    double errorSum = double.PositiveInfinity;
    double[] errors = new double[distributions.Length];
    double[] complements = new double[distributions.Length];
    double[] weights = ClenshawCurtis.GetWeights(order);
    double[] X = ClenshawCurtis.GetEvalPoints(order); // Eval points in Z
    for (int i = 0; i < X.Length; i++) { X[i] = xOfz(X[i]); } // Convert from Z to X
    double[] C = new double[X.Length]; // The invariant product for each X value, without weights
    bool[] isFinished = new bool[distributions.Length]; // Keeps track of which regions are already at the desired precision
    for (int i = 0; i < C.Length; i++)
    {
        C[i] = 1;
        for (int j = 0; j < distributions.Length; j++)
        {
            C[i] *= distributions[j].CumulativeDistribution(X[i]);
        }
    }

    // --- Iterate higher order quadrature rules until desired precision is obtained ---
    // We keep three vectors X[], weights[], and C[] instead of two; the weights live in
    // weights[] rather than being folded into C[]. Each iteration replaces these vectors with
    // expanded versions: half + 1 of the entries are the old entries (nested rule), and the
    // other nearly half are freshly computed. weights[] is the exception: it is completely
    // replaced each time.
    for (int iteration = 0; iteration < maxIterations; iteration++)
    {
        double[] newComplements = new double[distributions.Length];

        // Update discard complement probability vector
        for (int i = 0; i < distributions.Length; i++)
        {
            // Skip if this element is at the desired accuracy already
            if (iteration > 1 && isFinished[i])
            {
                newComplements[i] = complements[i];
                continue;
            }
            newComplements[i] = 0;
            for (int j = 0; j < C.Length; j++)
            {
                double CDFij = distributions[i].CumulativeDistribution(X[j]);
                if (CDFij > 0) // Guard against division by zero in the invariant form
                {
                    newComplements[i] += distributions[i].Density(X[j]) * C[j] * weights[j] / CDFij;
                }
            }
            newComplements[i] *= a; // Multiply by the derivative dx/dz
        }

        // Update the error estimates (needs a previous iterate to difference against)
        if (iteration > 0)
        {
            errorSum = 0;
            for (int i = 0; i < errors.Length; i++)
            {
                double newError = Math.Abs(complements[i] - newComplements[i]);
                // Detect if finished, which requires the error estimate for the ith term to be
                // decreasing and less than its fair share of the total error tolerance.
                // BUGFIX: this guard previously read 'i > 1', which permanently excluded
                // elements 0 and 1 from ever being marked finished (they were recomputed on
                // every iteration); the guard belongs on the iteration count, matching the
                // skip condition above.
                if (!isFinished[i] && iteration > 1 && newError < errorTolerance / distributions.Length && newError < errors[i])
                {
                    isFinished[i] = true;
                }
                errors[i] = newError;
                errorSum += errors[i];
            }
        }
        complements = newComplements;

        // Check if all the probabilities add up to one
        double totalProb = 0;
        for (int i = 0; i < complements.Length; i++) { totalProb += complements[i]; }
        bool probsSumToOne = Math.Abs(totalProb - 1.0) < 1E-12;

        // Handle the end of the iteration
        if ((errorSum < errorTolerance && probsSumToOne) || iteration == maxIterations - 1)
        {
            break; // Terminate and return complements
        }

        // Update the vectors for the next iteration. Doubling the order of a nested CC rule
        // reuses every existing evaluation point at the even slots, so stretch the old arrays
        // to leave gaps for the new entries.
        order *= 2;
        weights = ClenshawCurtis.GetWeights(order);
        double[] newX = new double[weights.Length];
        double[] newC = new double[weights.Length];
        for (int i = 0; i < X.Length; i++)
        {
            newX[2 * i] = X[i];
            newC[2 * i] = C[i];
        }
        // Add the new entries to X and compute their invariant CDF products
        double[] entries = ClenshawCurtis.GetOddEvalPoints(order); // New entries in Z
        for (int i = 0; i < entries.Length; i++)
        {
            int slot = 2 * i + 1;
            newX[slot] = xOfz(entries[i]); // Convert from Z to X
            newC[slot] = 1;
            for (int j = 0; j < distributions.Length; j++)
            {
                newC[slot] *= distributions[j].CumulativeDistribution(newX[slot]);
            }
        }
        X = newX;
        C = newC;
    }

    return complements;
}
/// <summary>
/// Computes P(X_i = min X) for a set of normal distributions with a fixed-order
/// Clenshaw-Curtis quadrature rule applied to the alternative (negated) form of the integral.
/// </summary>
/// <param name="distributions"> The set of normal distributions for which to compute P(X = min X) </param>
/// <param name="order"> The order of the Clenshaw-Curtis rule to use </param>
/// <returns> The complement probabilities, one entry per distribution </returns>
public static double[] ComplementsClenshawCurtis(Normal[] distributions, int order)
{
    // Switch to the alternative form by negating the distributions; this change is
    // local to this method.
    distributions = NegateDistributions(distributions);

    // Quadrature nodes (in z) and weights for the requested order
    double[] nodes = ClenshawCurtis.GetEvalPoints(order);
    double[] quadWeights = ClenshawCurtis.GetWeights(order);

    // Locate the largest mean and largest standard deviation across the set
    double largestMean = distributions[0].Mean;
    double largestStdDev = 0;
    foreach (Normal dist in distributions)
    {
        if (dist.Mean > largestMean) { largestMean = dist.Mean; }
        if (dist.StdDev > largestStdDev) { largestStdDev = dist.StdDev; }
    }

    // 8 standard deviations is just past the threshold beyond which normal PDFs are
    // less than machine epsilon in double precision
    double intervalLowerLimit = largestMean - 8 * largestStdDev;
    double intervalUpperLimit = largestMean + 8 * largestStdDev;

    // Linear transformation taking z in [-1,1] to x in [iLL, iUL]
    double a = (intervalUpperLimit - intervalLowerLimit) / 2.0;
    double b = -1 * (2 * intervalLowerLimit / (intervalUpperLimit - intervalLowerLimit) + 1);
    double xOfz(double z) => (z - b) * a;

    // For each node: its x location, and the weight scaled by the product of every CDF there
    double[] weightedProducts = new double[nodes.Length];
    double[] xs = new double[nodes.Length];
    for (int i = 0; i < weightedProducts.Length; i++)
    {
        weightedProducts[i] = quadWeights[i];
        xs[i] = xOfz(nodes[i]);
        foreach (Normal dist in distributions)
        {
            weightedProducts[i] *= dist.CumulativeDistribution(xs[i]);
        }
    }

    // --- Perform the Integration ---
    double[] complementProbs = new double[distributions.Length];
    for (int i = 0; i < distributions.Length; i++)
    {
        double sum = 0;
        for (int j = 0; j < weightedProducts.Length; j++)
        {
            double CDFij = distributions[i].CumulativeDistribution(xs[j]);
            if (CDFij > 0) // Guard against division by zero in the invariant form
            {
                sum += distributions[i].Density(xs[j]) * weightedProducts[j] / CDFij;
            }
        }
        complementProbs[i] = sum * a; // Multiply by the derivative dx/dz
        Console.WriteLine($"CCAltInv[{i}]: {complementProbs[i]}");
    }
    return complementProbs;
}