public EuropeanPutResult Calculate(double K, double S, double r, double t, double sigma)
{
    EuropeanPutResult ePut = new EuropeanPutResult();

    // Compute the d1 and d2 integration points, then the cumulative normal at -d1 and -d2
    // (the put side of the distribution).
    ePut.IntegralPointD1 = _integralPoints.CalculateIntegralPointD1(S, K, r, sigma, t);
    ePut.IntegralPointD2 = _integralPoints.CalculateIntegralPointD2(ePut.IntegralPointD1, sigma, t);
    ePut.Distribution1 = _distribution.CumulativeDistribution(-ePut.IntegralPointD1.Result.Value);
    ePut.Distribution2 = _distribution.CumulativeDistribution(-ePut.IntegralPointD2.Result.Value);
    ePut.Result.Value = CalculateValue(K, S, r, t, ePut);
    return ePut;
}
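// CalculateValue is not shown above. Assuming it implements the standard Black-Scholes
// European put, P = K * exp(-r*t) * N(-d2) - S * N(-d1), which matches the N(-d1)/N(-d2)
// terms computed above, a minimal self-contained sketch would be:
using System;
using MathNet.Numerics.Distributions;

public static class EuropeanPutSketch
{
    public static double Price(double K, double S, double r, double t, double sigma)
    {
        var stdNormal = new Normal(0.0, 1.0);
        double d1 = (Math.Log(S / K) + (r + 0.5 * sigma * sigma) * t) / (sigma * Math.Sqrt(t));
        double d2 = d1 - sigma * Math.Sqrt(t);
        return K * Math.Exp(-r * t) * stdNormal.CumulativeDistribution(-d2)
             - S * stdNormal.CumulativeDistribution(-d1);
    }
}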
public BtResult CalculateBtK1(double r, double sigma, double t, double K, double S, int n, double T)
{
    BtResult bt = new BtResult();

    // d1 and d2 are evaluated at the strike (spot argument = K) for this first boundary estimate.
    bt.IntegralPointD1 = _integralPoints.CalculateIntegralPointD1(K, K, r, sigma, t);
    bt.IntegralPointD2 = _integralPoints.CalculateIntegralPointD2(bt.IntegralPointD1, sigma, t);
    bt.Distribution = _distribution.CumulativeDistribution(bt.IntegralPointD1.Result.Value);
    bt.a = CalculateAValue(sigma, t);
    bt.Result.Value = CalculateBtK1(sigma, K, r, t, bt);
    return bt;
}
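// The d1 point above is evaluated at S = K, where ln(S/K) = 0. Assuming the standard
// Black-Scholes d1, it then collapses to (r + sigma^2/2) * sqrt(t) / sigma; a one-line
// sketch of that simplification:
using System;

public static class AtTheMoneyD1
{
    public static double Value(double r, double sigma, double t)
    {
        // d1 = (ln(K/K) + (r + sigma^2/2) * t) / (sigma * sqrt(t)), with ln(K/K) = 0
        return (r + 0.5 * sigma * sigma) * Math.Sqrt(t) / sigma;
    }
}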
public async Task<IntegralFunction> CalculateAsync(int n, double T, double r, double sigma, double t, double S, double K, BtResult Btksi)
{
    IntegralFunction integralFunction = new IntegralFunction();
    UnderIntegral[] underIntegral = new UnderIntegral[n];

    // Left-endpoint rectangle rule over [0, T]: n subintervals of width h = T / n.
    // Note there are no awaits here, so the method completes synchronously despite
    // its async signature.
    for (int i = 0; i < n; i++)
    {
        UnderIntegral ui = new UnderIntegral();
        ui.h = T / n;
        ui.ksi = i * ui.h;
        ui.IntegralPointD1 = _integralPoints.CalculateIntegralPointD1(S, Btksi.Result.Value, r, sigma, t - ui.ksi);
        ui.IntegralPointD2 = _integralPoints.CalculateIntegralPointD2(ui.IntegralPointD1, sigma, t - ui.ksi);
        ui.Distribution = _distribution.CumulativeDistribution(-ui.IntegralPointD2.Result.Value);
        ui.Result.Value = CalculateUnderIntegral(r, K, t, ui.ksi, ui.Distribution) * ui.h;
        underIntegral[i] = ui;
        integralFunction.Result.Value += ui.Result.Value;
    }

    integralFunction.UnderIntegral = underIntegral;
    return integralFunction;
}
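// The loop above is a left-endpoint rectangle rule: the integral over [0, T] is
// approximated by sum_i f(ksi_i) * h, with ksi_i = i * h and h = T / n. A minimal
// generic sketch of the same quadrature (f is a hypothetical integrand):
using System;

public static class RectangleRule
{
    public static double Integrate(Func<double, double> f, double T, int n)
    {
        double h = T / n; // subinterval width
        double sum = 0.0;
        for (int i = 0; i < n; i++)
        {
            sum += f(i * h) * h; // evaluate at the left endpoint of each subinterval
        }
        return sum;
    }
}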
/// <summary>
/// Vapnik-Chervonenkis test.
/// </summary>
/// <param name="epsilon">The error we are willing to tolerate.</param>
/// <param name="delta">The error probability we are willing to tolerate.</param>
/// <param name="s">The samples to use for testing.</param>
/// <param name="dist">The distribution we are testing.</param>
public static void VapnikChervonenkisTest(double epsilon, double delta, IEnumerable<double> s, IUnivariateDistribution dist)
{
    // Using VC-dimension, we can bound the probability of making an error when estimating empirical probability
    // distributions. We are using Theorem 2.41 in "All Of Nonparametric Statistics".
    // http://books.google.com/books?id=MRFlzQfRg7UC&lpg=PP1&dq=all%20of%20nonparametric%20statistics&pg=PA22#v=onepage&q=%22shatter%20coe%EF%AC%83cients%20do%20not%22&f=false
    // For intervals on the real line the VC-dimension is 2.
    double n = s.Count();
    Assert.Greater(n, Math.Ceiling(32.0 * Math.Log(16.0 / delta) / epsilon / epsilon));

    var histogram = new Histogram(s, NumberOfBuckets);
    for (var i = 0; i < NumberOfBuckets; i++)
    {
        var p = dist.CumulativeDistribution(histogram[i].UpperBound) - dist.CumulativeDistribution(histogram[i].LowerBound);
        var pe = histogram[i].Count / n;
        Assert.Less(Math.Abs(p - pe), epsilon, dist.ToString());
    }
}
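// For intuition, the assertion above enforces the Theorem 2.41 sample-size bound
// n > 32 * ln(16 / delta) / epsilon^2. For example, epsilon = 0.05 and delta = 0.05
// require more than ceil(32 * ln(320) / 0.0025) = 73,835 samples.
using System;

public static class VcSampleSize
{
    public static double Minimum(double epsilon, double delta)
    {
        return Math.Ceiling(32.0 * Math.Log(16.0 / delta) / (epsilon * epsilon));
    }
}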
private void Recompute(bool getBetaHatOnly)
{
    int p = augmentedExplanatory.ColumnCount;
    int n = augmentedExplanatory.RowCount;

    // Normal equations: solve (X'X) * betaHat = X'y.
    // Note: Matrix<T>.Transpose() returns a new matrix rather than transposing in place,
    // so its result must be assigned, not discarded.
    Matrix<double> xt = augmentedExplanatory.Transpose();
    Vector<double> xty = xt * dependent;
    Matrix<double> xtx = xt * augmentedExplanatory;

    BetaHat = Vector<double>.Build.Dense(p);
    if (xty.L2Norm() == 0)
    {
        return;
    }

    BetaHat = xtx.Solve(xty);

    if (getBetaHatOnly)
    {
        return;
    }

    var fitted = augmentedExplanatory * BetaHat;
    var resids = dependent - fitted;

    // Now compute approximate p-values: a two-sided normal-approximation test of
    // H0: beta_i = 0 for each coefficient.
    Sigma = Math.Sqrt(resids.Variance()) * n / (n - p);
    BetaHatCovariance = Sigma * Sigma * xtx.Inverse();
    PValues = Vector<double>.Build.Dense(p);
    for (int i = 0; i < p; ++i)
    {
        double x = Math.Abs(BetaHat[i]) / Math.Sqrt(BetaHatCovariance[i, i]);
        PValues[i] = 2 * (1 - stdNormal.CumulativeDistribution(x));
    }
}
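// The normal-equations solve above squares the condition number of the design matrix.
// A minimal alternative sketch using Math.NET's QR factorization, which computes the
// same betaHat more stably (X and y are hypothetical placeholders):
using MathNet.Numerics.LinearAlgebra;

public static class OlsSketch
{
    public static Vector<double> Fit(Matrix<double> X, Vector<double> y)
    {
        // Least-squares solution of X * beta = y without forming X'X explicitly.
        return X.QR().Solve(y);
    }
}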
public static double TwoTailProbability(this IUnivariateDistribution nd, double value)
{
    var result = nd.CumulativeDistribution(value);

    // Take the smaller tail, then double it for a two-sided probability.
    if (result > 0.5)
    {
        result = 1 - result;
    }

    return result * 2;
}
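// Usage sketch, assuming the extension method above is in scope: at 1.96 standard
// deviations a standard normal gives the familiar two-sided p-value of about 0.05
// (CDF(1.96) ≈ 0.975, so 2 * (1 - 0.975) ≈ 0.05).
using MathNet.Numerics.Distributions;

class TwoTailProbabilityExample
{
    static void Main()
    {
        var stdNormal = new Normal(0.0, 1.0);
        System.Console.WriteLine(stdNormal.TwoTailProbability(1.96)); // ≈ 0.05
    }
}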
/// <summary> Provide the cumulative probability that a value will be equal to or lower than that supplied. </summary>
public static decimal? CumulativeP(IUnivariateDistribution distribution, decimal? val)
{
    return val == null || distribution == null
        ? null
        : sanitiseCumulativeP((decimal?)distribution.CumulativeDistribution((double)val));
}
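// Usage sketch: null inputs short-circuit to null; otherwise the CDF is evaluated at
// the supplied value and passed through sanitiseCumulativeP (the clamping helper
// referenced above, not shown). Assumes this call site sits in the same class.
var stdNormal = new Normal(0.0, 1.0);
decimal? p    = CumulativeP(stdNormal, 0.0m); // 0.5 for a standard normal
decimal? none = CumulativeP(stdNormal, null); // null propagates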