/// <summary>Divides <paramref name="dividend"/> by <paramref name="divisor"/>.</summary>
/// <param name="dividend">The value being divided.</param>
/// <param name="divisor">The value to divide by.</param>
/// <returns>The quotient dividend / divisor.</returns>
public abstract BigFloat Divide( BigFloat dividend, BigFloat divisor);
/// <summary>Subtracts <paramref name="subtrahend"/> from <paramref name="minuend"/>.</summary>
/// <param name="minuend">The value being subtracted from.</param>
/// <param name="subtrahend">The value to subtract.</param>
/// <returns>The difference minuend - subtrahend.</returns>
protected abstract BigFloat Subtract( BigFloat minuend, BigFloat subtrahend);
/**
 * Creates a new instance of <code>Location</code> with the given parameters.
 *
 * @param latitude
 *            the latitude, in degrees, of this location. North latitude is positive, south negative.
 * @param longitude
 *            the longitude, in degrees, of this location. East longitude is positive, west negative.
 */
public Location(double latitude, double longitude) { this.latitude = new BigFloat(latitude); this.longitude = new BigFloat(longitude); }
/// <summary>Truncates <paramref name="value"/> via the instance-level BigFloat.Truncate.</summary>
protected override BigFloat Truncate(BigFloat value)
{
    // Delegate directly to BigFloat's own truncation.
    var truncated = value.Truncate();
    return truncated;
}
/// <summary>Multiplies <paramref name="multiplicand"/> by <paramref name="multiplier"/>.</summary>
/// <param name="multiplicand">The value being multiplied.</param>
/// <param name="multiplier">The value to multiply by.</param>
/// <returns>The product multiplicand * multiplier.</returns>
protected abstract BigFloat Multiply( BigFloat multiplicand, BigFloat multiplier);
/// <summary>Adds <paramref name="leftAddend"/> and <paramref name="rightAddend"/>.</summary>
/// <param name="leftAddend">The left operand of the addition.</param>
/// <param name="rightAddend">The right operand of the addition.</param>
/// <returns>The sum leftAddend + rightAddend.</returns>
protected abstract BigFloat Add( BigFloat leftAddend, BigFloat rightAddend);
/// <summary>
/// Classifies every document in <paramref name="test"/> with a multinomial naive Bayes model
/// trained on <paramref name="train"/>, using Laplace (add-one) smoothing.
/// </summary>
/// <param name="train">Tokenized training corpus: per-document word frequencies plus class indices.</param>
/// <param name="test">Tokenized documents to classify.</param>
/// <returns>result[i,0] = predicted class index, result[i,1] = actual class index of test document i.</returns>
public static int[,] Bayes(Tokenizer train, Tokenizer test)
{
    /* While computing the probabilities the repeated multiplications make the numbers shrink
     * extremely fast (e.g. (1/15000)^3), underflowing double's 10^-324 lower limit.
     * To get around this we use the BigFloat library:
     * https://github.com/Osinko/BigFloat */
    // Class indices are assumed contiguous, so the last (largest) index + 1 is the class count.
    int classCount = train.DocumentClassIndex[train.DocumentClassIndex.Count - 1] + 1;
    BigFloat[] ratio = new BigFloat[classCount];// ratio of each class's document count to the total document count (the prior)
    BigFloat[] prob = new BigFloat[classCount];
    int[] count = new int[classCount]; // NOTE(review): never used below — candidate for removal
    Dictionary <string, int>[] classWordFreq = new Dictionary <string, int> [classCount];
    int[] classWordFreqSum = new int[classCount];
    int[,] result = new int[test.DocumentWordFreq.Count, 2];
    for (int i = 0; i < classCount; i++)// initialisation
    {
        ratio[i] = new BigFloat(0);
        prob[i] = new BigFloat(0);
    }
    // Count documents per class...
    for (int i = 0; i < train.DocumentClassIndex.Count; i++)
    {
        ratio[train.DocumentClassIndex[i]]++;
    }
    // ...then divide by the total number of documents to get the priors.
    for (int i = 0; i < classCount; i++)
    {
        classWordFreq[i] = new Dictionary <string, int>();
        ratio[i] /= train.DocumentClassIndex.Count;
    }
    for (int j = 0; j < train.DocumentWordFreq.Count; j++)// accumulates the word frequencies of each class
    {
        foreach (KeyValuePair <string, int> pair in train.DocumentWordFreq[j])
        {
            if (classWordFreq[train.DocumentClassIndex[j]].ContainsKey(pair.Key))
            {
                classWordFreq[train.DocumentClassIndex[j]][pair.Key] += pair.Value;
            }
            else
            {
                classWordFreq[train.DocumentClassIndex[j]].Add(pair.Key, pair.Value);
            }
            classWordFreqSum[train.DocumentClassIndex[j]] += pair.Value;// total number of words inside each class
        }
    }
    int counter = 0;
    int freq = 0;
    foreach (Dictionary <string, int> document in test.DocumentWordFreq)
    {
        for (int i = 0; i < classCount; i++)// for every document the probability starts out as the class prior
        {
            prob[i] = ratio[i];
        }
        // Then, following the multinomial naive Bayes formula, the prior is multiplied by the
        // per-word likelihoods, giving the probability of the document belonging to each class.
        // Likelihood = ((freq + 1) / (classWordSum + vocabularySize)) ^ occurrencesInDocument.
        foreach (KeyValuePair <string, int> pair in document)
        {
            for (int i = 0; i < classCount; i++)
            {
                if ((classWordFreq[i].ContainsKey(pair.Key)))
                {
                    freq = classWordFreq[i][pair.Key];
                }
                else
                {
                    freq = 0;
                }
                prob[i] *= new BigFloat(new BigFloat(freq + 1).Divide(new BigFloat(classWordFreqSum[i] + train.corpus.Count)).Pow(pair.Value));
            }
        }
        BigFloat max = new BigFloat(-1);
        int maxclass = -1;
        for (int i = 0; i < classCount; i++)// the largest probability wins; on ties the first one found is kept
        {
            if (prob[i] > max)
            {
                max = prob[i];
                maxclass = i;
            }
        }
        result[counter, 0] = maxclass;
        result[counter, 1] = test.DocumentClassIndex[counter];
        counter++;
    }
    return(result);
}
/// <summary>Applies a postfix decrement: <paramref name="value"/> is decremented, but the
/// value it held BEFORE the decrement is returned.</summary>
protected override BigFloat PostDecrement(ref BigFloat value)
{
    // Capture the pre-decrement value, then let the -- operator update the ref slot.
    BigFloat original = value;
    value--;
    return original;
}
//Turns a JArray deserialized object into an AlgoValue.
//Returns an AlgoValue of type List; on an unsupported token type, reports a fatal
//error through Error.Fatal and returns null (consistent with ParseJsonObject).
private static AlgoValue ParseJsonArray(ParserRuleContext context, dynamic deserialized)
{
    //Enumerate over values.
    List <AlgoValue> list = new List <AlgoValue>();
    foreach (JToken token in ((JArray)deserialized).Children())
    {
        switch (token.Type)
        {
            //Same parsing as below, but for a list.
            case JTokenType.Integer:
                list.Add(new AlgoValue() { Type = AlgoValueType.Integer, Value = BigInteger.Parse(token.ToString()) });
                break;
            case JTokenType.String:
                list.Add(new AlgoValue() { Type = AlgoValueType.String, Value = token.ToString() });
                break;
            case JTokenType.Array:
                list.Add(ParseJsonArray(context, (JArray)token));
                break;
            case JTokenType.Boolean:
                list.Add(new AlgoValue() { Type = AlgoValueType.Boolean, Value = bool.Parse(token.ToString()) });
                break;
            case JTokenType.Float:
                list.Add(new AlgoValue() { Type = AlgoValueType.Float, Value = BigFloat.Parse(token.ToString()) });
                break;
            case JTokenType.Null:
                list.Add(AlgoValue.Null);
                break;
            case JTokenType.Object:
                list.Add(ParseJsonObject(context, (JObject)token));
                break;
            default:
                //Fix: unsupported token types used to be silently skipped, shortening the
                //list with no diagnostic. Fail loudly instead, matching ParseJsonObject.
                Error.Fatal(context, "Invalid type '" + token.Type.ToString() + "' to parse from JSON.");
                return(null);
        }
    }
    //Wrap the parsed elements as an Algo list value.
    return(new AlgoValue() { Type = AlgoValueType.List, Value = list });
}
//Turns a JObject deserialized object into an AlgoValue.
//Each JSON property becomes a variable in the new AlgoObject's scope, named after the
//property; values are converted recursively. Unknown token types are a fatal error.
private static AlgoValue ParseJsonObject(ParserRuleContext context, dynamic deserialized)
{
    //Enumerate over properties.
    AlgoObject obj = new AlgoObject();
    foreach (JProperty token in ((JObject)deserialized).Properties())
    {
        switch (token.Value.Type)
        {
            //JSON Integer Representation
            case JTokenType.Integer:
                obj.ObjectScopes.AddVariable(token.Name, new AlgoValue() { Type = AlgoValueType.Integer, Value = BigInteger.Parse(token.Value.ToString()) });
                break;
            //JSON String Representation
            case JTokenType.String:
                obj.ObjectScopes.AddVariable(token.Name, new AlgoValue() { Type = AlgoValueType.String, Value = token.Value.ToString() });
                break;
            //JSON Array Representation
            case JTokenType.Array:
                obj.ObjectScopes.AddVariable(token.Name, ParseJsonArray(context, (JArray)token.Value));
                break;
            //JSON Boolean Representation
            case JTokenType.Boolean:
                obj.ObjectScopes.AddVariable(token.Name, new AlgoValue() { Type = AlgoValueType.Boolean, Value = bool.Parse(token.Value.ToString()) });
                break;
            //JSON Float Representation
            case JTokenType.Float:
                obj.ObjectScopes.AddVariable(token.Name, new AlgoValue() { Type = AlgoValueType.Float, Value = BigFloat.Parse(token.Value.ToString()) });
                break;
            //JSON Null Representation
            case JTokenType.Null:
                obj.ObjectScopes.AddVariable(token.Name, AlgoValue.Null);
                break;
            //JSON Object Representation
            case JTokenType.Object:
                obj.ObjectScopes.AddVariable(token.Name, ParseJsonObject(context, (JObject)token.Value));
                break;
            default:
                //Anything else (dates, raw, bytes, ...) is unsupported: abort the parse.
                Error.Fatal(context, "Invalid type '" + token.Value.Type.ToString() + "' to parse from JSON.");
                return(null);
        }
    }
    //Return the finished object.
    return(new AlgoValue() { Type = AlgoValueType.Object, Value = obj });
}
/// <summary>
/// Builds and submits a transaction operation: fetches the chain head and source account,
/// prepends a "reveal" operation when the source's public key is not yet revealed, then
/// forwards the assembled operation group to SendOperations.
/// </summary>
/// <param name="keys">Signing keys; may be null (no reveal is prepended without keys).</param>
/// <param name="from">Source address.</param>
/// <param name="to">Destination address.</param>
/// <param name="amount">Amount to transfer (converted to microtez below).</param>
/// <param name="fee">Operation fee.</param>
/// <param name="gasLimit">Gas limit; defaults to 200 when null.</param>
/// <param name="storageLimit">Storage limit; defaults to 0 when null.</param>
/// <param name="param">Optional contract parameters; when null a "Unit" parameter is sent.</param>
/// <returns>The last operation result, cast to SendTransactionOperationResult (null if none).</returns>
public async Task <SendTransactionOperationResult> SendTransaction(Keys keys, string from, string to, BigFloat amount, BigFloat fee, BigFloat gasLimit = null, BigFloat storageLimit = null, JObject param = null)
{
    // Apply defaults for optional limits.
    gasLimit = gasLimit ?? 200;
    storageLimit = storageLimit ?? 0;
    JObject head = await GetHeader();
    JObject account = await GetAccountForBlock(head["hash"].ToString(), from);
    // The account's operation counter; incremented once per operation added below.
    int counter = int.Parse(account["counter"].ToString());
    JArray operations = new JArray();
    JToken managerKey = await GetManagerKey(from);
    string gas = gasLimit.ToString();
    string storage = storageLimit.ToString();
    if (keys != null && managerKey["key"] == null)
    {
        // Source's public key is not yet revealed on-chain: prepend a reveal operation.
        JObject revealOp = new JObject();
        operations.AddFirst(revealOp);
        revealOp["kind"] = "reveal";
        revealOp["fee"] = "0";
        revealOp["public_key"] = keys.DecryptPublicKey();
        revealOp["source"] = from;
        revealOp["storage_limit"] = storage;
        revealOp["gas_limit"] = gas;
        revealOp["counter"] = (++counter).ToString();
    }
    JObject transaction = new JObject();
    operations.Add(transaction);
    transaction["kind"] = Operations.Transaction;
    transaction["source"] = from;
    transaction["fee"] = fee.ToString();
    transaction["counter"] = (++counter).ToString();
    transaction["gas_limit"] = gas;
    transaction["storage_limit"] = storage;
    transaction["amount"] = new BigFloat(amount.ToMicroTez().ToString(6)).Round().ToString(); // Convert to microtez, truncate at 6 digits, round up
    transaction["destination"] = to;
    if (param != null)
    {
        transaction["parameters"] = param;
    }
    else
    {
        JObject parameters = new JObject();
        transaction["parameters"] = parameters;
        parameters["prim"] = "Unit";
        parameters["args"] = new JArray(); // No args for this contract.
    }
    List <OperationResult> sendResults = await SendOperations(operations, keys, head);
    return(sendResults.LastOrDefault() as SendTransactionOperationResult);
}
/// <summary>Subtracts <paramref name="subtrahend"/> from <paramref name="minuend"/> using BigFloat's - operator.</summary>
protected override BigFloat Subtract( BigFloat minuend, BigFloat subtrahend)
{
    // Delegate to the overloaded subtraction operator.
    var difference = minuend - subtrahend;
    return difference;
}
/// <summary>Computes the natural logarithm (base e) of <paramref name="value"/>.</summary>
/// <param name="value">The value to take the logarithm of.</param>
/// <returns>ln(value) as a double. NOTE(review): returning double (not BigFloat) loses
/// precision for extreme magnitudes — confirm this is intentional.</returns>
protected abstract double NaturalLog(BigFloat value);
/// <summary>
/// Tests whether the complex points (a, b) and (c, d) are closer than <paramref name="tol"/>.
/// Mutates only its local parameter copies (a and b are reused as the component deltas).
/// </summary>
/// <remarks>
/// NOTE(review): this compares the SQUARE of the squared distance (i.e. dist^4) against tol,
/// while Solve compares the squared distance (dist^2) against the same tolerance value
/// directly — confirm whether the extra squaring here is intentional.
/// </remarks>
static bool Near(BigFloat a, BigFloat b, BigFloat c, BigFloat d, BigFloat tol) { a -= c; b -= d; return ((a * a + b * b) * (a * a + b * b) < tol); }
/// <summary>Returns the absolute value (magnitude) of <paramref name="value"/>.</summary>
/// <param name="value">The value whose magnitude is requested.</param>
/// <returns>|value|.</returns>
public abstract BigFloat AbsoluteValue(BigFloat value);
/// <summary>Adds the two operands via the static BigFloat.Add helper.</summary>
protected override BigFloat Add(BigFloat leftAddend, BigFloat rightAddend)
{
    // Forward to the static helper on BigFloat.
    var sum = BigFloat.Add(leftAddend, rightAddend);
    return sum;
}
/// <summary>
/// Transfer funds from one wallet to another by delegating to Rpc.SendTransaction.
/// </summary>
/// <param name="from">The address to transfer the funds from.</param>
/// <param name="to">The address to transfer the funds to.</param>
/// <param name="amount">The amount to transfer.</param>
/// <param name="fee">The fee to pay for the transfer.</param>
/// <param name="gasLimit">Optional gas limit; when null, SendTransaction defaults it to 200.</param>
/// <param name="storageLimit">Optional storage limit; when null, SendTransaction defaults it to 0.</param>
/// <returns>The result of the transfer operation.</returns>
public async Task <OperationResult> Transfer(string from, string to, BigFloat amount, BigFloat fee, BigFloat gasLimit = null, BigFloat storageLimit = null) { return(await new Rpc(Provider).SendTransaction(Keys, from, to, amount, fee, gasLimit, storageLimit)); }
/// <summary>Raises <paramref name="base"/> to the integer power <paramref name="exponent"/> via the static BigFloat.PowerTo helper.</summary>
protected override BigFloat PowerTo(BigFloat @base, int exponent)
{
    // Forward to the static helper on BigFloat.
    var raised = BigFloat.PowerTo(@base, exponent);
    return raised;
}
/// <summary>Raises <paramref name="base"/> to the integer power <paramref name="exponent"/>.</summary>
/// <param name="base">The base value.</param>
/// <param name="exponent">The integer exponent.</param>
/// <returns>base ^ exponent.</returns>
protected abstract BigFloat PowerTo( BigFloat @base, int exponent);
/// <summary>Returns the absolute value of <paramref name="value"/>.</summary>
/// <remarks>
/// NOTE(review): this relies on BigFloat's unary + operator yielding the magnitude.
/// For most numeric types unary plus is the identity, not abs — confirm that
/// BigFloat.operator+(BigFloat) really returns |value|, otherwise this is a bug.
/// </remarks>
public override BigFloat AbsoluteValue(BigFloat value) { return(+value); }
/// <summary>
/// Exercises BigFloat.ToString format strings: special values, exponent styles ('E'/'e'/'@',
/// digit counts, exponent padding/sign flags after '^'), radix prefixes ('b'), plain/padded
/// decimal ('p', with integer padding, '.' fraction digits and '#' trailing-digit control),
/// full-precision 'u' output, fixed-format '=' and range-based '(lo,hi)' switching.
/// </summary>
public void Can_print_format()
{
    // Pin the culture so decimal separators and digit counts are deterministic.
    var culture = new CultureInfo("en-US", false) { NumberFormat = { NumberDecimalDigits = 3 } };
    Thread.CurrentThread.CurrentCulture = culture;
    Thread.CurrentThread.CurrentUICulture = culture;
    // Special values: NaN and signed infinity ('^!' forces an explicit sign).
    new BigFloat("NaN").ToString().Should().Be("NaN");
    new BigFloat("Inf").ToString().Should().Be("∞");
    new BigFloat("Inf").ToString("^!").Should().Be("+∞");
    new BigFloat("-Inf").ToString().Should().Be("-∞");
    // Scientific output with radix ('b') and significant-digit ('d') controls.
    new BigFloat("5").ToString("b2d5").Should().Be("0.101E+3");
    new BigFloat("0.001").ToString().Should().Be("0.1E-2");
    new BigFloat("-5.123").ToString("d3").Should().Be("-0.512E+1");
    new BigFloat("5").ToString("d5u").Should().Be("0.50000E+1");
    new BigFloat("0.001").ToString().Should().Be("0.1E-2");
    new BigFloat("-0.001").ToString().Should().Be("-0.1E-2");
    // Exponent digit count and marker character ('E'/'e'/'@'), plus '^' padding/sign flags.
    new BigFloat("-0.001").ToString("E2").Should().Be("-0.1E-02");
    new BigFloat("-0.001").ToString("E2").Should().Be("-0.1E-02");
    new BigFloat("-0.001").ToString("e3^__").Should().Be("-0.1e002");
    new BigFloat("-0.001").ToString("@3^_!").Should().Be("-0.1@-002");
    new BigFloat("-0.001").ToString("E3^_;").Should().Be("-0.1E-002");
    new BigFloat("-0.1").ToString("E^___").Should().Be("-0.1E0");
    new BigFloat("-0.1").ToString("E^__!").Should().Be("-0.1E+0");
    new BigFloat("-0.1").ToString("E^__;").Should().Be("-0.1E+0");
    new BigFloat("-0.1").ToString("E^__+").Should().Be("-0.1E+0");
    new BigFloat("-0.1").ToString("E^__-").Should().Be("-0.1E-0");
    new BigFloat("-1").ToString("E2^!").Should().Be("-0.1E+01");
    new BigFloat("-1").ToString("E1^;").Should().Be("-0.1E+1");
    new BigFloat("-1").ToString("E0^_").Should().Be("-0.1E1");
    new BigFloat("-1").ToString("E").Should().Be("-0.1E+1");
    // Base-16 switches the exponent marker to '@'.
    new BigFloat("1").ToString("b16e").Should().Be("0.1@+1");
    new BigFloat("1").ToString("b16").Should().Be("0.1@+1");
    // Plain ('p') format: integer-part zero padding...
    new BigFloat("5").ToString("p2").Should().Be("05");
    new BigFloat("-5.1").ToString("p2").Should().Be("-05");
    new BigFloat("5").ToString("p2.2").Should().Be("05.00");
    new BigFloat("5.67").ToString("p2.1").Should().Be("05.6");
    // ...and '.' fraction-digit control with '#' min/max trailing digits.
    new BigFloat("-5.67").ToString("p.1").Should().Be("-5.6");
    new BigFloat("5.67").ToString("p.0").Should().Be("5");
    new BigFloat("5.6789").ToString("p.").Should().Be("5.678");
    new BigFloat("5.678").ToString("p.1#2").Should().Be("5.67");
    new BigFloat("5.6").ToString("p.1#2").Should().Be("5.6");
    new BigFloat("-5.6").ToString("p.#2").Should().Be("-5.600");
    new BigFloat("5").ToString("p.#2").Should().Be("5.000");
    new BigFloat("5.456789").ToString("p.#4").Should().Be("5.4567");
    new BigFloat("5.456789").ToString("p#2").Should().Be("5.45");
    new BigFloat("-5.456789").ToString("p#").Should().Be("-5.456789");
    new BigFloat("500.4567").ToString("p4#2").Should().Be("0500.45");
    new BigFloat("5.4567").ToString("p4#").Should().Be("0005.4567");
    new BigFloat("512.4567").ToString("p4#0").Should().Be("0512");
    new BigFloat("51234.4567").ToString("p4.0#0").Should().Be("51234");
    new BigFloat("-5.4567").ToString("p4.#").Should().Be("-0005.4567");
    new BigFloat("5.45").ToString("p4.#").Should().Be("0005.450");
    new BigFloat("-1E-3").ToString("p").Should().Be("-0.001");
    // 'u' prints at full binary precision (exposing the inexact binary value of 5.67).
    new BigFloat("5.67").ToString("pu").Should().Be("5.6699999999999999");
    new BigFloat("5.67").ToString("pu#").Should().Be("5.6700000000000000");
    new BigFloat("5.67").ToString("pu=").Should().Be("5.6699999999999999");
    new BigFloat("5.67").ToString("eu").Should().Be("0.56699999999999999e+1");
    new BigFloat("5.67").ToString("eu#").Should().Be("0.56700000000000000e+1");
    new BigFloat("5.67").ToString("eu=").Should().Be("0.56699999999999999e+1");
    // '=' fixes which of the two formats applies; '(lo,hi)' selects by decimal exponent range.
    new BigFloat("-1").ToString("E=0p=1").Should().Be("-1");
    new BigFloat("-1").ToString("E=1p=1").Should().Be("-0.1E+1");
    new BigFloat("-1").ToString("E=1p=0").Should().Be("-0.1E+1");
    new BigFloat("100").ToString("E(-1,1)=3").Should().Be("0.1E+3");
    new BigFloat("10").ToString("E(-1,1)=3").Should().Be("10");
    new BigFloat("1").ToString("E(-1,1)=3").Should().Be("0.1E+1");
    new BigFloat("0.1").ToString("E(-1,1)=3").Should().Be("0.1E+0");
    new BigFloat("0.01").ToString("E(-1,1)=3").Should().Be("0.1E-1");
    new BigFloat("0.001").ToString("E(-1,1)=3").Should().Be("0.001");
    new BigFloat("100").ToString("p(-1,1)=3").Should().Be("100");
    new BigFloat("10").ToString("p(-1,1)=3").Should().Be("0.1E+2");
    new BigFloat("1").ToString("p=3(-1,1)").Should().Be("1");
    new BigFloat("0.1").ToString("p(-1,1)=3").Should().Be("0.1");
    new BigFloat("0.01").ToString("p(-1,1)=3").Should().Be("0.01");
    new BigFloat("0.001").ToString("p=3(-1,1)=3").Should().Be("0.1E-2");
    // High-precision ln(10) printed in plain format.
    var flt = new BigFloat("10", precision: 100);
    flt.Log();
    flt.ToString("p").Should().Be("2.302585092994045684017991454683");
}
/// <summary>Multiplies the operands via the instance-level BigFloat.Multiply.</summary>
protected override BigFloat Multiply(BigFloat multiplicand, BigFloat multiplier)
{
    // Delegate to the instance method on the left operand.
    var product = multiplicand.Multiply(multiplier);
    return product;
}
/// <summary>Divides the operands via the instance-level BigFloat.Divide.</summary>
public override BigFloat Divide( BigFloat dividend, BigFloat divisor)
{
    // Delegate to the instance method on the dividend.
    var quotient = dividend.Divide(divisor);
    return quotient;
}
/**
 * Sets the coordinates of the location object.
 *
 * @param latitude
 *            the latitude, in degrees, of this location. North latitude is positive, south negative.
 * @param longitude
 *            the longitude, in degrees, of this location. East longitude is positive, west negative.
 */
public void setLocation(String latitude, String longitude) { this.latitude = new BigFloat(latitude); this.longitude = new BigFloat(longitude); }
/// <summary>Applies a prefix decrement: <paramref name="value"/> is replaced by
/// BigFloat.Decrement(value) and the NEW value is returned.</summary>
protected override BigFloat PreDecrement(ref BigFloat value)
{
    // Compute the decremented value once, store it back through the ref, and return it.
    BigFloat decremented = BigFloat.Decrement(value);
    value = decremented;
    return decremented;
}
/// <summary>
/// GetSDTMultiplicityResult() calculates Singles, Doubles, and Triples rates plus related metadata.
///
/// This utility method works for both Slow- and Fast-Background Multiplicity Analyzers.
///
/// Note that the input parameter "accidentalsHistogram" need not be normalized,
/// and that a normalized-accidentals distribution will be included in output and will be valid
/// for both slow and fast accidentals.
///
/// </summary>
/// <param name="realsPlusAccidentalsHistogram"> The histogram of gate populations. </param>
/// <param name="accidentalsHistogram"> The histogram of gate populations - NOT NORMALIZED. </param>
/// <param name="wasFastAccidentals"> true or false. Affects how PTsingles are calculated, etc. </param>
/// <param name="multiplicityGateWidth"> as a UInt64, in 100-nanosecond tics. </param>
/// <param name="multiplicityDeadDelay"> as a UInt64, in 100-nanosecond tics. Stored on the result; not otherwise used here. </param>
/// <param name="accidentalsDelay"> as a UInt64, in 100-nanosecond tics. Stored on the result; not otherwise used here. </param>
/// <param name="deadTimeCoeffTinNanoSecs"> as a double, in nanoseconds. </param>
/// <param name="deadTimeCoeffAinMicroSecs"> as a double, in microseconds. </param>
/// <param name="deadTimeCoeffBinPicoSecs"> as a double, in picoseconds. </param>
/// <param name="deadTimeCoeffCinNanoSecs"> as a double, in nanoseconds. NOTE(review): stored but never used in any calculation below. </param>
/// <param name="totalMeasurementTime"> as a double, in seconds. </param>
/// <param name="normedAccidentalsHistogram"> UInt64[]. </param>
/// <returns>A MultiplicityResult holding the distributions, alpha/beta coefficients, factorial moments, SDT rates and dead-time-corrected rates.</returns>
public MultiplicityResult GetSDTMultiplicityResult(UInt64[] realsPlusAccidentalsHistogram, UInt64[] accidentalsHistogram, Boolean wasFastAccidentals, UInt64 multiplicityGateWidth, UInt64 multiplicityDeadDelay, UInt64 accidentalsDelay, double deadTimeCoeffTinNanoSecs, double deadTimeCoeffAinMicroSecs, double deadTimeCoeffBinPicoSecs, double deadTimeCoeffCinNanoSecs, double totalMeasurementTime, UInt64[] normedAccidentalsHistogram = null)
{
    MultiplicityResult result;
    double phi;
    double gateInSeconds;
    UInt32 biggestKey, biggestAKey;
    int arrayLength;
    double[] alpha;
    double[] beta;
    // α/β are the BigFloat fallbacks used only when the double alpha/beta computation overflows.
    BigFloat[] α = new BigFloat[0], β = new BigFloat[0];
    result = new MultiplicityResult();
    if (wasFastAccidentals == true)
    {
        result.isSlowBackground = false;
    }
    else
    {
        result.isSlowBackground = true;
    }
    //store parameters
    result.multiplicityGateWidth = multiplicityGateWidth;
    result.multiplicityDeadDelay = multiplicityDeadDelay;
    result.accidentalsDelay = accidentalsDelay;
    result.deadTimeCoefficientTinNanoSecs = deadTimeCoeffTinNanoSecs;
    result.deadTimeCoefficientAinMicroSecs = deadTimeCoeffAinMicroSecs;
    result.deadTimeCoefficientBinPicoSecs = deadTimeCoeffBinPicoSecs;
    result.deadTimeCoefficientCinNanoSecs = deadTimeCoeffCinNanoSecs;
    //copy the real-plus-accidental multiplicity histogram (sparse: zero bins are skipped)
    biggestKey = 0;
    arrayLength = realsPlusAccidentalsHistogram.Length;
    for (int i = 0; i < arrayLength; i++)
    {
        if (realsPlusAccidentalsHistogram[i] > 0)
        {
            result.realPlusAccidentalDistribution.Add((UInt64)i, realsPlusAccidentalsHistogram[i]);
            biggestKey = (UInt32)i;
        }
    }
    result.maxRABin = biggestKey;
    //copy the accidental-only histogram
    biggestAKey = 0;
    arrayLength = accidentalsHistogram.Length;
    for (int i = 0; i < arrayLength; i++)
    {
        if (accidentalsHistogram[i] > 0)
        {
            result.accidentalDistribution.Add((UInt32)i, accidentalsHistogram[i]);
            biggestAKey = (UInt32)i;
        }
    }
    result.maxABin = biggestAKey;
    //************************************************************
    //Normalize the AccidentalDistribution,
    //scaling the FastBackgroundAnalysis result in proportion
    //to the number of Real+Accidental gate
    UInt64 numAccidentalGates = 0;
    UInt64 numRealPlusAccidentalGates = 0;
    foreach (KeyValuePair <UInt64, UInt64> pair in result.realPlusAccidentalDistribution)
    {
        numRealPlusAccidentalGates += pair.Value;
    }
    // compute the normalization param and recompute the unnormalizaed array from this one (code does not seem to work JFL)
    if (normedAccidentalsHistogram != null)
    {
        // Caller supplied an already-normalized histogram: keep it, and rebuild the
        // raw accidentalDistribution by de-normalizing.
        for (int i = 0; i < normedAccidentalsHistogram.Length; i++)
        {
            if (normedAccidentalsHistogram[i] > 0)
            {
                result.normalizedAccidentalDistribution.Add((UInt32)i, normedAccidentalsHistogram[i]);
            }
        }
        UInt64 numNAccidentalGates = 0;
        foreach (ulong no in normedAccidentalsHistogram)
        {
            numNAccidentalGates += no;
        }
        double denormalizingRatio;
        UInt64 denormalizedRate;
        denormalizingRatio = ((double)numNAccidentalGates) / ((double)numRealPlusAccidentalGates);
        result.accidentalDistribution.Clear();
        foreach (KeyValuePair <UInt64, UInt64> pair in result.normalizedAccidentalDistribution)
        {
            denormalizedRate = (UInt64)(pair.Value * denormalizingRatio);
            result.accidentalDistribution.Add(pair.Key, denormalizedRate);
            biggestAKey = (UInt32)pair.Key;
        }
        result.maxABin = biggestAKey;
        foreach (KeyValuePair <UInt64, UInt64> pair in result.accidentalDistribution)
        {
            numAccidentalGates += pair.Value;
        }
    }
    else
    {
        // No normalized histogram supplied: derive it from the raw accidental distribution.
        foreach (KeyValuePair <UInt64, UInt64> pair in result.accidentalDistribution)
        {
            numAccidentalGates += pair.Value;
        }
        double normalizingRatio;
        UInt64 normalizedRate;
        normalizingRatio = ((double)numRealPlusAccidentalGates) / ((double)numAccidentalGates);
        result.normalizedAccidentalDistribution.Clear();
        foreach (KeyValuePair <UInt64, UInt64> pair in result.accidentalDistribution)
        {
            normalizedRate = (UInt64)(pair.Value * normalizingRatio);
            result.normalizedAccidentalDistribution.Add(pair.Key, normalizedRate);
        }
    }
    //*** END Normalizing the AccidentalDistribution *************
    //store the bigger key...
    if (biggestAKey > biggestKey)
    {
        biggestKey = biggestAKey;
    }
    if (biggestKey < 2)
    {
        biggestKey = 2; //...minimum size for data-output arrays...
    }
    alpha = new double[biggestKey + 1];
    beta = new double[biggestKey + 1];
    gateInSeconds = ((double)multiplicityGateWidth) * this.ticSizeInSeconds;
    // phi: dead-time parameter T (converted to seconds) relative to the gate width.
    phi = (deadTimeCoeffTinNanoSecs / 1E9) / gateInSeconds;
    bool standard = true; // dev note: this toggle signals use of BigNum when FP overflow is detected
    int axover = 0, bxover = 0;
    //calculate the alphas
    alpha[0] = 0.0;
    alpha[1] = 1.0;
    for (int n = 2; n <= biggestKey; n++)
    {
        if (phi > 1e-20)
        {
            alpha[n] = 1.0;
            if (standard)
            {
                for (int k = 0; k <= (n - 2); k++)
                {
                    double alphaCoeff;
                    alphaCoeff = this.binomialCoefficient((n - 1), (k + 1)) * Math.Pow((double)(k + 1), (double)k) * Math.Pow(phi, (double)k) / (Math.Pow((1.0 - ((k + 1) * phi)), (double)(k + 2)));
                    alpha[n] += alphaCoeff;
                    if (Double.IsInfinity(alpha[n]) || Double.IsNaN(alpha[n]))
                    {
                        // Double arithmetic overflowed: switch to the BigFloat path and
                        // restart this n (k = n exits the inner loop; n = n - 1 redoes it).
                        result.warnings.Add("Overflow alpha at n = " + n + ", k = " + k);
                        alpha[n] = 0;
                        axover = n;
                        standard = false;
                        k = n;
                        n = n - 1; // redo the loop
                        α = new BigFloat[biggestKey + 1];
                    }
                }
            }
            else
            {
                // INCCCycleConditioning.calc_alpha_beta
                PrecisionSpec ps128 = new PrecisionSpec(128, PrecisionSpec.BaseType.BIN);
                BigFloat one = new BigFloat(1, ps128);
                BigFloat combination;
                BigFloat sum;
                double raise1, power1, power2;
                double log1, log2, log3;
                BigFloat exp1, exp2, exp3;
                /* calculate alpha array */
                sum = new BigFloat(0, ps128);
                for (int k = 0; k <= (n - 2); k++)
                {
                    // Same term as the double path, evaluated as exp(log ...) in 128-bit floats.
                    combination = new BigFloat(binomialCoefficient((n - 1), (k + 1)), ps128);
                    raise1 = (double)(k + 1);
                    power1 = (double)k;
                    power2 = (double)(k + 2);
                    log1 = Math.Log(raise1);
                    log2 = Math.Log(phi);
                    log3 = Math.Log(1.0 - raise1 * phi);
                    exp1 = BigFloat.Exp(new BigFloat(log1 * power1, ps128));
                    exp2 = BigFloat.Exp(new BigFloat(log2 * power1, ps128));
                    exp3 = BigFloat.Exp(new BigFloat(log3 * power2, ps128));
                    sum += combination * exp1 * exp2 / exp3;
                }
                α[n] = new BigFloat(one + sum, ps128);
            }
        }
        else
        {
            alpha[n] = 1.0;
        }
    }
    //calculate the betas
    standard = true;
    beta[0] = 0.0;
    beta[1] = 0.0;
    beta[2] = alpha[2] - 1.0;
    for (int n = 3; n <= biggestKey; n++)
    {
        if (phi > 1e-20)
        {
            beta[n] = alpha[n] - 1.0;
            if (standard)
            {
                for (int k = 0; k <= (n - 3); k++)
                {
                    double betaCoeff;
                    betaCoeff = this.binomialCoefficient((n - 1), (k + 2)) * (k + 1) * Math.Pow((double)(k + 2), (double)k) * Math.Pow(phi, (double)k) / (Math.Pow((1.0 - ((k + 2) * phi)), (double)(k + 3)));
                    beta[n] += betaCoeff;
                    if (Double.IsInfinity(beta[n]) || Double.IsNaN(beta[n]))
                    {
                        // Same overflow-fallback dance as for alpha above.
                        result.warnings.Add("Overflow beta at n = " + n + ", k = " + k);
                        beta[n] = 0;
                        bxover = n;
                        standard = false;
                        k = n;
                        n = n - 1; // redo the loop
                        β = new BigFloat[biggestKey + 1];
                    }
                }
            }
            else
            {
                PrecisionSpec ps128 = new PrecisionSpec(128, PrecisionSpec.BaseType.BIN);
                BigFloat one = new BigFloat(1, ps128);
                BigFloat combination;
                BigFloat sum;
                double raise1, power1, power2;
                double log1, log2, log3;
                BigFloat exp1, exp2, exp3;
                sum = new BigFloat(0, ps128);
                for (int k = 0; k <= n - 3; k++)
                {
                    combination = new BigFloat(binomialCoefficient((n - 1), (k + 2)), ps128);
                    raise1 = (double)(k + 2);
                    power1 = (double)k;
                    power2 = (double)(k + 3);
                    log1 = Math.Log(raise1);
                    log2 = Math.Log(phi);
                    log3 = Math.Log(1.0 - raise1 * phi);
                    exp1 = BigFloat.Exp(new BigFloat(log1 * power1, ps128));
                    exp2 = BigFloat.Exp(new BigFloat(log2 * power1, ps128));
                    exp3 = BigFloat.Exp(new BigFloat(log3 * power2, ps128));
                    sum += combination * (new BigFloat(k + 1, ps128)) * exp1 * exp2 / exp3;
                }
                β[n] = α[n] - one + sum;
            }
        }
        else
        {
            beta[n] = 0.0;
        }
    }
    //store the alpha and beta coefficients
    result.alpha = new double[biggestKey + 1];
    result.beta = new double[biggestKey + 1];
    for (int i = 0; i <= biggestKey; i++)
    {
        result.alpha[i] = alpha[i];
        result.beta[i] = beta[i];
    }
    // Where the BigFloat fallback was used (axover/bxover mark the crossover index),
    // convert the BigFloat values back to doubles; on failure reuse the last good value.
    double lastGoodD = 0;
    for (int i = axover; axover > 0 && i <= biggestKey; i++)
    {
        double d;
        bool good = Double.TryParse(α[i].ToString(), System.Globalization.NumberStyles.Any, System.Globalization.CultureInfo.InvariantCulture.NumberFormat, out d);
        // what to do when it is really larger than a double?
        if (!good)
        {
            result.alpha[i] = lastGoodD;
            result.warnings.Add(String.Format("α[{0}] conversion failed on {1}", i, α[i].ToString()));
        }
        else
        {
            lastGoodD = d;
            result.alpha[i] = d;
        }
    }
    for (int i = bxover; bxover > 0 && i <= biggestKey; i++)
    {
        double d;
        bool good = Double.TryParse(β[i].ToString(), System.Globalization.NumberStyles.Any, System.Globalization.CultureInfo.InvariantCulture.NumberFormat, out d);
        if (!good)
        {
            result.beta[i] = lastGoodD;
            result.warnings.Add(String.Format("β[{0}] conversion failed on {1}", i, β[i].ToString()));
            // URGENT: alpha/beta arrays are to be transformed from doubles to BigFloat types when this conversion happens to overflow
        }
        else
        {
            lastGoodD = d;
            result.beta[i] = d;
        }
    }
    //NOTE: in the following calculations,
    //variables named RAxxx refer to "Reals Plus Accidentals" phenomena, and
    //variables named Axxx refer to "Accidentals" phenomena (such as, fast-background counting)
    //calculate the factorial moments
    double RAfactorialMoment0, RAfactorialMoment1, RAfactorialMoment2, RAfactorialMoment3;
    double AfactorialMoment0, AfactorialMoment1, AfactorialMoment2, AfactorialMoment3;
    double RAfactorialMomentAlpha1, AfactorialMomentAlpha1;
    double RAfactorialMomentBeta2, AfactorialMomentBeta2;
    RAfactorialMoment0 = 0.0;
    RAfactorialMoment1 = 0.0;
    RAfactorialMoment2 = 0.0;
    RAfactorialMoment3 = 0.0;
    AfactorialMoment0 = 0.0;
    AfactorialMoment1 = 0.0;
    AfactorialMoment2 = 0.0;
    AfactorialMoment3 = 0.0;
    RAfactorialMomentAlpha1 = 0.0;
    AfactorialMomentAlpha1 = 0.0;
    RAfactorialMomentBeta2 = 0.0;
    AfactorialMomentBeta2 = 0.0;
    for (int i = 0; i <= biggestKey; i++)
    {
        UInt64 gateCount;
        // j/k/L are the bins contributing to the 1st/2nd/3rd factorial moments for this i.
        int j = i + 1;
        int k = i + 2;
        int L = i + 3;
        if (result.realPlusAccidentalDistribution.TryGetValue((UInt64)i, out gateCount))
        {
            RAfactorialMoment0 += (double)gateCount;
        }
        if (result.accidentalDistribution.TryGetValue((UInt64)i, out gateCount))
        {
            AfactorialMoment0 += gateCount;
        }
        if (j <= biggestKey)
        {
            if (result.realPlusAccidentalDistribution.TryGetValue((UInt64)j, out gateCount))
            {
                RAfactorialMoment1 += (double)(((UInt64)j) * gateCount);
                RAfactorialMomentAlpha1 += alpha[j] * ((double)gateCount);
            }
            if (result.accidentalDistribution.TryGetValue((UInt64)j, out gateCount))
            {
                AfactorialMoment1 += (double)(((UInt64)j) * gateCount);
                AfactorialMomentAlpha1 += alpha[j] * ((double)gateCount);
            }
        }
        if (k <= biggestKey)
        {
            if (result.realPlusAccidentalDistribution.TryGetValue((UInt32)k, out gateCount))
            {
                RAfactorialMoment2 += (double)(((UInt64)(k * (k - 1) / 2)) * gateCount);
                RAfactorialMomentBeta2 += beta[k] * ((double)gateCount);
            }
            if (result.accidentalDistribution.TryGetValue((UInt32)k, out gateCount))
            {
                AfactorialMoment2 += (double)(((UInt64)(k * (k - 1) / 2)) * gateCount);
                AfactorialMomentBeta2 += beta[k] * ((double)gateCount);
            }
        }
        if (L <= biggestKey)
        {
            if (result.realPlusAccidentalDistribution.TryGetValue((UInt32)L, out gateCount))
            {
                RAfactorialMoment3 += ((double)(((UInt64)(L * (L - 1) * (L - 2))) * gateCount)) / 6.0;
            }
            if (result.accidentalDistribution.TryGetValue((UInt32)L, out gateCount))
            {
                AfactorialMoment3 += ((double)(((UInt64)(L * (L - 1) * (L - 2))) * gateCount)) / 6.0;
            }
        }
    }
    //store the factorial moments
    result.RAfactorialMoment0 = RAfactorialMoment0;
    result.RAfactorialMoment1 = RAfactorialMoment1;
    result.RAfactorialMoment2 = RAfactorialMoment2;
    result.RAfactorialMoment3 = RAfactorialMoment3;
    result.AfactorialMoment0 = AfactorialMoment0;
    result.AfactorialMoment1 = AfactorialMoment1;
    result.AfactorialMoment2 = AfactorialMoment2;
    result.AfactorialMoment3 = AfactorialMoment3;
    //NOTE: in the following calculations,
    //variables named PTxxx refer to "Pulse Triggered" phenomena, and
    //variables named RTxxx refer to "Regularly Triggered" phenomena (such as, fast-background counting)
    //penultimately, use all this to calculate the SDT rates
    double PTsingles, RTsingles, normRAfactorialMoment1, normRAfactorialMoment2;
    double normRAfactorialMomentAlpha1, normRAfactorialMomentBeta2;
    double normAfactorialMoment0, normAfactorialMoment1, normAfactorialMoment2, normAfactorialMoment3;
    double normAfactorialMomentAlpha1, normAfactorialMomentBeta2;
    if (wasFastAccidentals)
    {
        PTsingles = RAfactorialMoment0;
        double gateFactor = numAccidentalGates / Math.Floor(totalMeasurementTime / (multiplicityGateWidth * this.ticSizeInSeconds));
        RTsingles = AfactorialMoment1 / gateFactor;
        normRAfactorialMoment1 = RAfactorialMoment1 / PTsingles;
        normRAfactorialMoment2 = RAfactorialMoment2 / PTsingles;
        //NOT USED: double normRAfactorialMoment3 = RAfactorialMoment3 / PTsingles;
        normRAfactorialMomentAlpha1 = RAfactorialMomentAlpha1 / PTsingles;
        normRAfactorialMomentBeta2 = RAfactorialMomentBeta2 / PTsingles;
        normAfactorialMoment0 = AfactorialMoment0 / numAccidentalGates;
        normAfactorialMoment1 = AfactorialMoment1 / numAccidentalGates;
        normAfactorialMoment2 = AfactorialMoment2 / numAccidentalGates;
        normAfactorialMoment3 = AfactorialMoment3 / numAccidentalGates;
        normAfactorialMomentAlpha1 = AfactorialMomentAlpha1 / numAccidentalGates;
        normAfactorialMomentBeta2 = AfactorialMomentBeta2 / numAccidentalGates;
    }
    else
    {
        PTsingles = AfactorialMoment0; //XXX SHOULDN'T THIS BE RAfactorialMoment0 not AfactorialMoment0???, answer, no, the two values should be the same, RA and A of 0 are identical for "slow"
        RTsingles = AfactorialMoment0;
        normRAfactorialMoment1 = RAfactorialMoment1 / PTsingles;
        normRAfactorialMoment2 = RAfactorialMoment2 / PTsingles;
        //NOT USED: double normRAfactorialMoment3 = RAfactorialMoment3 / PTsingles;
        normRAfactorialMomentAlpha1 = RAfactorialMomentAlpha1 / PTsingles;
        normRAfactorialMomentBeta2 = RAfactorialMomentBeta2 / PTsingles;
        normAfactorialMoment0 = AfactorialMoment0 / RTsingles;
        normAfactorialMoment1 = AfactorialMoment1 / RTsingles;
        normAfactorialMoment2 = AfactorialMoment2 / RTsingles;
        normAfactorialMoment3 = AfactorialMoment3 / RTsingles;
        normAfactorialMomentAlpha1 = AfactorialMomentAlpha1 / RTsingles;
        normAfactorialMomentBeta2 = AfactorialMomentBeta2 / RTsingles;
    }
    double RTdoubles = (0.5 / (multiplicityGateWidth * this.ticSizeInSeconds)) * ((2.0 * normAfactorialMoment2) - Math.Pow(normAfactorialMoment1, 2.0));
    double RTtriples = (0.16667 / (multiplicityGateWidth * this.ticSizeInSeconds)) * ((6.0 * normAfactorialMoment3) - (6.0 * normAfactorialMoment1 * normAfactorialMoment2) + (2.0 * Math.Pow(normAfactorialMoment1, 3)));
    double PTdoubles = PTsingles * (normRAfactorialMoment1 - normAfactorialMoment1);
    double PTtriples;
    double PTtriplesDTcoef;
    if (AfactorialMoment0 != 0.0)
    {
        PTtriples = PTsingles * ((normRAfactorialMoment2 - normAfactorialMoment2) - ((normAfactorialMoment1 / normAfactorialMoment0) * (normRAfactorialMoment1 - normAfactorialMoment1)));
        PTtriplesDTcoef = PTsingles * ((normRAfactorialMomentBeta2 - normAfactorialMomentBeta2) - ((normAfactorialMomentAlpha1 / normAfactorialMoment0) * (normRAfactorialMomentAlpha1 - normAfactorialMomentAlpha1)));
    }
    else
    {
        PTtriples = 0.0;
        PTtriplesDTcoef = 0.0;
    }
    // Convert counts to per-second rates; guard against a zero measurement time.
    if (totalMeasurementTime > 1E-12)
    {
        PTsingles /= totalMeasurementTime;
        PTdoubles /= totalMeasurementTime;
        PTtriples /= totalMeasurementTime;
        PTtriplesDTcoef /= totalMeasurementTime;
    }
    else
    {
        PTsingles = 0.0;
        PTdoubles = 0.0;
        PTtriples = 0.0;
        PTtriplesDTcoef = 0.0;
    }
    //store the SDT rates
    result.singlesRatePerSecond = PTsingles;
    result.doublesRatePerSecond = PTdoubles;
    result.triplesRatePerSecond = PTtriples;
    //now that rates are calculated, calculate the dead-time corrections
    // dead time correction coefficients for RT rates (INCC as well as H&C)
    // determined experimentally using D/T ratio - see analysisComparison/triplesDeadTime.xls
    // the best fit (poly3) used to reproduce the trend
    // note: valid only for sources in the range of ~100n/s - ~500,000 n/s
    /** NOT USED ***
     * double DTcoeffT0_RT = 3.42854465;
     * double DTcoeffT1_RT = 3.35351651E-6;
     * double DTcoeffT2_RT = -5.83706327E-12;
     * double DTcoeffT3_RT = 2.03604973E-17;
     ***************/
    // dead time correction coefficients for PT rates with background calculated using H&C consecutive gates
    // determined experimentally using D/T ratio - see analysisComparison/triplesDeadTime.xls
    // the best fit (poly3) used to reproduce the trend
    // note: valid only for sources in the range of ~100n/s - ~500,000 n/s
    /** NOT USED ***
     * double DTcoeffT0_PT = 2.78760077;
     * double DTcoeffT1_PT = 2.86078894E-6;
     * double DTcoeffT2_PT = -8.21994836E-12;
     * double DTcoeffT3_PT = 9.45195862E-17;
     ***************/
    /** NOT USED ***
     * double DTcoeffA_RT = 0.2063; // these values were determined using two source method
     * double DTcoeffB_RT = 0.04256;
     ***************/
    double DTcoeffA = deadTimeCoeffAinMicroSecs;
    double DTcoeffB = deadTimeCoeffBinPicoSecs;
    double DTcoeffC = deadTimeCoeffCinNanoSecs;
    double DTcoeffT = deadTimeCoeffTinNanoSecs;
    double exponent = ((DTcoeffA / 1E6) + ((DTcoeffB / 1E12) * PTsingles)) * PTsingles;
    double PTsinglesDTcorr = PTsingles * Math.Exp(exponent / 4.0);
    double PTdoublesDTcorr = PTdoubles * Math.Exp(exponent);
    double PTtriplesDTcorr = PTtriplesDTcoef * Math.Exp((DTcoeffT / 1E9) * PTsingles);
    /** NOT USED ***
     * double RTsinglesDTcorr = RTsingles * Math.Exp(( ((DTcoeffA/1E6) + ((DTcoeffB/1E12)*RTsingles)) *RTsingles)/4.0);
     * double RTdoublesDTcorr = RTdoubles * Math.Exp( ((DTcoeffA_RT/1E6) + ((DTcoeffB_RT/1E12)*RTsingles)) *RTsingles);
     * double RTtriplesDTcorr = RTtriples* (( (DTcoeffT3_RT*Math.Pow(RTsingles,3)) + (DTcoeffT2_RT*Math.Pow(RTsingles,2)) + (DTcoeffT1_RT*RTsingles) + DTcoeffT0_RT) + /DTcoeffT0_RT);
     ***************/
    //store the dead-time corrected values
    result.deadTimeCorrectedSinglesRate = PTsinglesDTcorr;
    result.deadTimeCorrectedDoublesRate = PTdoublesDTcorr;
    result.deadTimeCorrectedTriplesRate = PTtriplesDTcorr;
    //Calculate the Dytlewski Dead-Time-Corrected Rates, based upon
    //N. Dytlewski, Dead-time corrections for multiplicity counters,
    //Nuclear Instruments and Methods in Physics Research A305(1991)492-494
    double P03, P02, P13, P12, P23;
    P03 = Math.Pow((1.0 - (2.0 * phi)), 3.0);
    P02 = Math.Pow((1.0 - phi), 2.0);
    P13 = (2.0 * Math.Pow((1.0 - phi), 3.0)) - (2.0 * P03);
    P12 = 1.0 - P02;
    P23 = 1.0 + P03 - (2.0 * Math.Pow((1.0 - phi), 3.0));
    result.dytlewskiDeadTimeCorrectedTriplesRate = PTtriples / P03;
    //Martyn made me do it. hn 2.6.2015 — the Dytlewski value overrides the H&C-corrected triples rate stored above.
    result.deadTimeCorrectedTriplesRate = result.dytlewskiDeadTimeCorrectedTriplesRate;
    result.dytlewskiDeadTimeCorrectedDoublesRate = (PTdoubles / P02) + ((PTtriples * P13) / (P02 * P03));
    result.dytlewskiDeadTimeCorrectedSinglesRate = PTsingles + ((P12 * PTdoubles) / P02) + (PTtriples * (((P12 * P13) / (P02 * P03)) - (P23 / P03)));
    return(result);
}
/// <summary>
/// Returns the negation of <paramref name="value"/> by delegating to
/// the library's static <c>BigFloat.Negate</c>.
/// </summary>
protected override BigFloat Negate(BigFloat value) => BigFloat.Negate(value);
/// <summary>
/// Queries the RPC client for the balance of a fixed Tezos address.
/// NOTE(review): the returned balance is discarded — presumably this is a
/// connectivity/smoke test that only verifies the call completes; confirm.
/// </summary>
public async Task GetBalance()
{
    _ = await rpc.GetBalance("tz1hmK2ru6ism15MxXbnhKWWKGJ6hqWssMc5");
}
/// <summary>
/// Computes pi to the requested absolute <paramref name="precision"/>.
/// NOTE(review): there is no assertion — this only verifies that
/// <c>BigFloat.GetPi</c> completes without throwing at extreme precision.
/// </summary>
public void TestExtremeMathPi(int precision)
{
    _ = BigFloat.GetPi(AccuracyGoal.Absolute(precision));
}
/// <summary>
/// Computes fractal samples for <paramref name="numRows"/> rows beginning at
/// <paramref name="startRow"/>: for each pixel it runs <c>BigCompute</c>,
/// stores a continuous-smoothed dwell value in <c>m_samples</c>, and updates
/// the min/max dwell and sample trackers. Both loops bail out as soon as
/// <c>m_aborted</c> is set.
/// </summary>
private void DoComputeRows(BigFloat startX, BigFloat startY, BigFloat dx, BigFloat dy, int startRow, int numRows, int width, int maxDwell)
{
    BigInfo info;
    info.startX = startX;
    info.startY = startY;
    info.dx = dx;
    info.dy = dy;
    info.bailout = BigFloat.Bailout;
    info.maxDwell = maxDwell;

    int endRow = startRow + numRows;
    for (int row = startRow; row < endRow && !m_aborted; ++row)
    {
        info.v = row;
        for (int col = 0; col < width && !m_aborted; ++col)
        {
            info.h = col;

            double magnitude;
            ushort dwell;
            BigCompute(info, out magnitude, out dwell);

            // Escaped points get a continuous (smooth) coloring value; points
            // that never escaped (dwell == maxDwell) are marked with +infinity.
            // See http://en.wikipedia.org/wiki/Mandelbrot_set#Continuous_.28smooth.29_coloring
            float sample = dwell < maxDwell
                ? (float)(dwell + (A - Math.Log(0.5 * Math.Log(magnitude))) / B)
                : float.PositiveInfinity;

            m_samples[col, row] = sample;
            m_minDwell = Math.Min(m_minDwell, dwell);
            m_maxDwell = Math.Max(m_maxDwell, dwell);
            if (!float.IsInfinity(sample))
            {
                m_minSample = Math.Min(m_minSample, sample);
                m_maxSample = Math.Max(m_maxSample, sample);
            }
        }
    }
}
/// <summary>
/// Applies the decrement operator to <paramref name="value"/> in place
/// (the <c>ref</c> argument is updated) and returns the decremented value.
/// </summary>
protected override BigFloat PreDecrement(ref BigFloat value)
{
    value--;
    return value;
}
/// <summary>
/// Simultaneously finds all roots of a degree-(n-1) polynomial whose real and
/// imaginary coefficients are read from <c>RealNumbers</c>/<c>ImaginaryNumbers</c>,
/// iterating a Newton-style correction on every root estimate at once (the
/// estimates interact through the denominator product, as in the Aberth/
/// Durand–Kerner family of methods). Roots are held as parallel real/imaginary
/// lists. After iterating, repeated roots are averaged together, near-zero
/// components are clamped to 0, duplicates are removed, and near-integer real
/// parts are rounded.
/// </summary>
/// <param name="n">Number of polynomial coefficients (degree + 1).</param>
/// <param name="n_iters">Maximum number of refinement sweeps.</param>
/// <param name="tolerance">Convergence / root-merging threshold.</param>
/// <param name="initialGuess">Real parts of the root estimates; refined in place.</param>
/// <param name="zi">Imaginary parts of the root estimates; refined in place.</param>
/// <returns>Tuple of (real parts, imaginary parts) of the distinct roots found.</returns>
public static Tuple<List<BigFloat>, List<BigFloat>> Solve(int n, int n_iters, BigFloat tolerance, List<BigFloat> initialGuess, List<BigFloat> zi)
{
    int m = initialGuess.Count, i, j, k;
    // (pa,pb) = current root; (a,b), (qa,qb), (na,nb) hold complex intermediates
    // as (real, imaginary) pairs; d tracks the largest correction this sweep.
    BigFloat d = 0, pa, pb, a, b, qa, qb, k1, k2, k3, na, nb, s1, s2;
    for (i = 0; i < n_iters; ++i)
    {
        d = 0.0;
        for (j = 0; j < m; ++j)
        {
            //Read in zj
            pa = initialGuess[j];
            pb = zi[j];
            //Compute denominator
            //
            // (zj - z0) * (zj - z1) * ... * (zj - z_{n-1})
            //
            a = 1.0;
            b = 0.0;
            for (k = 0; k < m; ++k)
            {
                if (k == j)
                {
                    continue;
                }
                qa = pa - initialGuess[k];
                qb = pb - zi[k];
                // Skip other estimates that have (nearly) collided with this
                // one, so the denominator product does not vanish.
                if (qa * qa + qb * qb < tolerance)
                {
                    continue;
                }
                // Complex multiply (a,b) *= (qa,qb) using the 3-multiplication
                // (Karatsuba-style) form: k1, k2, k3 share products.
                k1 = qa * (a + b);
                k2 = a * (qb - qa);
                k3 = b * (qa + qb);
                a = k1 - k3;
                b = k1 + k2;
            }
            //Compute numerator
            // Horner evaluation of the polynomial at (pa,pb), highest
            // coefficient first; same 3-multiplication complex product.
            na = RealNumbers[n - 1];
            nb = ImaginaryNumbers[n - 1];
            s1 = pb - pa;
            s2 = pa + pb;
            for (k = n - 2; k >= 0; --k)
            {
                k1 = pa * (na + nb);
                k2 = na * s1;
                k3 = nb * s2;
                na = k1 - k3 + RealNumbers[k];
                nb = k1 + k2 + ImaginaryNumbers[k];
            }
            //Compute reciprocal
            // 1/(a,b) = (a,-b)/|  (a,b)|^2; fall back to 1+0i when the
            // denominator is numerically zero.
            k1 = a * a + b * b;
            if (BigFloat.Abs(k1) > Epsilon)
            {
                a /= k1;
                b /= -k1;
            }
            else
            {
                a = 1.0;
                b = 0.0;
            }
            //Multiply and accumulate
            // Correction (qa,qb) = numerator * 1/denominator; subtract it
            // from the current estimate (Newton step).
            k1 = na * (a + b);
            k2 = a * (nb - na);
            k3 = b * (na + nb);
            qa = k1 - k3;
            qb = k1 + k2;
            initialGuess[j] = pa - qa;
            zi[j] = pb - qb;
            d = BigFloat.Max(d, BigFloat.Max(BigFloat.Abs(qa), BigFloat.Abs(qb)));
        }
        //If converged, exit early
        if (d < tolerance)
        {
            break;
        }
    }
    // Post process: Combine any repeated roots
    // First pass: average each cluster of Near() roots; second pass writes the
    // average back into every member of the cluster.
    int count;
    for (i = 0; i < m; ++i)
    {
        count = 1;
        a = initialGuess[i];
        b = zi[i];
        for (j = 0; j < m; ++j)
        {
            if (i == j)
            {
                continue;
            }
            if (Near(initialGuess[i], zi[i], initialGuess[j], zi[j], tolerance))
            {
                ++count;
                a += initialGuess[j];
                b += zi[j];
            }
        }
        if (count > 1)
        {
            a /= count;
            b /= count;
            for (j = 0; j < m; ++j)
            {
                if (i == j)
                {
                    continue;
                }
                if (Near(initialGuess[i], zi[i], initialGuess[j], zi[j], tolerance))
                {
                    initialGuess[j] = a;
                    zi[j] = b;
                }
            }
            initialGuess[i] = a;
            zi[i] = b;
        }
    }
    // Clamp components that are numerically zero to exactly 0.
    for (i = 0; i < initialGuess.Count; i++)
    {
        if (BigFloat.Abs(zi[i]) < Epsilon)
        {
            zi[i] = 0;
        }
        if (BigFloat.Abs(initialGuess[i]) < Epsilon)
        {
            initialGuess[i] = 0;
        }
    }
    // Order by size
    // Real roots (zero imaginary part) sort after complex ones via the
    // descending IsZero() key, then everything is ordered by real part.
    List<Tuple<BigFloat, BigFloat>> Elements = initialGuess.Zip(zi, (x, y) => Tuple.Create(x, y)).OrderByDescending(x => x.Item2.IsZero()).ToList();
    Elements = Elements.OrderBy(x => x.Item1).ToList();
    // Clear
    // Rebuild the output lists, dropping exact duplicates. Note: this rebinds
    // the local parameters; the caller's original lists are NOT the returned ones.
    zi = new List<BigFloat>();
    initialGuess = new List<BigFloat>();
    for (i = 0; i < Elements.Count; i++)
    {
        bool found = false;
        for (j = 0; j < zi.Count; j++)
        {
            if (zi[j] == Elements[i].Item2 && initialGuess[j] == Elements[i].Item1)
            {
                found = true;
            }
        }
        if (!found)
        {
            zi.Add(Elements[i].Item2);
            initialGuess.Add(Elements[i].Item1);
        }
    }
    // Round the result
    // NOTE(review): this assumes FPart()/Floor() mutate the BigFloat in place;
    // if they return new values instead, both the fractional-part check and
    // the s.Floor() rounding are no-ops — confirm against the BigFloat API.
    foreach (BigFloat s in initialGuess)
    {
        BigFloat c = new BigFloat(s);
        c.FPart();
        if (BigFloat.Abs(c) < tolerance)
        {
            s.Floor();
        }
    }
    return new Tuple<List<BigFloat>, List<BigFloat>>(initialGuess, zi);
}
/// <summary>
/// Returns the negation of <paramref name="value"/> via the unary minus operator.
/// </summary>
protected override BigFloat Negate(BigFloat value) => -value;