Example #1
File: Cycle.cs Project: radtek/INCC6
        public bool Transfer(Multiplicity mup, MultiplicityResult mr, int idx)
        {
            if (mr == null)
            {
                return(true);
            }
            bool res = true;

            try
            {
                SetQCStatus(mup, QCTestStatus.Pass);  // marked Pass at the outset
                MultiplicityCountingRes lmcs = new MultiplicityCountingRes(mup.FA, idx);
                countresults.Add(mup, lmcs);
                lmcs.Totals = Totals;
                lmcs.TransferRawResult(mr);
                lmcs.TS = new TimeSpan(TS.Ticks);
                if (dsid.SRType.IsListMode() && lmcs.FA == FAType.FAOff)
                {
                    lmcs.LMTS[1] = TimeSpan.FromTicks((long)mr.TotalMeasurementTics);//Slow
                }
                else if (dsid.SRType.IsListMode() && lmcs.FA == FAType.FAOn)
                {
                    lmcs.LMTS[0] = TimeSpan.FromTicks((long)mr.TotalMeasurementTics);//Fast
                }
            }
            catch (OutOfMemoryException e)
            {
                mup.reason = "Multiplicity transfer " + e.Message;
                res        = false;
                logger?.TraceEvent(LogLevels.Error, 87406, mup.reason);
            }
            return(res);
        }
Example #2
        public bool Transfer(Multiplicity mup, MultiplicityResult mr, int idx)
        {
            if (mr == null)
            {
                return(true);
            }
            bool res = true;

            try
            {
                SetQCStatus(mup, QCTestStatus.Pass);  // marked Pass at the outset
                MultiplicityCountingRes lmcs = new MultiplicityCountingRes(mup.FA, idx);
                countresults.Add(mup, lmcs);
                lmcs.Totals = Totals;
                lmcs.TransferRawResult(mr);
                lmcs.TS = new TimeSpan(TS.Ticks);
            }
            catch (OutOfMemoryException e)
            {
                mup.reason = "Multiplicity transfer " + e.Message;
                res        = false;
                logger.TraceEvent(LogLevels.Error, 87406, mup.reason);
            }
            return(res);
        }
Example #3
File: Cycle.cs Project: hnordquist/INCC6
 public bool Transfer(Multiplicity mup, MultiplicityResult mr, int idx)
 {
     if (mr == null)
         return true;
     bool res = true;
     try
     {
         SetQCStatus(mup, QCTestStatus.Pass);  // marked Pass at the outset
         MultiplicityCountingRes lmcs = new MultiplicityCountingRes(mup.FA, idx);
         countresults.Add(mup, lmcs);
         lmcs.Totals = Totals;
         lmcs.TransferRawResult(mr);
         lmcs.TS = new TimeSpan(TS.Ticks);
     }
     catch (OutOfMemoryException e)
     {
         mup.reason = "Multiplicity transfer " + e.Message;
         res = false;
         logger.TraceEvent(LogLevels.Error, 87406,  mup.reason);
     }
     return res;
 }
Example #4
        /// <summary>
        /// GetSDTMultiplicityResult() calculates Singles, Doubles, and Triples rates plus related metadata.
        /// 
        /// This utility method works for both Slow- and Fast-Background Multiplicity Analyzers.
        /// 
        /// Note that the input parameter "accidentalsHistogram" need not be normalized;
        /// a normalized accidentals distribution is included in the output and is valid
        /// for both slow and fast accidentals.
        /// 
        /// </summary>
        /// <param name="realsPlusAccidentalsHistogram"> The histogram of gate populations. </param>
        /// <param name="accidentalsHistogram"> The histogram of gate populations - NOT NORMALIZED. </param>
        /// <param name="wasFastAccidentals"> true or false.  Affects how PTsingles are calculated, etc. </param>
        /// <param name="multiplicityGateWidth"> as a UInt64, in 100-nanosecond tics. </param>
        /// <param name="multiplicityDeadDelay"> as a UInt64, in 100-nanosecond tics. </param>
        /// <param name="accidentalsDelay"> as a UInt64, in 100-nanosecond tics. </param>
        /// <param name="deadTimeCoeffTinNanoSecs"> as a double, in nanoseconds. </param>
        /// <param name="deadTimeCoeffAinMicroSecs"> as a double, in microseconds. </param>
        /// <param name="deadTimeCoeffBinPicoSecs"> as a double, in picoseconds. </param>
        /// <param name="deadTimeCoeffCinNanoSecs"> as a double, in nanoseconds. </param>
        /// <param name="totalMeasurementTime"> as a double, in seconds. </param>
        /// <param name="normedAccidentalsHistogram">  UInt64[]. </param>
        /// <returns></returns>
        public MultiplicityResult GetSDTMultiplicityResult(UInt64[] realsPlusAccidentalsHistogram,
                                                           UInt64[] accidentalsHistogram,
                                                           Boolean wasFastAccidentals,
                                                           UInt64 multiplicityGateWidth,
                                                           UInt64 multiplicityDeadDelay,
                                                           UInt64 accidentalsDelay,
                                                           double deadTimeCoeffTinNanoSecs,
                                                           double deadTimeCoeffAinMicroSecs,
                                                           double deadTimeCoeffBinPicoSecs,
                                                           double deadTimeCoeffCinNanoSecs,
                                                           double totalMeasurementTime,
                                                           UInt64[] normedAccidentalsHistogram = null)
        {
            MultiplicityResult result;
            double phi;
            double gateInSeconds;
            UInt32 biggestKey, biggestAKey;
            int arrayLength;
            double[] alpha;
            double[] beta;
            BigFloat[] α = new BigFloat[0], β = new BigFloat[0];

            result = new MultiplicityResult();
            if (wasFastAccidentals == true)
            {
                result.isSlowBackground = false;
            }
            else
            {
                result.isSlowBackground = true;
            }

            //store parameters
            result.multiplicityGateWidth = multiplicityGateWidth;
            result.multiplicityDeadDelay = multiplicityDeadDelay;
            result.accidentalsDelay = accidentalsDelay;
            result.deadTimeCoefficientTinNanoSecs = deadTimeCoeffTinNanoSecs;
            result.deadTimeCoefficientAinMicroSecs = deadTimeCoeffAinMicroSecs;
            result.deadTimeCoefficientBinPicoSecs = deadTimeCoeffBinPicoSecs;
            result.deadTimeCoefficientCinNanoSecs = deadTimeCoeffCinNanoSecs;

            //copy the real-plus-accidental multiplicity histogram
            biggestKey = 0;
            arrayLength = realsPlusAccidentalsHistogram.Length;
            for (int i = 0; i < arrayLength; i++)
            {
                if (realsPlusAccidentalsHistogram[i] > 0)
                {
                    result.realPlusAccidentalDistribution.Add((UInt64)i, realsPlusAccidentalsHistogram[i]);
                    biggestKey = (UInt32)i;
                }
            }
            result.maxRABin = biggestKey;

            //copy the accidental-only histogram
            biggestAKey = 0;
            arrayLength = accidentalsHistogram.Length;
            for (int i = 0; i < arrayLength; i++)
            {
                if (accidentalsHistogram[i] > 0)
                {
                    result.accidentalDistribution.Add((UInt32)i, accidentalsHistogram[i]);
                    biggestAKey = (UInt32)i;
                }
            }
            result.maxABin = biggestAKey;

            //************************************************************
            //Normalize the AccidentalDistribution, 
            //scaling the FastBackgroundAnalysis result in proportion 
            //to the number of Real+Accidental gates
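            //In other words: when a pre-normalized histogram is supplied, each of its bins is scaled by
            //(total normalized-accidental gates / total Real+Accidental gates) to rebuild accidentalDistribution;
            //otherwise each accidental bin is scaled by (total Real+Accidental gates / total accidental gates)
            //to build normalizedAccidentalDistribution.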
            UInt64 numAccidentalGates = 0;
            UInt64 numRealPlusAccidentalGates = 0;
            foreach (KeyValuePair<UInt64, UInt64> pair in result.realPlusAccidentalDistribution)
            {
                numRealPlusAccidentalGates += pair.Value;
            }

            // compute the normalization parameter and recompute the unnormalized array from this one (code does not seem to work JFL)
            if (normedAccidentalsHistogram != null)
            {
                for (int i = 0; i < normedAccidentalsHistogram.Length; i++)
                {
                    if (normedAccidentalsHistogram[i] > 0)
                    {
                        result.normalizedAccidentalDistribution.Add((UInt32)i, normedAccidentalsHistogram[i]);
                    }
                }

                UInt64 numNAccidentalGates = 0;

                foreach (ulong no in normedAccidentalsHistogram)
                {
                    numNAccidentalGates += no;
                }

                double denormalizingRatio;
                UInt64 denormalizedRate;
                denormalizingRatio = ((double)numNAccidentalGates) / ((double)numRealPlusAccidentalGates);

                result.accidentalDistribution.Clear();
                foreach (KeyValuePair<UInt64, UInt64> pair in result.normalizedAccidentalDistribution)
                {
                    denormalizedRate = (UInt64)(pair.Value * denormalizingRatio);
                    result.accidentalDistribution.Add(pair.Key, denormalizedRate);
                    biggestAKey = (UInt32)pair.Key;
                }
                result.maxABin = biggestAKey;

                foreach (KeyValuePair<UInt64, UInt64> pair in result.accidentalDistribution)
                {
                    numAccidentalGates += pair.Value;
                }

            }
            else
            {
                foreach (KeyValuePair<UInt64, UInt64> pair in result.accidentalDistribution)
                {
                    numAccidentalGates += pair.Value;
                }


                double normalizingRatio;
                UInt64 normalizedRate;
                normalizingRatio = ((double)numRealPlusAccidentalGates) / ((double)numAccidentalGates);

                result.normalizedAccidentalDistribution.Clear();
                foreach (KeyValuePair<UInt64, UInt64> pair in result.accidentalDistribution)
                {
                    normalizedRate = (UInt64)(pair.Value * normalizingRatio);
                    result.normalizedAccidentalDistribution.Add(pair.Key, normalizedRate);
                }
            }


            //*** END Normalizing the AccidentalDistribution *************


            //store the bigger key...
            if (biggestAKey > biggestKey)
            {
                biggestKey = biggestAKey;
            }
            if (biggestKey < 2)
            {
                biggestKey = 2;  //...minimum size for data-output arrays...
            }

            alpha = new double[biggestKey + 1];
            beta = new double[biggestKey + 1];

            gateInSeconds = ((double)multiplicityGateWidth) * this.ticSizeInSeconds;
            phi = (deadTimeCoeffTinNanoSecs / 1E9) / gateInSeconds;
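            //phi is dimensionless: the dead-time parameter T (converted from nanoseconds to seconds)
            //divided by the multiplicity gate width in seconds; it drives the alpha/beta coefficients below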
            bool standard = true;  // dev note: this toggle switches to the BigFloat path when FP overflow is detected
            int axover = 0, bxover = 0;
            //calculate the alphas
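            //  for n >= 2 (and phi non-negligible):
            //    alpha[n] = 1 + sum_{k=0..n-2} C(n-1, k+1) * (k+1)^k * phi^k / (1 - (k+1)*phi)^(k+2)
            //  the BigFloat branch below evaluates the same sum at 128-bit precision once a double overflows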
            alpha[0] = 0.0;
            alpha[1] = 1.0;
            for (int n = 2; n <= biggestKey; n++)
            {
                if (phi > 1e-20)
                {
                    alpha[n] = 1.0;
                    if (standard)
                    {
                        for (int k = 0; k <= (n - 2); k++)
                        {
                            double alphaCoeff;
                            alphaCoeff = this.binomialCoefficient((n - 1), (k + 1))
                                         * Math.Pow((double)(k + 1), (double)k)
                                         * Math.Pow(phi, (double)k)
                                         / (Math.Pow((1.0 - ((k + 1) * phi)), (double)(k + 2)));
                            alpha[n] += alphaCoeff;
                            if (Double.IsInfinity(alpha[n]) || Double.IsNaN(alpha[n]))
                            {
                                result.warnings.Add("Overflow alpha at n = " + n + ", k = " + k);
                                alpha[n] = 0;
                                axover = n;
                                standard = false; k = n; n = n - 1; // redo the loop
                                α = new BigFloat[biggestKey + 1];
                            }
                        }
                    }
                    else
                    {
                        // INCCCycleConditioning.calc_alpha_beta 
                        PrecisionSpec ps128 = new PrecisionSpec(128, PrecisionSpec.BaseType.BIN);
                        BigFloat one = new BigFloat(1, ps128);
                        BigFloat combination;
                        BigFloat sum;
                        double raise1, power1, power2;
                        double log1, log2, log3;
                        BigFloat exp1, exp2, exp3;
                        /* calculate alpha array */
                        sum = new BigFloat(0, ps128);
                        for (int k = 0; k <= (n - 2); k++)
                        {
                            combination = new BigFloat(binomialCoefficient((n - 1), (k + 1)), ps128);
                            raise1 = (double)(k + 1);
                            power1 = (double)k;
                            power2 = (double)(k + 2);
                            log1 = Math.Log(raise1);
                            log2 = Math.Log(phi);
                            log3 = Math.Log(1.0 - raise1 * phi);
                            exp1 = BigFloat.Exp(new BigFloat(log1 * power1, ps128));
                            exp2 = BigFloat.Exp(new BigFloat(log2 * power1, ps128));
                            exp3 = BigFloat.Exp(new BigFloat(log3 * power2, ps128));
                            sum += combination * exp1 * exp2 / exp3;
                        }
                        α[n] = new BigFloat(one + sum, ps128);
                    }
                }
                else
                {
                    alpha[n] = 1.0;
                }
            }

            //calculate the betas
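            //  for n >= 3 (and phi non-negligible):
            //    beta[n] = alpha[n] - 1 + sum_{k=0..n-3} C(n-1, k+2) * (k+1) * (k+2)^k * phi^k / (1 - (k+2)*phi)^(k+3)
            //  as with alpha, the BigFloat branch repeats the sum at 128-bit precision after an overflow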
            standard = true;
            beta[0] = 0.0;
            beta[1] = 0.0;
            beta[2] = alpha[2] - 1.0;
            for (int n = 3; n <= biggestKey; n++)
            {
                if (phi > 1e-20)
                {
                    beta[n] = alpha[n] - 1.0;
                    if (standard)
                    {
                        for (int k = 0; k <= (n - 3); k++)
                        {
                            double betaCoeff;
                            betaCoeff = this.binomialCoefficient((n - 1), (k + 2))
                                        * (k + 1)
                                        * Math.Pow((double)(k + 2), (double)k)
                                        * Math.Pow(phi, (double)k)
                                        / (Math.Pow((1.0 - ((k + 2) * phi)), (double)(k + 3)));
                            beta[n] += betaCoeff;
                            if (Double.IsInfinity(beta[n]) || Double.IsNaN(beta[n]))
                            {
                                result.warnings.Add("Overflow beta at n = " + n + ", k = " + k);
                                beta[n] = 0;
                                bxover = n;
                                standard = false; k = n; n = n - 1; // redo the loop
                                β = new BigFloat[biggestKey + 1];
                            }
                        }
                    }
                    else
                    {
                        PrecisionSpec ps128 = new PrecisionSpec(128, PrecisionSpec.BaseType.BIN);
                        BigFloat one = new BigFloat(1, ps128);
                        BigFloat combination;
                        BigFloat sum;
                        double raise1, power1, power2;
                        double log1, log2, log3;
                        BigFloat exp1, exp2, exp3;
                        sum = new BigFloat(0, ps128);
                        for (int k = 0; k <= n - 3; k++)
                        {
                            combination = new BigFloat(binomialCoefficient((n - 1), (k + 2)), ps128);
                            raise1 = (double)(k + 2);
                            power1 = (double)k;
                            power2 = (double)(k + 3);
                            log1 = Math.Log(raise1);
                            log2 = Math.Log(phi);
                            log3 = Math.Log(1.0 - raise1 * phi);
                            exp1 = BigFloat.Exp(new BigFloat(log1 * power1, ps128));
                            exp2 = BigFloat.Exp(new BigFloat(log2 * power1, ps128));
                            exp3 = BigFloat.Exp(new BigFloat(log3 * power2, ps128));
                            sum += combination * (new BigFloat(k + 1, ps128)) * exp1 * exp2 / exp3;
                        }
                        β[n] = α[n] - one + sum;
                    }
                }
                else
                {
                    beta[n] = 0.0;
                }
            }

            //store the alpha and beta coefficients
            result.alpha = new double[biggestKey + 1];
            result.beta = new double[biggestKey + 1];
            for (int i = 0; i <= biggestKey; i++)
            {
                result.alpha[i] = alpha[i];
                result.beta[i] = beta[i];
            }

            double lastGoodD = 0;
            for (int i = axover; axover > 0 && i <= biggestKey; i++)
            {
                double d;
                bool good = Double.TryParse(α[i].ToString(), System.Globalization.NumberStyles.Any, System.Globalization.CultureInfo.InvariantCulture.NumberFormat, out d);  // what to do when it is really larger than a double?
                if (!good)
                {
                    result.alpha[i] = lastGoodD;
                    result.warnings.Add(String.Format("α[{0}] conversion failed on {1}", i, α[i].ToString()));
                }
                else
                {
                    lastGoodD = d;
                    result.alpha[i] = d;
                }
            }

            for (int i = bxover; bxover > 0 && i <= biggestKey; i++)
            {
                double d;
                bool good = Double.TryParse(β[i].ToString(), System.Globalization.NumberStyles.Any, System.Globalization.CultureInfo.InvariantCulture.NumberFormat, out d); 
                if (!good)
                {
                    result.beta[i] = lastGoodD;
                    result.warnings.Add(String.Format("β[{0}] conversion failed on {1}", i, β[i].ToString()));  // URGENT: alpha/beta arrays are to be transformed from doubles to BigFloat types when this conversion happens to overflow                
                 }
                else
                {
                    lastGoodD = d;
                    result.beta[i] = d;
                }
            }

            //NOTE: in the following calculations,
            //variables named RAxxx refer to "Reals Plus Accidentals" phenomena, and
            //variables named Axxx  refer to "Accidentals" phenomena (such as, fast-background counting)

            //calculate the factorial moments
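            //  the r-th (unnormalized) factorial moment of a multiplicity distribution P(n) is
            //    m_r = sum_n C(n, r) * P(n)
            //  so m_0 is the total gate count, m_1 = sum n*P(n), m_2 = sum n(n-1)/2*P(n), m_3 = sum n(n-1)(n-2)/6*P(n);
            //  the loop below accumulates these for both the RA and A distributions, plus the alpha- and
            //  beta-weighted moments used later for dead-time correction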
            double RAfactorialMoment0, RAfactorialMoment1, RAfactorialMoment2, RAfactorialMoment3;
            double AfactorialMoment0, AfactorialMoment1, AfactorialMoment2, AfactorialMoment3;
            double RAfactorialMomentAlpha1, AfactorialMomentAlpha1;
            double RAfactorialMomentBeta2, AfactorialMomentBeta2;
            RAfactorialMoment0 = 0.0;
            RAfactorialMoment1 = 0.0;
            RAfactorialMoment2 = 0.0;
            RAfactorialMoment3 = 0.0;
            AfactorialMoment0 = 0.0;
            AfactorialMoment1 = 0.0;
            AfactorialMoment2 = 0.0;
            AfactorialMoment3 = 0.0;
            RAfactorialMomentAlpha1 = 0.0;
            AfactorialMomentAlpha1 = 0.0;
            RAfactorialMomentBeta2 = 0.0;
            AfactorialMomentBeta2 = 0.0;

            for (int i = 0; i <= biggestKey; i++)
            {
                UInt64 gateCount;
                int j = i + 1;
                int k = i + 2;
                int L = i + 3;

                if (result.realPlusAccidentalDistribution.TryGetValue((UInt64)i, out gateCount))
                {
                    RAfactorialMoment0 += (double)gateCount;
                }
                if (result.accidentalDistribution.TryGetValue((UInt64)i, out gateCount))
                {
                    AfactorialMoment0 += gateCount;
                }

                if (j <= biggestKey)
                {
                    if (result.realPlusAccidentalDistribution.TryGetValue((UInt64)j, out gateCount))
                    {
                        RAfactorialMoment1 += (double)(((UInt64)j) * gateCount);
                        RAfactorialMomentAlpha1 += alpha[j] * ((double)gateCount);
                    }
                    if (result.accidentalDistribution.TryGetValue((UInt64)j, out gateCount))
                    {
                        AfactorialMoment1 += (double)(((UInt64)j) * gateCount);
                        AfactorialMomentAlpha1 += alpha[j] * ((double)gateCount);
                    }
                }

                if (k <= biggestKey)
                {
                    if (result.realPlusAccidentalDistribution.TryGetValue((UInt32)k, out gateCount))
                    {
                        RAfactorialMoment2 += (double)(((UInt64)(k * (k - 1) / 2)) * gateCount);
                        RAfactorialMomentBeta2 += beta[k] * ((double)gateCount);
                    }
                    if (result.accidentalDistribution.TryGetValue((UInt32)k, out gateCount))
                    {
                        AfactorialMoment2 += (double)(((UInt64)(k * (k - 1) / 2)) * gateCount);
                        AfactorialMomentBeta2 += beta[k] * ((double)gateCount);
                    }
                }

                if (L <= biggestKey)
                {
                    if (result.realPlusAccidentalDistribution.TryGetValue((UInt32)L, out gateCount))
                    {
                        RAfactorialMoment3 += ((double)(((UInt64)(L * (L - 1) * (L - 2))) * gateCount)) / 6.0;
                    }
                    if (result.accidentalDistribution.TryGetValue((UInt32)L, out gateCount))
                    {
                        AfactorialMoment3 += ((double)(((UInt64)(L * (L - 1) * (L - 2))) * gateCount)) / 6.0;
                    }
                }
            }

            //store the factorial moments
            result.RAfactorialMoment0 = RAfactorialMoment0;
            result.RAfactorialMoment1 = RAfactorialMoment1;
            result.RAfactorialMoment2 = RAfactorialMoment2;
            result.RAfactorialMoment3 = RAfactorialMoment3;
            result.AfactorialMoment0 = AfactorialMoment0;
            result.AfactorialMoment1 = AfactorialMoment1;
            result.AfactorialMoment2 = AfactorialMoment2;
            result.AfactorialMoment3 = AfactorialMoment3;

            //NOTE: in the following calculations,
            //variables named PTxxx refer to "Pulse Triggered" phenomena, and
            //variables named RTxxx refer to "Regularly Triggered" phenomena (such as, fast-background counting)

            //penultimately, use all this to calculate the SDT rates
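            //  pulse-triggered (PT) doubles and triples come from differences of the normalized RA and A moments:
            //    Doubles ~ S * (m1_RA - m1_A)
            //    Triples ~ S * ((m2_RA - m2_A) - (m1_A / m0_A) * (m1_RA - m1_A))
            //  where S is PTsingles; these become per-second rates when divided by totalMeasurementTime below.
            //  the regularly-triggered (RT) rates use the accidental moments alone,
            //  e.g. RTdoubles = (2*m2_A - m1_A^2) / (2 * gate width in seconds)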
            double PTsingles, RTsingles, normRAfactorialMoment1, normRAfactorialMoment2;
            double normRAfactorialMomentAlpha1, normRAfactorialMomentBeta2;
            double normAfactorialMoment0, normAfactorialMoment1, normAfactorialMoment2, normAfactorialMoment3;
            double normAfactorialMomentAlpha1, normAfactorialMomentBeta2;

            if (wasFastAccidentals)
            {
                PTsingles = RAfactorialMoment0;
                double gateFactor = numAccidentalGates / Math.Floor(totalMeasurementTime / (multiplicityGateWidth * this.ticSizeInSeconds));
                RTsingles = AfactorialMoment1 / gateFactor;
                normRAfactorialMoment1 = RAfactorialMoment1 / PTsingles;
                normRAfactorialMoment2 = RAfactorialMoment2 / PTsingles;
                //NOT USED:  double normRAfactorialMoment3 = RAfactorialMoment3 / PTsingles;
                normRAfactorialMomentAlpha1 = RAfactorialMomentAlpha1 / PTsingles;
                normRAfactorialMomentBeta2 = RAfactorialMomentBeta2 / PTsingles;
                normAfactorialMoment0 = AfactorialMoment0 / numAccidentalGates;
                normAfactorialMoment1 = AfactorialMoment1 / numAccidentalGates;
                normAfactorialMoment2 = AfactorialMoment2 / numAccidentalGates;
                normAfactorialMoment3 = AfactorialMoment3 / numAccidentalGates;
                normAfactorialMomentAlpha1 = AfactorialMomentAlpha1 / numAccidentalGates;
                normAfactorialMomentBeta2 = AfactorialMomentBeta2 / numAccidentalGates;
            }
            else
            {
                PTsingles = AfactorialMoment0;  // note: RAfactorialMoment0 and AfactorialMoment0 are identical for the "slow" case, so either could be used here
                RTsingles = AfactorialMoment0;
                normRAfactorialMoment1 = RAfactorialMoment1 / PTsingles;
                normRAfactorialMoment2 = RAfactorialMoment2 / PTsingles;
                //NOT USED:  double normRAfactorialMoment3 = RAfactorialMoment3 / PTsingles;
                normRAfactorialMomentAlpha1 = RAfactorialMomentAlpha1 / PTsingles;
                normRAfactorialMomentBeta2 = RAfactorialMomentBeta2 / PTsingles;
                normAfactorialMoment0 = AfactorialMoment0 / RTsingles;
                normAfactorialMoment1 = AfactorialMoment1 / RTsingles;
                normAfactorialMoment2 = AfactorialMoment2 / RTsingles;
                normAfactorialMoment3 = AfactorialMoment3 / RTsingles;
                normAfactorialMomentAlpha1 = AfactorialMomentAlpha1 / RTsingles;
                normAfactorialMomentBeta2 = AfactorialMomentBeta2 / RTsingles;
            }

            double RTdoubles = (0.5 / (multiplicityGateWidth * this.ticSizeInSeconds)) * ((2.0 * normAfactorialMoment2) - Math.Pow(normAfactorialMoment1, 2.0));
            double RTtriples = (0.16667 / (multiplicityGateWidth * this.ticSizeInSeconds))
                               * ((6.0 * normAfactorialMoment3) - (6.0 * normAfactorialMoment1 * normAfactorialMoment2) + (2.0 * Math.Pow(normAfactorialMoment1, 3)));

            double PTdoubles = PTsingles * (normRAfactorialMoment1 - normAfactorialMoment1);
            double PTtriples;
            double PTtriplesDTcoef;
            if (AfactorialMoment0 != 0.0)
            {
                PTtriples = PTsingles * ((normRAfactorialMoment2 - normAfactorialMoment2)
                                       - ((normAfactorialMoment1 / normAfactorialMoment0)
                                          * (normRAfactorialMoment1 - normAfactorialMoment1)));
                PTtriplesDTcoef = PTsingles * ((normRAfactorialMomentBeta2 - normAfactorialMomentBeta2)
                                               - ((normAfactorialMomentAlpha1 / normAfactorialMoment0)
                                                  * (normRAfactorialMomentAlpha1 - normAfactorialMomentAlpha1)));
            }
            else
            {
                PTtriples = 0.0;
                PTtriplesDTcoef = 0.0;
            }

            if (totalMeasurementTime > 1E-12)
            {
                PTsingles /= totalMeasurementTime;
                PTdoubles /= totalMeasurementTime;
                PTtriples /= totalMeasurementTime;
                PTtriplesDTcoef /= totalMeasurementTime;
            }
            else
            {
                PTsingles = 0.0;
                PTdoubles = 0.0;
                PTtriples = 0.0;
                PTtriplesDTcoef = 0.0;
            }

            //store the SDT rates
            result.singlesRatePerSecond = PTsingles;
            result.doublesRatePerSecond = PTdoubles;
            result.triplesRatePerSecond = PTtriples;

            //now that rates are calculated, calculate the dead-time corrections

            // dead time correction coefficients for RT rates (INCC as well as H&C)
            // determined experimentally using D/T ratio - see analysisComparison/triplesDeadTime.xls
            // the best fit (poly3) used to reproduce the trend
            // note: valid only for sources in the range of ~100n/s - ~500,000 n/s
            /** NOT USED ***
            double DTcoeffT0_RT = 3.42854465;
            double DTcoeffT1_RT = 3.35351651E-6;
            double DTcoeffT2_RT = -5.83706327E-12;
            double DTcoeffT3_RT = 2.03604973E-17;
             ***************/

            // dead time correction coefficients for PT rates with background calculated using H&C consecutive gates
            // determined experimentally using D/T ratio - see analysisComparison/triplesDeadTime.xls
            // the best fit (poly3) used to reproduce the trend
            // note: valid only for sources in the range of ~100n/s - ~500,000 n/s
            /** NOT USED ***
            double DTcoeffT0_PT = 2.78760077;
            double DTcoeffT1_PT = 2.86078894E-6;
            double DTcoeffT2_PT = -8.21994836E-12;
            double DTcoeffT3_PT = 9.45195862E-17;
             ***************/

            /** NOT USED ***
            double DTcoeffA_RT = 0.2063;  // these values were determined using two source method
            double DTcoeffB_RT = 0.04256;
             ***************/

            double DTcoeffA = deadTimeCoeffAinMicroSecs;
            double DTcoeffB = deadTimeCoeffBinPicoSecs;
            double DTcoeffC = deadTimeCoeffCinNanoSecs;
            double DTcoeffT = deadTimeCoeffTinNanoSecs;

            double exponent = ((DTcoeffA / 1E6) + ((DTcoeffB / 1E12) * PTsingles)) * PTsingles;
            double PTsinglesDTcorr = PTsingles * Math.Exp(exponent / 4.0);
            double PTdoublesDTcorr = PTdoubles * Math.Exp(exponent);
            double PTtriplesDTcorr = PTtriplesDTcoef * Math.Exp((DTcoeffT / 1E9) * PTsingles);

            /** NOT USED ***
            double RTsinglesDTcorr = RTsingles * Math.Exp(( ((DTcoeffA/1E6) + ((DTcoeffB/1E12)*RTsingles)) *RTsingles)/4.0);
            double RTdoublesDTcorr = RTdoubles * Math.Exp( ((DTcoeffA_RT/1E6) + ((DTcoeffB_RT/1E12)*RTsingles)) *RTsingles);
            double RTtriplesDTcorr = RTtriples* (( (DTcoeffT3_RT*Math.Pow(RTsingles,3)) 
                                                   + (DTcoeffT2_RT*Math.Pow(RTsingles,2)) 
                                                   + (DTcoeffT1_RT*RTsingles)
                                                   + DTcoeffT0_RT)
                                                 /DTcoeffT0_RT); 
             ***************/

            //store the dead-time corrected values
            result.deadTimeCorrectedSinglesRate = PTsinglesDTcorr;
            result.deadTimeCorrectedDoublesRate = PTdoublesDTcorr;
            result.deadTimeCorrectedTriplesRate = PTtriplesDTcorr;

            //Calculate the Dytlewski Dead-Time-Corrected Rates, based upon
            //N. Dytlewski, Dead-time corrections for multiplicity counters,
            //Nuclear Instruments and Methods in Physics Research A305(1991)492-494
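            //The P terms below are polynomials in phi used to unfold the observed PT rates into the
            //Dytlewski dead-time-corrected singles, doubles, and triples rates stored just after.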
            double P03, P02, P13, P12, P23;
            P03 = Math.Pow((1.0 - (2.0 * phi)), 3.0);
            P02 = Math.Pow((1.0 - phi), 2.0);
            P13 = (2.0 * Math.Pow((1.0 - phi), 3.0)) - (2.0 * P03);
            P12 = 1.0 - P02;
            P23 = 1.0 + P03 - (2.0 * Math.Pow((1.0 - phi), 3.0));
            result.dytlewskiDeadTimeCorrectedTriplesRate = PTtriples / P03;
            //Martyn made me do it. hn 2.6.2015
            result.deadTimeCorrectedTriplesRate = result.dytlewskiDeadTimeCorrectedTriplesRate;
            result.dytlewskiDeadTimeCorrectedDoublesRate = (PTdoubles / P02) + ((PTtriples * P13) / (P02 * P03));
            result.dytlewskiDeadTimeCorrectedSinglesRate = PTsingles + ((P12 * PTdoubles) / P02) + (PTtriples * (((P12 * P13) / (P02 * P03)) - (P23 / P03)));

            return (result);
        }
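
A minimal call sketch for GetSDTMultiplicityResult, following the unit conventions documented in Example #4. The analyzer instance (here called "analyzer"), the histogram contents, and the numeric parameter values are hypothetical placeholders; only the method signature, parameter units, and result fields are taken from the listing above.

        // Hypothetical usage sketch; "analyzer" stands for an instance of the class that hosts GetSDTMultiplicityResult.
        UInt64[] raHistogram = new UInt64[128];   // reals-plus-accidentals gate populations, filled by the acquisition
        UInt64[] aHistogram  = new UInt64[128];   // accidentals-only gate populations, not normalized

        MultiplicityResult sdt = analyzer.GetSDTMultiplicityResult(
            raHistogram,
            aHistogram,
            wasFastAccidentals: true,
            multiplicityGateWidth: 640,           // 64 microseconds expressed in 100-nanosecond tics
            multiplicityDeadDelay: 15,            // 1.5 microseconds in 100-nanosecond tics
            accidentalsDelay: 40960,              // 4096 microseconds in 100-nanosecond tics
            deadTimeCoeffTinNanoSecs: 100.0,
            deadTimeCoeffAinMicroSecs: 0.5,
            deadTimeCoeffBinPicoSecs: 0.1,
            deadTimeCoeffCinNanoSecs: 0.0,
            totalMeasurementTime: 60.0);          // seconds

        Console.WriteLine($"S={sdt.singlesRatePerSecond}  D={sdt.doublesRatePerSecond}  T={sdt.triplesRatePerSecond}");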