Example #1
        /// <summary>
        /// Calculates the KL-divergence between this Dirichlet random variable and another
        /// </summary>
        /// <param name="other">The other Dirichlet distribution.</param>
        /// <param name="beta">The two dimensional array of cross-counts.</param>
        /// <returns>The KL divergence</returns>
        public double KLDivergence(Dirichlet other, double[][] beta)
        {
            _2_gammafamily g  = new _2_gammafamily();
            double         kl = this.InformationEntropy();   // Start from the entropy of this distribution.

            // Add the log-gamma terms of the other distribution's parameters
            // (the positive part of its normalization constant).
            for (int i = 0; i < other.Alpha.Length; i++)
            {
                kl += g.Gammaln(other.Alpha[i]);
            }

            // Subtract the log-gamma of the other distribution's parameter sum
            // (the negative part of its normalization constant).
            kl -= g.Gammaln(other.SumAlpha);

            // Accumulate the cross-count contributions, weighted by this
            // distribution's expected component probabilities Alpha[i] / SumAlpha.
            for (int i = 0; i < this.Alpha.Length; i++)
            {
                double crossTerm = 0;
                for (int j = 0; j < other.Alpha.Length; j++)
                {
                    crossTerm -= (beta[i][j] - 1) * (g.Digamma(beta[i][j]) - g.Digamma(other.Alpha[j]));
                }

                kl -= crossTerm * this.Alpha[i] / this.SumAlpha;
            }

            return kl;
        }
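
For comparison with the cross-count variant above, here is a minimal sketch of the textbook closed-form KL divergence between two Dirichlet distributions. It assumes the same `_2_gammafamily` helpers (`Gammaln`, `Digamma`) and the `Alpha`/`SumAlpha` members shown in Example #1; it is not the author's method, only the standard formula for reference.

        // Sketch: standard closed-form KL(P || Q) for Dirichlet P and Q,
        // KL = ln G(a0_p) - ln G(a0_q) - sum_i [ln G(a_p,i) - ln G(a_q,i)]
        //      + sum_i (a_p,i - a_q,i) * (psi(a_p,i) - psi(a0_p)).
        public static double StandardDirichletKL(Dirichlet p, Dirichlet q)
        {
            _2_gammafamily g = new _2_gammafamily();

            // Normalizing-constant terms for the parameter sums.
            double kl = g.Gammaln(p.SumAlpha) - g.Gammaln(q.SumAlpha);

            for (int i = 0; i < p.Alpha.Length; i++)
            {
                // Per-component normalizing-constant terms.
                kl -= g.Gammaln(p.Alpha[i]) - g.Gammaln(q.Alpha[i]);

                // Expected log-ratio term under P.
                kl += (p.Alpha[i] - q.Alpha[i]) * (g.Digamma(p.Alpha[i]) - g.Digamma(p.SumAlpha));
            }

            return kl;
        }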
Example #2
        /// <summary>
        /// Computes the entropy of a Dirichlet distribution, based on the formula for information entropy given at https://en.wikipedia.org/wiki/Dirichlet_distribution.
        /// </summary>
        /// <param name="alpha">The parameters of the Dirichlet distribution. These correspond to a histogram of counts.</param>
        /// <returns>The entropy of the Dirichlet distribution.</returns>
        public double InformationEntropy(double[] alpha)
        {
            _2_gammafamily g = new _2_gammafamily();
            double         alpha_0 = 0, H = 0; // The sum of the parameters (normalizing factor) and the final entropy term, respectively.
            int            K = alpha.Length;

            for (int i = 0; i < K; i++)
            {
                alpha[i] += regularizer;                          // Before doing anything else, regularize the parameters, which is equivalent to placing a uniform prior.
                alpha_0  += alpha[i];
                H        += g.Gammaln(alpha[i]);                  // Positive part of the normalization constant (the log of a multivariate beta function).
                H        -= (alpha[i] - 1) * g.Digamma(alpha[i]); // The contribution from each of the alphas.
            }
            H -= g.Gammaln(alpha_0);                              // Negative part of the normalization constant.
            H += (alpha_0 - K) * g.Digamma(alpha_0);              // The contribution from the normalizing factor.
            return H;
        }
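
The loop above implements the Wikipedia entropy formula H = ln B(alpha) + (alpha_0 - K) * psi(alpha_0) - sum_i (alpha_i - 1) * psi(alpha_i). A minimal usage sketch follows; the containing class name `DirichletEntropy` is an assumption (the snippet does not show it), and note that the method adds `regularizer` to the input array in place.

        using System;

        public static class Demo
        {
            public static void Main()
            {
                // Hypothetical containing class for InformationEntropy and the `regularizer` field.
                var model = new DirichletEntropy();

                // A histogram of observed counts for a 3-category Dirichlet.
                double[] counts = { 3.0, 1.0, 2.0 };

                // Note: InformationEntropy modifies `counts` in place by adding the regularizer.
                double entropy = model.InformationEntropy(counts);
                Console.WriteLine($"Dirichlet entropy: {entropy:F4}");
            }
        }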