Example #1
        internal void TrainHMMScaled(List<T[]> trainingData)
        {
            bool iterating = true;

            while (iterating)
            {
                double[] tempPI = new double[initialStateDistribution.Length];
                double[,] tempA    = new double[stateTransitionProbabilities.GetLength(0), stateTransitionProbabilities.GetLength(1)];
                double[,] tempANum = new double[stateTransitionProbabilities.GetLength(0), stateTransitionProbabilities.GetLength(1)];
                double[] tempADen = new double[stateTransitionProbabilities.GetLength(0)];
                double[,] tempB    = new double[symbolDistributionDiscrete.GetLength(0), symbolDistributionDiscrete.GetLength(1)];
                double[,] tempBNum = new double[symbolDistributionDiscrete.GetLength(0), symbolDistributionDiscrete.GetLength(1)];
                double[,] tempBDen = new double[symbolDistributionDiscrete.GetLength(0), symbolDistributionDiscrete.GetLength(1)];
                double[,] alpha;
                double[,] beta;
                double[]           c;
                DiscreteHMM<T, U> newHMM;

                for (int k = 0; k < trainingData.Count; k++)
                {
                    double[, ,] xi  = new double[trainingData[k].Length - 1, states.Length, states.Length];
                    double[,] gamma = new double[trainingData[k].Length, states.Length];
                    ForwardAndBackwardVariablesScaled(trainingData[k], out alpha, out beta, out c);

                    //Compute xi and gamma
                    for (int t = 0; t < trainingData[k].Length; t++)
                    {
                        for (int i = 0; i < states.Length; i++)
                        {
                            if (t < trainingData[k].Length - 1)
                            {
                                for (int j = 0; j < states.Length; j++)
                                {
                                    // With scaled alpha/beta (Rabiner's convention) the 1/P(O|lambda) factor is
                                    // already absorbed into the scaled variables, so xi needs no extra division.
                                    xi[t, i, j] = (alpha[t, i] * stateTransitionProbabilities[i, j] * SymbolProbability(trainingData[k][t + 1], states[j]) * beta[t + 1, j]);
                                }
                            }
                            // Under the same scaling convention, gamma_t(i) = alpha_hat * beta_hat / c[t].
                            gamma[t, i] = (alpha[t, i] * beta[t, i]) / c[t];
                        }
                    }

                    for (int i = 0; i < states.Length; i++)
                    {
                        tempPI[i] += gamma[0, i];

                        for (int j = 0; j < states.Length; j++)
                        {
                            double aNum = 0;

                            for (int t = 0; t < trainingData[k].Length - 1; t++)
                            {
                                aNum += xi[t, i, j];
                            }
                            tempANum[i, j] += aNum;
                        }

                        double aDen = 0;
                        for (int t = 0; t < trainingData[k].Length - 1; t++)
                        {
                            aDen += gamma[t, i];
                        }
                        tempADen[i] += aDen;
                    }

                    for (int j = 0; j < states.Length; j++)
                    {
                        for (int v = 0; v < symbols.Length; v++)
                        {
                            double bNum = 0.0;
                            double bDen = 0.0;

                            for (int t = 0; t < trainingData[k].Length; t++)
                            {
                                if (trainingData[k][t].Equals(symbols[v]))
                                {
                                    bNum += gamma[t, j];
                                }
                                bDen += gamma[t, j];
                            }

                            tempBNum[j, v] += bNum;
                            tempBDen[j, v] += bDen;
                        }
                    }
                }

                //Calculate the final reestimated values
                for (int i = 0; i < states.Length; i++)
                {
                    tempPI[i] = tempPI[i] / (double)trainingData.Count; //Averaging pi over sequences isn't in Rabiner's tutorial, but it appears necessary when re-estimating ergodic models from multiple sequences

                    for (int j = 0; j < states.Length; j++)
                    {
                        tempA[i, j] = tempANum[i, j] / tempADen[i];
                    }

                    for (int v = 0; v < symbols.Length; v++)
                    {
                        tempB[i, v] = tempBNum[i, v] / tempBDen[i, v];
                    }
                }

                newHMM = new DiscreteHMM<T, U>(symbols, states, tempA, tempPI, tempB);
                double pOld = 0.0;
                double pNew = 0.0;
                for (int k = 0; k < trainingData.Count; k++)
                {
                    pOld += LogFastObservationSequenceProbability(trainingData[k]);         //Multiplying per-sequence probabilities corresponds to summing their log-probabilities
                    pNew += newHMM.LogFastObservationSequenceProbability(trainingData[k]);
                }

                if (pNew > pOld + 1e-8) //Require a minimum improvement so the loop is guaranteed to terminate
                {
                    Array.Copy(tempPI, initialStateDistribution, tempPI.Length);
                    Array.Copy(tempA, stateTransitionProbabilities, tempA.Length);
                    Array.Copy(tempB, symbolDistributionDiscrete, tempB.Length);
                }
                else
                {
                    iterating = false;
                }
            }
        }
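
The method above is Baum-Welch re-estimation over multiple observation sequences using scaled forward/backward variables. Below is a minimal usage sketch, assuming a DiscreteHMM<int, int> built with the (symbols, states, A, pi, B) constructor seen in the re-estimation step; the starting parameter values and sequences are illustrative assumptions, not part of the original code.

// Sketch only: constructor signature taken from the re-estimation step above;
// starting values are illustrative assumptions.
using System.Collections.Generic;

int[] symbols = { 0, 1, 2 };   // discrete observation alphabet
int[] states  = { 0, 1 };      // hidden states

double[,] A  = { { 0.6, 0.4 }, { 0.5, 0.5 } };           // transition probabilities (rows sum to 1)
double[,] B  = { { 0.4, 0.3, 0.3 }, { 0.2, 0.3, 0.5 } }; // emission probabilities per state
double[]  pi = { 0.5, 0.5 };                             // initial state distribution

var hmm = new DiscreteHMM<int, int>(symbols, states, A, pi, B);

// Several observation sequences, e.g. quantized gesture samples.
var sequences = new List<int[]>
{
    new[] { 0, 1, 1, 2, 2 },
    new[] { 0, 0, 1, 2, 1 },
};

hmm.TrainHMMScaled(sequences); // internal method: callable from within the same assembly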
Example #2
 internal GestureRecognizer(HMMModel model)
 {
     hmm = new DiscreteHMM<int, int>(model, symbols, states);
 }
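
For context, a hypothetical call site for this constructor; how an HMMModel is obtained is not shown above, so the loading helper here is an assumption for illustration only.

// Hypothetical usage: LoadModelFromStorage is an assumed helper, not part of the code above.
HMMModel model = LoadModelFromStorage("swipe_right.hmm");
var recognizer = new GestureRecognizer(model); // internal constructor: same-assembly use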
Example #3
        internal void TrainHMM(T[] trainingData)
        {
            bool iterating = true;

            double[] tempPI = new double[initialStateDistribution.Length];
            double[,] tempA = new double[stateTransitionProbabilities.GetLength(0), stateTransitionProbabilities.GetLength(1)];
            double[,] tempB = new double[symbolDistributionDiscrete.GetLength(0), symbolDistributionDiscrete.GetLength(1)];
            double[,] alpha;
            double[,] beta;
            double[, ,] xi  = new double[trainingData.Length - 1, states.Length, states.Length];
            double[,] gamma = new double[trainingData.Length, states.Length];
            DiscreteHMM<T, U> newHMM;

            while (iterating)
            {
                alpha = ForwardVariables(trainingData);
                beta  = BackwardVariables(trainingData);
                double totalProb = FastObservationSequenceProbability(trainingData);

                //Compute xi and gamma
                for (int t = 0; t < trainingData.Length; t++)
                {
                    for (int i = 0; i < states.Length; i++)
                    {
                        if (t < trainingData.Length - 1)
                        {
                            for (int j = 0; j < states.Length; j++)
                            {
                                xi[t, i, j] = (alpha[t, i] * stateTransitionProbabilities[i, j] * SymbolProbability(trainingData[t + 1], states[j]) * beta[t + 1, j]) / totalProb;
                            }
                        }
                        gamma[t, i] = (alpha[t, i] * beta[t, i]) / totalProb;
                    }
                }

                for (int i = 0; i < states.Length; i++)
                {
                    tempPI[i] = gamma[0, i];

                    for (int j = 0; j < states.Length; j++)
                    {
                        double aNum = 0;
                        double aDen = 0;
                        for (int t = 0; t < trainingData.Length - 1; t++)
                        {
                            aNum += xi[t, i, j];
                            aDen += gamma[t, i];
                        }
                        tempA[i, j] = aNum / aDen;
                    }
                }

                for (int j = 0; j < states.Length; j++)
                {
                    for (int k = 0; k < symbols.Length; k++)
                    {
                        double bNum = 0.0;
                        double bDen = 0.0;

                        for (int t = 0; t < trainingData.Length; t++)
                        {
                            if (trainingData[t].Equals(symbols[k]))
                            {
                                bNum += gamma[t, j];
                            }
                            bDen += gamma[t, j];
                        }

                        tempB[j, k] = bNum / bDen;
                    }
                }


                newHMM = new DiscreteHMM<T, U>(symbols, states, tempA, tempPI, tempB);
                if (newHMM.LogFastObservationSequenceProbability(trainingData) > LogFastObservationSequenceProbability(trainingData) + 1e-8) //Require a minimum improvement, as in the scaled version, so the loop terminates
                {
                    Array.Copy(tempPI, initialStateDistribution, tempPI.Length);
                    Array.Copy(tempA, stateTransitionProbabilities, tempA.Length);
                    Array.Copy(tempB, symbolDistributionDiscrete, tempB.Length);
                }
                else
                {
                    iterating = false;
                }
            }
        }
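
Both TrainHMM and TrainHMMScaled implement the standard Baum-Welch re-estimation formulas from Rabiner's tutorial. For reference, the quantities the loops above compute are, in unscaled single-sequence form:

\[
\xi_t(i,j) = \frac{\alpha_t(i)\,a_{ij}\,b_j(O_{t+1})\,\beta_{t+1}(j)}{P(O\mid\lambda)},\qquad
\gamma_t(i) = \frac{\alpha_t(i)\,\beta_t(i)}{P(O\mid\lambda)}
\]

\[
\bar\pi_i = \gamma_1(i),\qquad
\bar a_{ij} = \frac{\sum_{t=1}^{T-1}\xi_t(i,j)}{\sum_{t=1}^{T-1}\gamma_t(i)},\qquad
\bar b_j(v) = \frac{\sum_{t\,:\,O_t = v}\gamma_t(j)}{\sum_{t=1}^{T}\gamma_t(j)}
\]

In the scaled version the divisions by P(O|lambda) are absorbed into the scaling coefficients c_t, and the numerators and denominators are accumulated over all training sequences before the final division.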