Example #1
0
        /// <summary>
        /// Reestimates this HMM's parameters (pi, A, B) from multiple observation sequences using
        /// the scaled Baum-Welch algorithm, iterating until the total log-likelihood of the
        /// training data stops improving by more than a small margin.
        /// </summary>
        /// <param name="trainingData">The observation sequences to train on; each element is one sequence of symbols.</param>
        internal void TrainHMMScaled(List <T[]> trainingData)
        {
            bool iterating = true;

            while (iterating)
            {
                //Accumulators for the reestimated parameters, summed across all training sequences
                double[] tempPI = new double[initialStateDistribution.Length];
                double[,] tempA    = new double[stateTransitionProbabilities.GetLength(0), stateTransitionProbabilities.GetLength(1)];
                double[,] tempANum = new double[stateTransitionProbabilities.GetLength(0), stateTransitionProbabilities.GetLength(1)];
                //The transition denominator is independent of the destination state, so a 1-D array suffices
                double[] tempADen = new double[stateTransitionProbabilities.GetLength(0)];
                double[,] tempB    = new double[symbolDistributionDiscrete.GetLength(0), symbolDistributionDiscrete.GetLength(1)];
                double[,] tempBNum = new double[symbolDistributionDiscrete.GetLength(0), symbolDistributionDiscrete.GetLength(1)];
                //NOTE(review): tempBDen[j, v] accumulates the same sum for every v; it could be
                //collapsed to a 1-D array like tempADen, but is kept 2-D here to preserve layout.
                double[,] tempBDen = new double[symbolDistributionDiscrete.GetLength(0), symbolDistributionDiscrete.GetLength(1)];
                double[,] alpha;
                double[,] beta;
                double[]           c;
                DiscreteHMM <T, U> newHMM;

                for (int k = 0; k < trainingData.Count; k++)
                {
                    double[, ,] xi  = new double[trainingData[k].Length - 1, states.Length, states.Length];
                    double[,] gamma = new double[trainingData[k].Length, states.Length];
                    //alpha/beta are the scaled forward/backward variables; c holds the per-time scaling factors
                    ForwardAndBackwardVariablesScaled(trainingData[k], out alpha, out beta, out c);

                    //Compute xi and gamma
                    //With scaled variables the scaling factors cancel, so xi needs no division by
                    //the sequence probability (cf. Rabiner's scaled reestimation formulas).
                    for (int t = 0; t < trainingData[k].Length; t++)
                    {
                        for (int i = 0; i < states.Length; i++)
                        {
                            if (t < trainingData[k].Length - 1)
                            {
                                for (int j = 0; j < states.Length; j++)
                                {
                                    xi[t, i, j] = (alpha[t, i] * stateTransitionProbabilities[i, j] * SymbolProbability(trainingData[k][t + 1], states[j]) * beta[t + 1, j]);
                                }
                            }
                            //NOTE(review): dividing by c[t] assumes a particular scaling convention in
                            //ForwardAndBackwardVariablesScaled — confirm against that implementation.
                            gamma[t, i] = (alpha[t, i] * beta[t, i]) / c[t];
                        }
                    }

                    //Accumulate the numerators and denominators for pi and A from this sequence
                    for (int i = 0; i < states.Length; i++)
                    {
                        tempPI[i] += gamma[0, i];

                        for (int j = 0; j < states.Length; j++)
                        {
                            double aNum = 0;

                            for (int t = 0; t < trainingData[k].Length - 1; t++)
                            {
                                aNum += xi[t, i, j];
                            }
                            tempANum[i, j] += aNum;
                        }

                        double aDen = 0;
                        for (int t = 0; t < trainingData[k].Length - 1; t++)
                        {
                            aDen += gamma[t, i];
                        }
                        tempADen[i] += aDen;
                    }

                    //Accumulate the numerators and denominators for B from this sequence
                    for (int j = 0; j < states.Length; j++)
                    {
                        for (int v = 0; v < symbols.Length; v++)
                        {
                            double bNum = 0.0;
                            double bDen = 0.0;

                            for (int t = 0; t < trainingData[k].Length; t++)
                            {
                                if (trainingData[k][t].Equals(symbols[v]))
                                {
                                    bNum += gamma[t, j];
                                }
                                bDen += gamma[t, j];
                            }

                            tempBNum[j, v] += bNum;
                            tempBDen[j, v] += bDen;
                        }
                    }
                }

                //Calculate the final reestimated values
                for (int i = 0; i < states.Length; i++)
                {
                    tempPI[i] = tempPI[i] / (double)trainingData.Count; //this isn't in Rabiner's tutorial; however, I think it is needed to reestimate ergodic models from multiple sequences

                    for (int j = 0; j < states.Length; j++)
                    {
                        tempA[i, j] = tempANum[i, j] / tempADen[i];
                    }

                    for (int v = 0; v < symbols.Length; v++)
                    {
                        tempB[i, v] = tempBNum[i, v] / tempBDen[i, v];
                    }
                }

                //Accept the reestimated model only if it improves the total log-likelihood
                newHMM = new DiscreteHMM <T, U>(symbols, states, tempA, tempPI, tempB);
                double pOld = 0.0;
                double pNew = 0.0;
                for (int k = 0; k < trainingData.Count; k++)
                {
                    pOld += LogFastObservationSequenceProbability(trainingData[k]);         //If you want to multiply variables, you add the logs... DUH!
                    pNew += newHMM.LogFastObservationSequenceProbability(trainingData[k]);
                }

                if (pNew > pOld + .00000001) //Add some error margin so this won't loop forever
                {
                    Array.Copy(tempPI, initialStateDistribution, tempPI.Length);
                    Array.Copy(tempA, stateTransitionProbabilities, tempA.Length);
                    Array.Copy(tempB, symbolDistributionDiscrete, tempB.Length);
                }
                else
                {
                    iterating = false;
                }
            }
        }
Example #2
0
        /// <summary>
        /// Trains the gesture recognizer for the given joint: normalizes the skeleton sequences by
        /// shoulder width, clusters the joint positions with K-means, trains the HMM on the
        /// resulting symbol sequences, and derives a detection threshold and sequence length
        /// from the training data.
        /// </summary>
        /// <param name="trainingData">Recorded skeleton sequences, one inner list per training example.</param>
        /// <param name="joint">The joint whose motion defines the gesture.</param>
        internal void TrainGesture(List <List <KinectSkeleton> > trainingData, JointType joint)
        {
            List <List <Point3D> > normalizedTrainingData = new List <List <Point3D> >();

            //Normalize all the training data.  We average the data per skeleton sequence in case it was trained with multiple people
            for (int i = 0; i < trainingData.Count; i++)
            {
                //Find the average shoulder width (running mean over frames where both shoulders are tracked)
                double tempShoulderWidth = 0.0;
                int    n = 0;

                for (int j = 0; j < trainingData[i].Count; j++)
                {
                    if (trainingData[i][j].skeleton[JointType.ShoulderRight].TrackingState == TrackingState.Tracked && trainingData[i][j].skeleton[JointType.ShoulderLeft].TrackingState == TrackingState.Tracked)
                    {
                        //Calculate the distance between the shoulders
                        double temp = (trainingData[i][j].skeleton[JointType.ShoulderRight].Position - trainingData[i][j].skeleton[JointType.ShoulderLeft].Position).Length;

                        tempShoulderWidth += (temp - tempShoulderWidth) / (double)(n + 1);
                        n++;
                    }
                }

                //Normalize this skeleton sequence
                normalizedTrainingData.Add(new List <Point3D>());
                for (int j = 0; j < trainingData[i].Count; j++)
                {
                    normalizedTrainingData[i].Add(GetNormalizedRelativePosition(trainingData[i][j].skeleton, joint, tempShoulderWidth));
                }
            }

            //Find the K-mean centroids over all sequences pooled together
            List <Point3D> combinedData = new List <Point3D>();

            for (int i = 0; i < normalizedTrainingData.Count; i++)
            {
                combinedData.AddRange(normalizedTrainingData[i]);
            }
            List <Point3D> centroids = KMeans.FindKMeanCentroids(combinedData, symbols.Length);

            //Convert the training data to lists of cluster numbers
            List <int[]> clusteredTrainingData = new List <int[]>();

            for (int i = 0; i < normalizedTrainingData.Count; i++)
            {
                clusteredTrainingData.Add(new int[normalizedTrainingData[i].Count]);

                for (int j = 0; j < clusteredTrainingData[i].Length; j++)
                {
                    clusteredTrainingData[i][j] = KMeans.FindNearestCluster(centroids, normalizedTrainingData[i][j]);
                }
            }

            //Train the HMM on the clustered data
            hmm.TrainHMMScaled(clusteredTrainingData);
            kCentroids = centroids;

            //Test the trained HMM against the training data to determine what the threshold should be
            double aveProb = 0.0;

            for (int i = 0; i < clusteredTrainingData.Count; i++)
            {
                double logProb = hmm.LogFastObservationSequenceProbability(clusteredTrainingData[i]);
                aveProb += (logProb - aveProb) / (double)(i + 1);
            }
            rawLogThreshold = aveProb * thresholdScalar; //Set the baseline threshold by scaling the average log probability
            //Note: the probabilities are logs of small numbers, so they are negative values (~-25 normally)
            //That means that a higher multiplier will make it more sensitive, a lower number less sensitive


            //Determine the length of sequence to keep using Welford's running mean/variance
            double averageSequenceLength = 0;
            double moment2 = 0.0;

            for (int i = 0; i < trainingData.Count; i++)
            {
                double delta = (double)trainingData[i].Count - averageSequenceLength;
                averageSequenceLength += delta / (double)(i + 1);
                moment2 += delta * ((double)trainingData[i].Count - averageSequenceLength);
            }
            double stdDev = Math.Sqrt(moment2 / (double)(trainingData.Count - 1));

            //stdDev is NaN when there is only one training sequence (0/0 above); fall back to the mean alone.
            //(A dead pre-assignment of Math.Floor(averageSequenceLength) was removed here — both
            //branches below unconditionally overwrite sequenceLength.)
            if (double.IsNaN(stdDev))
            {
                sequenceLength = (int)Math.Ceiling(averageSequenceLength);
            }
            else
            {
                sequenceLength = (int)Math.Ceiling(averageSequenceLength + 2 * stdDev);
            }
        }
Example #3
0
        /// <summary>
        /// Reestimates this HMM's parameters (pi, A, B) from a single observation sequence using
        /// the unscaled Baum-Welch algorithm, iterating until the log-likelihood stops improving.
        /// NOTE(review): this version uses unscaled forward/backward variables, so alpha/beta can
        /// underflow on long sequences — prefer TrainHMMScaled for long inputs; confirm intended use.
        /// </summary>
        /// <param name="trainingData">The observation sequence to train on.</param>
        internal void TrainHMM(T[] trainingData)
        {
            bool iterating = true;

            double[] tempPI = new double[initialStateDistribution.Length];
            double[,] tempA = new double[stateTransitionProbabilities.GetLength(0), stateTransitionProbabilities.GetLength(1)];
            double[,] tempB = new double[symbolDistributionDiscrete.GetLength(0), symbolDistributionDiscrete.GetLength(1)];
            double[,] alpha;
            double[,] beta;
            double[, ,] xi  = new double[trainingData.Length - 1, states.Length, states.Length];
            double[,] gamma = new double[trainingData.Length, states.Length];
            DiscreteHMM <T, U> newHMM;

            while (iterating)
            {
                alpha = ForwardVariables(trainingData);
                beta  = BackwardVariables(trainingData);
                double totalProb = FastObservationSequenceProbability(trainingData);

                //Compute xi and gamma (Rabiner's reestimation quantities, normalized by P(O|model))
                for (int t = 0; t < trainingData.Length; t++)
                {
                    for (int i = 0; i < states.Length; i++)
                    {
                        if (t < trainingData.Length - 1)
                        {
                            for (int j = 0; j < states.Length; j++)
                            {
                                xi[t, i, j] = (alpha[t, i] * stateTransitionProbabilities[i, j] * SymbolProbability(trainingData[t + 1], states[j]) * beta[t + 1, j]) / totalProb;
                            }
                        }
                        gamma[t, i] = (alpha[t, i] * beta[t, i]) / totalProb;
                    }
                }

                //Reestimate pi and A
                for (int i = 0; i < states.Length; i++)
                {
                    tempPI[i] = gamma[0, i];

                    //The denominator does not depend on the destination state j, so compute it
                    //once per source state instead of recomputing it inside the j loop
                    //(was O(N^2 * T); now O(N * T) per source state block).
                    double aDen = 0;
                    for (int t = 0; t < trainingData.Length - 1; t++)
                    {
                        aDen += gamma[t, i];
                    }

                    for (int j = 0; j < states.Length; j++)
                    {
                        double aNum = 0;
                        for (int t = 0; t < trainingData.Length - 1; t++)
                        {
                            aNum += xi[t, i, j];
                        }
                        tempA[i, j] = aNum / aDen;
                    }
                }

                //Reestimate B: expected count of symbol k in state j over expected time in state j
                for (int j = 0; j < states.Length; j++)
                {
                    for (int k = 0; k < symbols.Length; k++)
                    {
                        double bNum = 0.0;
                        double bDen = 0.0;

                        for (int t = 0; t < trainingData.Length; t++)
                        {
                            if (trainingData[t].Equals(symbols[k]))
                            {
                                bNum += gamma[t, j];
                            }
                            bDen += gamma[t, j];
                        }

                        tempB[j, k] = bNum / bDen;
                    }
                }


                //Accept the reestimated model only if it improves the log-likelihood
                newHMM = new DiscreteHMM <T, U>(symbols, states, tempA, tempPI, tempB);
                if (newHMM.LogFastObservationSequenceProbability(trainingData) > LogFastObservationSequenceProbability(trainingData))
                {
                    Array.Copy(tempPI, initialStateDistribution, tempPI.Length);
                    Array.Copy(tempA, stateTransitionProbabilities, tempA.Length);
                    Array.Copy(tempB, symbolDistributionDiscrete, tempB.Length);
                }
                else
                {
                    iterating = false;
                }
            }
        }