Example #1
 /// <summary>Extracts a cross validation set from a given set</summary>
 /// <param name="Set">Set to extract cross validation from</param>
 /// <param name="CrossValidationSetPercent">Percentage of elements to extract</param>
 public static TrainingSet GetCrossValidationSet(TrainingSet Set, float CrossValidationSetPercent)
 {
     TrainingSet CrossValidationSet = new TrainingSet();
     int nCrossSet = (int)(CrossValidationSetPercent * (float)Set.getN);
     Random rnd = new Random();
     for (int i = 0; i < nCrossSet; i++)
     {
     int ind = rnd.Next(0, Set.trainingArray.Count); // Next's upper bound is exclusive
         TrainingUnit u = Set.trainingArray[ind];
         Set.trainingArray.Remove(u);
         CrossValidationSet.addTrainingUnit(u);
     }
     return CrossValidationSet;
 }
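 /// <summary>Usage sketch (hypothetical helper, not in the original source): holds out
 /// 20% of a populated set. The chosen units are removed from fullSet itself, so the
 /// source set shrinks by the same amount.</summary>
 private static TrainingSet HoldOutTwentyPercent(TrainingSet fullSet)
 {
     return GetCrossValidationSet(fullSet, 0.2f);
 }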
        /// <summary>Trains current SVM with cross-validation, adjusting kernel parameter lambda and box parameter C.
        /// Returns best achieved efficiency.</summary>
        /// <param name="CrossValidationSet">Cross validation set</param>
        /// <param name="LambdaSet">Lambda set</param>
        /// <param name="CSet">C values set</param>
        public float TrainWithCrossValidation(TrainingSet CrossValidationSet, float[] LambdaSet, float[] CSet)
        {
            foreach (float _lambda in LambdaSet)
            {
                for (int i = 0; i < SVMs.Count; i++)
                {
                    SVMs[i].ProblemCfg.lambda = _lambda;
                }

                foreach (float _c in CSet)
                {
                    for (int i = 0; i < SVMs.Count; i++)
                    {
                        SVMs[i].ProblemCfg.c = _c;
                        SVMs[i].Train();
                    }

                    float performance = this.GetHitRate(CrossValidationSet);

                    if (CrossValParams == null)
                    {
                        CrossValParams = new float[3];
                    }

                    if (performance > CrossValParams[0])
                    {
                        CrossValParams[0] = performance;
                        CrossValParams[1] = _lambda;
                        CrossValParams[2] = _c;
                    }
                }
            }

            //Train with best parameters
            for (int i = 0; i < SVMs.Count; i++)
            {
                SVMs[i].ProblemCfg.lambda = CrossValParams[1];
                SVMs[i].ProblemCfg.c      = CrossValParams[2];
                SVMs[i].Train();
            }

            return(CrossValParams[0]);
        }
            /// <summary>Computes the i-th row of the kernel matrix K[i][j]</summary>
            /// <param name="problemSolution">SVM to solve</param>
            /// <param name="i">Kernel row number to compute</param>
            private static void ComputeKernels(SVM problemSolution, int i)
            {
                if (problemSolution.TrainingSet.IsKernelCalculated[i])
                {
                    return;
                }
                TrainingSet trainingSet = problemSolution.TrainingSet;

                trainingSet.kernels[i] = new float[trainingSet.getN];
                trainingSet.IsKernelCalculated[i] = true;

                for (int j = 0; j < trainingSet.getN; j++)
                {
                    trainingSet.kernels[i][j] = calculateSingleKernel(trainingSet.trainingArray[i], trainingSet.trainingArray[j], problemSolution);
                    //trainingSet.kernels[j][i] = trainingSet.kernels[i][j];
                }
            }
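            // Editorial note: K is symmetric for the kernels used here, so the disabled
            // mirror write above (kernels[j][i] = kernels[i][j]) would be valid; it stays
            // disabled because rows are cached lazily and row j may not be allocated yet.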
Example #4
        /// <summary>Computes all kernels and errors, accelerating with OpenCL</summary>
        /// <param name="problemSolution">Problem solution SVM</param>
        public static void CLcalculateAllKernels(SVM problemSolution)
        {
            TrainingSet   trainingSet   = problemSolution.TrainingSet;
            ProblemConfig problemConfig = problemSolution.ProblemCfg;


            trainingSet.errors             = new float[trainingSet.getN];
            trainingSet.kernels            = new float[trainingSet.getN][];
            trainingSet.IsKernelCalculated = new bool[trainingSet.getN];

            // Caching kernels
            for (int i = 0; i < trainingSet.getN; i++)
            {
                if (problemSolution.alphaList[i] != 0)
                {
                    CLComputeKernels(problemSolution, i);
                }
            }
        }
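        // Editorial note: only rows whose alpha is nonzero are precomputed here; units with
        // alpha == 0 are not support vectors and contribute nothing to f(x), so their kernel
        // rows are left for CLComputeKernels to fill lazily if SMO later needs them.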
Example #5
        /// <summary>Updates the cached error vector on the GPU after alphas i, j and threshold b change</summary>
        private static void CLupdateErrorsCache(TrainingSet trainingSet, SVM svm,
                                                float oldAlphai, float newAlphai, int iIndex,
                                                float oldAlphaj, float newAlphaj, int jIndex,
                                                float oldB, float newB)
        {
            float alphaiDif = newAlphai - oldAlphai;
            float alphajDif = newAlphaj - oldAlphaj;
            float BDif      = newB - oldB;

            if (trainingSet.trainingArray[iIndex].y < 0)
            {
                alphaiDif = -alphaiDif;
            }
            if (trainingSet.trainingArray[jIndex].y < 0)
            {
                alphajDif = -alphajDif;
            }

            lock (CLResource)
            {
                //Writes kernel values
                if (svm.CLKi == null || svm.CLKi.OriginalVarLength != svm.TrainingSet.errors.Length)
                {
                    svm.CLKi = new CLCalc.Program.Variable(svm.TrainingSet.kernels[iIndex]);
                    svm.CLKj = new CLCalc.Program.Variable(svm.TrainingSet.kernels[jIndex]);
                }
                else
                {
                    svm.CLKi.WriteToDevice(svm.TrainingSet.kernels[iIndex]);
                    svm.CLKj.WriteToDevice(svm.TrainingSet.kernels[jIndex]);
                }
                float[] p = new float[3] {
                    alphaiDif, BDif, alphajDif
                };
                svm.CLUpdtErrParams.WriteToDevice(p);

                //Executes update using GPU
                kernelUpdateErr.Execute(new CLCalc.Program.Variable[] { svm.CLerr, svm.CLKi, svm.CLKj, svm.CLUpdtErrParams }, svm.TrainingSet.getN);

                svm.CLerr.ReadFromDeviceTo(svm.TrainingSet.errors);
            }
        }
            /// <summary>Evaluates F(x) = sum_i(alpha_i * y_i * K(x_i, x_indexX)) + b using cached kernel rows</summary>
            /// <param name="indexX">Index of the training unit at which to evaluate F</param>
            /// <param name="currentSolution">Current SVM solution</param>
            private static float calculateFx(int indexX, SVM currentSolution)
            {
                TrainingSet   trainingSet   = currentSolution.TrainingSet;
                ProblemConfig problemConfig = currentSolution.ProblemCfg;

                float sum = 0;

                for (int i = 0; i < trainingSet.getN; i++)
                {
                    if (trainingSet.trainingArray[i].y > 0)
                    {
                        sum += currentSolution.alphaList[i] * trainingSet.kernels[i][indexX];
                    }
                    else
                    {
                        sum -= currentSolution.alphaList[i] * trainingSet.kernels[i][indexX];
                    }
                }
                return(sum + currentSolution.b);
            }
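            // Editorial note: calculateFx reads the cached kernel rows, so it only works for
            // units already in the training set; predictOutput (shown later under Example #9)
            // recomputes each kernel because an untrained unit has no cached row.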
Example #7
        /// <summary>Loads and classifies dataset</summary>
        private void LoadMITFaceClassifier()
        {
            /*
             *
             * CBCL Face Database #1
             * MIT Center for Biological and Computational Learning
             *
             */
            string p         = System.Windows.Forms.Application.StartupPath;
            string fileTrain = p + "\\svm.train.normgrey";
            string fileTest  = p + "\\svm.test.normgrey";


            tSet = new TrainingSet();

            //Fill from both files; we're not testing the results here
            FillTrainingSet(fileTrain, tSet);
            FillTrainingSet(fileTest, tSet);

            SVM = new MultiClassSVM(tSet);
        }
        /// <summary>Trains current SVM with cross-validation, adjusting kernel parameter lambda and box parameter C.
        /// Returns best achieved efficiency.</summary>
        /// <param name="CrossValidationSet">Cross validation set</param>
        public float TrainWithCrossValidation(TrainingSet CrossValidationSet)
        {
            Random rnd = new Random();

            float[] lambdaSet = new float[12];
            //lambdaSet[0] = 3E-9f * ((float)rnd.NextDouble() + 1);
            lambdaSet[0] = 3E-3f * ((float)rnd.NextDouble() + 1);
            for (int i = 1; i < lambdaSet.Length; i++)
            {
                lambdaSet[i] = 4.5f * lambdaSet[i - 1];
            }

            float[] cSet = new float[13];
            cSet[0] = 1E-5f * ((float)rnd.NextDouble() + 1);
            for (int i = 1; i < cSet.Length; i++)
            {
                cSet[i] = 2.0f * cSet[i - 1];
            }

            return(TrainWithCrossValidation(CrossValidationSet, lambdaSet, cSet));
        }
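        // Editorial note on the grids above: lambdaSet is geometric with ratio 4.5 (12 values
        // spanning roughly 3e-3 to 5e+4 after the random jitter in [1, 2)), and cSet doubles
        // 12 times from roughly 1e-5 to 8e-2, so the search sweeps several orders of magnitude
        // of both parameters before the three-argument overload picks the best pair.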
Example #9
        /// <summary>Adds a new self training example</summary>
        public void AddSelfTraining(int[] sbFrames, int faceIndex, Bitmap bmp)
        {
            if (SelfTSet == null)
            {
                SelfTSet = new TrainingSet();
            }
            float[] subF = new float[(sbFrames.Length / 3) * 364]; // 364 features per frame (sbFrames apparently holds 3 ints per frame)

            ExtractFeatures(sbFrames, subF, bmp);

            for (int i = 0; i < sbFrames.Length / 3; i++)
            {
                float[] x = new float[364];

                for (int k = 0; k < 364; k++)
                {
                    x[k] = subF[k + i * 364];
                }

                TrainingUnit tu = new TrainingUnit(x, i == faceIndex ? 1.0f : -1.0f);
                SelfTSet.addTrainingUnit(tu);
            }
        }
            /// <summary>
            /// Predicts the output of a single entry, given a previous problem, solution and correspondent training set
            /// </summary>
            /// <param name="problemSolution">Correspondent problem solution</param>
            /// <param name="untrainedUnit">Input features from which the output will be predicted</param>
            /// <returns>The SVM output F(x); a positive value indicates a positive classification</returns>
            public static float predictOutput(SVM problemSolution, TrainingUnit untrainedUnit)
            {
                TrainingSet   trainingSet   = problemSolution.TrainingSet;
                ProblemConfig problemConfig = problemSolution.ProblemCfg;

                // F(x) = sum + b
                // sum = summation of alpha_i * y_i * kernel(untrained unit, i) for all i in the training set
                float sum = 0;

                for (int i = 0; i < trainingSet.getN; i++)
                {
                    if (trainingSet.trainingArray[i].y > 0)
                    {
                        sum += problemSolution.alphaList[i] * calculateSingleKernel(trainingSet.trainingArray[i], untrainedUnit, problemSolution);
                    }
                    else
                    {
                        sum -= problemSolution.alphaList[i] * calculateSingleKernel(trainingSet.trainingArray[i], untrainedUnit, problemSolution);
                    }
                }

                return(sum + problemSolution.b);
            }
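            /// <summary>Minimal sketch of the RBF kernel that calculateSingleKernel presumably
            /// evaluates when ProblemConfig.KernelType.RBF is selected (an assumption: the
            /// actual implementation is not shown in these examples):
            /// K(x, z) = exp(-lambda * ||x - z||^2)</summary>
            private static float rbfKernelSketch(float[] x, float[] z, float lambda)
            {
                float dist2 = 0;
                for (int k = 0; k < x.Length; k++)
                {
                    float d = x[k] - z[k];   // component-wise difference
                    dist2 += d * d;          // accumulate squared Euclidean distance
                }
                return (float)Math.Exp(-lambda * dist2);
            }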
        /// <summary>Creates a new multiclass SVM using desired outputs from training set. Classifications -1.0f are negative for all sets</summary>
        /// <param name="TSet">Training set</param>
        /// <param name="SVMCfg">Configuration parameters</param>
        /// <param name="PreCalibrate">Precalibrate RBF parameter lambda? This will ignore the given value</param>
        private void initMultiSVM(TrainingSet TSet, ProblemConfig SVMCfg, bool PreCalibrate)
        {
            //Determine how many distinct classifications there are
            Classifications = new List <float>();
            foreach (TrainingUnit tu in TSet.trainingArray)
            {
                if (Classifications.IndexOf(tu.y) < 0 && tu.y != -1.0f)
                {
                    Classifications.Add(tu.y);
                }
            }

            //For each different possible classification, create a different SVM
            SVMs = new List <SVM>();
            foreach (float c in Classifications)
            {
                SVM svm = new SVM();
                svm.TrainingSet = new TrainingSet();
                svm.ProblemCfg  = SVMCfg.Clone();
                SVMs.Add(svm);

                foreach (TrainingUnit tu in TSet.trainingArray)
                {
                    TrainingUnit newTu = tu.Clone();
                    newTu.y = tu.y == c ? 1 : -1;
                    svm.TrainingSet.addTrainingUnit(newTu);
                }

                //Train svm
                if (PreCalibrate)
                {
                    svm.PreCalibrateCfg(0.8f / (float)Math.Sqrt(svm.TrainingSet.getN), 0.3f / (float)Math.Sqrt(svm.TrainingSet.getN));
                }
                svm.Train();
                svm.RemoveNonSupportVectors();
            }
        }
        /// <summary>Adds training units to a set from a file</summary>
        /// <param name="filename">File containing features</param>
        /// <param name="TrSet">Training set to be populated</param>
        private void FillTrainingSet(string filename, TrainingSet TrSet)
        {
            string sepdec = (1.5).ToString().Substring(1, 1); // current culture's decimal separator
            using (StreamReader sr = new StreamReader(filename))
            {
                string line;

                line = sr.ReadLine();
                int n = int.Parse(line); // number of examples
                line = sr.ReadLine();
                int dim = (int)Math.Sqrt(double.Parse(line)); // second line holds the squared image dimension

                for (int i = 0; i < n; i++)
                {
                    line = sr.ReadLine().Replace(".", sepdec);
                    string[] s = line.Split(new string[] { " " }, StringSplitOptions.RemoveEmptyEntries);
                    float[] x = new float[364];
                    float y;

                    for (int j = 0; j < s.Length - 1; j++) x[j] = float.Parse(s[j]);

                    y = float.Parse(s[s.Length - 1]);

                    TrSet.addTrainingUnit(new TrainingUnit(x, y));

                    /*
                     * Haar-like feature notes (translated and condensed from development chat):
                     * area of Rectangle(1, 1, 7, 4) normalized by 7*4, minus the area of
                     * Rectangle(8, 1, 3, 4) normalized by 3*4; likewise with Rectangle(11, 4, 7, 4);
                     * ((1,6,5,5) + (13,6,5,5)) / 2 - (7,6,5,6); and (1,1,17,5) - (1,6,17,8),
                     * each divided by its area.
                     */

                }
            }
        }
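        // Inferred file layout for FillTrainingSet (an assumption based on the parsing above):
        //   line 1: n, the number of examples
        //   line 2: the squared image dimension (the code takes its square root)
        //   lines 3..n+2: whitespace-separated feature values followed by the label y
        // Decimal points are written as '.' and remapped to the current culture's separator.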
Example #14
        /// <summary>Trains current SVM with cross-validation, adjusting kernel parameter lambda and box parameter C. Returns best performance so far</summary>
        /// <param name="CrossValidationSetPercent">Percentage of training examples that should be used as cross validation set</param>
        /// <param name="lambdaSet">Values of lambda to try</param>
        /// <param name="CSet">Values of c to try</param>
        public float TrainWithCrossValidation(float CrossValidationSetPercent, float[] lambdaSet, float[] CSet)
        {
            if (alphaList == null || alphaList.Count != TrainingSet.getN)
            {
                //Problem changed; previous values don't make sense
                initializeWithZeros();
                CrossValParams = null;
            }

            #region Constructs cross validation set

            TrainingSet CrossValidationSet = new TrainingSet();
            int nCrossSet = (int)(CrossValidationSetPercent * (float)this.TrainingSet.getN);
            Random rnd = new Random();
            for (int i = 0; i < nCrossSet; i++)
            {
                int ind = rnd.Next(0, this.TrainingSet.trainingArray.Count); // exclusive upper bound
                TrainingUnit u = this.TrainingSet.trainingArray[ind];
                this.TrainingSet.trainingArray.Remove(u);
                CrossValidationSet.addTrainingUnit(u);
            }

            #endregion

            #region Loops through lambdas and Cs and finds maximum crossvalidation

            foreach (float _lambda in lambdaSet)
            {
                this.ProblemCfg.lambda = _lambda;

                this.initializeWithZeros();
                PreComputeKernels();

                foreach (float _c in CSet)
                {
                    this.ProblemCfg.c = _c;

                    //ProblemSolver.solveSMOStartingFromPreviousSolution(this);
                    ProblemSolver.solveSMOStartingFromZero(this);

                    float performance = this.GetHitRate(CrossValidationSet);

                    if (CrossValParams == null) CrossValParams = new float[3];

                    if (performance > CrossValParams[0])
                    {
                        CrossValParams[0] = performance;
                        CrossValParams[1] = _lambda;
                        CrossValParams[2] = _c;
                    }
                }
            }

            #endregion

            #region Trains with best parameters so far

            this.ProblemCfg.lambda = CrossValParams[1];
            this.ProblemCfg.c = CrossValParams[2];
            this.Train();

            #endregion

            return CrossValParams[0];
        }
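        // Editorial note: lambda changes the kernel values themselves, so initializeWithZeros()
        // and PreComputeKernels() run once per lambda, while C only changes the box constraint
        // in SMO and the cached kernel matrix is reused across the inner C loop. That ordering
        // is what keeps this grid search affordable.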
Example #15
        /// <summary>Computes hit rates for a given test set</summary>
        /// <param name="samples">Test set to be used</param>
        public float GetHitRate(TrainingSet samples)
        {
            float rate = 0;

            foreach (TrainingUnit tu in samples.trainingArray)
            {
                bool c = Classify(tu);
                if ((c && tu.y == 1) || ((!c) && tu.y == -1)) rate++;
            }

            return rate / (float)samples.getN;
        }
Example #16
        /*
        /// <summary>
        /// Copy all values from another solution
        /// </summary>
        /// <param name="sourceSolution">The source to copy from</param>
        public void Load(SVM sourceSolution)
        {
            dimension = sourceSolution.dimension;
            alphaList = new float[dimension];
            for (int i = 0; i < dimension; i++)
            {
                alphaList[i] = sourceSolution.alphaList[i];
            }
            b = sourceSolution.b;
        }
        */
        /// <summary>
        /// Loads a previously saved solution from an XML file
        /// </summary>
        /// <param name="FileName">File containing the solution data</param>
        public void Load(string FileName)
        {
            DataSet d = new DataSet();
            d.ReadXml(FileName);
            DataTable t = d.Tables["Solution"];
            dimension = t.Rows.Count;

            //Configuration
            DataTable TblCfg = d.Tables["Config"];

            float valC, valTol; int valKernel, valMaxP;

            valC = (float)((double)TblCfg.Rows[0]["dblValues"]);
            valKernel = (int)((double)TblCfg.Rows[1]["dblValues"]);
            valTol = (float)((double)TblCfg.Rows[2]["dblValues"]);
            valMaxP = (int)((double)TblCfg.Rows[3]["dblValues"]);
            this.b = (float)((double)TblCfg.Rows[4]["dblValues"]);
            float Lambda = (float)((double)TblCfg.Rows[5]["dblValues"]);
            int xDim = (int)((double)TblCfg.Rows[6]["dblValues"]);

            //Reads classifications
            DataTable TblClassif = d.Tables["Classifications"];

            alphaList = new List<float>();
            TrainingSet = new TrainingSet();

            for (int i = 0; i < dimension; i++)
            {
                TrainingSet.addTrainingUnit(new TrainingUnit(new float[xDim], -1));
            }

            for (int i = 0; i < dimension; i++)
            {
                alphaList.Add((float)((double)t.Rows[i]["dblValues"]));
                TrainingSet.trainingArray[i].y = (float)((double)TblClassif.Rows[i]["dblValues"]) > 0 ? 1 : -1;
            }

            //Reads training set
            //Creates datatables for training examples
            DataTable Tbl = d.Tables["Examples"];
            for (int i = 0; i < dimension; i++)
            {
                for (int j = 0; j < xDim; j++)
                {
                    TrainingSet.trainingArray[i].xVector[j] = (float)((double)Tbl.Rows[j + i*xDim]["dblValues"]);
                }
            }

            this.ProblemCfg = new ProblemConfig(Lambda, valC, valTol, valMaxP, (ProblemConfig.KernelType)valKernel);

            if (OpenCLTemplate.CLCalc.CLAcceleration == OpenCLTemplate.CLCalc.CLAccelerationType.UsingCL)
            {
                this.WriteToDevice();
            }
        }
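        // Inferred XML layout read by Load (an assumption based on the code above): a DataSet
        // with four tables, each exposing a "dblValues" column:
        //   "Solution"        - one row per training unit, holding alpha_i
        //   "Config"          - rows 0..6: c, kernel type, tolerance, max passes, b, lambda, xDim
        //   "Classifications" - one row per training unit, holding y_i
        //   "Examples"        - dimension * xDim rows of flattened feature values, example-major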
Example #17
        /// <summary>Attempts to pre-calibrate configuration parameters.
        /// Finds a lambda that enhances similarities between positive examples
        /// and reduces similarities between positive and negative examples.
        /// Assumes that decreasing lambda increases kernel match.
        /// </summary>
        /// <param name="tolPositive">Positive kernels average should be greater than tolPositive</param>
        /// <param name="tolNegative">Negative kernels average should be less than tolNegative</param>
        public void PreCalibrateCfg(float tolPositive, float tolNegative)
        {
            #region Checks if there are positive and negative examples
            bool posSamples = false; bool negSamples = false;
            for (int i = 0; i < TrainingSet.trainingArray.Count; i++)
            {
                if (TrainingSet.trainingArray[i].y > 0) posSamples = true;
                if (TrainingSet.trainingArray[i].y < 0) negSamples = true;
                if (posSamples && negSamples) break;
            }
            if ((!posSamples) || (!negSamples)) throw new Exception("Training set must contain positive and negative samples");
            #endregion

            Random rnd = new Random();
            int nSet = (int)(20 * Math.Log(TrainingSet.getN, 2));

            TrainingSet PositiveExamples1 = new TrainingSet();
            TrainingSet PositiveExamples2 = new TrainingSet();
            TrainingSet NegativeExamples = new TrainingSet();

            //Kernel average for positive and negative samples
            float positiveAvg = 0, negativeAvg = 0;
            float invN = 1 / (float)nSet;
            int count = 0;

            float bestLambda = ProblemCfg.lambda;
            float maxPosNegAvg = -1.0f;

            while ((positiveAvg <= tolPositive || negativeAvg >= tolNegative) && count < nSet)
            {
                //Populates training sets
                PositiveExamples1.trainingArray.Clear();
                PositiveExamples2.trainingArray.Clear();
                NegativeExamples.trainingArray.Clear();
                while (PositiveExamples1.getN < nSet || PositiveExamples2.getN < nSet || NegativeExamples.getN < nSet)
                {
                    TrainingUnit tu = TrainingSet.trainingArray[rnd.Next(TrainingSet.trainingArray.Count)]; // exclusive upper bound
                    if (tu.y > 0 && PositiveExamples1.getN < nSet)
                        PositiveExamples1.addTrainingUnit(tu);
                    else if (tu.y > 0 && PositiveExamples2.getN < nSet)
                        PositiveExamples2.addTrainingUnit(tu);

                    if (tu.y < 0 && NegativeExamples.getN < nSet) NegativeExamples.addTrainingUnit(tu);
                }

                count++;

                positiveAvg = 0;
                negativeAvg = 0;
                for (int i = 0; i < nSet; i++)
                {
                    positiveAvg += ProblemSolver.calculateSingleKernel(PositiveExamples1.trainingArray[i], PositiveExamples2.trainingArray[i], this);
                    negativeAvg += ProblemSolver.calculateSingleKernel(PositiveExamples1.trainingArray[i], NegativeExamples.trainingArray[i], this);
                }
                positiveAvg *= invN;
                negativeAvg *= invN;

                if (maxPosNegAvg < positiveAvg - negativeAvg)
                {
                    bestLambda = ProblemCfg.lambda;
                    maxPosNegAvg = positiveAvg - negativeAvg;
                }

                //Desired: positiveAvg=1, negativeAvg = 0
                if (positiveAvg <= tolPositive) this.ProblemCfg.lambda *= 0.15f;
                else if (negativeAvg >= tolNegative) this.ProblemCfg.lambda *= 1.2f;
            }
            ProblemCfg.lambda = bestLambda;
        }
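        // Editorial note: the search relies on the RBF property that shrinking lambda widens
        // the kernel (raising all similarities) while growing it narrows the kernel. Hence the
        // asymmetric steps: lambda *= 0.15 when positives match too weakly, lambda *= 1.2 when
        // negatives match too strongly, keeping the best lambda seen so far. Sampling is done
        // with replacement, so the earlier positive/negative check suffices for termination.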
Example #18
            /// <summary>Incrementally updates the cached error vector after alphas i, j and threshold b change</summary>
            private static void updateErrorsCache(TrainingSet trainingSet, SVM currentSolution,
                float oldAlphai, float newAlphai, int iIndex,
                float oldAlphaj, float newAlphaj, int jIndex,
                float oldB, float newB)
            {
                float alphaiDif = newAlphai - oldAlphai;
                float alphajDif = newAlphaj - oldAlphaj;
                float BDif = newB - oldB;

                if (trainingSet.trainingArray[iIndex].y < 0) alphaiDif = -alphaiDif;
                if (trainingSet.trainingArray[jIndex].y < 0) alphajDif = -alphajDif;

                for (int t = 0; t < trainingSet.getN; t++)
                {
                    float variation = alphaiDif * trainingSet.kernels[iIndex][t];
                    variation += alphajDif * trainingSet.kernels[jIndex][t];
                    variation += BDif;

                    trainingSet.errors[t] += variation;
                }
            }
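            // Editorial note: when only alpha_i, alpha_j and b change, each cached error moves
            // by exactly y_i*dAlpha_i*K(i,t) + y_j*dAlpha_j*K(j,t) + dB; the sign flips above
            // fold the labels into the deltas. This O(n) update replaces an O(n^2) full
            // recomputation of the error cache after every SMO step.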
Example #19
 /// <summary>Adds a single alpha's contribution to every cached error</summary>
 private static void updateSingleError(TrainingSet trainingSet, SVM currentSolution,
     float newAlphai, int iIndex)
 {
     for (int t = 0; t < trainingSet.getN; t++)
     {
         float variation = 0;
         if (trainingSet.trainingArray[iIndex].y > 0)
         {
             variation += newAlphai * trainingSet.kernels[iIndex][t];
         }
         else
         {
             variation -= newAlphai * trainingSet.kernels[iIndex][t];
         }
         trainingSet.errors[t] += variation;
     }
 }
Example #20
        /// <summary>Creates a new multiclass SVM using desired outputs from training set. Classifications -1.0f are negative for all sets</summary>
        /// <param name="TSet">Training set</param>
        /// <param name="SVMCfg">Configuration parameters</param>
        private void initMultiSVM(TrainingSet TSet, ProblemConfig SVMCfg)
        {
            //Determine how many distinct classifications there are
            Classifications = new List<float>();
            foreach (TrainingUnit tu in TSet.trainingArray)
            {
                if (Classifications.IndexOf(tu.y) < 0 && tu.y != -1.0f) Classifications.Add(tu.y);
            }

            //For each different possible classification, create a different SVM
            SVMs = new List<SVM>();
            foreach (float c in Classifications)
            {
                SVM svm = new SVM();
                svm.TrainingSet = new TrainingSet();
                svm.ProblemCfg = SVMCfg.Clone();
                SVMs.Add(svm);

                foreach (TrainingUnit tu in TSet.trainingArray)
                {
                    TrainingUnit newTu = tu.Clone();
                    newTu.y = tu.y == c ? 1 : -1;
                    svm.TrainingSet.addTrainingUnit(newTu);
                }

                //Train svm
                svm.PreCalibrateCfg(0.8f / (float)Math.Sqrt(svm.TrainingSet.getN), 0.3f / (float)Math.Sqrt(svm.TrainingSet.getN));
                svm.Train();
                svm.RemoveNonSupportVectors();
            }
        }
 /// <summary>Creates a new multiclass SVM using desired outputs from training set. Classifications -1.0f are negative for all sets</summary>
 /// <param name="TSet">Training set</param>
 /// <param name="SVMCfg">Configuration parameters</param>
 public MultiClassSVM(TrainingSet TSet, ProblemConfig SVMCfg)
 {
     initMultiSVM(TSet, SVMCfg);
 }
Example #22
        /// <summary>Trains current SVM with cross-validation, adjusting kernel parameter lambda and box parameter C.
        /// Returns best achieved efficiency.</summary>
        /// <param name="CrossValidationSet">Cross validation set</param>
        public float TrainWithCrossValidation(TrainingSet CrossValidationSet)
        {
            Random rnd = new Random();

            float[] lambdaSet = new float[12];
            //lambdaSet[0] = 3E-9f * ((float)rnd.NextDouble() + 1);
            lambdaSet[0] = 3E-3f * ((float)rnd.NextDouble() + 1);
            for (int i = 1; i < lambdaSet.Length; i++) lambdaSet[i] = 4.5f * lambdaSet[i - 1];

            float[] cSet = new float[13];
            cSet[0] = 1E-5f * ((float)rnd.NextDouble() + 1);
            for (int i = 1; i < cSet.Length; i++) cSet[i] = 2.0f * cSet[i - 1];

            return TrainWithCrossValidation(CrossValidationSet, lambdaSet, cSet);
        }
        /// <summary>Loads and classifies dataset</summary>
        private void LoadMITFaceClassifier()
        {
            /*
             *
             * CBCL Face Database #1
             * MIT Center for Biological and Computational Learning
             *
             */
            string p = System.Windows.Forms.Application.StartupPath;
            string fileTrain = p + "\\svm.train.normgrey";
            string fileTest = p + "\\svm.test.normgrey";

            tSet = new TrainingSet();

            //Fill from both files; we're not testing the results here
            FillTrainingSet(fileTrain, tSet);
            FillTrainingSet(fileTest, tSet);

            SVM = new MultiClassSVM(tSet);
        }
Example #24
        /// <summary>Updates the cached error vector on the GPU after alphas i, j and threshold b change</summary>
        private static void CLupdateErrorsCache(TrainingSet trainingSet, SVM svm,
            float oldAlphai, float newAlphai, int iIndex,
            float oldAlphaj, float newAlphaj, int jIndex,
            float oldB, float newB)
        {
            float alphaiDif = newAlphai - oldAlphai;
            float alphajDif = newAlphaj - oldAlphaj;
            float BDif = newB - oldB;

            if (trainingSet.trainingArray[iIndex].y < 0) alphaiDif = -alphaiDif;
            if (trainingSet.trainingArray[jIndex].y < 0) alphajDif = -alphajDif;

            lock (CLResource)
            {
                //Writes kernel values
                if (svm.CLKi == null || svm.CLKi.OriginalVarLength != svm.TrainingSet.errors.Length)
                {
                    svm.CLKi = new CLCalc.Program.Variable(svm.TrainingSet.kernels[iIndex]);
                    svm.CLKj = new CLCalc.Program.Variable(svm.TrainingSet.kernels[jIndex]);
                }
                else
                {
                    svm.CLKi.WriteToDevice(svm.TrainingSet.kernels[iIndex]);
                    svm.CLKj.WriteToDevice(svm.TrainingSet.kernels[jIndex]);
                }
                float[] p = new float[3] { alphaiDif, BDif, alphajDif };
                svm.CLUpdtErrParams.WriteToDevice(p);

                //Executes update using GPU
                kernelUpdateErr.Execute(new CLCalc.Program.Variable[] { svm.CLerr, svm.CLKi, svm.CLKj, svm.CLUpdtErrParams }, svm.TrainingSet.getN);

                svm.CLerr.ReadFromDeviceTo(svm.TrainingSet.errors);
            }
        }
Example #25
        /// <summary>Attempts to pre-calibrate configuration parameters.
        /// Finds a lambda that enhances similarities between positive examples
        /// and reduces similarities between positive and negative examples.
        /// Assumes that decreasing lambda increases kernel match.
        /// </summary>
        /// <param name="tolPositive">Positive kernels average should be greater than tolPositive</param>
        /// <param name="tolNegative">Negative kernels average should be less than tolNegative</param>
        public void PreCalibrateCfg(float tolPositive, float tolNegative)
        {
            #region Checks if there are positive and negative examples
            bool posSamples = false; bool negSamples = false;
            for (int i = 0; i < TrainingSet.trainingArray.Count; i++)
            {
                if (TrainingSet.trainingArray[i].y > 0)
                {
                    posSamples = true;
                }
                if (TrainingSet.trainingArray[i].y < 0)
                {
                    negSamples = true;
                }
                if (posSamples && negSamples)
                {
                    break;
                }
            }
            if ((!posSamples) || (!negSamples))
            {
                throw new Exception("Training set must contain positive and negative samples");
            }
            #endregion

            Random rnd  = new Random();
            int    nSet = (int)(20 * Math.Log(TrainingSet.getN, 2));

            TrainingSet PositiveExamples1 = new TrainingSet();
            TrainingSet PositiveExamples2 = new TrainingSet();
            TrainingSet NegativeExamples  = new TrainingSet();

            //Kernel average for positive and negative samples
            float positiveAvg = 0, negativeAvg = 0;
            float invN  = 1 / (float)nSet;
            int   count = 0;

            float bestLambda   = ProblemCfg.lambda;
            float maxPosNegAvg = -1.0f;

            while ((positiveAvg <= tolPositive || negativeAvg >= tolNegative) && count < nSet)
            {
                //Populates training sets
                PositiveExamples1.trainingArray.Clear();
                PositiveExamples2.trainingArray.Clear();
                NegativeExamples.trainingArray.Clear();
                while (PositiveExamples1.getN < nSet || PositiveExamples2.getN < nSet || NegativeExamples.getN < nSet)
                {
                    TrainingUnit tu = TrainingSet.trainingArray[rnd.Next(TrainingSet.trainingArray.Count)]; // exclusive upper bound
                    if (tu.y > 0 && PositiveExamples1.getN < nSet)
                    {
                        PositiveExamples1.addTrainingUnit(tu);
                    }
                    else if (tu.y > 0 && PositiveExamples2.getN < nSet)
                    {
                        PositiveExamples2.addTrainingUnit(tu);
                    }

                    if (tu.y < 0 && NegativeExamples.getN < nSet)
                    {
                        NegativeExamples.addTrainingUnit(tu);
                    }
                }

                count++;

                positiveAvg = 0;
                negativeAvg = 0;
                for (int i = 0; i < nSet; i++)
                {
                    positiveAvg += ProblemSolver.calculateSingleKernel(PositiveExamples1.trainingArray[i], PositiveExamples2.trainingArray[i], this);
                    negativeAvg += ProblemSolver.calculateSingleKernel(PositiveExamples1.trainingArray[i], NegativeExamples.trainingArray[i], this);
                }
                positiveAvg *= invN;
                negativeAvg *= invN;

                if (maxPosNegAvg < positiveAvg - negativeAvg)
                {
                    bestLambda   = ProblemCfg.lambda;
                    maxPosNegAvg = positiveAvg - negativeAvg;
                }

                //Desired: positiveAvg=1, negativeAvg = 0
                if (positiveAvg <= tolPositive)
                {
                    this.ProblemCfg.lambda *= 0.15f;
                }
                else if (negativeAvg >= tolNegative)
                {
                    this.ProblemCfg.lambda *= 1.2f;
                }
            }
            ProblemCfg.lambda = bestLambda;
        }
Example #26
        /// <summary>Trains current SVM with cross-validation, adjusting kernel parameter lambda and box parameter C. Returns best performance so far</summary>
        /// <param name="CrossValidationSetPercent">Percentage of training examples that should be used as cross validation set</param>
        /// <param name="lambdaSet">Values of lambda to try</param>
        /// <param name="CSet">Values of c to try</param>
        public float TrainWithCrossValidation(float CrossValidationSetPercent, float[] lambdaSet, float[] CSet)
        {
            if (alphaList == null || alphaList.Count != TrainingSet.getN)
            {
                //Problem changed; previous values don't make sense
                initializeWithZeros();
                CrossValParams = null;
            }

            #region Constructs cross validation set

            TrainingSet CrossValidationSet = new TrainingSet();
            int         nCrossSet          = (int)(CrossValidationSetPercent * (float)this.TrainingSet.getN);
            Random      rnd = new Random();
            for (int i = 0; i < nCrossSet; i++)
            {
                int          ind = rnd.Next(0, this.TrainingSet.trainingArray.Count); // exclusive upper bound
                TrainingUnit u   = this.TrainingSet.trainingArray[ind];
                this.TrainingSet.trainingArray.Remove(u);
                CrossValidationSet.addTrainingUnit(u);
            }

            #endregion

            #region Loops through lambdas and Cs and finds maximum crossvalidation

            foreach (float _lambda in lambdaSet)
            {
                this.ProblemCfg.lambda = _lambda;

                this.initializeWithZeros();
                PreComputeKernels();

                foreach (float _c in CSet)
                {
                    this.ProblemCfg.c = _c;


                    //ProblemSolver.solveSMOStartingFromPreviousSolution(this);
                    ProblemSolver.solveSMOStartingFromZero(this);

                    float performance = this.GetHitRate(CrossValidationSet);

                    if (CrossValParams == null)
                    {
                        CrossValParams = new float[3];
                    }

                    if (performance > CrossValParams[0])
                    {
                        CrossValParams[0] = performance;
                        CrossValParams[1] = _lambda;
                        CrossValParams[2] = _c;
                    }
                }
            }

            #endregion

            #region Trains with best parameters so far

            this.ProblemCfg.lambda = CrossValParams[1];
            this.ProblemCfg.c      = CrossValParams[2];
            this.Train();

            #endregion

            return(CrossValParams[0]);
        }
Example #27
        /// <summary>Gets SVM hit rate</summary>
        /// <param name="TestSet">Test set</param>
        public float GetHitRate(TrainingSet TestSet)
        {
            float rate = 0, val;
            foreach (TrainingUnit tu in TestSet.trainingArray)
            {
                float resp = Classify(tu, out val);
                if (resp == tu.y) rate++;
            }

            return rate / (float)TestSet.trainingArray.Count;
        }
            /// <summary>
            /// Solves the SMO starting from the previous solution's alphas and threshold
            /// </summary>
            /// <param name="problemSolution">Solution to start from</param>
            /// <returns>Solution of the problem with alphas and threshold</returns>
            public static SVM solveSMOStartingFromPreviousSolution(SVM problemSolution)
            {
                System.Diagnostics.Stopwatch swTotalTime     = new System.Diagnostics.Stopwatch();
                System.Diagnostics.Stopwatch swHeuristica    = new System.Diagnostics.Stopwatch();
                System.Diagnostics.Stopwatch swComputeKernel = new System.Diagnostics.Stopwatch();
                System.Diagnostics.Stopwatch swUpdateError   = new System.Diagnostics.Stopwatch();
                swTotalTime.Start();


                ProblemConfig problemConfig = problemSolution.ProblemCfg;

                if (problemSolution.alphaList == null)
                {
                    problemSolution.initializeWithZeros();
                }
                ProblemSolver.calculateErrors(problemSolution);

                //Initializes GPU error vector
                if (OpenCLTemplate.CLCalc.CLAcceleration == OpenCLTemplate.CLCalc.CLAccelerationType.UsingCL)
                {
                    WriteCLErr(problemSolution);
                }

                TrainingSet trainingSet = problemSolution.TrainingSet;

                int passes = 0;
                int m      = trainingSet.getN;

                while (passes < problemConfig.maxPasses)
                {
                    int changedAlphas = 0;
                    for (int i = 0; i < m; i++)
                    {
                        float yi      = trainingSet.trainingArray[i].y;
                        float alpha_i = problemSolution.alphaList[i];
                        // Error between the SVM output on the ith training unit and the true ith output
                        float ei = trainingSet.errors[i];

                        // KKT conditions for ith element
                        if ((yi * ei < -problemConfig.tol && alpha_i < problemConfig.c) ||
                            (yi * ei > problemConfig.tol && alpha_i > 0))
                        {
                            swHeuristica.Start();

                            #region Computes j using maximum-variation heuristic
                            // Pick the j whose cached error is farthest from e_i
                            int j = 0;
                            if (trainingSet.errors[i] >= 0)
                            {
                                if (OpenCLTemplate.CLCalc.CLAcceleration == OpenCLTemplate.CLCalc.CLAccelerationType.UsingCL)
                                {
                                    j = CLFindMinError(problemSolution);
                                }
                                else
                                {
                                    float minError = trainingSet.errors[0];
                                    for (int k = 1; k < trainingSet.getN; k++)
                                    {
                                        if (minError > trainingSet.errors[k])
                                        {
                                            minError = trainingSet.errors[k];
                                            j        = k;
                                        }
                                    }
                                }
                            }
                            else
                            {
                                if (OpenCLTemplate.CLCalc.CLAcceleration == OpenCLTemplate.CLCalc.CLAccelerationType.UsingCL)
                                {
                                    j = CLFindMaxError(problemSolution);
                                }
                                else
                                {
                                    float maxError = trainingSet.errors[0];
                                    for (int k = 1; k < trainingSet.getN; k++)
                                    {
                                        if (maxError < trainingSet.errors[k])
                                        {
                                            maxError = trainingSet.errors[k];
                                            j        = k;
                                        }
                                    }
                                }
                            }
                            #endregion

                            swHeuristica.Stop();

                            float yj      = trainingSet.trainingArray[j].y;
                            float alpha_j = problemSolution.alphaList[j];
                            // Error between the SVM output on the jth training unit and the true jth output
                            float ej = trainingSet.errors[j];

                            // Save old alphas
                            float oldAlpha_i = problemSolution.alphaList[i];
                            float oldAlpha_j = problemSolution.alphaList[j];

                            #region Compute lower and higher bounds of alpha_j
                            float lowerBound;
                            float higherBound;
                            if (yi != yj)
                            {
                                lowerBound  = Math.Max(0, alpha_j - alpha_i);
                                higherBound = Math.Min(problemConfig.c, problemConfig.c + alpha_j - alpha_i);
                            }
                            else
                            {
                                lowerBound  = Math.Max(0, alpha_j + alpha_i - problemConfig.c);
                                higherBound = Math.Min(problemConfig.c, alpha_j + alpha_i);
                            }
                            #endregion

                            // Nothing to adjust if we can't set any value between those bounds
                            if (lowerBound == higherBound)
                            {
                                continue;
                            }


                            #region Compute eta
                            float kernel_xi_xj;
                            float kernel_xi_xi;
                            float kernel_xj_xj;

                            if (trainingSet.IsKernelCalculated[i])
                            {
                                kernel_xi_xj = trainingSet.kernels[i][j];
                            }
                            else if (trainingSet.IsKernelCalculated[j])
                            {
                                kernel_xi_xj = trainingSet.kernels[j][i];
                            }
                            else
                            {
                                kernel_xi_xj = calculateSingleKernel(trainingSet.trainingArray[i], trainingSet.trainingArray[j], problemSolution); //trainingSet.kernels[i][j];
                            }
                            if (trainingSet.IsKernelCalculated[i])
                            {
                                kernel_xi_xi = trainingSet.kernels[i][i];
                            }
                            else
                            {
                                kernel_xi_xi = calculateSingleKernel(trainingSet.trainingArray[i], trainingSet.trainingArray[i], problemSolution); //trainingSet.kernels[i][i];
                            }
                            if (trainingSet.IsKernelCalculated[j])
                            {
                                kernel_xj_xj = trainingSet.kernels[j][j];
                            }
                            else
                            {
                                kernel_xj_xj = calculateSingleKernel(trainingSet.trainingArray[j], trainingSet.trainingArray[j], problemSolution); //trainingSet.kernels[j][j];
                            }
                            float eta = 2 * kernel_xi_xj - kernel_xi_xi - kernel_xj_xj;
                            #endregion
                            if (eta >= 0)
                            {
                                continue;
                            }

                            // Compute new alpha_j
                            alpha_j = alpha_j - yj * (ei - ej) / eta;
                            // Clip alpha_j if necessary
                            if (alpha_j > higherBound)
                            {
                                alpha_j = higherBound;
                            }
                            else if (alpha_j < lowerBound)
                            {
                                alpha_j = lowerBound;
                            }

                            // If the changes are not big enough, just continue
                            if (Math.Abs(oldAlpha_j - alpha_j) < MIN_ALPHA_CHANGE)
                            {
                                continue;
                            }

                            swComputeKernel.Start();
                            //Needs to compute lines K[i][] and K[j][] since the alphas will change
                            if (OpenCLTemplate.CLCalc.CLAcceleration == OpenCLTemplate.CLCalc.CLAccelerationType.UsingCL)
                            {
                                CLComputeKernels(problemSolution, i);
                                CLComputeKernels(problemSolution, j);
                            }
                            else
                            {
                                ComputeKernels(problemSolution, i);
                                ComputeKernels(problemSolution, j);
                            }
                            swComputeKernel.Stop();


                            // Compute value for alpha_i
                            alpha_i = alpha_i + yi * yj * (oldAlpha_j - alpha_j);

                            // Compute b1, b2 and new b (threshold)
                            float oldB = problemSolution.b;
                            if (0 < alpha_i && alpha_i < problemConfig.c)
                            {
                                // b1 is enough in this case
                                float b1 = problemSolution.b - ei - yi * (alpha_i - oldAlpha_i) * kernel_xi_xi - yj * (alpha_j - oldAlpha_j) * kernel_xi_xj;
                                problemSolution.b = b1;
                            }
                            else if (0 < alpha_j && alpha_j < problemConfig.c)
                            {
                                // b2 is enough in this case
                                float b2 = problemSolution.b - ej - yi * (alpha_i - oldAlpha_i) * kernel_xi_xj - yj * (alpha_j - oldAlpha_j) * kernel_xj_xj;
                                problemSolution.b = b2;
                            }
                            else
                            {
                                // b is the average between b1 and b2
                                float b1 = problemSolution.b - ei - yi * (alpha_i - oldAlpha_i) * kernel_xi_xi - yj * (alpha_j - oldAlpha_j) * kernel_xi_xj;
                                float b2 = problemSolution.b - ej - yi * (alpha_i - oldAlpha_i) * kernel_xi_xj - yj * (alpha_j - oldAlpha_j) * kernel_xj_xj;
                                problemSolution.b = (b1 + b2) * 0.5f;
                            }

                            // Update the changed alphas in the solution
                            problemSolution.alphaList[i] = alpha_i;
                            problemSolution.alphaList[j] = alpha_j;

                            // Update errors cache
                            swUpdateError.Start();
                            if (OpenCLTemplate.CLCalc.CLAcceleration == OpenCLTemplate.CLCalc.CLAccelerationType.UsingCL)
                            {
                                CLupdateErrorsCache(trainingSet, problemSolution, oldAlpha_i, alpha_i, i, oldAlpha_j, alpha_j, j, oldB, problemSolution.b);
                            }
                            else
                            {
                                updateErrorsCache(trainingSet, problemSolution, oldAlpha_i, alpha_i, i, oldAlpha_j, alpha_j, j, oldB, problemSolution.b);
                            }

                            swUpdateError.Stop();

                            changedAlphas++;
                        }
                    }
                    if (changedAlphas == 0)
                    {
                        passes++;
                    }
                    else
                    {
                        passes = 0;
                    }
                }

                return(problemSolution);
            }
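            // Editorial summary of the SMO step above (standard simplified-SMO algebra):
            //   eta      = 2*K(i,j) - K(i,i) - K(j,j)   (must be negative to proceed)
            //   alpha_j <- clip(alpha_j - y_j*(e_i - e_j)/eta, L, H)
            //   alpha_i <- alpha_i + y_i*y_j*(oldAlpha_j - alpha_j)
            //   b       <- b1, b2, or their average, depending on which alpha lies strictly in (0, C)
            // The pass counter resets whenever an alpha changes, so the loop exits only after
            // maxPasses consecutive full sweeps make no progress.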
Example #29
        /// <summary>Trains current SVM with cross-validation, adjusting kernel parameter lambda and box parameter C.
        /// Returns best achieved efficiency.</summary>
        /// <param name="CrossValidationSet">Cross validation set</param>
        /// <param name="LambdaSet">Lambda set</param>
        /// <param name="CSet">C values set</param>
        public float TrainWithCrossValidation(TrainingSet CrossValidationSet, float[] LambdaSet, float[] CSet)
        {
            foreach (float _lambda in LambdaSet)
            {
                for (int i = 0; i < SVMs.Count; i++) SVMs[i].ProblemCfg.lambda = _lambda;

                foreach (float _c in CSet)
                {
                    for (int i = 0; i < SVMs.Count; i++)
                    {
                        SVMs[i].ProblemCfg.c = _c;
                        SVMs[i].Train();
                    }

                    float performance = this.GetHitRate(CrossValidationSet);

                    if (CrossValParams == null) CrossValParams = new float[3];

                    if (performance > CrossValParams[0])
                    {
                        CrossValParams[0] = performance;
                        CrossValParams[1] = _lambda;
                        CrossValParams[2] = _c;
                    }
                }
            }

            //Train with best parameters
            for (int i = 0; i < SVMs.Count; i++)
            {
                SVMs[i].ProblemCfg.lambda = CrossValParams[1];
                SVMs[i].ProblemCfg.c = CrossValParams[2];
                SVMs[i].Train();
            }

            return CrossValParams[0];
        }
Example #30
 /// <summary>Creates a new multiclass SVM using desired outputs from training set. Classifications -1.0f are negative for all sets</summary>
 /// <param name="TSet">Training set</param>
 /// <param name="SVMCfg">Configuration parameters</param>
 public MultiClassSVM(TrainingSet TSet, ProblemConfig SVMCfg)
 {
     initMultiSVM(TSet, SVMCfg);
 }
Example #31
 /// <summary>Creates a new multiclass SVM using desired outputs from training set. Classifications -1.0f are negative for all sets</summary>
 /// <param name="TSet">Training set</param>
 public MultiClassSVM(TrainingSet TSet)
 {
     ProblemConfig cfg = new ProblemConfig(2.529822E-8f * (float)Math.Sqrt(TSet.getN), 127.922182f, 1e-3f, 1, ProblemConfig.KernelType.RBF);
     initMultiSVM(TSet, cfg);
 }
Example #33
 /// <summary>Creates a new multiclass SVM using desired outputs from training set. Classifications -1.0f are negative for all sets</summary>
 /// <param name="TSet">Training set</param>
 /// <param name="SVMCfg">Configuration parameters</param>
 /// <param name="PreCalibrate">Precalibrate RBF parameter lambda? This will ignore the given value</param>
 public MultiClassSVM(TrainingSet TSet, ProblemConfig SVMCfg, bool PreCalibrate)
 {
     initMultiSVM(TSet, SVMCfg, PreCalibrate);
 }