public EmotionModel()
{
    // Register the available SVM model files, keyed by display name.
    // Paths are resolved relative to the executable's working directory.
    modelPathMap = new Dictionary<string, string>
    {
        { "混合模型(5秒)", Environment.CurrentDirectory + "\\models\\data_after_boardline_smote.scale.model" },
        { "混合模型(10秒)", Environment.CurrentDirectory + "\\models\\data_after_boardline_smote_and_pca_with_49.scale.model" },
        { "交互模型(5秒)", Environment.CurrentDirectory + "\\models\\data_after_undersampling.scale.model" },
        { "交互模型(10秒)", Environment.CurrentDirectory + "\\models\\data_after_undersampling_and_pca_with_49.scale.model" },
        { "人像模型", Environment.CurrentDirectory + "\\models\\data_after_undersampling_and_pca_with_49.scale.model" }
    };

    // Load the default model (mixed model, 5-second window).
    svmModel = SVM.LoadModel(modelPathMap["混合模型(5秒)"]);

    // Pre-allocate the feature vector with zero-valued nodes.
    // NOTE(review): node indices start at 0 here while other call sites in this
    // file use 1-based libsvm indices — confirm this is intended.
    for (int index = 0; index < featureNum; index++)
    {
        svmFeature[index] = new SVMNode(index, 0);
    }
}
// Visualize the trained model as a 300x300 decision-boundary plot.
// NOTE(review): the unconditional `return;` below disables this debug routine —
// everything after it is dead code. Remove the return statement to re-enable it.
public void Debug_DispPredict()
{
    return;

    // Load the trained model file.
    this.libSVM_model = SVM.LoadModel(@"libsvm_model.xml");
    using (IplImage retPlot = new IplImage(300, 300, BitDepth.U8, 3))
    {
        for (int x = 0; x < 300; x++)
        {
            for (int y = 0; y < 300; y++)
            {
                // Scale pixel coordinates into the [0, 1) feature range.
                float[] sample = { x / 300f, y / 300f };
                // Build the two-feature query (libsvm node indices are 1-based).
                SVMNode[] node_array = new SVMNode[2];
                node_array[0] = new SVMNode(1, sample[0]);
                node_array[1] = new SVMNode(2, sample[1]);
                int ret_double = (int)SVM.Predict(libSVM_model, node_array);
                int ret_i = (int)ret_double;
                // Flip y so the plot origin is at the bottom-left.
                CvRect plotRect = new CvRect(x, 300 - y, 1, 1);
                // Color each pixel by its predicted class (1 = red, 2 = green-yellow).
                if (ret_i == 1)
                {
                    retPlot.Rectangle(plotRect, CvColor.Red);
                }
                else if (ret_i == 2)
                {
                    retPlot.Rectangle(plotRect, CvColor.GreenYellow);
                }
            }
        }
        CvWindow.ShowImages(retPlot);
    }
}
/// <summary>
/// Loads the trained MNIST model, L2-normalizes the "<c>prefix</c>.txt" dataset,
/// and prints the prediction for its first instance.
/// </summary>
private static void TestOne(string prefix)
{
    var trainedModel = SVM.LoadModel(MnistDataPath + "model.txt");
    var problem = SVMProblemHelper.Load(MnistDataPath + prefix + ".txt").Normalize(SVMNormType.L2);
    var predictions = problem.Predict(trainedModel);
    Console.WriteLine("\nTest result: " + predictions[0].ToString());
}
/// <summary>
/// Reads the JSON parameter file, parses the "resize" dimensions, and loads the
/// SVM model selected in the combo box.
/// </summary>
private void btnLoadModel_Click(object sender, EventArgs e)
{
    parameter = load_json_file(parameter_file);

    // "resize" is stored as "width,height".
    string size = parameter["resize"];
    string[] dims = size.Split(',');
    sizes2 = new OpenCvSharp.Size(Convert.ToInt32(dims[0]), Convert.ToInt32(dims[1]));

    model_load = SVM.LoadModel(parameter["path_model"] + comboBox1.Text);
    MessageBox.Show("Load model completed");
}
/// <summary>
/// Loads the trained MNIST model, L2-normalizes the "<c>prefix</c>.txt" dataset,
/// predicts all instances, and prints the classification accuracy.
/// </summary>
private static void Test(string prefix)
{
    var trainedModel = SVM.LoadModel(MnistDataPath + "model.txt");
    var problem = SVMProblemHelper.Load(MnistDataPath + prefix + ".txt").Normalize(SVMNormType.L2);
    double[] predictions = problem.Predict(trainedModel);

    int[,] confusionMatrix;
    double accuracy = problem.EvaluateClassificationProblem(predictions, trainedModel.Labels, out confusionMatrix);
    Console.WriteLine("\nTest accuracy: " + accuracy);
}
/// <summary>
/// Minimal LIBSVM walkthrough: train a linear SVM on two instances, predict,
/// save the model to disk, reload it, and predict again to verify the round trip.
/// </summary>
public void LibsvmFirstLook()
{
    // Two-sample training problem: one positive, one negative instance.
    var problem = new SVMProblem();
    problem.Add(new[] { new SVMNode(1, 1), new SVMNode(2, 0), new SVMNode(3, 1) }, 1);
    problem.Add(new[] { new SVMNode(1, -1), new SVMNode(2, 0), new SVMNode(3, -1) }, -1);

    // Linear kernel with C = 10.
    var parameter = new SVMParameter { Kernel = SVMKernelType.LINEAR, C = 10 };

    var trained = problem.Train(parameter);
    TestOutput(trained.Predict(new[] { new SVMNode(1, 1), new SVMNode(2, 1), new SVMNode(3, 1) }));
    trained.SaveModel("trainModel");

    // Reload from disk and confirm the persisted model predicts identically.
    var reloaded = SVM.LoadModel("trainModel");
    TestOutput(reloaded.Predict(new[] { new SVMNode(1, 1), new SVMNode(2, 1), new SVMNode(3, 1) }));
}
// SVM classification of a face-feature sample; returns the predicted class label.
public int SVMPredict(FaceFeature.FeatureValue feature)
{
    // Lazily load the trained model on first use.
    if (!this.LoadFlag)
    {
        this.libSVM_model = SVM.LoadModel(@"model_FaceFeature.xml");
        this.LoadFlag = true;
    }

    // Lazily load the scaling file, if normalization is enabled in settings.
    if (!this.LoadScaleFlag && JudgeGUII.APPSetting.NORMALIZE_USE)
    {
        this.LoadScaleFlag = ReadScaleFile(@"out/normalize_scale.csv");
    }

    // Flatten the feature struct into a plain array.
    double[] values = new double[FEATURE_COUNT];
    SetFeatureToArray(feature, ref values);

    // Apply normalization only when the scale data was successfully loaded.
    if (this.LoadScaleFlag && JudgeGUII.APPSetting.NORMALIZE_USE)
    {
        execNormalize(ref values);
    }

    // Build the libsvm query; node indices are 1-based.
    var nodes = new SVMNode[FEATURE_COUNT];
    for (int index = 0; index < FEATURE_COUNT; index++)
    {
        nodes[index] = new SVMNode(index + 1, values[index]);
    }

    return (int)SVM.Predict(libSVM_model, nodes);
}
/// <summary>
/// Left click: records a training point labeled by the checked radio button and redraws.
/// Right click: classifies the clicked position with the saved model and logs the result.
/// </summary>
private void pictureBox1_MouseDown(object sender, MouseEventArgs e)
{
    if (e.Button == MouseButtons.Left)
    {
        // Map the checked radio button to a class index. The chain checks
        // button 4 first so the highest-numbered checked button wins, matching
        // the original last-assignment-wins if-chain.
        int selIndex =
            radioButton4.Checked ? 3 :
            radioButton3.Checked ? 2 :
            radioButton2.Checked ? 1 : 0;

        mList.Add(new DataInfo(selIndex, e.X, e.Y));
        Draw();
    }
    else if (e.Button == MouseButtons.Right)
    {
        // Normalize the click coordinates into [0, 1] and classify the point.
        SVMNode[] node = new SVMNode[2];
        node[0] = new SVMNode(1, (double)e.X / (double)mWidth);
        node[1] = new SVMNode(2, (double)e.Y / (double)mHeight);

        SVMModel model = SVM.LoadModel(FILE_MODEL);
        double result = SVM.Predict(model, node);
        Console.WriteLine("result=" + result);
    }
}
/// <summary>Loads and returns the previously trained SVM model from the configured path.</summary>
public static SVMModel getExistingModel() => SVM.LoadModel(Constants.MODEL_PATH);
/// <summary>Loads a trained SVM model from the given file path.</summary>
public static SVMModel SVM_LoadModel(String model) => SVM.LoadModel(model);
/// <summary>
/// Loads the ADL/fall test set, classifies it with a pre-trained activity
/// recognition model, prints the per-class prediction fractions, and then the
/// overall accuracy plus a labeled confusion matrix.
/// </summary>
static void Main(string[] args)
{
    // Load the test dataset and L2-normalize it (x / ||x||), matching the
    // preprocessing used when the model was trained.
    SVMProblem testSet1 = SVMProblemHelper.Load(@"C:\Users\temp\Desktop\ADLfall_test1.txt");
    testSet1 = testSet1.Normalize(SVMNormType.L2);

    // Load the previously trained model. Training and cross-validation were
    // done offline (C_SVC, RBF kernel, C = 32768.0, Gamma = 8.0) and the model
    // saved via SVM.SaveModel.
    SVMModel model = SVM.LoadModel(@"Model\activity_recognition.txt");

    // Predict every instance exactly once and reuse the results for both the
    // per-class counts and the accuracy evaluation below. (The original code
    // ran a second, redundant per-instance prediction loop over the same data.)
    double[] testResults = testSet1.Predict(model);

    // Count predictions per class label 1..4.
    int q = 0, w = 0, e = 0, r = 0;
    for (int ok = 0; ok < testSet1.Length; ok++)
    {
        switch (Convert.ToInt16(testResults[ok]))
        {
            case 1: q++; break;
            case 2: w++; break;
            case 3: e++; break;
            case 4: r++; break;
        }
    }

    // Fraction of predictions per class (NaN if no instance fell in classes 1..4,
    // same as the original behavior).
    double sum = q + w + e + r;
    Console.WriteLine("result:" + Math.Round(q / sum, 2) + "," + Math.Round(w / sum, 2) + "," + Math.Round(e / sum, 2) + "," + Math.Round(r / sum, 2));

    // Evaluate the predictions against the ground-truth labels.
    int[,] confusionMatrix;
    double testAccuracy = testSet1.EvaluateClassificationProblem(testResults, model.Labels, out confusionMatrix);

    Console.WriteLine("\nTest accuracy: " + testAccuracy);
    Console.WriteLine("\nConfusion matrix:\n");

    // Print the confusion matrix with labeled header row and row labels.
    Console.Write(String.Format("{0,6}", ""));
    for (int i = 0; i < model.Labels.Length; i++)
    {
        Console.Write(String.Format("{0,5}", "(" + model.Labels[i] + ")"));
    }
    Console.WriteLine();
    for (int i = 0; i < confusionMatrix.GetLength(0); i++)
    {
        Console.Write(String.Format("{0,5}", "(" + model.Labels[i] + ")"));
        for (int j = 0; j < confusionMatrix.GetLength(1); j++)
        {
            Console.Write(String.Format("{0,5}", confusionMatrix[i, j]));
        }
        Console.WriteLine();
    }

    Console.WriteLine("\n\nPress any key to quit...");
    Console.ReadLine();
}
/// <summary>
/// Benchmarks three prediction strategies on the wine dataset and prints the
/// average per-instance prediction time for each.
/// BUG FIX: the original called sw.Start() before each test; Stopwatch.Start
/// resumes without resetting, so Test 2's elapsed time included Test 1's and
/// Test 3's included both, inflating the reported averages. sw.Restart()
/// resets the elapsed time before each measurement.
/// </summary>
static void Main(string[] args)
{
    SVMProblem testSet = SVMProblemHelper.Load(@"Dataset\wine.txt"); // Same as the training set
    SVMModel model = SVM.LoadModel(@"Model\wine_model.txt");

    Console.WriteLine("Feature count in one instance: " + model.SV[0].Length + "\n\n");

    // Test 1: Predict instances with SVMProblem's Predict extension method.
    sw.Restart();
    double[] target = testSet.Predict(model);
    sw.Stop();
    double elapsedTimeInTest1 = (double)sw.ElapsedMilliseconds / (double)testSet.Length;
    Console.WriteLine("> Test 1: \nPredict instances with SVMProblem's Predict extension method.\n");
    Console.WriteLine("\tAverage elapsed time of one prediction: " + elapsedTimeInTest1 + " ms\n");

    // Test 2: Predict instances with the RapidPredictor class, which allocates
    // the native model once and reuses its pointer for every prediction.
    using (RapidPredictor predictor = new RapidPredictor(model)) // It needs to be Disposed
    {
        sw.Restart();
        target = new double[testSet.Length];
        for (int i = 0; i < testSet.Length; i++)
        {
            target[i] = predictor.Predict(testSet.X[i]);
        }
        sw.Stop();
    }
    double elapsedTimeInTest2 = (double)sw.ElapsedMilliseconds / (double)testSet.Length;
    Console.WriteLine("> Test 2: \nPredict instances with RapidPreditor class which is an explicit implementation of the method used in Test 1.\n");
    Console.WriteLine("\tAverage elapsed time of one prediction: " + elapsedTimeInTest2 + " ms\n");

    // Test 3: Predict instances with the standard SVM.Predict method, which
    // allocates and deallocates the native model on every call.
    sw.Restart();
    target = new double[testSet.Length];
    for (int i = 0; i < testSet.Length; i++)
    {
        target[i] = SVM.Predict(model, testSet.X[i]);
    }
    sw.Stop();
    double elapsedTimeInTest3 = (double)sw.ElapsedMilliseconds / (double)testSet.Length;
    Console.WriteLine("> Test 3: \nPredict instances with standard SVM.Predict method or SVMNode[]'s Predict extension method.\n");
    Console.WriteLine("\tAverage elapsed time of one prediction: " + elapsedTimeInTest3 + " ms\n");

    // Print the results
    Console.WriteLine("\nExplanation:\n");
    Console.WriteLine(
        "In standard SVM.Predict method, the SVMModel object is allocated and deallocated every time when the method called. " +
        "Also the SVMNode[]'s Predict extension methods directly calls the SVM.Predict. " +
        "However, the model is allocated once and is used to predict whole instances with its pointer in SVMProblem's " +
        "Predict extension method as implemented in the RapidPredictor class. You can take or modify this class in order " +
        "to use in your applications, if you have performance considerations. " +
        "I am not suggesting that SVMProblem's Predict extension method is used in real-time, because the model is allocated" +
        "in every method call.");

    Console.WriteLine("\n\nPress any key to quit...");
    Console.ReadLine();
}
/// <summary>Switches the active SVM model to the one registered under <paramref name="key"/>.</summary>
public void loadModel(string key)
{
    string modelPath = modelPathMap[key];
    svmModel = SVM.LoadModel(modelPath);
}
// Runs the full analysis pipeline for one image, identified by its storage path:
// load DICOM -> crop/normalize -> contrast equalization -> PCA projection ->
// SVM probability prediction. Each stage is a Task executed SEQUENTIALLY
// (started then immediately awaited) so that the closure-captured `image` and
// `pcaComponents` locals flow from one stage to the next; after each stage the
// analysis record's PercentageDone is updated in the database.
// NOTE(review): the stages share mutable state through closures, so they must
// never be started concurrently — the foreach-await ordering is load-bearing.
private async Task PerformAnalysis(String path, Rectangle rectangle)
{
    // Shared pipeline state, mutated by the stage closures in order.
    UShortArrayAsImage image = null;
    double[] pcaComponents = null;
    int tasksComplete = 0;
    UpdateStatus(path, startingImageStatusStr);
    List <Task> tasks = new List <Task>()
    {
        // Stage 1: fetch the DICOM file from storage and decode it.
        new Task(() =>
        {
            var file = db.FileStorage.FindById($"images/{path}");
            var ms = new MemoryStream();
            file.CopyTo(ms);
            ms.Seek(0, 0); // rewind before DICOM parsing
            image = DicomFile.Open(ms).GetUshortImageInfo();
            UpdateStatus(path, loadedImageStatusStr);
        }),
        // Stage 2: crop to the selected rectangle and resize to the configured
        // analysis size; persist the cropped PNG.
        new Task(() =>
        {
            image = Normalization.GetNormalizedImage(image, rectangle,
                                                     int.Parse(Configuration.Get("sizeImageToAnalyze")));
            db.FileStorage.Upload($"images/{path}-cropped", $"{path}-cropped", image.GetPngAsMemoryStream());
            UpdateStatus(path, croppedImageStatusStr);
        }),
        // Stage 3: histogram equalization for contrast; persist the result.
        new Task(() =>
        {
            image = Contrast.ApplyHistogramEqualization(image);
            db.FileStorage.Upload($"images/{path}-croppedContrast", $"{path}-croppedContrast",
                                  image.GetPngAsMemoryStream());
            UpdateStatus(path, contrastImageStatusStr);
        }),
        // Stage 4: project the image onto the PCA basis. Falls back to all
        // eigenvalues when "componentsToUse" is absent or unparsable.
        new Task(() =>
        {
            //PCA
            PCA pca = PCA.LoadModelFromFile(Configuration.Get("PcaModelLocation"));
            if (!int.TryParse(Configuration.Get("componentsToUse"), out int components))
            {
                components = pca.Eigenvalues.Length;
            }
            pcaComponents = pca.GetComponentsFromImage(image, components);
            UpdateStatus(path, pcaImageStatusStr);
        }),
        // Stage 5: classify the PCA components with the SVM and store the
        // diagnosis + certainty on the analysis record.
        new Task(() =>
        {
            //SVM
            SVMProblem svmProblem = new SVMProblem();

            // add all the components to an SVMNode[]
            SVMNode[] nodes = new SVMNode[pcaComponents.Length];
            for (int i = 0; i < pcaComponents.Length; i++)
            {
                nodes[i] = new SVMNode(i + 1, pcaComponents[i]);
            }

            svmProblem.Add(nodes, 0);
            svmProblem = svmProblem.Normalize(SVMNormType.L2);
            SVMModel svmModel = SVM.LoadModel(Configuration.Get("ModelLocation"));
            double[] results = svmProblem.PredictProbability(svmModel, out var probabilities);

            var analysis = db.GetCollection <Analysis>("analysis");
            Analysis currentAnalysis = analysis.FindOne(x => x.Id.ToString().Equals(path));
            // Certainty is the probability of the predicted class, as a percentage.
            currentAnalysis.Certainty = results[0] == 0 ?
                probabilities[0][1] * 100 : probabilities[0][0] * 100;
            currentAnalysis.Diagnosis = results[0] == 0 ?
                DdsmImage.Pathologies.Benign :
                DdsmImage.Pathologies
                .Malignant;
            analysis.Update(currentAnalysis);
            UpdateStatus(path, svmImageStatusStr);
        })
    };
    // Run the stages one at a time, updating progress after each completes.
    foreach (Task task in tasks)
    {
        task.Start();
        await task;

        // lets set percentage done:
        var analysis = db.GetCollection <Analysis>("analysis");
        Analysis currentAnalysis = analysis.FindOne(x => x.Id.ToString().Equals(path));
        currentAnalysis.PercentageDone = (++tasksComplete * 100) / tasks.Count;
        analysis.Update(currentAnalysis);
    }
    UpdateStatus(path, doneStatusStr);
}
/// <summary>
/// LoadModel must return null for a path that does not exist.
/// The original test made no assertion at all, so it could never fail.
/// </summary>
public void SVM_LoadModel_FilenameDoesNotExist_ReturnsNull()
{
    // Act
    SVMModel model = SVM.LoadModel(Contants.WRONG_MODEL_PATH_TO_BE_LOADED);

    // Assert — the behavior the test name promises.
    Assert.IsNull(model);
}
/// <summary>
/// LoadModel must return null for an invalid (empty) filename.
/// The original test made no assertion at all, so it could never fail.
/// </summary>
public void SVM_LoadModel_FilenameIsInvalid_ReturnsNull()
{
    // Act
    SVMModel model = SVM.LoadModel("");

    // Assert — the behavior the test name promises.
    Assert.IsNull(model);
}