/// <summary>
/// Loads an ARFF dataset, trains the supplied classifier on the first
/// <c>percentSplit</c>% of the instances and evaluates it on the remainder.
/// </summary>
/// <param name="classifierFileName">Path to the ARFF file to load.</param>
/// <param name="_classifier">Weka classifier to train and evaluate.</param>
/// <returns>Percent of held-out instances classified correctly; 0.0 on error.</returns>
public static double classifyTrain_Test(string classifierFileName, Classifier _classifier)
{
    double performance = 0.0;
    try
    {
        FileReader javaFileReader = new FileReader(classifierFileName);
        weka.core.Instances insts = new weka.core.Instances(javaFileReader);
        javaFileReader.close();

        // By convention the last attribute is the class attribute.
        insts.setClassIndex(insts.numAttributes() - 1);

        System.Console.WriteLine("Performing " + percentSplit + "% split evaluation.");

        int trainSize = insts.numInstances() * percentSplit / 100;
        int testSize = insts.numInstances() - trainSize;
        weka.core.Instances train = new weka.core.Instances(insts, 0, trainSize);

        _classifier.buildClassifier(train);

        int numCorrect = 0;
        for (int i = trainSize; i < insts.numInstances(); i++)
        {
            weka.core.Instance currentInst = insts.instance(i);
            double predictClass = _classifier.classifyInstance(currentInst);

            // Compare nominal labels rather than raw class indices.
            string actualClass = insts.classAttribute().value((int)currentInst.classValue());
            string predictedClass = insts.classAttribute().value((int)predictClass);
            if (predictedClass == actualClass)
            {
                numCorrect++;
            }
        }

        // NOTE(review): assumes percentSplit < 100, otherwise testSize is 0 and this divides by zero.
        performance = (double)numCorrect / (double)testSize * 100.0;
        // BUG FIX: was performance.toString() (Java-style), which is not a member of System.Double.
        System.Console.WriteLine(numCorrect + " out of " + testSize + " correct (" + performance.ToString() + "%)");
    }
    catch (java.lang.Exception ex)
    {
        ex.printStackTrace();
    }
    return performance;
}
/// <summary>
/// Trains the shared SMO support-vector classifier on a percentSplit% split of the
/// dataset (after nominal-to-binary conversion, normalization and shuffling) and
/// returns the accuracy (%) on the held-out remainder; 0 on error.
/// </summary>
public static double SupportVectorMachineTest(weka.core.Instances insts)
{
    try
    {
        insts.setClassIndex(insts.numAttributes() - 1);

        SupportVectorMachine = new weka.classifiers.functions.SMO();

        // Preprocess: binarize nominal attributes, normalize, then shuffle the order.
        weka.filters.Filter binarize = new weka.filters.unsupervised.attribute.NominalToBinary();
        binarize.setInputFormat(insts);
        insts = weka.filters.Filter.useFilter(insts, binarize);

        weka.filters.Filter normalize = new weka.filters.unsupervised.instance.Normalize();
        normalize.setInputFormat(insts);
        insts = weka.filters.Filter.useFilter(insts, normalize);

        weka.filters.Filter shuffle = new weka.filters.unsupervised.instance.Randomize();
        shuffle.setInputFormat(insts);
        insts = weka.filters.Filter.useFilter(insts, shuffle);

        int total = insts.numInstances();
        int trainSize = total * percentSplit / 100;
        int testSize = total - trainSize;

        SupportVectorMachine.buildClassifier(new weka.core.Instances(insts, 0, trainSize));

        int hits = 0;
        for (int idx = trainSize; idx < total; idx++)
        {
            weka.core.Instance sample = insts.instance(idx);
            if (SupportVectorMachine.classifyInstance(sample) == sample.classValue())
            {
                hits++;
            }
        }
        return (double)hits / (double)testSize * 100.0;
    }
    catch (java.lang.Exception ex)
    {
        ex.printStackTrace();
        return 0;
    }
}
/// <summary>
/// Returns the accuracy (%) of the trained classifier over the held-out test
/// portion of the shared Instance set (instances from TrainSize onward).
/// </summary>
public double FindAccurancy() // returns the accuracy
{
    int hits = 0;
    for (int idx = Classifier.TrainSize; idx < Instance.numInstances(); idx++)
    {
        weka.core.Instance sample = Instance.instance(idx);
        if (Classifier.classifier.classifyInstance(sample) == sample.classValue())
        {
            hits++;
        }
    }
    return (double)hits / (double)Classifier.TestSize * 100.0;
}
/// <summary>
/// Builds a Weka instance from the encoded (attribute-name, value) pairs,
/// classifies it and forwards the prediction to wekaoutput. Null pairs are skipped.
/// </summary>
public void WekaFeedInput(Pair <string, double>[] encinstance)
{
    weka.core.Instance inst = new weka.core.Instance(attributes.Length + 1);
    inst.setDataset(trainset);
    foreach (Pair <string, double> pair in encinstance)
    {
        if (pair == null)
        {
            continue;
        }
        inst.setValue(FindAttribute(pair.first), pair.second);
    }
    wekaoutput(ClassifyInstance(inst));
}
/// <summary>
/// Trains the shared NaiveBayes classifier on a percentSplit% split of the dataset
/// (after discretization and shuffling) and returns the accuracy (%) on the
/// held-out remainder; 0 on error.
/// </summary>
public static double NaiveBayesTest(weka.core.Instances insts)
{
    try
    {
        insts.setClassIndex(insts.numAttributes() - 1);

        NaiveBayescl = new weka.classifiers.bayes.NaiveBayes();

        // Discretize numeric attributes, then shuffle the instance order.
        weka.filters.Filter discretize = new weka.filters.unsupervised.attribute.Discretize();
        discretize.setInputFormat(insts);
        insts = weka.filters.Filter.useFilter(insts, discretize);

        weka.filters.Filter shuffle = new weka.filters.unsupervised.instance.Randomize();
        shuffle.setInputFormat(insts);
        insts = weka.filters.Filter.useFilter(insts, shuffle);

        int total = insts.numInstances();
        int trainSize = total * percentSplit / 100;
        int testSize = total - trainSize;

        NaiveBayescl.buildClassifier(new weka.core.Instances(insts, 0, trainSize));

        int hits = 0;
        for (int idx = trainSize; idx < total; idx++)
        {
            weka.core.Instance sample = insts.instance(idx);
            if (NaiveBayescl.classifyInstance(sample) == sample.classValue())
            {
                hits++;
            }
        }
        return (double)hits / (double)testSize * 100.0;
    }
    catch (java.lang.Exception ex)
    {
        ex.printStackTrace();
        return 0;
    }
}
/// <summary>
/// Derives a trade volume from Kelly-criterion position sizing using the recorded
/// outcome counts. Returns 0 when there is no history or the Kelly fraction is
/// non-positive.
/// </summary>
public double GetVolume(weka.core.Instance instance)
{
    double total = m_counts[0] + m_counts[1] + m_counts[2];
    if (total == 0)
    {
        return 0;
    }
    // Kelly criterion: f = (p*(b+1) - 1) / b, where b is the payoff ratio
    // (take-profit over stop-loss) and p the observed win rate (m_counts[2]).
    double b = -m_tp / m_sl;
    double p = (double)m_counts[2] / total;
    double f = (p * (b + 1) - 1) / b;
    // Scaling chosen so f=0.2 with sl=20 yields a volume of 0.01.
    double volume = 2 * (f / 0.2) / (m_sl / 10);
    return volume <= 0 ? 0 : volume;
}
/// <summary>
/// Loads the Communication ARFF dataset, trains a BayesNet classifier on a
/// percentSplit% split and writes the evaluation report (summary, per-class
/// statistics, confusion matrix) to Communication_Report.txt.
/// </summary>
public static void Test()
{
    // BUG FIX: close the file reader once the dataset is in memory (was leaked before).
    java.io.FileReader reader = new java.io.FileReader("./data/Classification/Communication.arff");
    weka.core.Instances data = new weka.core.Instances(reader);
    reader.close();
    data.setClassIndex(data.numAttributes() - 1);

    weka.classifiers.Classifier cls = new weka.classifiers.bayes.BayesNet();

    // Save BayesNet results in .txt file
    using (System.IO.StreamWriter file = new System.IO.StreamWriter("./data/Classification/Communication_Report.txt"))
    {
        file.WriteLine("Performing " + percentSplit + "% split evaluation.");

        int runs = 1;
        for (int i = 0; i < runs; i++)
        {
            // Shuffle with a per-run seed so each run is reproducible.
            int seed = i + 1;
            java.util.Random rand = new java.util.Random(seed);
            weka.core.Instances randData = new weka.core.Instances(data);
            randData.randomize(rand);

            int trainSize = (int)Math.Round((double)data.numInstances() * percentSplit / 100);

            // Empty copies that share the dataset header; filled from the shuffled data below.
            weka.core.Instances train = new weka.core.Instances(data, 0, 0);
            weka.core.Instances test = new weka.core.Instances(data, 0, 0);
            train.setClassIndex(train.numAttributes() - 1);
            test.setClassIndex(test.numAttributes() - 1);

            file.WriteLine("EVALUATION OF TEST DATASET.");

            for (int j = 0; j < data.numInstances(); j++)
            {
                weka.core.Instance currentInst = randData.instance(j);
                if (j < trainSize)
                {
                    train.add(currentInst);
                }
                else
                {
                    test.add(currentInst);
                }
            }

            // Build and evaluate classifier on the held-out split.
            cls.buildClassifier(train);
            weka.classifiers.Evaluation eval = new weka.classifiers.Evaluation(randData);
            eval.evaluateModel(cls, test);

            // Print the results as in Weka explorer: summary, class details, confusion matrix.
            file.WriteLine(eval.toSummaryString());
            file.WriteLine();
            file.WriteLine(eval.toClassDetailsString());
            file.WriteLine();
            file.WriteLine(eval.toMatrixString());
            file.WriteLine();

            System.Console.WriteLine("Bayesian Network results saved in Communication_Report.txt file successfully.");
        }
    }
}
/// <summary>
/// Loads iris.arff, trains a BayesNet on a percentSplit% split, prints per-instance
/// class distributions for the held-out portion, then prints full evaluation
/// reports (summary, class details, confusion matrix) for both the train and
/// test sets.
/// </summary>
public static void BayesTest()
{
    try
    {
        // BUG FIX: close the file reader once the dataset is in memory (was leaked before).
        java.io.FileReader reader = new java.io.FileReader("iris.arff");
        weka.core.Instances insts = new weka.core.Instances(reader);
        reader.close();
        insts.setClassIndex(insts.numAttributes() - 1);

        weka.classifiers.Classifier cl = new weka.classifiers.bayes.BayesNet();
        System.Console.WriteLine("Performing " + percentSplit + "% split evaluation.");

        // Randomize the order of the instances in the dataset.
        weka.filters.Filter myRandom = new weka.filters.unsupervised.instance.Randomize();
        myRandom.setInputFormat(insts);
        insts = weka.filters.Filter.useFilter(insts, myRandom);

        int trainSize = insts.numInstances() * percentSplit / 100;
        int testSize = insts.numInstances() - trainSize;
        weka.core.Instances train = new weka.core.Instances(insts, 0, trainSize);
        // Empty set sharing the header; filled with held-out instances in the loop below.
        weka.core.Instances test = new weka.core.Instances(insts, 0, 0);

        cl.buildClassifier(train);
        // Print model.
        System.Console.WriteLine(cl);

        int numCorrect = 0;
        for (int i = trainSize; i < insts.numInstances(); i++)
        {
            weka.core.Instance currentInst = insts.instance(i);
            double predictedClass = cl.classifyInstance(currentInst);
            test.add(currentInst);

            double[] prediction = cl.distributionForInstance(currentInst);
            for (int x = 0; x < prediction.Length; x++)
            {
                System.Console.WriteLine("Probability of class [{0}] for [{1}] is: {2}", currentInst.classAttribute().value(x), currentInst, Math.Round(prediction[x], 4));
            }
            System.Console.WriteLine();

            if (predictedClass == insts.instance(i).classValue())
            {
                numCorrect++;
            }
        }
        System.Console.WriteLine(numCorrect + " out of " + testSize + " correct (" + (double)((double)numCorrect / (double)testSize * 100.0) + "%)");

        // Evaluate on the training set.
        weka.classifiers.Evaluation eTrain = new weka.classifiers.Evaluation(train);
        eTrain.evaluateModel(cl, train);
        System.Console.WriteLine(eTrain.toSummaryString());
        System.Console.WriteLine(eTrain.toClassDetailsString());
        System.Console.WriteLine(eTrain.toMatrixString());

        // Evaluate on the held-out test set.
        weka.classifiers.Evaluation eTest = new weka.classifiers.Evaluation(test);
        eTest.evaluateModel(cl, test);
        System.Console.WriteLine(eTest.toSummaryString());
        System.Console.WriteLine(eTest.toClassDetailsString());
        System.Console.WriteLine(eTest.toMatrixString());
    }
    catch (java.lang.Exception ex)
    {
        ex.printStackTrace();
    }
}
/// <summary>
/// Demonstrates building a Weka dataset entirely in memory with numeric, nominal,
/// string, date and relational attributes, filling it with two instances and
/// printing them.
/// </summary>
public static void CreateArffFiles()
{
    // 1. set up attributes
    java.util.ArrayList atts = new java.util.ArrayList();
    // - numeric
    atts.add(new weka.core.Attribute("att1"));
    // - nominal
    java.util.ArrayList attVals = new java.util.ArrayList();
    for (int i = 0; i < 5; i++)
    {
        attVals.add("val" + (i + 1));
    }
    atts.add(new weka.core.Attribute("att2", attVals));
    // - string
    atts.add(new weka.core.Attribute("att3", (java.util.ArrayList)null));
    // - date
    atts.add(new weka.core.Attribute("att4", "yyyy-MM-dd"));
    // - relational
    java.util.ArrayList attsRel = new java.util.ArrayList();
    // -- numeric
    attsRel.add(new weka.core.Attribute("att5.1"));
    // -- nominal
    java.util.ArrayList attValsRel = new java.util.ArrayList();
    for (int i = 0; i < 5; i++)
    {
        attValsRel.add("val5." + (i + 1));
    }
    attsRel.add(new weka.core.Attribute("att5.2", attValsRel));
    Instances dataRel = new Instances("att5", attsRel, 0);
    atts.add(new weka.core.Attribute("att5", dataRel, 0));

    // 2. create Instances object
    Instances data = new Instances("MyRelation", atts, 0);

    // 3. fill with data
    // first instance
    double[] vals = new double[data.numAttributes()];
    vals[0] = Math.PI;                                               // numeric
    vals[1] = attVals.indexOf("val3");                               // nominal
    vals[2] = data.attribute(2).addStringValue("This is a string!"); // string
    vals[3] = data.attribute(3).parseDate("2001-11-09");             // date
    // relational: two inner rows. BUG FIX: the original overwrote index 1 of a
    // single instance twice and re-added the same object for the second row.
    dataRel = new Instances(data.attribute(4).relation(), 0);
    dataRel.add(new DenseInstance(1.0, new double[] { Math.PI + 1, attValsRel.indexOf("val5.3") }));
    dataRel.add(new DenseInstance(1.0, new double[] { Math.PI + 2, attValsRel.indexOf("val5.2") }));
    vals[4] = data.attribute(4).addRelation(dataRel);
    // BUG FIX: the original set attribute index 1 four times and never stored
    // vals[4]; build the instance directly from the value array instead.
    data.add(new DenseInstance(1.0, vals));

    // second instance
    vals = new double[data.numAttributes()]; // important: needs NEW array!
    vals[0] = Math.E;                                                // numeric
    vals[1] = attVals.indexOf("val1");                               // nominal
    vals[2] = data.attribute(2).addStringValue("And another one!");  // string
    vals[3] = data.attribute(3).parseDate("2000-12-01");             // date
    dataRel = new Instances(data.attribute(4).relation(), 0);
    dataRel.add(new DenseInstance(1.0, new double[] { Math.E + 1, attValsRel.indexOf("val5.4") }));
    dataRel.add(new DenseInstance(1.0, new double[] { Math.E + 2, attValsRel.indexOf("val5.1") }));
    vals[4] = data.attribute(4).addRelation(dataRel);
    data.add(new DenseInstance(1.0, vals));

    data.setClassIndex(data.numAttributes() - 1);

    // 4. output data
    // BUG FIX: the original printed ins.value(x), using the INSTANCE index as an
    // ATTRIBUTE index; print each whole instance instead.
    for (int x = 0; x < data.numInstances(); x++)
    {
        System.Console.WriteLine(data.instance(x).toString());
    }
}
/// <summary>
/// Classifies a single data row with a persisted SMO model and returns the
/// predicted class value as a one-element list.
/// </summary>
/// <param name="attributeArray">Names of the numeric input attributes.</param>
/// <param name="classNames">Labels of the nominal class attribute.</param>
/// <param name="dataValues">One value per input attribute (rounded to 5 decimals).</param>
/// <param name="classHeader">Name of the class attribute.</param>
/// <param name="defaultclass">Placeholder class label for the instance.</param>
/// <param name="modelName">Model file to load.</param>
/// <remarks>
/// NOTE(review): hiddelLayers/learningRate/momentum/decimalPlaces/trainingTime are
/// unused here but kept for caller compatibility (they look like MLP parameters).
/// </remarks>
public List <double> testSMOUsingWeka(string[] attributeArray, string[] classNames, double[] dataValues, string classHeader, string defaultclass, string modelName, int hiddelLayers = 7, double learningRate = 0.03, double momentum = 0.4, int decimalPlaces = 2, int trainingTime = 1000)
{
    // Build the dataset header: one numeric attribute per input plus the nominal class.
    java.util.ArrayList classLabel = new java.util.ArrayList();
    foreach (string className in classNames)
    {
        classLabel.add(className);
    }
    weka.core.Attribute classHeaderName = new weka.core.Attribute(classHeader, classLabel);

    java.util.ArrayList attributeList = new java.util.ArrayList();
    foreach (string attribute in attributeArray)
    {
        attributeList.add(new weka.core.Attribute(attribute));
    }
    attributeList.add(classHeaderName);

    weka.core.Instances data = new weka.core.Instances("TestInstances", attributeList, 0);
    data.setClassIndex(data.numAttributes() - 1);

    // Populate the single instance to classify.
    weka.core.Instance inst_co = new DenseInstance(data.numAttributes());
    for (int i = 0; i < data.numAttributes() - 1; i++)
    {
        inst_co.setValue(i, Math.Round(dataValues.ElementAt(i), 5));
    }
    inst_co.setValue(classHeaderName, defaultclass);
    data.add(inst_co);

    // Load the persisted model; fall back to SerializationHelper when the model
    // directory is not usable.
    weka.classifiers.functions.SMO clRead;
    try
    {
        java.io.File path = new java.io.File("/models/");
        clRead = loadSMOModel(modelName, path);
    }
    catch (Exception)
    {
        clRead = (weka.classifiers.functions.SMO)weka.core.SerializationHelper.read(modelName);
    }

    // Re-apply the training-time hyperparameters to the loaded model.
    clRead.setBatchSize("100");
    clRead.setCalibrator(new weka.classifiers.functions.Logistic());
    clRead.setKernel(new weka.classifiers.functions.supportVector.PolyKernel());
    clRead.setEpsilon(1.02E-12);
    clRead.setC(1.0);
    clRead.setDebug(false);
    clRead.setChecksTurnedOff(false);
    clRead.setFilterType(new SelectedTag(weka.classifiers.functions.SMO.FILTER_NORMALIZE, weka.classifiers.functions.SMO.TAGS_FILTER));

    double classValue = clRead.classifyInstance(data.get(0));

    List <double> prediction = new List <double>();
    prediction.Add(classValue);
    return prediction;
}
/// <summary>
/// Trains the shared SMO classifier on a percentSplit% split of a CSV dataset,
/// saves the model to /models/, then reports accuracy over the WHOLE dataset.
/// </summary>
/// <param name="wekaFile">CSV file to load.</param>
/// <param name="modelName">Name under which the trained model is saved.</param>
public void trainSMOUsingWeka(string wekaFile, string modelName)
{
    try
    {
        weka.core.converters.CSVLoader csvLoader = new weka.core.converters.CSVLoader();
        csvLoader.setSource(new java.io.File(wekaFile));
        weka.core.Instances insts = csvLoader.getDataSet();
        insts.setClassIndex(insts.numAttributes() - 1);

        cl = new weka.classifiers.functions.SMO();
        cl.setBatchSize("100");
        cl.setCalibrator(new weka.classifiers.functions.Logistic());
        cl.setKernel(new weka.classifiers.functions.supportVector.PolyKernel());
        cl.setEpsilon(1.02E-12);
        cl.setC(1.0);
        cl.setDebug(false);
        cl.setChecksTurnedOff(false);
        cl.setFilterType(new SelectedTag(weka.classifiers.functions.SMO.FILTER_NORMALIZE, weka.classifiers.functions.SMO.TAGS_FILTER));

        System.Console.WriteLine("Performing " + percentSplit + "% split evaluation.");

        int trainSize = insts.numInstances() * percentSplit / 100;
        weka.core.Instances train = new weka.core.Instances(insts, 0, trainSize);

        java.io.File path = new java.io.File("/models/");
        cl.buildClassifier(train);
        saveModel(cl, modelName, path);

        #region test whole set
        int numCorrect = 0;
        for (int i = 0; i < insts.numInstances(); i++)
        {
            weka.core.Instance currentInst = insts.instance(i);
            if (i == 12) // capture row 12's raw values for later debugging/comparison
            {
                array = new List <float>();
                foreach (float value in currentInst.toDoubleArray())
                {
                    array.Add(value);
                }
            }
            if (cl.classifyInstance(currentInst) == insts.instance(i).classValue())
            {
                numCorrect++;
            }
        }
        // BUG FIX: the loop scores the WHOLE dataset, so report against its size.
        // The original divided by testSize, which could yield accuracies over 100%.
        System.Console.WriteLine(numCorrect + " out of " + insts.numInstances() + " correct (" + (double)((double)numCorrect / (double)insts.numInstances() * 100.0) + "%)");
        #endregion
    }
    catch (java.lang.Exception ex)
    {
        ex.printStackTrace();
    }
}
/// <summary>
/// Trains the given classifier on a percentSplit% split of the shared dataset
/// (after normalizing, shuffling and filling missing values), labels the pending
/// instance <c>ins</c> with the prediction, persists the model, and returns
/// "accuracy;predictedLabel" — with accuracy -1 when anything failed.
/// </summary>
/// <param name="cl">Classifier to train and apply.</param>
public async Task <string> classifyTest(weka.classifiers.Classifier cl)
{
    string a = "";
    double rate = 0;
    try
    {
        System.Console.WriteLine("Performing " + percentSplit + "% split evaluation.");

        weka.filters.Filter normalized = new weka.filters.unsupervised.attribute.Normalize();
        normalized.setInputFormat(insts);
        insts = weka.filters.Filter.useFilter(insts, normalized);

        // Randomize the order of the instances in the dataset.
        weka.filters.Filter myRandom = new weka.filters.unsupervised.instance.Randomize();
        myRandom.setInputFormat(insts);
        insts = weka.filters.Filter.useFilter(insts, myRandom);

        // Replace missing values.
        weka.filters.Filter replaceMissingValues = new weka.filters.unsupervised.attribute.ReplaceMissingValues();
        replaceMissingValues.setInputFormat(insts);
        insts = weka.filters.Filter.useFilter(insts, replaceMissingValues);

        int trainSize = insts.numInstances() * percentSplit / 100;
        int testSize = insts.numInstances() - trainSize;
        weka.core.Instances train = new weka.core.Instances(insts, 0, trainSize);
        cl.buildClassifier(train);

        // Label the pending instance with the model's prediction and keep its
        // nominal class-attribute text for the return value.
        double label = cl.classifyInstance(ins);
        ins.setClassValue(label);
        a = ins.toString(ins.numAttributes() - 1);

        weka.core.SerializationHelper.write("mymodel.model", cl);

        int numCorrect = 0;
        for (int i = trainSize; i < insts.numInstances(); i++)
        {
            weka.core.Instance currentInst = insts.instance(i);
            double predictedClass = cl.classifyInstance(currentInst);
            if (predictedClass == insts.instance(i).classValue())
            {
                numCorrect++;
            }
        }
        rate = (double)((double)numCorrect / (double)testSize * 100.0);
    }
    catch (java.lang.Exception ex)
    {
        // Deliberate best-effort: signal failure through the rate instead of rethrowing.
        rate = -1;
    }
    // BUG FIX: parenthesize so ?? applies to `a`, not the whole concatenation
    // (operator precedence made the original `?? ""` dead code).
    return rate.ToString() + ";" + (a ?? "");
}
/// <summary>
/// Evaluates a binary-relevance multi-label setup: for each of the
/// <paramref name="baseClasses"/> per-class ARFF files ("{name}_{k}.arff") it runs
/// 10-fold cross-validation with the supplied classifier, then combines the
/// per-class results and returns the subset accuracy (%) — an example counts as
/// correct only when EVERY class's prediction for it is correct.
/// </summary>
public static double classifyCrossFold_Train_Test_onlySelectedClass(string classifierFileName, int baseClasses, Classifier _classifier)
{
    double performance = 0.0;
    try
    {
        List <BrResult> results = new List <BrResult>();
        for (int singleClass = 1; singleClass <= baseClasses; singleClass++)
        {
            string eachFileName = String.Format("{0}_{1}.arff", classifierFileName, singleClass);
            BrResult result = new BrResult();
            result.classNumber = singleClass;

            FileReader javaFileReader = new FileReader(eachFileName);
            weka.core.Instances insts = new weka.core.Instances(javaFileReader);
            javaFileReader.close();
            insts.setClassIndex(insts.numAttributes() - 1);

            List <Result> eachResults = new List <Result>();
            Instances foldsData = new Instances(insts);
            var folds = 10;
            // dataIndex restarts for each class file so line indexes align across classes.
            int dataIndex = 0;
            for (int n = 0; n < folds; n++)
            {
                System.Console.WriteLine("Performing " + n + " folds");
                Instances trainFold = foldsData.trainCV(folds, n);
                Instances testFold = foldsData.testCV(folds, n);
                var numberOfTestInst = testFold.numInstances();

                _classifier.buildClassifier(trainFold);

                for (int test = 0; test < numberOfTestInst; test++)
                {
                    dataIndex++;
                    Result eachRow = new Result();
                    weka.core.Instance currentInst = testFold.instance(test);
                    double predictClass = _classifier.classifyInstance(currentInst);
                    string actualClass = testFold.classAttribute().value((int)currentInst.classValue());
                    string predictedClass = testFold.classAttribute().value((int)predictClass);
                    eachRow.correct = predictedClass == actualClass ? "1" : "0";
                    eachRow.lineIndex = dataIndex;
                    eachRow.classActual = actualClass;
                    eachRow.classPredicted = predictedClass;
                    eachResults.Add(eachRow);
                }
            }
            result.classResult = eachResults;
            results.Add(result);
        }

        #region Evaluation Matrix
        // Per-line CSV of 0/1 correctness flags, one column per class.
        // BUG FIX: removed Java-style .toString() calls on C# strings.
        var evaluationMatrix = new Dictionary <int, string>();
        foreach (var res in results)
        {
            foreach (var classRes in res.classResult)
            {
                if (!evaluationMatrix.ContainsKey(classRes.lineIndex))
                {
                    evaluationMatrix[classRes.lineIndex] = classRes.correct;
                }
                else
                {
                    evaluationMatrix[classRes.lineIndex] = evaluationMatrix[classRes.lineIndex] + "," + classRes.correct;
                }
            }
        }
        #endregion

        #region
        // Subset accuracy: a line is correct only when no class got it wrong.
        int correnctlyClassified = 0;
        int totalData = evaluationMatrix.Count;
        foreach (var key in evaluationMatrix.Keys)
        {
            string[] flags = evaluationMatrix[key].Split(',');
            int wrongCount = 0;
            for (int i = 0; i < flags.Length; i++)
            {
                if (flags[i] == "0")
                {
                    wrongCount++;
                }
            }
            if (wrongCount == 0)
            {
                correnctlyClassified++;
            }
        }
        performance = (double)correnctlyClassified / (double)totalData * 100;
        System.Console.WriteLine(performance);
        #endregion
    }
    catch (java.lang.Exception ex)
    {
        ex.printStackTrace();
    }
    return performance;
}