public async Task<ModelEvaluation> EvaluateModel()
{
    try
    {
        //switch the application into run mode
        IconUri = "Images/runningmodel.png";
        RaisePropertyChangedEvent("IsRunning");

        //initialize an empty model evaluation
        var mEval = new ModelEvaluation()
        {
            TrainingValue = new List<ZedGraph.PointPair>(),
            ValidationValue = new List<ZedGraph.PointPair>(),
            ModelValueTraining = new List<ZedGraph.PointPair>(),
            ModelValueValidation = new List<ZedGraph.PointPair>(),
            Classes = new List<string>(),
            ModelOutputDim = 1
        };

        var mpt = new ModelPerformance();
        mpt.DatSetName = "Training set";
        var mpv = new ModelPerformance();
        mpv.DatSetName = "Validation set";

        //check whether a trained model exists
        if (string.IsNullOrWhiteSpace(TrainingParameters.LastBestModel))
        {
            await Application.Current.Dispatcher.BeginInvoke(
                DispatcherPriority.Background,
                new Action(() => MainWindow.SetCursor(false)));
            return mEval;
        }

        //get the model's full path
        var modelMLPath = Project.GetMLConfigPath(Settings, Name);

        //check that the model file exists
        var fi = new FileInfo(modelMLPath);
        if (!fi.Exists)
            return mEval;

        //evaluate the model against the training data
        var resultTrain = await Task.Run(() => Project.EvaluateMLConfig(modelMLPath,
            DataSetType.Training, EvaluationType.ResultExtended, ProcessDevice.Default));

        //evaluate the model against the validation data
        var resultValidation = await Task.Run(() => Project.EvaluateMLConfig(modelMLPath,
            DataSetType.Validation, EvaluationType.ResultExtended, ProcessDevice.Default));

        //bail out when the training evaluation produced no data
        //(the original test used '&&', which dereferenced the null list it had just detected)
        if (resultTrain.Actual == null || resultTrain.Actual.Count <= 0)
            return mEval;

        //prepare the evaluation result for charting
        for (int i = 0; i < resultTrain.Actual.Count; i++)
            mEval.TrainingValue.Add(new PointPair(i + 1, resultTrain.Actual[i]));

        for (int i = 0; i < resultTrain.Predicted.Count; i++)
            mEval.ModelValueTraining.Add(new PointPair(i + 1, resultTrain.Predicted[i]));

        //the validation set is optional; fill it only when it is defined
        if (resultValidation.Actual != null && resultValidation.Actual.Count > 0)
        {
            for (int i = 0; i < resultValidation.Actual.Count; i++)
                mEval.ValidationValue.Add(new PointPair(i + 1, resultValidation.Actual[i]));

            for (int i = 0; i < resultValidation.Predicted.Count; i++)
                mEval.ModelValueValidation.Add(new PointPair(i + 1, resultValidation.Predicted[i]));
        }

        mEval.Classes = resultTrain.OutputClasses;
        mEval.ModelOutputDim = resultTrain.OutputClasses == null ? 1 : resultTrain.OutputClasses.Count;

        //training performance result
        mpt = MLEvaluator.CalculatePerformance(resultTrain, "Training set");
        //validation performance result
        mpv = MLEvaluator.CalculatePerformance(resultValidation, "Validation set");

        mEval.TrainPerformance = mpt;
        if (mEval.Classes != null)
            mEval.TrainPerformance.Classes = mEval.Classes.ToArray();

        mEval.ValidationPerformance = mpv;
        if (mEval.Classes != null)
            mEval.ValidationPerformance.Classes = mEval.Classes.ToArray();

        ModelEvaluation = mEval;
        return mEval;
    }
    finally
    {
        //switch the application back into normal mode
        IconUri = "Images/model.png";
        RaisePropertyChangedEvent("IsRunning");
    }
}
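//For reference, a minimal sketch of the ModelEvaluation container populated above.
//The property names are taken from the method body; the types are inferred from the
//object initializer and assignments, and the class name 'ModelEvaluationSketch' is
//hypothetical - this is not the actual declaration from the project.
public class ModelEvaluationSketch
{
    //observed values of the training and validation sets, as chart points
    public List<ZedGraph.PointPair> TrainingValue { get; set; }
    public List<ZedGraph.PointPair> ValidationValue { get; set; }

    //model (predicted) values for the training and validation sets
    public List<ZedGraph.PointPair> ModelValueTraining { get; set; }
    public List<ZedGraph.PointPair> ModelValueValidation { get; set; }

    //class labels for classification models; null for single-output models
    public List<string> Classes { get; set; }

    //1 for single-output (regression) models, otherwise the number of classes
    public int ModelOutputDim { get; set; }

    //performance summaries for each data set
    public ModelPerformance TrainPerformance { get; set; }
    public ModelPerformance ValidationPerformance { get; set; }
}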
public async Task<ModelEvaluation> EvaluateModel()
{
    try
    {
        //switch the application into run mode
        IconUri = "Images/runningmodel.png";
        RaisePropertyChangedEvent("IsRunning");

        //initialize an empty model evaluation
        var mEval = new ModelEvaluation()
        {
            TrainingValue = new List<ZedGraph.PointPair>(),
            ValidationValue = new List<ZedGraph.PointPair>(),
            ModelValueTraining = new List<ZedGraph.PointPair>(),
            ModelValueValidation = new List<ZedGraph.PointPair>(),
            Classes = new List<string>(),
            ModelOutputDim = 1
        };

        var mpt = new ModelPerformance();
        mpt.DatSetName = "Training set";
        var mpv = new ModelPerformance();
        mpv.DatSetName = "Validation set";

        //check whether a trained model exists
        if (string.IsNullOrWhiteSpace(TrainingParameters.LastBestModel))
            return mEval;

        //save the model before evaluation, since some data must be stored in the model file
        Save();

        //get the model's full path
        var modelMLPath = Project.GetMLConfigPath(Settings, Name);

        //check that the model file exists
        var fi = new FileInfo(modelMLPath);
        if (!fi.Exists)
            return mEval;

        //evaluate the model against the training data
        var resultTrain = await Task.Run(() => Project.EvaluateModel(modelMLPath,
            DataProcessing.Core.DataSetType.Training, EvaluationType.ResultyExtended, ProcessDevice.Default));

        //evaluate the model against the validation data
        var resultValidation = await Task.Run(() => Project.EvaluateModel(modelMLPath,
            DataProcessing.Core.DataSetType.Validation, EvaluationType.ResultyExtended, ProcessDevice.Default));

        //bail out when the training evaluation produced no data
        //(the original test used '&&', which dereferenced the null list it had just detected)
        if (resultTrain.Actual == null || resultTrain.Actual.Count <= 0)
            return mEval;

        //prepare the evaluation result for charting
        for (int i = 0; i < resultTrain.Actual.Count; i++)
            mEval.TrainingValue.Add(new PointPair(i + 1, resultTrain.Actual[i]));

        for (int i = 0; i < resultTrain.Predicted.Count; i++)
            mEval.ModelValueTraining.Add(new PointPair(i + 1, resultTrain.Predicted[i]));

        //the validation set is optional; fill it only when it is defined
        if (resultValidation.Actual != null && resultValidation.Actual.Count > 0)
        {
            for (int i = 0; i < resultValidation.Actual.Count; i++)
                mEval.ValidationValue.Add(new PointPair(i + 1, resultValidation.Actual[i]));

            for (int i = 0; i < resultValidation.Predicted.Count; i++)
                mEval.ModelValueValidation.Add(new PointPair(i + 1, resultValidation.Predicted[i]));
        }

        mEval.Classes = resultTrain.OutputClasses;
        mEval.ModelOutputDim = resultTrain.OutputClasses == null ? 1 : resultTrain.OutputClasses.Count;

        if (mEval.ModelOutputDim == 1)
        {
            //single output: compute regression goodness-of-fit statistics

            //training data set
            var actTData = mEval.TrainingValue.Select(x => x.Y).ToArray();
            var preTData = mEval.ModelValueTraining.Select(x => x.Y).ToArray();
            mpt.SE = (float)actTData.SE(preTData);
            mpt.RMSE = (float)actTData.RMSE(preTData);
            mpt.NSE = (float)actTData.NSE(preTData);
            mpt.PB = (float)actTData.PBIAS(preTData);
            mpt.CORR = (float)actTData.R(preTData);
            mpt.DETC = (float)actTData.R2(preTData);

            //validation data set
            var actVData = mEval.ValidationValue.Select(x => x.Y).ToArray();
            var preVData = mEval.ModelValueValidation.Select(x => x.Y).ToArray();
            if (actVData.Length > 0)
            {
                mpv.SE = (float)actVData.SE(preVData);
                mpv.RMSE = (float)actVData.RMSE(preVData);
                mpv.NSE = (float)actVData.NSE(preVData);
                mpv.PB = (float)actVData.PBIAS(preVData);
                mpv.CORR = (float)actVData.R(preVData);
                mpv.DETC = (float)actVData.R2(preVData);
            }
        }
        else if (mEval.ModelOutputDim > 1)
        {
            //multiple outputs: compute the classification performance data
            var retVal = CalculatePerformance(resultTrain.ActualEx, resultTrain.PredictedEx, null, null);
            retVal.Add("Classes", mEval.Classes.ToList<object>());
            mpt.PerformanceData = retVal;

            //in case a validation set is defined
            if (resultValidation.Actual != null && resultValidation.Actual.Count > 0)
            {
                var retValV = CalculatePerformance(resultValidation.ActualEx, resultValidation.PredictedEx, null, null);
                retValV.Add("Classes", mEval.Classes.ToList<object>());
                mpv.PerformanceData = retValV;
            }
        }

        mEval.TrainPerformance = mpt;
        if (mEval.Classes != null)
            mEval.TrainPerformance.Classes = mEval.Classes.ToArray();

        mEval.ValidationPerformance = mpv;
        if (mEval.Classes != null)
            mEval.ValidationPerformance.Classes = mEval.Classes.ToArray();

        ModelEvaluation = mEval;
        return mEval;
    }
    finally
    {
        //switch the application back into normal mode
        IconUri = "Images/model.png";
        RaisePropertyChangedEvent("IsRunning");
    }
}
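//A minimal sketch of the goodness-of-fit extension methods called in the regression
//branch above, assuming the conventional definitions: sum of squared errors for SE,
//Nash-Sutcliffe efficiency for NSE, percent bias for PBIAS, and Pearson correlation
//for R. The static class name 'FitMetrics' is hypothetical, and the library's actual
//implementations may differ in detail.
using System;
using System.Linq;

public static class FitMetrics
{
    //sum of squared errors between observed and predicted values
    public static double SE(this double[] obs, double[] pred) =>
        obs.Zip(pred, (o, p) => (o - p) * (o - p)).Sum();

    //root mean squared error
    public static double RMSE(this double[] obs, double[] pred) =>
        Math.Sqrt(obs.SE(pred) / obs.Length);

    //Nash-Sutcliffe efficiency: 1 minus SSE over the total variance of the observations
    public static double NSE(this double[] obs, double[] pred)
    {
        var mean = obs.Average();
        return 1.0 - obs.SE(pred) / obs.Sum(o => (o - mean) * (o - mean));
    }

    //percent bias: the average tendency of the model to over- or underestimate
    public static double PBIAS(this double[] obs, double[] pred) =>
        100.0 * obs.Zip(pred, (o, p) => o - p).Sum() / obs.Sum();

    //Pearson correlation coefficient between observed and predicted values
    public static double R(this double[] obs, double[] pred)
    {
        double mo = obs.Average(), mp = pred.Average();
        double cov = obs.Zip(pred, (o, p) => (o - mo) * (p - mp)).Sum();
        double vo = obs.Sum(o => (o - mo) * (o - mo));
        double vp = pred.Sum(p => (p - mp) * (p - mp));
        return cov / Math.Sqrt(vo * vp);
    }

    //coefficient of determination, computed here as the square of R
    public static double R2(this double[] obs, double[] pred)
    {
        var r = obs.R(pred);
        return r * r;
    }
}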