Example #1
        public static async Task<bool> ExportToCSV(string mlconfigPath, DeviceDescriptor device, string filePathExport)
        {
            try
            {
                var er = await MLEvaluator.EvaluateMLConfig(mlconfigPath, device, DataSetType.Testing, EvaluationType.Results);

                if (er.Actual == null)
                {
                    throw new Exception("Export has failed. No testing nor validation datatset to export.");
                }

                //
                var strLine = new List<string>();

                //include label categorical values in the export
                if (er.OutputClasses != null && er.OutputClasses.Count > 1)
                {
                    var ss = "!#OutputClasses(";
                    for (int i = 0; i < er.OutputClasses.Count; i++)
                    {
                        ss += $"[{i}={er.OutputClasses[i]}],";
                    }
                    var outputClassesStr = ss.Substring(0, ss.Length - 1) + ")";
                    strLine.Add(outputClassesStr);
                }
                //make header
                var headerStr = string.Join(";", er.Header);
                strLine.Add(headerStr);

                //prepare for saving
                for (int i = 0; i < er.Actual.Count; i++)
                {
                    strLine.Add($"{er.Actual[i].ToString(CultureInfo.InvariantCulture)};{er.Predicted[i].ToString(CultureInfo.InvariantCulture)}");
                }

                //store content to file
                //
                await Task.Run(() => File.WriteAllLines(filePathExport, strLine.ToArray()));

                return true;
            }
            catch (Exception)
            {
                throw;
            }
        }
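
A minimal usage sketch for the export method above. The mlconfig and CSV paths are hypothetical placeholders; DeviceDescriptor.UseDefaultDevice() is CNTK's standard call for selecting the default compute device, and the snippet assumes it runs in an async context with access to the method.

        //usage sketch (hypothetical paths): export the test-set evaluation to a CSV file
        var device = DeviceDescriptor.UseDefaultDevice();
        var success = await ExportToCSV(@"C:\models\iris.mlconfig", device, @"C:\models\iris_eval.csv");
        Console.WriteLine(success ? "Export completed." : "Export failed.");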
Example #2
        /// <summary>
        /// Evaluates the model defined in the mlconfig file.
        /// </summary>
        /// <param name="mlconfigPath">Path to the mlconfig file.</param>
        /// <param name="device">CNTK device to run the evaluation on.</param>
        /// <param name="dsType">Dataset (training, validation or testing) to evaluate.</param>
        /// <param name="evType">Type of evaluation result to produce.</param>
        /// <returns>Evaluation result holding the header, actual and predicted values.</returns>
        public static async Task<EvaluationResult> EvaluateMLConfig(string mlconfigPath, DeviceDescriptor device, DataSetType dsType, EvaluationType evType)
        {
            try
            {
                //define eval result
                var er = new EvaluationResult();
                er.OutputClasses = new List<string>() { "" };
                er.Actual        = new List<float>();
                er.Predicted     = new List<float>();
                er.Header        = new List<string>();

                //Load ML configuration file
                var dicMParameters = MLFactory.LoadMLConfiguration(mlconfigPath);
                //add the full path of the model folder, since the model file doesn't contain an absolute path
                dicMParameters.Add("root", MLFactory.GetMLConfigFolder(mlconfigPath));

                // get model data paths
                var dicPath = MLFactory.GetMLConfigComponentPaths(dicMParameters["paths"]);
                //parse feature variables
                var projectValues = dicMParameters["training"].Split(MLFactory.m_cntkSpearator, StringSplitOptions.RemoveEmptyEntries);
                var modelName     = MLFactory.GetParameterValue(projectValues, "TrainedModel");
                var nnModelPath   = Path.Combine(dicMParameters["root"], modelName);
                //check if model exists
                if (!MLFactory.IsFileExist(nnModelPath))
                {
                    return er;
                }
                //
                var dataset = MLFactory.GetDataPath(dicMParameters, dsType);
                if (string.IsNullOrWhiteSpace(dataset))
                {
                    if (dsType == DataSetType.Testing)
                    {
                        dataset = MLFactory.GetDataPath(dicMParameters, DataSetType.Validation);
                    }
                    if (string.IsNullOrWhiteSpace(dataset))
                    {
                        return er;
                    }
                }


                //get output classes in case the ml problem is classification
                var strCls = dicMParameters.ContainsKey("metadata") ? dicMParameters["metadata"] : "";
                var oc     = MLFactory.GetOutputClasses(strCls);
                if (oc != null)
                {
                    er.OutputClasses = oc;
                }

                //Minibatch
                var mbTypeStr = MLFactory.GetParameterValue(projectValues, "Type");
                MinibatchType mbType = (MinibatchType)Enum.Parse(typeof(MinibatchType), mbTypeStr, true);
                var mbSizeStr = MLFactory.GetParameterValue(projectValues, "BatchSize");

                var mf = MLFactory.CreateMLFactory(dicMParameters);


                //perform evaluation
                var evParams = new EvaluationParameters()
                {
                    MinibatchSize = uint.Parse(mbSizeStr),
                    MBSource      = new MinibatchSourceEx(mbType, mf.StreamConfigurations.ToArray(), mf.InputVariables, mf.OutputVariables, dataset, null, MinibatchSource.FullDataSweep, false, 0),
                    Input         = mf.InputVariables,
                    Ouptut        = mf.OutputVariables,
                };

                //evaluate model
                if (evType == EvaluationType.FeaturesOnly)
                {
                    if (!dicMParameters.ContainsKey("metadata"))
                    {
                        throw new Exception("The result cannot be exported to Excel, since no metadata is stored in mlconfig file.");
                    }

                    var desc = MLFactory.ParseRawDataSet(dicMParameters["metadata"]);
                    er.Header = MLFactory.GenerateHeader(desc);
                    var fun = Function.Load(nnModelPath, device);
                    //
                    er.DataSet = await Task.Run(() => MLEvaluator.FeaturesAndLabels(fun, evParams, device));

                    return er;
                }
                else if (evType == EvaluationType.Results)
                {
                    //define header
                    er.Header.Add(evParams.Ouptut.First().Name + "_actual");
                    er.Header.Add(evParams.Ouptut.First().Name + "_predicted");

                    var fun = Function.Load(nnModelPath, device);
                    //
                    var result = await Task.Run(() => MLEvaluator.EvaluateFunction(fun, evParams, device));

                    er.Actual    = result.actual.ToList();
                    er.Predicted = result.predicted.ToList();

                    if (er.OutputClasses.Count < 2 && evParams.Ouptut.First().Shape.Dimensions.Last() > 1)
                    {
                        var result1 = await Task.Run(() => MLEvaluator.EvaluateFunctionEx(fun, evParams, device));

                        er.ActualEx    = result1.actual;
                        er.PredictedEx = result1.predicted;
                    }
                    return er;
                }
                else if (evType == EvaluationType.ResultExtended)
                {
                    //define header
                    er.Header.Add(evParams.Ouptut.First().Name + "_actual");
                    er.Header.Add(evParams.Ouptut.First().Name + "_predicted");
                    er.Actual      = new List<float>();
                    er.Predicted   = new List<float>();
                    er.ActualEx    = new List<List<float>>();
                    er.PredictedEx = new List<List<float>>();

                    //
                    var fun      = Function.Load(nnModelPath, device);
                    var resultEx = await Task.Run(() => MLEvaluator.EvaluateFunctionEx(fun, evParams, device));

                    for (int i = 0; i < resultEx.actual.Count(); i++)
                    {
                        var res1 = MLValue.GetResult(resultEx.actual[i]);
                        er.Actual.Add(res1);
                        var res2 = MLValue.GetResult(resultEx.predicted[i]);
                        er.Predicted.Add(res2);
                    }
                    er.ActualEx    = resultEx.actual;
                    er.PredictedEx = resultEx.predicted;

                    return er;
                }
                else
                {
                    throw new Exception("Unknown evaluation type!");
                }
            }
            catch (Exception)
            {
                throw;
            }
        }
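
A short usage sketch for the evaluator, again with a hypothetical mlconfig path. With EvaluationType.Results, the returned EvaluationResult carries parallel Actual and Predicted lists, which the loop below prints pairwise.

        //usage sketch (hypothetical path): evaluate the trained model on the testing set
        var device = DeviceDescriptor.UseDefaultDevice();
        var er = await MLEvaluator.EvaluateMLConfig(@"C:\models\iris.mlconfig", device,
                                                    DataSetType.Testing, EvaluationType.Results);
        for (int i = 0; i < er.Actual.Count; i++)
            Console.WriteLine($"{er.Actual[i]};{er.Predicted[i]}");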
Example #3
        protected virtual ProgressData progressTraining(TrainingParameters trParams, Trainer trainer,
                                                        Function network, MinibatchSourceEx mbs, int epoch, TrainingProgress progress, DeviceDescriptor device)
        {
            //calculate average training loss and evaluation
            var mbAvgLoss = trainer.PreviousMinibatchLossAverage();
            var mbAvgEval = trainer.PreviousMinibatchEvaluationAverage();

            //by default, use the last minibatch evaluation as the training-set evaluation
            double trainEval = mbAvgEval;

            //when the dataset is huge, evaluating the model against the full training set
            //can take a long time, so it can be skipped via the 'FullTrainingSetEval' parameter
            if (trParams.FullTrainingSetEval)
            {
                var evParams = new EvaluationParameters()
                {
                    MinibatchSize = trParams.BatchSize,
                    MBSource      = new MinibatchSourceEx(mbs.Type, this.StreamConfigurations.ToArray(), this.InputVariables, this.OutputVariables, mbs.TrainingDataFile, null, MinibatchSource.FullDataSweep, false, 0),
                    Ouptut        = OutputVariables,
                    Input         = InputVariables,
                };

                var result = MLEvaluator.EvaluateFunction(trainer.Model(), evParams, device);
                trainEval = MLEvaluator.CalculateMetrics(trainer.EvaluationFunction().Name, result.actual, result.predicted, device);

                //NOTE: when the output has more than one dimension and is numeric rather than
                //categorical (currently supported only by the custom mini-batch source),
                //EvaluateFunctionEx could be used here instead of EvaluateFunction
            }

            string bestModelPath = m_bestModelPath;
            double validEval     = 0;

            //if the validation dataset is empty, skip the validation evaluation
            if (!string.IsNullOrEmpty(mbs.ValidationDataFile))
            {
                var evParams = new EvaluationParameters()
                {
                    MinibatchSize = trParams.BatchSize,
                    MBSource = new MinibatchSourceEx(mbs.Type, this.StreamConfigurations.ToArray(), this.InputVariables, this.OutputVariables, mbs.ValidationDataFile, null, MinibatchSource.FullDataSweep, false, 0),
                    Ouptut   = OutputVariables,
                    Input    = InputVariables,
                };
                //
                var result = MLEvaluator.EvaluateFunction(trainer.Model(), evParams, device);
                validEval = MLEvaluator.CalculateMetrics(trainer.EvaluationFunction().Name, result.actual, result.predicted, device);

                //NOTE: as above, EvaluateFunctionEx could be used for multi-dimensional numeric output
            }

            //decide whether the current model is worth saving to the temp location;
            //depending on the evaluation function, a greater value can mean a better model (e.g. ClassificationAccuracy)
            if (isBetterThanPrevious(trainEval, validEval, StatMetrics.IsGoalToMinimize(trainer.EvaluationFunction())) && trParams.SaveModelWhileTraining)
            {
                //save model
                var strFilePath = $"{trParams.ModelTempLocation}\\model_at_{epoch}of{trParams.Epochs}_epochs_TimeSpan_{DateTime.Now.Ticks}";
                if (!Directory.Exists(trParams.ModelTempLocation))
                {
                    Directory.CreateDirectory(trParams.ModelTempLocation);
                }

                //save temp model
                network.Save(strFilePath);

                //remember the current training and validation evaluation as the previous state
                m_PrevTrainingEval   = trainEval;
                m_PrevValidationEval = validEval;
                bestModelPath        = strFilePath;

                var tpl = Tuple.Create(trainEval, validEval, strFilePath);
                m_ModelEvaluations.Add(tpl);
            }


            m_bestModelPath = bestModelPath;

            //create progressData object
            var prData = new ProgressData();

            prData.EpochTotal           = trParams.Epochs;
            prData.EpochCurrent         = epoch;
            prData.EvaluationFunName    = trainer.EvaluationFunction().Name;
            prData.TrainEval            = trainEval;
            prData.ValidationEval       = validEval;
            prData.MinibatchAverageEval = mbAvgEval;
            prData.MinibatchAverageLoss = mbAvgLoss;
            //prData.BestModel = bestModelPath;

            //progress is reported only when the following condition is satisfied
            if (progress != null && (epoch % trParams.ProgressFrequency == 0 || epoch == 1 || epoch == trParams.Epochs))
            {
                //add info to the history
                m_trainingHistory.Add(new Tuple<int, float, float, float, float>(epoch, (float)mbAvgLoss, (float)mbAvgEval,
                                                                                 (float)trainEval, (float)validEval));

                //send progress
                progress(prData);
                //
                //Console.WriteLine($"Epoch={epoch} of {trParams.Epochs} processed.");
            }

            //return progress data
            return prData;
        }
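
Since the method reports through the progress delegate, a sketch of a consumer callback may help. It assumes TrainingProgress is a delegate type accepting a ProgressData argument, which matches the progress(prData) call above; the formatting is purely illustrative.

        //usage sketch: a callback consuming the ProgressData reported during training
        //(assumes TrainingProgress is a void delegate taking ProgressData, per the call progress(prData))
        TrainingProgress progress = prData =>
        {
            Console.WriteLine($"Epoch {prData.EpochCurrent}/{prData.EpochTotal}: " +
                              $"avgLoss={prData.MinibatchAverageLoss:F4}, " +
                              $"{prData.EvaluationFunName}: train={prData.TrainEval:F4}, valid={prData.ValidationEval:F4}");
        };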
Example #4
        public static async Task<List<string>> PrintPerformance(string mlconfigPath, DataSetType dsType, DeviceDescriptor device)
        {
            try
            {
                var er = await MLEvaluator.EvaluateMLConfig(mlconfigPath, device, dsType, EvaluationType.ResultExtended);

                if (er.Actual == null)
                {
                    throw new Exception("Export has failed. No testing nor validation datatset to export.");
                }

                var pa = MLEvaluator.CalculatePerformance(er, $"{dsType} data");

                //print performance result
                var strB        = new List <string>();
                var problemType = pa.Classes.Count() == 1 ? "Regression" : (pa.Classes.Count() == 2 ? "Binary" : "Multiclass");
                strB.Add("*************               ANNdotNET                    ********************");
                strB.Add("**********************Model Performance Analysis*****************************");
                strB.Add($"Model name={"ML Config Evaluation"}");
                strB.Add($"Problem Type ={problemType}");
                strB.Add($"DataSet Name = {pa.DatSetName}");
                strB.Add(" ");
                //
                if (problemType == "Regression")
                {
                    // strB.Add($"************Performance Parameters Value*************************************");
                    strB.Add(" ");
                    strB.Add($"Squared Error={pa.SE}");
                    strB.Add($"RMSE = {pa.RMSE}");
                    strB.Add($"Correlation Coefficient={pa.CORR}");
                    strB.Add($"Determination Coefficient={pa.DETC}");
                }
                else if (problemType == "Binary")
                {
                    /////////////////////BINARY CLASS/////////////////////////////////////////////
                    // strB.Add($"************Performance Parameters Value*************************************");
                    strB.Add(" ");
                    strB.Add($"Positive Label={pa.Classes.First()} \t\t Negative Label={pa.Classes.Last()}");
                    strB.Add($" ");
                    //
                    strB.Add($"True Positive = {pa.TP} \t\t False Positive = {pa.FP}");
                    strB.Add($"True Negative = {pa.TN} \t\t False Negative = {pa.FN}");
                    strB.Add($" ");
                    //
                    strB.Add($"Accuracy = {pa.Acc}, \t\t Error = {pa.ER}");
                    strB.Add($"Precision= {pa.Precision}, \t\t  Recall = {pa.Recall}");
                    strB.Add($"F1 Score= {pa.F1Score}, \t\t   ");
                    strB.Add($" ");
                    strB.Add($"HSS={pa.HSS}; \t PSS={pa.PSS}");
                    strB.Add($" ");
                    //strB.Add($"* - Heideke Skill Score; \t **- Peirce Scill Score");
                }
                else if (problemType == "Multiclass")
                {
                    /////////////////MULTICLASS//////////////////////////////////
                    //strB.Add($"************Performance Parameters Value*************************************");
                    strB.Add(" ");
                    strB.Add($"Overall Accuracy={pa.OAcc} \t\t\t Average Accuracy={pa.AAcc}");
                    //
                    strB.Add($"Micro avg. Precision = {pa.MicPrec} \t\t Macro avg. Precision = {pa.MacPrec}");
                    strB.Add($"Micro avg. Recall = {pa.MicRcall} \t\t Macro avg. Recall = {pa.MacRcall}");
                    //
                    strB.Add($"HSS ={pa.HSS}; \t\t\t PSS ={pa.PSS}");
                    strB.Add($" ");
                    //strB.Add($"* - Heideke Skill Score; \t **- Peirce Scill Score");
                    //* - Heideke Skill Score; \t **- Peirce Scill Score;
                }
                strB.Add($"************End of Performance Report*************************************");

                return strB;
            }
            catch (Exception)
            {
                throw;
            }
        }
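
A usage sketch, with a hypothetical mlconfig path, that writes the generated performance report to the console.

        //usage sketch (hypothetical path): print the performance report
        var device = DeviceDescriptor.UseDefaultDevice();
        var report = await PrintPerformance(@"C:\models\iris.mlconfig", DataSetType.Testing, device);
        foreach (var line in report)
            Console.WriteLine(line);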