private static Task ClassifyAsync(
    IModelDefinitionBuilder<SpamInput, MulticlassClassificationFoldsAverageMetricsResult> modelBuilder,
    string text,
    string expectedResult,
    ILogger logger,
    CancellationToken cancellationToken)
{
    return Task.Run(
        () =>
        {
            // Create a prediction engine from the trained model and score a single message.
            var predictor = modelBuilder.MLContext.Model.CreatePredictionEngine<SpamInput, SpamPrediction>(modelBuilder.Model);

            var input = new SpamInput { Message = text };
            var prediction = predictor.Predict(input);

            var result = prediction.IsSpam == "spam" ? "spam" : "not spam";

            // Log at Information level when the prediction matches the expected label, Warning otherwise.
            if (prediction.IsSpam == expectedResult)
            {
                logger.LogInformation("[ClassifyAsync][Predict] result: '{0}' is {1}", input.Message, result);
            }
            else
            {
                logger.LogWarning("[ClassifyAsync][Predict] result: '{0}' is {1}", input.Message, result);
            }
        },
        cancellationToken);
}
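// A minimal sketch of the input/output POCOs that the spam overload above assumes.
// The property names (Message, IsSpam) are taken from the method body; the
// [ColumnName("PredictedLabel")] mapping is an assumption and may differ in the actual
// project ([ColumnName] requires a using Microsoft.ML.Data directive).
public class SpamInput
{
    public string Message { get; set; }
}

public class SpamPrediction
{
    [ColumnName("PredictedLabel")] // assumed mapping to the trainer's predicted label column
    public string IsSpam { get; set; }
}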
private static Task ClassifyAsync(
    IModelDefinitionBuilder<SentimentIssue, BinaryClassificationMetricsResult> modelBuilder,
    string text,
    bool expectedResult,
    ILogger logger,
    CancellationToken cancellationToken)
{
    return Task.Run(
        () =>
        {
            // Create a prediction engine from the trained model and score a single sentiment sample.
            var predictor = modelBuilder.MLContext.Model.CreatePredictionEngine<SentimentIssue, SentimentPrediction>(modelBuilder.Model);

            var input = new SentimentIssue { Text = text };
            var prediction = predictor.Predict(input);

            var result = prediction.Prediction ? "Toxic" : "Non Toxic";

            // Log at Information level when the prediction matches the expected label, Warning otherwise.
            if (prediction.Prediction == expectedResult)
            {
                logger.LogInformation(
                    "[ClassifyAsync][Predict] result: '{0}' is {1} Probability of being toxic: {2}",
                    input.Text,
                    result,
                    prediction.Probability);
            }
            else
            {
                logger.LogWarning(
                    "[ClassifyAsync][Predict] result: '{0}' is {1} Probability of being toxic: {2}",
                    input.Text,
                    result,
                    prediction.Probability);
            }
        },
        cancellationToken);
}
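// A minimal sketch of the sentiment POCOs assumed by the overload above, plus a hypothetical
// call site. Text, Prediction, and Probability come from the method body; the
// [ColumnName("PredictedLabel")] mapping and the sample call values are assumptions.
public class SentimentIssue
{
    public string Text { get; set; }
}

public class SentimentPrediction
{
    [ColumnName("PredictedLabel")] // assumed mapping to the trainer's predicted label column
    public bool Prediction { get; set; }

    public float Probability { get; set; }
}

// Hypothetical usage, assuming a trained modelBuilder and an ILogger are already in scope:
// await ClassifyAsync(modelBuilder, "This is a horrible movie", true, logger, CancellationToken.None);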