// NOTE(review): the declaration appears truncated in this chunk — the access modifiers and
// return type (presumably something like "public static SweepableEstimator") are not visible
// here; confirm against the surrounding file before relying on this signature.
//
// Extension method that creates a sweepable (hyper-parameter-searchable) estimator wrapping
// the LinearSvm binary-classification trainer.
//   labelColumnName / featureColumnName - input column names, defaulting to "Label"/"Features".
//   optionBuilder  - sweepable option space; falls back to LinearSvmBinaryTrainerSweepableOptions.Default when null.
//   defaultOption  - concrete starting options merged into the builder via SetDefaultOption.
LinearSvm( this SweepableBinaryClassificationTrainers trainer, string labelColumnName = "Label", string featureColumnName = "Features", SweepableOption <LinearSvmTrainer.Options> optionBuilder = null, LinearSvmTrainer.Options defaultOption = null)
{
    var context = trainer.Context;
    // Use the default sweepable option space when the caller did not supply one.
    if (optionBuilder == null)
    {
        optionBuilder = LinearSvmBinaryTrainerSweepableOptions.Default;
    }
    optionBuilder.SetDefaultOption(defaultOption);
    // The factory lambda stamps the caller's column names onto each sampled option set
    // before constructing the concrete LinearSvm trainer.
    // NOTE(review): "PredictedLabel" here is a constant defined elsewhere in the file — not visible in this chunk.
    return (context.AutoML().CreateSweepableEstimator(
        (context, option) =>
        {
            option.LabelColumnName = labelColumnName;
            option.FeatureColumnName = featureColumnName;
            return context.BinaryClassification.Trainers.LinearSvm(option);
        },
        optionBuilder,
        new string[] { labelColumnName, featureColumnName },
        new string[] { PredictedLabel },
        nameof(LinearSvmTrainer)));
}
/// <summary>
/// Predict a target using a linear binary classification model trained with the
/// <see cref="LinearSvmTrainer"/> trainer.
/// </summary>
/// <remarks>
/// <para>
/// Support vector machines map instances into a high-dimensional space in which the two
/// classes become linearly separable: a hyperplane exists with all positive examples on
/// one side and all negative examples on the other.
/// </para>
/// <para>
/// Quadratic programming then finds the separating hyperplane with the largest margin,
/// i.e. the greatest minimal distance to any training instance.
/// </para>
/// </remarks>
/// <param name="catalog">The <see cref="BinaryClassificationCatalog"/>.</param>
/// <param name="options">Advanced arguments to the algorithm.</param>
public static LinearSvmTrainer LinearSupportVectorMachines(this BinaryClassificationCatalog.BinaryClassificationTrainers catalog, LinearSvmTrainer.Options options)
{
    // Guard against null arguments before touching either one.
    Contracts.CheckValue(catalog, nameof(catalog));
    Contracts.CheckValue(options, nameof(options));

    var environment = CatalogUtils.GetEnvironment(catalog);
    return new LinearSvmTrainer(environment, options);
}
static void Main(string[] args)
{
    // Entry point: load a CSV of onceKom rows, train a LinearSvm binary classifier,
    // and print the first few scored test rows.
    MLContext mLContext = new MLContext();

    // NOTE(review): hard-coded user-specific absolute path; consider taking it from args.
    IDataView dane = mLContext.Data.LoadFromTextFile<onceKom>("C:/Users/Patryk/source/repos/ConsoleApp15/Zeszyt1.csv", separatorChar: ',', hasHeader: true);

    // 80/20 train/test split.
    var split = mLContext.Data.TrainTestSplit(dane, testFraction: 0.2);

    // Materialized row enumerables are needed only for the preview printout below;
    // training and scoring use the split IDataViews directly.
    var trainSet = mLContext.Data.CreateEnumerable<onceKom>(split.TrainSet, reuseRowObject: false);
    var testSet = mLContext.Data.CreateEnumerable<onceKom>(split.TestSet, reuseRowObject: false);
    PrintPreviewRows(trainSet, testSet);

    // Trainer hyper-parameters.
    var ustawieniaSvm = new LinearSvmTrainer.Options
    {
        BatchSize = 10,
        PerformProjection = true,
        NumberOfIterations = 10
    };

    var pipeline = mLContext.BinaryClassification.Trainers.LinearSvm(ustawieniaSvm);

    // FIX: train and score on the split IDataViews directly instead of round-tripping
    // them through CreateEnumerable + LoadFromEnumerable, which copied every row for no benefit.
    var model = pipeline.Fit(split.TrainSet);
    var transformedTestData = model.Transform(split.TestSet);

    var predictions = mLContext.Data.CreateEnumerable<onceKom>(transformedTestData, reuseRowObject: false).ToList();

    foreach (var p in predictions.Take(5))
    {
        // NOTE(review): this prints the feature vector under the "Prediction" heading;
        // the scored column is normally "PredictedLabel" — confirm whether onceKom exposes
        // it and switch to that member if so.
        Console.WriteLine($"Label: {p.win}, " + $"Prediction: {p.Features}");
    }

    // Keep the console window open until the user presses Enter.
    Console.ReadLine();
}
public static void Example()
{
    // A fixed seed makes the random data generation — and therefore every printed
    // number below — deterministic across runs.
    var mlContext = new MLContext(seed: 0);

    // Build the training IDataView from 1000 synthetic data points.
    var trainingData = mlContext.Data.LoadFromEnumerable(GenerateRandomDataPoints(1000));

    // Trainer hyper-parameters.
    var trainerOptions = new LinearSvmTrainer.Options
    {
        BatchSize = 10,
        PerformProjection = true,
        NumberOfIterations = 10
    };

    // Define and fit the training pipeline.
    var pipeline = mlContext.BinaryClassification.Trainers.LinearSvm(trainerOptions);
    var model = pipeline.Fit(trainingData);

    // Score a held-out set generated with a different seed so it differs from the training data.
    var testData = mlContext.Data.LoadFromEnumerable(GenerateRandomDataPoints(500, seed: 123));
    var transformedTestData = model.Transform(testData);

    // Materialize the scored rows so they can be enumerated more than once.
    var predictions = mlContext.Data.CreateEnumerable<Prediction>(transformedTestData, reuseRowObject: false).ToList();

    // Show the first five predictions.
    foreach (var prediction in predictions.Take(5))
    {
        Console.WriteLine($"Label: {prediction.Label}, Prediction: {prediction.PredictedLabel}");
    }

    // Expected output:
    //   Label: True, Prediction: True
    //   Label: False, Prediction: True
    //   Label: True, Prediction: True
    //   Label: True, Prediction: True
    //   Label: False, Prediction: False

    // Evaluate and print the overall (non-calibrated) binary-classification metrics.
    var metrics = mlContext.BinaryClassification.EvaluateNonCalibrated(transformedTestData);
    PrintMetrics(metrics);

    // Expected output:
    //   Accuracy: 0.85
    //   AUC: 0.95
    //   F1 Score: 0.86
    //   Negative Precision: 0.91
    //   Negative Recall: 0.80
    //   Positive Precision: 0.80
    //   Positive Recall: 0.92
    //
    //   TEST POSITIVE RATIO: 0.4760 (238.0/(238.0+262.0))
    //   Confusion table
    //             ||======================
    //   PREDICTED || positive | negative | Recall
    //   TRUTH     ||======================
    //    positive ||      218 |       20 | 0.9160
    //    negative ||       53 |      209 | 0.7977
    //             ||======================
    //   Precision ||   0.8044 |   0.9127 |
}