/// <summary>
/// Evaluates ranking quality per test user: AP (average precision), Precision@N and
/// Recall@N at the configured cut-off <c>_position</c>, storing the per-user averages
/// in the context and printing them.
/// </summary>
/// <param name="context">Must be an <c>ItemRankingEvaluationContext</c> whose Model implements <c>IUserItemMapper</c>.</param>
/// <exception cref="Exception">Thrown when the context is not an <c>ItemRankingEvaluationContext</c>.</exception>
public void Evaluate(EvalutationContext <ItemRanking> context)
{
    // Cast once instead of the original is-check + two separate casts.
    var rankingContext = context as ItemRankingEvaluationContext;
    if (rankingContext == null)
    {
        throw new Exception("Wrong evaluation context.");
    }

    var model = (IUserItemMapper)context.Model;

    // One pass per test user: map the ranked and correct item ids into the
    // model's internal id space, then compute all three measures.
    var measures = rankingContext.GetTestUsersRankedList()
        .Select(url =>
        {
            var rankedItems = url.GetMappedItemIds(model.ItemsMap);
            var correctItems = url.GetMappedCorrectItemIds(model.ItemsMap);

            double ap = MML.PrecisionAndRecall.AP(rankedItems, correctItems);
            double precAtN = MML.PrecisionAndRecall.PrecisionAt(rankedItems, correctItems, _position);
            double recallAtN = MML.PrecisionAndRecall.RecallAt(rankedItems, correctItems, _position);

            return new { AP = ap, PrecAtN = precAtN, RecallAtN = recallAtN };
        }).ToList();

    // AP does not depend on _position, so only store/report it the first time
    // this evaluator runs on the context.
    if (!context.Items.ContainsKey("AP"))
    {
        context["AP"] = measures.Select(m => m.AP).Average();
        Console.WriteLine(string.Format("AP: {0:0.0000}", context["AP"]));
    }

    context["PrecAt" + _position] = measures.Select(m => m.PrecAtN).Average();
    context["RecallAt" + _position] = measures.Select(m => m.RecallAtN).Average();

    Console.WriteLine(string.Format("PrecAt {0}: {1:0.0000}", _position, context["PrecAt" + _position]));
    Console.WriteLine(string.Format("RecallAt {0}: {1:0.0000}", _position, context["RecallAt" + _position]));
}
/// <summary>
/// Evaluates the Mean Absolute Error (MAE) of the predicted ratings on the test set
/// and stores it in the context under "MAE".
/// </summary>
/// <param name="context">Evaluation context; its model is trained/tested on demand.</param>
public void Evaluate(EvalutationContext <ItemRating> context)
{
    // make sure that the test samples are predicted
    context.RunDefaultTrainAndTest();

    var testset = context.Dataset != null ? context.Dataset.TestSamples : context.Splitter.Test;

    // Accumulate the count in the same pass instead of enumerating the
    // (possibly lazy) testset a second time via Count().
    double sum = 0;
    int count = 0;
    foreach (var itemRating in testset)
    {
        sum += Math.Abs(itemRating.PredictedRating - itemRating.Rating);
        count++;
    }

    // BUG FIX: MAE is the plain mean of absolute errors. The original applied
    // Math.Sqrt, which belongs to RMSE (and RMSE squares the errors first),
    // so the reported value was neither MAE nor RMSE.
    context["MAE"] = sum / count;
    Console.WriteLine(string.Format("MAE: {0:0.0000}", context["MAE"]));
}
/// <summary>
/// Computes the mean reciprocal rank (MRR) over all test users' ranked lists and
/// stores it in the context under "ReciprocalRank".
/// </summary>
/// <param name="context">Must be an <c>ItemRankingEvaluationContext</c> whose Model implements <c>IUserItemMapper</c>.</param>
/// <exception cref="Exception">Thrown when the context is not an <c>ItemRankingEvaluationContext</c>.</exception>
public void Evaluate(EvalutationContext <ItemRanking> context)
{
    if (!(context is ItemRankingEvaluationContext))
    {
        throw new Exception("Wrong evaluation context.");
    }

    var rankingContext = (ItemRankingEvaluationContext)context;
    var mapper = (IUserItemMapper)context.Model;

    // Reciprocal rank per user, averaged into a single MRR value.
    var perUserReciprocalRanks = rankingContext.GetTestUsersRankedList()
        .Select(rankedList =>
        {
            var predictedIds = rankedList.GetMappedItemIds(mapper.ItemsMap);
            var relevantIds = rankedList.GetMappedCorrectItemIds(mapper.ItemsMap);
            return MyMediaLite.Eval.Measures.ReciprocalRank.Compute(predictedIds, relevantIds);
        });

    double meanReciprocalRank = perUserReciprocalRanks.Average();
    context["ReciprocalRank"] = meanReciprocalRank;
    Console.WriteLine(string.Format("ReciprocalRank: {0:0.0000}", meanReciprocalRank));
}
/// <summary>
/// Predicts the full user-item rating matrix and writes it to <c>_outputPath</c> as a
/// tab-separated file: header row of item ids, then one row per user with the user id
/// followed by the predicted rating for each item.
/// </summary>
/// <param name="context">Evaluation context whose Model implements <c>IPredictor&lt;ItemRating&gt;</c>.</param>
/// <exception cref="Exception">Thrown when the model does not implement <c>IPredictor&lt;ItemRating&gt;</c>.</exception>
public void Evaluate(EvalutationContext <ItemRating> context)
{
    if (!(context.Model is IPredictor <ItemRating>))
    {
        throw new Exception("To predict the full user item matrix the model should implement IPredictor<ItemRating>.");
    }

    // make sure the dataset is trained
    context.RunDefaultTrainAndTest();
    Console.WriteLine("Predicting full user-item matrix...");

    var recommender = (IPredictor <ItemRating>)context.Model;
    var dataset = context.Dataset;

    // BUG FIX: AllSamples holds one entry per rating, so ids repeat many times;
    // deduplicate to predict each (user, item) pair exactly once.
    var allItemIds = dataset.AllSamples.Select(ir => ir.Item.Id).Distinct().ToList();
    var allUserIds = dataset.AllSamples.Select(ir => ir.User.Id).Distinct().ToList();

    // 'using' guarantees the file is closed/flushed even if a prediction throws.
    using (var writer = new StreamWriter(_outputPath))
    {
        // Header: one empty cell for the row-label column, then all item ids.
        // (The original Aggregate with a "\t" seed produced a double leading tab.)
        writer.WriteLine("\t" + string.Join("\t", allItemIds));

        // BUG FIX: rows are users. The original looped items in the outer loop and
        // passed an item id as the user argument of ItemRating(u, i), transposing
        // the matrix and swapping the ids handed to the recommender.
        allUserIds.ForEach(u =>
        {
            string line = u;
            allItemIds.ForEach(i =>
            {
                var itemRating = new ItemRating(u, i);
                recommender.Predict(itemRating);
                line += string.Format("\t{0:0.00}", itemRating.PredictedRating);
            });
            writer.WriteLine(line);
            writer.Flush();
        });
    }
}
/// <summary>
/// Runs the wrapped MyMediaLite recommender's evaluation on the dataset's train/test
/// split, copies every reported measure into the context, and derives F1@5 and F1@10
/// from the precision/recall measures.
/// </summary>
/// <param name="context">Must be an <c>ItemRankingEvaluationContext</c>.</param>
/// <exception cref="Exception">Thrown when the context is not an <c>ItemRankingEvaluationContext</c>.</exception>
public void Evaluate(EvalutationContext <ItemRanking> context)
{
    if (!(context is ItemRankingEvaluationContext))
    {
        throw new Exception("Wrong evaluation context.");
    }

    var dataset = context.Dataset;
    var model = (IPredictor <ItemRanking>)context.Model;

    // make sure the model is trained
    if (!model.IsTrained)
    {
        model.Train(dataset.TrainSamples);
    }

    var mapper = (IUserItemMapper)context.Model;
    var testset = dataset.TestSamples.ToPosOnlyFeedback(mapper.UsersMap, mapper.ItemsMap);
    var trainset = dataset.TrainSamples.ToPosOnlyFeedback(mapper.UsersMap, mapper.ItemsMap);

    var results = _recommender.Evaluate(testset, trainset);
    foreach (var item in results)
    {
        context[item.Key] = item.Value;
        Console.WriteLine(string.Format("{0}: {1:0.0000}", item.Key, item.Value));
    }

    // calculate F1@5 and F1@10
    var precAt5 = (float)context["prec@5"];
    // BUG FIX: the original read "prec@5" here as well, making F1@10 wrong.
    var precAt10 = (float)context["prec@10"];
    var recallAt5 = (float)context["recall@5"];
    var recallAt10 = (float)context["recall@10"];

    // Guard against 0/0 when both precision and recall are zero (F1 is 0 then, not NaN).
    var f1At5 = precAt5 + recallAt5 > 0f ? 2f * precAt5 * recallAt5 / (precAt5 + recallAt5) : 0f;
    var f1At10 = precAt10 + recallAt10 > 0f ? 2f * precAt10 * recallAt10 / (precAt10 + recallAt10) : 0f;

    Console.WriteLine(string.Format("F1@5: {0:0.0000}", f1At5));
    Console.WriteLine(string.Format("F1@10: {0:0.0000}", f1At10));
}
/// <summary>
/// Runs the wrapped MyMediaLite recommender's evaluation on the splitter's train/test
/// sets (positive-only feedback), copies every reported measure into the context, and
/// derives F1@5 and F1@10 from the precision/recall measures.
/// </summary>
/// <param name="context">Evaluation context whose Model implements <c>IPredictor&lt;PositiveFeedback&gt;</c> and <c>IUserItemMapper</c>.</param>
public void Evaluate(EvalutationContext <PositiveFeedback> context)
{
    var model = (IPredictor <PositiveFeedback>)context.Model;
    var trainSet = context.Splitter.Train;
    var testSet = context.Splitter.Test;   // renamed from the typo 'tesSet'

    // make sure the model is trained
    if (!model.IsTrained)
    {
        model.Train(trainSet);
    }

    var mapper = (IUserItemMapper)context.Model;
    var testset = testSet.ToPosOnlyFeedback(mapper.UsersMap, mapper.ItemsMap);
    var trainset = trainSet.ToPosOnlyFeedback(mapper.UsersMap, mapper.ItemsMap);

    var results = _recommender.Evaluate(testset, trainset);
    foreach (var item in results)
    {
        context[item.Key] = item.Value;
        Console.WriteLine(string.Format("{0}: {1:0.0000}", item.Key, item.Value));
    }

    // calculate F1@5 and F1@10
    var precAt5 = (float)context["prec@5"];
    // BUG FIX: the original read "prec@5" here as well, making F1@10 wrong.
    var precAt10 = (float)context["prec@10"];
    var recallAt5 = (float)context["recall@5"];
    var recallAt10 = (float)context["recall@10"];

    // Guard against 0/0 when both precision and recall are zero (F1 is 0 then, not NaN).
    var f1At5 = precAt5 + recallAt5 > 0f ? 2f * precAt5 * recallAt5 / (precAt5 + recallAt5) : 0f;
    var f1At10 = precAt10 + recallAt10 > 0f ? 2f * precAt10 * recallAt10 / (precAt10 + recallAt10) : 0f;

    Console.WriteLine(string.Format("F1@5: {0:0.0000}", f1At5));
    Console.WriteLine(string.Format("F1@10: {0:0.0000}", f1At10));
}
/// <summary>
/// Creates a pipeline bound to the given evaluation context, starting with an
/// empty list of evaluators.
/// </summary>
/// <param name="context">The evaluation context shared by all evaluators in this pipeline.</param>
public EvaluationPipeline(EvalutationContext <T> context)
{
    Evaluators = new List <IEvaluator <T> >();
    Context = context;
}