/// <summary>Evaluate on the folds of a dataset split</summary>
/// <param name="recommender">a rating predictor</param>
/// <param name="split">a dataset split</param>
/// <param name="candidate_items">a collection of integers with all candidate items</param>
/// <param name="candidate_item_mode">the mode used to determine the candidate items</param>
/// <param name="compute_fit">if set to true measure fit on the training data as well</param>
/// <param name="show_results">set to true to print results to STDERR</param>
/// <returns>a dictionary containing the average results over the different folds of the split</returns>
public static EvaluationResults DoRatingBasedRankingCrossValidation(
	this RatingPredictor recommender,
	ISplit<IRatings> split,
	IList<int> candidate_items,
	CandidateItems candidate_item_mode = CandidateItems.OVERLAP,
	bool compute_fit = false,
	bool show_results = false)
{
	var avg_results = new ItemRecommendationEvaluationResults();

	Parallel.For(0, (int) split.NumberOfFolds, fold =>
	{
		try
		{
			var split_recommender = (RatingPredictor) recommender.Clone(); // avoid changes in recommender
			split_recommender.Ratings = split.Train[fold];
			split_recommender.Train();

			var test_data_posonly = new PosOnlyFeedback<SparseBooleanMatrix>(split.Test[fold]);
			var training_data_posonly = new PosOnlyFeedback<SparseBooleanMatrix>(split.Train[fold]);
			IList<int> test_users = test_data_posonly.AllUsers;
			var fold_results = Items.Evaluate(split_recommender, test_data_posonly, training_data_posonly, test_users, candidate_items, candidate_item_mode);
			if (compute_fit)
				fold_results["fit"] = (float) split_recommender.ComputeFit();

			// thread-safe stats
			lock (avg_results)
				foreach (var key in fold_results.Keys)
					if (avg_results.ContainsKey(key))
						avg_results[key] += fold_results[key];
					else
						avg_results[key] = fold_results[key];

			if (show_results)
				Console.Error.WriteLine("fold {0} {1}", fold, fold_results);
		}
		catch (Exception e)
		{
			Console.Error.WriteLine("===> ERROR: " + e.Message + e.StackTrace);
			throw;
		}
	});

	foreach (var key in Items.Measures)
		avg_results[key] /= split.NumberOfFolds;
	avg_results["num_users"] /= split.NumberOfFolds;
	avg_results["num_items"] /= split.NumberOfFolds;
	if (compute_fit)
		avg_results["fit"] /= split.NumberOfFolds;

	return avg_results;
}
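// Example usage (a minimal sketch, not part of the original file): ranking items by predicted
// rating with cross-validation. `split` (an ISplit<IRatings>) and `candidate_items` are assumed
// to be built elsewhere; MatrixFactorization is assumed from MyMediaLite.RatingPrediction.
//
//     var recommender = new MatrixFactorization();
//     var results = recommender.DoRatingBasedRankingCrossValidation(
//         split, candidate_items, CandidateItems.OVERLAP,
//         compute_fit: false, show_results: true);
//     foreach (var key in results.Keys)
//         Console.WriteLine("{0}={1}", key, results[key]);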
/// <summary>Print the average of a list of item recommendation evaluation results to STDOUT</summary>
/// <param name="result_list">the per-run evaluation results to average</param>
private static void writeAvgResults(List<MyMediaLite.Eval.ItemRecommendationEvaluationResults> result_list)
{
	var avg_results = new MyMediaLite.Eval.ItemRecommendationEvaluationResults();

	int j = 0;
	foreach (MyMediaLite.Eval.ItemRecommendationEvaluationResults result in result_list)
	{
		foreach (var key in result.Keys)
		{
			if (!avg_results.ContainsKey(key))
				avg_results.Add(key, 0f);
			avg_results[key] += result[key];
		}
		j += 1;
	}

	Console.WriteLine("Avg results after " + j.ToString() + " iterations.");
	foreach (var key in avg_results.Keys)
		Console.WriteLine("{0}={1}", key, avg_results[key] / j);
}
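// Example usage (a minimal sketch): averaging the results of several evaluation runs.
// `num_runs` and RunOneEvaluation are hypothetical placeholders for however the per-run
// results are produced.
//
//     var result_list = new List<MyMediaLite.Eval.ItemRecommendationEvaluationResults>();
//     for (int i = 0; i < num_runs; i++)
//         result_list.Add(RunOneEvaluation(i)); // hypothetical: one evaluation per run
//     writeAvgResults(result_list);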
/// <summary>Evaluate on the folds of a dataset split</summary>
/// <param name="recommender">an item recommender</param>
/// <param name="split">a dataset split</param>
/// <param name="test_users">a collection of integers with all test users</param>
/// <param name="candidate_items">a collection of integers with all candidate items</param>
/// <param name="candidate_item_mode">the mode used to determine the candidate items</param>
/// <param name="compute_fit">if set to true measure fit on the training data as well</param>
/// <param name="show_results">set to true to print results to STDERR</param>
/// <returns>a dictionary containing the average results over the different folds of the split</returns>
public static ItemRecommendationEvaluationResults DoCrossValidation(
	this IRecommender recommender,
	ISplit<IPosOnlyFeedback> split,
	IList<int> test_users,
	IList<int> candidate_items,
	CandidateItems candidate_item_mode = CandidateItems.OVERLAP,
	bool compute_fit = false,
	bool show_results = false)
{
	var avg_results = new ItemRecommendationEvaluationResults();

	if (!(recommender is ItemRecommender))
		throw new ArgumentException("recommender must be of type ItemRecommender");

	Parallel.For(0, (int) split.NumberOfFolds, fold =>
	{
		try
		{
			var split_recommender = (ItemRecommender) recommender.Clone(); // avoid changes in recommender
			split_recommender.Feedback = split.Train[fold];
			split_recommender.Train();
			var fold_results = Items.Evaluate(split_recommender, split.Test[fold], split.Train[fold], test_users, candidate_items, candidate_item_mode);
			if (compute_fit)
				fold_results["fit"] = (float) split_recommender.ComputeFit();

			// thread-safe stats
			lock (avg_results)
				foreach (var key in fold_results.Keys)
					if (avg_results.ContainsKey(key))
						avg_results[key] += fold_results[key];
					else
						avg_results[key] = fold_results[key];

			if (show_results)
				Console.Error.WriteLine("fold {0} {1}", fold, fold_results);
		}
		catch (Exception e)
		{
			Console.Error.WriteLine("===> ERROR: " + e.Message + e.StackTrace);
			throw;
		}
	});

	foreach (var key in Items.Measures)
		avg_results[key] /= split.NumberOfFolds;
	avg_results["num_users"] /= split.NumberOfFolds;
	avg_results["num_items"] /= split.NumberOfFolds;
	if (compute_fit)
		avg_results["fit"] /= split.NumberOfFolds;

	return avg_results;
}
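// Example usage (a minimal sketch, not part of the original file): cross-validation of an item
// recommender on positive-only feedback. `split` (an ISplit<IPosOnlyFeedback>) and `feedback`
// are assumed to be built elsewhere; MostPopular is assumed from MyMediaLite.ItemRecommendation.
//
//     var recommender = new MostPopular();
//     var results = recommender.DoCrossValidation(
//         split, feedback.AllUsers, feedback.AllItems,
//         CandidateItems.OVERLAP, compute_fit: false, show_results: true);
//     foreach (var key in results.Keys)
//         Console.WriteLine("{0}={1}", key, results[key]);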