// Compares, user by user, the recommender's top-N list against the user's own
// preferences (sorted by value, descending) and feeds a per-user agreement score
// into the running-average tracker. `tag` labels the output produced by
// scoreCommonSubset. NOTE(review): semantics of setBits/mask/scoreCommonSubset are
// defined elsewhere — the comments below describe only what is visible here.
public static void Evaluate(IRecommender recommender, IDataModel model, int samples, IRunningAverage tracker, String tag) {
    printHeader();
    var users = recommender.GetDataModel().GetUserIDs();
    while (users.MoveNext()) {
        long userID = users.Current;
        // Ask for every item so the full ranking is available for comparison.
        var recs1 = recommender.Recommend(userID, model.GetNumItems());
        IPreferenceArray prefs2 = model.GetPreferencesFromUser(userID);
        prefs2.SortByValueReversed();
        FastIDSet commonSet = new FastIDSet();
        long maxItemID = setBits(commonSet, recs1, samples);
        FastIDSet otherSet = new FastIDSet();
        maxItemID = Math.Max(maxItemID, setBits(otherSet, prefs2, samples));
        // Restrict to items present in both rankings, capped at `samples`.
        int max = mask(commonSet, otherSet, maxItemID);
        max = Math.Min(max, samples);
        if (max < 2) {
            // Fewer than two common items: no meaningful rank comparison possible.
            continue;
        }
        long[] items1 = getCommonItems(commonSet, recs1, max);
        long[] items2 = getCommonItems(commonSet, prefs2, max);
        double variance = scoreCommonSubset(tag, userID, samples, max, items1, items2);
        tracker.AddDatum(variance);
    }
}
// Verifies the basic recommendation contract: user 1 gets exactly one item
// (ID 2, score 0.1), and the result is stable across a Refresh(null) call.
public void testRecommender() {
    IRecommender recommender = buildRecommender();

    // Initial request: a single recommendation for user 1.
    IList<IRecommendedItem> recommended = recommender.Recommend(1, 1);
    Assert.NotNull(recommended);
    Assert.AreEqual(1, recommended.Count);

    IRecommendedItem top = recommended[0];
    Assert.AreEqual(2, top.GetItemID());
    Assert.AreEqual(0.1f, top.GetValue(), EPSILON);

    // Refreshing must not change the recommendation.
    recommender.Refresh(null);
    recommended = recommender.Recommend(1, 1);
    top = recommended[0];
    Assert.AreEqual(2, top.GetItemID());
    Assert.AreEqual(0.1f, top.GetValue(), EPSILON);
}
// Verifies that the single recommendation for user 1 is the item with the
// best rating/score.
public void testBestRating() {
    IRecommender recommender = buildRecommender();
    IList <IRecommendedItem> recommended = recommender.Recommend(1, 1);
    Assert.NotNull(recommended);
    Assert.AreEqual(1, recommended.Count);
    IRecommendedItem firstRecommended = recommended[0];
    // Item 2 should be recommended because it has a greater rating/score.
    // NOTE(review): the original comment said "item one" while the assertion
    // expects item ID 2 — the assertion is taken as authoritative; confirm.
    Assert.AreEqual(2, firstRecommended.GetItemID());
    Assert.AreEqual(0.1f, firstRecommended.GetValue(), EPSILON);
}
// Load test: samples roughly 1000 users from the model, issues one warm-up
// recommendation, then times a batch of Recommend calls and returns the
// aggregated timing statistics.
public static LoadStatistics runLoad(IRecommender recommender, int howMany) {
    IDataModel dataModel = recommender.GetDataModel();
    double sampleRate = 1000.0 / dataModel.GetNumUsers();
    var sampledUsers = SamplinglongPrimitiveIterator.MaybeWrapIterator(dataModel.GetUserIDs(), sampleRate);

    // Use the first sampled user purely as a warm-up call (not timed).
    if (sampledUsers.MoveNext()) {
        recommender.Recommend(sampledUsers.Current, howMany);
    }

    var workItems = new List<Action>();
    while (sampledUsers.MoveNext()) {
        workItems.Add(new LoadCallable(recommender, sampledUsers.Current).call);
    }

    AtomicInteger noEstimateCounter = new AtomicInteger();
    IRunningAverageAndStdDev timing = new FullRunningAverageAndStdDev();
    AbstractDifferenceRecommenderEvaluator.execute(workItems, noEstimateCounter, timing);
    return new LoadStatistics(timing);
}
/// <summary>Write item predictions (scores) for a given user to a TextWriter object</summary>
/// <param name="recommender">the <see cref="IRecommender"/> to use for making the predictions</param>
/// <param name="user_id">ID of the user to make recommendations for</param>
/// <param name="candidate_items">list of candidate items</param>
/// <param name="ignore_items">list of items for which no predictions should be made</param>
/// <param name="num_predictions">the number of items to return per user, -1 if there should be no limit</param>
/// <param name="writer">the <see cref="TextWriter"/> to write to</param>
/// <param name="user_mapping">an <see cref="IMapping"/> object for the user IDs</param>
/// <param name="item_mapping">an <see cref="IMapping"/> object for the item IDs</param>
static public void WritePredictions(
    this IRecommender recommender,
    int user_id,
    ICollection<int> candidate_items,
    ICollection<int> ignore_items,
    int num_predictions,
    TextWriter writer,
    IMapping user_mapping, IMapping item_mapping)
{
    // Null mappings mean "IDs are already external": fall back to identity.
    if (user_mapping == null) {
        user_mapping = new IdentityMapping();
    }
    if (item_mapping == null) {
        item_mapping = new IdentityMapping();
    }

    var ordered_items = recommender.Recommend(
        user_id, n: num_predictions,
        ignore_items: ignore_items, candidate_items: candidate_items);

    // Output format: "<user>\t[item:score,item:score,...]" with invariant-culture scores.
    writer.Write("{0}\t[", user_mapping.ToOriginalID(user_id));
    for (int index = 0; index < ordered_items.Count; index++) {
        if (index > 0) {
            writer.Write(",");
        }
        int item_id = ordered_items[index].Item1;
        float score = ordered_items[index].Item2;
        writer.Write("{0}:{1}", item_mapping.ToOriginalID(item_id), score.ToString(CultureInfo.InvariantCulture));
    }
    writer.WriteLine("]");
}
/// <summary>
/// For each user in a given instance source recommends items from the set of items rated by the user.
/// </summary>
/// <typeparam name="TFeatureSource">The type of a feature source used by the recommendation engine.</typeparam>
/// <param name="recommender">The recommendation engine.</param>
/// <param name="instanceSource">The instance source.</param>
/// <param name="maxRecommendedItemCount">Maximum number of items to recommend to a user.</param>
/// <param name="minRecommendationPoolSize">
/// If a user has less than <paramref name="minRecommendationPoolSize"/> possible items to recommend,
/// it will be skipped.
/// </param>
/// <param name="featureSource">The source of features.</param>
/// <returns>The list of recommended items for every user in <paramref name="instanceSource"/>.</returns>
public IDictionary<TUser, IEnumerable<TItem>> RecommendRatedItems<TFeatureSource>(
    IRecommender<TInstanceSource, TUser, TItem, TPredictedRating, TPredictedRatingDist, TFeatureSource> recommender,
    TInstanceSource instanceSource,
    int maxRecommendedItemCount,
    int minRecommendationPoolSize,
    TFeatureSource featureSource = default(TFeatureSource))
{
    if (recommender == null)
    {
        throw new ArgumentNullException("recommender");
    }

    if (maxRecommendedItemCount <= 0)
    {
        throw new ArgumentOutOfRangeException("maxRecommendedItemCount", "The number of items to recommend should be positive.");
    }

    if (minRecommendationPoolSize <= 0)
    {
        throw new ArgumentOutOfRangeException("minRecommendationPoolSize", "The minimum size of the recommendation pool should be positive.");
    }

    IDictionary<TUser, IEnumerable<TItem>> recommendations = new Dictionary<TUser, IEnumerable<TItem>>();
    foreach (TUser user in this.mapping.GetUsers(instanceSource))
    {
        // FIX: materialize the rated-item sequence once. The original kept the raw
        // IEnumerable and enumerated it twice (Count(), then again via the recommender),
        // which is wasteful and incorrect for one-shot enumerables.
        IList<TItem> itemSubset = this.mapping.GetItemsRatedByUser(instanceSource, user).ToList();
        if (itemSubset.Count >= minRecommendationPoolSize)
        {
            // Restrict the recommender's pool to items this user has rated.
            recommender.ItemSubset = itemSubset;
            IEnumerable<TItem> recommendedItems = recommender.Recommend(user, maxRecommendedItemCount, featureSource);
            recommendations.Add(user, recommendedItems);
        }
    }

    return recommendations;
}
// Load test: samples about 1000 users, warms the recommender up with one call,
// then schedules one timed Recommend call per remaining sampled user and
// returns the collected timing statistics.
public static LoadStatistics runLoad(IRecommender recommender, int howMany) {
    IDataModel dataModel = recommender.GetDataModel();
    int numUsers = dataModel.GetNumUsers();
    var userSampler = SamplinglongPrimitiveIterator.MaybeWrapIterator(
        dataModel.GetUserIDs(), 1000.0 / numUsers);

    // First sampled user is consumed by the (untimed) warm-up call.
    if (userSampler.MoveNext()) {
        recommender.Recommend(userSampler.Current, howMany);
    }

    var callables = new List<Action>();
    while (userSampler.MoveNext()) {
        var loadCall = new LoadCallable(recommender, userSampler.Current);
        callables.Add(loadCall.call);
    }

    var noEstimateCounter = new AtomicInteger();
    IRunningAverageAndStdDev timing = new FullRunningAverageAndStdDev();
    AbstractDifferenceRecommenderEvaluator.execute(callables, noEstimateCounter, timing);
    return new LoadStatistics(timing);
}
/// <summary>Evaluation for rankings of items</summary>
/// <remarks>
/// User-item combinations that appear in both sets are ignored for the test set, and thus in the evaluation,
/// except the boolean argument repeated_events is set.
///
/// The evaluation measures are listed in the Measures property.
/// Additionally, 'num_users' and 'num_items' report the number of users that were used to compute the results
/// and the number of items that were taken into account.
///
/// Literature:
/// <list type="bullet">
///   <item><description>
///   C. Manning, P. Raghavan, H. Schütze: Introduction to Information Retrieval, Cambridge University Press, 2008
///   </description></item>
/// </list>
///
/// On multi-core/multi-processor systems, the routine tries to use as many cores as possible,
/// which should lead to an almost linear speed-up.
/// </remarks>
/// <param name="recommender">item recommender</param>
/// <param name="test">test cases</param>
/// <param name="training">training data</param>
/// <param name="test_users">a list of integers with all test users; if null, use all users in the test cases</param>
/// <param name="candidate_items">a list of integers with all candidate items</param>
/// <param name="candidate_item_mode">the mode used to determine the candidate items</param>
/// <param name="repeated_events">allow repeated events in the evaluation (i.e. items accessed by a user before may be in the recommended list); default is No</param>
/// <param name="n">length of the item list to evaluate -- if set to -1 (default), use the complete list, otherwise compute evaluation measures on the top n items</param>
/// <returns>a dictionary containing the evaluation results</returns>
static public ItemRecommendationEvaluationResults Evaluate(
    this IRecommender recommender,
    IPosOnlyFeedback test,
    IPosOnlyFeedback training,
    IList <int> test_users = null,
    IList <int> candidate_items = null,
    CandidateItems candidate_item_mode = CandidateItems.OVERLAP,
    RepeatedEvents repeated_events = RepeatedEvents.No,
    int n = -1)
{
    if (test_users == null) {
        test_users = test.AllUsers;
    }
    candidate_items = Candidates(candidate_items, candidate_item_mode, test, training);
    var result = new ItemRecommendationEvaluationResults();
    // make sure that the user matrix is completely initialized before entering parallel code
    var training_user_matrix = training.UserMatrix;
    var test_user_matrix = test.UserMatrix;
    int num_users = 0;
    Parallel.ForEach(test_users, user_id => {
        try {
            // Relevant items for this user = test items that are also candidates.
            var correct_items = new HashSet <int>(test_user_matrix[user_id]);
            correct_items.IntersectWith(candidate_items);
            if (correct_items.Count == 0) {
                return;
            }
            // Unless repeated events are allowed, items the user already accessed
            // in training are excluded from the recommendation list.
            var ignore_items_for_this_user = new HashSet <int>(
                repeated_events == RepeatedEvents.Yes || training_user_matrix[user_id] == null ? new int[0] : training_user_matrix[user_id]
            );
            ignore_items_for_this_user.IntersectWith(candidate_items);
            int num_candidates_for_this_user = candidate_items.Count - ignore_items_for_this_user.Count;
            if (correct_items.Count == num_candidates_for_this_user) {
                // Every remaining candidate is correct: ranking is trivially perfect, skip.
                return;
            }
            var prediction = recommender.Recommend(user_id, candidate_items: candidate_items, n: n, ignore_items: ignore_items_for_this_user);
            var prediction_list = (from t in prediction select t.Item1).ToArray();
            // Items the recommender could not score; AUC treats them as randomly ranked.
            int num_dropped_items = num_candidates_for_this_user - prediction.Count;
            double auc = AUC.Compute(prediction_list, correct_items, num_dropped_items);
            double map = PrecisionAndRecall.AP(prediction_list, correct_items);
            double ndcg = NDCG.Compute(prediction_list, correct_items);
            double rr = ReciprocalRank.Compute(prediction_list, correct_items);
            var positions = new int[] { 5, 10 };
            var prec = PrecisionAndRecall.PrecisionAt(prediction_list, correct_items, positions);
            var recall = PrecisionAndRecall.RecallAt(prediction_list, correct_items, positions);
            // thread-safe incrementing
            lock (result) {
                num_users++;
                result["AUC"] += (float)auc;
                result["MAP"] += (float)map;
                result["NDCG"] += (float)ndcg;
                result["MRR"] += (float)rr;
                result["prec@5"] += (float)prec[5];
                result["prec@10"] += (float)prec[10];
                result["recall@5"] += (float)recall[5];
                result["recall@10"] += (float)recall[10];
            }
            // NOTE(review): num_users is read here without holding the lock; the progress
            // output may occasionally be skipped or duplicated under contention — confirm
            // whether this best-effort behavior is intended.
            if (num_users % 1000 == 0) {
                Console.Error.Write(".");
            }
            if (num_users % 60000 == 0) {
                Console.Error.WriteLine();
            }
        } catch (Exception e) {
            Console.Error.WriteLine("===> ERROR: " + e.Message + e.StackTrace);
            throw;
        }
    });
    // Turn accumulated sums into per-user averages.
    foreach (string measure in Measures) {
        result[measure] /= num_users;
    }
    result["num_users"] = num_users;
    result["num_lists"] = num_users;
    result["num_items"] = candidate_items.Count;
    return(result);
}
// /// <summary> // /// Gets string for subselecting all id's used from database // /// </summary> // /// <returns>The all identifiers string for database.</returns> // static private string getAllIdsStringForDatabase(IList<int> allItems){ // // string all_ids = "("; // bool first = true; // foreach (int id in allItems) { // if (first) { // all_ids += id.ToString (); // first = false; // } else // all_ids += "," + id.ToString (); // } // all_ids += ")"; // return all_ids; // } // //// static public void getWeatherVectorLocation(IList<int> items, string connection_string, ref Dictionary<int,IList<double>> venueWeatherVectors){ //// DBConnect conn = new DBConnect (connection_string); //// List<string>[] res; //// res = conn.Select ("select * " + //// " from weather_avgs_per_venue where id_int in "+getAllIdsStringForDatabase(items), 9); //// List<string> all_ids = res [0]; //// List<string> temperature = res [1]; //// List<string> precip_intensity = res [2]; //// List<string> wind_speed = res [3]; //// List<string> humidity = res [4]; //// List<string> cloud_cover = res [5]; //// List<string> pressure = res [6]; //// List<string> visibility = res [7]; //// List<string> moonphase = res [8]; //// int i = 0; //// foreach(string id in all_ids){ //// venueWeatherVectors.Add(int.Parse (id),new List<double> { double.Parse(temperature [i]), double.Parse(precip_intensity [i]), double.Parse(wind_speed [i]), double.Parse(humidity [i]), //// double.Parse(cloud_cover [i])}); //// i++; //// } //// } /// <summary>Evaluation for rankings of items</summary> /// <remarks> /// User-item combinations that appear in both sets are ignored for the test set, and thus in the evaluation, /// except the boolean argument repeated_events is set. /// /// The evaluation measures are listed in the Measures property. /// Additionally, 'num_users' and 'num_items' report the number of users that were used to compute the results /// and the number of items that were taken into account. 
/// /// Literature: /// <list type="bullet"> /// <item><description> /// C. Manning, P. Raghavan, H. Schütze: Introduction to Information Retrieval, Cambridge University Press, 2008 /// </description></item> /// </list> /// /// On multi-core/multi-processor systems, the routine tries to use as many cores as possible, /// which should to an almost linear speed-up. /// </remarks> /// <param name="recommender">item recommender</param> /// <param name="test">test cases</param> /// <param name="training">training data</param> /// <param name="n">length of the item list to evaluate -- if set to -1 (default), use the complete list, otherwise compute evaluation measures on the top n items</param> /// <returns>a dictionary containing the evaluation results (default is false)</returns> // static public ItemRecommendationEvaluationResults Evaluate( // this IRecommender recommender, // ITimedRatings test, // ITimedRatings training, // string connection_string = "", // int n = -1,double alpha = 0.1) // { // // var result = new ItemRecommendationEvaluationResults(); // var candidates = test.AllItems.Intersect(training.AllItems).ToList(); // int num_users = 0; // ThreadPool.SetMinThreads(test.AllUsers.Count, test.AllUsers.Count); // Dictionary<int,IList<int>> user_items = test.getItemsUserDict (); // ParallelOptions po = new ParallelOptions{ // MaxDegreeOfParallelism = Environment.ProcessorCount // }; // // //foreach(int user_id in test.AllUsers){ // Parallel.ForEach (test.AllUsers, po, user_id => { // try { // n = user_items [user_id].Count; // IList<Tuple<int,float>> prediction; // prediction = recommender.Recommend (user_id, candidate_items: candidates, n: n); // var prediction_list = (from t in prediction select t.Item1).ToArray (); // int num_candidates_for_this_user = candidates.Count (); // int num_dropped_items = num_candidates_for_this_user - prediction.Count; // var correct_items = user_items [user_id].Intersect (candidates).ToList (); // if (correct_items.Count () == 
//          0)
//              return;
//
//          double auc = AUC.Compute (prediction_list, correct_items, num_dropped_items);
//          double map = PrecisionAndRecall.AP (prediction_list, correct_items);
//          double ndcg = NDCG.Compute (prediction_list, correct_items);
//          double rr = ReciprocalRank.Compute (prediction_list, correct_items);
//          var positions = new int[] { 5, 10 };
//          var prec = PrecisionAndRecall.PrecisionAt (prediction_list, correct_items, positions);
//          var recall = PrecisionAndRecall.RecallAt (prediction_list, correct_items, positions);
//
//          // thread-safe incrementing
//          lock (result) {
//              num_users++;
//              result ["AUC"] += (float)auc;
//              result ["MAP"] += (float)map;
//              result ["NDCG"] += (float)ndcg;
//              result ["MRR"] += (float)rr;
//              result ["prec@5"] += (float)prec [5];
//              result ["prec@10"] += (float)prec [10];
//              result ["recall@5"] += (float)recall [5];
//              result ["recall@10"] += (float)recall [10];
//          }
//
//          if (num_users % 1000 == 0)
//              Console.Error.Write (".");
//          if (num_users % 60000 == 0)
//              Console.Error.WriteLine ();
//      } catch (Exception e) {
//          Console.Error.WriteLine ("===> ERROR: " + e.Message + e.StackTrace);
//          throw;
//      }
//  });
//
//  foreach (string measure in Measures)
//      result[measure] /= num_users;
//  result["num_users"] = num_users;
//  result["num_lists"] = num_users;
//  result["num_items"] = candidates.Count();
//
//  return result;
// }

/// <summary>Leave-one-out, time-ordered evaluation of item recommendations.</summary>
/// <remarks>
/// For each (user, item, time) rating in <paramref name="test"/> whose item was not already
/// rated by the user in training, a top-20 recommendation list is requested (time-aware if
/// <paramref name="time_aware"/> is set) and ranking measures for the single held-out item
/// are folded into a per-user running average. Prints per-user and average results.
/// NOTE(review): parameters <paramref name="n"/> and <paramref name="alpha"/> are currently
/// unused — confirm whether that is intentional.
/// </remarks>
/// <param name="recommender">item recommender; must implement ITimeAwareRatingPredictor when time_aware is true</param>
/// <param name="test">test ratings (time-ordered)</param>
/// <param name="training">training ratings</param>
/// <param name="dataset">dataset label used in the printed summary</param>
/// <param name="time_aware">if true, use RecommendTime with the rating's timestamp</param>
/// <param name="n">unused</param>
/// <param name="alpha">unused</param>
/// <returns>the averaged prec@5 over all test users</returns>
static public double EvaluateTime(this IRecommender recommender, ITimedRatings test, ITimedRatings training, string dataset, bool time_aware, int n = -1, double alpha = 0.1)
{
    Dictionary<int, ItemRecommendationEvaluationResults> userRecommendationResults = new Dictionary<int, ItemRecommendationEvaluationResults>();
    foreach (int user in test.AllUsers) {
        userRecommendationResults.Add(user, new ItemRecommendationEvaluationResults());
    }
    // Only items known to both sets can be recommended and scored.
    var candidates = test.AllItems.Intersect(training.AllItems).ToList();
    ParallelOptions po = new ParallelOptions {
        MaxDegreeOfParallelism = Environment.ProcessorCount
    };
    // NOTE(review): `init` implements a "halve after the first datum" running-average
    // scheme and is shared by all threads (mutated under the lock below); its semantics
    // look fragile — confirm the intended averaging behavior.
    bool init = true;
    Dictionary<int, IList<int>> trainingUserItems = training.getItemsUserDict();
    // FIX: Parallel.For's upper bound is exclusive, so the original
    // "test.Users.Count - 1" silently skipped the last test rating.
    Parallel.For(0, test.Users.Count, po, index => {
        try {
            DateTime time = test.Times[index];
            int user = test.Users[index];
            int item = test.Items[index];
            // Skip items the user already rated in training.
            if (trainingUserItems[user].Contains(item)) {
                return;
            }
            IList<int> correct_items = new List<int>();
            correct_items.Add(item);
            correct_items = correct_items.Intersect(candidates).ToList();
            if (correct_items.Count() == 0) {
                return;
            }
            IList<Tuple<int, float>> prediction;
            if (time_aware) {
                prediction = ((ITimeAwareRatingPredictor)recommender).RecommendTime(user, time, candidate_items: candidates, n: 20);
            } else {
                prediction = recommender.Recommend(user, candidate_items: candidates, n: 20);
            }
            var prediction_list = (from t in prediction select t.Item1).ToArray();
            double auc = AUC.Compute(prediction_list, correct_items, 0);
            double map = PrecisionAndRecall.AP(prediction_list, correct_items);
            double ndcg = NDCG.Compute(prediction_list, correct_items);
            double rr = ReciprocalRank.Compute(prediction_list, correct_items);
            var positions = new int[] { 5, 10 };
            var prec = PrecisionAndRecall.PrecisionAt(prediction_list, correct_items, positions);
            var recall = PrecisionAndRecall.RecallAt(prediction_list, correct_items, positions);
            lock (userRecommendationResults) {
                ItemRecommendationEvaluationResults res = userRecommendationResults[user];
                res["AUC"] += (float)auc;
                res["MAP"] += (float)map;
                res["NDCG"] += (float)ndcg;
                res["MRR"] += (float)rr;
                res["prec@5"] += (float)prec[5];
                res["prec@10"] += (float)prec[10];
                res["recall@5"] += (float)recall[5];
                res["recall@10"] += (float)recall[10];
                if (!init) {
                    res["AUC"] /= 2;
                    res["MAP"] /= 2;
                    res["NDCG"] /= 2;
                    res["MRR"] /= 2;
                    res["prec@5"] /= 2;
                    res["prec@10"] /= 2;
                    res["recall@5"] /= 2;
                    res["recall@10"] /= 2;
                }
                init = false;
                userRecommendationResults[user] = res;
            }
        } catch (Exception e) {
            Console.Error.WriteLine("===> ERROR: " + e.Message + e.StackTrace);
            throw;
        }
    });
    ItemRecommendationEvaluationResults avg_res = new ItemRecommendationEvaluationResults();
    int num_users = 0;
    Console.WriteLine("Detailed user results:");
    // NOTE(review): the original source was corrupted here ("User: "****** ...);
    // this per-user print/accumulate loop is a reconstruction — confirm against
    // version control.
    foreach (int user in userRecommendationResults.Keys) {
        Console.Write("User: " + user);
        foreach (string key in userRecommendationResults[user].Keys) {
            avg_res[key] += userRecommendationResults[user][key];
            Console.WriteLine("{0}={1}", key, userRecommendationResults[user][key]);
        }
        num_users++;
    }
    foreach (string measure in Measures) {
        avg_res[measure] /= num_users;
    }
    Console.WriteLine(dataset + " Avg results:");
    foreach (var key in avg_res.Keys) {
        Console.WriteLine("{0}={1}", key, avg_res[key]);
    }
    return avg_res["prec@5"];
}
// Issues one top-N recommendation request for this worker's user; used as the
// timed unit of work in the load test.
public void call() {
    const int howMany = 10;
    recommender.Recommend(userID, howMany);
}
// Leave-one-out IR evaluation: for a random sample of users (evaluationPercentage),
// withholds each user's most-relevant items, rebuilds a recommender on the remaining
// data, and accumulates precision, recall, fall-out, nDCG and reach over the top-`at`
// recommendations.
public IRStatistics Evaluate(IRecommenderBuilder recommenderBuilder, IDataModelBuilder dataModelBuilder, IDataModel dataModel, IDRescorer rescorer, int at, double relevanceThreshold, double evaluationPercentage) {
    //Preconditions.checkArgument(recommenderBuilder != null, "recommenderBuilder is null");
    //Preconditions.checkArgument(dataModel != null, "dataModel is null");
    //Preconditions.checkArgument(at >= 1, "at must be at least 1");
    //Preconditions.checkArgument(evaluationPercentage > 0.0 && evaluationPercentage <= 1.0,
    //  "Invalid evaluationPercentage: " + evaluationPercentage + ". Must be: 0.0 < evaluationPercentage <= 1.0");
    int numItems = dataModel.GetNumItems();
    IRunningAverage precision = new FullRunningAverage();
    IRunningAverage recall = new FullRunningAverage();
    IRunningAverage fallOut = new FullRunningAverage();
    IRunningAverage nDCG = new FullRunningAverage();
    int numUsersRecommendedFor = 0;
    int numUsersWithRecommendations = 0;
    var it = dataModel.GetUserIDs();
    while (it.MoveNext()) {
        long userID = it.Current;
        // Randomly subsample users to keep the evaluation affordable.
        if (random.nextDouble() >= evaluationPercentage) {
            // Skipped
            continue;
        }
        var stopWatch = new System.Diagnostics.Stopwatch();
        stopWatch.Start();
        IPreferenceArray prefs = dataModel.GetPreferencesFromUser(userID);
        // List some most-preferred items that would count as (most) "relevant" results
        double theRelevanceThreshold = Double.IsNaN(relevanceThreshold) ? computeThreshold(prefs) : relevanceThreshold;
        FastIDSet relevantItemIDs = dataSplitter.GetRelevantItemsIDs(userID, at, theRelevanceThreshold, dataModel);
        int numRelevantItems = relevantItemIDs.Count();
        if (numRelevantItems <= 0) {
            continue;
        }
        // Build a training model that withholds this user's relevant items.
        FastByIDMap <IPreferenceArray> trainingUsers = new FastByIDMap <IPreferenceArray>(dataModel.GetNumUsers());
        var it2 = dataModel.GetUserIDs();
        while (it2.MoveNext()) {
            dataSplitter.ProcessOtherUser(userID, relevantItemIDs, trainingUsers, it2.Current, dataModel);
        }
        IDataModel trainingModel = dataModelBuilder == null ? new GenericDataModel(trainingUsers) : dataModelBuilder.BuildDataModel(trainingUsers);
        try {
            trainingModel.GetPreferencesFromUser(userID);
        } catch (NoSuchUserException nsee) {
            continue; // Oops we excluded all prefs for the user -- just move on
        }
        int size = numRelevantItems + trainingModel.GetItemIDsFromUser(userID).Count();
        if (size < 2 * at) {
            // Really not enough prefs to meaningfully evaluate this user
            continue;
        }
        IRecommender recommender = recommenderBuilder.BuildRecommender(trainingModel);
        int intersectionSize = 0;
        var recommendedItems = recommender.Recommend(userID, at, rescorer);
        foreach (IRecommendedItem recommendedItem in recommendedItems) {
            if (relevantItemIDs.Contains(recommendedItem.GetItemID())) {
                intersectionSize++;
            }
        }
        int numRecommendedItems = recommendedItems.Count;
        // Precision
        if (numRecommendedItems > 0) {
            precision.AddDatum((double)intersectionSize / (double)numRecommendedItems);
        }
        // Recall
        recall.AddDatum((double)intersectionSize / (double)numRelevantItems);
        // Fall-out
        if (numRelevantItems < size) {
            fallOut.AddDatum((double)(numRecommendedItems - intersectionSize) / (double)(numItems - numRelevantItems));
        }
        // nDCG
        // In computing, assume relevant IDs have relevance 1 and others 0
        double cumulativeGain = 0.0;
        double idealizedGain = 0.0;
        for (int i = 0; i < numRecommendedItems; i++) {
            IRecommendedItem item = recommendedItems[i];
            double discount = 1.0 / log2(i + 2.0); // Classical formulation says log(i+1), but i is 0-based here
            if (relevantItemIDs.Contains(item.GetItemID())) {
                cumulativeGain += discount;
            }
            // otherwise we're multiplying discount by relevance 0 so it doesn't do anything
            // Ideally results would be ordered with all relevant ones first, so this theoretical
            // ideal list starts with number of relevant items equal to the total number of relevant items
            if (i < numRelevantItems) {
                idealizedGain += discount;
            }
        }
        if (idealizedGain > 0.0) {
            nDCG.AddDatum(cumulativeGain / idealizedGain);
        }
        // Reach
        numUsersRecommendedFor++;
        if (numRecommendedItems > 0) {
            numUsersWithRecommendations++;
        }
        stopWatch.Stop();
        log.Info("Evaluated with user {} in {}ms", userID, stopWatch.ElapsedMilliseconds);
        log.Info("Precision/recall/fall-out/nDCG/reach: {} / {} / {} / {} / {}",
                 precision.GetAverage(), recall.GetAverage(), fallOut.GetAverage(),
                 nDCG.GetAverage(), (double)numUsersWithRecommendations / (double)numUsersRecommendedFor);
    }
    return(new IRStatisticsImpl(
               precision.GetAverage(),
               recall.GetAverage(),
               fallOut.GetAverage(),
               nDCG.GetAverage(),
               (double)numUsersWithRecommendations / (double)numUsersRecommendedFor));
}