Exemple #1
0
        /// <summary>Evaluates a rating predictor for RMSE, (N)MAE, and CBD</summary>
        /// <remarks>
        ///   <para>
        ///     See http://recsyswiki.com/wiki/Root_mean_square_error and http://recsyswiki.com/wiki/Mean_absolute_error
        ///   </para>
        ///   <para>
        ///     For NMAE, see the paper by Goldberg et al.
        ///   </para>
        ///   <para>
        ///     For CBD (capped binomial deviance), see http://www.kaggle.com/c/ChessRatings2/Details/Evaluation
        ///   </para>
        ///   <para>
        ///     If the recommender can take time into account, and the rating dataset provides rating times,
        ///     this information will be used for making rating predictions.
        ///   </para>
        ///   <para>
        ///     Literature:
        ///     <list type="bullet">
        ///       <item><description>
        ///         Ken Goldberg, Theresa Roeder, Dhruv Gupta, and Chris Perkins:
        ///         Eigentaste: A Constant Time Collaborative Filtering Algorithm.
        ///         Information Retrieval Journal 2001.
        ///         http://goldberg.berkeley.edu/pubs/eigentaste.pdf
        ///       </description></item>
        ///     </list>
        ///   </para>
        /// </remarks>
        /// <param name="recommender">rating predictor</param>
        /// <param name="test_ratings">test cases</param>
        /// <param name="training_ratings">the training examples; if given, additional results for new users/items are computed</param>
        /// <returns>a Dictionary containing the evaluation results</returns>
        /// <exception cref="ArgumentNullException">recommender or test_ratings is null</exception>
        static public RatingPredictionEvaluationResults Evaluate(this IRatingPredictor recommender, IRatings test_ratings, IRatings training_ratings = null)
        {
            if (recommender == null)
            {
                throw new ArgumentNullException("recommender");
            }
            if (test_ratings == null)
            {
                // report the actual parameter name ("ratings" does not exist on this method)
                throw new ArgumentNullException("test_ratings");
            }

            // evaluate on the complete test set
            var all_indices = Enumerable.Range(0, test_ratings.Count).ToArray();
            var results     = new RatingPredictionEvaluationResults(Evaluate(recommender, test_ratings, all_indices));

            if (training_ratings != null)
            {
                // users unseen at training time: beyond the training user ID range, or present but without ratings
                var new_user_indices = (from index in all_indices
                                        where test_ratings.Users[index] > training_ratings.MaxUserID || training_ratings.CountByUser[test_ratings.Users[index]] == 0
                                        select index).ToArray();
                results.NewUserResults = Evaluate(recommender, test_ratings, new_user_indices);
                // items unseen at training time, analogously
                var new_item_indices = (from index in all_indices
                                        where test_ratings.Items[index] > training_ratings.MaxItemID || training_ratings.CountByItem[test_ratings.Items[index]] == 0
                                        select index).ToArray();
                results.NewItemResults        = Evaluate(recommender, test_ratings, new_item_indices);
                results.NewUserNewItemResults = Evaluate(recommender, test_ratings, Enumerable.Intersect(new_user_indices, new_item_indices).ToArray());
            }
            return(results);
        }
        /// <summary>Evaluate on the folds of a dataset split</summary>
        /// <param name="recommender">a rating predictor</param>
        /// <param name="split">a rating dataset split</param>
        /// <param name="compute_fit">if set to true measure fit on the training data as well</param>
        /// <param name="show_fold_results">set to true to print per-fold results to STDERR</param>
        /// <returns>a dictionary containing the average results over the different folds of the split</returns>
        public static RatingPredictionEvaluationResults DoCrossValidation(
			this RatingPredictor recommender,
			ISplit<IRatings> split,
			bool compute_fit = false,
			bool show_fold_results = false)
        {
            var fold_results = new RatingPredictionEvaluationResults[split.NumberOfFolds];

            // train and evaluate the folds in parallel; each fold writes only its own slot of fold_results
            Parallel.For(0, (int) split.NumberOfFolds, i =>
            {
                try
                {
                    var split_recommender = (RatingPredictor) recommender.Clone(); // to avoid changes in recommender
                    split_recommender.Ratings = split.Train[i];
                    if (recommender is ITransductiveRatingPredictor)
                        ((ITransductiveRatingPredictor) split_recommender).AdditionalFeedback = split.Test[i];
                    split_recommender.Train();
                    fold_results[i] = Ratings.Evaluate(split_recommender, split.Test[i]);
                    if (compute_fit)
                        fold_results[i]["fit"] = (float) split_recommender.ComputeFit();

                    if (show_fold_results)
                        Console.Error.WriteLine("fold {0} {1}", i, fold_results[i]);
                }
                catch (Exception e)
                {
                    Console.Error.WriteLine("===> ERROR: " + e.Message + e.StackTrace);
                    throw; // 'throw e' would reset the stack trace
                }
            });

            return new RatingPredictionEvaluationResults(fold_results);
        }
        /// <summary>Performs user-wise fold-in evaluation, but instead of folding in perform incremental training with the new data</summary>
        /// <remarks>
        /// For each user present in both datasets, the user's ratings from update_data are added to a clone
        /// of the recommender (incremental training), the user's eval_data items are then predicted with that
        /// clone, and the added ratings are removed again afterwards.
        /// </remarks>
        /// <returns>the evaluation results (RMSE, MAE, NMAE, CBD)</returns>
        /// <param name='recommender'>a rating predictor capable of performing a user fold-in</param>
        /// <param name='update_data'>the rating data used to represent the users</param>
        /// <param name='eval_data'>the evaluation data</param>
        static public RatingPredictionEvaluationResults EvaluateFoldInIncrementalTraining(this IncrementalRatingPredictor recommender, IRatings update_data, IRatings eval_data)
        {
            double rmse = 0;
            double mae  = 0;
            double cbd  = 0;

            int rating_count = 0;

            foreach (int user_id in update_data.AllUsers)
            {
                if (eval_data.AllUsers.Contains(user_id))
                {
                    var local_recommender = (IncrementalRatingPredictor)recommender.Clone();

                    // add ratings and perform incremental training
                    var user_ratings = new RatingsProxy(update_data, update_data.ByUser[user_id]);
                    local_recommender.AddRatings(user_ratings);

                    var items_to_rate     = (from index in eval_data.ByUser[user_id] select eval_data.Items[index]).ToArray();
                    // BUGFIX: predict with the incrementally trained clone; using the original
                    // 'recommender' here would ignore the ratings just added above
                    var predicted_ratings = local_recommender.Recommend(user_id, candidate_items: items_to_rate);

                    foreach (var pred in predicted_ratings)
                    {
                        float prediction    = pred.Item2;
                        float actual_rating = eval_data.Get(user_id, pred.Item1, eval_data.ByUser[user_id]);
                        float error         = prediction - actual_rating;

                        rmse += error * error;
                        mae  += Math.Abs(error);
                        cbd  += Eval.Ratings.ComputeCBD(actual_rating, prediction, recommender.MinRating, recommender.MaxRating);
                        rating_count++;
                    }

                    // remove ratings again
                    local_recommender.RemoveRatings(user_ratings);

                    Console.Error.Write(".");
                }
            }

            mae  = mae / rating_count;
            rmse = Math.Sqrt(rmse / rating_count);
            cbd  = cbd / rating_count;

            var result = new RatingPredictionEvaluationResults();

            result["RMSE"] = (float)rmse;
            result["MAE"]  = (float)mae;
            result["NMAE"] = (float)mae / (recommender.MaxRating - recommender.MinRating);
            result["CBD"]  = (float)cbd;
            return(result);
        }
        /// <summary>Performs user-wise fold-in evaluation</summary>
        /// <returns>the evaluation results (RMSE, MAE, NMAE, CBD)</returns>
        /// <param name='recommender'>a rating predictor capable of performing a user fold-in</param>
        /// <param name='update_data'>the rating data used to represent the users</param>
        /// <param name='eval_data'>the evaluation data</param>
        static public RatingPredictionEvaluationResults EvaluateFoldIn(this IFoldInRatingPredictor recommender, IRatings update_data, IRatings eval_data)
        {
            double sum_squared_error  = 0;
            double sum_absolute_error = 0;
            double sum_cbd            = 0;
            int    num_predictions    = 0;

            foreach (int user_id in update_data.AllUsers)
            {
                // only users that also appear in the evaluation data can be evaluated
                if (!eval_data.AllUsers.Contains(user_id))
                    continue;

                // the user's profile: (item, rating) pairs taken from the update data
                var known_ratings = update_data.ByUser[user_id]
                    .Select(index => Tuple.Create(update_data.Items[index], update_data[index]))
                    .ToArray();
                var items_to_rate = eval_data.ByUser[user_id]
                    .Select(index => eval_data.Items[index])
                    .ToArray();

                foreach (var scored_item in recommender.ScoreItems(known_ratings, items_to_rate))
                {
                    float predicted = scored_item.Item2;
                    float actual    = eval_data.Get(user_id, scored_item.Item1, eval_data.ByUser[user_id]);
                    float diff      = predicted - actual;

                    sum_squared_error  += diff * diff;
                    sum_absolute_error += Math.Abs(diff);
                    sum_cbd            += Eval.Ratings.ComputeCBD(actual, predicted, recommender.MinRating, recommender.MaxRating);
                    num_predictions++;
                }
                Console.Error.Write(".");
            }

            double mae  = sum_absolute_error / num_predictions;
            double rmse = Math.Sqrt(sum_squared_error / num_predictions);
            double cbd  = sum_cbd / num_predictions;

            var result = new RatingPredictionEvaluationResults();

            result["RMSE"] = (float)rmse;
            result["MAE"]  = (float)mae;
            result["NMAE"] = (float)mae / (recommender.MaxRating - recommender.MinRating);
            result["CBD"]  = (float)cbd;
            return(result);
        }
        /// <summary>Evaluate on the folds of a dataset split</summary>
        /// <param name="recommender">a rating predictor</param>
        /// <param name="split">a rating dataset split</param>
        /// <param name="compute_fit">if set to true measure fit on the training data as well</param>
        /// <param name="show_results">set to true to print results to STDERR</param>
        /// <returns>a dictionary containing the average results over the different folds of the split</returns>
        public static RatingPredictionEvaluationResults DoCrossValidation(
			this RatingPredictor recommender,
			ISplit<IRatings> split,
			bool compute_fit = false,
			bool show_results = false)
        {
            var avg_results = new RatingPredictionEvaluationResults();

            Parallel.For(0, (int) split.NumberOfFolds, i =>
            {
                try
                {
                    var split_recommender = (RatingPredictor) recommender.Clone(); // to avoid changes in recommender
                    split_recommender.Ratings = split.Train[i];
                    if (recommender is ITransductiveRatingPredictor)
                        ((ITransductiveRatingPredictor) split_recommender).AdditionalFeedback = split.Test[i];
                    split_recommender.Train();
                    var fold_results = Ratings.Evaluate(split_recommender, split.Test[i]);
                    if (compute_fit)
                        fold_results["fit"] = (float) split_recommender.ComputeFit();

                    // thread-safe accumulation of per-fold sums; turned into averages below
                    lock (avg_results)
                        foreach (var key in fold_results.Keys)
                            if (avg_results.ContainsKey(key))
                                avg_results[key] += fold_results[key];
                            else
                                avg_results[key] = fold_results[key];

                    if (show_results)
                        Console.Error.WriteLine("fold {0} {1}", i, fold_results);
                }
                catch (Exception e)
                {
                    Console.Error.WriteLine("===> ERROR: " + e.Message + e.StackTrace);
                    throw; // 'throw e' would reset the stack trace
                }
            });

            foreach (var key in Ratings.Measures)
                avg_results[key] /= split.NumberOfFolds;
            // BUGFIX: "fit" is not part of Ratings.Measures, so it was left as a sum; average it too
            if (avg_results.ContainsKey("fit"))
                avg_results["fit"] /= split.NumberOfFolds;

            return avg_results;
        }
Exemple #6
0
		/// <summary>Performs user-wise fold-in evaluation</summary>
		/// <returns>the evaluation results (RMSE, MAE, NMAE, CBD)</returns>
		/// <param name='recommender'>a rating predictor capable of performing a user fold-in</param>
		/// <param name='update_data'>the rating data used to represent the users</param>
		/// <param name='eval_data'>the evaluation data</param>
		static public RatingPredictionEvaluationResults EvaluateFoldIn(this IFoldInRatingPredictor recommender, IRatings update_data, IRatings eval_data)
		{
			double squared_error_sum = 0;
			double abs_error_sum     = 0;
			double cbd_sum           = 0;
			int prediction_count     = 0;

			foreach (int user_id in update_data.AllUsers)
			{
				// skip users that have no ratings in the evaluation data
				if (!eval_data.AllUsers.Contains(user_id))
					continue;

				// represent the user by his/her (item, rating) pairs from the update data
				var known_ratings = update_data.ByUser[user_id]
					.Select(index => Tuple.Create(update_data.Items[index], update_data[index]))
					.ToArray();
				var items_to_rate = eval_data.ByUser[user_id].Select(index => eval_data.Items[index]).ToArray();

				foreach (var scored_item in recommender.ScoreItems(known_ratings, items_to_rate))
				{
					float predicted = scored_item.Item2;
					float actual    = eval_data.Get(user_id, scored_item.Item1, eval_data.ByUser[user_id]);
					float diff      = predicted - actual;

					squared_error_sum += diff * diff;
					abs_error_sum     += Math.Abs(diff);
					cbd_sum           += Eval.Ratings.ComputeCBD(actual, predicted, recommender.MinRating, recommender.MaxRating);
					prediction_count++;
				}
				Console.Error.Write(".");
			}

			double mae  = abs_error_sum / prediction_count;
			double rmse = Math.Sqrt(squared_error_sum / prediction_count);
			double cbd  = cbd_sum / prediction_count;

			var result = new RatingPredictionEvaluationResults();
			result["RMSE"] = (float) rmse;
			result["MAE"]  = (float) mae;
			result["NMAE"] = (float) mae / (recommender.MaxRating - recommender.MinRating);
			result["CBD"]  = (float) cbd;
			return result;
		}
        /// <summary>Evaluate on the folds of a dataset split</summary>
        /// <param name="recommender">a rating predictor</param>
        /// <param name="split">a rating dataset split</param>
        /// <param name="compute_fit">if set to true measure fit on the training data as well</param>
        /// <param name="show_fold_results">set to true to print per-fold results to STDERR</param>
        /// <returns>a dictionary containing the average results over the different folds of the split</returns>
        public static RatingPredictionEvaluationResults DoCrossValidation(
            this RatingPredictor recommender,
            ISplit<IRatings> split,
            bool compute_fit       = false,
            bool show_fold_results = false)
        {
            var results_per_fold = new RatingPredictionEvaluationResults[split.NumberOfFolds];

            Parallel.For(0, (int)split.NumberOfFolds, fold =>
            {
                try
                {
                    // work on a clone so the caller's recommender is left untouched
                    var fold_recommender = (RatingPredictor)recommender.Clone();
                    fold_recommender.Ratings = split.Train[fold];
                    if (recommender is ITransductiveRatingPredictor)
                        ((ITransductiveRatingPredictor)fold_recommender).AdditionalFeedback = split.Test[fold];
                    fold_recommender.Train();
                    results_per_fold[fold] = Ratings.Evaluate(fold_recommender, split.Test[fold]);
                    if (compute_fit)
                        results_per_fold[fold]["fit"] = (float)fold_recommender.ComputeFit();

                    if (show_fold_results)
                        Console.Error.WriteLine("fold {0} {1}", fold, results_per_fold[fold]);
                }
                catch (Exception e)
                {
                    Console.Error.WriteLine("===> ERROR: " + e.Message + e.StackTrace);
                    throw;
                }
            });

            return new RatingPredictionEvaluationResults(results_per_fold);
        }
Exemple #8
0
		/// <summary>Evaluates a rating predictor for RMSE, (N)MAE, and CBD</summary>
		/// <remarks>
		///   <para>
		///     See http://recsyswiki.com/wiki/Root_mean_square_error and http://recsyswiki.com/wiki/Mean_absolute_error
		///   </para>
		///   <para>
		///     For NMAE, see the paper by Goldberg et al.
		///   </para>
		///   <para>
		///     For CBD (capped binomial deviance), see http://www.kaggle.com/c/ChessRatings2/Details/Evaluation
		///   </para>
		///   <para>
		///     If the recommender can take time into account, and the rating dataset provides rating times,
		///     this information will be used for making rating predictions.
		///   </para>
		///   <para>
		///     Literature:
		///     <list type="bullet">
		///       <item><description>
		///         Ken Goldberg, Theresa Roeder, Dhruv Gupta, and Chris Perkins:
		///         Eigentaste: A Constant Time Collaborative Filtering Algorithm.
		///         Information Retrieval Journal 2001.
		///         http://goldberg.berkeley.edu/pubs/eigentaste.pdf
		///       </description></item>
		///     </list>
		///   </para>
		/// </remarks>
		/// <param name="recommender">rating predictor</param>
		/// <param name="test_ratings">test cases</param>
		/// <param name="training_ratings">the training examples; if given, additional results for new users/items are computed</param>
		/// <returns>a Dictionary containing the evaluation results</returns>
		/// <exception cref="ArgumentNullException">recommender or test_ratings is null</exception>
		static public RatingPredictionEvaluationResults Evaluate(this IRatingPredictor recommender, IRatings test_ratings, IRatings training_ratings = null)
		{
			if (recommender == null)
				throw new ArgumentNullException("recommender");
			if (test_ratings == null)
				throw new ArgumentNullException("test_ratings"); // actual parameter name, not "ratings"

			// evaluate on the complete test set
			var all_indices = Enumerable.Range(0, test_ratings.Count).ToArray();
			var results = new RatingPredictionEvaluationResults(Evaluate(recommender, test_ratings, all_indices));
			if (training_ratings != null)
			{
				// users unseen at training time: beyond the training user ID range, or present but without ratings
				var new_user_indices = (from index in all_indices
				                        where test_ratings.Users[index] > training_ratings.MaxUserID || training_ratings.CountByUser[test_ratings.Users[index]] == 0
				                        select index).ToArray();
				results.NewUserResults = Evaluate(recommender, test_ratings, new_user_indices);
				// items unseen at training time, analogously
				var new_item_indices = (from index in all_indices
				                        where test_ratings.Items[index] > training_ratings.MaxItemID || training_ratings.CountByItem[test_ratings.Items[index]] == 0
				                        select index).ToArray();
				results.NewItemResults = Evaluate(recommender, test_ratings, new_item_indices);
				results.NewUserNewItemResults = Evaluate(recommender, test_ratings, Enumerable.Intersect(new_user_indices, new_item_indices).ToArray());
			}
			return results;
		}
		/// <summary>Online evaluation for rating prediction</summary>
		/// <remarks>
		/// Every rating that is tested is added to the training set afterwards,
		/// so each prediction is made before the recommender has seen that rating.
		/// </remarks>
		/// <param name="recommender">rating predictor; must also implement IIncrementalRatingPredictor</param>
		/// <param name="ratings">Test cases</param>
		/// <returns>a Dictionary containing the evaluation results</returns>
		/// <exception cref="ArgumentNullException">recommender or ratings is null</exception>
		/// <exception cref="ArgumentException">recommender does not support incremental updates</exception>
		static public RatingPredictionEvaluationResults EvaluateOnline(this IRatingPredictor recommender, IRatings ratings)
		{
			if (recommender == null)
				throw new ArgumentNullException("recommender");
			if (ratings == null)
				throw new ArgumentNullException("ratings");

			// the online protocol requires adding each tested rating to the model afterwards
			var incremental_recommender = recommender as IIncrementalRatingPredictor;
			if (incremental_recommender == null)
				throw new ArgumentException("recommender must be of type IIncrementalRatingPredictor");

			// accumulators: squared error (RMSE), absolute error ((N)MAE), capped binomial deviance (CBD)
			double rmse = 0;
			double mae  = 0;
			double cbd  = 0;

			// iterate in random order
			foreach (int index in ratings.RandomIndex)
			{
				// predict BEFORE the rating is revealed to the recommender
				float prediction = recommender.Predict(ratings.Users[index], ratings.Items[index]);
				float error = prediction - ratings[index];

				rmse += error * error;
				mae  += Math.Abs(error);
				cbd  += Eval.Ratings.ComputeCBD(ratings[index], prediction, recommender.MinRating, recommender.MaxRating);

				// now add the tested rating to the training data (online protocol)
				incremental_recommender.AddRatings(new RatingsProxy(ratings, new int[] { index }));
			}
			// NOTE(review): if ratings.Count is 0 these divisions yield NaN -- presumably callers pass non-empty data
			mae  = mae / ratings.Count;
			rmse = Math.Sqrt(rmse / ratings.Count);
			cbd  = cbd / ratings.Count;

			var result = new RatingPredictionEvaluationResults();
			result["RMSE"] = (float) rmse;
			result["MAE"]  = (float) mae;
			result["NMAE"] = (float) mae / (recommender.MaxRating - recommender.MinRating);
			result["CBD"]  = (float) cbd;
			return result;
		}
        /// <summary>Evaluate an iterative recommender on the folds of a dataset split, display results on STDOUT</summary>
        /// <param name="recommender">a rating predictor; must also implement IIterativeModel</param>
        /// <param name="split">a rating dataset split</param>
        /// <param name="max_iter">the maximum number of iterations</param>
        /// <param name="find_iter">the report interval</param>
        /// <param name="show_fold_results">if set to true to print per-fold results to STDERR</param>
        /// <exception cref="ArgumentException">recommender does not implement IIterativeModel</exception>
        public static void DoIterativeCrossValidation(
			this RatingPredictor recommender,
			ISplit<IRatings> split,
			int max_iter,
			int find_iter = 1,
			bool show_fold_results = false)
        {
            if (!(recommender is IIterativeModel))
                throw new ArgumentException("recommender must be of type IIterativeModel");

            var split_recommenders     = new RatingPredictor[split.NumberOfFolds];
            var iterative_recommenders = new IIterativeModel[split.NumberOfFolds];
            var fold_results = new RatingPredictionEvaluationResults[split.NumberOfFolds];

            // initial training and evaluation
            Parallel.For(0, (int) split.NumberOfFolds, i =>
            {
                try
                {
                    split_recommenders[i] = (RatingPredictor) recommender.Clone(); // to avoid changes in recommender
                    split_recommenders[i].Ratings = split.Train[i];
                    if (recommender is ITransductiveRatingPredictor)
                        ((ITransductiveRatingPredictor) split_recommenders[i]).AdditionalFeedback = split.Test[i];
                    split_recommenders[i].Train();
                    iterative_recommenders[i] = (IIterativeModel) split_recommenders[i];
                    fold_results[i] = Ratings.Evaluate(split_recommenders[i], split.Test[i]);

                    if (show_fold_results)
                        Console.Error.WriteLine("fold {0} {1} iteration {2}", i, fold_results[i], iterative_recommenders[i].NumIter);
                }
                catch (Exception e)
                {
                    Console.Error.WriteLine("===> ERROR: " + e.Message + e.StackTrace);
                    throw; // 'throw e' would reset the stack trace
                }
            });
            Console.WriteLine("{0} iteration {1}", new RatingPredictionEvaluationResults(fold_results), iterative_recommenders[0].NumIter);

            // iterative training and evaluation
            for (int it = (int) iterative_recommenders[0].NumIter + 1; it <= max_iter; it++)
            {
                Parallel.For(0, (int) split.NumberOfFolds, i =>
                {
                    try
                    {
                        iterative_recommenders[i].Iterate();

                        // only re-evaluate at the configured report interval
                        if (it % find_iter == 0)
                        {
                            fold_results[i] = Ratings.Evaluate(split_recommenders[i], split.Test[i]);
                            if (show_fold_results)
                                Console.Error.WriteLine("fold {0} {1} iteration {2}", i, fold_results[i], it);
                        }
                    }
                    catch (Exception e)
                    {
                        Console.Error.WriteLine("===> ERROR: " + e.Message + e.StackTrace);
                        throw; // 'throw e' would reset the stack trace
                    }
                });
                Console.WriteLine("{0} iteration {1}", new RatingPredictionEvaluationResults(fold_results), it);
            }
        }
Exemple #11
0
        /// <summary>Evaluates a rating predictor for RMSE, (N)MAE, and CBD</summary>
        /// <remarks>
        ///   <para>
        ///     See http://recsyswiki.com/wiki/Root_mean_square_error and http://recsyswiki.com/wiki/Mean_absolute_error
        ///   </para>
        ///   <para>
        ///     For NMAE, see the paper by Goldberg et al.
        ///   </para>
        ///   <para>
        ///     For CBD (capped binomial deviance), see http://www.kaggle.com/c/ChessRatings2/Details/Evaluation
        ///   </para>
        ///   <para>
        ///     If the recommender can take time into account, and the rating dataset provides rating times,
        ///     this information will be used for making rating predictions.
        ///   </para>
        ///   <para>
        ///     Literature:
        ///     <list type="bullet">
        ///       <item><description>
        ///         Ken Goldberg, Theresa Roeder, Dhruv Gupta, and Chris Perkins:
        ///         Eigentaste: A Constant Time Collaborative Filtering Algorithm.
        ///         Information Retrieval Journal 2001.
        ///         http://goldberg.berkeley.edu/pubs/eigentaste.pdf
        ///       </description></item>
        ///     </list>
        ///   </para>
        /// </remarks>
        /// <param name="recommender">rating predictor</param>
        /// <param name="ratings">Test cases</param>
        /// <returns>a Dictionary containing the evaluation results</returns>
        /// <exception cref="ArgumentNullException">recommender or ratings is null</exception>
        public static RatingPredictionEvaluationResults Evaluate(this IRatingPredictor recommender, IRatings ratings)
        {
            double rmse = 0;
            double mae  = 0;
            double cbd  = 0;

            if (recommender == null)
                throw new ArgumentNullException("recommender");
            if (ratings == null)
                throw new ArgumentNullException("ratings");

            if (recommender is ITimeAwareRatingPredictor && ratings is ITimedRatings)
            {
                // recommender supports time-aware prediction and the data carries timestamps: use them
                var time_aware_recommender = recommender as ITimeAwareRatingPredictor;
                var timed_ratings = ratings as ITimedRatings;
                for (int index = 0; index < ratings.Count; index++)
                {
                    float prediction = time_aware_recommender.Predict(timed_ratings.Users[index], timed_ratings.Items[index], timed_ratings.Times[index]);
                    float error = prediction - ratings[index];

                    rmse += error * error;
                    mae  += Math.Abs(error);
                    cbd  += ComputeCBD(ratings[index], prediction, ratings.MinRating, ratings.MaxRating);
                }
            }
            else
                // plain (time-agnostic) prediction
                for (int index = 0; index < ratings.Count; index++)
                {
                    float prediction = recommender.Predict(ratings.Users[index], ratings.Items[index]);
                    float error = prediction - ratings[index];

                    rmse += error * error;
                    mae  += Math.Abs(error);
                    cbd  += ComputeCBD(ratings[index], prediction, ratings.MinRating, ratings.MaxRating);
                }
            // turn sums into the final measures
            mae  = mae / ratings.Count;
            rmse = Math.Sqrt(rmse / ratings.Count);
            cbd  = cbd / ratings.Count;

            var result = new RatingPredictionEvaluationResults();
            result["RMSE"] = (float) rmse;
            result["MAE"]  = (float) mae;
            result["NMAE"] = (float) mae / (recommender.MaxRating - recommender.MinRating);
            result["CBD"]  = (float) cbd;
            return result;
        }
Exemple #12
0
		/// <summary>Performs user-wise fold-in evaluation, but instead of folding in perform incremental training with the new data</summary>
		/// <remarks>
		/// For each user present in both datasets, the user's ratings from update_data are added to a clone
		/// of the recommender (incremental training), the user's eval_data items are then predicted with that
		/// clone, and the added ratings are removed again afterwards.
		/// </remarks>
		/// <returns>the evaluation results (RMSE, MAE, NMAE, CBD)</returns>
		/// <param name='recommender'>a rating predictor capable of performing a user fold-in</param>
		/// <param name='update_data'>the rating data used to represent the users</param>
		/// <param name='eval_data'>the evaluation data</param>
		static public RatingPredictionEvaluationResults EvaluateFoldInIncrementalTraining(this IncrementalRatingPredictor recommender, IRatings update_data, IRatings eval_data)
		{
			double rmse = 0;
			double mae  = 0;
			double cbd  = 0;

			int rating_count = 0;
			foreach (int user_id in update_data.AllUsers)
				if (eval_data.AllUsers.Contains(user_id))
				{
					var local_recommender = (IncrementalRatingPredictor) recommender.Clone();

					// add ratings and perform incremental training
					var user_ratings = new RatingsProxy(update_data, update_data.ByUser[user_id]);
					local_recommender.AddRatings(user_ratings);

					var items_to_rate = (from index in eval_data.ByUser[user_id] select eval_data.Items[index]).ToArray();
					// BUGFIX: predict with the incrementally trained clone; using the original
					// 'recommender' here would ignore the ratings just added above
					var predicted_ratings = local_recommender.Recommend(user_id, candidate_items:items_to_rate);

					foreach (var pred in predicted_ratings)
					{
						float prediction = pred.Item2;
						float actual_rating = eval_data.Get(user_id, pred.Item1, eval_data.ByUser[user_id]);
						float error = prediction - actual_rating;

						rmse += error * error;
						mae  += Math.Abs(error);
						cbd  += Eval.Ratings.ComputeCBD(actual_rating, prediction, recommender.MinRating, recommender.MaxRating);
						rating_count++;
					}

					// remove ratings again
					local_recommender.RemoveRatings(user_ratings);

					Console.Error.Write(".");
				}

			mae  = mae / rating_count;
			rmse = Math.Sqrt(rmse / rating_count);
			cbd  = cbd / rating_count;

			var result = new RatingPredictionEvaluationResults();
			result["RMSE"] = (float) rmse;
			result["MAE"]  = (float) mae;
			result["NMAE"] = (float) mae / (recommender.MaxRating - recommender.MinRating);
			result["CBD"]  = (float) cbd;
			return result;
		}
        /// <summary>Evaluate an iterative recommender on the folds of a dataset split, display results on STDOUT</summary>
        /// <param name="recommender">a rating predictor; must also implement IIterativeModel</param>
        /// <param name="split">a rating dataset split</param>
        /// <param name="max_iter">the maximum number of iterations</param>
        /// <param name="find_iter">the report interval</param>
        /// <param name="show_fold_results">if set to true to print per-fold results to STDERR</param>
        /// <exception cref="ArgumentException">recommender does not implement IIterativeModel</exception>
        static public void DoIterativeCrossValidation(
            this RatingPredictor recommender,
            ISplit <IRatings> split,
            uint max_iter,
            uint find_iter         = 1,
            bool show_fold_results = false)
        {
            if (!(recommender is IIterativeModel))
            {
                throw new ArgumentException("recommender must be of type IIterativeModel");
            }

            // per-fold state; each parallel body only touches its own index i
            var split_recommenders     = new RatingPredictor[split.NumberOfFolds];
            var iterative_recommenders = new IIterativeModel[split.NumberOfFolds];
            var fold_results           = new RatingPredictionEvaluationResults[split.NumberOfFolds];

            // initial training and evaluation
            Parallel.For(0, (int)split.NumberOfFolds, i =>
            {
                try
                {
                    split_recommenders[i]         = (RatingPredictor)recommender.Clone();              // to avoid changes in recommender
                    split_recommenders[i].Ratings = split.Train[i];
                    if (recommender is ITransductiveRatingPredictor)
                    {
                        ((ITransductiveRatingPredictor)split_recommenders[i]).AdditionalFeedback = split.Test[i];
                    }
                    split_recommenders[i].Train();
                    iterative_recommenders[i] = (IIterativeModel)split_recommenders[i];
                    fold_results[i]           = Ratings.Evaluate(split_recommenders[i], split.Test[i]);

                    if (show_fold_results)
                    {
                        Console.Error.WriteLine("fold {0} {1} iteration {2}", i, fold_results[i], iterative_recommenders[i].NumIter);
                    }
                }
                catch (Exception e)
                {
                    Console.Error.WriteLine("===> ERROR: " + e.Message + e.StackTrace);
                    throw;
                }
            });
            Console.WriteLine("{0} iteration {1}", new RatingPredictionEvaluationResults(fold_results), iterative_recommenders[0].NumIter);

            // iterative training and evaluation
            for (int it = (int)iterative_recommenders[0].NumIter + 1; it <= max_iter; it++)
            {
                Parallel.For(0, (int)split.NumberOfFolds, i =>
                {
                    try
                    {
                        iterative_recommenders[i].Iterate();

                        // only re-evaluate at the configured report interval
                        if (it % find_iter == 0)
                        {
                            fold_results[i] = Ratings.Evaluate(split_recommenders[i], split.Test[i]);
                            if (show_fold_results)
                            {
                                Console.Error.WriteLine("fold {0} {1} iteration {2}", i, fold_results[i], it);
                            }
                        }
                    }
                    catch (Exception e)
                    {
                        Console.Error.WriteLine("===> ERROR: " + e.Message + e.StackTrace);
                        throw;
                    }
                });
                Console.WriteLine("{0} iteration {1}", new RatingPredictionEvaluationResults(fold_results), it);
            }
        }