        /// <summary>
        /// Executes the test for a given recommender and a test dataset.
        /// </summary>
        /// <param name="recommender">The recommender to evaluate.</param>
        /// <param name="evaluator">The evaluator.</param>
        /// <param name="testDataset">The test dataset.</param>
        /// <param name="predictionTime">When the method returns, this parameter contains the total prediction time.</param>
        /// <param name="evaluationTime">When the method returns, this parameter contains the total evaluation time.</param>
        /// <param name="metrics">When the method returns, this parameter contains names and values of the computed metrics.</param>
        public override void Execute(
            Recommender recommender,
            Evaluator evaluator,
            SplitInstanceSource<RecommenderDataset> testDataset,
            out TimeSpan predictionTime,
            out TimeSpan evaluationTime,
            out MetricValueDistributionCollection metrics)
        {
            Stopwatch predictionTimer = Stopwatch.StartNew();
            IDictionary<User, IDictionary<Item, int>> predictions = recommender.Predict(testDataset);
            IDictionary<User, IDictionary<Item, RatingDistribution>> uncertainPredictions =
                this.computeUncertainPredictionMetrics ? recommender.PredictDistribution(testDataset) : null;

            predictionTime = predictionTimer.Elapsed;

            var evaluationTimer = Stopwatch.StartNew();

            metrics = new MetricValueDistributionCollection();
            metrics.Add("MAE", new MetricValueDistribution(evaluator.ModelDomainRatingPredictionMetric(testDataset, predictions, Metrics.AbsoluteError)));
            metrics.Add("Per-user MAE", new MetricValueDistribution(evaluator.ModelDomainRatingPredictionMetric(testDataset, predictions, Metrics.AbsoluteError, RecommenderMetricAggregationMethod.PerUserFirst)));
            metrics.Add("RMSE", new MetricValueDistribution(Math.Sqrt(evaluator.ModelDomainRatingPredictionMetric(testDataset, predictions, Metrics.SquaredError))));
            metrics.Add("Per-user RMSE", new MetricValueDistribution(Math.Sqrt(evaluator.ModelDomainRatingPredictionMetric(testDataset, predictions, Metrics.SquaredError, RecommenderMetricAggregationMethod.PerUserFirst))));
            if (this.computeUncertainPredictionMetrics)
            {
                metrics.Add("Expected MAE", new MetricValueDistribution(evaluator.ModelDomainRatingPredictionMetricExpectation(testDataset, uncertainPredictions, Metrics.AbsoluteError)));
            }

            evaluationTime = evaluationTimer.Elapsed;
        }
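
This first overload measures rating prediction accuracy: MAE and RMSE over the model's rating domain, each reported both overall and with per-user-first aggregation (the metric is computed per user and then averaged across users), plus an expected MAE when uncertain predictions are requested. As a minimal, self-contained sketch of what MAE and RMSE boil down to for a flat list of predicted/actual rating pairs (not the Evaluator API itself):

using System;
using System.Linq;

static class RatingErrorSketch
{
    // Mean absolute error: average of |predicted - actual| over all rated user/item pairs.
    public static double Mae(int[] predicted, int[] actual) =>
        predicted.Zip(actual, (p, a) => (double)Math.Abs(p - a)).Average();

    // Root mean squared error: square root of the average squared error.
    public static double Rmse(int[] predicted, int[] actual) =>
        Math.Sqrt(predicted.Zip(actual, (p, a) => Math.Pow(p - a, 2)).Average());
}

For example, predicted = {4, 3, 5, 2} against actual = {5, 3, 4, 2} gives MAE = 0.5 and RMSE ≈ 0.707.
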
Example 2
        /// <summary>
        /// Executes the test for a given recommender and a test dataset.
        /// </summary>
        /// <param name="recommender">The recommender to evaluate.</param>
        /// <param name="evaluator">The evaluator.</param>
        /// <param name="testDataset">The test dataset.</param>
        /// <param name="predictionTime">When the method returns, this parameter contains the total prediction time.</param>
        /// <param name="evaluationTime">When the method returns, this parameter contains the total evaluation time.</param>
        /// <param name="metrics">When the method returns, this parameter contains names and values of the computed metrics.</param>
        public override void Execute(
            Recommender recommender,
            Evaluator evaluator,
            SplitInstanceSource<RecommenderDataset> testDataset,
            out TimeSpan predictionTime,
            out TimeSpan evaluationTime,
            out MetricValueDistributionCollection metrics)
        {
            Stopwatch predictionTimer = Stopwatch.StartNew();
            IDictionary<User, IEnumerable<User>> predictions = evaluator.FindRelatedUsersWhoRatedSameItems(
                recommender, testDataset, this.maxRelatedUserCount, this.minCommonRatingCount, this.minRelatedUserPoolSize);

            predictionTime = predictionTimer.Elapsed;

            Stopwatch evaluationTimer = Stopwatch.StartNew();

            metrics = new MetricValueDistributionCollection();
            metrics.Add(
                "Related users L1 Sim NDCG",
                new MetricValueDistribution(evaluator.RelatedUsersMetric(testDataset, predictions, this.minCommonRatingCount, Metrics.Ndcg, Metrics.NormalizedManhattanSimilarity)));
            metrics.Add(
                "Related users L2 Sim NDCG",
                new MetricValueDistribution(evaluator.RelatedUsersMetric(testDataset, predictions, this.minCommonRatingCount, Metrics.Ndcg, Metrics.NormalizedEuclideanSimilarity)));
            evaluationTime = evaluationTimer.Elapsed;
        }
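
The second overload finds related users who rated the same items and scores the returned rankings with NDCG, using normalized L1 (Manhattan) and L2 (Euclidean) similarity between the users' common rating vectors as the relevance gain. A rough sketch of those two similarity functions, assuming each is normalized into [0, 1] by the largest possible per-rating difference (the exact normalization used by Metrics.NormalizedManhattanSimilarity and Metrics.NormalizedEuclideanSimilarity may differ):

using System;
using System.Linq;

static class RatingSimilaritySketch
{
    // 1 - (mean absolute rating difference / maximum possible difference), so identical vectors score 1.
    public static double NormalizedManhattanSimilarity(double[] a, double[] b, double maxRatingDiff) =>
        1.0 - a.Zip(b, (x, y) => Math.Abs(x - y)).Average() / maxRatingDiff;

    // 1 - (root mean squared rating difference / maximum possible difference).
    public static double NormalizedEuclideanSimilarity(double[] a, double[] b, double maxRatingDiff) =>
        1.0 - Math.Sqrt(a.Zip(b, (x, y) => (x - y) * (x - y)).Average()) / maxRatingDiff;
}
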
Example 3
        /// <summary>
        /// Executes the test for a given recommender and a test dataset.
        /// </summary>
        /// <param name="recommender">The recommender to evaluate.</param>
        /// <param name="evaluator">The evaluator.</param>
        /// <param name="testDataset">The test dataset.</param>
        /// <param name="predictionTime">When the method returns, this parameter contains the total prediction time.</param>
        /// <param name="evaluationTime">When the method returns, this parameter contains the total evaluation time.</param>
        /// <param name="metrics">When the method returns, this parameter contains names and values of the computed metrics.</param>
        public override void Execute(
            Recommender recommender,
            Evaluator evaluator,
            SplitInstanceSource<RecommenderDataset> testDataset,
            out TimeSpan predictionTime,
            out TimeSpan evaluationTime,
            out MetricValueDistributionCollection metrics)
        {
            var predictionTimer = Stopwatch.StartNew();
            IDictionary<User, IEnumerable<Item>> recommendations = evaluator.RecommendRatedItems(
                recommender, testDataset, this.maxRecommendedItemCount, this.minRecommendationPoolSize);

            predictionTime = predictionTimer.Elapsed;

            var evaluationTimer = Stopwatch.StartNew();

            metrics = new MetricValueDistributionCollection();
            Func<int, double> ratingToGainConverter = r => r - testDataset.InstanceSource.StarRatingInfo.MinStarRating + 1; // Prevent non-positive gains

            metrics.Add("Recommendation NDCG", new MetricValueDistribution(evaluator.ItemRecommendationMetric(testDataset, recommendations, Metrics.Ndcg, ratingToGainConverter)));
            metrics.Add("Recommendation GAP", new MetricValueDistribution(evaluator.ItemRecommendationMetric(testDataset, recommendations, Metrics.GradedAveragePrecision, ratingToGainConverter)));
            evaluationTime = evaluationTimer.Elapsed;
        }
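
The third overload recommends already-rated items so ground-truth ratings are available, then scores the ranked lists with NDCG and graded average precision. The ratingToGainConverter shifts each star rating by MinStarRating and adds one, so the smallest rating maps to a gain of 1 and no item contributes a zero or negative gain. A minimal NDCG sketch using that same shift and the usual log2 position discount (Metrics.Ndcg may use a different gain or discount convention):

using System;
using System.Linq;

static class NdcgSketch
{
    // Discounted cumulative gain: the gain at 0-based rank i is divided by log2(i + 2).
    static double Dcg(double[] gains) =>
        gains.Select((g, i) => g / Math.Log(i + 2, 2)).Sum();

    // NDCG = DCG of the predicted ranking / DCG of the ideal (descending-gain) ranking.
    public static double Ndcg(int[] rankedRatings, int minStarRating)
    {
        // Same shift as ratingToGainConverter above: gains start at 1.
        double[] gains = rankedRatings.Select(r => (double)(r - minStarRating + 1)).ToArray();
        double[] ideal = gains.OrderByDescending(g => g).ToArray();
        return Dcg(gains) / Dcg(ideal);
    }
}
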