/// <summary>
/// Verifies that a streaming IDataView built from two dense examples round-trips:
/// cursoring over the view yields exactly two rows whose vector column has 3 slots,
/// and enumerating the view back as <see cref="DenseExample{T}"/> also yields two items.
/// </summary>
/// <typeparam name="T">Element type of the dense vector column.</typeparam>
/// <param name="v1">Values for the first row's X column (expected length 3).</param>
/// <param name="v2">Values for the second row's X column (expected length 3).</param>
private void GenericDenseDataView<T>(T[] v1, T[] v2)
{
    var inputs = new[]
    {
        new DenseExample<T>() { X = v1 },
        new DenseExample<T>() { X = v2 }
    };

    using (var host = new TlcEnvironment())
    {
        var data = host.CreateStreamingDataView(inputs);

        // Pass 1: cursor over the raw IDataView and check each row's vector length.
        var value = new VBuffer<T>();
        int rowCount = 0;
        using (var cur = data.GetRowCursor(i => true))
        {
            var getter = cur.GetGetter<VBuffer<T>>(0);
            while (cur.MoveNext())
            {
                getter(ref value);
                // Assert.Equal reports the actual value on failure, unlike Assert.True(x == y).
                Assert.Equal(3, value.Count);
                ++rowCount;
            }
        }
        Assert.Equal(2, rowCount);

        // Pass 2: round-trip back to typed objects. foreach disposes the enumerator;
        // the original manual GetEnumerator()/MoveNext() loop leaked it.
        int itemCount = 0;
        foreach (var item in data.AsEnumerable<DenseExample<T>>(host, false))
        {
            ++itemCount;
        }
        Assert.Equal(2, itemCount);
    }
}
/// <summary>
/// Computes precision@k and diversity@k metrics for a SAR model by evaluating the
/// model's scored recommendations against usage and held-out evaluation events.
/// Returns empty metrics (with a traced warning) if any input list is empty.
/// </summary>
/// <param name="usageEvents">Usage events the model was trained on.</param>
/// <param name="evaluationEvents">Held-out usage events to evaluate precision against.</param>
/// <param name="scores">The model's scored recommendation results.</param>
/// <param name="cancellationToken">Cancellation token, checked between the evaluation stages.</param>
/// <returns>The computed <see cref="ModelMetrics"/>.</returns>
private ModelMetrics ComputeMetrics(
    IList<SarUsageEvent> usageEvents,
    IList<SarUsageEvent> evaluationEvents,
    IList<SarScoreResult> scores,
    CancellationToken cancellationToken)
{
    if (!scores.Any() || !usageEvents.Any() || !evaluationEvents.Any())
    {
        _tracer.TraceWarning(
            $"Operation '{nameof(ComputeMetrics)}' returning empty results. Scores: '{scores.Count}', Usage Events: '{usageEvents.Count}', Evaluation Events: '{evaluationEvents.Count}'");
        return new ModelMetrics();
    }

    // convert the usage items to the evaluation format
    List<SarEvaluationUsageEvent> usageEventsFormatted = usageEvents.Select(ToEvaluationUsageEvent).ToList();

    // convert the evaluation usage items to the evaluation format
    List<SarEvaluationUsageEvent> evaluationEventsFormatted = evaluationEvents.Select(ToEvaluationUsageEvent).ToList();

    // convert the scores items to the evaluation format
    List<SarEvaluationUsageEvent> scoresFormatted = scores.Select(ToEvaluationUsageEvent).ToList();

    using (TlcEnvironment environment = new TlcEnvironment())
    {
        // Forward ML.NET channel messages to the service tracer.
        environment.AddListener<ChannelMessage>(_tracer.TraceChannelMessage);

        // Create a precision evaluator (scores vs. held-out evaluation events).
        PrecisionAtKEvaluator precisionEvaluator = new PrecisionAtKEvaluator(
            environment,
            new PrecisionAtKEvaluator.Arguments { k = MaxPrecisionK },
            environment.CreateStreamingDataView(scoresFormatted),
            environment.CreateStreamingDataView(evaluationEventsFormatted));

        cancellationToken.ThrowIfCancellationRequested();

        // Create a diversity evaluator (scores vs. training usage events).
        DiversityAtKEvaluator diversityEvaluator = new DiversityAtKEvaluator(
            environment,
            new DiversityAtKEvaluator.Arguments { buckets = DiversityBuckets },
            environment.CreateStreamingDataView(scoresFormatted),
            environment.CreateStreamingDataView(usageEventsFormatted));

        cancellationToken.ThrowIfCancellationRequested();

        // Compute Precision metrics
        IList<PrecisionAtKEvaluator.MetricItem> precisionMetrics = precisionEvaluator.Evaluate()
            .AsEnumerable<PrecisionAtKEvaluator.MetricItem>(environment, false)
            .ToList();

        var modelPrecisionMetrics = precisionMetrics.Select(
            metric => new PrecisionMetric
            {
                K = (int)metric.K,
                Percentage = Math.Round(metric.PrecisionAtK * 100, 3),
                UsersInTest = (int?)metric.TotalUsers
            }).ToList();

        cancellationToken.ThrowIfCancellationRequested();

        // Compute Diversity metrics
        IList<DiversityAtKEvaluator.MetricItem> diversityMetrics = diversityEvaluator.Evaluate()
            .AsEnumerable<DiversityAtKEvaluator.MetricItem>(environment, false)
            .ToList();

        // The item-count totals are the same on every metric item, so read them once
        // instead of calling First() three times as the original did.
        DiversityAtKEvaluator.MetricItem firstDiversityMetric = diversityMetrics.First();

        ModelDiversityMetrics modelDiversityMetrics = new ModelDiversityMetrics
        {
            PercentileBuckets = diversityMetrics.Select(bucket => new PercentileBucket
            {
                Min = (int)bucket.BucketMin,
                // A BucketLim of 101 appears to be the evaluator's open-ended top
                // bucket; cap the reported max at 100. (NOTE(review): confirm the
                // sentinel against DiversityAtKEvaluator.) The original wrapped the
                // comparison in a redundant (bool) cast, removed here.
                Max = bucket.BucketLim == 101 ? 100 : (int)bucket.BucketLim,
                Percentage = Math.Round(bucket.RecommendedItemsFraction * 100, 3)
            }).ToList(),
            UniqueItemsRecommended = (int?)firstDiversityMetric.DistinctRecommendations,
            TotalItemsRecommended = (int?)firstDiversityMetric.TotalRecommendations,
            UniqueItemsInTrainSet = (int?)firstDiversityMetric.TotalItemsEvaluated
        };

        return new ModelMetrics
        {
            ModelPrecisionMetrics = modelPrecisionMetrics,
            ModelDiversityMetrics = modelDiversityMetrics
        };
    }
}