        public override void CalculateMetrics(FeatureSubsetModel<TOutput> model,
            ISubsetSelector subsetSelector, Subset subset, Batch batch, bool needMetrics)
        {
            base.CalculateMetrics(model, subsetSelector, subset, batch, needMetrics);

            // The predictor must expose IValueMapper so its raw mapping function can be invoked directly.
            var vm = model.Predictor as IValueMapper;
            Host.Check(vm != null, "Predictor doesn't implement the expected interface");
            var map = vm.GetMapper<VBuffer<Single>, TOutput>();

            // Prediction buffer, grown on demand as rows are read.
            TOutput[] preds = new TOutput[100];
            int count = 0;
            var data = subsetSelector.GetTestData(subset, batch);

            // Run this model over every test row, recording its raw output per row.
            using (var cursor = new FeatureFloatVectorCursor(data, CursOpt.AllFeatures))
            {
                while (cursor.MoveNext())
                {
                    Utils.EnsureSize(ref preds, count + 1);
                    map(in cursor.Features, ref preds[count]);
                    count++;
                }
            }
            // Trim to the number of rows actually read and cache the predictions for this model.
            Array.Resize(ref preds, count);
            _predictions[model] = preds;
        }
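
This override caches each model's raw per-row predictions instead of computing aggregate metrics, presumably for a combiner (such as an averaging or stacking combiner) to consume later. Below is a minimal, self-contained sketch of how such a cache might be combined, using plain float arrays in place of ML.NET's internal types; PredictionCombiner and CombineByMean are illustrative names, not ML.NET API.

using System;
using System.Collections.Generic;

static class PredictionCombiner
{
    // Row-wise mean of each model's raw scores: the simplest averaging combiner.
    public static float[] CombineByMean(IReadOnlyList<float[]> perModelScores)
    {
        if (perModelScores == null || perModelScores.Count == 0)
            throw new ArgumentException("Need at least one model's predictions.");

        int rows = perModelScores[0].Length;
        var combined = new float[rows];
        foreach (var scores in perModelScores)
        {
            if (scores.Length != rows)
                throw new ArgumentException("All models must score the same rows.");
            for (int i = 0; i < rows; i++)
                combined[i] += scores[i];
        }
        for (int i = 0; i < rows; i++)
            combined[i] /= perModelScores.Count;
        return combined;
    }
}

Calling CombineByMean(new[] { predsA, predsB }) would then give the ensemble's averaged score for each test row.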
Example #2
        public virtual void CalculateMetrics(FeatureSubsetModel<TOutput> model,
            ISubsetSelector subsetSelector, Subset subset, Batch batch, bool needMetrics)
        {
            if (!needMetrics || model == null || model.Metrics != null)
            {
                return;
            }

            using (var ch = Host.Start("Calculate metrics"))
            {
                RoleMappedData testData = subsetSelector.GetTestData(subset, batch);
                // Because the training and test datasets are drawn from the same base dataset, the test data role mappings
                // are the same as for the train data.
                IDataScorerTransform scorePipe = ScoreUtils.GetScorer(model.Predictor, testData, Host, testData.Schema);
                RoleMappedData scoredTestData = new RoleMappedData(scorePipe,
                    GetColumnRoles(testData.Schema, scorePipe.Schema));
                // REVIEW: Should we somehow allow the user to customize the evaluator?
                // By what mechanism should we allow that?
                IEvaluator evaluator = GetEvaluator(Host);
                // REVIEW: with the new evaluators, metrics of individual models are no longer
                // printed to the Console. Consider adding an option on the combiner to print them.
                // REVIEW: Consider adding an option to the combiner to save a data view
                // containing all the results of the individual models.
                var metricsDict = evaluator.Evaluate(scoredTestData);
                if (!metricsDict.TryGetValue(MetricKinds.OverallMetrics, out IDataView metricsView))
                {
                    throw Host.Except("Evaluator did not produce any overall metrics");
                }
                // REVIEW: We're assuming that the metrics of interest are always doubles here.
                var metrics = EvaluateUtils.GetMetrics(metricsView, getVectorMetrics: false);
                model.Metrics = metrics.ToArray();
            }
        }
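
The method above goes through ML.NET's internal scoring stack (ScoreUtils.GetScorer, RoleMappedData, IEvaluator). The public MLContext API expresses the same score-then-evaluate flow; here is a minimal sketch assuming the Microsoft.ML NuGet package, where Row and the four-row toy dataset are invented purely for illustration.

using System;
using Microsoft.ML;
using Microsoft.ML.Data;

public class Row
{
    [VectorType(2)]
    public float[] Features { get; set; }
    public bool Label { get; set; }
}

class Program
{
    static void Main()
    {
        var ml = new MLContext(seed: 0);
        // Tiny in-memory dataset; real code would load and split data properly.
        var data = ml.Data.LoadFromEnumerable(new[]
        {
            new Row { Features = new[] { 0f, 1f }, Label = false },
            new Row { Features = new[] { 0f, 2f }, Label = false },
            new Row { Features = new[] { 1f, 0f }, Label = true },
            new Row { Features = new[] { 2f, 0f }, Label = true },
        });

        var trainer = ml.BinaryClassification.Trainers.SdcaLogisticRegression();
        var model = trainer.Fit(data);

        // Score, then evaluate the scored data: the same two steps as scorePipe + evaluator above.
        var scored = model.Transform(data);
        var metrics = ml.BinaryClassification.Evaluate(scored);
        Console.WriteLine($"AUC={metrics.AreaUnderRocCurve:F2} Acc={metrics.Accuracy:F2}");
    }
}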

        private bool EnsureMinimumFeaturesSelected(Subset subset)
        {
            // A null selection means "use all features", which trivially meets the minimum.
            if (subset.SelectedFeatures == null)
            {
                return true;
            }
            // Otherwise at least one feature must be selected.
            for (int i = 0; i < subset.SelectedFeatures.Count; i++)
            {
                if (subset.SelectedFeatures[i])
                {
                    return true;
                }
            }

            return false;
        }
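
Since the helper only asks whether any feature is selected, the same check collapses to a one-liner with LINQ. A sketch, treating the mask as IReadOnlyList<bool> (a stand-in for the actual SelectedFeatures type):

using System.Collections.Generic;
using System.Linq;

static class SubsetChecks
{
    // Null means "use all features", so only an explicit all-false mask fails the minimum.
    public static bool HasAnySelected(IReadOnlyList<bool> selectedFeatures)
        => selectedFeatures == null || selectedFeatures.Any(f => f);
}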
Example #4
 public override void CalculateMetrics(FeatureSubsetModel<TOutput> model,
                                       ISubsetSelector subsetSelector, Subset subset, Batch batch, bool needMetrics)
 {
     // Ignore the caller's needMetrics flag: this combiner always computes per-model metrics.
     base.CalculateMetrics(model, subsetSelector, subset, batch, true);
 }
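
This override simply discards the caller's needMetrics flag so per-model metrics are always computed. The same pattern in isolation, with illustrative names that are not part of ML.NET:

using System;

class MetricsCalculator
{
    public virtual void CalculateMetrics(bool needMetrics)
    {
        if (!needMetrics)
            return;
        Console.WriteLine("metrics computed");
    }
}

class AlwaysMetricsCalculator : MetricsCalculator
{
    // Force the flag to true regardless of what the caller requested.
    public override void CalculateMetrics(bool needMetrics)
        => base.CalculateMetrics(true);
}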