Example #1
 /// <summary>
 /// Compute the recall
 ///
 /// The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
 /// true positives and ``fn`` the number of false negatives. The recall is
 /// intuitively the ability of the classifier to find all the positive samples.
 ///
 /// The best value is 1 and the worst value is 0.
 /// </summary>
 /// <param name="yTrue">List of labels. Ground truth (correct) target values.</param>
 /// <param name="yPred">Estimated targets as returned by a classifier.</param>
 /// <param name="labels">Integer array of labels.</param>
 /// <param name="posLabel">classification target is binary,
 /// only this class's scores will be returned.</param>
 /// <param name="average">Unless ``posLabel`` is given in binary classification, this
 /// determines the type of averaging performed on the data.</param>
 /// <returns>Recall of the positive class in binary classification or weighted
 /// average of the recall of each class for the multiclass task.</returns>
 /// <example>
 ///    In the binary case:
 ///
 ///    var yPred = new[] { 0, 1, 0, 0 };
 ///    var yTrue = new[] { 0, 1, 0, 1 };
 ///    Metrics.RecallScoreAvg(yTrue, yPred);
 ///       0.5
 ///
 ///    In the multiclass case:
 ///
 ///    var yTrue = new[] { 0, 1, 2, 0, 1, 2 };
 ///    var yPred = new[] { 0, 2, 1, 0, 0, 1 };
 ///    Metrics.RecallScoreAvg(yTrue, yPred, average: AverageKind.Macro)
 ///       0.33...
 ///    Metrics.RecallScoreAvg(yTrue, yPred, average: AverageKind.Micro)
 ///       0.33...
 ///    Metrics.RecallScoreAvg(yTrue, yPred, average: AverageKind.Weighted)
 ///       0.33...
 ///    Metrics.RecallScore(yTrue, yPred)
 ///      { 1.0, 0.0, 0.0 }
 /// </example>
 public static double RecallScoreAvg(
     int[] yTrue,
     int[] yPred,
     int[] labels        = null,
     int posLabel        = 1,
     AverageKind average = AverageKind.Weighted)
 {
     return(PrecisionRecallFScoreSupportAvg(
                yTrue,
                yPred,
                labels: labels,
                posLabel: posLabel,
                average: average).Recall);
 }
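
A minimal sketch of the recall computation described in the summary above, using the same tp / (tp + fn) definition; the class and method names (RecallSketch, BinaryRecall) are hypothetical and not part of the Metrics API shown here:

    using System.Linq;

    static class RecallSketch
    {
        // Recall of the positive class: true positives over all actual positives.
        public static double BinaryRecall(int[] yTrue, int[] yPred, int posLabel = 1)
        {
            int tp = yTrue.Zip(yPred, (t, p) => t == posLabel && p == posLabel ? 1 : 0).Sum();
            int fn = yTrue.Zip(yPred, (t, p) => t == posLabel && p != posLabel ? 1 : 0).Sum();
            return tp + fn == 0 ? 0.0 : (double)tp / (tp + fn);
        }
    }

For the binary example above, BinaryRecall(new[] { 0, 1, 0, 1 }, new[] { 0, 1, 0, 0 }) returns 0.5, matching the documented output.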
Example #2
 /// <summary>
 /// <para>
 /// Compute the F1 score, also known as balanced F-score or F-measure
 /// </para>
 /// <para>
 /// The F1 score can be interpreted as a weighted average of the precision and
 /// recall, where an F1 score reaches its best value at 1 and worst score at 0.
 /// The relative contribution of precision and recall to the F1 score are
 /// equal. The formula for the F1 score is:
 /// </para>
 /// <para>
 /// F1 = 2 * (precision * recall) / (precision + recall)
 /// </para>
 /// <para>
 /// In the multi-class and multi-label case, this is the weighted average of
 /// the F1 score of each class.
 /// </para>
 /// <para>
 ///    References:
 ///    Wikipedia entry for the F1-score:
 ///    http://en.wikipedia.org/wiki/F1_score
 /// </para>
 /// </summary>
 /// <param name="yTrue">List of labels. Ground truth (correct) target values.</param>
 /// <param name="yPred">Estimated targets as returned by a classifier.</param>
 /// <param name="labels">Integer array of labels.</param>
 /// <param name="posLabel">If classification target is binary,
 /// only this class's scores will be returned.</param>
 /// <param name="average">Unless ``posLabel`` is given in binary classification, this
 /// determines the type of averaging performed on the data.</param>
 /// <returns>F1 score of the positive class in binary classification or weighted
 /// average of the F1 scores of each class for the multiclass task.</returns>
 /// <example>
 ///  In the binary case:
 ///
 ///   var yPred = new[] { 0, 1, 0, 0 };
 ///   var yTrue = new[] { 0, 1, 0, 1 };
 ///   Metrics.F1ScoreAvg(yTrue, yPred);
 ///       0.666...
 ///
 ///  In the multiclass case:
 ///
 ///  var yTrue = new[] { 0, 1, 2, 0, 1, 2 };
 ///  var yPred = new[] { 0, 2, 1, 0, 0, 1 };
 ///  Metrics.F1ScoreAvg(yTrue, yPred, average: AverageKind.Micro)
 ///  0.33...
 ///  Metrics.F1ScoreAvg(yTrue, yPred, average: AverageKind.Macro)
 ///  0.26...
 ///  Metrics.F1ScoreAvg(yTrue, yPred, average: AverageKind.Weighted)
 ///  0.26...
 /// </example>
 public static double F1ScoreAvg(
     int[] yTrue,
     int[] yPred,
     int[] labels        = null,
     int posLabel        = 1,
     AverageKind average = AverageKind.Weighted)
 {
     return(FBetaScoreAvg(
                yTrue,
                yPred,
                1,
                labels: labels,
                posLabel: posLabel,
                average: average));
 }
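
As a cross-check of the F1 formula quoted in the summary, here is a hedged sketch that computes F1 directly from confusion counts; F1Sketch and F1FromCounts are illustrative names, not part of the Metrics API:

    static class F1Sketch
    {
        // F1 = 2 * (precision * recall) / (precision + recall), defined as 0 when both are 0.
        public static double F1FromCounts(int tp, int fp, int fn)
        {
            double precision = tp + fp == 0 ? 0.0 : (double)tp / (tp + fp);
            double recall    = tp + fn == 0 ? 0.0 : (double)tp / (tp + fn);
            double denom     = precision + recall;
            return denom == 0 ? 0.0 : 2 * precision * recall / denom;
        }
    }

For the binary example above (tp = 1, fp = 0, fn = 1): precision = 1, recall = 0.5, so F1 = 2 * 0.5 / 1.5 = 0.666..., matching F1ScoreAvg.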
Example #3
        /// <summary>
        /// Compute average precision, recall, F-measure and support.
        ///
        /// The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
        /// true positives and ``fp`` the number of false positives. The precision is
        /// intuitively the ability of the classifier not to label as positive a sample
        /// that is negative.
        ///
        /// The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
        /// true positives and ``fn`` the number of false negatives. The recall is
        /// intuitively the ability of the classifier to find all the positive samples.
        ///
        /// The F-beta score can be interpreted as a weighted harmonic mean of
        /// the precision and recall, where an F-beta score reaches its best
        /// value at 1 and worst score at 0.
        ///
        /// The F-beta score weights recall more than precision by a factor of
        /// ``beta``. ``beta == 1.0`` means recall and precision are equally important.
        ///
        /// The support is the number of occurrences of each class in ``y_true``.
        /// </summary>
        /// <param name="yTrue">List of labels. Ground truth (correct) target values.</param>
        /// <param name="yPred">Estimated targets as returned by a classifier.</param>
        /// <param name="beta">Weight of precision in harmonic mean.</param>
        /// <param name="labels">Integer array of labels.</param>
        /// <param name="posLabel">If the classification target is binary,
        /// only this class's scores will be returned.</param>
        /// <param name="average">Unless ``posLabel`` is given in binary classification, this
        /// determines the type of averaging performed on the data.</param>
        /// <returns>Instance of <see cref="PrecisionRecallResultAvg"/>.</returns>
        /// <remarks>
        /// [1] Wikipedia entry for Precision and recall:
        ///     http://en.wikipedia.org/wiki/Precision_and_recall
        ///
        /// [2] Wikipedia entry for the F1-score:
        ///     http://en.wikipedia.org/wiki/F1_score
        ///
        /// [3] Shantanu Godbole, Sunita Sarawagi. Discriminative Methods for
        ///     Multi-labeled Classification. Advances in Knowledge Discovery and
        ///     Data Mining (2004), pp. 22-30.
        ///     http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf
        /// </remarks>
        /// <example>
        ///
        /// In the multiclass case:
        ///
        /// var yTrue = new[] { 0, 1, 2, 0, 1, 2 };
        /// var yPred = new[] { 0, 2, 1, 0, 0, 1 };
        /// var r = Metrics.PrecisionRecallFScoreSupportAvg(yTrue, yPred, average: AverageKind.Macro);
        ///     (Precision = 0.22, Recall = 0.33, FScore = 0.26)
        /// r = Metrics.PrecisionRecallFScoreSupportAvg(yTrue, yPred, average: AverageKind.Micro);
        ///     (Precision = 0.33, Recall = 0.33, FScore = 0.33)
        /// r = Metrics.PrecisionRecallFScoreSupportAvg(yTrue, yPred, average: AverageKind.Weighted);
        ///     (Precision = 0.22, Recall = 0.33, FScore = 0.26)
        /// </example>
        public static PrecisionRecallResultAvg PrecisionRecallFScoreSupportAvg(
            int[] yTrue,
            int[] yPred,
            double beta         = 1.0,
            int[] labels        = null,
            int? posLabel       = 1,
            AverageKind average = AverageKind.Weighted)
        {
            var r = PrecisionRecallFScoreSupportInternal(yTrue, yPred, beta, labels, posLabel, average);

            return(new PrecisionRecallResultAvg
            {
                FBetaScore = r.FBetaScore[0],
                Precision = r.Precision[0],
                Recall = r.Recall[0]
            });
        }
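
To make the averaging modes concrete: Macro averages the per-class scores with equal weight, while Micro pools the counts across classes before dividing. A minimal sketch, assuming per-class tp/fn counts have already been tallied (AveragingSketch and its method names are illustrative, not the library's internals):

    using System.Linq;

    static class AveragingSketch
    {
        // Macro: unweighted mean of the per-class recalls.
        public static double MacroRecall(int[] tpPerClass, int[] fnPerClass)
        {
            double sum = 0;
            for (int c = 0; c < tpPerClass.Length; c++)
            {
                int denom = tpPerClass[c] + fnPerClass[c];
                sum += denom == 0 ? 0.0 : (double)tpPerClass[c] / denom;
            }
            return sum / tpPerClass.Length;
        }

        // Micro: a single recall computed from counts pooled over all classes.
        public static double MicroRecall(int[] tpPerClass, int[] fnPerClass)
        {
            int tp = tpPerClass.Sum();
            int fn = fnPerClass.Sum();
            return tp + fn == 0 ? 0.0 : (double)tp / (tp + fn);
        }
    }

For the multiclass example above (tp = { 2, 0, 0 }, fn = { 0, 2, 2 }) both modes give 0.33... only because the class supports are equal; with unbalanced classes Macro and Micro generally diverge.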
Example #4
        /// <summary>
        /// Compute the F-beta score
        ///
        /// The F-beta score is the weighted harmonic mean of precision and recall,
        /// reaching its optimal value at 1 and its worst value at 0.
        ///
        /// The `beta` parameter determines the weight of precision in the combined
        /// score. ``beta &lt; 1`` lends more weight to precision, while ``beta > 1``
        /// favors precision (``beta == 0`` considers only precision, ``beta == inf``
        /// only recall).
        /// </summary>
        /// <param name="yTrue">List of labels. Ground truth (correct) target values.</param>
        /// <param name="yPred">Estimated targets as returned by a classifier.</param>
        /// <param name="beta">Weight of precision in harmonic mean.</param>
        /// <param name="labels">Integer array of labels.</param>
        /// <param name="posLabel">If the classification target is binary,
        /// only this class's scores will be returned.</param>
        /// <param name="average">Unless ``posLabel`` is given in binary classification, this
        /// determines the type of averaging performed on the data.</param>
        /// <returns> F-beta score of the positive class in binary classification or weighted
        /// average of the F-beta score of each class for the multiclass task.</returns>
        /// <remarks>
        /// [1] R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern Information
        ///     Retrieval. Addison Wesley, pp. 327-328.
        ///
        /// [2] Wikipedia entry for the F1-score:
        ///     http://en.wikipedia.org/wiki/F1_score
        /// </remarks>
        /// <example>
        /// In the binary case:
        /// var yPred = new[] { 0, 1, 0, 0 };
        /// var yTrue = new[] { 0, 1, 0, 1 };
        /// Metrics.FBetaScoreAvg(yTrue, yPred, beta: 0.5)
        ///     0.83...
        ///
        /// Metrics.FBetaScoreAvg(yTrue, yPred, beta: 1)
        ///     0.66...
        /// Metrics.FBetaScoreAvg(yTrue, yPred, beta: 2)
        ///     0.55...
        ///
        /// In the multiclass case:
        ///
        /// yTrue = new[] { 0, 1, 2, 0, 1, 2 };
        /// yPred = new[] { 0, 2, 1, 0, 0, 1 };
        /// Metrics.FBetaScoreAvg(yTrue, yPred, average: AverageKind.Macro, beta: 0.5);
        ///    0.23...
        /// Metrics.FBetaScoreAvg(yTrue, yPred, average: AverageKind.Micro, beta: 0.5);
        ///    0.33...
        /// Metrics.FBetaScoreAvg(yTrue, yPred, average: AverageKind.Weighted, beta: 0.5);
        ///    0.23...
        /// </example>
        public static double FBetaScoreAvg(
            int[] yTrue,
            int[] yPred,
            double beta,
            int[] labels        = null,
            int posLabel        = 1,
            AverageKind average = AverageKind.Weighted)
        {
            var r = PrecisionRecallFScoreSupportAvg(
                yTrue,
                yPred,
                beta: beta,
                labels: labels,
                posLabel: posLabel,
                average: average);

            return(r.FBetaScore);
        }
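
The F-beta combination used above can be written directly in terms of precision and recall; a minimal sketch under the same counts as the binary example (FBetaSketch and FBetaFromCounts are hypothetical names, not part of Metrics):

    static class FBetaSketch
    {
        // F_beta = (1 + beta^2) * P * R / (beta^2 * P + R);
        // beta < 1 favors precision, beta > 1 favors recall.
        public static double FBetaFromCounts(int tp, int fp, int fn, double beta)
        {
            double p  = tp + fp == 0 ? 0.0 : (double)tp / (tp + fp);
            double r  = tp + fn == 0 ? 0.0 : (double)tp / (tp + fn);
            double b2 = beta * beta;
            double denom = b2 * p + r;
            return denom == 0 ? 0.0 : (1 + b2) * p * r / denom;
        }
    }

For the binary example (tp = 1, fp = 0, fn = 1) and beta = 0.5: (1 + 0.25) * 1 * 0.5 / (0.25 + 0.5) = 0.625 / 0.75 = 0.83..., matching the documented output.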