 class AveragePrecisionMetric(CumulativeIterationMetric):
     """
-    Computes Average Precision (AP). Referring to: `sklearn.metrics.average_precision_score
+    Computes Average Precision (AP). AP is a useful metric to evaluate a classifier when the classes are
+    imbalanced. It summarizes a Precision-Recall curve as the weighted mean of precisions achieved at each
+    threshold, with the increase in recall from the previous threshold used as the weight:
+
+    .. math::
+        :label: ap
+
+        \\text{AP} = \\sum_n (R_n - R_{n-1}) P_n
+
+    where :math:`P_n` and :math:`R_n` are the precision and recall at the :math:`n^{th}` threshold.
+
+    Referring to: `sklearn.metrics.average_precision_score
     <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score>`_.
+
     The input `y_pred` and `y` can be a list of `channel-first` Tensor or a `batch-first` Tensor.

     Example of the typical execution steps of this metric class follows :py:class:`monai.metrics.metric.Cumulative`.
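The summation in equation :eq:`ap` can be checked numerically. Below is a minimal sketch (not part of this diff) that evaluates the formula directly with NumPy and compares the result with `sklearn.metrics.average_precision_score`; the toy labels and scores are the classic four-sample example from the scikit-learn documentation.

```python
# Minimal sketch (not from this diff): evaluate AP = sum_n (R_n - R_{n-1}) P_n
# directly and compare with scikit-learn. The toy data is the four-sample
# example from the sklearn.metrics.average_precision_score documentation.
import numpy as np
from sklearn.metrics import average_precision_score

y = np.array([0, 0, 1, 1])
scores = np.array([0.1, 0.4, 0.35, 0.8])

order = np.argsort(-scores)  # descending score: each rank is one threshold
hits = y[order]

tp = np.cumsum(hits)                       # true positives at each threshold
precision = tp / np.arange(1, len(y) + 1)  # P_n
recall = tp / hits.sum()                   # R_n

ap = np.sum(np.diff(recall, prepend=0.0) * precision)  # with R_0 = 0
print(ap)                                  # 0.8333...
print(average_precision_score(y, scores))  # same value
```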
@@ -107,7 +118,9 @@ def _calculate(y_pred: torch.Tensor, y: torch.Tensor) -> float:
 def compute_average_precision(
     y_pred: torch.Tensor, y: torch.Tensor, average: Average | str = Average.MACRO
 ) -> np.ndarray | float | npt.ArrayLike:
-    """Computes Average Precision (AP). Referring to: `sklearn.metrics.average_precision_score
+    """Computes Average Precision (AP). AP is a useful metric to evaluate a classifier when the classes are
+    imbalanced. It summarizes a Precision-Recall curve according to equation :eq:`ap`.
+    Referring to: `sklearn.metrics.average_precision_score
     <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score>`_.

     Args:
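For context, here is a hypothetical usage sketch of the class this diff adds, assuming it is exported from `monai.metrics` and follows the call-then-`aggregate()` pattern of MONAI's other `CumulativeIterationMetric` subclasses (e.g. `ROCAUCMetric`); the tensors are made-up one-hot examples.

```python
import torch
from monai.metrics import AveragePrecisionMetric  # assumed export path

metric = AveragePrecisionMetric()  # average=Average.MACRO by default

# Accumulate batch-first predictions and one-hot targets over an eval loop.
batches = [
    (torch.tensor([[0.1, 0.9], [0.8, 0.2]]), torch.tensor([[0, 1], [1, 0]])),
    (torch.tensor([[0.3, 0.7], [0.6, 0.4]]), torch.tensor([[0, 1], [0, 1]])),
]
for y_pred, y in batches:
    metric(y_pred=y_pred, y=y)

print(metric.aggregate())  # macro-averaged AP over the accumulated batches
metric.reset()             # clear buffers before the next evaluation run
```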