2 changes: 2 additions & 0 deletions src/torchmetrics/functional/classification/stat_scores.py
@@ -416,6 +416,8 @@ def _multiclass_stat_scores_update(
 fp = confmat.sum(0) - tp
 fn = confmat.sum(1) - tp
 tn = confmat.sum() - (fp + fn + tp)
+if ignore_index is not None:
+    fp[ignore_index] = 0
Collaborator:
This is probably correct, but since many metrics derive from the stat_scores class, basically all of them would need to have their unit tests fixed.

Member:
What do you mean by "it needs to be fixed"?

return tp, fp, tn, fn


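For context, here is a small standalone sketch (not part of the PR; the class count and ignore_index value are made up) that mirrors the patched update step. Targets equal to `ignore_index` are filtered out before the confusion matrix is built, but predictions can still land on that class, so its column picks up false positives; the new lines zero them out.

```python
import torch

num_classes, ignore_index = 3, 2
preds = torch.tensor([0, 2, 1])   # the second prediction hits the ignored class
target = torch.tensor([0, 1, 1])  # samples with target == ignore_index were already dropped

# build the confusion matrix (rows = target, cols = preds), as the update step does
confmat = torch.zeros(num_classes, num_classes, dtype=torch.long)
for t, p in zip(target.tolist(), preds.tolist()):
    confmat[t, p] += 1

tp = confmat.diag()
fp = confmat.sum(0) - tp
fn = confmat.sum(1) - tp
tn = confmat.sum() - (fp + fn + tp)
print(fp)  # tensor([0, 0, 1]) -- spurious false positive attributed to the ignored class

# the patched lines zero that entry out
if ignore_index is not None:
    fp[ignore_index] = 0
print(fp)  # tensor([0, 0, 0])
```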
7 changes: 2 additions & 5 deletions tests/unittests/classification/test_accuracy.py
@@ -190,12 +190,9 @@ def _reference_sklearn_accuracy_multiclass(preds, target, ignore_index, multidim
     return _reference_sklearn_accuracy(target, preds)
 confmat = sk_confusion_matrix(target, preds, labels=list(range(NUM_CLASSES)))
 acc_per_class = confmat.diagonal() / confmat.sum(axis=1)
-acc_per_class[np.isnan(acc_per_class)] = 0.0
 if average == "macro":
-    acc_per_class = acc_per_class[
-        (np.bincount(preds, minlength=NUM_CLASSES) + np.bincount(target, minlength=NUM_CLASSES)) != 0.0
-    ]
-    return acc_per_class.mean()
+    return np.nanmean(acc_per_class)
+acc_per_class[np.isnan(acc_per_class)] = 0.0
 if average == "weighted":
     weights = confmat.sum(1)
     return ((weights * acc_per_class) / weights.sum()).sum()
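As a quick illustration of the reference change (the values below are made up, not taken from the test suite): a class that never appears in `target` produces a zero row in the confusion matrix, so its per-class accuracy is NaN, and `np.nanmean` simply drops it from the macro average instead of zeroing the NaN and masking unsupported classes by hand.

```python
import numpy as np
from sklearn.metrics import confusion_matrix as sk_confusion_matrix

NUM_CLASSES = 3
preds = np.array([0, 0, 1, 1])
target = np.array([0, 1, 1, 1])  # class 2 has no support in target

confmat = sk_confusion_matrix(target, preds, labels=list(range(NUM_CLASSES)))
acc_per_class = confmat.diagonal() / confmat.sum(axis=1)  # last entry is 0/0 -> nan
print(acc_per_class)              # [1.0, 0.6666..., nan]
print(np.nanmean(acc_per_class))  # macro accuracy averaged over supported classes only
```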