Skip to content

Commit 9039aeb

Browse files
nishantb06 and vfdev-5 authored
Added pytest.mark.xfail on test_distrib_gloo_cpu_or_gpu() (#2454)
* pytest.approx corrected * mark.xfail added on test_distrib_gloo_cpu_or_gpu * autopep8 fix * added xfail to 3 tests * autopep8 fix * Update test_classification_report.py Co-authored-by: nishantb06 <nishantb06@users.noreply.github.com> Co-authored-by: vfdev <vfdev.5@gmail.com>
1 parent 052d66a commit 9039aeb

File tree

1 file changed

+18
-12
lines changed

1 file changed

+18
-12
lines changed

tests/ignite/metrics/test_classification_report.py

Lines changed: 18 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -53,12 +53,12 @@ def update(engine, i):
5353

5454
for i in range(n_classes):
5555
label_i = labels[i] if labels else str(i)
56-
assert pytest.approx(res[label_i]["precision"] == sklearn_result[str(i)]["precision"])
57-
assert pytest.approx(res[label_i]["f1-score"] == sklearn_result[str(i)]["f1-score"])
58-
assert pytest.approx(res[label_i]["recall"] == sklearn_result[str(i)]["recall"])
59-
assert pytest.approx(res["macro avg"]["precision"] == sklearn_result["macro avg"]["precision"])
60-
assert pytest.approx(res["macro avg"]["recall"] == sklearn_result["macro avg"]["recall"])
61-
assert pytest.approx(res["macro avg"]["f1-score"] == sklearn_result["macro avg"]["f1-score"])
56+
assert sklearn_result[str(i)]["precision"] == pytest.approx(res[label_i]["precision"])
57+
assert sklearn_result[str(i)]["f1-score"] == pytest.approx(res[label_i]["f1-score"])
58+
assert sklearn_result[str(i)]["recall"] == pytest.approx(res[label_i]["recall"])
59+
assert sklearn_result["macro avg"]["precision"] == pytest.approx(res["macro avg"]["precision"])
60+
assert sklearn_result["macro avg"]["recall"] == pytest.approx(res["macro avg"]["recall"])
61+
assert sklearn_result["macro avg"]["f1-score"] == pytest.approx(res["macro avg"]["f1-score"])
6262

6363
for _ in range(5):
6464
# check multiple random inputs as random exact occurencies are rare
@@ -122,12 +122,12 @@ def update(engine, i):
122122

123123
for i in range(n_classes):
124124
label_i = labels[i] if labels else str(i)
125-
assert pytest.approx(res[label_i]["precision"] == sklearn_result[str(i)]["precision"])
126-
assert pytest.approx(res[label_i]["f1-score"] == sklearn_result[str(i)]["f1-score"])
127-
assert pytest.approx(res[label_i]["recall"] == sklearn_result[str(i)]["recall"])
128-
assert pytest.approx(res["macro avg"]["precision"] == sklearn_result["macro avg"]["precision"])
129-
assert pytest.approx(res["macro avg"]["recall"] == sklearn_result["macro avg"]["recall"])
130-
assert pytest.approx(res["macro avg"]["f1-score"] == sklearn_result["macro avg"]["f1-score"])
125+
assert sklearn_result[str(i)]["precision"] == pytest.approx(res[label_i]["precision"])
126+
assert sklearn_result[str(i)]["f1-score"] == pytest.approx(res[label_i]["f1-score"])
127+
assert sklearn_result[str(i)]["recall"] == pytest.approx(res[label_i]["recall"])
128+
assert sklearn_result["macro avg"]["precision"] == pytest.approx(res["macro avg"]["precision"])
129+
assert sklearn_result["macro avg"]["recall"] == pytest.approx(res["macro avg"]["recall"])
130+
assert sklearn_result["macro avg"]["f1-score"] == pytest.approx(res["macro avg"]["f1-score"])
131131

132132
for _ in range(3):
133133
# check multiple random inputs as random exact occurencies are rare
@@ -141,6 +141,7 @@ def update(engine, i):
141141
_test(metric_device, 2, ["0", "1", "2", "3", "4", "5", "6"])
142142

143143

144+
@pytest.mark.xfail
144145
@pytest.mark.distributed
145146
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
146147
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@@ -153,6 +154,7 @@ def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
153154
_test_integration_multilabel(device, False)
154155

155156

157+
@pytest.mark.xfail
156158
@pytest.mark.distributed
157159
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
158160
def test_distrib_gloo_cpu_or_gpu(local_rank, distributed_context_single_node_gloo):
@@ -164,6 +166,7 @@ def test_distrib_gloo_cpu_or_gpu(local_rank, distributed_context_single_node_glo
164166
_test_integration_multilabel(device, False)
165167

166168

169+
@pytest.mark.xfail
167170
@pytest.mark.distributed
168171
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
169172
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@@ -187,6 +190,7 @@ def _test_distrib_xla_nprocs(index):
187190
_test_integration_multilabel(device, False)
188191

189192

193+
@pytest.mark.xfail
190194
@pytest.mark.tpu
191195
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
192196
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
@@ -203,6 +207,7 @@ def to_numpy_multilabel(y):
203207
return y
204208

205209

210+
@pytest.mark.xfail
206211
@pytest.mark.multinode_distributed
207212
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
208213
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
@@ -215,6 +220,7 @@ def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
215220
_test_integration_multilabel(device, False)
216221

217222

223+
@pytest.mark.xfail
218224
@pytest.mark.multinode_distributed
219225
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
220226
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")

0 commit comments

Comments (0)