2 files changed: 0 additions, 21 deletions.
@@ -1,4 +1,3 @@
-import warnings
 from abc import ABCMeta, abstractmethod
 from collections.abc import Mapping
 from functools import wraps
@@ -210,17 +209,6 @@ def __init__(
     ):
         self._output_transform = output_transform

-        # Check device if distributed is initialized:
-        if idist.get_world_size() > 1:
-
-            # check if reset and update methods are decorated. Compute may not be decorated
-            if not (hasattr(self.reset, "_decorated") and hasattr(self.update, "_decorated")):
-                warnings.warn(
-                    f"{self.__class__.__name__} class does not support distributed setting. "
-                    "Computed result is not collected across all computing devices",
-                    RuntimeWarning,
-                )
-
         # Some metrics have a large performance regression when run on XLA devices, so for now, we disallow it.
         if torch.device(device).type == "xla":
             raise ValueError("Cannot create metric on an XLA device. Use device='cpu' instead.")
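For context on the removed block: it assumed that ignite's metric decorators (presumably reinit__is_reduced / sync_all_reduce) tag the wrapped reset/update methods with a "_decorated" attribute, and warned when an undecorated metric was constructed while a distributed group was active. The following is a minimal, self-contained sketch of that detection pattern only; mark_decorated and the hard-coded world_size are illustrative stand-ins, not the library code.

import warnings

def mark_decorated(fn):
    # Toy stand-in for a decorator that tags the wrapped method so a base
    # class can later detect it via hasattr(method, "_decorated").
    fn._decorated = True
    return fn

class ToyMetric:
    def reset(self):  # plain method: carries no "_decorated" attribute
        pass

    @mark_decorated
    def update(self, output):  # tagged method
        pass

metric = ToyMetric()
world_size = 2  # illustrative stand-in for idist.get_world_size() in a distributed run
if world_size > 1 and not (hasattr(metric.reset, "_decorated") and hasattr(metric.update, "_decorated")):
    # Mirrors the warning that the diff above deletes.
    warnings.warn(
        f"{metric.__class__.__name__} class does not support distributed setting. "
        "Computed result is not collected across all computing devices",
        RuntimeWarning,
    )

Because ToyMetric.reset is untagged, running the sketch emits the RuntimeWarning; decorating both methods silences it, which is the behaviour the removed check enforced.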
@@ -29,15 +29,6 @@ def update(self, output):
         assert output == self.true_output


-@pytest.mark.distributed
-@pytest.mark.skipif("WORLD_SIZE" not in os.environ, reason="Skip if WORLD_SIZE not in env vars")
-@pytest.mark.skipif(torch.cuda.is_available(), reason="Skip if GPU")
-def test_metric_warning(distributed_context_single_node_gloo):
-    y = torch.tensor([1.0])
-    with pytest.warns(RuntimeWarning, match=r"DummyMetric1 class does not support distributed setting"):
-        DummyMetric1((y, y))
-
-
 def test_no_transform():
     y_pred = torch.Tensor([[2.0], [-2.0]])
     y = torch.zeros(2)
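The deleted test constructed DummyMetric1, whose definition is not part of this hunk. A hypothetical minimal reconstruction (an assumption about its shape based on the context line "assert output == self.true_output", not the actual fixture) would be a Metric subclass whose reset/update are plain, undecorated methods:

import torch
from ignite.metrics import Metric

class DummyMetric1(Metric):
    # Hypothetical reconstruction: reset/update are undecorated, which is
    # exactly what the removed base-class check warned about.
    def __init__(self, true_output, output_transform=lambda x: x):
        self.true_output = true_output
        super().__init__(output_transform=output_transform)

    def reset(self):
        pass

    def compute(self):
        pass

    def update(self, output):
        assert output == self.true_output

With the warning removed from the base class, constructing such a metric under a distributed gloo context no longer raises RuntimeWarning, so the pytest.warns assertion in the deleted test would fail; the test is removed together with the source change.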