@@ -851,7 +851,7 @@ def __init__(

         self.lr_scheduler = lr_scheduler
         super(LRScheduler, self).__init__(
-            optimizer=self.lr_scheduler.optimizer,  # type: ignore[attr-defined]
+            optimizer=self.lr_scheduler.optimizer,
             param_name="lr",
             save_history=save_history,
         )
@@ -861,13 +861,13 @@ def __init__(
                 "instead of Events.ITERATION_STARTED to make sure to use "
                 "the first lr value from the optimizer, otherwise it is will be skipped"
             )
-            self.lr_scheduler.last_epoch += 1  # type: ignore[attr-defined]
+            self.lr_scheduler.last_epoch += 1

         self._state_attrs += ["lr_scheduler"]

     def __call__(self, engine: Optional[Engine], name: Optional[str] = None) -> None:
         super(LRScheduler, self).__call__(engine, name)
-        self.lr_scheduler.last_epoch += 1  # type: ignore[attr-defined]
+        self.lr_scheduler.last_epoch += 1

     def get_param(self) -> Union[float, List[float]]:
         """Method to get current optimizer's parameter value"""
@@ -908,7 +908,7 @@ def simulate_values(  # type: ignore[override]
             cache_filepath = Path(tmpdirname) / "ignite_lr_scheduler_cache.pt"
             obj = {
                 "lr_scheduler": lr_scheduler.state_dict(),
-                "optimizer": lr_scheduler.optimizer.state_dict(),  # type: ignore[attr-defined]
+                "optimizer": lr_scheduler.optimizer.state_dict(),
             }
             torch.save(obj, cache_filepath.as_posix())

@@ -921,7 +921,7 @@ def simulate_values(  # type: ignore[override]

             obj = torch.load(cache_filepath.as_posix())
             lr_scheduler.load_state_dict(obj["lr_scheduler"])
-            lr_scheduler.optimizer.load_state_dict(obj["optimizer"])  # type: ignore[attr-defined]
+            lr_scheduler.optimizer.load_state_dict(obj["optimizer"])

             return values

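For context, the two hunks above are the snapshot and restore halves of `simulate_values`: the scheduler's and the optimizer's `state_dict()` are cached to a temporary file, the simulation runs (mutating both objects), and the saved state is loaded back so the caller's scheduler and optimizer end up untouched. A self-contained sketch of that pattern with stock PyTorch objects; the model, `gamma`, and step count are illustrative, not taken from the diff:

import tempfile
from pathlib import Path

import torch
from torch.optim.lr_scheduler import ExponentialLR

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = ExponentialLR(optimizer, gamma=0.9)

with tempfile.TemporaryDirectory() as tmpdirname:
    cache_filepath = Path(tmpdirname) / "cache.pt"
    # snapshot both objects before simulating
    obj = {"lr_scheduler": scheduler.state_dict(), "optimizer": optimizer.state_dict()}
    torch.save(obj, cache_filepath.as_posix())

    values = []
    for i in range(5):
        values.append([i, optimizer.param_groups[0]["lr"]])
        scheduler.step()  # mutates both the scheduler and the optimizer

    # restore, so the caller's scheduler and optimizer are left untouched
    obj = torch.load(cache_filepath.as_posix())
    scheduler.load_state_dict(obj["lr_scheduler"])
    optimizer.load_state_dict(obj["optimizer"])

print(values)                           # simulated lr values per step
print(optimizer.param_groups[0]["lr"])  # back to the original 0.1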
@@ -1403,7 +1403,7 @@ def simulate_values(cls, num_events: int, schedulers: List[_LRScheduler], **kwar
             cache_filepath = Path(tmpdirname) / "ignite_lr_scheduler_cache.pt"
             objs = {f"lr_scheduler_{i}": s.state_dict() for i, s in enumerate(schedulers)}
             # all schedulers should be related to the same optimizer
-            objs["optimizer"] = schedulers[0].optimizer.state_dict()  # type: ignore[attr-defined]
+            objs["optimizer"] = schedulers[0].optimizer.state_dict()

             torch.save(objs, cache_filepath.as_posix())

@@ -1417,7 +1417,7 @@ def simulate_values(cls, num_events: int, schedulers: List[_LRScheduler], **kwar
             objs = torch.load(cache_filepath.as_posix())
             for i, s in enumerate(schedulers):
                 s.load_state_dict(objs[f"lr_scheduler_{i}"])
-                s.optimizer.load_state_dict(objs["optimizer"])  # type: ignore[attr-defined]
+                s.optimizer.load_state_dict(objs["optimizer"])

             return values

@@ -1561,8 +1561,8 @@ def get_param(self) -> Union[float, List[float]]:
     def _reduce_lr(self, epoch: int) -> None:
         for i, param_group in enumerate(self.optimizer_param_groups):
             old_lr = float(param_group["lr"])
-            new_lr = max(old_lr * self.scheduler.factor, self.scheduler.min_lrs[i])  # type: ignore[attr-defined]
-            if old_lr - new_lr > self.scheduler.eps:  # type: ignore[attr-defined]
+            new_lr = max(old_lr * self.scheduler.factor, self.scheduler.min_lrs[i])
+            if old_lr - new_lr > self.scheduler.eps:
                 param_group["lr"] = new_lr

     @classmethod
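`_reduce_lr` follows the same reduction rule as `torch.optim.lr_scheduler.ReduceLROnPlateau`: scale each group's learning rate by the wrapped scheduler's `factor`, clamp it at the per-group `min_lrs[i]`, and only apply the update when it differs from the old value by more than `eps`, which suppresses negligible changes. A rough numeric sketch; all values are illustrative:

old_lr = 1e-3
factor, min_lr, eps = 0.1, 1e-6, 1e-8  # ReduceLROnPlateau-style hyper-parameters

new_lr = max(old_lr * factor, min_lr)  # ~1e-4, still above min_lr
if old_lr - new_lr > eps:              # ~9e-4 > 1e-8, so the change is large enough to apply
    old_lr = new_lr

print(old_lr)  # ~1e-4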