Update lr_scheduler.pyi to match lr_scheduler.py (pytorch#88818)
Following pytorch#88503, we should also update the pyi file

Pull Request resolved: pytorch#88818
Approved by: https://github.com/soulitzer
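
For context, the practical effect of this stub update is that type-checked user code can annotate against the public LRScheduler name instead of the private _LRScheduler. A minimal sketch, not part of this commit, assuming a PyTorch build that already contains the runtime rename from pytorch#88503 (the model and hyperparameters are illustrative only):

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import LRScheduler, StepLR

model = torch.nn.Linear(4, 2)          # toy module, illustrative only
optimizer = SGD(model.parameters(), lr=0.1)

def advance(scheduler: LRScheduler) -> None:
    # Every scheduler in the updated stub subclasses LRScheduler, so any of
    # them satisfies this public annotation; no private _LRScheduler needed.
    scheduler.step()

optimizer.step()                        # step the optimizer first so no "scheduler before optimizer" warning is raised
advance(StepLR(optimizer, step_size=5))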
janeyx99 authored and pytorchmergebot committed Nov 11, 2022
1 parent 86b7aa2 commit 310335d
Showing 1 changed file with 20 additions and 17 deletions.
37 changes: 20 additions & 17 deletions torch/optim/lr_scheduler.pyi
@@ -1,7 +1,7 @@
 from typing import Iterable, Any, Optional, Callable, Union, List
 from .optimizer import Optimizer
 
-class _LRScheduler:
+class LRScheduler:
     optimizer: Optimizer = ...
     base_lrs: List[float] = ...
     last_epoch: int = ...
@@ -14,46 +14,49 @@ class _LRScheduler:
     def step(self, epoch: Optional[int] = ...) -> None: ...
     def print_lr(self, is_verbose: bool, group: dict, lr: float, epoch: Optional[int] = ...) -> None: ...
 
-class LambdaLR(_LRScheduler):
+class _LRScheduler(LRScheduler):
+    ...
+
+class LambdaLR(LRScheduler):
     lr_lambdas: List[Callable[[int], float]] = ...
     def __init__(self, optimizer: Optimizer, lr_lambda: Union[Callable[[int], float], List[Callable[[int], float]]], last_epoch: int = ..., verbose: bool = ...) -> None: ...
 
-class MultiplicativeLR(_LRScheduler):
+class MultiplicativeLR(LRScheduler):
     lr_lambdas: List[Callable[[int], float]] = ...
     def __init__(self, optimizer: Optimizer, lr_lambda: Union[Callable[[int], float], List[Callable[[int], float]]], last_epoch: int = ..., verbose: bool = ...) -> None: ...
 
-class StepLR(_LRScheduler):
+class StepLR(LRScheduler):
     step_size: int = ...
     gamma: float = ...
     def __init__(self, optimizer: Optimizer, step_size: int, gamma: float = ..., last_epoch: int = ..., verbose: bool = ...) -> None: ...
 
-class MultiStepLR(_LRScheduler):
+class MultiStepLR(LRScheduler):
     milestones: Iterable[int] = ...
     gamma: float = ...
     def __init__(self, optimizer: Optimizer, milestones: Iterable[int], gamma: float = ..., last_epoch: int = ..., verbose: bool = ...) -> None: ...
 
-class ConstantLR(_LRScheduler):
+class ConstantLR(LRScheduler):
     factor: float = ...
     total_iters: int = ...
    def __init__(self, optimizer: Optimizer, factor: float=..., total_iters: int=..., last_epoch: int=..., verbose: bool = ...) -> None: ...
 
-class LinearLR(_LRScheduler):
+class LinearLR(LRScheduler):
     start_factor: float = ...
     end_factor: float = ...
     total_iters: int = ...
     def __init__(self, optimizer: Optimizer, start_factor: float=..., end_factor: float= ..., total_iters: int= ..., last_epoch: int= ..., verbose: bool = ...) -> None: ...
 
-class ExponentialLR(_LRScheduler):
+class ExponentialLR(LRScheduler):
     gamma: float = ...
     def __init__(self, optimizer: Optimizer, gamma: float, last_epoch: int = ..., verbose: bool = ...) -> None: ...
 
-class ChainedScheduler(_LRScheduler):
-    def __init__(self, schedulers: List[_LRScheduler]) -> None: ...
+class ChainedScheduler(LRScheduler):
+    def __init__(self, schedulers: List[LRScheduler]) -> None: ...
 
-class SequentialLR(_LRScheduler):
-    def __init__(self, optimizer: Optimizer, schedulers: List[_LRScheduler], milestones: List[int], last_epoch: int=..., verbose: bool=...) -> None: ...
+class SequentialLR(LRScheduler):
+    def __init__(self, optimizer: Optimizer, schedulers: List[LRScheduler], milestones: List[int], last_epoch: int=..., verbose: bool=...) -> None: ...
 
-class CosineAnnealingLR(_LRScheduler):
+class CosineAnnealingLR(LRScheduler):
     T_max: int = ...
     eta_min: float = ...
     def __init__(self, optimizer: Optimizer, T_max: int, eta_min: float = ..., last_epoch: int = ..., verbose: bool = ...) -> None: ...
@@ -82,7 +85,7 @@ class ReduceLROnPlateau:
     def state_dict(self) -> dict: ...
     def load_state_dict(self, state_dict: dict) -> None: ...
 
-class CyclicLR(_LRScheduler):
+class CyclicLR(LRScheduler):
     max_lrs: List[float] = ...
     total_size: float = ...
     step_ratio: float = ...
@@ -95,7 +98,7 @@ class CyclicLR(_LRScheduler):
     def __init__(self, optimizer: Optimizer, base_lr: Union[float, List[float]], max_lr: Union[float, List[float]], step_size_up: int = ..., step_size_down: Optional[int] = ..., mode: str = ..., gamma: float = ..., scale_fn: Optional[Callable[[float], float]] = ..., scale_mode: str = ..., cycle_momentum: bool = ..., base_momentum: float = ..., max_momentum: float = ..., last_epoch: int = ..., verbose: bool = ...) -> None: ...
     def scale_fn(self, x: Any) -> float: ...
 
-class CosineAnnealingWarmRestarts(_LRScheduler):
+class CosineAnnealingWarmRestarts(LRScheduler):
     T_0: int = ...
     T_i: int = ...
     T_mult: Optional[int] = ...
@@ -104,14 +107,14 @@ class CosineAnnealingWarmRestarts(_LRScheduler):
     def __init__(self, optimizer: Optimizer, T_0: int, T_mult: int = ..., eta_min: float = ..., last_epoch: int = ..., verbose: bool = ...) -> None: ...
     def step(self, epoch: Optional[Any] = ...): ...
 
-class OneCycleLR(_LRScheduler):
+class OneCycleLR(LRScheduler):
     total_steps: int = ...
     anneal_func: Callable[[float, float, float], float] = ...
     cycle_momentum: bool = ...
     use_beta1: bool = ...
     def __init__(self, optimizer: Optimizer, max_lr: Union[float, List[float]], total_steps: int = ..., epochs: int = ..., steps_per_epoch: int = ..., pct_start: float = ..., anneal_strategy: str = ..., cycle_momentum: bool = ..., base_momentum: Union[float, List[float]] = ..., max_momentum: Union[float, List[float]] = ..., div_factor: float = ..., final_div_factor: float = ..., three_phase: bool = ..., last_epoch: int = ..., verbose: bool = ...) -> None: ...
 
-class PolynomialLR(_LRScheduler):
+class PolynomialLR(LRScheduler):
     total_iters: int = ...
     power: float = ...
     def __init__(self, optimizer: Optimizer, total_iters: int = ..., power: float = ..., last_epoch: int = ..., verbose: bool = ...) -> None: ...
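
The diff also types ChainedScheduler and SequentialLR against List[LRScheduler], while keeping _LRScheduler as an empty subclass so older annotations remain importable. A hedged usage sketch of the newly typed signature (the schedulers and settings below are illustrative, not from this commit):

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import ChainedScheduler, ConstantLR, ExponentialLR

optimizer = SGD(torch.nn.Linear(2, 2).parameters(), lr=0.1)

# Both schedulers wrap the same optimizer; the stub now accepts them as a
# List[LRScheduler] rather than the private List[_LRScheduler].
chained = ChainedScheduler([
    ConstantLR(optimizer, factor=0.5, total_iters=4),
    ExponentialLR(optimizer, gamma=0.9),
])

for _ in range(3):
    optimizer.step()   # optimizer steps first, then the chained schedulers
    chained.step()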
