2 changes: 1 addition & 1 deletion docs/source/conf.py
@@ -354,7 +354,7 @@ def eval_step(engine, batch):
# create default optimizer for doctests

param_tensor = torch.zeros([1], requires_grad=True)
-default_optimizer = torch.optim.SGD([param_tensor], lr=0)
+default_optimizer = torch.optim.SGD([param_tensor], lr=0.1)

# create default trainer for doctests
# as handlers could be attached to the trainer,
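The `lr` change matters because the scheduler doctests in this PR print real learning-rate values from this shared `default_optimizer`: multiplicative schedulers such as `StepLR` scale the current lr by `gamma`, so a base lr of 0 would make every printed value 0.0. Below is a minimal sketch of the doctest globals the examples rely on; `get_default_trainer` (and the `default_model` used later) are assumptions about the surrounding conf.py, not shown in this hunk:

.. code-block:: python

    import torch
    from ignite.engine import Engine

    param_tensor = torch.zeros([1], requires_grad=True)
    # A non-zero base lr: StepLR multiplies the current lr by gamma,
    # so starting from 0.0 every scheduled value would stay 0.0.
    default_optimizer = torch.optim.SGD([param_tensor], lr=0.1)

    def get_default_trainer():
        # Hypothetical helper: a fresh trainer per example, so handlers
        # attached in one doctest do not leak into the next.
        def train_step(engine, batch):
            return batch
        return Engine(train_step)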
146 changes: 106 additions & 40 deletions ignite/handlers/param_scheduler.py
@@ -494,14 +494,14 @@ def print_lr():

# CosineAnnealing increases the learning rate from 0.0 to 1.0
# over a cycle of 4 iterations
-scheduler1 = CosineAnnealingScheduler(optimizer, "lr (base)", 0.0, 1.0, 4, param_group_index=0)
+scheduler_1 = CosineAnnealingScheduler(optimizer, "lr", 0.0, 1.0, 4, param_group_index=0)

# CosineAnnealing increases the learning rate from 0.0 to 0.1
# over a cycle of 4 iterations
-scheduler2 = CosineAnnealingScheduler(optimizer, "lr (fc)", 0.0, 0.1, 4, param_group_index=1)
+scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", 0.0, 0.1, 4, param_group_index=1)

-default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler1)
-default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler2)
+default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler_1)
+default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler_2)

@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
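Both schedulers drive separate parameter groups of the same optimizer via `param_group_index`, so the snippet presupposes an optimizer built with two groups. A sketch of such an optimizer (the `Sequential` model here is illustrative; the doctests elsewhere in this diff use a `default_model` with `base` and `fc` submodules):

.. code-block:: python

    import torch

    # Hypothetical two-group optimizer assumed by scheduler_1 / scheduler_2:
    # group 0 is the backbone, group 1 the classifier head.
    model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Linear(4, 2))
    optimizer = torch.optim.SGD(
        [
            {"params": model[0].parameters(), "lr": 0.001},  # param_group_index=0
            {"params": model[1].parameters(), "lr": 0.01},   # param_group_index=1
        ]
    )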
@@ -788,20 +788,43 @@ class LRScheduler(ParamScheduler):
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).

-    .. code-block:: python
+    Examples:

+        .. testsetup::

+            default_trainer = get_default_trainer()

+        .. testcode::

-        from ignite.handlers.param_scheduler import LRScheduler
-        from torch.optim.lr_scheduler import StepLR
+            from torch.optim.lr_scheduler import StepLR

+            torch_lr_scheduler = StepLR(default_optimizer, step_size=3, gamma=0.1)

+            scheduler = LRScheduler(torch_lr_scheduler)

+            # In this example, we assume to have installed PyTorch>=1.1.0
+            # (with new `torch.optim.lr_scheduler` behaviour) and
+            # we attach the scheduler to Events.ITERATION_COMPLETED
+            # instead of Events.ITERATION_STARTED to make sure to use
+            # the first lr value from the optimizer, otherwise it will be skipped:
+            default_trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)

+            @default_trainer.on(Events.ITERATION_COMPLETED)
+            def print_lr():
+                print(default_optimizer.param_groups[0]["lr"])

+            default_trainer.run([0] * 8, max_epochs=1)

-        step_scheduler = StepLR(optimizer, step_size=3, gamma=0.1)
-        scheduler = LRScheduler(step_scheduler)
+        .. testoutput::

-        # In this example, we assume to have installed PyTorch>=1.1.0
-        # (with new `torch.optim.lr_scheduler` behaviour) and
-        # we attach scheduler to Events.ITERATION_COMPLETED
-        # instead of Events.ITERATION_STARTED to make sure to use
-        # the first lr value from the optimizer, otherwise it is will be skipped:
-        trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)
+            0.1
+            0.1
+            0.010...
+            0.010...
+            0.010...
+            0.001...
+            0.001...
+            0.001...

.. versionadded:: 0.4.5
"""
@@ -916,21 +939,40 @@ def create_lr_scheduler_with_warmup(
`lr_scheduler` provides its learning rate values as normally.

Examples:
-    .. code-block:: python

-        torch_lr_scheduler = ExponentialLR(optimizer=optimizer, gamma=0.98)
-        lr_values = [None] * 100
+    .. testsetup::

+        default_trainer = get_default_trainer()

+    .. testcode::

+        from torch.optim.lr_scheduler import ExponentialLR

+        torch_lr_scheduler = ExponentialLR(optimizer=default_optimizer, gamma=0.98)

        scheduler = create_lr_scheduler_with_warmup(torch_lr_scheduler,
                                                    warmup_start_value=0.0,
                                                    warmup_end_value=0.1,
-                                                   warmup_duration=10,
-                                                   output_simulated_values=lr_values)
-        lr_values = np.array(lr_values)
-        # Plot simulated values
-        plt.plot(lr_values[:, 0], lr_values[:, 1], label="learning rate")
+                                                   warmup_duration=3)

+        default_trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)

+        @default_trainer.on(Events.ITERATION_COMPLETED)
+        def print_lr():
+            print(default_optimizer.param_groups[0]["lr"])

+        default_trainer.run([0] * 8, max_epochs=1)

+    .. testoutput::

-        # Attach to the trainer
-        trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
+        0.0
+        0.05
+        0.1
+        0.098
+        0.09604
+        0.09411...
+        0.09223...
+        0.09039...

.. versionadded:: 0.4.5
"""
@@ -1029,7 +1071,6 @@ class PiecewiseLinear(ParamScheduler):
# Sets the learning rate to 0.5 over the first 10 iterations, then decreases linearly from 0.5 to 0.45 between
# 10th and 20th iterations. Next there is a jump to 0.3 at the 21st iteration and LR decreases linearly
# from 0.3 to 0.1 between 21st and 30th iterations and remains 0.1 until the end of the iterations.
-    #

Examples:

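For reference, the schedule described by that comment maps onto `milestones_values`, a list of `(event_index, value)` pairs between which `PiecewiseLinear` interpolates linearly, holding the last value afterwards; a minimal sketch:

.. code-block:: python

    import torch
    from ignite.handlers.param_scheduler import PiecewiseLinear

    tensor = torch.zeros([1], requires_grad=True)
    optimizer = torch.optim.SGD([tensor], lr=0.5)

    # 0.5 until event 10, linear 0.5 -> 0.45 until 20, jump to 0.3 at 21,
    # linear 0.3 -> 0.1 until 30, then constant 0.1.
    scheduler = PiecewiseLinear(
        optimizer, "lr",
        milestones_values=[(10, 0.5), (20, 0.45), (21, 0.3), (30, 0.1)],
    )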
@@ -1181,23 +1222,48 @@ class ParamGroupScheduler:
names: list of names of schedulers.
save_history: whether to save history or not.

-    .. code-block:: python
+    Examples:

-        optimizer = SGD(
-            [
-                {"params": model.base.parameters(), 'lr': 0.001),
-                {"params": model.fc.parameters(), 'lr': 0.01),
-            ]
-        )
+        .. testsetup::

-        scheduler1 = LinearCyclicalScheduler(optimizer, 'lr', 1e-7, 1e-5, len(train_loader), param_group_index=0)
-        scheduler2 = CosineAnnealingScheduler(optimizer, 'lr', 1e-5, 1e-3, len(train_loader), param_group_index=1)
-        lr_schedulers = [scheduler1, scheduler2]
-        names = ["lr (base)", "lr (fc)"]
+            default_trainer = get_default_trainer()

-        scheduler = ParamGroupScheduler(schedulers=lr_schedulers, names=names)
-        # Attach single scheduler to the trainer
-        trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
+        .. testcode::

+            optimizer = torch.optim.SGD(
+                [
+                    {"params": default_model.base.parameters(), "lr": 0.001},
+                    {"params": default_model.fc.parameters(), "lr": 0.01},
+                ]
+            )

+            # CosineAnnealing increases the learning rate from 0.0 to 1.0
+            # over a cycle of 4 iterations
+            scheduler_1 = CosineAnnealingScheduler(optimizer, "lr", 0.0, 1.0, 4, param_group_index=0)

+            # CosineAnnealing increases the learning rate from 0.0 to 0.1
+            # over a cycle of 4 iterations
+            scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", 0.0, 0.1, 4, param_group_index=1)

+            scheduler = ParamGroupScheduler(schedulers=[scheduler_1, scheduler_2],
+                                            names=["lr (base)", "lr (fc)"])

+            default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)

+            @default_trainer.on(Events.ITERATION_COMPLETED)
+            def print_lr():
+                print(optimizer.param_groups[0]["lr"],
+                      optimizer.param_groups[1]["lr"])

+            default_trainer.run([0] * 8, max_epochs=1)

+        .. testoutput::

+            0.0 0.0
+            0.1464... 0.01464...
+            0.4999... 0.04999...
+            0.8535... 0.08535...
+            ...

.. versionadded:: 0.4.5
"""