
Support PyTorch Lightning 2.3 #207

Merged · 2 commits · Jul 10, 2024
Changes from 1 commit
5 changes: 5 additions & 0 deletions cellarium/ml/cli.py
@@ -275,6 +275,11 @@ def __init__(self, args: ArgsType = None) -> None:
            args=args,
        )

    def _add_instantiators(self) -> None:
        # disable the breaking change to dependency injection support introduced in PyTorch Lightning 2.3
        # https://github.com/Lightning-AI/pytorch-lightning/pull/18105
        pass

    def instantiate_classes(self) -> None:
        with torch.device("meta"):
            # skip the initialization of model parameters
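For context on the override above: PyTorch Lightning 2.3 added the `_add_instantiators` hook to `LightningCLI`, routing class instantiation through dependency-injection instantiators (Lightning-AI/pytorch-lightning#18105), and overriding the hook with a no-op restores the previous jsonargparse behavior. A minimal self-contained sketch of the same pattern, assuming the `lightning.pytorch.cli` import path (the `CompatCLI` name and `main` wrapper are illustrative, not from this repo):

from lightning.pytorch.cli import ArgsType, LightningCLI


class CompatCLI(LightningCLI):
    """Illustrative CLI subclass restoring pre-2.3 instantiation behavior."""

    def _add_instantiators(self) -> None:
        # No-op: keep jsonargparse's plain class instantiation instead of the
        # dependency-injection instantiators that Lightning 2.3 registers here.
        pass


def main(args: ArgsType = None) -> None:
    # Model and data classes are supplied via config/CLI arguments as usual.
    CompatCLI(args=args)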
14 changes: 14 additions & 0 deletions cellarium/ml/core/module.py
@@ -61,6 +61,12 @@ def __init__(
        self.save_hyperparameters(logger=False)
        self.pipeline: CellariumPipeline | None = None

        if optim_fn is None:
            # Starting with PyTorch Lightning 2.3, automatic optimization no longer allows returning None
            # from training_step during distributed training.
            # https://github.com/Lightning-AI/pytorch-lightning/pull/19918
            # Thus, we use manual optimization for the no-optimizer case.
            self.automatic_optimization = False

    def configure_model(self) -> None:
        """
        .. note::
@@ -156,6 +162,14 @@ def training_step(  # type: ignore[override]
        if loss is not None:
            # Logging to TensorBoard by default
            self.log("train_loss", loss)

        if not self.automatic_optimization:
            # Note that calling .step() is necessary to increment the global step, even though
            # no backpropagation is performed.
            no_optimizer = self.optimizers()
            assert isinstance(no_optimizer, pl.core.optimizer.LightningOptimizer)
            no_optimizer.step()

        return loss

    def forward(self, batch: dict[str, np.ndarray | torch.Tensor]) -> dict[str, np.ndarray | torch.Tensor]:
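Taken together, the two hunks above implement a no-optimizer training mode under manual optimization. A minimal self-contained sketch of the pattern (the zero-learning-rate dummy optimizer in `configure_optimizers` is an assumption made so that `self.optimizers()` yields a steppable `LightningOptimizer`; it is not taken from this repo):

import lightning.pytorch as pl
import torch


class NoOptimizerModule(pl.LightningModule):
    """Illustrative module that trains without backpropagation."""

    def __init__(self) -> None:
        super().__init__()
        # PL >= 2.3 forbids returning None from training_step under automatic
        # optimization in distributed training, so opt out of it.
        self.automatic_optimization = False

    def configure_optimizers(self) -> torch.optim.Optimizer:
        # Assumption: a dummy optimizer over a throwaway parameter, whose only
        # purpose is to give .step() something to call so the global step advances.
        return torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.0)

    def training_step(self, batch: dict[str, torch.Tensor], batch_idx: int) -> None:
        # ... update running statistics from `batch` here (no loss, no backward) ...
        opt = self.optimizers()
        assert isinstance(opt, pl.core.optimizer.LightningOptimizer)
        opt.step()  # increments trainer.global_step; performs no backprop
        return None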