From eee51a2a7caf403c37334c16e262ead8d5e5df6c Mon Sep 17 00:00:00 2001
From: Danielle Pintz <38207072+daniellepintz@users.noreply.github.com>
Date: Mon, 3 Jan 2022 04:46:57 -0800
Subject: [PATCH] Remove `Strategy.optimizer_zero_grad` (#11246)

---
 CHANGELOG.md                                            | 3 +++
 pytorch_lightning/loops/optimization/optimizer_loop.py  | 2 +-
 pytorch_lightning/strategies/strategy.py                | 4 ----
 3 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 09623b6b52e25b..a419c02a504290 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -346,6 +346,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 - Removed `Strategy.init_optimizers` in favor of `Strategy.setup_optimizers` ([#11236](https://github.com/PyTorchLightning/pytorch-lightning/pull/11236))
 
+
+- Removed `Strategy.optimizer_zero_grad` ([#11246](https://github.com/PyTorchLightning/pytorch-lightning/pull/11246))
+
 ### Fixed
 
 - Fixed security vulnerabilities CVE-2020-1747 and CVE-2020-14343 caused by the `PyYAML` dependency ([#11099](https://github.com/PyTorchLightning/pytorch-lightning/pull/11099))
diff --git a/pytorch_lightning/loops/optimization/optimizer_loop.py b/pytorch_lightning/loops/optimization/optimizer_loop.py
index ee4af19134cf38..00e9b602c15100 100644
--- a/pytorch_lightning/loops/optimization/optimizer_loop.py
+++ b/pytorch_lightning/loops/optimization/optimizer_loop.py
@@ -400,7 +400,7 @@ def _optimizer_zero_grad(self, batch_idx: int, optimizer: torch.optim.Optimizer,
             optimizer: the current optimizer
             opt_idx: the index of the current optimizer
         """
-        self.trainer._call_strategy_hook(
+        self.trainer._call_lightning_module_hook(
             "optimizer_zero_grad", self.trainer.current_epoch, batch_idx, optimizer, opt_idx
         )
         self.optim_progress.optimizer.zero_grad.increment_completed()
diff --git a/pytorch_lightning/strategies/strategy.py b/pytorch_lightning/strategies/strategy.py
index fe9093838c157a..3855a7972d8d05 100644
--- a/pytorch_lightning/strategies/strategy.py
+++ b/pytorch_lightning/strategies/strategy.py
@@ -177,10 +177,6 @@ def optimizer_step(
         model = model or self.lightning_module
         self.precision_plugin.optimizer_step(model, optimizer, opt_idx, closure, **kwargs)
 
-    def optimizer_zero_grad(self, current_epoch: int, batch_idx: int, optimizer: Optimizer, opt_idx: int) -> None:
-        """Zeros all model parameter's gradients."""
-        self.lightning_module.optimizer_zero_grad(current_epoch, batch_idx, optimizer, opt_idx)
-
     def _setup_model_and_optimizers(self, model: Module, optimizers: List[Optimizer]) -> Tuple[Module, List[Optimizer]]:
         """Setup a model and multiple optimizers together.
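
Note (not part of the patch): after this change the optimizer loop calls the `optimizer_zero_grad` hook on the LightningModule (via `_call_lightning_module_hook`) rather than going through the strategy, so gradient-zeroing behavior is customized by overriding that hook on the module. The sketch below illustrates this under that assumption; the module, layer sizes, and learning rate are hypothetical.

    import torch
    from torch import nn
    import pytorch_lightning as pl


    class LitClassifier(pl.LightningModule):
        """Hypothetical module showing where gradient zeroing is customized after #11246."""

        def __init__(self):
            super().__init__()
            self.layer = nn.Linear(32, 2)

        def training_step(self, batch, batch_idx):
            x, y = batch
            return nn.functional.cross_entropy(self.layer(x), y)

        def configure_optimizers(self):
            return torch.optim.SGD(self.parameters(), lr=0.1)

        def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
            # The optimizer loop invokes this LightningModule hook directly now that the
            # Strategy-level hook is removed; set_to_none=True releases gradient memory
            # instead of filling the tensors with zeros.
            optimizer.zero_grad(set_to_none=True)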