Move log metrics call out of running optimizer step (#270)
Summary:
Pull Request resolved: #270

As the title says, logging metrics does not need to be part of `_run_optimizer_lr_scheduler_step`; the call is moved into `train_step`, right after the optimizer step runs.

Reviewed By: daniellepintz

Differential Revision: D41148824

fbshipit-source-id: 60e62dbff754ee9d7cdd0c8b18bc1c084af6c3ef
ananthsub authored and facebook-github-bot committed Nov 15, 2022
1 parent a3ed317 commit 7fcb6c0
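
To make the move concrete, below is a minimal, self-contained sketch of the `train_step` structure this commit produces. This is not the real torchtnt `AutoUnit`: the loss computation, gradient-accumulation bookkeeping, and optimizer/LR-scheduler step are stubbed, and `SketchUnit`, `_batches_seen`, and `_compute_loss` are invented for illustration. Only `_run_optimizer_lr_scheduler_step`, `num_optimizer_steps_completed`, `log_frequency_steps`, and `log_metrics` come from the diff below.

from typing import Any, Tuple


class SketchUnit:
    # Illustrative stand-in for AutoUnit; not the torchtnt implementation.
    def __init__(self, gradient_accumulation_steps: int = 2, log_frequency_steps: int = 10) -> None:
        self.gradient_accumulation_steps = gradient_accumulation_steps
        self.log_frequency_steps = log_frequency_steps
        self.num_optimizer_steps_completed = 0
        self._batches_seen = 0  # invented bookkeeping for the sketch

    def train_step(self, state: Any, data: Any) -> Tuple[float, Any]:
        loss, outputs = self._compute_loss(data)

        # with gradient accumulation, weights update only every N batches
        self._batches_seen += 1
        should_update_weights = self._batches_seen % self.gradient_accumulation_steps == 0

        if should_update_weights:
            self._run_optimizer_lr_scheduler_step(state)

            # after this commit, logging lives here in train_step, gated on
            # completed optimizer steps, rather than inside
            # _run_optimizer_lr_scheduler_step
            if self.num_optimizer_steps_completed % self.log_frequency_steps == 0:
                self.log_metrics(state, self.num_optimizer_steps_completed - 1, "step")

        return loss, outputs

    def _run_optimizer_lr_scheduler_step(self, state: Any) -> None:
        # optimizer.step(), zero_grad(), and lr_scheduler.step() would go here
        self.num_optimizer_steps_completed += 1

    def log_metrics(self, state: Any, step: int, interval: str) -> None:
        # no-op by default; users override this (per the deleted comment below)
        pass

    def _compute_loss(self, data: Any) -> Tuple[float, Any]:
        # stub: a real unit would run the module forward/backward here
        return 0.0, None

With this shape, `_run_optimizer_lr_scheduler_step` is purely about updating the weights and the learning rate, and the logging frequency is controlled in one place in `train_step`.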
Showing 1 changed file with 4 additions and 5 deletions.

torchtnt/runner/auto_unit.py (4 additions, 5 deletions)
@@ -232,6 +232,10 @@ def train_step(self, state: State, data: TData) -> Tuple[torch.Tensor, Any]:
         if should_update_weights:
             self._run_optimizer_lr_scheduler_step(state)
 
+            # log metrics only after an optimizer step
+            if self.num_optimizer_steps_completed % self.log_frequency_steps == 0:
+                self.log_metrics(state, self.num_optimizer_steps_completed - 1, "step")
+
         return loss, outputs
 
     def _run_optimizer_lr_scheduler_step(self, state: State) -> None:
@@ -278,11 +282,6 @@ def _run_optimizer_lr_scheduler_step(self, state: State) -> None:
         if lr_scheduler and self.step_lr_interval == "step":
             lr_scheduler.step()
 
-        # call `log_metrics`
-        if self.num_optimizer_steps_completed % self.log_frequency_steps == 0:
-            # users can override this, by default this is a no-op
-            self.log_metrics(state, self.num_optimizer_steps_completed - 1, "step")
-
     def on_train_epoch_end(self, state: State) -> None:
         # note: if user wants to override on_train_epoch_end themselves, they should remember to call up to this method via super().on_train_epoch_end()
 
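The deleted comment ("users can override this, by default this is a no-op") describes `log_metrics` as a hook. As a hedged illustration of overriding it, continuing the `SketchUnit` example above (the real `AutoUnit` hook signature may differ; `print` stands in for an actual logger):

class LoggingUnit(SketchUnit):
    # Sketch: override the default no-op hook to emit metrics.
    def log_metrics(self, state: Any, step: int, interval: str) -> None:
        # runs every `log_frequency_steps` optimizer steps after this change
        print(f"[{interval} {step}] optimizer steps completed: "
              f"{self.num_optimizer_steps_completed}")


unit = LoggingUnit(gradient_accumulation_steps=2, log_frequency_steps=10)
for batch in range(40):
    unit.train_step(state=None, data=batch)
# With 40 batches and accumulation of 2, this takes 20 optimizer steps and
# logs at steps 10 and 20 (reported as zero-indexed steps 9 and 19).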
