1 parent a127710 commit 686b9df
src/transformers/trainer.py
@@ -2965,7 +2965,7 @@ def _maybe_log_save_evaluate(
     # reset tr_loss to zero
     tr_loss -= tr_loss

-    logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
+    logs["loss"] = tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged)
     if grad_norm is not None:
         logs["grad_norm"] = grad_norm.item() if isinstance(grad_norm, torch.Tensor) else grad_norm
     if learning_rate is not None:
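For context, here is a minimal standalone sketch of the arithmetic this commit changes (not the actual Trainer code; the helper name is hypothetical). The change stops rounding the averaged loss to 4 decimal places before logging, so consumers of the logs receive the full-precision float.

# Minimal sketch, assuming the same arithmetic as the changed line;
# average_loss_since_last_log is a hypothetical helper, not Trainer API.
def average_loss_since_last_log(tr_loss_scalar, global_step, globalstep_last_logged):
    # Average the loss accumulated since the previous logging event.
    return tr_loss_scalar / (global_step - globalstep_last_logged)

# Before this commit: logs["loss"] = round(avg, 4)  -> precision lost
# After this commit:  logs["loss"] = avg            -> full float logged
avg = average_loss_since_last_log(12.3456789, 110, 100)
print(avg)  # 1.23456789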