Restore backward after each batch for grad accum #1917
base: main
```diff
@@ -138,6 +138,12 @@ def __init__(self, cfg: DictConfig) -> None:
         self._gradient_accumulation_steps = cfg.gradient_accumulation_steps
         self._optimizer_in_bwd = cfg.get("optimizer_in_bwd", False)
 
+        if self._gradient_accumulation_steps > 1 and self._optimizer_in_bwd:
+            raise RuntimeError(
+                "Gradient accumulation is not supported with optimizer in bwd. "
+                "Please set gradient_accumulation_steps=1, or optimizer_in_bwd=False."
+            )
+
         # These are public properties which are updated by the checkpoint loader
         # when ``resume_from_checkpoint`` is `True` or validated in tests
         self.seed = training.set_seed(seed=cfg.seed)
```
```diff
@@ -631,7 +637,7 @@ def train(self) -> None:
         # clean up before training begins
         training.cleanup_before_training()
 
-        _, rank = training.get_world_size_and_rank()
+        world_size, rank = training.get_world_size_and_rank()
 
         # zero out the gradients before starting training
         if not self._optimizer_in_bwd:
```
@@ -697,15 +703,31 @@ def train(self) -> None: | |
# Compute loss | ||
# Loss is normalized by default so we multiply by the number of tokens | ||
# This way we can normalize by the total number of tokens if we're accumulating gradients | ||
running_loss += self._loss_fn(logits, labels) * current_num_tokens | ||
current_loss = self._loss_fn(logits, labels) * current_num_tokens | ||
|
||
# free logits otherwise it peaks backward memory | ||
del logits | ||
|
||
running_loss += current_loss | ||
felipemello1 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
|
||
# For optimizer in backward, we need to normalize before calling backward | ||
# This case and gradient accumulation are mutually exclusive | ||
if self._optimizer_in_bwd: | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I think 783 - 810 could be much more readable as: if self.optimizer_in_bwd:
raise if self._clip_grad_norm # or do this in init
...
current_loss.backward()
elif (idx + 1) % self._gradient_accumulation_steps == 0:
current_loss.backward()
...
scale_grads()
if self._clip_grad_norm is not None:
...
self._optimizer.step()
self._optimizer.zero_grad(...) This could be used in all the distributed recipes. |
||
torch.distributed.all_reduce(num_tokens) | ||
torch.distributed.all_reduce(running_loss) | ||
current_loss = current_loss / num_tokens | ||
|
||
current_loss.backward() | ||
|
||
# Step with optimizer | ||
if (idx + 1) % self._gradient_accumulation_steps == 0: | ||
loss = running_loss / num_tokens | ||
loss.backward() | ||
if not self._optimizer_in_bwd: | ||
# Get total number of tokens across all ranks to normalize gradients | ||
torch.distributed.all_reduce(num_tokens) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. HE'S A GENIUS |
||
# This will ensure that the logged loss matches what we're optimizing | ||
torch.distributed.all_reduce(running_loss) | ||
# Manually scale the gradients from unnormalized loss by total # of tokens | ||
training.scale_grads(self._model, 1 / num_tokens) | ||
felipemello1 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
if self._clip_grad_norm is not None: | ||
if self._optimizer_in_bwd: | ||
raise NotImplementedError( | ||
|
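For readers skimming the diff, here is a minimal single-process sketch of the pattern this hunk implements: accumulate the unnormalized, token-summed loss, call backward after every batch, and rescale the gradients by the total token count at optimizer-step time. This is an illustration under assumed names (`train_steps`, `batches`, `loss_fn`, an ignore index of `-100`), not the recipe's actual code; the distributed all-reduces are only noted in comments.

```python
import torch


def train_steps(
    model: torch.nn.Module,
    loss_fn,
    optimizer: torch.optim.Optimizer,
    batches,
    grad_accum_steps: int,
):
    """Single-process sketch of token-normalized gradient accumulation."""
    losses = []
    running_loss = 0.0
    num_tokens = 0
    for idx, (inputs, labels) in enumerate(batches):
        logits = model(inputs)
        # Count only tokens that contribute to the loss (assumes ignore_index=-100).
        current_num_tokens = (labels != -100).sum()
        num_tokens += current_num_tokens

        # The loss is a per-token mean, so multiply by the token count to keep an
        # unnormalized, token-summed value across the accumulation window.
        current_loss = loss_fn(logits, labels) * current_num_tokens
        running_loss += current_loss

        # Backward after every batch so the autograd graph is freed immediately.
        current_loss.backward()

        if (idx + 1) % grad_accum_steps == 0:
            # In the distributed recipe, num_tokens and running_loss are all-reduced
            # across ranks before this point. The loop below is equivalent to
            # training.scale_grads(model, 1 / num_tokens).
            for p in model.parameters():
                if p.grad is not None:
                    p.grad *= 1 / num_tokens

            optimizer.step()
            optimizer.zero_grad(set_to_none=True)

            # Logged loss matches what was optimized: summed loss / total tokens.
            losses.append((running_loss / num_tokens).item())
            running_loss = 0.0
            num_tokens = 0
    return losses
```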
```diff
@@ -722,7 +744,7 @@ def train(self) -> None:
                     # Update the number of steps when the weights are updated
                     self.global_step += 1
 
-                    loss_to_log = loss.item()
+                    loss_to_log = running_loss.item() / num_tokens
 
                     pbar.update(1)
                     pbar.set_description(
                         f"{curr_epoch + 1}|{self.global_step}|Loss: {loss_to_log}"
```

Review comment: Should probably normalize by local_num_tokens?

Reply: Update: I am probably gonna keep it like this since it should be representative of the loss we are actually using to step (even though it means our loss curves will look slightly different than they do today).

Reply: I think it makes sense. Will it break all regression tests though?
```diff
@@ -743,7 +765,8 @@ def train(self) -> None:
                             else self._optim_ckpt_wrapper
                         ),
                     ),
-                    "tokens_per_second_per_gpu": num_tokens / time_per_step,
+                    "tokens_per_second_per_gpu": num_tokens
+                    / (time_per_step * world_size),
                 }
                 if self._log_peak_memory_stats:
                     log_dict.update(
```
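One clarifying note on this change: by this point `num_tokens` has been all-reduced across ranks (see the hunk above), so it is a global token count and the extra division by `world_size` recovers a per-GPU rate. A small illustrative sketch of the corrected metric; the function wrapper is mine, not part of the recipe.

```python
def tokens_per_second_per_gpu(
    global_num_tokens: float, time_per_step: float, world_size: int
) -> float:
    # global_num_tokens is the all-reduced total across ranks for one optimizer
    # step, so per-GPU throughput divides by world_size as well as the step time.
    return global_num_tokens / (time_per_step * world_size)
```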
```diff
@@ -44,18 +44,18 @@ def _get_test_config_overrides(self):
 
     def _fetch_expected_loss_values(self, model_type):
         loss_values_map = {
-            "llama2": [10.5164, 10.4830, 10.5138, 10.5199],
-            "llama3": [12.0672, 11.9067, 11.9304, 11.9351],
+            "llama2": [10.5211, 10.5217, 10.4944, 10.5134],
+            "llama3": [11.9836, 11.9683, 11.9594, 11.9366],
         }
         return loss_values_map[model_type]
 
     @pytest.mark.integration_test
     @pytest.mark.parametrize(
         "config, model_type, ckpt_type, micro_batch_size, gradient_accumulation_steps",
         [
-            ("llama2/7B_qat_full", "llama2", "hf", 4, 1),
-            ("llama3/8B_qat_full", "llama3", "tune", 4, 1),
+            # ("llama2/7B_qat_full", "llama2", "hf", 4, 1),
+            ("llama3/8B_qat_full", "llama3", "tune", 4, 1),
+            ("llama3/8B_qat_full", "llama3", "tune", 1, 4),
         ],
     )
     @gpu_test(gpu_count=2)
```

Review comment: Commented out?
```diff
@@ -0,0 +1,26 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import torch
+from torch import nn
+
+
+def scale_grads(model: nn.Module, scaler: torch.Tensor) -> None:
+    """
+    Utility to scale the gradients of a model.
+
+    This is useful for gradient accumulation where we want to normalize
+    the gradients by the total number of tokens seen.
+
+    Inputs:
+        model (nn.Module): model whose gradients should be scaled
+        scaler (torch.Tensor): scaling factor to apply to the gradients
+
+    Outputs:
+        None (grad fields are modified in place)
+    """
+    for p in model.parameters():
+        if p.grad is not None:
+            p.grad *= scaler
```

Review comment: Does this really need its own file?

Reply: No

Reply: Where do you wanna put it then? Otherwise I am gonna copy-paste this in every recipe which is worse imo

Review comment: Is there any concern here around overflows for lower dtypes? We could do a scaler range check based on dtype. Or is it better to leave it to the recipe to safely choose scaler values?
Review comment: If there was ever an issue with numerical stability, another option would be to scale the loss itself (rather than the gradients) before calling backward. This might overcomplicate things, but I wanted to leave it here in case it turns out a reduced gradient/loss is necessary for smaller dtypes.
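To make that last suggestion concrete, here is one possible sketch of loss-side scaling, offered purely as an assumption about what was meant: divide each micro-batch loss by an estimate of the tokens per optimizer step before calling backward, so the accumulated gradients never hold the large unnormalized sum. Names such as `train_steps_loss_scaled`, `batches`, `loss_fn`, and `expected_tokens_per_step` are illustrative and not part of this PR; the exact token total is only known once the accumulation window ends, which is part of why the PR scales gradients instead.

```python
import torch


def train_steps_loss_scaled(
    model: torch.nn.Module,
    loss_fn,
    optimizer: torch.optim.Optimizer,
    batches,
    grad_accum_steps: int,
    expected_tokens_per_step: int,
):
    """Illustrative alternative: pre-scale the loss so gradients stay small."""
    for idx, (inputs, labels) in enumerate(batches):
        logits = model(inputs)
        current_num_tokens = (labels != -100).sum()
        # Undo the per-token mean, then normalize by an estimated token total so
        # each backward pass contributes an already-scaled gradient, keeping
        # magnitudes modest in reduced-precision dtypes.
        loss = loss_fn(logits, labels) * current_num_tokens / expected_tokens_per_step
        loss.backward()
        if (idx + 1) % grad_accum_steps == 0:
            optimizer.step()
            optimizer.zero_grad(set_to_none=True)
```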