[GPT] Bug fix: sync LayerNorm gradients under pipeline parallel + sequence parallel (#7613)
iosmers authored Dec 12, 2023
1 parent ac7114f commit 0a58a1a
Showing 1 changed file with 7 additions and 1 deletion.
8 changes: 7 additions & 1 deletion paddlenlp/transformers/gpt/modeling_pp.py
@@ -22,7 +22,10 @@
 from paddle.distributed.fleet.utils import recompute
 
 from paddlenlp.transformers.model_utils import PipelinePretrainedModel
-from paddlenlp.transformers.sequence_parallel_utils import GatherOp
+from paddlenlp.transformers.sequence_parallel_utils import (
+    GatherOp,
+    mark_as_sequence_parallel_parameter,
+)
 
 from .modeling import (
     GPTConfig,
@@ -128,6 +131,9 @@ def forward(self, args):
 class LayerNormPipe(nn.LayerNorm):
     def __init__(self, config):
         super(LayerNormPipe, self).__init__(config.hidden_size, epsilon=1e-05)
+        if config.sequence_parallel:
+            mark_as_sequence_parallel_parameter(self.weight)
+            mark_as_sequence_parallel_parameter(self.bias)
 
     def forward(self, args):
         hidden_states, attention_mask, position_ids = parse_args(args)
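Why the fix is needed: with sequence parallelism enabled, the hidden states feeding this LayerNorm are split along the sequence dimension across the tensor-parallel ranks, so each rank's backward pass produces only a partial gradient for the LayerNorm weight and bias. Tagging the two parameters with mark_as_sequence_parallel_parameter lets PaddleNLP's sequence-parallel utilities all-reduce (sum) those gradients across the group before the optimizer step; without the tag, the ranks' LayerNorm parameters can silently drift apart in the pipeline-parallel path. The sketch below is a conceptual illustration of that mechanism, not PaddleNLP's actual hook code: the marking helper is approximated by a plain attribute flag, and allreduce_sequence_parallel_grads is a hypothetical helper name.

# Conceptual sketch of the grad-sync mechanism this patch relies on.
# Not PaddleNLP's actual implementation; helper names are illustrative.
import paddle.distributed as dist


def mark_as_sequence_parallel_parameter(param):
    # Tag the parameter; the real helper in
    # paddlenlp.transformers.sequence_parallel_utils effectively sets a flag like this.
    param.sequence_parallel = True


def allreduce_sequence_parallel_grads(model, group=None):
    # Hypothetical post-backward step: sum the gradients of every tagged
    # parameter across the sequence/tensor parallel group so all ranks
    # apply the same LayerNorm weight/bias update.
    for param in model.parameters():
        if getattr(param, "sequence_parallel", False) and param.grad is not None:
            dist.all_reduce(param.grad, group=group)

In the actual training path, PaddleNLP's sequence-parallel utilities perform this all-reduce for parameters carrying the flag; the three added lines in LayerNormPipe.__init__ simply make sure the pipeline-parallel LayerNorm layers are included when sequence parallelism is on.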
