
[Trainer] Support skip data intervals #8989

Merged
merged 12 commits on Sep 23, 2024
20 changes: 18 additions & 2 deletions paddlenlp/trainer/argparser.py
@@ -24,7 +24,17 @@
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Dict, Iterable, NewType, Optional, Tuple, Union, get_type_hints
from typing import (
Any,
Dict,
Iterable,
NewType,
Optional,
Tuple,
Union,
get_args,
get_type_hints,
)

DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
@@ -129,7 +139,13 @@ def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
# This is the value that will get picked if we do --field_name (without value)
kwargs["const"] = True
elif isclass(origin_type) and issubclass(origin_type, list):
kwargs["type"] = field.type.__args__[0]
# support one-dimensional and two-dimensional lists
if hasattr(get_args(field.type)[0], "__args__"):
kwargs["type"] = field.type.__args__[0].__args__[0]
kwargs["action"] = "append"
else:
kwargs["type"] = field.type.__args__[0]

kwargs["nargs"] = "+"
if field.default_factory is not dataclasses.MISSING:
kwargs["default"] = field.default_factory()
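For reference, a minimal sketch of what the append/nargs combination above produces for a two-dimensional list field. The standalone argparse parser and the flag values are illustrative assumptions; the flag name mirrors the skip_data_intervals argument added later in this PR.

```python
# Illustrative only: a plain argparse parser configured the way the snippet
# above configures a List[List[int]] dataclass field.
from argparse import ArgumentParser

parser = ArgumentParser()
# nargs="+" gathers the values after one flag occurrence into an inner list;
# action="append" appends one inner list per occurrence of the flag.
parser.add_argument("--skip_data_intervals", type=int, nargs="+", action="append", default=None)

args = parser.parse_args("--skip_data_intervals 10 20 --skip_data_intervals 100 200".split())
print(args.skip_data_intervals)  # [[10, 20], [100, 200]]
```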
71 changes: 52 additions & 19 deletions paddlenlp/trainer/trainer.py
@@ -138,6 +138,7 @@
get_scheduler,
has_length,
set_seed,
should_skip_data,
speed_metrics,
)
from .training_args import TrainingArguments
@@ -277,7 +278,7 @@
# Seed must be set before instantiating the model when using model
set_seed(seed=self.args.seed)

if model is None:
if model is None and not args.debug_data:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")

if self.args.to_static:
@@ -339,7 +340,7 @@
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)

if self.args.pipeline_parallel_degree > 1 and self.args.use_hybrid_parallel:
if self.args.pipeline_parallel_degree > 1 and self.args.use_hybrid_parallel and not args.debug_data:
from paddle.distributed.fleet.meta_parallel import PipelineLayer

assert (isinstance(model, LoRAModel) and isinstance(model.model, PipelineLayer)) or isinstance(
@@ -357,6 +358,7 @@
self._load_ckpt_func = dist.load_state_dict if self.args.enable_auto_parallel else paddle.load
if self.args.use_async_save:
self._async_optimizer_saver = AsyncSaver()
self.skip_global_steps = 0

if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
@@ -377,26 +379,28 @@

self.do_grad_scaling = False
self.enable_autocast_context_manager = False
if not args.debug_data:
Collaborator:
Hmm, so with debug_data the model and everything else simply doesn't run?

Does this need to be exposed publicly, or is it something to delete once development is finished?

Contributor Author (@greycooker), Aug 27, 2024:
Yes, debug_data only prints the data; it neither loads the model nor trains. The idea is to add it as a general-purpose feature.

Collaborator:
If it is only a debug mode for our internal use, I don't think adding it is worth much.

    if args.fp16 or args.bf16:
        # set do_grad_scaling, enable_autocast_context_manager
        self._wrap_amp_model(args, model)

    if args.recompute:

        def fn(layer):
            if hasattr(layer, "enable_recompute") and (
                layer.enable_recompute is False or layer.enable_recompute == 0
            ):
                layer.enable_recompute = True

        model.apply(fn)

    default_label_names = (
        ["start_positions", "end_positions"]
        if "QusetionAnswering" in type(self.model).__name__ or "UIE" in type(self.model).__name__
        else ["labels"]
    )
    self.label_names = default_label_names if self.args.label_names is None else self.args.label_names

self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
self.print_config()
@@ -924,6 +928,7 @@
step_control = 0 # used in loop control, reset to 0 after every step
self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)

step = -1
for step, inputs in enumerate(epoch_iterator):
if self.args.use_hybrid_parallel and self.args.sep_parallel_degree > 1:
inputs = split_inputs_sequence_dim(inputs)
@@ -960,6 +965,31 @@
steps_trained_progress_bar.close()
steps_trained_progress_bar = None

# Skip data
if should_skip_data(self.state.global_step, self.args.skip_data_intervals):
logger.warning(f"Skip data at global step {self.state.global_step+1}, sub step {step_control}")
logger.warning(f"{self.tokenizer.batch_decode(inputs['input_ids'], skip_special_tokens=True)}")

Collaborator:
Let's not add this line.

Suggested change (delete this line):
logger.warning(f"{self.tokenizer.batch_decode(inputs['input_ids'], skip_special_tokens=True)}")

Contributor Author (@greycooker), Aug 27, 2024:
This warning is there to print the skipped data; removing it is also fine. The main intent is to let users see exactly which data was skipped.


if (step_control + 1) % args.gradient_accumulation_steps == 0 or (

# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
self.skip_global_steps += 1
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, epoch, ignore_keys_for_eval, inputs=inputs)
Collaborator:
This probably isn't needed anymore either?

Contributor Author (@greycooker), Aug 27, 2024:
_maybe_log_save_evaluate is kept here so that we still go through:
1. resetting tr_loss: tr_loss.subtract_(tr_loss)
2. updating _globalstep_last_logged: self._globalstep_last_logged = self.state.global_step
3. the normal eval flow. Otherwise the consumed_samples computation in the final eval runs into problems: https://github.com/PaddlePaddle/PaddleNLP/blob/48820cbc1fe986004f817c0517886735675732d2/paddlenlp/trainer/trainer.py#L2792C6-L2797C18

Collaborator:
My main concern is whether hitting eval, save, or any of the various other callbacks while skipping data could cause problems. Or could we only consume the data here and trigger nothing else, apart from updating the step counters and the like?

self._print_timer()
step_control = 0

else:
self.control = self.callback_handler.on_substep_end(args, self.state, self.control)
step_control += 1
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.timers and self.timers("read-data").start()
Collaborator:
I suspect a lot of this may not be needed. Without any computation, is triggering some of these callbacks actually safe?

Contributor Author:
This is here to make decisions such as whether to run eval, save, or stop training. Executing the callbacks directly without a forward/backward pass raised no errors in my testing, although there may indeed be untested risks.
https://github.com/PaddlePaddle/PaddleNLP/blob/48820cbc1fe986004f817c0517886735675732d2/paddlenlp/trainer/trainer_callback.py#L432C1-L460C23

continue


if step_control % args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(args, self.state, self.control)
self.timers and self.timers("forward-backward").start()
@@ -1181,7 +1211,10 @@
)

self._total_loss_scalar += tr_loss.item()
train_loss = self._total_loss_scalar / self.state.global_step
if self.state.global_step == self.skip_global_steps:
train_loss = 0.0

else:
train_loss = self._total_loss_scalar / (self.state.global_step - self.skip_global_steps)

metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps)
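As a quick sanity check on the averaging above, a small worked example with assumed numbers; the global_step == skip_global_steps guard covers the case where every step was skipped and no loss was accumulated.

```python
# Assumed numbers, for illustration only: 100 global steps, 20 of them skipped.
total_loss_scalar = 64.0
global_step, skip_global_steps = 100, 20

if global_step == skip_global_steps:
    # every step was skipped, so there is no accumulated loss to average
    train_loss = 0.0
else:
    train_loss = total_loss_scalar / (global_step - skip_global_steps)

print(train_loss)  # 64.0 / 80 = 0.8
```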

17 changes: 17 additions & 0 deletions paddlenlp/trainer/trainer_utils.py
@@ -1100,3 +1100,20 @@
tracker.add("global_seed", global_seed)
if "local_seed" not in tracker.states_ and local_seed not in tracker.seeds_:
tracker.add("local_seed", local_seed)


def should_skip_data(global_step, skip_data_intervals):
"""Whether to skip current step data"""

if skip_data_intervals is None:
return False
skip_flag = False
for interval in skip_data_intervals:
if len(interval) != 2 or interval[0] > interval[1] or interval[0] <= 0:
raise ValueError(f"Please check your skip interval {interval}")

start_global_step, end_step = interval[0], interval[1]
if start_global_step <= global_step + 1 <= end_step:
skip_flag = True
break
return skip_flag
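A quick usage sketch of should_skip_data with assumed interval values: the function compares the 1-based step about to run, global_step + 1, against each closed [start, end] interval.

```python
# Assumed intervals: skip global steps 10-20 and 100-200 (1-based, inclusive).
intervals = [[10, 20], [100, 200]]

print(should_skip_data(8, intervals))   # False -> step 9 trains normally
print(should_skip_data(9, intervals))   # True  -> step 10 falls inside [10, 20]
print(should_skip_data(20, intervals))  # False -> step 21 is past the first interval
print(should_skip_data(8, None))        # False -> no intervals configured
```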

10 changes: 10 additions & 0 deletions paddlenlp/trainer/training_args.py
@@ -850,6 +850,16 @@ class TrainingArguments:
release_grads: Optional[bool] = field(
default=False, metadata={"help": "Whether to release gradients during training. Default is `False`."}
)
skip_data_intervals: Optional[List[List[int]]] = field(
default=None,
metadata={"help": "The global step intervals to skip; each interval is given as a start global step and an end global step."},
)
debug_data: Optional[bool] = field(
default=False,
metadata={
"help": "Whether to debug data. If set to True, only print the data in the skip intervals and skip the training process."
},
)

def __post_init__(self):
env_local_rank = int(os.environ.get("PADDLE_RANK_IN_NODE", -1))
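Putting the new arguments together, a hedged usage sketch; the field values and output_dir are assumptions, and with the argparser change above the equivalent command line would pass the flag once per interval, e.g. `--skip_data_intervals 10 20 --skip_data_intervals 100 200`.

```python
# Sketch of configuring the new fields programmatically (values are examples).
from paddlenlp.trainer import TrainingArguments

training_args = TrainingArguments(
    output_dir="./checkpoints",                  # assumed example value
    skip_data_intervals=[[10, 20], [100, 200]],  # skip global steps 10-20 and 100-200
    debug_data=False,                            # True: only print skipped-interval data, no training
)
```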