Skip to content

Commit

Permalink
Merge pull request #367 from ZhuangXialie/main
Browse files Browse the repository at this point in the history
Add max_length and max_prompt_length
  • Loading branch information
shibing624 authored Apr 25, 2024
2 parents 9bd86ea + 3151591 commit c230a77
Showing 1 changed file with 7 additions and 0 deletions.
7 changes: 7 additions & 0 deletions orpo_training.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,11 @@ class ScriptArguments:
The name of the Causal LM model we wish to fine-tune with ORPO
"""
# Model arguments

max_length: Optional[int] = field(default=512,
metadata={"help": "Maximum total input sequence length after tokenization."})
max_prompt_length: Optional[int] = field(default=128, metadata={"help": "Maximum length of prompt sequences."})

model_type: str = field(
default=None,
metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())}
Expand Down Expand Up @@ -415,6 +420,8 @@ def main():
model.config.use_cache = True

training_args = ORPOConfig(
max_length=args.max_length,
max_prompt_length=args.max_prompt_length,
per_device_train_batch_size=args.per_device_train_batch_size,
per_device_eval_batch_size=args.per_device_eval_batch_size,
max_steps=args.max_steps,
Expand Down

0 comments on commit c230a77

Please sign in to comment.