Skip to content

Commit

Permalink
Merge pull request nebuly-ai#190 from egrefen/main
Browse files Browse the repository at this point in the history
Change distillate to distill
  • Loading branch information
diegofiori committed Feb 28, 2023
2 parents ccc0e23 + 5157a6d commit f292283
Show file tree
Hide file tree
Showing 2 changed files with 3 additions and 3 deletions.
2 changes: 1 addition & 1 deletion apps/accelerate/chatllama/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ from chatllama.rlhf.config import Config
path = "path_to_config_file.yaml"
config = Config(path=path)
trainer = RLTrainer(config.trainer)
trainer.distillate()
trainer.distill()
trainer.train()
trainer.training_stats.plot()
```
Expand Down
4 changes: 2 additions & 2 deletions apps/accelerate/chatllama/chatllama/rlhf/reward.py
Original file line number Diff line number Diff line change
Expand Up @@ -258,7 +258,7 @@ class RewardTrainer:
train: Train the reward model
generate_user_input: Generate the user input for the LLM to evaluate a
couple, (user_input, completion) and assign a score
distillate: Parse the dataset and assign scores using LLMs
distill: Parse the dataset and assign scores using LLMs
"""

def __init__(self, config: ConfigReward) -> None:
Expand Down Expand Up @@ -324,7 +324,7 @@ def __init__(self, config: ConfigReward) -> None:
config=self.config.deepspeed_config_path,
)

def distillate(
def distill(
self,
):
"""Parse the dataset and assign scores using LLMs
Expand Down

0 comments on commit f292283

Please sign in to comment.