Skip to content

Commit a094e81

Browse files
authored
config.model -> config.model_name (#768)
1 parent fb7d6f5 commit a094e81

File tree

2 files changed

+11
-11
lines changed

2 files changed

+11
-11
lines changed

examples/custom_models/local_mt_model.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -84,33 +84,33 @@ class LocalMTClient(LightevalModel):
8484
"""
8585

8686
def __init__(self, config, env_config) -> None:
87-
self.model = config.model
87+
self.model_name = config.model_name
8888
self.model_definition_file_path = config.model_definition_file_path
8989
self.batch_size = 32
9090
self.device = "cuda" if torch.cuda.is_available() else "cpu"
9191

9292
self.model_info = ModelInfo(
93-
model_name=config.model,
93+
model_name=config.model_name,
9494
model_sha="",
9595
model_dtype=None,
9696
model_size="",
9797
)
9898

9999
# Update model initialization to handle both models
100-
if "seamless-m4t" in config.model:
101-
self._tokenizer = AutoProcessor.from_pretrained(config.model)
102-
self._model = SeamlessM4Tv2ForTextToText.from_pretrained(config.model)
100+
if "seamless-m4t" in config.model_name:
101+
self._tokenizer = AutoProcessor.from_pretrained(config.model_name)
102+
self._model = SeamlessM4Tv2ForTextToText.from_pretrained(config.model_name)
103103
self.model_type = "seamless-4mt"
104104
self.batch_size = 1
105105
logger.info(
106106
"Using batch size of 1 for seamless-4mt model because the target language needs to be set for the entire batch."
107107
)
108-
elif "madlad400" in config.model:
109-
self._tokenizer = AutoTokenizer.from_pretrained(config.model)
110-
self._model = AutoModelForSeq2SeqLM.from_pretrained(config.model)
108+
elif "madlad400" in config.model_name:
109+
self._tokenizer = AutoTokenizer.from_pretrained(config.model_name)
110+
self._model = AutoModelForSeq2SeqLM.from_pretrained(config.model_name)
111111
self.model_type = "madlad400"
112112
else:
113-
raise ValueError(f"Unsupported model: {config.model}")
113+
raise ValueError(f"Unsupported model: {config.model_name}")
114114

115115
self._model.to(self.device)
116116
self._model.eval()

src/lighteval/models/endpoints/openai_model.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,7 @@ def __init__(self, config: OpenAIModelConfig, env_config) -> None:
9494
self.sampling_params = self.generation_parameters.to_vllm_openai_dict()
9595

9696
self.model_info = ModelInfo(
97-
model_name=config.model,
97+
model_name=config.model_name,
9898
model_sha="",
9999
model_dtype=None,
100100
model_size="",
@@ -103,7 +103,7 @@ def __init__(self, config: OpenAIModelConfig, env_config) -> None:
103103
self.API_RETRY_SLEEP = 3
104104
self.API_RETRY_MULTIPLIER = 2
105105
self.CONCURENT_CALLS = 100
106-
self.model = config.model
106+
self.model_name = config.model_name
107107
try:
108108
self._tokenizer = tiktoken.encoding_for_model(self.model)
109109
except KeyError:

0 commit comments

Comments (0)