@@ -84,33 +84,33 @@ class LocalMTClient(LightevalModel):
     """
 
     def __init__(self, config, env_config) -> None:
-        self.model = config.model
+        self.model_name = config.model_name
         self.model_definition_file_path = config.model_definition_file_path
         self.batch_size = 32
         self.device = "cuda" if torch.cuda.is_available() else "cpu"
 
         self.model_info = ModelInfo(
-            model_name=config.model,
+            model_name=config.model_name,
             model_sha="",
             model_dtype=None,
             model_size="",
         )
 
         # Update model initialization to handle both models
-        if "seamless-m4t" in config.model:
-            self._tokenizer = AutoProcessor.from_pretrained(config.model)
-            self._model = SeamlessM4Tv2ForTextToText.from_pretrained(config.model)
+        if "seamless-m4t" in config.model_name:
+            self._tokenizer = AutoProcessor.from_pretrained(config.model_name)
+            self._model = SeamlessM4Tv2ForTextToText.from_pretrained(config.model_name)
             self.model_type = "seamless-4mt"
             self.batch_size = 1
             logger.info(
                 "Using batch size of 1 for seamless-4mt model because the target language needs to be set for the entire batch."
             )
-        elif "madlad400" in config.model:
-            self._tokenizer = AutoTokenizer.from_pretrained(config.model)
-            self._model = AutoModelForSeq2SeqLM.from_pretrained(config.model)
+        elif "madlad400" in config.model_name:
+            self._tokenizer = AutoTokenizer.from_pretrained(config.model_name)
+            self._model = AutoModelForSeq2SeqLM.from_pretrained(config.model_name)
             self.model_type = "madlad400"
         else:
-            raise ValueError(f"Unsupported model: {config.model}")
+            raise ValueError(f"Unsupported model: {config.model_name}")
 
         self._model.to(self.device)
         self._model.eval()
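For context, a minimal sketch of how the renamed attribute would be consumed, assuming a plain config object that exposes `model_name` and `model_definition_file_path`. The real config class is outside this hunk, so `SimpleNamespace` stands in for it here, and `env_config` is not referenced within the lines shown:

# Hypothetical usage sketch; SimpleNamespace stands in for the real config class.
from types import SimpleNamespace

config = SimpleNamespace(
    model_name="facebook/seamless-m4t-v2-large",  # matches the "seamless-m4t" branch above
    model_definition_file_path="",  # not used by the branch selection itself
)
client = LocalMTClient(config, env_config=None)  # loads the model onto CUDA if available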