Commit 14cec91

fix: typo in models
1 parent 0a99c35 commit 14cec91

File tree

1 file changed (+5, -5 lines)


token_visualizer/models.py

Lines changed: 5 additions & 5 deletions
@@ -128,7 +128,7 @@ class TopkTokenModel:
     topk_per_token: int = 5  # number of topk tokens to generate for each token
     generated_answer: str = None  # generated answer from model, to display in frontend
 
-    def genearte_topk_per_token(self, text: str) -> List[Token]:
+    def generate_topk_per_token(self, text: str) -> List[Token]:
         raise NotImplementedError
 
     def html_to_visualize(self, tokens: List[Token]) -> str:
@@ -151,7 +151,7 @@ def get_model_tokenizer(self):
         self.rev_vocab = format_reverse_vocab(self.tokenizer)
         return self.model, self.tokenizer
 
-    def genearte_topk_per_token(self, text: str) -> List[Token]:
+    def generate_topk_per_token(self, text: str) -> List[Token]:
         model, tokenizer = self.get_model_tokenizer()
         rev_vocab = self.rev_vocab
         topk_tokens, topk_probs, sequences = generate_topk_token_prob(
@@ -180,7 +180,7 @@ class TGIModel(TopkTokenModel):
     # tgi support top_n_tokens, reference below:
     # https://github.com/huggingface/text-generation-inference/blob/7dbaf9e9013060af52024ea1a8b361b107b50a69/proto/generate.proto#L108-L109
 
-    def genearte_topk_per_token(self, text: str) -> List[Token]:
+    def generate_topk_per_token(self, text: str) -> List[Token]:
         raise NotImplementedError
 
 
@@ -201,7 +201,7 @@ def __post_init__(self):
         assert self.api_key is not None, "Please provide api key to access openai api."
         self.client = OpenAI(api_key=self.api_key, base_url=self.base_url)
 
-    def genearte_topk_per_token(self, text: str, **kwargs) -> List[Token]:
+    def generate_topk_per_token(self, text: str, **kwargs) -> List[Token]:
         kwargs = {
             "temperature": self.temperature,
             "top_p": self.topp,
@@ -264,7 +264,7 @@ def openai_api_call(self, payload):
         data = json.loads(response.text)
         return data
 
-    def genearte_topk_per_token(self, text: str, **kwargs) -> List[Token]:
+    def generate_topk_per_token(self, text: str, **kwargs) -> List[Token]:
         kwargs = {
             "temperature": self.temperature,
             "top_p": self.topp,
