py : fix missing added_tokens_dict for SPM and BPE vocabs (#4971)
* py : fix missing added_tokens_dict for SPM vocab

* py : pad with unknown tokens when data is missing

ggml-ci

* py : fix BPE vocab conversion

ggml-ci

* py : fix padded dummy tokens (I hope)
ggerganov authored Jan 17, 2024
1 parent 2b3a665 commit 4f4bf35
Showing 1 changed file with 14 additions and 10 deletions.
convert.py: 24 changes (14 additions & 10 deletions)
@@ -387,6 +387,7 @@ def __init__(
         self.bpe_tokenizer = json.loads(
             open(str(fname_tokenizer), encoding="utf-8").read()
         )
+        self.vocab = self.bpe_tokenizer["model"]["vocab"]
         added_tokens: dict[str, int]
         if fname_added_tokens is not None:
             # FIXME: Verify that added tokens here _cannot_ overlap with the main vocab.
@@ -405,7 +406,7 @@ def __init__(
                     if item["content"] not in self.bpe_tokenizer
                 )

-        vocab_size: int = len(self.bpe_tokenizer)
+        vocab_size: int = len(self.vocab)
         expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
         actual_ids = sorted(added_tokens.values())
         if expected_ids != actual_ids:
@@ -415,17 +416,17 @@ def __init__(
             )

         items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
+        self.added_tokens_dict = added_tokens
         self.added_tokens_list = [text for (text, idx) in items]
         self.vocab_size_base: int = vocab_size
         self.vocab_size: int = self.vocab_size_base + len(self.added_tokens_list)
         self.fname_tokenizer = fname_tokenizer
         self.fname_added_tokens = fname_added_tokens

     def bpe_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
-        tokenizer = self.bpe_tokenizer
-        reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.items()}
+        reverse_vocab = {id: encoded_tok for encoded_tok, id in self.vocab.items()}

-        for i, _ in enumerate(tokenizer):
+        for i, _ in enumerate(self.vocab):
             yield reverse_vocab[i], 0.0, gguf.TokenType.NORMAL

     def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
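
For context, a minimal standalone sketch of what the new self.vocab field holds and why len(self.vocab) is the right base vocab size, assuming a Hugging Face-style tokenizer.json; this is illustrative code, not part of the diff:

import json

# A tiny stand-in for a Hugging Face-style tokenizer.json payload.
bpe_tokenizer = json.loads(
    '{"model": {"vocab": {"<unk>": 0, "hello": 1, "world": 2}}, "added_tokens": []}'
)

vocab = bpe_tokenizer["model"]["vocab"]                    # token -> id
reverse_vocab = {idx: tok for tok, idx in vocab.items()}   # id -> token

# len(bpe_tokenizer) counts the top-level JSON keys ("model", "added_tokens", ...),
# which is what the old len(self.bpe_tokenizer) measured; len(vocab) is the
# actual number of base tokens.
print(len(bpe_tokenizer), len(vocab))                      # 2 vs 3

# Emit tokens in id order, assuming ids are contiguous from 0 (as the
# enumerate-based loop in the diff does).
for i in range(len(vocab)):
    print(i, reverse_vocab[i])
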
@@ -466,6 +467,7 @@ def __init__(
             )

         # Token pieces that were added to the base vocabulary.
+        self.added_tokens_dict = added_tokens
         self.added_tokens_list = [new_tokens[id] for id in actual_new_ids]
         self.vocab_size_base = vocab_size
         self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
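
To make the relationship between the two bookkeeping fields concrete, a toy example (token names invented, not convert.py code): added_tokens_dict keeps the full text -> id mapping, while added_tokens_list keeps only the texts, sorted by id, mirroring the lines in the hunks above:

added_tokens = {"<|im_start|>": 32000, "<|im_end|>": 32001}

items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
added_tokens_dict = added_tokens                      # text -> id (the field the fix now stores)
added_tokens_list = [text for (text, idx) in items]   # texts in id order

print(added_tokens_dict)   # {'<|im_start|>': 32000, '<|im_end|>': 32001}
print(added_tokens_list)   # ['<|im_start|>', '<|im_end|>']
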
@@ -1006,6 +1008,7 @@ def check_vocab_size(params: Params, vocab: Vocab, pad_vocab: bool = False) -> N
             )
             for i in range(1, pad_count + 1):
+                vocab.added_tokens_dict[f"<dummy{i:05}>"] = -1
                 vocab.added_tokens_list.append(f"<dummy{i:05}>")
             vocab.vocab_size = params.n_vocab
             return
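
The padding change above, sketched as a self-contained toy (the class and field names mirror the diff, but this is not the convert.py implementation): each <dummyNNNNN> placeholder is now recorded in added_tokens_dict with a placeholder id of -1 as well as appended to added_tokens_list:

from dataclasses import dataclass, field

@dataclass
class ToyVocab:
    vocab_size: int
    added_tokens_dict: dict = field(default_factory=dict)
    added_tokens_list: list = field(default_factory=list)

def pad_vocab(vocab: ToyVocab, n_vocab: int) -> None:
    # Pad with <dummyNNNNN> placeholders until the model's expected vocab size is reached.
    pad_count = n_vocab - vocab.vocab_size
    for i in range(1, pad_count + 1):
        vocab.added_tokens_dict[f"<dummy{i:05}>"] = -1
        vocab.added_tokens_list.append(f"<dummy{i:05}>")
    vocab.vocab_size = n_vocab

vocab = ToyVocab(vocab_size=32000)
pad_vocab(vocab, n_vocab=32003)
print(vocab.added_tokens_list)   # ['<dummy00001>', '<dummy00002>', '<dummy00003>']
print(vocab.vocab_size)          # 32003
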

@@ -1097,6 +1100,8 @@ def extract_vocabulary_from_model(self, vocab: Vocab) -> Tuple[list, list, list]
             scores.append(score)
             toktypes.append(toktype)

+        assert(len(tokens) == vocab.vocab_size)
+
         return tokens, scores, toktypes

     def add_meta_vocab(self, vocab: Vocab) -> None:
@@ -1373,15 +1378,14 @@ def _detect_files(self):
                 self.files[file] = file_path
             elif parent_file_path.exists():
                 self.files[file] = parent_file_path
         print(f"Found vocab files: {self.files}")

     def _select_file(self, vocabtype: Optional[str]) -> Path:
         if vocabtype in ["spm", "bpe"]:
             # For SentencePiece and BPE, return specific files as before
-            file_key = "tokenizer.model" if vocabtype == "spm" else "vocab.json"
-            if self.files[file_key]:
-                return self.files[file_key]
-            else:
-                raise FileNotFoundError(f"{vocabtype} {file_key} not found.")
+            for file_key in self.files.keys():
+                if self.files[file_key]:
+                    return self.files[file_key]
+            raise FileNotFoundError(f"{vocabtype} vocab not found.")
         elif vocabtype == "hfft":
             # For Hugging Face Fast Tokenizer, return the directory path instead of a specific file
             return self.path
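
A standalone sketch of the new selection fallback (the helper name and file names here are hypothetical): instead of requiring tokenizer.model for spm or vocab.json for bpe, the loop returns the first vocab file that was actually detected:

from __future__ import annotations

from pathlib import Path
from typing import Optional

def select_vocab_file(files: dict[str, Optional[Path]], vocabtype: str) -> Path:
    # Return the first discovered vocab file, whatever its name.
    for file_key in files:
        if files[file_key]:
            return files[file_key]
    raise FileNotFoundError(f"{vocabtype} vocab not found.")

files = {"tokenizer.model": None, "vocab.json": None, "tokenizer.json": Path("tokenizer.json")}
print(select_vocab_file(files, "spm"))   # falls back to tokenizer.json
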
