Update for changes in tokenizers API
n1t0 committed Dec 26, 2019
1 parent 734d29b commit 1f82a5d
Showing 4 changed files with 20 additions and 16 deletions.
2 changes: 1 addition & 1 deletion setup.py
@@ -86,7 +86,7 @@
     packages=find_packages("src"),
     install_requires=[
         "numpy",
-        "tokenizers",
+        "tokenizers == 0.0.10",
         # accessing files from S3 directly
         "boto3",
         # filesystem locks e.g. to prevent parallel downloads
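Note (editor's sketch, not part of the commit): pinning tokenizers to 0.0.10 only helps if that release is the one actually installed in the environment. A minimal, illustrative check using pkg_resources:

    # Sketch only: confirm the installed tokenizers package matches the pin above.
    import pkg_resources

    installed = pkg_resources.get_distribution("tokenizers").version
    assert installed == "0.0.10", "expected tokenizers 0.0.10, got " + installed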
14 changes: 8 additions & 6 deletions src/transformers/tokenization_bert.py
@@ -583,12 +583,14 @@ def __init__(
             )
         )
         if max_length is not None:
-            self._tokenizer.with_truncation(max_length, stride, truncation_strategy)
+            self._tokenizer.with_truncation(max_length,
+                                            stride=stride,
+                                            strategy=truncation_strategy)
         self._tokenizer.with_padding(
-            max_length if pad_to_max_length else None,
-            self.padding_side,
-            self.pad_token_id,
-            self.pad_token_type_id,
-            self.pad_token,
+            max_length=max_length if pad_to_max_length else None,
+            direction=self.padding_side,
+            pad_id=self.pad_token_id,
+            pad_type_id=self.pad_token_type_id,
+            pad_token=self.pad_token,
         )
         self._decoder = tk.decoders.WordPiece.new()
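The tokenizers bindings now expect keyword arguments for truncation and padding, which is what this hunk switches to. Below is a minimal sketch of the same call pattern factored into a standalone helper; the helper name and default values are illustrative, only the with_truncation / with_padding keywords come from the diff above, and `tokenizer` is assumed to be an already constructed tokenizers 0.0.10 Tokenizer.

    # Sketch only: the keyword-argument form used in the hunk above.
    def enable_truncation_and_padding(tokenizer, max_length, stride=0,
                                      truncation_strategy="longest_first",
                                      pad_to_max_length=False, padding_side="right",
                                      pad_id=0, pad_type_id=0, pad_token="[PAD]"):
        if max_length is not None:
            tokenizer.with_truncation(max_length, stride=stride, strategy=truncation_strategy)
        tokenizer.with_padding(
            max_length=max_length if pad_to_max_length else None,
            direction=padding_side,
            pad_id=pad_id,
            pad_type_id=pad_type_id,
            pad_token=pad_token,
        )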
16 changes: 9 additions & 7 deletions src/transformers/tokenization_gpt2.py
@@ -274,15 +274,17 @@ def __init__(
 
         self._tokenizer = tk.Tokenizer(tk.models.BPE.from_files(vocab_file, merges_file))
         self._update_special_tokens()
-        self._tokenizer.with_pre_tokenizer(tk.pre_tokenizers.ByteLevel.new(add_prefix_space))
+        self._tokenizer.with_pre_tokenizer(tk.pre_tokenizers.ByteLevel.new(add_prefix_space=add_prefix_space))
         self._tokenizer.with_decoder(tk.decoders.ByteLevel.new())
         if max_length:
-            self._tokenizer.with_truncation(max_length, stride, truncation_strategy)
+            self._tokenizer.with_truncation(max_length,
+                                            stride=stride,
+                                            strategy=truncation_strategy)
         self._tokenizer.with_padding(
-            max_length if pad_to_max_length else None,
-            self.padding_side,
-            self.pad_token_id if self.pad_token_id is not None else 0,
-            self.pad_token_type_id,
-            self.pad_token if self.pad_token is not None else "",
+            max_length=max_length if pad_to_max_length else None,
+            direction=self.padding_side,
+            pad_id=self.pad_token_id if self.pad_token_id is not None else 0,
+            pad_type_id=self.pad_token_type_id,
+            pad_token=self.pad_token if self.pad_token is not None else "",
         )
         self._decoder = tk.decoders.ByteLevel.new()
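For GPT-2 the same keyword style applies to the byte-level pipeline. The following self-contained sketch puts together the calls shown in the hunk above; "vocab.json" and "merges.txt" are placeholder paths to an existing GPT-2 vocabulary, and the concrete values (1024, "right", pad id 0) are illustrative rather than taken from the commit.

    # Sketch only: byte-level BPE tokenizer wired up with the keyword-argument API.
    import tokenizers as tk

    tokenizer = tk.Tokenizer(tk.models.BPE.from_files("vocab.json", "merges.txt"))
    tokenizer.with_pre_tokenizer(tk.pre_tokenizers.ByteLevel.new(add_prefix_space=False))
    tokenizer.with_decoder(tk.decoders.ByteLevel.new())
    tokenizer.with_truncation(1024, stride=0, strategy="longest_first")
    tokenizer.with_padding(
        max_length=1024,
        direction="right",
        pad_id=0,
        pad_type_id=0,
        pad_token="",
    )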
4 changes: 2 additions & 2 deletions src/transformers/tokenization_utils.py
@@ -1430,10 +1430,10 @@ def decoder(self):
 
     @property
     def vocab_size(self):
-        return self.tokenizer.get_vocab_size(False)
+        return self.tokenizer.get_vocab_size(with_added_tokens=False)
 
     def __len__(self):
-        return self.tokenizer.get_vocab_size(True)
+        return self.tokenizer.get_vocab_size(with_added_tokens=True)
 
     def _update_special_tokens(self):
         self.tokenizer.add_special_tokens(self.all_special_tokens)
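The two keyword calls above make the with/without-added-tokens distinction explicit: vocab_size reports only the underlying model vocabulary, while __len__ also counts added special tokens. A small illustrative helper (the function name is not from the commit), assuming `tokenizer` is a tokenizers 0.0.10 Tokenizer:

    # Sketch only: base vocabulary size vs. size including added special tokens.
    def vocab_sizes(tokenizer):
        base = tokenizer.get_vocab_size(with_added_tokens=False)
        total = tokenizer.get_vocab_size(with_added_tokens=True)
        return base, total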
