Commit

#[allow(dead_code)] // Suppress the "method is never used" warning
ArthurZucker committed Sep 5, 2023
1 parent c3fa75f commit b57e1c3
Showing 1 changed file with 7 additions and 3 deletions.
10 changes: 7 additions & 3 deletions tokenizers/src/tokenizer/added_vocabulary.rs
@@ -180,8 +180,8 @@ impl AddedVocabulary {
split_normalized_trie: (normalized_trie, vec![]),
}
}

/// Size of the additional vocabulary
#[allow(dead_code)] // Suppress the "method is never used" warning
pub fn len(&self) -> usize {
self.added_tokens_map.len()
}
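Context note (not part of the diff): `#[allow(dead_code)]` silences the compiler's dead-code lint for the item it annotates, so a method such as `len` that is only exercised from tests or downstream code no longer produces a "method is never used" warning. A minimal standalone sketch with hypothetical names, not code from the tokenizers crate:

    struct Counter {
        items: Vec<u32>,
    }

    impl Counter {
        // Without this attribute, building the crate warns that
        // `len` is never used when nothing calls it internally.
        #[allow(dead_code)]
        fn len(&self) -> usize {
            self.items.len()
        }
    }

    fn main() {
        let _c = Counter { items: vec![1, 2, 3] };
    }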
@@ -585,7 +585,9 @@ mod tests {
),
1
);
assert_eq!(vocab.len(), 1);

let vocab_len: usize = vocab.len();
assert_eq!(vocab_len, 1);

// Does not add multiple time the same token
assert_eq!(
@@ -685,7 +687,7 @@ mod tests {
assert_eq!(token.content, "hey"); // Token was already there

token.special = true;
assert_eq!(token.special, true); // Token was already there
assert!(token.special); // Token was already there
}
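Context note (not part of the diff): asserting a boolean with `assert_eq!(x, true)` is the pattern Clippy's `bool_assert_comparison` lint flags; `assert!(x)` expresses the same check directly. A hypothetical standalone example of the same cleanup:

    fn main() {
        let special = true;
        assert_eq!(special, true); // redundant comparison against a bool literal
        assert!(special);          // preferred form used in this commit
    }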

#[test]
@@ -821,6 +823,8 @@ mod tests {
let mut vocab = AddedVocabulary::new();
let normalizer = Lowercase;

assert_eq!(vocab.len(), 0);

vocab.add_tokens(
&[AddedToken::from("<mask>", false).single_word(true)],
&model,
