From 4d124baf8f4706c6060d446b38f07c4258a91d97 Mon Sep 17 00:00:00 2001
From: Yaser Martinez Palenzuela
Date: Mon, 5 Nov 2018 23:04:29 +0100
Subject: [PATCH] Add test for Chinese tokenization

---
 tests/tokenization_test.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/tests/tokenization_test.py b/tests/tokenization_test.py
index 8d6ede9300a7c1..7c12ecccfed29a 100644
--- a/tests/tokenization_test.py
+++ b/tests/tokenization_test.py
@@ -43,6 +43,13 @@ def test_full_tokenizer(self):
         self.assertListEqual(
             tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
 
+    def test_chinese(self):
+        tokenizer = tokenization.BasicTokenizer()
+
+        self.assertListEqual(
+            tokenizer.tokenize(u"ah\u535A\u63A8zz"),
+            [u"ah", u"\u535A", u"\u63A8", u"zz"])
+
     def test_basic_tokenizer_lower(self):
         tokenizer = tokenization.BasicTokenizer(do_lower_case=True)
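
Note on what the new test exercises: BasicTokenizer isolates each CJK character by padding it with whitespace before whitespace tokenization, so u"ah\u535A\u63A8zz" splits into ["ah", u"\u535A", u"\u63A8", "zz"] while the Latin runs "ah" and "zz" stay intact. Below is a minimal, self-contained sketch of that technique for reference; the helper names (_is_cjk_char, tokenize_with_cjk_split) are illustrative rather than the library's actual API, and the exact list of CJK code-point ranges is an assumption.

    # Sketch only: mimics the CJK-splitting behavior the test checks,
    # not the library's real implementation.

    def _is_cjk_char(cp):
        """Return True if the code point lies in a CJK Unicode block.

        The ranges below are an assumption based on common CJK blocks.
        """
        return (
            0x4E00 <= cp <= 0x9FFF      # CJK Unified Ideographs
            or 0x3400 <= cp <= 0x4DBF   # Extension A
            or 0x20000 <= cp <= 0x2A6DF # Extension B
            or 0xF900 <= cp <= 0xFAFF   # Compatibility Ideographs
        )

    def tokenize_with_cjk_split(text):
        """Pad every CJK character with spaces, then whitespace-tokenize."""
        out = []
        for ch in text:
            if _is_cjk_char(ord(ch)):
                out.append(" " + ch + " ")
            else:
                out.append(ch)
        return "".join(out).split()

    # Reproduces the expectation asserted in the patch above.
    assert tokenize_with_cjk_split(u"ah\u535A\u63A8zz") == [
        u"ah", u"\u535A", u"\u63A8", u"zz"]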