SPLIT PR: add user defined symbols and control symbols (#31305)
* PR SPLIT: moving original changes for adding user defined symbols

* adding gemma test and generalizing gemma converter

* ruff

* update common test

* update serialization test

* update deberta v2 tests, since the rust version adds '.' as a user added token and a space is therefore not added

* removing commented lines

* applying feedback - only add user added_tokens, and check piece.type instead of trainer_spec for user_defined_symbols (see the sketch below)

* add comment referencing sentencepiece
itazap committed Jun 21, 2024
1 parent 5d11492 commit 9b992b7
Showing 6 changed files with 60 additions and 23 deletions.
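
A minimal sketch of what the converter now reads from a SentencePiece model, for context: pieces whose type is USER_DEFINED (== 4) and the control_symbols declared in the trainer_spec. This is not part of the commit; it assumes the protobuf bindings shipped with the sentencepiece pip package, and the model path is a placeholder.

from sentencepiece import sentencepiece_model_pb2 as model_pb2

proto = model_pb2.ModelProto()
with open("spiece.model", "rb") as f:  # placeholder path to any SentencePiece model file
    proto.ParseFromString(f.read())

# piece.type follows sentencepiece_model.proto: NORMAL=1, UNKNOWN=2, CONTROL=3, USER_DEFINED=4, BYTE=6
user_defined = [p.piece for p in proto.pieces if p.type == 4]
control = list(proto.trainer_spec.control_symbols)

print("user defined symbols:", user_defined)
print("control symbols:", control)
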
15 changes: 11 additions & 4 deletions src/transformers/convert_slow_tokenizer.py
@@ -622,6 +622,17 @@ def decoder(self, replacement, add_prefix_space):
def converted(self) -> Tokenizer:
tokenizer = self.tokenizer(self.proto)

+ # Add user defined symbols (type == 4) from sentencepiece (https://github.com/google/sentencepiece/blob/6225e08edb2577757163b3f5dbba4c0b670ef445/src/sentencepiece_model.proto#L299C29-L299C33)
+ user_defined_symbols = [
+     AddedToken(token, normalized=False, special=False)
+     for token in [p.piece for p in self.proto.pieces if p.type == 4]
+ ]
+ control_symbols = [
+     AddedToken(token, normalized=False, special=True) for token in self.proto.trainer_spec.control_symbols
+ ]
+
+ tokenizer.add_tokens(user_defined_symbols + control_symbols)
+
# Tokenizer assemble
normalizer = self.normalizer(self.proto)
if normalizer is not None:
@@ -1330,10 +1341,6 @@ def tokenizer(self, proto):
raise Exception(
"You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
)
- user_defined_symbols = [
-     AddedToken(token, normalized=True, special=False) for token in proto.trainer_spec.user_defined_symbols
- ]
- tokenizer.add_tokens(user_defined_symbols)
return tokenizer


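
The effect of the new add_tokens call, sketched directly with the tokenizers library (again, not part of the commit): an AddedToken registered with normalized=False is matched against the raw input before the model splits it, which is why user defined symbols now survive as a single piece in the fast tokenizer. The tiny WordLevel vocabulary below is made up for illustration.

from tokenizers import Tokenizer, AddedToken
from tokenizers.models import WordLevel
from tokenizers.pre_tokenizers import Whitespace

# Toy tokenizer that only knows "hello" plus an unknown token
tok = Tokenizer(WordLevel(vocab={"[UNK]": 0, "hello": 1}, unk_token="[UNK]"))
tok.pre_tokenizer = Whitespace()

# Register a user defined symbol the same way the converter now does
tok.add_tokens([AddedToken("<mask>", normalized=False, special=False)])

print(tok.encode("hello <mask>").tokens)  # ['hello', '<mask>'] -- the symbol is not split
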
10 changes: 8 additions & 2 deletions tests/models/camembert/test_tokenization_camembert.py
@@ -144,7 +144,7 @@ def _test_added_vocab_and_eos(expected, tokenizer_class, expected_eos, temp_dir)
self.assertTrue(str(expected_eos) not in tokenizer.additional_special_tokens)
self.assertIn(new_eos, tokenizer.added_tokens_decoder.values())
self.assertEqual(tokenizer.added_tokens_decoder[tokenizer.eos_token_id], new_eos)
- self.assertDictEqual(expected, tokenizer.added_tokens_decoder)
+ self.assertTrue(all(item in tokenizer.added_tokens_decoder.items() for item in expected.items()))
return tokenizer

new_eos = AddedToken("[NEW_EOS]", rstrip=False, lstrip=True, normalized=False)
@@ -198,7 +198,13 @@ def _test_added_vocab_and_eos(expected, tokenizer_class, expected_eos, temp_dir)
self.assertIn(new_eos, list(tokenizer_fast.added_tokens_decoder.values()))
# We can't test the following because for BC we kept the default rstrip lstrip in slow not fast. Will comment once normalization is alright
with self.subTest("Hub -> Fast == Hub -> Slow: make sure slow and fast tokenizer match"):
- self.assertDictEqual(EXPECTED_ADDED_TOKENS_DECODER, tokenizer_fast.added_tokens_decoder)
+ with self.subTest("Hub -> Fast == Hub -> Slow: make sure slow and fast tokenizer match"):
+     self.assertTrue(
+         all(
+             item in tokenizer.added_tokens_decoder.items()
+             for item in EXPECTED_ADDED_TOKENS_DECODER.items()
+         )
+     )

EXPECTED_ADDED_TOKENS_DECODER = tokenizer_fast.added_tokens_decoder
with tempfile.TemporaryDirectory() as tmp_dir_4:
26 changes: 13 additions & 13 deletions tests/models/deberta_v2/test_tokenization_deberta_v2.py
@@ -89,8 +89,8 @@ def test_sentencepiece_tokenize_and_decode(self):

def test_split_by_punct(self):
# fmt: off
sequence = "I was born in 92000, and this is falsé."
tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
sequence = "I was born in 92000, and this is falsé!"
tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", "!", ]
# fmt: on

tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>", split_by_punct=True)
@@ -105,8 +105,8 @@ def test_split_by_punct(self):

def test_do_lower_case_split_by_punct(self):
# fmt: off
sequence = "I was born in 92000, and this is falsé."
tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
sequence = "I was born in 92000, and this is falsé!"
tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", "!", ]
# fmt: on

tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>", do_lower_case=True, split_by_punct=True)
@@ -121,8 +121,8 @@ def test_do_lower_case_split_by_punct(self):

def test_do_lower_case_split_by_punct_false(self):
# fmt: off
sequence = "I was born in 92000, and this is falsé."
tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
sequence = "I was born in 92000, and this is falsé!"
tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "!", ]
# fmt: on

tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>", do_lower_case=True, split_by_punct=False)
@@ -139,8 +139,8 @@ def test_do_lower_case_split_by_punct_false(self):

def test_do_lower_case_false_split_by_punct(self):
# fmt: off
sequence = "I was born in 92000, and this is falsé."
tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
sequence = "I was born in 92000, and this is falsé!"
tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", "!", ]
# fmt: on

tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>", do_lower_case=False, split_by_punct=True)
@@ -177,7 +177,7 @@ def test_rust_and_python_full_tokenizers(self):
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()

sequence = "I was born in 92000, and this is falsé."
sequence = "I was born in 92000, and this is falsé!"

tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
@@ -216,10 +216,10 @@ def test_full_tokenizer(self):
self.assertListEqual(rust_back_tokens, back_tokens_target)

# fmt: off
sequence = "I was born in 92000, and this is falsé."
ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
sequence = "I was born in 92000, and this is falsé!"
ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 187]
tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "!", ]
back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "!", ]
# fmt: on

ids = tokenizer.encode(sequence, add_special_tokens=False)
13 changes: 13 additions & 0 deletions tests/models/gemma/test_tokenization_gemma.py
@@ -193,6 +193,19 @@ def integration_tests(self):
},
)

+ def test_user_added_tokens(self):
+     # Ensure that user added tokens are not split in the fast tokenizer
+     slow_tokenizer = self.tokenizer
+     fast_tokenizer = self.rust_tokenizer
+
+     user_added_token = "<mask>"
+
+     slow_tokens = slow_tokenizer.convert_ids_to_tokens(slow_tokenizer.encode(user_added_token))
+     fast_tokens = slow_tokenizer.convert_ids_to_tokens(fast_tokenizer.encode(user_added_token))
+
+     self.assertTrue(user_added_token in fast_tokens)
+     self.assertEqual(slow_tokens, fast_tokens)
+
def test_fast_special_tokens(self):
slow_tokenizer = self.tokenizer
fast_tokenizer = self.rust_tokenizer
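
The same parity check can be reproduced outside the test harness with any SentencePiece-based checkpoint, roughly as below; not part of the commit. The checkpoint name and probed symbol are placeholders and must be replaced with a real checkpoint and a symbol that its model actually marks as user defined.

from transformers import AutoTokenizer

checkpoint = "google/gemma-2b"  # placeholder: any SentencePiece-based checkpoint
probe = "<mask>"                # placeholder: a symbol that checkpoint marks as user defined

slow = AutoTokenizer.from_pretrained(checkpoint, use_fast=False)
fast = AutoTokenizer.from_pretrained(checkpoint, use_fast=True)

slow_tokens = slow.convert_ids_to_tokens(slow.encode(probe))
fast_tokens = fast.convert_ids_to_tokens(fast.encode(probe))

assert probe in fast_tokens           # the fast tokenizer keeps the symbol intact
assert slow_tokens == fast_tokens     # and matches the slow tokenizer
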
9 changes: 7 additions & 2 deletions tests/models/rembert/test_tokenization_rembert.py
@@ -172,7 +172,7 @@ def _test_added_vocab_and_eos(expected, tokenizer_class, expected_eos, temp_dir)
self.assertTrue(str(expected_eos) not in tokenizer.additional_special_tokens)
self.assertIn(new_eos, tokenizer.added_tokens_decoder.values())
self.assertEqual(tokenizer.added_tokens_decoder[tokenizer.eos_token_id], new_eos)
- self.assertDictEqual(expected, tokenizer.added_tokens_decoder)
+ self.assertTrue(all(item in tokenizer.added_tokens_decoder.items() for item in expected.items()))
return tokenizer

new_eos = AddedToken("[NEW_EOS]", rstrip=False, lstrip=True, normalized=False, special=True)
@@ -227,7 +227,12 @@ def _test_added_vocab_and_eos(expected, tokenizer_class, expected_eos, temp_dir)
self.assertIn(new_eos, list(tokenizer_fast.added_tokens_decoder.values()))
# We can't test the following because for BC we kept the default rstrip lstrip in slow not fast. Will comment once normalization is alright
with self.subTest("Hub -> Fast == Hub -> Slow: make sure slow and fast tokenizer match"):
- self.assertDictEqual(EXPECTED_ADDED_TOKENS_DECODER, tokenizer_fast.added_tokens_decoder)
+ self.assertTrue(
+     all(
+         item in tokenizer.added_tokens_decoder.items()
+         for item in EXPECTED_ADDED_TOKENS_DECODER.items()
+     )
+ )

EXPECTED_ADDED_TOKENS_DECODER = tokenizer_fast.added_tokens_decoder
with tempfile.TemporaryDirectory() as tmp_dir_4:
10 changes: 8 additions & 2 deletions tests/test_tokenization_common.py
@@ -4228,7 +4228,7 @@ def _test_added_vocab_and_eos(expected, tokenizer_class, expected_eos, temp_dir)
self.assertTrue(str(expected_eos) not in tokenizer.additional_special_tokens)
self.assertIn(new_eos, tokenizer.added_tokens_decoder.values())
self.assertEqual(tokenizer.added_tokens_decoder[tokenizer.eos_token_id], new_eos)
- self.assertDictEqual(expected, tokenizer.added_tokens_decoder)
+ self.assertTrue(all(item in tokenizer.added_tokens_decoder.items() for item in expected.items()))
return tokenizer

new_eos = AddedToken("[NEW_EOS]", rstrip=False, lstrip=True, normalized=False, special=True)
@@ -4280,7 +4280,13 @@ def _test_added_vocab_and_eos(expected, tokenizer_class, expected_eos, temp_dir)
self.assertIn(new_eos, list(tokenizer_fast.added_tokens_decoder.values()))
# We can't test the following because for BC we kept the default rstrip lstrip in slow not fast. Will comment once normalization is alright
with self.subTest("Hub -> Fast == Hub -> Slow: make sure slow and fast tokenizer match"):
- self.assertDictEqual(EXPECTED_ADDED_TOKENS_DECODER, tokenizer_fast.added_tokens_decoder)
+ # Fast tokenizer may have user_defined_symbols and control_symbols added, unlike slow
+ self.assertTrue(
+     all(
+         item in tokenizer.added_tokens_decoder.items()
+         for item in EXPECTED_ADDED_TOKENS_DECODER.items()
+     )
+ )

EXPECTED_ADDED_TOKENS_DECODER = tokenizer_fast.added_tokens_decoder
with tempfile.TemporaryDirectory() as tmp_dir_4:
