--- convert.py	2023-09-15 11:26:48
+++ convert.py.new	2023-09-15 11:27:46
@@ -7,6 +7,7 @@
 import enum
 import faulthandler
 import functools
+import importlib
 import io
 import itertools
 import json
@@ -296,8 +297,31 @@
         params.path_model = model_plus.paths[0].parent
 
         return params
 
+
+class XgenVocab:
+    def __init__(self, path: Path) -> None:
+        self.fname_tokenizer = path
+        self.fname_added_tokens = None
+        path = str((path / "tokenization_xgen.py").absolute())
+        spec = importlib.util.spec_from_file_location(path, path)
+        module = importlib.util.module_from_spec(spec)
+        spec.loader.exec_module(module)
+        self.xt = module.XgenTokenizer()
+        self.vocab_size_base: int = self.xt.vocab_size
+        self.vocab_size: int = self.xt.vocab_size
+        self.added_tokens_list = []
+
+    def all_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]:
+        for index in range(0, self.vocab_size_base):
+            token = self.xt.encoder.decode_single_token_bytes(index)
+            yield token, float(index), gguf.TokenType.NORMAL
+        for index in range(self.vocab_size_base, self.vocab_size):
+            yield b'<|unk|>', float(index), gguf.TokenType.USER_DEFINED
+
+    def __repr__(self) -> str:
+        return f"<XgenVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
+
 #
 # vocab
 #
@@ -429,7 +453,7 @@
     def __repr__(self) -> str:
         return f"<SentencePieceVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
 
-Vocab: TypeAlias = 'BpeVocab | SentencePieceVocab'
+Vocab: TypeAlias = 'BpeVocab | SentencePieceVocab | XgenVocab'
 
 #
 # data loading
 #
@@ -802,12 +826,15 @@
 
 def check_vocab_size(params: Params, vocab: Vocab) -> None:
     if params.n_vocab != vocab.vocab_size:
-        assert isinstance(vocab, BpeVocab) or isinstance(vocab, SentencePieceVocab)
+        assert isinstance(vocab, (BpeVocab, SentencePieceVocab, XgenVocab))
         if params.n_vocab == vocab.vocab_size_base:
             print("Ignoring added_tokens.json since model matches vocab size without it.")
             vocab.added_tokens_list = []
             vocab.vocab_size = vocab.vocab_size_base
             return
+        if isinstance(vocab, XgenVocab):
+            vocab.vocab_size = params.n_vocab
+            return
         msg = f"Vocab size mismatch (model has {params.n_vocab}, but {vocab.fname_tokenizer}"
         if vocab.fname_added_tokens is not None:
             msg += f" combined with {vocab.fname_added_tokens}"
@@ -863,8 +890,10 @@
             self.gguf.add_tokenizer_model("llama")
         elif isinstance(vocab, BpeVocab):
             self.gguf.add_tokenizer_model("gpt2")
+        elif isinstance(vocab, XgenVocab):
+            self.gguf.add_tokenizer_model("llama")
         else:
-            raise ValueError(f'Unknown vocab type: Not BpeVocab or SentencePieceVocab')
+            raise ValueError('Unknown vocab type: Not BpeVocab, SentencePieceVocab, or XgenVocab')
         self.gguf.add_token_list(tokens)
         self.gguf.add_token_scores(scores)
         self.gguf.add_token_types(toktypes)
@@ -1077,6 +1106,8 @@
     # a directory, it might be the model directory, and tokenizer.model might
     # be in the parent of that.
     if path.is_dir():
+        if (path / "tokenization_xgen.py").exists():
+            return XgenVocab(path)
         vocab_file = "tokenizer.model"
         if vocabtype == 'bpe':
             vocab_file = "vocab.json"
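
XgenVocab.__init__ imports the model's bundled tokenization_xgen.py at runtime instead of depending on an installed package. A minimal standalone sketch of that importlib pattern, using a hypothetical my_module.py defining MyTokenizer (both names are placeholders, not part of the patch):

import importlib.util
from pathlib import Path

def load_module(py_file: Path):
    # Build a module spec straight from a file path; the first argument
    # names the module, the second locates its source file.
    spec = importlib.util.spec_from_file_location(py_file.stem, str(py_file))
    module = importlib.util.module_from_spec(spec)
    # Execute the module body so its classes become attributes of `module`.
    spec.loader.exec_module(module)
    return module

# tokenizer = load_module(Path("my_module.py")).MyTokenizer()

The patch passes the file path itself as the module name, which also works; using the stem merely keeps tracebacks and repr output shorter.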
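The check_vocab_size change leans on the all_tokens contract: after vocab_size is bumped to params.n_vocab, the generator pads the tail with <|unk|> placeholders so exactly n_vocab entries reach the GGUF writer. A toy illustration of that contract (ToyVocab is a stand-in, not code from the patch):

class ToyVocab:
    def __init__(self, real_tokens):
        self.real_tokens = real_tokens
        self.vocab_size_base = len(real_tokens)
        self.vocab_size = len(real_tokens)  # later bumped to the model's n_vocab

    def all_tokens(self):
        for i, tok in enumerate(self.real_tokens):
            yield tok, float(i), "NORMAL"
        # Pad up to vocab_size, mirroring XgenVocab's USER_DEFINED tail.
        for i in range(self.vocab_size_base, self.vocab_size):
            yield b'<|unk|>', float(i), "USER_DEFINED"

vocab = ToyVocab([b'hello', b'world'])
vocab.vocab_size = 4                        # what check_vocab_size does for XgenVocab
assert len(list(vocab.all_tokens())) == 4   # 2 real tokens + 2 padding entries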