From 02624d8bb6ebc678869e4ff2c69aac54b6c5dbe8 Mon Sep 17 00:00:00 2001
From: DriverSong <31926998+DriverSong@users.noreply.github.com>
Date: Thu, 6 Jun 2024 01:59:02 +0800
Subject: [PATCH] [BugFix] Apply get_cached_tokenizer to the tokenizer setter
 of LLM (#5207)

Co-authored-by: qiujiawei9
---
 vllm/entrypoints/llm.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py
index d4a4c16f2a7d..905c36afde1e 100644
--- a/vllm/entrypoints/llm.py
+++ b/vllm/entrypoints/llm.py
@@ -14,6 +14,7 @@
 from vllm.outputs import EmbeddingRequestOutput, RequestOutput
 from vllm.pooling_params import PoolingParams
 from vllm.sampling_params import SamplingParams
+from vllm.transformers_utils.tokenizer import get_cached_tokenizer
 from vllm.usage.usage_lib import UsageContext
 from vllm.utils import Counter, deprecate_kwargs

@@ -152,7 +153,14 @@ def set_tokenizer(
         self,
         tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
     ) -> None:
-        self.llm_engine.tokenizer.tokenizer = tokenizer
+        # While CachedTokenizer is dynamic, have no choice but
+        # compare class name. Misjudgment will arise from
+        # user-defined tokenizer started with 'Cached'
+        if tokenizer.__class__.__name__.startswith("Cached"):
+            self.llm_engine.tokenizer.tokenizer = tokenizer
+        else:
+            self.llm_engine.tokenizer.tokenizer = get_cached_tokenizer(
+                tokenizer)

     @overload # LEGACY: single (prompt + optional token ids)
     def generate(
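
For context, a minimal usage sketch of the setter this patch changes (not part of the diff itself). It assumes vllm and transformers are installed; the model name below is an illustrative placeholder. After the change, a plain Hugging Face tokenizer passed to LLM.set_tokenizer is wrapped with get_cached_tokenizer before being stored, while a tokenizer whose class name already starts with "Cached" is stored as-is, so it is not wrapped twice.

    # Usage sketch, assuming vllm and transformers are installed.
    # "facebook/opt-125m" is only an illustrative placeholder model.
    from transformers import AutoTokenizer

    from vllm import LLM
    from vllm.transformers_utils.tokenizer import get_cached_tokenizer

    llm = LLM(model="facebook/opt-125m")

    # Case 1: a plain Hugging Face tokenizer. With this patch, set_tokenizer
    # wraps it via get_cached_tokenizer before assigning it to the engine.
    plain_tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
    llm.set_tokenizer(plain_tokenizer)

    # Case 2: a tokenizer that is already cached. The class-name check
    # ("Cached" prefix) makes set_tokenizer store it unchanged, avoiding
    # a double wrap.
    cached_tokenizer = get_cached_tokenizer(
        AutoTokenizer.from_pretrained("facebook/opt-125m"))
    llm.set_tokenizer(cached_tokenizer)

The class-name prefix check is used because the cached tokenizer class is created dynamically by get_cached_tokenizer, so there is no static type to test with isinstance; as the in-code comment notes, a user-defined tokenizer whose class name happens to start with "Cached" would be misclassified.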