Skip to content

Commit dbe7a7c

Browse files
authored
Fix: Add an INFO-level log when falling back to GPT2Tokenizer (#12508)
1 parent b7a4e39 commit dbe7a7c

File tree

1 file changed

+4
-0
lines changed

1 file changed

+4
-0
lines changed

api/core/model_runtime/model_providers/__base/tokenizers/gpt2_tokenzier.py

+4
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,9 @@
1+
import logging
12
from threading import Lock
23
from typing import Any
34

5+
logger = logging.getLogger(__name__)
6+
47
_tokenizer: Any = None
58
_lock = Lock()
69

@@ -43,5 +46,6 @@ def get_encoder() -> Any:
4346
base_path = abspath(__file__)
4447
gpt2_tokenizer_path = join(dirname(base_path), "gpt2")
4548
_tokenizer = TransformerGPT2Tokenizer.from_pretrained(gpt2_tokenizer_path)
49+
logger.info("Fallback to Transformers' GPT-2 tokenizer from tiktoken")
4650

4751
return _tokenizer

0 commit comments

Comments
 (0)