fix llamacpp download tinyllama
davidberenstein1957 committed Dec 23, 2024
1 parent 52f6eb4 commit 4e291e7
Showing 1 changed file with 8 additions and 2 deletions.
tests/unit/models/llms/test_llamacpp.py: 10 changes (8 additions & 2 deletions)
@@ -23,14 +23,18 @@
 from .utils import DummyUserDetail
 
 
-@pytest.fixture(scope="module")
-def llm() -> Generator[LlamaCppLLM, None, None]:
+def download_tinyllama() -> None:
     if not os.path.exists("tinyllama.gguf"):
         urllib.request.urlretrieve(
             "https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q2_K.gguf",
             "tinyllama.gguf",
         )
 
+
+@pytest.fixture(scope="module")
+def llm() -> Generator[LlamaCppLLM, None, None]:
+    download_tinyllama()
+
     llm = LlamaCppLLM(model_path="tinyllama.gguf", n_gpu_layers=0)  # type: ignore
     llm.load()
 
@@ -39,6 +43,8 @@ def llm() -> Generator[LlamaCppLLM, None, None]:
 
 class TestLlamaCppLLM:
     def test_no_tokenizer_magpie_raise_value_error(self) -> None:
+        download_tinyllama()
+
         with pytest.raises(
             ValueError,
             match="`use_magpie_template` cannot be `True` if `tokenizer_id` is `None`",
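For context, the change factors the one-time model download out of the llm fixture into a standalone download_tinyllama() helper, so test_no_tokenizer_magpie_raise_value_error, which constructs its own LlamaCppLLM rather than using the fixture, can also guarantee the GGUF file is on disk. Below is a minimal, self-contained sketch of that download-once pattern; the URL and filename are taken verbatim from the diff, while the __main__ guard and the print line are added here purely for illustration.

import os
import urllib.request


def download_tinyllama() -> None:
    # Fetch the 2-bit quantized TinyLlama chat model only if it is not already
    # present in the working directory; urlretrieve saves it as "tinyllama.gguf".
    if not os.path.exists("tinyllama.gguf"):
        urllib.request.urlretrieve(
            "https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q2_K.gguf",
            "tinyllama.gguf",
        )


if __name__ == "__main__":
    download_tinyllama()  # downloads on first run, no-op afterwards
    print(os.path.getsize("tinyllama.gguf"), "bytes on disk")

Because the helper checks for the file before fetching, calling it from both the module-scoped fixture and the standalone test costs at most one download per working directory.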
