llamafile_client.py
import numpy as np
import requests

import settings


def tokenize(
    text: str,
    base_url_prefix: str = "http://localhost",
    port: int = 8080,
) -> list[int]:
    """Tokenize text via the llamafile server's /tokenize endpoint."""
    response = requests.post(
        url=f"{base_url_prefix}:{port}/tokenize",
        headers={"Content-Type": "application/json"},
        json={
            "content": text,
        },
    )
    response.raise_for_status()
    return response.json()["tokens"]


def detokenize(
    tokens: list[int],
    base_url_prefix: str = "http://localhost",
    port: int = 8080,
) -> str:
    """Convert token ids back into text via the /detokenize endpoint."""
    response = requests.post(
        url=f"{base_url_prefix}:{port}/detokenize",
        headers={"Content-Type": "application/json"},
        json={
            "tokens": tokens,
        },
    )
    response.raise_for_status()
    return response.json()["content"]


def embed(text: str, base_url_prefix: str = "http://localhost") -> np.ndarray:
    """Embed text with the embedding model server; returns a (1, dim) float32 array."""
    response = requests.post(
        url=f"{base_url_prefix}:{settings.EMBEDDING_MODEL_PORT}/embedding",
        headers={"Content-Type": "application/json"},
        json={
            "content": text,
        },
    )
    response.raise_for_status()
    emb = np.array(response.json()["embedding"], dtype=np.float32)
    return np.expand_dims(emb, axis=0)


def completion(prompt: str, base_url_prefix: str = "http://localhost", **kwargs) -> str:
    """Request a completion from the generation model server.

    Extra keyword arguments are passed through to the /completion endpoint
    and override the deterministic defaults (temperature=0, seed=0).
    """
    options = {
        "temperature": 0,
        "seed": 0,
    }
    options.update(kwargs)
    response = requests.post(
        url=f"{base_url_prefix}:{settings.GENERATION_MODEL_PORT}/completion",
        headers={"Content-Type": "application/json"},
        json={"prompt": prompt, **options},
    )
    response.raise_for_status()
    return response.json()["content"]
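

# Minimal usage sketch (an illustration, not part of the original module).
# It assumes llamafile servers are already running locally: tokenize/detokenize
# on the default port 8080, and the embedding and generation models on the
# ports configured in settings.
if __name__ == "__main__":
    tokens = tokenize("Hello, llamafile!")
    print(tokens)
    print(detokenize(tokens))

    vector = embed("Hello, llamafile!")
    print(vector.shape)  # (1, embedding_dim)

    # Keyword arguments are forwarded to /completion; here a non-zero
    # temperature overrides the deterministic default.
    print(completion("Q: What is a llamafile?\nA:", temperature=0.7))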