-
-
Notifications
You must be signed in to change notification settings - Fork 14
/
Copy pathllm_openrouter.py
103 lines (81 loc) · 2.93 KB
/
llm_openrouter.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
import llm
from llm.default_plugins.openai_models import Chat, AsyncChat
from pathlib import Path
import json
import time
import httpx
def get_openrouter_models():
    """Return the list of model definitions published by the OpenRouter API.

    Results are cached on disk for an hour under the llm user directory.
    """
    cached = fetch_cached_json(
        url="https://openrouter.ai/api/v1/models",
        path=llm.user_dir() / "openrouter_models.json",
        cache_timeout=3600,
    )
    return cached["data"]
class OpenRouterChat(Chat):
    """Synchronous chat model routed through OpenRouter's API."""

    # Tell llm which key slot / environment variable supplies credentials.
    needs_key = "openrouter"
    key_env_var = "OPENROUTER_KEY"

    def __str__(self):
        return f"OpenRouter: {self.model_id}"
class OpenRouterAsyncChat(AsyncChat):
    """Asynchronous chat model routed through OpenRouter's API."""

    # Tell llm which key slot / environment variable supplies credentials.
    needs_key = "openrouter"
    key_env_var = "OPENROUTER_KEY"

    def __str__(self):
        return f"OpenRouter: {self.model_id}"
@llm.hookimpl
def register_models(register):
    """Register every OpenRouter model with llm, sync and async variants.

    Registration is skipped entirely when no OpenRouter key is configured.
    """
    key = llm.get_key("", "openrouter", "LLM_OPENROUTER_KEY")
    if not key:
        return
    for model in get_openrouter_models():
        # Shared constructor arguments for both the sync and async classes.
        common = {
            "model_id": "openrouter/{}".format(model["id"]),
            "model_name": model["id"],
            "vision": get_supports_images(model),
            "api_base": "https://openrouter.ai/api/v1",
            "headers": {"HTTP-Referer": "https://llm.datasette.io/", "X-Title": "LLM"},
        }
        register(OpenRouterChat(**common), OpenRouterAsyncChat(**common))
class DownloadError(Exception):
    """Raised when the remote fetch fails and no cached copy exists."""
def fetch_cached_json(url, path, cache_timeout):
    """Fetch JSON from *url*, caching the parsed response at *path*.

    Returns the cached copy when it is younger than *cache_timeout*
    seconds. On download failure, a stale cache is served if one exists;
    otherwise DownloadError is raised.
    """
    path = Path(path)
    # Ensure the cache directory exists before any read or write.
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.is_file():
        mod_time = path.stat().st_mtime
        # Serve the cached copy while it is still fresh.
        if time.time() - mod_time < cache_timeout:
            with open(path, "r", encoding="utf-8") as file:
                return json.load(file)
    try:
        response = httpx.get(url, follow_redirects=True)
        response.raise_for_status()  # Raises httpx.HTTPError on 4xx/5xx
        # Parse the body once and reuse it (the original parsed it twice).
        data = response.json()
        with open(path, "w", encoding="utf-8") as file:
            json.dump(data, file)
        return data
    except httpx.HTTPError:
        # Network/HTTP failure: fall back to a stale cache if one exists.
        if path.is_file():
            with open(path, "r", encoding="utf-8") as file:
                return json.load(file)
        raise DownloadError(
            f"Failed to download data and no cache is available at {path}"
        )
def get_supports_images(model_definition):
    """Return True if the model accepts image input.

    The OpenRouter API describes modality as e.g. ``text->text`` or
    ``text+image->text``; any malformed definition counts as no support.
    """
    try:
        spec = model_definition["architecture"]["modality"]
        # Everything before "->" is the input side; inputs are "+"-joined.
        input_side = spec.partition("->")[0]
        return "image" in input_side.split("+")
    except Exception:
        return False