From f94c63405c1566d74ddaac0859b38badedbb6acb Mon Sep 17 00:00:00 2001 From: eliranwong Date: Thu, 28 Nov 2024 17:49:02 +0000 Subject: [PATCH] support X AI Grok LLM --- README.md | 11 ++++++- setup.py | 2 +- uniquebible/README.md | 11 ++++++- uniquebible/__init__.py | 17 ++++++++++- uniquebible/gui/Worker.py | 13 ++++++++ uniquebible/latest_changes.txt | 4 +++ uniquebible/plugins/menu/Bible Chat.py | 37 ++++++++++++++++++++--- uniquebible/startup/nonGui.py | 4 +++ uniquebible/util/ConfigUtil.py | 17 +++++++++++ {uniquebible/xonsh => xonsh}/README.md | 0 {uniquebible/xonsh => xonsh}/completer.py | 0 11 files changed, 107 insertions(+), 9 deletions(-) rename {uniquebible/xonsh => xonsh}/README.md (100%) rename {uniquebible/xonsh => xonsh}/completer.py (100%) diff --git a/README.md b/README.md index 966e3e4864..885f15000b 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ This single project has two major interfaces: Qt-based Multi-Window Desktop Application: -Tested in: Windows 10, Windows WSL2, macOS [Sierra+] and Linux (Arch, Debian, Ubuntu & Mint), Chrome OS (Debian 10), Android / iOS +Platforms: Windows 10, Windows WSL2, macOS [Sierra+] and Linux (Arch, Debian, Ubuntu & Mint), Chrome OS (Debian 10), Android / iOS Unique Bible App can runs in different modes, both online and offline, for examples: @@ -24,6 +24,15 @@ Unique Bible App can other modes ... +# AI Features + +AI Features have been integrated into UniqueBible App. Five backends are supported: +1. OpenAI / ChatGPT +2. Google AI / Gemini +3. X AI / Grok +4. Groq Cloud API +5. Mistral AI API. 
+ # Development Team Eliran Wong (https://github.com/eliranwong) diff --git a/setup.py b/setup.py index 326be21402..537dd6f2c5 100644 --- a/setup.py +++ b/setup.py @@ -47,7 +47,7 @@ # https://packaging.python.org/en/latest/guides/distributing-packages-using-setuptools/ setup( name=package, - version="0.2.3", + version="0.2.4", python_requires=">=3.8, <3.13", description=f"UniqueBible App is a cross-platform & offline bible application, integrated with high-quality resources and unique features. Developers: Eliran Wong and Oliver Tseng", long_description=long_description, diff --git a/uniquebible/README.md b/uniquebible/README.md index 966e3e4864..885f15000b 100644 --- a/uniquebible/README.md +++ b/uniquebible/README.md @@ -12,7 +12,7 @@ This single project has two major interfaces: Qt-based Multi-Window Desktop Application: -Tested in: Windows 10, Windows WSL2, macOS [Sierra+] and Linux (Arch, Debian, Ubuntu & Mint), Chrome OS (Debian 10), Android / iOS +Platforms: Windows 10, Windows WSL2, macOS [Sierra+] and Linux (Arch, Debian, Ubuntu & Mint), Chrome OS (Debian 10), Android / iOS Unique Bible App can runs in different modes, both online and offline, for examples: @@ -24,6 +24,15 @@ Unique Bible App can other modes ... +# AI Features + +AI Features have been integrated into UniqueBible App. Five backends are supported: +1. OpenAI / ChatGPT +2. Google AI / Gemini +3. X AI / Grok +4. Groq Cloud API +5. Mistral AI API. 
+ # Development Team Eliran Wong (https://github.com/eliranwong) diff --git a/uniquebible/__init__.py b/uniquebible/__init__.py index afc56f2219..04e58a4e46 100644 --- a/uniquebible/__init__.py +++ b/uniquebible/__init__.py @@ -131,7 +131,7 @@ def isServerAlive(ip, port): import unicodedata, traceback, markdown from uniquebible.util.BibleVerseParser import BibleVerseParser -config.llm_backends = ["openai", "google", "groq", "mistral"] +config.llm_backends = ["openai", "google", "grok", "groq", "mistral"] def is_CJK(self, text): for char in text: @@ -146,6 +146,8 @@ def isLLMReady(backend=""): return True elif backend == "mistral" and config.mistralApi_key: return True + elif backend == "grok" and config.grokApi_key: + return True elif backend == "groq" and config.groqApi_key: return True elif backend == "google" and config.googleaiApi_key: @@ -217,6 +219,19 @@ def getChatResponse(backend, chatMessages) -> Optional[str]: max_tokens=config.openaiApi_chat_model_max_tokens, stream=False, ) + elif backend == "grok": + grokClient = OpenAI( + api_key=config.grokApi_key, + base_url="https://api.x.ai/v1", + ) + completion = grokClient.chat.completions.create( + model=config.grokApi_chat_model, + messages=chatMessages, + n=1, + temperature=config.grokApi_llmTemperature, + max_tokens=config.grokApi_chat_model_max_tokens, + stream=False, + ) elif backend == "google": # https://ai.google.dev/gemini-api/docs/openai googleaiClient = OpenAI( diff --git a/uniquebible/gui/Worker.py b/uniquebible/gui/Worker.py index 380abd2841..9b9db32124 100644 --- a/uniquebible/gui/Worker.py +++ b/uniquebible/gui/Worker.py @@ -197,6 +197,19 @@ def getMistralApi_key(): max_tokens=config.googleaiApi_chat_model_max_tokens, stream=True, ) + elif config.llm_backend == "grok": + grokClient = OpenAI( + api_key=config.grokApi_key, + base_url="https://api.x.ai/v1", + ) + return grokClient.chat.completions.create( + model=config.grokApi_chat_model, + messages=thisMessage, + n=1, + 
temperature=config.grokApi_llmTemperature, + max_tokens=config.grokApi_chat_model_max_tokens, + stream=True, + ) elif config.llm_backend == "mistral": return Mistral(api_key=getMistralApi_key()).chat.stream( model=config.mistralApi_chat_model, diff --git a/uniquebible/latest_changes.txt b/uniquebible/latest_changes.txt index ffe21aad45..83cf98d3c9 100755 --- a/uniquebible/latest_changes.txt +++ b/uniquebible/latest_changes.txt @@ -1,5 +1,9 @@ PIP package: +0.2.4 + +* added support of using X AI Grok model + 0.2.1-0.2.3 * added support of xonsh auto-completions diff --git a/uniquebible/plugins/menu/Bible Chat.py b/uniquebible/plugins/menu/Bible Chat.py index 160ca616fc..734da9d20e 100644 --- a/uniquebible/plugins/menu/Bible Chat.py +++ b/uniquebible/plugins/menu/Bible Chat.py @@ -64,6 +64,8 @@ def __init__(self, parent=None): self.apiKeyEdit = QLineEdit(config.googleaiApi_key) elif config.llm_backend == "mistral": self.apiKeyEdit = QLineEdit(str(config.mistralApi_key)) + elif config.llm_backend == "grok": + self.apiKeyEdit = QLineEdit(str(config.grokApi_key)) elif config.llm_backend == "groq": self.apiKeyEdit = QLineEdit(str(config.groqApi_key)) self.apiKeyEdit.setEchoMode(QLineEdit.Password) @@ -90,6 +92,12 @@ def __init__(self, parent=None): if key == config.mistralApi_chat_model: initialIndex = index index += 1 + elif config.llm_backend == "grok": + for key in ("grok-beta",): + self.apiModelBox.addItem(key) + if key == config.grokApi_chat_model: + initialIndex = index + index += 1 elif config.llm_backend == "groq": for key in ("gemma2-9b-it", "gemma-7b-it", "llama-3.1-70b-versatile", "llama-3.1-8b-instant", "llama-3.2-1b-preview", "llama-3.2-3b-preview", "llama-3.2-11b-vision-preview", "llama-3.2-90b-vision-preview", "llama3-70b-8192", "llama3-8b-8192", "mixtral-8x7b-32768"): self.apiModelBox.addItem(key) @@ -121,6 +129,8 @@ def __init__(self, parent=None): self.maxTokenEdit = QLineEdit(str(config.googleaiApi_chat_model_max_tokens)) elif config.llm_backend == 
"mistral": self.maxTokenEdit = QLineEdit(str(config.mistralApi_chat_model_max_tokens)) + elif config.llm_backend == "grok": + self.maxTokenEdit = QLineEdit(str(config.grokApi_chat_model_max_tokens)) elif config.llm_backend == "groq": self.maxTokenEdit = QLineEdit(str(config.groqApi_chat_model_max_tokens)) self.maxTokenEdit.setToolTip("The maximum number of tokens to generate in the completion.\nThe token count of your prompt plus max_tokens cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).") @@ -476,13 +486,15 @@ def setupUI(self): self.backends.setCurrentIndex(0) elif config.llm_backend == "google": self.backends.setCurrentIndex(1) - elif config.llm_backend == "groq": + elif config.llm_backend == "grok": self.backends.setCurrentIndex(2) - elif config.llm_backend == "mistral": + elif config.llm_backend == "groq": self.backends.setCurrentIndex(3) + elif config.llm_backend == "mistral": + self.backends.setCurrentIndex(4) else: config.llm_backend == "groq" - self.backends.setCurrentIndex(2) + self.backends.setCurrentIndex(3) self.fontSize = QComboBox() self.fontSize.addItems([str(i) for i in range(1, 51)]) self.fontSize.setCurrentIndex((config.chatGPTFontSize - 1)) @@ -492,6 +504,8 @@ def setupUI(self): self.temperature.setCurrentIndex(int(config.openaiApi_llmTemperature * 10)) elif config.llm_backend == "google": self.temperature.setCurrentIndex(int(config.googleaiApi_llmTemperature * 10)) + elif config.llm_backend == "grok": + self.temperature.setCurrentIndex(int(config.grokApi_llmTemperature * 10)) elif config.llm_backend == "groq": self.temperature.setCurrentIndex(int(config.groqApi_llmTemperature * 10)) elif config.llm_backend == "mistral": @@ -700,6 +714,8 @@ def showApiDialog(self): config.openaiApi_key = dialog.api_key() elif config.llm_backend == "google": config.googleaiApi_key = dialog.api_key() + elif config.llm_backend == "grok": + config.grokApi_key = 
dialog.api_key() elif config.llm_backend == "mistral": config.mistralApi_key = dialog.api_key() try: @@ -727,6 +743,10 @@ def showApiDialog(self): config.googleaiApi_chat_model_max_tokens = int(dialog.max_token()) if config.googleaiApi_chat_model_max_tokens < 20: config.googleaiApi_chat_model_max_tokens = 20 + elif config.llm_backend == "grok": + config.grokApi_chat_model_max_tokens = int(dialog.max_token()) + if config.grokApi_chat_model_max_tokens < 20: + config.grokApi_chat_model_max_tokens = 20 elif config.llm_backend == "mistral": config.mistralApi_chat_model_max_tokens = int(dialog.max_token()) if config.mistralApi_chat_model_max_tokens < 20: @@ -753,6 +773,8 @@ def showApiDialog(self): config.openaiApi_chat_model = dialog.apiModel() elif config.llm_backend == "google": config.googleaiApi_chat_model = dialog.apiModel() + elif config.llm_backend == "grok": + config.grokApi_chat_model = dialog.apiModel() elif config.llm_backend == "mistral": config.mistralApi_chat_model = dialog.apiModel() elif config.llm_backend == "groq": @@ -768,7 +790,7 @@ def showApiDialog(self): self.parent.reloadMenubar() config.mainWindow.runBibleChatPlugins() #config.chatGPTApiPredefinedContext = dialog.predefinedContext() - config.chatGPTApiContextInAllInputs = dialog.contextInAllInputs() + #config.chatGPTApiContextInAllInputs = dialog.contextInAllInputs() config.chatGPTApiContext = dialog.context() #config.chatGPTApiAudioLanguage = dialog.language() self.newData() @@ -782,13 +804,17 @@ def updateBackend(self, index): elif index == 1: config.llm_backend = "google" elif index == 2: - config.llm_backend = "groq" + config.llm_backend = "grok" elif index == 3: + config.llm_backend = "groq" + elif index == 4: config.llm_backend = "mistral" def updateTemperature(self, index): if config.llm_backend == "mistral": config.mistralApi_llmTemperature = float(index / 10) + elif config.llm_backend == "grok": + config.grokApi_llmTemperature = float(index / 10) elif config.llm_backend == "groq": 
config.groqApi_llmTemperature = float(index / 10) elif config.llm_backend == "openai": @@ -973,6 +999,7 @@ def newData(self): 1) Register and get an API key in one of the following websites: OpenAI - https://platform.openai.com/account/api-keys Google - https://ai.google.dev/ + Grok - https://docs.x.ai/docs Groq - https://console.groq.com/keys Mistral - https://console.mistral.ai/api-keys/ 2) Select a backend below diff --git a/uniquebible/startup/nonGui.py b/uniquebible/startup/nonGui.py index 16bc066de6..9fc05583ef 100755 --- a/uniquebible/startup/nonGui.py +++ b/uniquebible/startup/nonGui.py @@ -227,6 +227,7 @@ def run_terminal_mode(): # api-client mode def run_api_client_mode(): + cwd = os.getcwd() def getApiOutput(command: str): private = f"private={config.web_api_private}&" if config.web_api_private else "" @@ -373,9 +374,11 @@ def changeSettings(): #import traceback #print(traceback.format_exc()) print(f"Failed to connect '{config.web_api_endpoint}' at the moment!") + os.chdir(cwd) # stream mode def run_stream_mode(): + cwd = os.getcwd() # standard input stdin_text = sys.stdin.read() if not sys.stdin.isatty() else "" @@ -393,6 +396,7 @@ def run_stream_mode(): # run terminal mode if no command is given config.runMode = "terminal" run_terminal_mode() + os.chdir(cwd) # ssh-server # read setup guide at https://github.com/eliranwong/UniqueBible/wiki/Run-SSH-Server diff --git a/uniquebible/util/ConfigUtil.py b/uniquebible/util/ConfigUtil.py index 80c77aea37..8a0d1a7fc6 100644 --- a/uniquebible/util/ConfigUtil.py +++ b/uniquebible/util/ConfigUtil.py @@ -205,6 +205,10 @@ def updateModules(module, isInstalled): # config.mistralApi_llmTemperature # config.mistralApi_chat_model # config.mistralApi_chat_model_max_tokens + # config.grokApi_key + # config.grokApi_llmTemperature + # config.grokApi_chat_model + # config.grokApi_chat_model_max_tokens # config.openaiApi_key # config.openaiApi_llmTemperature # config.openaiApi_chat_model @@ -282,6 +286,19 @@ def 
updateModules(module, isInstalled): setConfig("groqApi_llmTemperature", """ # Groq Chat Temperature""", 0.3) # 0.2-0.8 is good to use + setConfig("grokApi_key", """ + # Grok X AI API Keys""", + "") + setConfig("grokApi_chat_model", """ + # Grok X AI Chat Model""", + "grok-beta") + setConfig("grokApi_chat_model_max_tokens", """ + # Grok X AI Chat Maximum Output Tokens""", + 127999) # maximum 127999, greater than this value causes errors + setConfig("grokApi_llmTemperature", """ + # Grok X AI Chat Temperature""", + 0.3) + # mistral setConfig("mistralApi_key", """ # Mistral AI API Keys""", "") diff --git a/uniquebible/xonsh/README.md b/xonsh/README.md similarity index 100% rename from uniquebible/xonsh/README.md rename to xonsh/README.md diff --git a/uniquebible/xonsh/completer.py b/xonsh/completer.py similarity index 100% rename from uniquebible/xonsh/completer.py rename to xonsh/completer.py