Skip to content

Commit

Permalink
upgraded lollms
Browse files Browse the repository at this point in the history
  • Loading branch information
ParisNeo committed Sep 14, 2024
1 parent fe56254 commit 0450007
Show file tree
Hide file tree
Showing 7 changed files with 192 additions and 8 deletions.
42 changes: 42 additions & 0 deletions environment.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
name: lollms_env
channels:
  - defaults
  - conda-forge  # Adds a wider selection of packages, especially for less common ones
dependencies:
  - python=3.11
  - numpy=1.26.*
  - pandas
  - pillow>=9.5.0
  - pyyaml
  - requests
  - rich
  - scipy
  - tqdm
  - setuptools
  - wheel
  - psutil
  - pytest
  - gitpython
  - beautifulsoup4
  - packaging
  - fastapi
  - uvicorn
  - pydantic
  - selenium
  - aiofiles
  - pip  # Conda will manage pip installation
  - pip:
      - colorama
      - ascii-colors>=0.4.2
      - python-multipart
      # One entry with both extras replaces the three previous
      # python-socketio lines (base, [client], [asyncio_client]).
      - python-socketio[client,asyncio_client]
      - tiktoken
      - pipmaster>=0.1.7
      - lollmsvectordb>=1.1.0
      # NOTE: pip normalizes "freedom_search" to "freedom-search", so the
      # previous unpinned duplicate "freedom_search" entry was removed to
      # keep the >=0.1.9 pin authoritative.
      - freedom-search>=0.1.9
      - scrapemaster>=0.2.0
      - lollms_client>=0.7.5
      - zipfile36
2 changes: 1 addition & 1 deletion lollms/server/endpoints/lollms_tts.py
Original file line number Diff line number Diff line change
Expand Up @@ -244,7 +244,7 @@ async def upload_voice_file(file: UploadFile = File(...)):

# Save the file to disk or process it further
contents = await file.read()
safe_filename = f"voice_{file_path.name}"
safe_filename = f"{file_path.name}"
safe_file_path = lollmsElfServer.lollms_paths.custom_voices_path/safe_filename
with safe_file_path.open("wb") as f:
f.write(contents)
Expand Down
52 changes: 52 additions & 0 deletions lollms/server/endpoints/lollms_whisper.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
"""
project: lollms_webui
file: lollms_xtts.py
author: ParisNeo
description:
This module contains a set of FastAPI routes that provide information about the Lord of Large Language and Multimodal Systems (LoLLMs) Web UI
application. These routes allow users to
"""
from fastapi import APIRouter, Request, UploadFile, File, HTTPException
from fastapi.responses import PlainTextResponse
from lollms_webui import LOLLMSWebUI
from pydantic import BaseModel
from starlette.responses import StreamingResponse
from lollms.types import MSG_OPERATION_TYPE
from lollms.main_config import BaseConfig
from lollms.utilities import find_next_available_filename, output_file_path_to_url, detect_antiprompt, remove_text_from_string, trace_exception, find_first_available_file_index, add_period, PackageManager
from lollms.security import sanitize_path, validate_path, check_access
from pathlib import Path
from ascii_colors import ASCIIColors
import os
import platform

# ----------------------- Defining router and main class ------------------------------

router = APIRouter()
lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance()

class Identification(BaseModel):
    """Request payload identifying the calling client.

    The client_id is checked against the server via check_access before
    any installation endpoint is allowed to proceed.
    """
    # Unique id of the client session requesting the operation
    client_id: str

# ----------------------- voice ------------------------------
@router.post("/install_whisper")
def install_whisper(data: Identification):
check_access(lollmsElfServer, data.client_id)
try:
if lollmsElfServer.config.headless_server_mode:
return {"status":False,"error":"Service installation is blocked when in headless mode for obvious security reasons!"}

if lollmsElfServer.config.host!="localhost" and lollmsElfServer.config.host!="127.0.0.1":
return {"status":False,"error":"Service installation is blocked when the server is exposed outside for very obvious reasons!"}

lollmsElfServer.ShowBlockingMessage("Installing whisper library\nPlease stand by")
from lollms.services.stt.whisper.lollms_whisper import install_whisper
install_whisper(lollmsElfServer)
ASCIIColors.success("Done")
lollmsElfServer.HideBlockingMessage()
return {"status":True}
except Exception as ex:
lollmsElfServer.HideBlockingMessage()
lollmsElfServer.InfoMessage(f"It looks like I could not install whisper because of this error:\n{ex}")
return {"status":False, 'error':str(ex)}
52 changes: 52 additions & 0 deletions lollms/server/endpoints/lollms_xtts.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
"""
project: lollms_webui
file: lollms_xtts.py
author: ParisNeo
description:
This module contains a set of FastAPI routes that provide information about the Lord of Large Language and Multimodal Systems (LoLLMs) Web UI
application. These routes allow users to
"""
from fastapi import APIRouter, Request, UploadFile, File, HTTPException
from fastapi.responses import PlainTextResponse
from lollms_webui import LOLLMSWebUI
from pydantic import BaseModel
from starlette.responses import StreamingResponse
from lollms.types import MSG_OPERATION_TYPE
from lollms.main_config import BaseConfig
from lollms.utilities import find_next_available_filename, output_file_path_to_url, detect_antiprompt, remove_text_from_string, trace_exception, find_first_available_file_index, add_period, PackageManager
from lollms.security import sanitize_path, validate_path, check_access
from pathlib import Path
from ascii_colors import ASCIIColors
import os
import platform

# ----------------------- Defining router and main class ------------------------------

router = APIRouter()
lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance()

class Identification(BaseModel):
    """Request payload identifying the calling client.

    The client_id is checked against the server via check_access before
    any installation endpoint is allowed to proceed.
    """
    # Unique id of the client session requesting the operation
    client_id: str

# ----------------------- voice ------------------------------
@router.post("/install_xtts")
def install_xtts(data: Identification):
check_access(lollmsElfServer, data.client_id)
try:
if lollmsElfServer.config.headless_server_mode:
return {"status":False,"error":"Service installation is blocked when in headless mode for obvious security reasons!"}

if lollmsElfServer.config.host!="localhost" and lollmsElfServer.config.host!="127.0.0.1":
return {"status":False,"error":"Service installation is blocked when the server is exposed outside for very obvious reasons!"}

lollmsElfServer.ShowBlockingMessage("Installing XTTS library\nPlease stand by")
from lollms.services.tts.xtts.lollms_xtts import xtts_install
xtts_install(lollmsElfServer)
ASCIIColors.success("Done")
lollmsElfServer.HideBlockingMessage()
return {"status":True}
except Exception as ex:
lollmsElfServer.HideBlockingMessage()
lollmsElfServer.InfoMessage(f"It looks like I could not install XTT because of this error:\n{ex}")
return {"status":False, 'error':str(ex)}
27 changes: 20 additions & 7 deletions lollms/services/stt/whisper/lollms_whisper.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,17 +17,22 @@
from ascii_colors import ASCIIColors, trace_exception
from lollms.paths import LollmsPaths
import subprocess

import pipmaster as pm
try:
if not PackageManager.check_package_installed("whisper"):
PackageManager.install_package("openai-whisper")
if not pm.is_installed("openai-whisper"):
pm.install("openai-whisper")
try:
install_conda_package("conda-forge::ffmpeg")
except Exception as ex:
trace_exception(ex)
ASCIIColors.red("Couldn't install ffmpeg")
except:
PackageManager.install_package("git+https://github.com/openai/whisper.git")
try:
install_conda_package("conda-forge::ffmpeg")
except Exception as ex:
trace_exception(ex)
ASCIIColors.red("Couldn't install ffmpeg")
pm.install("git+https://github.com/openai/whisper.git")


import whisper
Expand All @@ -41,12 +46,20 @@ def __init__(
output_path=None
):
super().__init__("whisper",app, model, output_path)
self.whisper = whisper.load_model(model)
try:
self.whisper = whisper.load_model(model)
except:
ASCIIColors.red("Couldn't load whisper model!\nWhisper will be disabled")
self.whisper = None
self.ready = True

def transcribe(
    self,
    wave_path: str | Path
) -> str:
    """Transcribe the audio file at wave_path to text using whisper.

    Returns the transcribed text, or an empty string when the whisper
    model could not be loaded (self.whisper is None after a failed
    load in __init__).
    """
    # NOTE(review): the diff fragment showed the old unguarded body and the
    # new guarded body interleaved; this is the new (guarded) version only.
    if self.whisper:
        result = self.whisper.transcribe(str(wave_path))
        return result["text"]
    else:
        ASCIIColors.error("Whisper is broken")
        return ""
4 changes: 4 additions & 0 deletions lollms/services/tts/xtts/lollms_xtts.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
import time
from queue import Queue
import re
import pipmaster as pm

# List of common sampling rates
common_sampling_rates = [8000, 11025, 16000, 22050, 32000, 44100, 48000, 96000, 192000]
Expand All @@ -42,6 +43,9 @@
def closest_sampling_rate(freq, common_rates):
return min(common_rates, key=lambda x: abs(x - freq))

def xtts_install(app=None):
    """Install (or force-update) the coqui "tts" package that powers XTTS.

    Args:
        app: optional application/server instance. Accepted for
             compatibility with the install endpoint, which calls
             xtts_install(lollmsElfServer); it is not used by the
             installation itself.

    Bug fix: the previous zero-argument signature made the endpoint's
    xtts_install(lollmsElfServer) call raise TypeError.
    """
    # force_reinstall guarantees a clean copy even over a broken install.
    pm.install_or_update("tts", force_reinstall=True)

class LollmsXTTS(LollmsTTS):
def __init__(self, app: LollmsApplication, voices_folders: List[str|Path], freq = 22050):
super().__init__("lollms_xtts", app)
Expand Down
21 changes: 21 additions & 0 deletions tests/endoints_unit_tests/components_test/whisper_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# Title LollmsWhisper
# Licence: MIT
# Author : Paris Neo
#

from pathlib import Path
import whisper


if __name__ == "__main__":
# Create a mock LollmsApplication instance
w = whisper.load_model("small")
# Example usage
audio_file_path = Path(r"E:\lollms\custom_voices\ParisNeo_Original_voice.wav")

if audio_file_path.exists():
transcription = w.transcribe(str(audio_file_path))
print("Transcription:")
print(transcription)
else:
print(f"Audio file not found: {audio_file_path}")

0 comments on commit 0450007

Please sign in to comment.