From d43a1ff318311fe3cf701e1e853b664029de576e Mon Sep 17 00:00:00 2001 From: ashish Date: Tue, 2 Apr 2024 17:08:25 +0530 Subject: [PATCH 1/4] Support for config.ini for persistent settings --- configs/config.ini | 4 ++++ core.py | 16 +++++++++++++++- webui.py | 12 +++++++++++- 3 files changed, 30 insertions(+), 2 deletions(-) create mode 100644 configs/config.ini diff --git a/configs/config.ini b/configs/config.ini new file mode 100644 index 0000000..33171d7 --- /dev/null +++ b/configs/config.ini @@ -0,0 +1,4 @@ +[Settings] +execution_provider = cpu +repo_id = stabilityai/stable-code-instruct-3b + diff --git a/core.py b/core.py index 43ff4f8..f02e912 100644 --- a/core.py +++ b/core.py @@ -1,6 +1,8 @@ import os, shutil +from configparser import ConfigParser default_repo_id = "stabilityai/stable-code-instruct-3b" +config_path = "configs/config.ini" default_repo_id_parts = default_repo_id.split("/") default_model_folder = f"models--{'--'.join(default_repo_id_parts)}" @@ -22,4 +24,16 @@ def remove_dir(path): shutil.rmtree(model_path) print("successfully removed cached models!") except OSError as e: - print(f"Error: {e.strerror}") \ No newline at end of file + print(f"Error: {e.strerror}") + +def read_config(): + config = ConfigParser() + config.read(config_path) + if config.get('Settings', 'repo_id') == "" and config.get('Settings', 'execution_provider') == "": + return None, config + else: + return config, config + +def update_config(config): + with open(config_path, 'w') as configfile: + config.write(configfile) \ No newline at end of file diff --git a/webui.py b/webui.py index d44816f..68148ab 100644 --- a/webui.py +++ b/webui.py @@ -7,13 +7,23 @@ from langchain.llms.base import LLM from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, AutoConfig -from core import list_download_models, format_model_name, remove_dir, default_repo_id +from core import list_download_models, remove_dir, default_repo_id, read_config ,update_config cache_dir 
= os.path.join(os.getcwd(), "models") saved_models_list = list_download_models(cache_dir) #check if cuda is available device = 'cuda' if torch.cuda.is_available() else 'cpu' +state, config = read_config() +if state == None: + config.set('Settings', 'execution_provider', device) + config.set('Settings', 'repo_id', default_repo_id) + + update_config(config) +else: + default_repo_id = config.get('Settings', 'repo_id') + device = config.get('Settings', 'execution_provider') + def initialize_model_and_tokenizer(model_name): config = AutoConfig.from_pretrained(model_name, cache_dir=cache_dir) From 419d6be635bf1daf2058743923506de6e0abb1d4 Mon Sep 17 00:00:00 2001 From: Subhanshu0027 Date: Wed, 3 Apr 2024 11:11:47 +0530 Subject: [PATCH 2/4] feat: Implement dynamic config.ini updates and saves model information and config --- core.py | 9 ++++++++- webui.py | 6 ++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/core.py b/core.py index f02e912..3df9dab 100644 --- a/core.py +++ b/core.py @@ -33,7 +33,14 @@ def read_config(): return None, config else: return config, config + # return config -def update_config(config): +# def update_config(config): +# with open(config_path, 'w') as configfile: +# config.write(configfile) + +def update_config(config, **kwargs): + for key, value in kwargs.items(): + config.set('Settings', key, value) with open(config_path, 'w') as configfile: config.write(configfile) \ No newline at end of file diff --git a/webui.py b/webui.py index 68148ab..2df01e5 100644 --- a/webui.py +++ b/webui.py @@ -15,6 +15,7 @@ #check if cuda is available device = 'cuda' if torch.cuda.is_available() else 'cpu' state, config = read_config() +print(state,config) if state == None: config.set('Settings', 'execution_provider', device) config.set('Settings', 'repo_id', default_repo_id) @@ -127,12 +128,16 @@ def removeModelCache(): def updateExecutionProvider(provider): if provider == "cuda": if torch.cuda.is_available(): + device = "cuda" + 
update_config(config, execution_provider=device) model.cuda() print("Model loaded in cuda", model) else: raise gr.Error("Torch not compiled with CUDA enabled. Please make sure cuda is installed.") else: + device = "cpu" + update_config(config, execution_provider=device) model.cpu() def loadModel(repo_id): @@ -140,6 +145,7 @@ def loadModel(repo_id): if repo_id: model, tokenizer = initialize_model_and_tokenizer(repo_id) llm_chain, llm = init_chain(model, tokenizer) + update_config(config, repo_id=repo_id) return gr.update(value=repo_id) else: raise gr.Error("Repo can not be empty!") From aec728f070b36b5c6f8ca22a23f1886377593084 Mon Sep 17 00:00:00 2001 From: Subhanshu0027 Date: Wed, 3 Apr 2024 11:30:20 +0530 Subject: [PATCH 3/4] feat: Implement dynamic config.ini updates and saves model information and config --- .gitignore | 2 +- configs/config.ini | 4 ++-- core.py | 5 ----- webui.py | 2 +- 4 files changed, 4 insertions(+), 9 deletions(-) diff --git a/.gitignore b/.gitignore index 2a1d31d..4b314de 100644 --- a/.gitignore +++ b/.gitignore @@ -2,7 +2,7 @@ models/models--* models/.locks models/tmp* - +configs/config.ini #compiled files *.pyc diff --git a/configs/config.ini b/configs/config.ini index 33171d7..759f277 100644 --- a/configs/config.ini +++ b/configs/config.ini @@ -1,4 +1,4 @@ [Settings] -execution_provider = cpu -repo_id = stabilityai/stable-code-instruct-3b +execution_provider = +repo_id = diff --git a/core.py b/core.py index 3df9dab..37cd89c 100644 --- a/core.py +++ b/core.py @@ -33,11 +33,6 @@ def read_config(): return None, config else: return config, config - # return config - -# def update_config(config): -# with open(config_path, 'w') as configfile: -# config.write(configfile) def update_config(config, **kwargs): for key, value in kwargs.items(): diff --git a/webui.py b/webui.py index 2df01e5..f5abd5f 100644 --- a/webui.py +++ b/webui.py @@ -7,7 +7,7 @@ from langchain.llms.base import LLM from transformers import AutoModelForCausalLM, AutoTokenizer, 
TextIteratorStreamer, AutoConfig -from core import list_download_models, remove_dir, default_repo_id, read_config ,update_config +from core import list_download_models, remove_dir, default_repo_id, read_config, update_config cache_dir = os.path.join(os.getcwd(), "models") saved_models_list = list_download_models(cache_dir) From bb70ee43c0b5297d62a37a86be7c905b8e68e593 Mon Sep 17 00:00:00 2001 From: Subhanshu0027 Date: Wed, 3 Apr 2024 12:04:53 +0530 Subject: [PATCH 4/4] Remove duplicates and logging --- webui.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/webui.py b/webui.py index f5abd5f..588a584 100644 --- a/webui.py +++ b/webui.py @@ -15,7 +15,6 @@ #check if cuda is available device = 'cuda' if torch.cuda.is_available() else 'cpu' state, config = read_config() -print(state,config) if state == None: config.set('Settings', 'execution_provider', device) config.set('Settings', 'repo_id', default_repo_id) @@ -129,7 +128,6 @@ def updateExecutionProvider(provider): if provider == "cuda": if torch.cuda.is_available(): device = "cuda" - update_config(config, execution_provider=device) model.cuda() print("Model loaded in cuda", model) else: @@ -137,9 +135,10 @@ def updateExecutionProvider(provider): else: device = "cpu" - update_config(config, execution_provider=device) model.cpu() + update_config(config, execution_provider=provider) + def loadModel(repo_id): global llm_chain, llm if repo_id: