UIUX11.py (forked from pythaiml/automindx)
103 lines (84 loc) · 3.66 KB
import os
import gradio as gr
import fire
from enum import Enum
from transformers import AutoTokenizer, AutoModelForCausalLM
from huggingface_hub import hf_hub_download
from llama_cpp import Llama  # used below for GGML/GGUF models

# Local project modules
from MASTERMIND import MASTERMIND
from logic import LogicTables
from reasoning import SocraticReasoning
from prediction import Predictor
from epistemic import AutoepistemicAgent
from bdi import Belief, Desire, Intention, Goal, Reward
from memory import save_conversation_memory
from aglm import LlamaModel

class Model_Type(Enum):
    gptq = 1
    ggml = 2
    gguf = 3
    full_precision = 4

def get_model_type(model_name):
    if "gptq" in model_name.lower():
        return Model_Type.gptq
    elif "ggml" in model_name.lower():
        return Model_Type.ggml
    elif "gguf" in model_name.lower():
        return Model_Type.gguf
    else:
        return Model_Type.full_precision

def initialize_model(model_name, model_type):
    models_folder = "./models"
    create_folder_if_not_exists(models_folder)
    try:
        if model_type in [Model_Type.ggml, Model_Type.gguf]:
            # NOTE: the filename must match the actual weight file published in the
            # Hugging Face repo; these defaults are kept from the original script.
            filename = "pytorch_model.bin" if model_type == Model_Type.ggml else "flax_model.msgpack"
            file_path = hf_hub_download(repo_id=model_name, filename=filename, local_dir=models_folder)
            model = Llama(file_path, n_ctx=4096)
            tokenizer = AutoTokenizer.from_pretrained("gpt2")  # Default tokenizer for GGML and GGUF
        else:
            model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
            tokenizer = AutoTokenizer.from_pretrained(model_name)
    except Exception as e:
        print(f"Failed to load model {model_name} with type {model_type}: {e}")
        raise
    return model, tokenizer

def create_folder_if_not_exists(folder_path):
    if not os.path.exists(folder_path):
        os.makedirs(folder_path)

def run_ui(model, tokenizer, is_chat_model, model_type, save_history=True):
    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()
        msg = gr.Textbox()
        clear = gr.Button("Clear")
        conversation_memory = []
        epistemic_agent = AutoepistemicAgent(initial_beliefs={'The sky is blue': True})
        belief = Belief("The sky is blue")  # Initialize Belief

        def user(user_message, memory):
            nonlocal conversation_memory
            # Append the new turn to the chat history so bot() can read it back.
            memory = memory or []
            memory.append([user_message, None])
            conversation_memory = memory

            # Process the belief and simulate adding conflicting information
            belief.process_belief()
            epistemic_agent.add_information({'The sky is blue': False})
            epistemic_agent.revise_beliefs()

            current_beliefs = f"Processed Belief: {belief}"
            memory[-1][1] = current_beliefs
            return "", memory

        def bot(memory):
            nonlocal conversation_memory
            conversation_memory = memory
            instruction = memory[-1][0]
            # Assumes the loaded model exposes generate_contextual_output(),
            # as the project's aglm.LlamaModel wrapper does.
            response = model.generate_contextual_output(instruction)
            memory[-1][1] = f"Response: {response}"
            if save_history:
                save_conversation_memory(conversation_memory)
            return memory

        msg.submit(user, inputs=[msg, chatbot], outputs=[msg, chatbot], queue=False).then(
            bot, inputs=[chatbot], outputs=[chatbot]
        )
        clear.click(lambda: None, inputs=None, outputs=chatbot, queue=False)

    demo.launch(share=False, debug=True)

def main(model_name=None, file_name=None, save_history=True):
    assert model_name, "model_name argument is missing."
    model_type = get_model_type(model_name)
    model, tokenizer = initialize_model(model_name, model_type)
    run_ui(model, tokenizer, 'chat' in model_name.lower(), model_type, save_history=save_history)


if __name__ == '__main__':
    fire.Fire(main)
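
# Example invocation, a sketch only: the model id below is a placeholder, not part of
# this repo. Pass any Hugging Face repo id or local path; the format (gptq/ggml/gguf/
# full precision) is inferred from the name by get_model_type().
#
#   python UIUX11.py --model_name <hf-model-id-or-local-path> --save_history True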