Added voting logic #9

Merged
14 commits merged on Oct 19, 2023
README.md (3 additions, 2 deletions)

@@ -25,15 +25,16 @@ MQ:
   port: <MQ Port>
   server: <MQ Hostname or IP>
   users:
-    mq-chatgpt-api:
+    neon_llm_chat_gpt:
       password: <neon_chatgpt user's password>
       user: neon_chatgpt
-LLM_CHATGPT:
+LLM_CHAT_GPT:
   key: ""
   model: "gpt-3.5-turbo"
   role: "You are trying to give a short answer in less than 40 words."
   context_depth: 3
   max_tokens: 100
+  num_parallel_processes: 2
 ```

For example, if your configuration resides in `~/.config`:
docker_overlay/etc/neon/diana.yaml (3 additions, 2 deletions)

@@ -14,8 +14,9 @@ MQ:
     mq_handler:
       user: neon_api_utils
       password: Klatchat2021
-LLM_CHATGPT:
+LLM_CHAT_GPT:
   model: "gpt-3.5-turbo"
   role: "You are trying to give a short answer in less than 40 words."
   context_depth: 3
-  max_tokens: 100
+  max_tokens: 100
+  num_parallel_processes: 2
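
Both files above configure the same `LLM_CHAT_GPT` section. For orientation, here is a minimal sketch of loading a file with this shape and constructing the wrapper class from this PR; the file path and the plain-PyYAML loader are illustrative assumptions, not the service's actual startup code:

```python
# Illustrative only: read a diana.yaml-style file and build the ChatGPT
# wrapper from its LLM_CHAT_GPT section. The path and the use of plain
# PyYAML are assumptions for this sketch.
from pathlib import Path

import yaml

from neon_llm_chatgpt.chatgpt import ChatGPT

config_path = Path("~/.config/neon/diana.yaml").expanduser()  # hypothetical location
with config_path.open() as config_file:
    config = yaml.safe_load(config_file)

# __init__ in this PR reads key, model, role, context_depth, max_tokens and
# num_parallel_processes from the dict it is given, so pass the subsection.
chat_gpt = ChatGPT(config["LLM_CHAT_GPT"])
```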
neon_llm_chatgpt/chatgpt.py (21 additions, 6 deletions)

@@ -40,21 +40,33 @@ class ChatGPT(NeonLLM):

     def __init__(self, config):
         super().__init__(config)
-        self.model = config["model"]
+        self.model_name = config["model"]
         self.role = config["role"]
         self.context_depth = config["context_depth"]
         self.max_tokens = config["max_tokens"]
         self.api_key = config["key"]
+        self.num_parallel_processes = config["num_parallel_processes"]
         self.warmup()

+    @property
+    def tokenizer(self) -> None:
+        return self._tokenizer
+
+    @property
+    def tokenizer_model_name(self) -> str:
+        return ""
+
     @property
     def model(self) -> openai:
         if self._model is None:
             openai.api_key = self.api_key
             self._model = openai
         return self._model

+    @property
+    def llm_model_name(self) -> str:
+        return self.model_name
+
     @property
     def _system_prompt(self) -> str:
         return self.role
@@ -72,20 +84,20 @@ def get_sorted_answer_indexes(self, question: str, answers: List[str]) -> List[int]:
"""
if not answers:
return []
scores = self._score(question=question, answers=answers)
scores = self._score(prompt=question, targets=answers)
NeonDaniel marked this conversation as resolved.
Show resolved Hide resolved
sorted_items = sorted(zip(range(len(answers)), scores), key=lambda x: x[1])
sorted_items_indexes = [x[0] for x in sorted_items]
return sorted_items_indexes

def _call_model(self, prompt: List[Dict[str]]) -> str:
def _call_model(self, prompt: List[Dict[str, str]]) -> str:
"""
Wrapper for ChatGPT Model generation logic
:param prompt: Input messages sequence
:returns: Output text sequence generated by model
"""

response = openai.ChatCompletion.create(
model=self.model,
model=self.llm_model_name,
messages=prompt,
temperature=0,
max_tokens=self.max_tokens,
@@ -94,7 +106,7 @@ def _call_model(self, prompt: List[Dict[str]]) -> str:

         return text

-    def _assemble_prompt(self, message: str, chat_history: List[List[str]]) -> List[Dict[str]]:
+    def _assemble_prompt(self, message: str, chat_history: List[List[str]]) -> List[Dict[str, str]]:
         """
         Assembles prompt engineering logic
         Setup Guidance:
@@ -126,7 +138,10 @@ def _score(self, prompt: str, targets: List[str]) -> List[float]:
         scores_list = distances_from_embeddings(question_embeddings, answers_embeddings)
         return scores_list

-    def _embeddings(self, question: str, answers: List[str]) -> (List[str], List[List[float]]):
+    def _tokenize(self, prompt: str) -> None:
+        return None
+
+    def _embeddings(self, question: str, answers: List[str]) -> (List[float], List[List[float]]):
         """
         Computes embeddings for the list of provided answers
         :param question: Question for LLM to response to
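
Taken together, these additions form the ranking ("voting") path: `get_sorted_answer_indexes` scores each candidate answer by its embedding distance to the question via `_score`/`_embeddings` and returns the answer indexes ordered best-first. A standalone sketch of that flow follows; it assumes the pre-1.0 `openai` SDK (the same generation this module targets, given `openai.ChatCompletion`) and uses `text-embedding-ada-002` as a stand-in embedding model, since the hunk does not show which model `_embeddings` calls:

```python
# Sketch of the answer-ranking flow added in this PR, not the module's exact code.
# Assumptions: pre-1.0 openai SDK; text-embedding-ada-002 as the embedding model.
from typing import List, Tuple

import openai
from openai.embeddings_utils import distances_from_embeddings

def embed(question: str, answers: List[str]) -> Tuple[List[float], List[List[float]]]:
    # One batched embedding request: first vector is the question, the rest are answers.
    response = openai.Embedding.create(input=[question] + answers,
                                       model="text-embedding-ada-002")
    vectors = [item["embedding"] for item in response["data"]]
    return vectors[0], vectors[1:]

def sorted_answer_indexes(question: str, answers: List[str]) -> List[int]:
    if not answers:
        return []
    question_embedding, answer_embeddings = embed(question, answers)
    # Smaller cosine distance means closer to the question, so an ascending sort
    # puts the best candidate first, mirroring the sorted(zip(...)) in the diff.
    scores = distances_from_embeddings(question_embedding, answer_embeddings,
                                       distance_metric="cosine")
    sorted_items = sorted(zip(range(len(answers)), scores), key=lambda x: x[1])
    return [index for index, _ in sorted_items]

# Example (hypothetical inputs):
# sorted_answer_indexes("What is the capital of France?",
#                       ["Paris", "It rains a lot", "Berlin"])
```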
neon_llm_chatgpt/rmq.py (1 addition, 1 deletion)

@@ -39,7 +39,7 @@ def __init__(self):

     @property
     def name(self):
-        return "chatgpt"
+        return "chat_gpt"

     @property
     def model(self):
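
One plausible reading of why the service name and the configuration section were renamed together in this PR: if the surrounding framework derives the config key from the service name, "chat_gpt" lines up with the new "LLM_CHAT_GPT" section. The actual lookup lives in neon_llm_core and is not part of this diff, so the mapping below is only an illustrative guess:

```python
# Hypothetical helper, not neon_llm_core's real API: shows how a service name
# of "chat_gpt" would correspond to the LLM_CHAT_GPT config section above.
def config_key_for(service_name: str) -> str:
    return f"LLM_{service_name.upper()}"

assert config_key_for("chat_gpt") == "LLM_CHAT_GPT"
```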