Skip to content
This repository has been archived by the owner on Dec 9, 2024. It is now read-only.

Commit

Permalink
feat: Adapt finbot Makefile. Fix finbot context bug.
Browse files Browse the repository at this point in the history
  • Loading branch information
iusztinpaul committed Oct 9, 2023
1 parent 9c9762e commit 0a5d0b9
Show file tree
Hide file tree
Showing 9 changed files with 47 additions and 62 deletions.
9 changes: 9 additions & 0 deletions .vscode/launch.json
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [

{
"name": "Python: Current File",
"type": "python",
Expand Down Expand Up @@ -118,5 +119,13 @@
"tools.run_batch:build_flow(latest_n_days=2, debug=True)"
]
},
{
"name": "Financial Bot [Dev]",
"type": "python",
"request": "launch",
"module": "tools.run_chain",
"justMyCode": false,
"cwd": "${workspaceFolder}/modules/financial_bot"
}
]
}
13 changes: 8 additions & 5 deletions modules/financial_bot/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -4,26 +4,29 @@ install:
@echo "Installing financial bot..."

poetry env use $(shell which python3.10) && \
export PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring; poetry install
PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring poetry install && \
poetry run pip install torch==2.0.1

install_dev: install
export PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring; poetry install --only dev
PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring poetry install --only dev

install_only_dev:
export PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring; poetry install --only dev
PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring poetry install --only dev

add:
export PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring; poetry add $(package)
PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring poetry add $(package)

add_dev:
export PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring; poetry add --group dev $(package)
PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring poetry add --group dev $(package)


# === Run ===

run:
@echo "Running financial_bot..."

@echo "LD_LIBRARY_PATH: $(LD_LIBRARY_PATH)"

poetry run python -m tools.run_chain


Expand Down
2 changes: 1 addition & 1 deletion modules/financial_bot/financial_bot/embeddings.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ def __init__(

self._tokenizer = AutoTokenizer.from_pretrained(model_id)
self._model = AutoModel.from_pretrained(model_id).to(self._device)
self._movel.eval()
self._model.eval()

@property
def max_input_length(self) -> int:
Expand Down
17 changes: 11 additions & 6 deletions modules/financial_bot/financial_bot/langchain_bot.py
Original file line number Diff line number Diff line change
@@ -1,21 +1,26 @@
import logging

from langchain import chains

from financial_bot import constants
from financial_bot.chains import ContextExtractorChain, FinancialBotQAChain
from financial_bot.embeddings import EmbeddingModelSingleton
from financial_bot.models import build_huggingface_pipeline
from financial_bot.qdrant import build_qdrant_client
from financial_bot.template import get_llm_template
from langchain import chains

logger = logging.getLogger(__name__)


class FinancialBot:
def __init__(self):
def __init__(
self,
llm_model_id: str = constants.LLM_MODEL_ID,
llm_lora_model_id: str = constants.LLM_QLORA_CHECKPOINT,
):
self._qdrant_client = build_qdrant_client()
self._embd_model = EmbeddingModelSingleton()
self._llm_agent = build_huggingface_pipeline()
self._llm_agent = build_huggingface_pipeline(llm_model_id=llm_model_id, llm_lora_model_id=llm_lora_model_id)
self.finbot_chain = self.build_chain()

def build_chain(self) -> chains.SequentialChain:
Expand Down Expand Up @@ -63,7 +68,7 @@ def build_chain(self) -> chains.SequentialChain:
logger.info("Connecting chains into SequentialChain")
seq_chain = chains.SequentialChain(
chains=[context_retrieval_chain, llm_generator_chain],
input_variables=["about_me", "question"],
input_variables=["about_me", "question", "context"],
output_variables=["response"],
verbose=True,
)
Expand All @@ -76,7 +81,7 @@ def build_chain(self) -> chains.SequentialChain:
)
return seq_chain

def answer(self, about_me: str, question: str) -> str:
def answer(self, about_me: str, question: str, context: str) -> str:
"""
Given a short description about the user and a question make the LLM
generate a response.
Expand All @@ -94,7 +99,7 @@ def answer(self, about_me: str, question: str) -> str:
LLM generated response.
"""
try:
inputs = {"about_me": about_me, "question": question}
inputs = {"about_me": about_me, "question": question, "context": context}
response = self.finbot_chain.run(inputs)
return response
except KeyError as e:
Expand Down
15 changes: 13 additions & 2 deletions modules/financial_bot/financial_bot/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,9 +45,20 @@ def download_from_model_registry(model_id: str, cache_dir: Optional[Path] = None
return model_dir


def build_huggingface_pipeline():
def build_huggingface_pipeline(
llm_model_id: str,
llm_lora_model_id: str,
gradient_checkpointing: bool = False,
cache_dir: Optional[Path] = None,
):
"""Using our custom LLM + Finetuned checkpoint we create a HF pipeline"""
model, tokenizer, _ = build_qlora_model()

model, tokenizer, _ = build_qlora_model(
pretrained_model_name_or_path=llm_model_id,
peft_pretrained_model_name_or_path=llm_lora_model_id,
gradient_checkpointing=gradient_checkpointing,
cache_dir=cache_dir
)
model.eval()

pipe = pipeline(
Expand Down
Loading

0 comments on commit 0a5d0b9

Please sign in to comment.