diff --git a/casalioy/CustomChains.py b/casalioy/CustomChains.py
new file mode 100644
index 0000000..f2b6384
--- /dev/null
+++ b/casalioy/CustomChains.py
@@ -0,0 +1,160 @@
+"""Custom chains for LLM"""
+
+from langchain import PromptTemplate
+from langchain.base_language import BaseLanguageModel
+from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR
+from langchain.schema import Document
+from langchain.vectorstores.base import VectorStoreRetriever
+
+from casalioy.load_env import (
+ model_n_ctx,
+ n_forward_documents,
+ n_retrieve_documents,
+)
+from casalioy.utils import print_HTML
+
+
+class BaseQA:
+ """base class for Question-Answering"""
+
+ def __init__(self, llm: BaseLanguageModel, retriever: VectorStoreRetriever, prompt: PromptTemplate = None):
+ self.llm = llm
+ self.retriever = retriever
+ self.prompt = prompt or self.default_prompt
+        # forward up to "k" documents to the chain, chosen among the "fetch_k" candidates retrieved from the store
+        self.retriever.search_kwargs = {**self.retriever.search_kwargs, "k": n_forward_documents, "fetch_k": n_retrieve_documents}
+
+ @property
+ def default_prompt(self) -> PromptTemplate:
+ """the default prompt"""
+ return PROMPT_SELECTOR.get_prompt(self.llm)
+
+ def fetch_documents(self, search: str) -> list[Document]:
+ """fetch documents from retriever"""
+ return self.retriever.get_relevant_documents(search)
+
+ def __call__(self, input_str: str) -> dict:
+        """ask the question directly to the LLM, without document retrieval"""
+        return {"result": self.llm.predict(self.prompt.format_prompt(question=input_str).to_string())}
+
+
+class StuffQA(BaseQA):
+ """custom QA close to a stuff chain
+    Unlike the default stuff chain, which may exceed the context size, this chain stuffs only as many documents as the context allows.
+    Because it uses the whole context, it is meant for one-shot questions: no room is left for a follow-up that would include the previous exchange.
+ """
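+    # Usage sketch (illustrative only; assumes an LLM and retriever initialized as in startLLM.py):
+    #   qa = StuffQA(llm=llm, retriever=db.as_retriever(search_type="mmr"))
+    #   res = qa("What do the documents say about X?")
+    #   print(res["result"], len(res["source_documents"]))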
+
+ @property
+ def default_prompt(self) -> PromptTemplate:
+ """the default prompt"""
+ prompt = """HUMAN:
+Answer the question using ONLY the given extracts from (possibly unrelated and irrelevant) documents, not your own knowledge.
+If you are unsure of the answer or if it isn't provided in the extracts, answer "Unknown[STOP]".
+Conclude your answer with "[STOP]" when you're finished.
+
+Question: {question}
+
+--------------
+Here are the extracts:
+{context}
+
+--------------
+Remark: do not repeat the question!
+
+ASSISTANT:
+"""
+ return PromptTemplate(template=prompt, input_variables=["context", "question"])
+
+ @staticmethod
+ def context_prompt_str(documents: list[Document]) -> str:
+        """build the context string listing the given documents as numbered extracts"""
+ prompt = "".join(f"Extract {i + 1}: {document.page_content}\n\n" for i, document in enumerate(documents))
+ return prompt.strip()
+
+ def __call__(self, input_str: str) -> dict:
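+        """ask a question: greedily stuff retrieved documents into the prompt until the context budget (n_ctx minus max_tokens) is used up"""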
+ all_documents, documents = self.fetch_documents(input_str), []
+ for document in all_documents:
+ documents.append(document)
+ context_str = self.context_prompt_str(documents)
+            # stop once the formatted prompt no longer leaves room for the answer (max_tokens) within the model context
+            if (
+                self.llm.get_num_tokens(self.prompt.format_prompt(question=input_str, context=context_str).to_string())
+                > model_n_ctx - self.llm.dict()["max_tokens"]
+            ):
+ documents.pop()
+ break
+ print_HTML("Stuffed {n} documents in the context", n=len(documents))
+ context_str = self.context_prompt_str(documents)
+ formatted_prompt = self.prompt.format_prompt(question=input_str, context=context_str).to_string()
+ return {"result": self.llm.predict(formatted_prompt), "source_documents": documents}
+
+
+class RefineQA(BaseQA):
+    """custom QA close to a refine chain: the first document seeds an answer, each further document refines it"""
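+    # Usage sketch (illustrative only; same assumptions as StuffQA above):
+    #   qa = RefineQA(llm=llm, retriever=db.as_retriever(search_type="mmr"))
+    #   print(qa("What do the documents say about X?")["result"])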
+
+ @property
+ def default_prompt(self) -> PromptTemplate:
+ """the default prompt"""
+ prompt = f"""HUMAN:
+Answer the question using ONLY the given extracts from a (possibly irrelevant) document, not your own knowledge.
+If you are unsure of the answer or if it isn't provided in the extract, answer "Unknown[STOP]".
+Conclude your answer with "[STOP]" when you're finished.
+Avoid adding any extraneous information.
+
+Question:
+-----------------
+{{question}}
+
+Extract:
+-----------------
+{{context}}
+
+ASSISTANT:
+"""
+ return PromptTemplate(template=prompt, input_variables=["context", "question"])
+
+ @property
+ def refine_prompt(self) -> PromptTemplate:
+ """prompt to use for the refining step"""
+ prompt = f"""HUMAN:
+Refine the original answer to the question using the new (possibly irrelevant) document extract.
+Use ONLY the information from the extract and the previous answer, not your own knowledge.
+The extract may not be relevant at all to the question.
+Conclude your answer with "[STOP]" when you're finished.
+Avoid adding any extraneous information.
+
+Question:
+-----------------
+{{question}}
+
+Original answer:
+-----------------
+{{previous_answer}}
+
+New extract:
+-----------------
+{{context}}
+
+Reminder:
+-----------------
+If the extract is not relevant or helpful, don't even talk about it. Simply copy the original answer, without adding anything.
+Do not copy the question.
+
+ASSISTANT:
+"""
+ return PromptTemplate(template=prompt, input_variables=["context", "question", "previous_answer"])
+
+ def __call__(self, input_str: str) -> dict:
+        """ask a question: answer from the first document, then refine that answer with each remaining document"""
+ documents = self.fetch_documents(input_str)
+        last_answer = None
+ for i, doc in enumerate(documents):
+ print_HTML("Refining from document {i}/{N}", i=i + 1, N=len(documents))
+ prompt = self.default_prompt if i == 0 else self.refine_prompt
+ if i == 0:
+ formatted_prompt = prompt.format_prompt(question=input_str, context=doc.page_content)
+ else:
+ formatted_prompt = prompt.format_prompt(question=input_str, context=doc.page_content, previous_answer=last_answer)
+ last_answer = self.llm.predict(formatted_prompt.to_string())
+ return {
+ "result": f"{last_answer}",
+ "source_documents": documents,
+ }
diff --git a/casalioy/dev_debug_formatting.py b/casalioy/dev_debug_formatting.py
new file mode 100644
index 0000000..15e1ef8
--- /dev/null
+++ b/casalioy/dev_debug_formatting.py
@@ -0,0 +1,18 @@
+"""dev utility to debug formatting problems arising in print_HTML"""
+from prompt_toolkit import HTML
+
+from casalioy.utils import print_HTML
+
+# To capture a failing string, temporarily add this to print_HTML:
+# with open("temp.txt", "w", encoding="utf-8") as f:
+# f.write(text.format(**kwargs))
+
+with open("temp.txt", "r", encoding="utf-8") as f:
+ s = f.read()
+
+escape_one = lambda v: v.replace("\f", " ").replace("\b", "\\")
+s = escape_one(s)
+
+print(s)
+print(HTML(s))
+print_HTML(s)
diff --git a/casalioy/load_env.py b/casalioy/load_env.py
index 09abac4..90f0a2f 100644
--- a/casalioy/load_env.py
+++ b/casalioy/load_env.py
@@ -11,7 +11,6 @@
load_dotenv()
text_embeddings_model = os.environ.get("TEXT_EMBEDDINGS_MODEL")
text_embeddings_model_type = os.environ.get("TEXT_EMBEDDINGS_MODEL_TYPE")
-model_n_ctx = int(os.environ.get("MODEL_N_CTX"))
use_mlock = os.environ.get("USE_MLOCK").lower() == "true"
# ingest
@@ -23,6 +22,8 @@
# generate
model_type = os.environ.get("MODEL_TYPE")
model_path = os.environ.get("MODEL_PATH")
+model_n_ctx = int(os.environ.get("MODEL_N_CTX"))
+model_max_tokens = int(os.environ.get("MODEL_MAX_TOKENS"))
model_temp = float(os.environ.get("MODEL_TEMP", "0.8"))
model_stop = os.environ.get("MODEL_STOP", "")
model_stop = model_stop.split(",") if model_stop else []
diff --git a/casalioy/startLLM.py b/casalioy/startLLM.py
index daf439e..378abe6 100644
--- a/casalioy/startLLM.py
+++ b/casalioy/startLLM.py
@@ -9,10 +9,12 @@
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.formatted_text.html import html_escape
+from casalioy.CustomChains import RefineQA, StuffQA
from casalioy.load_env import (
chain_type,
get_embedding_model,
get_prompt_template_kwargs,
+ model_max_tokens,
model_n_ctx,
model_path,
model_stop,
@@ -36,7 +38,7 @@ def __init__(
db_path: str,
model_path: str,
n_ctx: int,
- temperature: float,
+ model_temp: float,
stop: list[str],
use_mlock: bool,
n_gpu_layers: int,
@@ -55,18 +57,19 @@ def __init__(
llm = LlamaCpp(
model_path=model_path,
n_ctx=n_ctx,
- temperature=temperature,
+ temperature=model_temp,
stop=stop,
callbacks=callbacks,
verbose=True,
n_threads=6,
n_batch=1000,
use_mlock=use_mlock,
+ n_gpu_layers=n_gpu_layers,
+ max_tokens=model_max_tokens,
)
- # Need this hack because this param isn't yet supported by the python lib
- state = llm.client.__getstate__()
- state["n_gpu_layers"] = n_gpu_layers
- llm.client.__setstate__(state)
+                # Fix wrong default: count tokens with the model's own tokenizer via llama.cpp instead of LangChain's generic fallback
+ object.__setattr__(llm, "get_num_tokens", lambda text: len(llm.client.tokenize(b" " + text.encode("utf-8"))))
+
case "GPT4All":
from langchain.llms import GPT4All
@@ -80,13 +83,20 @@ def __init__(
case _:
raise ValueError("Only LlamaCpp or GPT4All supported right now. Make sure you set up your .env correctly.")
- self.qa = RetrievalQA.from_chain_type(
- llm=llm,
- chain_type=chain_type,
- retriever=self.qdrant_langchain.as_retriever(search_type="mmr"),
- return_source_documents=True,
- chain_type_kwargs=get_prompt_template_kwargs(),
- )
+ self.llm = llm
+ retriever = self.qdrant_langchain.as_retriever(search_type="mmr")
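+        # CHAIN_TYPE "betterstuff" / "betterrefine" select the custom chains from CustomChains.py; any other value falls back to LangChain's RetrievalQA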
+ if chain_type == "betterstuff":
+ self.qa = StuffQA(retriever=retriever, llm=self.llm)
+ elif chain_type == "betterrefine":
+ self.qa = RefineQA(retriever=retriever, llm=self.llm)
+ else:
+ self.qa = RetrievalQA.from_chain_type(
+ llm=self.llm,
+ chain_type=chain_type,
+ retriever=retriever,
+ return_source_documents=True,
+ chain_type_kwargs=get_prompt_template_kwargs(),
+ )
self.qa.retriever.search_kwargs = {**self.qa.retriever.search_kwargs, "k": n_forward_documents, "fetch_k": n_retrieve_documents}
def prompt_once(self, query: str) -> tuple[str, str]:
diff --git a/casalioy/utils.py b/casalioy/utils.py
index 77c1b3a..7160262 100644
--- a/casalioy/utils.py
+++ b/casalioy/utils.py
@@ -22,26 +22,31 @@
)
+def escape_for_html(text, **kwargs) -> str:
+    """escape characters that break prompt_toolkit's HTML parsing; kwargs are modified in-place."""
+ escape_one = lambda v: v.replace("\f", " ").replace("\b", "\\")
+ for k, v in kwargs.items():
+ kwargs[k] = escape_one(str(v))
+ text = escape_one(text)
+ return text
+
+
def print_HTML(text: str, **kwargs) -> None:
"""print formatted HTML text"""
try:
- for k, v in kwargs.items(): # necessary
- kwargs[k] = str(v).replace("\f", "")
- text = text.replace("\f", "")
+ text = escape_for_html(text, **kwargs)
print_formatted_text(HTML(text).format(**kwargs), style=style)
except ExpatError:
- print(text)
+ print(text.format(**kwargs))
def prompt_HTML(session: PromptSession, prompt: str, **kwargs) -> str:
"""print formatted HTML text"""
try:
- for k, v in kwargs.items(): # necessary
- kwargs[k] = str(v).replace("\f", "")
- prompt = prompt.replace("\f", "")
+ prompt = escape_for_html(prompt, **kwargs)
return session.prompt(HTML(prompt).format(**kwargs), style=style)
except ExpatError:
- return input(prompt)
+ return input(prompt.format(**kwargs))
def download_if_repo(path: str, file: str = None, allow_patterns: str | list[str] = None) -> str:
diff --git a/example.env b/example.env
index 9aa6465..3505346 100644
--- a/example.env
+++ b/example.env
@@ -1,5 +1,4 @@
# Generic
-MODEL_N_CTX=1024
TEXT_EMBEDDINGS_MODEL=sentence-transformers/all-MiniLM-L6-v2
TEXT_EMBEDDINGS_MODEL_TYPE=HF # LlamaCpp or HF
USE_MLOCK=true
@@ -14,8 +13,10 @@ INGEST_CHUNK_OVERLAP=50
MODEL_TYPE=LlamaCpp # GPT4All or LlamaCpp
MODEL_PATH=eachadea/ggml-vicuna-7b-1.1/ggml-vic7b-q5_1.bin
MODEL_TEMP=0.8
+MODEL_N_CTX=1024 # Max total size of prompt+answer, in tokens
+MODEL_MAX_TOKENS=256 # Max size of the answer, in tokens
MODEL_STOP=[STOP]
-CHAIN_TYPE=stuff
+CHAIN_TYPE=betterstuff
N_RETRIEVE_DOCUMENTS=100 # How many documents to retrieve from the db
-N_FORWARD_DOCUMENTS=6 # How many documents to forward to the LLM, chosen among those retrieved
+N_FORWARD_DOCUMENTS=100 # How many documents to forward to the LLM, chosen among those retrieved
N_GPU_LAYERS=4
diff --git a/poetry.lock b/poetry.lock
index 7fb4812..808fce2 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -888,14 +888,14 @@ mime = ["python-magic (>=0.4.27,<0.5)"]
[[package]]
name = "faker"
-version = "18.7.0"
+version = "18.9.0"
description = "Faker is a Python package that generates fake data for you."
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "Faker-18.7.0-py3-none-any.whl", hash = "sha256:38dbc3b80e655d7301e190426ab30f04b6b7f6ca4764c5dd02772ffde0fa6dcd"},
- {file = "Faker-18.7.0.tar.gz", hash = "sha256:f02c6d3fdb5bc781f80b440cf2bdec336ed47ecfb8d620b20c3d4188ed051831"},
+ {file = "Faker-18.9.0-py3-none-any.whl", hash = "sha256:defe9ed618a67ebf0f3eb1895e198c2355a7128a09087a6dce342ef2253263ea"},
+ {file = "Faker-18.9.0.tar.gz", hash = "sha256:80a5ea1464556c06b98bf47ea3adc7f33811a1182518d847860b1874080bd3c9"},
]
[package.dependencies]
@@ -1649,14 +1649,14 @@ files = [
[[package]]
name = "langchain"
-version = "0.0.168"
+version = "0.0.171"
description = "Building applications with LLMs through composability"
category = "dev"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
- {file = "langchain-0.0.168-py3-none-any.whl", hash = "sha256:122590f9ccb343f19eefc5f7a548c96fab10d15fcf66fc33c0d9937b157f1c6b"},
- {file = "langchain-0.0.168.tar.gz", hash = "sha256:ed1a38a5d0bff9f06250a928be25ca929567e36d409df8ca9f7a7a33a7b10790"},
+ {file = "langchain-0.0.171-py3-none-any.whl", hash = "sha256:ac014d1912bdbadf608120b29981e4177f293bcdf50e0987f682c1f34f3d3b3e"},
+ {file = "langchain-0.0.171.tar.gz", hash = "sha256:d32dba400c35a71221bb7e903175ee5ea4e9decf4354cedd070adf95fb1e4d16"},
]
[package.dependencies]
@@ -1673,11 +1673,11 @@ SQLAlchemy = ">=1.4,<3"
tenacity = ">=8.1.0,<9.0.0"
[package.extras]
-all = ["O365 (>=2.0.26,<3.0.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.2.6,<0.3.0)", "arxiv (>=1.4,<2.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "beautifulsoup4 (>=4,<5)", "clickhouse-connect (>=0.5.14,<0.6.0)", "cohere (>=3,<4)", "deeplake (>=3.3.0,<4.0.0)", "docarray (>=0.31.0,<0.32.0)", "duckduckgo-search (>=2.8.6,<3.0.0)", "elasticsearch (>=8,<9)", "faiss-cpu (>=1,<2)", "google-api-python-client (==2.70.0)", "google-search-results (>=2,<3)", "gptcache (>=0.1.7)", "hnswlib (>=0.7.0,<0.8.0)", "html2text (>=2020.1.16,<2021.0.0)", "huggingface_hub (>=0,<1)", "jina (>=3.14,<4.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lancedb (>=0.1,<0.2)", "lark (>=1.1.5,<2.0.0)", "manifest-ml (>=0.0.1,<0.0.2)", "networkx (>=2.6.3,<3.0.0)", "nlpcloud (>=1,<2)", "nltk (>=3,<4)", "nomic (>=1.0.43,<2.0.0)", "openai (>=0,<1)", "opensearch-py (>=2.0.0,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pexpect (>=4.8.0,<5.0.0)", "pgvector (>=0.1.6,<0.2.0)", "pinecone-client (>=2,<3)", "pinecone-text (>=0.4.2,<0.5.0)", "protobuf (==3.19)", "psycopg2-binary (>=2.9.5,<3.0.0)", "pyowm (>=3.3.0,<4.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pytesseract (>=0.3.10,<0.4.0)", "pyvespa (>=0.33.0,<0.34.0)", "qdrant-client (>=1.1.2,<2.0.0)", "redis (>=4,<5)", "sentence-transformers (>=2,<3)", "spacy (>=3,<4)", "steamship (>=2.16.9,<3.0.0)", "tensorflow-text (>=2.11.0,<3.0.0)", "tiktoken (>=0.3.2,<0.4.0)", "torch (>=1,<3)", "transformers (>=4,<5)", "weaviate-client (>=3,<4)", "wikipedia (>=1,<2)", "wolframalpha (==5.0.0)"]
+all = ["O365 (>=2.0.26,<3.0.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.2.6,<0.3.0)", "arxiv (>=1.4,<2.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "beautifulsoup4 (>=4,<5)", "clickhouse-connect (>=0.5.14,<0.6.0)", "cohere (>=3,<4)", "deeplake (>=3.3.0,<4.0.0)", "docarray (>=0.31.0,<0.32.0)", "duckduckgo-search (>=2.8.6,<3.0.0)", "elasticsearch (>=8,<9)", "faiss-cpu (>=1,<2)", "google-api-python-client (==2.70.0)", "google-search-results (>=2,<3)", "gptcache (>=0.1.7)", "gql (>=3.4.1,<4.0.0)", "hnswlib (>=0.7.0,<0.8.0)", "html2text (>=2020.1.16,<2021.0.0)", "huggingface_hub (>=0,<1)", "jina (>=3.14,<4.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lancedb (>=0.1,<0.2)", "lark (>=1.1.5,<2.0.0)", "manifest-ml (>=0.0.1,<0.0.2)", "networkx (>=2.6.3,<3.0.0)", "nlpcloud (>=1,<2)", "nltk (>=3,<4)", "nomic (>=1.0.43,<2.0.0)", "openai (>=0,<1)", "opensearch-py (>=2.0.0,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pexpect (>=4.8.0,<5.0.0)", "pgvector (>=0.1.6,<0.2.0)", "pinecone-client (>=2,<3)", "pinecone-text (>=0.4.2,<0.5.0)", "protobuf (==3.19)", "psycopg2-binary (>=2.9.5,<3.0.0)", "pyowm (>=3.3.0,<4.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pytesseract (>=0.3.10,<0.4.0)", "pyvespa (>=0.33.0,<0.34.0)", "qdrant-client (>=1.1.2,<2.0.0)", "redis (>=4,<5)", "sentence-transformers (>=2,<3)", "spacy (>=3,<4)", "steamship (>=2.16.9,<3.0.0)", "tensorflow-text (>=2.11.0,<3.0.0)", "tiktoken (>=0.3.2,<0.4.0)", "torch (>=1,<3)", "transformers (>=4,<5)", "weaviate-client (>=3,<4)", "wikipedia (>=1,<2)", "wolframalpha (==5.0.0)"]
azure = ["azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "openai (>=0,<1)"]
cohere = ["cohere (>=3,<4)"]
embeddings = ["sentence-transformers (>=2,<3)"]
-extended-testing = ["pdfminer-six (>=20221105,<20221106)", "pypdf (>=3.4.0,<4.0.0)", "tqdm (>=4.48.0)"]
+extended-testing = ["jq (>=1.4.1,<2.0.0)", "lxml (>=4.9.2,<5.0.0)", "pdfminer-six (>=20221105,<20221106)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "tqdm (>=4.48.0)"]
hnswlib = ["docarray (>=0.31.0,<0.32.0)", "hnswlib (>=0.7.0,<0.8.0)", "protobuf (==3.19)"]
in-memory-store = ["docarray (>=0.31.0,<0.32.0)"]
llms = ["anthropic (>=0.2.6,<0.3.0)", "cohere (>=3,<4)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (>=0,<1)", "torch (>=1,<3)", "transformers (>=4,<5)"]
@@ -4062,14 +4062,14 @@ files = [
[[package]]
name = "tldextract"
-version = "3.4.1"
+version = "3.4.2"
description = "Accurately separates a URL's subdomain, domain, and public suffix, using the Public Suffix List (PSL). By default, this includes the public ICANN TLDs and their exceptions. You can optionally support the Public Suffix List's private domains as well."
category = "main"
optional = false
python-versions = ">=3.7"
files = [
- {file = "tldextract-3.4.1-py3-none-any.whl", hash = "sha256:26f646987b01ae2946e7491cce4aaf54129f3489a196a274e6c843ec72968313"},
- {file = "tldextract-3.4.1.tar.gz", hash = "sha256:fa9e50c4a03bede2a1d95dca620d661678484626858ccf388cf9671a0dd497a4"},
+ {file = "tldextract-3.4.2-py3-none-any.whl", hash = "sha256:1f168f0a1c000a757aefed94a5bda10349c58976cb2ef1cc17c6e3b213440521"},
+ {file = "tldextract-3.4.2.tar.gz", hash = "sha256:98e36b0aa3a6d8fd084d80d75ae1372da02027efb556c146a59dfd14457071ba"},
]
[package.dependencies]
@@ -4283,14 +4283,14 @@ telegram = ["requests"]
[[package]]
name = "transformers"
-version = "4.29.1"
+version = "4.29.2"
description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow"
category = "dev"
optional = false
python-versions = ">=3.7.0"
files = [
- {file = "transformers-4.29.1-py3-none-any.whl", hash = "sha256:75f851f2420c26410edbdf4a2a1a5b434ab2b96aea36eb5931d06cc3b2e7b509"},
- {file = "transformers-4.29.1.tar.gz", hash = "sha256:3dc9cd198918e140468edbf37d7edf3b7a75633655ce0771ce323bbf8c118c4d"},
+ {file = "transformers-4.29.2-py3-none-any.whl", hash = "sha256:0ef158b99bad6f4e6652a0d8655fbbe58b4cb788ce7040f320b5d29c7c810a75"},
+ {file = "transformers-4.29.2.tar.gz", hash = "sha256:ed9467661f459f1ce49461d83f18f3b36b6a37f306182dc2ba272935f3b93ebb"},
]
[package.dependencies]
@@ -4783,4 +4783,4 @@ testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more
[metadata]
lock-version = "2.0"
python-versions = ">3.9.7,<3.12"
-content-hash = "1899d9bfd156bf9a9a5814c5e459caf0d718235e49bc78f70f3b033dca7b79ac"
+content-hash = "84be5f4e64ea20c3533e0b54bc7f43912c65572aafd269d6bf2b617309c5f6bf"
diff --git a/pyproject.toml b/pyproject.toml
index a2aaf16..7a585a7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -20,7 +20,7 @@ streamlit-chat = "^0.0.2.2"
streamlit-extras = "^0.2.7"
[tool.poetry.group.LLM.dependencies]
-langchain = "^0.0.168"
+langchain = "^0.0.171"
pygpt4all = "^1.1.0"
qdrant-client = "^1.1.7"
unstructured = "^0.6.6" # Handle ingestion file formats