Skip to content

Commit

Permalink
added genai chatbot
Browse files Browse the repository at this point in the history
  • Loading branch information
anurag6569201 committed Jul 27, 2024
1 parent f1a3e76 commit 3a78f81
Show file tree
Hide file tree
Showing 8 changed files with 490 additions and 0 deletions.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Empty file.
Binary file added chatbot/LLM_Model/db/chroma.sqlite3
Binary file not shown.
Binary file not shown.
402 changes: 402 additions & 0 deletions chatbot/LLM_Model/test.ipynb

Large diffs are not rendered by default.

88 changes: 88 additions & 0 deletions chatbot/LLM_Model/test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
import logging
import os

from dotenv import load_dotenv
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain_community.document_loaders import PyPDFDirectoryLoader
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_google_genai import GoogleGenerativeAIEmbeddings

# Load environment variables from a local .env file (expects GOOGLE_API_KEY).
load_dotenv()
google_gemini_api=os.getenv("GOOGLE_API_KEY")



# Ingest every PDF in the local "pdfs" directory and split the documents into
# 400-character chunks with 100 characters of overlap so neighbouring chunks
# share context for retrieval.
pdf_loader=PyPDFDirectoryLoader("pdfs")
data=pdf_loader.load()
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=400,
    chunk_overlap=100
)
text_chunks=text_splitter.split_documents(data)


# Build a persistent Chroma vector store from the chunks using Google's
# embedding-001 model, flush it to disk, then drop the in-memory handle and
# reopen from the persisted directory so later queries use the on-disk store.
persist_directory="db"
embedding = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
vectordb=Chroma.from_documents(
    documents=text_chunks,
    embedding=embedding,
    persist_directory=persist_directory
)
vectordb.persist()
vectordb=None
vectordb=Chroma(persist_directory=persist_directory,embedding_function=embedding)


# NOTE(review): "retriver" is a typo for "retriever", but the name is
# referenced further down the file — renaming must be done everywhere at once.
retriver=vectordb.as_retriever()

# Gemini 1.5 Pro chat model used as the answering LLM.
llm_model=ChatGoogleGenerativeAI(model="gemini-1.5-pro",google_api_key=google_gemini_api)

# System instructions for the chatbot. "{context}" is filled by the retrieval
# chain with the documents fetched from the vector store.
# Adjacent string literals are concatenated by Python, so each piece ends with
# punctuation and a space — the original literals ran together ("IndoreYou
# also...") and contained typos ("Muncipal", "consisely"), both fixed here.
system_prompt = (
    "You have expertise on Municipal corporations and you are well aware of "
    "the Municipal Corporation Indore; you have all the information regarding Indore. "
    "You also have some additional data from the dataset of IMC Indore. "
    "Provide the answer concisely. "
    "Provide the answer under 150 words. "
    "Context: {context}"
)

from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate

# Chat template: the system instructions followed by the user's raw question.
_messages = [
    ("system", system_prompt),
    ("human", "{input}"),
]
prompt = ChatPromptTemplate.from_messages(_messages)

# "Stuff" the retrieved documents into the prompt, then wire the retriever in
# front of the LLM so each query is answered from the indexed PDFs.
question_answer_chain = create_stuff_documents_chain(llm_model, prompt)
chain = create_retrieval_chain(retriver, question_answer_chain)

# Basic rolling-window conversation memory.
class Memory:
    """Fixed-size rolling transcript of the conversation.

    Keeps at most ``max_entries`` exchanges; the oldest entry is discarded
    once the limit is exceeded.
    """

    def __init__(self, max_entries=10):
        # The limit was previously hard-coded to 10; it is now a
        # backward-compatible parameter with the same default.
        self.max_entries = max_entries
        self.history = []

    def add(self, entry):
        """Append one exchange, evicting the oldest beyond the limit."""
        self.history.append(entry)
        if len(self.history) > self.max_entries:
            self.history.pop(0)

    def get_context(self):
        """Return the retained history joined into one space-separated string."""
        return " ".join(self.history)

# Shared conversation memory used by get_response below.
memory = Memory()

def get_response(query):
    """Answer *query* via the retrieval chain, threading prior turns as context.

    Returns the model's answer string (possibly empty), or a fallback apology
    when the chain raises. The exchange is added to the shared memory only on
    success.
    """
    try:
        context = memory.get_context()
        response = chain.invoke({"input": query, "context": context})
        answer = response.get('answer', '')
        memory.add(f"User: {query}\nAI: {answer}")
        return answer
    except Exception:
        # Best-effort fallback keeps the bot responsive, but log the failure
        # instead of silently swallowing it as the original did.
        logging.exception("chatbot query failed: %r", query)
        return "Sorry, I couldn't process your request."

# Manual smoke test: one hard-coded citizen query ("pithole" is a typo for
# "pothole" in the demo input, kept as-is since it is runtime data).
response=get_response("i found pithole near my home indore")
print(response)

0 comments on commit 3a78f81

Please sign in to comment.