Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: Add support for gpt-4o model #2589

Merged
merged 1 commit into from
May 13, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
634 changes: 309 additions & 325 deletions Pipfile.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion backend/modules/assistant/ito/difference.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ async def process_assistant(self):
document_1_to_langchain = document_1_llama_parsed[0].to_langchain_format()
document_2_to_langchain = document_2_llama_parsed[0].to_langchain_format()

-llm = ChatLiteLLM(model="gpt-4-turbo-2024-04-09")
+llm = ChatLiteLLM(model="gpt-4o")

human_prompt = """Given the following two documents, find the difference between them:

Expand Down
4 changes: 1 addition & 3 deletions backend/modules/brain/integrations/GPT4/Brain.py
Original file line number Diff line number Diff line change
Expand Up @@ -166,9 +166,7 @@ def create_graph(self):
return app

def get_chain(self):
-self.function_model = ChatOpenAI(
-    model="gpt-4-turbo", temperature=0, streaming=True
-)
+self.function_model = ChatOpenAI(model="gpt-4o", temperature=0, streaming=True)

self.function_model = self.function_model.bind_tools(self.tools)

Expand Down
1 change: 1 addition & 0 deletions backend/modules/brain/qa_interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ def generate_stream(

def model_compatible_with_function_calling(self, model: str):
if model in [
+"gpt-4o",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-turbo-preview",
Expand Down
3 changes: 2 additions & 1 deletion backend/modules/brain/rags/quivr_rag.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,14 @@
from uuid import UUID

from langchain.chains import ConversationalRetrievalChain
-from langchain_community.embeddings import OllamaEmbeddings
from langchain.llms.base import BaseLLM
from langchain.prompts import HumanMessagePromptTemplate, SystemMessagePromptTemplate
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import FlashrankRerank
from langchain.schema import format_document
from langchain_cohere import CohereRerank
from langchain_community.chat_models import ChatLiteLLM
+from langchain_community.embeddings import OllamaEmbeddings
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
from langchain_core.pydantic_v1 import BaseModel as BaseModelV1
Expand Down Expand Up @@ -136,6 +136,7 @@ def prompt_to_use(self):

def model_compatible_with_function_calling(self):
if self.model in [
+"gpt-4o",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-turbo-preview",
Expand Down
1 change: 1 addition & 0 deletions backend/modules/chat/controller/chat/brainful_chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
"gpt-4-0125-preview",
"gpt-3.5-turbo",
"gpt-4-turbo",
+"gpt-4o",
]


Expand Down
34 changes: 17 additions & 17 deletions backend/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,8 @@ backoff==2.2.1; python_version >= '3.7' and python_version < '4.0'
beautifulsoup4==4.12.3; python_full_version >= '3.6.0'
billiard==4.2.0; python_version >= '3.7'
black==24.4.2; python_version >= '3.8'
-boto3==1.34.101; python_version >= '3.8'
-botocore==1.34.101; python_version >= '3.8'
+boto3==1.34.104; python_version >= '3.8'
+botocore==1.34.104; python_version >= '3.8'
cachetools==5.3.3; python_version >= '3.7'
celery[redis,sqs]==5.4.0; python_version >= '3.8'
certifi==2024.2.2; python_version >= '3.6'
Expand All @@ -29,15 +29,15 @@ click==8.1.7; python_version >= '3.7'
click-didyoumean==0.3.1; python_full_version >= '3.6.2'
click-plugins==1.1.1
click-repl==0.3.0; python_version >= '3.6'
-cohere==5.3.5; python_version >= '3.8' and python_version < '4.0'
+cohere==5.4.0; python_version >= '3.8' and python_version < '4.0'
coloredlogs==15.0.1; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'
colorlog==6.8.2; python_version >= '3.6'
contourpy==1.2.1; python_version >= '3.9'
cryptography==42.0.7; python_version >= '3.7'
cssselect==1.2.0; python_version >= '3.7'
curl-cffi==0.7.0b4; python_version >= '3.8'
cycler==0.12.1; python_version >= '3.8'
-dataclasses-json==0.6.5; python_version >= '3.7' and python_version < '4.0'
+dataclasses-json==0.6.6; python_version >= '3.7' and python_version < '4.0'
datasets==2.19.1; python_full_version >= '3.8.0'
debugpy==1.8.1; python_version >= '3.8'
decorator==5.1.1; python_version >= '3.5'
Expand Down Expand Up @@ -109,35 +109,35 @@ jsonpath-python==1.0.6; python_version >= '3.6'
jsonpointer==2.4; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6'
kiwisolver==1.4.5; python_version >= '3.7'
kombu[sqs]==5.3.7; python_version >= '3.8'
-langchain==0.1.19; python_version < '4.0' and python_full_version >= '3.8.1'
+langchain==0.1.20; python_version < '4.0' and python_full_version >= '3.8.1'
langchain-cohere==0.1.4; python_version < '4.0' and python_full_version >= '3.8.1'
langchain-community==0.0.38; python_version < '4.0' and python_full_version >= '3.8.1'
langchain-core==0.1.52; python_version < '4.0' and python_full_version >= '3.8.1'
langchain-openai==0.1.6; python_version < '4.0' and python_full_version >= '3.8.1'
langchain-text-splitters==0.0.1; python_version < '4.0' and python_full_version >= '3.8.1'
langdetect==1.0.9
-langfuse==2.29.3; python_version < '4.0' and python_full_version >= '3.8.1'
+langfuse==2.30.0; python_version < '4.0' and python_full_version >= '3.8.1'
langgraph==0.0.48; python_version < '4.0' and python_full_version >= '3.9.0'
-langsmith==0.1.56; python_version < '4.0' and python_full_version >= '3.8.1'
+langsmith==0.1.57; python_version < '4.0' and python_full_version >= '3.8.1'
layoutparser[layoutmodels,tesseract]==0.3.4; python_version >= '3.6'
-litellm==1.36.4; python_version not in '2.7, 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7' and python_version >= '3.8'
+litellm==1.37.5; python_version not in '2.7, 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7' and python_version >= '3.8'
llama-cpp-python==0.2.67; python_version >= '3.8'
-llama-index==0.10.35; python_version < '4.0' and python_full_version >= '3.8.1'
+llama-index==0.10.36; python_version < '4.0' and python_full_version >= '3.8.1'
llama-index-agent-openai==0.2.4; python_version < '4.0' and python_full_version >= '3.8.1'
llama-index-cli==0.1.12; python_version < '4.0' and python_full_version >= '3.8.1'
-llama-index-core==0.10.35.post1; python_version < '4.0' and python_full_version >= '3.8.1'
+llama-index-core==0.10.36; python_version < '4.0' and python_full_version >= '3.8.1'
llama-index-embeddings-openai==0.1.9; python_version < '4.0' and python_full_version >= '3.8.1'
llama-index-indices-managed-llama-cloud==0.1.6; python_version < '4.0' and python_full_version >= '3.8.1'
llama-index-legacy==0.9.48; python_version < '4.0' and python_full_version >= '3.8.1'
-llama-index-llms-openai==0.1.18; python_version < '4.0' and python_full_version >= '3.8.1'
-llama-index-multi-modal-llms-openai==0.1.5; python_version < '4.0' and python_full_version >= '3.8.1'
+llama-index-llms-openai==0.1.19; python_version < '4.0' and python_full_version >= '3.8.1'
+llama-index-multi-modal-llms-openai==0.1.6; python_version < '4.0' and python_full_version >= '3.8.1'
llama-index-program-openai==0.1.6; python_version < '4.0' and python_full_version >= '3.8.1'
llama-index-question-gen-openai==0.1.3; python_version < '4.0' and python_full_version >= '3.8.1'
llama-index-readers-file==0.1.22; python_version < '4.0' and python_full_version >= '3.8.1'
llama-index-readers-llama-parse==0.1.4; python_version < '4.0' and python_full_version >= '3.8.1'
llama-parse==0.4.2; python_version < '4.0' and python_full_version >= '3.8.1'
llamaindex-py-client==0.1.19; python_version >= '3.8' and python_version < '4'
-lxml[html_clean]==5.2.1; python_version >= '3.6'
+lxml[html_clean]==5.2.2; python_version >= '3.6'
lxml-html-clean==0.1.1
markdown==3.6
markdown-it-py==3.0.0; python_version >= '3.8'
Expand All @@ -162,7 +162,7 @@ olefile==0.47; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2,
omegaconf==2.3.0; python_version >= '3.6'
onnx==1.16.0
onnxruntime==1.17.3
-openai==1.27.0; python_full_version >= '3.7.1'
+openai==1.29.0; python_full_version >= '3.7.1'
opencv-python==4.9.0.80; python_version >= '3.6'
openpyxl==3.1.2
ordered-set==4.1.0; python_version >= '3.7'
Expand Down Expand Up @@ -209,7 +209,7 @@ pyinstrument==4.6.2; python_version >= '3.7'
pypandoc==1.13; python_version >= '3.6'
pyparsing==3.1.2; python_full_version >= '3.6.8'
pypdf==4.2.0; python_version >= '3.6'
-pypdfium2==4.29.0; python_version >= '3.6'
+pypdfium2==4.30.0; python_version >= '3.6'
pyright==1.1.362; python_version >= '3.7'
pysbd==0.3.4; python_version >= '3'
pytesseract==0.3.10; python_version >= '3.7'
Expand All @@ -232,7 +232,7 @@ ragas==0.1.7
rapidfuzz==3.9.0; python_version >= '3.8'
realtime==1.0.4; python_version >= '3.8' and python_version < '4.0'
redis==5.0.4; python_version >= '3.7'
-regex==2024.4.28; python_version >= '3.8'
+regex==2024.5.10; python_version >= '3.8'
requests==2.31.0; python_version >= '3.7'
requests-file==2.0.0
resend==1.0.1; python_version >= '3.7'
Expand Down Expand Up @@ -261,7 +261,7 @@ sympy==1.12; python_version >= '3.8'
tabulate==0.9.0; python_version >= '3.7'
tavily-python==0.3.3; python_version >= '3.6'
tenacity==8.3.0; python_version >= '3.8'
-tiktoken==0.6.0; python_version >= '3.8'
+tiktoken==0.7.0; python_version >= '3.8'
timm==0.9.16; python_version >= '3.8'
tinysegmenter==0.3
tldextract==5.1.2; python_version >= '3.8'
Expand Down
2 changes: 1 addition & 1 deletion backend/tests/ragas_evaluation/run_evaluation.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ def main(
score = evaluate(
response_dataset,
metrics=ragas_metrics,
-llm=ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0.1),
+llm=ChatOpenAI(model="gpt-4o", temperature=0.1),
embeddings=LangchainEmbeddingsWrapper(
OpenAIEmbeddings(model="text-embedding-3-large", dimensions=1536)
),
Expand Down
4 changes: 3 additions & 1 deletion frontend/lib/helpers/defineMaxTokens.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,9 @@ export const defineMaxTokens = (
return 2000;
case "mistral/mistral-large-latest":
return 2000;
+case "gpt-4o":
+  return 2000;
 default:
-  return 1000;
+  return 2000;
}
};
1 change: 1 addition & 0 deletions frontend/lib/types/BrainConfig.ts
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ export type BrainConfig = {

export const openAiFreeModels = [
"gpt-3.5-turbo",
+"gpt-4o",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-16k",
"gpt-4-0125-preview",
Expand Down