Skip to content

Commit

Permalink
feat: Add support for gpt-4o model (#2589)
Browse files Browse the repository at this point in the history
This pull request adds support for the gpt-4o model to the existing
codebase. It includes changes to the BrainConfig, openAiFreeModels,
defineMaxTokens, model_compatible_with_function_calling, create_graph,
main, and process_assistant functions.
  • Loading branch information
StanGirard committed May 13, 2024
1 parent 3086891 commit cd927eb
Show file tree
Hide file tree
Showing 10 changed files with 337 additions and 349 deletions.
634 changes: 309 additions & 325 deletions Pipfile.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion backend/modules/assistant/ito/difference.py
Expand Up @@ -101,7 +101,7 @@ async def process_assistant(self):
document_1_to_langchain = document_1_llama_parsed[0].to_langchain_format()
document_2_to_langchain = document_2_llama_parsed[0].to_langchain_format()

-        llm = ChatLiteLLM(model="gpt-4-turbo-2024-04-09")
+        llm = ChatLiteLLM(model="gpt-4o")

human_prompt = """Given the following two documents, find the difference between them:
Expand Down
4 changes: 1 addition & 3 deletions backend/modules/brain/integrations/GPT4/Brain.py
Expand Up @@ -166,9 +166,7 @@ def create_graph(self):
return app

def get_chain(self):
-        self.function_model = ChatOpenAI(
-            model="gpt-4-turbo", temperature=0, streaming=True
-        )
+        self.function_model = ChatOpenAI(model="gpt-4o", temperature=0, streaming=True)

self.function_model = self.function_model.bind_tools(self.tools)

Expand Down
1 change: 1 addition & 0 deletions backend/modules/brain/qa_interface.py
Expand Up @@ -42,6 +42,7 @@ def generate_stream(

def model_compatible_with_function_calling(self, model: str):
if model in [
"gpt-4o",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-turbo-preview",
Expand Down
3 changes: 2 additions & 1 deletion backend/modules/brain/rags/quivr_rag.py
Expand Up @@ -4,14 +4,14 @@
from uuid import UUID

from langchain.chains import ConversationalRetrievalChain
- from langchain_community.embeddings import OllamaEmbeddings
from langchain.llms.base import BaseLLM
from langchain.prompts import HumanMessagePromptTemplate, SystemMessagePromptTemplate
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import FlashrankRerank
from langchain.schema import format_document
from langchain_cohere import CohereRerank
from langchain_community.chat_models import ChatLiteLLM
+ from langchain_community.embeddings import OllamaEmbeddings
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
from langchain_core.pydantic_v1 import BaseModel as BaseModelV1
Expand Down Expand Up @@ -136,6 +136,7 @@ def prompt_to_use(self):

def model_compatible_with_function_calling(self):
if self.model in [
"gpt-4o",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-turbo-preview",
Expand Down
1 change: 1 addition & 0 deletions backend/modules/chat/controller/chat/brainful_chat.py
Expand Up @@ -34,6 +34,7 @@
"gpt-4-0125-preview",
"gpt-3.5-turbo",
"gpt-4-turbo",
"gpt-4o",
]


Expand Down
34 changes: 17 additions & 17 deletions backend/requirements.txt
Expand Up @@ -17,8 +17,8 @@ backoff==2.2.1; python_version >= '3.7' and python_version < '4.0'
beautifulsoup4==4.12.3; python_full_version >= '3.6.0'
billiard==4.2.0; python_version >= '3.7'
black==24.4.2; python_version >= '3.8'
- boto3==1.34.101; python_version >= '3.8'
- botocore==1.34.101; python_version >= '3.8'
+ boto3==1.34.104; python_version >= '3.8'
+ botocore==1.34.104; python_version >= '3.8'
cachetools==5.3.3; python_version >= '3.7'
celery[redis,sqs]==5.4.0; python_version >= '3.8'
certifi==2024.2.2; python_version >= '3.6'
Expand All @@ -29,15 +29,15 @@ click==8.1.7; python_version >= '3.7'
click-didyoumean==0.3.1; python_full_version >= '3.6.2'
click-plugins==1.1.1
click-repl==0.3.0; python_version >= '3.6'
- cohere==5.3.5; python_version >= '3.8' and python_version < '4.0'
+ cohere==5.4.0; python_version >= '3.8' and python_version < '4.0'
coloredlogs==15.0.1; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'
colorlog==6.8.2; python_version >= '3.6'
contourpy==1.2.1; python_version >= '3.9'
cryptography==42.0.7; python_version >= '3.7'
cssselect==1.2.0; python_version >= '3.7'
curl-cffi==0.7.0b4; python_version >= '3.8'
cycler==0.12.1; python_version >= '3.8'
- dataclasses-json==0.6.5; python_version >= '3.7' and python_version < '4.0'
+ dataclasses-json==0.6.6; python_version >= '3.7' and python_version < '4.0'
datasets==2.19.1; python_full_version >= '3.8.0'
debugpy==1.8.1; python_version >= '3.8'
decorator==5.1.1; python_version >= '3.5'
Expand Down Expand Up @@ -109,35 +109,35 @@ jsonpath-python==1.0.6; python_version >= '3.6'
jsonpointer==2.4; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6'
kiwisolver==1.4.5; python_version >= '3.7'
kombu[sqs]==5.3.7; python_version >= '3.8'
- langchain==0.1.19; python_version < '4.0' and python_full_version >= '3.8.1'
+ langchain==0.1.20; python_version < '4.0' and python_full_version >= '3.8.1'
langchain-cohere==0.1.4; python_version < '4.0' and python_full_version >= '3.8.1'
langchain-community==0.0.38; python_version < '4.0' and python_full_version >= '3.8.1'
langchain-core==0.1.52; python_version < '4.0' and python_full_version >= '3.8.1'
langchain-openai==0.1.6; python_version < '4.0' and python_full_version >= '3.8.1'
langchain-text-splitters==0.0.1; python_version < '4.0' and python_full_version >= '3.8.1'
langdetect==1.0.9
- langfuse==2.29.3; python_version < '4.0' and python_full_version >= '3.8.1'
+ langfuse==2.30.0; python_version < '4.0' and python_full_version >= '3.8.1'
langgraph==0.0.48; python_version < '4.0' and python_full_version >= '3.9.0'
- langsmith==0.1.56; python_version < '4.0' and python_full_version >= '3.8.1'
+ langsmith==0.1.57; python_version < '4.0' and python_full_version >= '3.8.1'
layoutparser[layoutmodels,tesseract]==0.3.4; python_version >= '3.6'
- litellm==1.36.4; python_version not in '2.7, 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7' and python_version >= '3.8'
+ litellm==1.37.5; python_version not in '2.7, 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7' and python_version >= '3.8'
llama-cpp-python==0.2.67; python_version >= '3.8'
- llama-index==0.10.35; python_version < '4.0' and python_full_version >= '3.8.1'
+ llama-index==0.10.36; python_version < '4.0' and python_full_version >= '3.8.1'
llama-index-agent-openai==0.2.4; python_version < '4.0' and python_full_version >= '3.8.1'
llama-index-cli==0.1.12; python_version < '4.0' and python_full_version >= '3.8.1'
- llama-index-core==0.10.35.post1; python_version < '4.0' and python_full_version >= '3.8.1'
+ llama-index-core==0.10.36; python_version < '4.0' and python_full_version >= '3.8.1'
llama-index-embeddings-openai==0.1.9; python_version < '4.0' and python_full_version >= '3.8.1'
llama-index-indices-managed-llama-cloud==0.1.6; python_version < '4.0' and python_full_version >= '3.8.1'
llama-index-legacy==0.9.48; python_version < '4.0' and python_full_version >= '3.8.1'
- llama-index-llms-openai==0.1.18; python_version < '4.0' and python_full_version >= '3.8.1'
- llama-index-multi-modal-llms-openai==0.1.5; python_version < '4.0' and python_full_version >= '3.8.1'
+ llama-index-llms-openai==0.1.19; python_version < '4.0' and python_full_version >= '3.8.1'
+ llama-index-multi-modal-llms-openai==0.1.6; python_version < '4.0' and python_full_version >= '3.8.1'
llama-index-program-openai==0.1.6; python_version < '4.0' and python_full_version >= '3.8.1'
llama-index-question-gen-openai==0.1.3; python_version < '4.0' and python_full_version >= '3.8.1'
llama-index-readers-file==0.1.22; python_version < '4.0' and python_full_version >= '3.8.1'
llama-index-readers-llama-parse==0.1.4; python_version < '4.0' and python_full_version >= '3.8.1'
llama-parse==0.4.2; python_version < '4.0' and python_full_version >= '3.8.1'
llamaindex-py-client==0.1.19; python_version >= '3.8' and python_version < '4'
- lxml[html_clean]==5.2.1; python_version >= '3.6'
+ lxml[html_clean]==5.2.2; python_version >= '3.6'
lxml-html-clean==0.1.1
markdown==3.6
markdown-it-py==3.0.0; python_version >= '3.8'
Expand All @@ -162,7 +162,7 @@ olefile==0.47; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2,
omegaconf==2.3.0; python_version >= '3.6'
onnx==1.16.0
onnxruntime==1.17.3
- openai==1.27.0; python_full_version >= '3.7.1'
+ openai==1.29.0; python_full_version >= '3.7.1'
opencv-python==4.9.0.80; python_version >= '3.6'
openpyxl==3.1.2
ordered-set==4.1.0; python_version >= '3.7'
Expand Down Expand Up @@ -209,7 +209,7 @@ pyinstrument==4.6.2; python_version >= '3.7'
pypandoc==1.13; python_version >= '3.6'
pyparsing==3.1.2; python_full_version >= '3.6.8'
pypdf==4.2.0; python_version >= '3.6'
- pypdfium2==4.29.0; python_version >= '3.6'
+ pypdfium2==4.30.0; python_version >= '3.6'
pyright==1.1.362; python_version >= '3.7'
pysbd==0.3.4; python_version >= '3'
pytesseract==0.3.10; python_version >= '3.7'
Expand All @@ -232,7 +232,7 @@ ragas==0.1.7
rapidfuzz==3.9.0; python_version >= '3.8'
realtime==1.0.4; python_version >= '3.8' and python_version < '4.0'
redis==5.0.4; python_version >= '3.7'
- regex==2024.4.28; python_version >= '3.8'
+ regex==2024.5.10; python_version >= '3.8'
requests==2.31.0; python_version >= '3.7'
requests-file==2.0.0
resend==1.0.1; python_version >= '3.7'
Expand Down Expand Up @@ -261,7 +261,7 @@ sympy==1.12; python_version >= '3.8'
tabulate==0.9.0; python_version >= '3.7'
tavily-python==0.3.3; python_version >= '3.6'
tenacity==8.3.0; python_version >= '3.8'
- tiktoken==0.6.0; python_version >= '3.8'
+ tiktoken==0.7.0; python_version >= '3.8'
timm==0.9.16; python_version >= '3.8'
tinysegmenter==0.3
tldextract==5.1.2; python_version >= '3.8'
Expand Down
2 changes: 1 addition & 1 deletion backend/tests/ragas_evaluation/run_evaluation.py
Expand Up @@ -70,7 +70,7 @@ def main(
score = evaluate(
response_dataset,
metrics=ragas_metrics,
-        llm=ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0.1),
+        llm=ChatOpenAI(model="gpt-4o", temperature=0.1),
embeddings=LangchainEmbeddingsWrapper(
OpenAIEmbeddings(model="text-embedding-3-large", dimensions=1536)
),
Expand Down
4 changes: 3 additions & 1 deletion frontend/lib/helpers/defineMaxTokens.ts
Expand Up @@ -21,7 +21,9 @@ export const defineMaxTokens = (
return 2000;
case "mistral/mistral-large-latest":
return 2000;
+    case "gpt-4o":
+      return 2000;
    default:
-      return 1000;
+      return 2000;
}
};
1 change: 1 addition & 0 deletions frontend/lib/types/BrainConfig.ts
Expand Up @@ -39,6 +39,7 @@ export type BrainConfig = {

export const openAiFreeModels = [
"gpt-3.5-turbo",
"gpt-4o",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-16k",
"gpt-4-0125-preview",
Expand Down

0 comments on commit cd927eb

Please sign in to comment.