Added SVG for Groq model providers (#1470)
Related: #1432, #1447
This PR adds support for Groq as an LLM (Large Language Model) provider.

Groq is an AI solutions company delivering ultra-low-latency inference with the first-ever LPU™ Inference Engine. The Groq API enables developers to integrate state-of-the-art LLMs, such as Llama 2 and llama3-70b-8192, into low-latency applications, subject to the request limits listed below. Learn more at [groq.com](https://groq.com/).
Supported Models


| ID                 | Requests per Minute | Requests per Day | Tokens per Minute |
|--------------------|---------------------|------------------|-------------------|
| gemma-7b-it        | 30                  | 14,400           | 15,000            |
| gemma2-9b-it       | 30                  | 14,400           | 15,000            |
| llama3-70b-8192    | 30                  | 14,400           | 6,000             |
| llama3-8b-8192     | 30                  | 14,400           | 30,000            |
| mixtral-8x7b-32768 | 30                  | 14,400           | 5,000             |
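
For orientation, a minimal sketch of calling one of these models through the official `groq` Python SDK (pinned to `groq==0.9.0` in this PR); the API key and prompts are placeholders:

```python
from groq import Groq

# Placeholder key; a real key comes from the Groq console.
client = Groq(api_key="your-groq-api-key")

response = client.chat.completions.create(
    model="llama3-8b-8192",  # any ID from the table above
    messages=[
        {"role": "system", "content": "You are a concise assistant."},
        {"role": "user", "content": "Explain the LPU in one sentence."},
    ],
    temperature=0.7,
    max_tokens=256,
)

print(response.choices[0].message.content)
print(response.usage.total_tokens)  # counts against the per-minute token limits above
```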

---------

Co-authored-by: paresh0628 <paresh.tuvoc@gmail.com>
Co-authored-by: Kevin Hu <kevinhu.sh@gmail.com>
3 people authored Jul 12, 2024
1 parent 009e18f commit ddeac9a
Showing 8 changed files with 118 additions and 5 deletions.
47 changes: 47 additions & 0 deletions api/db/init_data.py
@@ -180,6 +180,12 @@ def init_superuser():
"logo": "",
"tags": "LLM,TEXT EMBEDDING,IMAGE2TEXT",
"status": "1",
},
{
"name": "Groq",
"logo": "",
"tags": "LLM",
"status": "1",
}
# {
# "name": "文心一言",
@@ -933,6 +939,47 @@ def init_llm_factory():
"tags": "TEXT EMBEDDING",
"max_tokens": 2048,
"model_type": LLMType.EMBEDDING.value
},
# ------------------------ Groq -----------------------
{
"fid": factory_infos[18]["name"],
"llm_name": "gemma-7b-it",
"tags": "LLM,CHAT,15k",
"max_tokens": 8192,
"model_type": LLMType.CHAT.value
},
{
"fid": factory_infos[18]["name"],
"llm_name": "gemma2-9b-it",
"tags": "LLM,CHAT,15k",
"max_tokens": 8192,
"model_type": LLMType.CHAT.value
},
{
"fid": factory_infos[18]["name"],
"llm_name": "llama3-70b-8192",
"tags": "LLM,CHAT,6k",
"max_tokens": 8192,
"model_type": LLMType.CHAT.value
},
{
"fid": factory_infos[18]["name"],
"llm_name": "llama3-8b-8192",
"tags": "LLM,CHAT,30k",
"max_tokens": 8192,
"model_type": LLMType.CHAT.value
},
{
"fid": factory_infos[18]["name"],
"llm_name": "mixtral-8x7b-32768",
"tags": "LLM,CHAT,5k",
"max_tokens": 32768,
"model_type": LLMType.CHAT.value
}
]
for info in factory_infos:
3 changes: 2 additions & 1 deletion rag/llm/__init__.py
@@ -32,7 +32,8 @@
"Jina": JinaEmbed,
"BAAI": DefaultEmbedding,
"Mistral": MistralEmbed,
"Bedrock": BedrockEmbed
"Bedrock": BedrockEmbed,
"Groq": GroqChat
}
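
For orientation, these name-to-class registries are how the backend resolves a user-selected provider string to a concrete wrapper. A minimal sketch of that lookup, assuming GroqChat ends up registered under "Groq" in the ChatModel mapping:

```python
# Hedged sketch; the registry name and constructor call mirror the patterns
# above but are assumptions, not exact RAGFlow call sites.
from rag.llm import ChatModel

mdl = ChatModel["Groq"](key="your-groq-api-key", model_name="llama3-8b-8192")
ans, tokens = mdl.chat(
    "You are a concise assistant.",
    [{"role": "user", "content": "Hello"}],
    {"temperature": 0.7},
)
```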


62 changes: 61 additions & 1 deletion rag/llm/chat_model.py
@@ -23,6 +23,7 @@
from volcengine.maas.v2 import MaasService
from rag.nlp import is_english
from rag.utils import num_tokens_from_string
from groq import Groq


class Base(ABC):
@@ -681,4 +682,63 @@ def chat_streamly(self, system, history, gen_conf):
except Exception as e:
yield ans + "\n**ERROR**: " + str(e)

yield response._chunks[-1].usage_metadata.total_token_count



class GroqChat:
    def __init__(self, key, model_name, base_url=''):
        # base_url is accepted for interface parity with the other chat
        # wrappers but is not used by the Groq client.
        self.client = Groq(api_key=key)
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        # Groq only accepts these generation parameters; drop the rest.
        for k in list(gen_conf.keys()):
            if k not in ["temperature", "top_p", "max_tokens"]:
                del gen_conf[k]

        ans = ""
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                **gen_conf
            )
            ans = response.choices[0].message.content
            if response.choices[0].finish_reason == "length":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                    [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return ans, response.usage.total_tokens
        except Exception as e:
            return ans + "\n**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        for k in list(gen_conf.keys()):
            if k not in ["temperature", "top_p", "max_tokens"]:
                del gen_conf[k]
        ans = ""
        total_tokens = 0
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                stream=True,
                **gen_conf
            )
            for resp in response:
                if not resp.choices or not resp.choices[0].delta.content:
                    continue
                ans += resp.choices[0].delta.content
                # One delta per chunk: counts chunks as a rough token estimate.
                total_tokens += 1
                if resp.choices[0].finish_reason == "length":
                    ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                        [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
                yield ans

        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield total_tokens
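
For reference, a hedged usage sketch of the GroqChat wrapper added above; the key, model name, and prompts are placeholders, and the return shapes follow the code as written:

```python
mdl = GroqChat(key="your-groq-api-key", model_name="llama3-8b-8192")
gen_conf = {"temperature": 0.3, "max_tokens": 256}

# Blocking call: returns the answer text and Groq's reported total token usage.
ans, used_tokens = mdl.chat(
    "You are a concise assistant.",
    [{"role": "user", "content": "Summarize what an LPU is."}],
    gen_conf,
)

# Streaming call: yields the progressively longer answer string, then the
# integer chunk-count estimate as its final value.
for out in mdl.chat_streamly(
    "You are a concise assistant.",
    [{"role": "user", "content": "Summarize what an LPU is."}],
    gen_conf,
):
    if isinstance(out, str):
        print(out)              # partial answer accumulated so far
    else:
        print("~tokens:", out)  # final token estimate
```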
3 changes: 2 additions & 1 deletion requirements.txt
@@ -147,4 +147,5 @@ markdown==3.6
mistralai==0.4.2
boto3==1.34.140
duckduckgo_search==6.1.9
google-generativeai==0.7.2
groq==0.9.0
3 changes: 2 additions & 1 deletion requirements_arm.txt
@@ -148,4 +148,5 @@ markdown==3.6
mistralai==0.4.2
boto3==1.34.140
duckduckgo_search==6.1.9
google-generativeai==0.7.2
groq==0.9.0
3 changes: 2 additions & 1 deletion requirements_dev.txt
@@ -133,4 +133,5 @@ markdown==3.6
mistralai==0.4.2
boto3==1.34.140
duckduckgo_search==6.1.9
google-generativeai==0.7.2
groq==0.9.0
1 change: 1 addition & 0 deletions web/src/assets/svg/llm/Groq.svg
1 change: 1 addition & 0 deletions web/src/pages/user-setting/setting-model/index.tsx
@@ -62,6 +62,7 @@ const IconMap = {
'Azure-OpenAI': 'azure',
Bedrock: 'bedrock',
Gemini: 'gemini',
Groq: 'Groq',
};

const LlmIcon = ({ name }: { name: string }) => {
