Merge pull request #260 from Fanlu91/fix_fetchChatAPI
fix: remove unused fetchChatAPI
Kerwin1202 authored Jun 23, 2023
2 parents af2f07b + 397e5f4 commit 204055c
Showing 3 changed files with 36 additions and 78 deletions.
60 changes: 35 additions & 25 deletions service/src/chatgpt/index.ts
@@ -49,31 +49,32 @@ export async function initApi(key: KeyConfig, chatModel: CHATMODEL) {
messageStore: undefined,
getMessageById,
}

-  // Set the token limits based on the model's type. This is because different models have different token limits.
-  // The token limit includes the token count from both the message array sent and the model response.
-  // 'gpt-35-turbo' has a limit of 4096 tokens; 'gpt-4' and 'gpt-4-32k' have limits of 8192 and 32768 tokens respectively.
-
-  // Check if the model type includes '16k'
-  if (model.toLowerCase().includes('16k')) {
-    // If it's a '16k' model, set maxModelTokens to 16384 and maxResponseTokens to 4096
-    options.maxModelTokens = 16384;
-    options.maxResponseTokens = 4096;
-  } else if (model.toLowerCase().includes('32k')) {
-    // If it's a '32k' model, set maxModelTokens to 32768 and maxResponseTokens to 8192
-    options.maxModelTokens = 32768;
-    options.maxResponseTokens = 8192;
-  } else if (model.toLowerCase().includes('gpt-4')) {
-    // If it's a 'gpt-4' model, set maxModelTokens and maxResponseTokens to 8192 and 2048 respectively
-    options.maxModelTokens = 8192;
-    options.maxResponseTokens = 2048;
-  } else {
-    // If none of the above, use the default values: maxModelTokens 4096 and maxResponseTokens 1024
-    options.maxModelTokens = 4096;
-    options.maxResponseTokens = 1024;
-  }

+  // Set the token limits based on the model's type. This is because different models have different token limits.
+  // The token limit includes the token count from both the message array sent and the model response.
+  // 'gpt-35-turbo' has a limit of 4096 tokens; 'gpt-4' and 'gpt-4-32k' have limits of 8192 and 32768 tokens respectively.
+
+  // Check if the model type includes '16k'
+  if (model.toLowerCase().includes('16k')) {
+    // If it's a '16k' model, set maxModelTokens to 16384 and maxResponseTokens to 4096
+    options.maxModelTokens = 16384
+    options.maxResponseTokens = 4096
+  }
+  else if (model.toLowerCase().includes('32k')) {
+    // If it's a '32k' model, set maxModelTokens to 32768 and maxResponseTokens to 8192
+    options.maxModelTokens = 32768
+    options.maxResponseTokens = 8192
+  }
+  else if (model.toLowerCase().includes('gpt-4')) {
+    // If it's a 'gpt-4' model, set maxModelTokens and maxResponseTokens to 8192 and 2048 respectively
+    options.maxModelTokens = 8192
+    options.maxResponseTokens = 2048
+  }
+  else {
+    // If none of the above, use the default values: maxModelTokens 4096 and maxResponseTokens 1024
+    options.maxModelTokens = 4096
+    options.maxResponseTokens = 1024
+  }

if (isNotEmptyString(OPENAI_API_BASE_URL))
options.apiBaseUrl = `${OPENAI_API_BASE_URL}/v1`
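
For reference, the same model-to-limit mapping can be expressed as a small pure helper. This is an illustrative sketch, not code from this PR; the option names maxModelTokens and maxResponseTokens come from the options object used above, while TokenLimits and tokenLimitsFor are hypothetical names:

interface TokenLimits {
  maxModelTokens: number
  maxResponseTokens: number
}

function tokenLimitsFor(model: string): TokenLimits {
  const m = model.toLowerCase()
  // Order matters: check '16k'/'32k' before the bare 'gpt-4' substring,
  // so that 'gpt-4-32k' resolves to the 32768-token limit, not 8192.
  if (m.includes('16k'))
    return { maxModelTokens: 16384, maxResponseTokens: 4096 }
  if (m.includes('32k'))
    return { maxModelTokens: 32768, maxResponseTokens: 8192 }
  if (m.includes('gpt-4'))
    return { maxModelTokens: 8192, maxResponseTokens: 2048 }
  // Default for gpt-3.5-turbo-class models
  return { maxModelTokens: 4096, maxResponseTokens: 1024 }
}

The branch ordering is the load-bearing detail: the more specific '16k'/'32k' checks must precede the 'gpt-4' check, exactly as in the committed code.
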
@@ -262,12 +263,21 @@ async function fetchBalance() {
console.error('Your account has been banned. Please log in to OpenAI to check it.')
return
}
-  const subscriptionData = await response.json()
+  interface SubscriptionData {
+    hard_limit_usd?: number
+    // other possible properties can be added here
+  }
+  const subscriptionData: SubscriptionData = await response.json()
const totalAmount = subscriptionData.hard_limit_usd

+  interface UsageData {
+    total_usage?: number
+    // other possible properties can be added here
+  }

// fetch the amount already used
response = await fetch(urlUsage, { agent: socksAgent === undefined ? httpsAgent : socksAgent, headers })
-  const usageData = await response.json()
+  const usageData: UsageData = await response.json()
const totalUsage = usageData.total_usage / 100

// calculate the remaining balance
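
Taken together, the two typed responses yield the remaining balance. A minimal sketch, assuming hard_limit_usd is in dollars and total_usage is in cents (hence the division by 100 above); the function name is illustrative:

function remainingBalance(
  subscription: { hard_limit_usd?: number },
  usage: { total_usage?: number },
): number {
  // Guard the optional fields with ?? 0; note the committed code divides
  // total_usage directly, which would yield NaN if the field were absent.
  const totalAmount = subscription.hard_limit_usd ?? 0
  const totalUsage = (usage.total_usage ?? 0) / 100
  return totalAmount - totalUsage
}
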
42 changes: 1 addition & 41 deletions service/src/index.ts
@@ -3,7 +3,7 @@ import jwt from 'jsonwebtoken'
import * as dotenv from 'dotenv'
import { ObjectId } from 'mongodb'
import type { RequestProps } from './types'
-import type { ChatContext, ChatMessage } from './chatgpt'
+import type { ChatMessage } from './chatgpt'
import { abortChatProcess, chatConfig, chatReplyProcess, containsSensitiveWords, initAuditService } from './chatgpt'
import { auth, getUserId } from './middleware/auth'
import { clearApiKeyCache, clearConfigCache, getApiKeys, getCacheApiKeys, getCacheConfig, getOriginConfig } from './storage/config'
@@ -335,46 +335,6 @@ router.post('/chat-clear', auth, async (req, res) => {
}
})

-router.post('/chat', auth, async (req, res) => {
-  try {
-    const { roomId, uuid, regenerate, prompt, options = {} } = req.body as
-      { roomId: number; uuid: number; regenerate: boolean; prompt: string; options?: ChatContext }
-    const message = regenerate
-      ? await getChat(roomId, uuid)
-      : await insertChat(uuid, prompt, roomId, options as ChatOptions)
-    const response = await chatReply(prompt, options)
-    if (response.status === 'Success') {
-      if (regenerate && message.options.messageId) {
-        const previousResponse = message.previousResponse || []
-        previousResponse.push({ response: message.response, options: message.options })
-        await updateChat(message._id as unknown as string,
-          response.data.text,
-          response.data.id,
-          response.data.detail?.usage as UsageResponse,
-          previousResponse as [])
-      }
-      else {
-        await updateChat(message._id as unknown as string,
-          response.data.text,
-          response.data.id,
-          response.data.detail?.usage as UsageResponse)
-      }
-
-      if (response.data.usage) {
-        await insertChatUsage(new ObjectId(req.headers.userId as string),
-          roomId,
-          message._id,
-          response.data.id,
-          response.data.detail?.usage as UsageResponse)
-      }
-    }
-    res.send(response)
-  }
-  catch (error) {
-    res.send(error)
-  }
-})
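
For context, the bookkeeping this removed route performed on regeneration: before overwriting a chat record with a regenerated answer, the previous answer was pushed onto a history array. An illustrative sketch with simplified stand-in types, not the repository's real models:

interface StoredChat {
  response: string
  options: { messageId?: string }
  previousResponse: { response: string; options: { messageId?: string } }[]
}

function archivePreviousResponse(message: StoredChat): void {
  // Keep the superseded answer so earlier generations remain retrievable
  message.previousResponse.push({
    response: message.response,
    options: message.options,
  })
}
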

router.post('/chat-process', [auth, limiter], async (req, res) => {
res.setHeader('Content-type', 'application/octet-stream')

12 changes: 0 additions & 12 deletions src/api/index.ts
@@ -3,18 +3,6 @@ import { get, post } from '@/utils/request'
import type { AuditConfig, CHATMODEL, ConfigState, KeyConfig, MailConfig, SiteConfig, Status, UserRole } from '@/components/common/Setting/model'
import { useAuthStore, useSettingStore } from '@/store'

-export function fetchChatAPI<T = any>(
-  prompt: string,
-  options?: { conversationId?: string; parentMessageId?: string },
-  signal?: GenericAbortSignal,
-) {
-  return post<T>({
-    url: '/chat',
-    data: { prompt, options },
-    signal,
-  })
-}
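
With fetchChatAPI gone, callers go through the streaming /chat-process route shown in the server diff above. A hedged sketch of such a client call, assuming the repository's post helper forwards axios options; chatProcess and its parameter list are illustrative and need not match the repository's own streaming helper:

import type { AxiosProgressEvent, GenericAbortSignal } from 'axios'
import { post } from '@/utils/request'

// Illustrative client for the streaming /chat-process route.
// onDownloadProgress is the axios hook for consuming the
// octet-stream response incrementally as it arrives.
export function chatProcess<T = any>(
  prompt: string,
  signal?: GenericAbortSignal,
  onDownloadProgress?: (progressEvent: AxiosProgressEvent) => void,
) {
  return post<T>({
    url: '/chat-process',
    data: { prompt },
    signal,
    onDownloadProgress,
  })
}
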

export function fetchChatConfig<T = any>() {
return post<T>({
url: '/config',
