Skip to content

Commit

Permalink
Merge commit '6db3ae9b8ec2f8491e2c9355056a8693ecd67f47'
Browse files Browse the repository at this point in the history
* commit '6db3ae9b8ec2f8491e2c9355056a8693ecd67f47': (22 commits)
  chore: remove webapp ga (langgenius#12909)
  fix: variable panel scrollable (langgenius#12769)
  fix: OpenAI o1 Bad Request Error (langgenius#12839)
  Update deepseek model configuration (langgenius#12899)
  fix: external dataset hit test display issue(langgenius#12564) (langgenius#12612)
  add deepseek-reasoner (langgenius#12898)
  chore(fix): Invalid quotes for using Array[String] in HTTP request node as JSON body (langgenius#12761)
  fix: Issues related to the deletion of conversation_id (langgenius#12488) (langgenius#12665)
  chore(lint): fix quotes for f-string formatting by bumping ruff to 0.9.x (langgenius#12702)
  feat:Support Minimax-Text-01 (langgenius#12763)
  fix: serply credential check query might return empty records (langgenius#12784)
  feat: allow updating chunk settings for the existing documents (langgenius#12833)
  fix: SparkLite API Auth error (langgenius#12781) (langgenius#12790)
  fix: "parmas" spelling mistake. (langgenius#12875)
  Fix suggested_question_prompt (langgenius#12738)
  fix(i18n): correct typo in zh-Hant translation (langgenius#12852)
  chore: fix chinese translation for 'recall' (langgenius#12772)
  fix: DeepSeek API Error with response format active (text and json_object)  (langgenius#12747)
  feat: enhance credential extraction logic based on configurate method (langgenius#12853)
  fix: Fix rerank model switching issue (langgenius#12721)
  ...

# Conflicts:
#	api/core/tools/utils/message_transformer.py
#	api/poetry.lock
  • Loading branch information
Scorpion1221 committed Jan 21, 2025
2 parents 1597a07 + 6db3ae9 commit 3d585be
Show file tree
Hide file tree
Showing 132 changed files with 1,161 additions and 404 deletions.
2 changes: 2 additions & 0 deletions api/.ruff.toml
Original file line number Diff line number Diff line change
Expand Up @@ -53,10 +53,12 @@ ignore = [
"FURB152", # math-constant
"UP007", # non-pep604-annotation
"UP032", # f-string
"UP045", # non-pep604-annotation-optional
"B005", # strip-with-multi-characters
"B006", # mutable-argument-default
"B007", # unused-loop-control-variable
"B026", # star-arg-unpacking-after-keyword-arg
"B903", # class-as-data-structure
"B904", # raise-without-from-inside-except
"B905", # zip-without-explicit-strict
"N806", # non-lowercase-variable-in-function
Expand Down
2 changes: 1 addition & 1 deletion api/configs/feature/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,7 @@ class EndpointConfig(BaseSettings):
)

CONSOLE_WEB_URL: str = Field(
description="Base URL for the console web interface," "used for frontend references and CORS configuration",
description="Base URL for the console web interface,used for frontend references and CORS configuration",
default="",
)

Expand Down
2 changes: 1 addition & 1 deletion api/configs/feature/hosted_service/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -181,7 +181,7 @@ class HostedFetchAppTemplateConfig(BaseSettings):
"""

HOSTED_FETCH_APP_TEMPLATES_MODE: str = Field(
description="Mode for fetching app templates: remote, db, or builtin" " default to remote,",
description="Mode for fetching app templates: remote, db, or builtin default to remote,",
default="remote",
)

Expand Down
2 changes: 1 addition & 1 deletion api/controllers/console/admin.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def post(self):

app = App.query.filter(App.id == args["app_id"]).first()
if not app:
raise NotFound(f'App \'{args["app_id"]}\' is not found')
raise NotFound(f"App '{args['app_id']}' is not found")

site = app.site
if not site:
Expand Down
2 changes: 1 addition & 1 deletion api/controllers/console/datasets/datasets.py
Original file line number Diff line number Diff line change
Expand Up @@ -457,7 +457,7 @@ def post(self):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider " "in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
Expand Down
6 changes: 2 additions & 4 deletions api/controllers/console/datasets/datasets_document.py
Original file line number Diff line number Diff line change
Expand Up @@ -350,8 +350,7 @@ def post(self):
)
except InvokeAuthorizationError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
Expand Down Expand Up @@ -526,8 +525,7 @@ def get(self, dataset_id, batch):
return response.model_dump(), 200
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
Expand Down
12 changes: 4 additions & 8 deletions api/controllers/console/datasets/datasets_segments.py
Original file line number Diff line number Diff line change
Expand Up @@ -168,8 +168,7 @@ def patch(self, dataset_id, document_id, action):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
Expand Down Expand Up @@ -217,8 +216,7 @@ def post(self, dataset_id, document_id):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
Expand Down Expand Up @@ -267,8 +265,7 @@ def patch(self, dataset_id, document_id, segment_id):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
Expand Down Expand Up @@ -437,8 +434,7 @@ def post(self, dataset_id, document_id, segment_id):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
Expand Down
9 changes: 3 additions & 6 deletions api/controllers/service_api/dataset/segment.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,8 +53,7 @@ def post(self, tenant_id, dataset_id, document_id):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
Expand Down Expand Up @@ -95,8 +94,7 @@ def get(self, tenant_id, dataset_id, document_id):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
Expand Down Expand Up @@ -175,8 +173,7 @@ def post(self, tenant_id, dataset_id, document_id, segment_id):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
Expand Down
3 changes: 1 addition & 2 deletions api/core/app/apps/base_app_queue_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -167,8 +167,7 @@ def _check_for_sqlalchemy_models(self, data: Any):
else:
if isinstance(data, DeclarativeMeta) or hasattr(data, "_sa_instance_state"):
raise TypeError(
"Critical Error: Passing SQLAlchemy Model instances "
"that cause thread safety issues is not allowed."
"Critical Error: Passing SQLAlchemy Model instances that cause thread safety issues is not allowed."
)


Expand Down
1 change: 1 addition & 0 deletions api/core/app/apps/message_based_app_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,7 @@ def _get_conversation_by_user(
Conversation.id == conversation_id,
Conversation.app_id == app_model.id,
Conversation.status == "normal",
Conversation.is_deleted.is_(False),
]

if isinstance(user, Account):
Expand Down
2 changes: 1 addition & 1 deletion api/core/app/task_pipeline/message_cycle_manage.py
Original file line number Diff line number Diff line change
Expand Up @@ -145,7 +145,7 @@ def _message_file_to_stream_response(self, event: QueueMessageFileEvent) -> Opti

# get extension
if "." in message_file.url:
extension = f'.{message_file.url.split(".")[-1]}'
extension = f".{message_file.url.split('.')[-1]}"
if len(extension) > 10:
extension = ".bin"
else:
Expand Down
5 changes: 3 additions & 2 deletions api/core/external_data_tool/api/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,8 +62,9 @@ def query(self, inputs: dict, query: Optional[str] = None) -> str:

if not api_based_extension:
raise ValueError(
"[External data tool] API query failed, variable: {}, "
"error: api_based_extension_id is invalid".format(self.variable)
"[External data tool] API query failed, variable: {}, error: api_based_extension_id is invalid".format(
self.variable
)
)

# decrypt api_key
Expand Down
2 changes: 1 addition & 1 deletion api/core/file/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ def to_dict(self) -> Mapping[str, str | int | None]:
def markdown(self) -> str:
url = self.generate_url()
if self.type == FileType.IMAGE:
text = f'![{self.filename or ""}]({url})'
text = f"![{self.filename or ''}]({url})"
else:
text = f"[{self.filename or url}]({url})"

Expand Down
2 changes: 1 addition & 1 deletion api/core/llm_generator/prompts.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@
SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = (
"Please help me predict the three most likely questions that human would ask, "
"and keeping each question under 20 characters.\n"
"MAKE SURE your output is the SAME language as the Assistant's latest response"
"MAKE SURE your output is the SAME language as the Assistant's latest response. "
"The output must be an array in JSON format following the specified schema:\n"
'["question1","question2","question3"]\n'
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,7 @@ def validate_credentials(self, model: str, credentials: dict) -> None:
ai_model_entity = self._get_ai_model_entity(base_model_name=base_model_name, model=model)

if not ai_model_entity:
raise CredentialsValidateFailedError(f'Base Model Name {credentials["base_model_name"]} is invalid')
raise CredentialsValidateFailedError(f"Base Model Name {credentials['base_model_name']} is invalid")

try:
client = AzureOpenAI(**self._to_credential_kwargs(credentials))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,7 @@ def validate_credentials(self, model: str, credentials: dict) -> None:
raise CredentialsValidateFailedError("Base Model Name is required")

if not self._get_ai_model_entity(credentials["base_model_name"], model):
raise CredentialsValidateFailedError(f'Base Model Name {credentials["base_model_name"]} is invalid')
raise CredentialsValidateFailedError(f"Base Model Name {credentials['base_model_name']} is invalid")

try:
credentials_kwargs = self._to_credential_kwargs(credentials)
Expand Down
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
# DeepSeek model identifiers for this provider.
# NOTE(review): this looks like a `_position.yaml`-style ordering file —
# entries presumably control the display order of models; confirm against
# the provider loader before relying on that.
- deepseek-chat
- deepseek-coder
- deepseek-reasoner
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ features:
- stream-tool-call
model_properties:
mode: chat
context_size: 128000
context_size: 64000
parameter_rules:
- name: temperature
use_template: temperature
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ features:
- stream-tool-call
model_properties:
mode: chat
context_size: 128000
context_size: 64000
parameter_rules:
- name: temperature
use_template: temperature
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# Model schema for DeepSeek's `deepseek-reasoner` chat model.
model: deepseek-reasoner
label:
  # Same display label for both locales.
  zh_Hans: deepseek-reasoner
  en_US: deepseek-reasoner
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  # Declared context window of 64k tokens.
  # NOTE(review): confirm against DeepSeek's published limit for this model.
  context_size: 64000
parameter_rules:
  # Only max_tokens is exposed; min/max bound the completion length.
  - name: max_tokens
    use_template: max_tokens
    min: 1
    max: 8192
    default: 4096
pricing:
  # Prices are expressed per `unit` tokens (unit 0.000001 => per-token
  # price of input*unit / output*unit), in RMB.
  input: "4"
  output: "16"
  unit: "0.000001"
  currency: RMB
3 changes: 0 additions & 3 deletions api/core/model_runtime/model_providers/deepseek/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,6 @@ def _invoke(
user: Optional[str] = None,
) -> Union[LLMResult, Generator]:
self._add_custom_parameters(credentials)
# {"response_format": "xx"} need convert to {"response_format": {"type": "xx"}}
if "response_format" in model_parameters:
model_parameters["response_format"] = {"type": model_parameters.get("response_format")}
return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream)

def validate_credentials(self, model: str, credentials: dict) -> None:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -162,9 +162,9 @@ def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> Em
@staticmethod
def _check_endpoint_url_model_repository_name(credentials: dict, model_name: str):
try:
url = f'{HUGGINGFACE_ENDPOINT_API}{credentials["huggingface_namespace"]}'
url = f"{HUGGINGFACE_ENDPOINT_API}{credentials['huggingface_namespace']}"
headers = {
"Authorization": f'Bearer {credentials["huggingfacehub_api_token"]}',
"Authorization": f"Bearer {credentials['huggingfacehub_api_token']}",
"Content-Type": "application/json",
}

Expand Down
1 change: 1 addition & 0 deletions api/core/model_runtime/model_providers/minimax/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@

class MinimaxLargeLanguageModel(LargeLanguageModel):
model_apis = {
"minimax-text-01": MinimaxChatCompletionPro,
"abab7-chat-preview": MinimaxChatCompletionPro,
"abab6.5t-chat": MinimaxChatCompletionPro,
"abab6.5s-chat": MinimaxChatCompletionPro,
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
# Model schema for MiniMax's `minimax-text-01` chat model.
model: minimax-text-01
label:
  en_US: Minimax-Text-01
model_type: llm
features:
  - agent-thought
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  # Declared context window of ~1M tokens.
  # NOTE(review): confirm 1000192 against MiniMax's published limit.
  context_size: 1000192
parameter_rules:
  - name: temperature
    use_template: temperature
    min: 0.01
    max: 1
    default: 0.1
  - name: top_p
    use_template: top_p
    min: 0.01
    max: 1
    default: 0.95
  - name: max_tokens
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    # Upper bound matches the declared context size.
    max: 1000192
  # Provider-specific switch: masks privacy-sensitive text (emails, domains,
  # links, ID numbers, addresses, ...) in generated output; defaults to on.
  - name: mask_sensitive_info
    type: boolean
    default: true
    label:
      zh_Hans: 隐私保护
      en_US: Moderate
    help:
      zh_Hans: 对输出中易涉及隐私问题的文本信息进行打码,目前包括但不限于邮箱、域名、链接、证件号、家庭住址等,默认true,即开启打码
      en_US: Mask the sensitive info of the generated content, such as email/domain/link/address/phone/id..
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
pricing:
  # Prices are expressed per `unit` tokens (unit 0.001 => per-1k-token
  # price), in RMB.
  input: '0.001'
  output: '0.008'
  unit: '0.001'
  currency: RMB
3 changes: 0 additions & 3 deletions api/core/model_runtime/model_providers/moonshot/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,9 +44,6 @@ def _invoke(
self._add_custom_parameters(credentials)
self._add_function_call(model, credentials)
user = user[:32] if user else None
# {"response_format": "json_object"} need convert to {"response_format": {"type": "json_object"}}
if "response_format" in model_parameters:
model_parameters["response_format"] = {"type": model_parameters.get("response_format")}
return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user)

def validate_credentials(self, model: str, credentials: dict) -> None:
Expand Down
Loading

0 comments on commit 3d585be

Please sign in to comment.