fix: llm tests
Yeuoly committed Jan 10, 2025
1 parent 38ab4fe commit 5a80ff8
Showing 4 changed files with 74 additions and 73 deletions.
@@ -1,11 +1,11 @@
import datetime
import uuid
from collections.abc import Generator, Sequence
from decimal import Decimal
from collections.abc import Generator
from json import dumps

# import monkeypatch
from typing import Optional, Sequence
import uuid
from typing import Optional

from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
@@ -199,7 +199,7 @@ def mocked_chat_create_stream(
delta=LLMResultChunkDelta(
index=0,
message=AssistantPromptMessage(
content=full_text[i],
content="",
tool_calls=[tool_call] if tool_call else [],
),
),
49 changes: 49 additions & 0 deletions api/tests/integration_tests/workflow/nodes/__mock/model.py
@@ -0,0 +1,49 @@
from unittest.mock import MagicMock
from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.entities.provider_entities import CustomConfiguration, CustomProviderConfiguration, SystemConfiguration
from core.model_manager import ModelInstance
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
from models.provider import ProviderType


def get_mocked_fetch_model_config(
provider: str,
model: str,
mode: str,
credentials: dict,
):
model_provider_factory = ModelProviderFactory(tenant_id="test_tenant")
model_type_instance = model_provider_factory.get_model_type_instance(provider, ModelType.LLM)
provider_model_bundle = ProviderModelBundle(
configuration=ProviderConfiguration(
tenant_id="1",
provider=model_provider_factory.get_provider_schema(provider),
preferred_provider_type=ProviderType.CUSTOM,
using_provider_type=ProviderType.CUSTOM,
system_configuration=SystemConfiguration(enabled=False),
custom_configuration=CustomConfiguration(provider=CustomProviderConfiguration(credentials=credentials)),
model_settings=[],
),
model_type_instance=model_type_instance,
)
model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model=model)
model_schema = model_provider_factory.get_model_schema(
provider=provider,
model_type=model_type_instance.model_type,
model=model,
credentials=credentials,
)
assert model_schema is not None
model_config = ModelConfigWithCredentialsEntity(
model=model,
provider=provider,
mode=mode,
credentials=credentials,
parameters={},
model_schema=model_schema,
provider_model_bundle=provider_model_bundle,
)

return MagicMock(return_value=(model_instance, model_config))
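
The new helper builds a real ProviderModelBundle and ModelInstance, then wraps the resulting pair in a MagicMock so a test can patch a node's _fetch_model_config method directly. Below is a minimal usage sketch, not part of the commit, assuming an already-initialized LLMNode (as produced by init_llm_node in test_llm.py further down) and the same provider, model, and credential values the tests use:

# Minimal usage sketch (illustration only): patch the node's model-config lookup.
# `node` is assumed to come from init_llm_node() in test_llm.py; the provider, model,
# and credentials mirror the values used in the tests below.
import os

from tests.integration_tests.workflow.nodes.__mock.model import get_mocked_fetch_model_config

node._fetch_model_config = get_mocked_fetch_model_config(
    provider="langgenius/openai/openai",
    model="gpt-3.5-turbo",
    mode="chat",
    credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")},
)

# The patched method now ignores its arguments and always returns the prebuilt
# (ModelInstance, ModelConfigWithCredentialsEntity) pair, since the helper returns
# a MagicMock with a fixed return_value.
model_instance, model_config = node._fetch_model_config()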
85 changes: 19 additions & 66 deletions api/tests/integration_tests/workflow/nodes/test_llm.py
@@ -7,12 +7,7 @@

import pytest

from core.app.entities.app_invoke_entities import InvokeFrom, ModelConfigWithCredentialsEntity
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.entities.provider_entities import CustomConfiguration, CustomProviderConfiguration, SystemConfiguration
from core.model_manager import ModelInstance
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.graph import Graph
@@ -22,8 +17,8 @@
from core.workflow.nodes.llm.node import LLMNode
from extensions.ext_database import db
from models.enums import UserFrom
from models.provider import ProviderType
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
from tests.integration_tests.workflow.nodes.__mock.model import get_mocked_fetch_model_config

"""FOR MOCK FIXTURES, DO NOT REMOVE"""
from tests.integration_tests.model_runtime.__mock.plugin_daemon import setup_model_mock # noqa
@@ -81,15 +76,19 @@ def init_llm_node(config: dict) -> LLMNode:
return node


@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
def test_execute_llm(setup_openai_mock):
def test_execute_llm(setup_model_mock):
node = init_llm_node(
config={
"id": "llm",
"data": {
"title": "123",
"type": "llm",
"model": {"provider": "openai", "name": "gpt-3.5-turbo", "mode": "chat", "completion_params": {}},
"model": {
"provider": "langgenius/openai/openai",
"name": "gpt-3.5-turbo",
"mode": "chat",
"completion_params": {},
},
"prompt_template": [
{"role": "system", "text": "you are a helpful assistant.\ntoday's weather is {{#abc.output#}}."},
{"role": "user", "text": "{{#sys.query#}}"},
@@ -103,38 +102,16 @@ def test_execute_llm(setup_openai_mock):

credentials = {"openai_api_key": os.environ.get("OPENAI_API_KEY")}

provider_instance = ModelProviderFactory("aa").get_provider_instance("openai")
model_type_instance = provider_instance.get_model_instance(ModelType.LLM)
provider_model_bundle = ProviderModelBundle(
configuration=ProviderConfiguration(
tenant_id="1",
provider=provider_instance.get_provider_schema(),
preferred_provider_type=ProviderType.CUSTOM,
using_provider_type=ProviderType.CUSTOM,
system_configuration=SystemConfiguration(enabled=False),
custom_configuration=CustomConfiguration(provider=CustomProviderConfiguration(credentials=credentials)),
model_settings=[],
),
model_type_instance=model_type_instance,
)
model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model="gpt-3.5-turbo")
model_schema = model_type_instance.get_model_schema("gpt-3.5-turbo")
assert model_schema is not None
model_config = ModelConfigWithCredentialsEntity(
# Mock db.session.close()
db.session.close = MagicMock()

node._fetch_model_config = get_mocked_fetch_model_config(
provider="langgenius/openai/openai",
model="gpt-3.5-turbo",
provider="openai",
mode="chat",
credentials=credentials,
parameters={},
model_schema=model_schema,
provider_model_bundle=provider_model_bundle,
)

# Mock db.session.close()
db.session.close = MagicMock()

node._fetch_model_config = MagicMock(return_value=(model_instance, model_config))

# execute node
result = node._run()
assert isinstance(result, Generator)
@@ -149,8 +126,7 @@ def test_execute_llm(setup_openai_mock):


@pytest.mark.parametrize("setup_code_executor_mock", [["none"]], indirect=True)
@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_model_mock):
"""
Test execute LLM node with jinja2
"""
@@ -190,39 +166,16 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):

credentials = {"openai_api_key": os.environ.get("OPENAI_API_KEY")}

provider_instance = ModelProviderFactory().get_provider_instance("openai")
model_type_instance = provider_instance.get_model_instance(ModelType.LLM)
provider_model_bundle = ProviderModelBundle(
configuration=ProviderConfiguration(
tenant_id="1",
provider=provider_instance.get_provider_schema(),
preferred_provider_type=ProviderType.CUSTOM,
using_provider_type=ProviderType.CUSTOM,
system_configuration=SystemConfiguration(enabled=False),
custom_configuration=CustomConfiguration(provider=CustomProviderConfiguration(credentials=credentials)),
model_settings=[],
),
model_type_instance=model_type_instance,
)
# Mock db.session.close()
db.session.close = MagicMock()

model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model="gpt-3.5-turbo")
model_schema = model_type_instance.get_model_schema("gpt-3.5-turbo")
assert model_schema is not None
model_config = ModelConfigWithCredentialsEntity(
node._fetch_model_config = get_mocked_fetch_model_config(
provider="langgenius/openai/openai",
model="gpt-3.5-turbo",
provider="openai",
mode="chat",
credentials=credentials,
parameters={},
model_schema=model_schema,
provider_model_bundle=provider_model_bundle,
)

# Mock db.session.close()
db.session.close = MagicMock()

node._fetch_model_config = MagicMock(return_value=(model_instance, model_config))

# execute node
result = node._run()

@@ -4,7 +4,6 @@
from typing import Optional
from unittest.mock import MagicMock


from core.app.entities.app_invoke_entities import InvokeFrom, ModelConfigWithCredentialsEntity
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.entities.provider_entities import CustomConfiguration, CustomProviderConfiguration, SystemConfiguration
@@ -22,8 +21,8 @@
from models.provider import ProviderType

"""FOR MOCK FIXTURES, DO NOT REMOVE"""
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType # noqa
from tests.integration_tests.model_runtime.__mock.plugin_daemon import setup_model_mock # noqa
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
from tests.integration_tests.model_runtime.__mock.plugin_daemon import setup_model_mock


def get_mocked_fetch_model_config(
