Commit

feat: POC automation test for AI using python
JIRA: QA-23855
risk: nonprod
Tubt committed Jan 3, 2025
1 parent 40b351c commit a33b1a0
Showing 26 changed files with 1,025 additions and 1 deletion.
2 changes: 2 additions & 0 deletions .gitignore
@@ -16,3 +16,5 @@ docs/resources/_gen
docs/tmp/
docs/versioned_docs
docs/.hugo_build.lock
+
+integration_tests/**/__pycache__
18 changes: 17 additions & 1 deletion gooddata-sdk/gooddata_sdk/compute/service.py
@@ -105,5 +105,23 @@ def ai_chat_history_reset(self, workspace_id: str) -> None:
         Args:
             workspace_id: workspace identifier
         """
-        chat_history_request = ChatHistoryRequest(reset=True)
+        chat_history_request = ChatHistoryRequest(
+            reset=True,
+        )
         self._actions_api.ai_chat_history(workspace_id, chat_history_request, _check_return_type=False)
+
+    def ai_chat_history_user_feedback(
+        self, workspace_id: str, chat_history_interaction_id: int = 0, user_feedback: str = "POSITIVE"
+    ) -> None:
+        """
+        Send user feedback on a chat history interaction with AI in a GoodData workspace.
+
+        Args:
+            workspace_id: workspace identifier
+            chat_history_interaction_id: identifier of the interaction the feedback applies to
+            user_feedback: feedback value (e.g. "POSITIVE")
+        """
+        chat_history_request = ChatHistoryRequest(
+            chat_history_interaction_id=chat_history_interaction_id, user_feedback=user_feedback
+        )
+        self._actions_api.ai_chat_history(workspace_id, chat_history_request, _check_return_type=False)
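For reference, a minimal usage sketch of the new feedback method through the public SDK surface. The host, token, workspace ID, and question below are placeholders; the interaction ID is read from a prior ai_chat response, mirroring the scenario tests later in this commit:

# Minimal sketch: rate a chat interaction (placeholder host/token/workspace).
from gooddata_sdk import GoodDataSdk

sdk = GoodDataSdk.create(host_="https://example.gooddata.com", token_="<token>")
workspace_id = "<workspace_id>"

# Ask a question, then send positive feedback on the interaction it created.
chat_res = sdk.compute.ai_chat(workspace_id, question="What is the number of orders?")
interaction_id = chat_res["chat_history_interaction_id"]
sdk.compute.ai_chat_history_user_feedback(workspace_id, interaction_id, "POSITIVE")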
6 changes: 6 additions & 0 deletions integration_tests/.env.template
@@ -0,0 +1,6 @@
# (C) 2024 GoodData Corporation
HOST=
TOKEN=
DATASOURCE_ID=
WORKSPACE_ID=
LLM_TOKEN=
1 change: 1 addition & 0 deletions integration_tests/__init__.py
@@ -0,0 +1 @@
# (C) 2021 GoodData Corporation
34 changes: 34 additions & 0 deletions integration_tests/conftest.py
@@ -0,0 +1,34 @@
# (C) 2024 GoodData Corporation
import os

import pytest
from dotenv import load_dotenv

# Load the .env file from the current directory
load_dotenv()


@pytest.fixture(scope="session", autouse=True)
def setup_env():
# Ensure that the environment variables are set
os.environ["HOST"] = os.getenv("HOST", "https://checklist.staging.stg11.panther.intgdc.com")
os.environ["TOKEN"] = os.getenv("TOKEN", "")
os.environ["DATASOURCE_ID"] = os.getenv("DATASOURCE_ID", "")
os.environ["WORKSPACE_ID"] = os.getenv("WORKSPACE_ID", "")
os.environ["DATASOURCE_TYPE"] = os.getenv("DATASOURCE_TYPE", "")
os.environ["DATASOURCE_PASSWORD"] = os.getenv("DATASOURCE_PASSWORD", "")

# Check if the necessary environment variables are set
if not os.environ["HOST"]:
raise OSError("\nHOST environment variable is not set.")
if not os.environ["TOKEN"]:
raise OSError("\nTOKEN environment variable is not set.")
if not os.environ["DATASOURCE_ID"]:
print("\nWarning: DATA_SOURCE_ID environment variable is not set.")
if not os.environ["WORKSPACE_ID"]:
print("\nWarning: WORKSPACE_ID environment variable is not set.")
if not os.environ["DATASOURCE_TYPE"]:
print("\nWarning: DATASOURCE_TYPE environment variable is not set.")
if not os.environ["DATASOURCE_PASSWORD"]:
print("\nWarning: DATASOURCE_PASSWORD environment variable is not set.")
30 changes: 30 additions & 0 deletions integration_tests/expected/column_total_returns_by_month.json
@@ -0,0 +1,30 @@
{
"id": "total_returns_per_month",
"title": "Total Returns per Month",
"visualizationType": "COLUMN",
"metrics": [
{
"id": "total_returns",
"type": "metric",
"title": "Total Returns"
}
],
"dimensionality": [
{
"id": "return_date.month",
"type": "attribute",
"title": "Return date - Month/Year"
}
],
"filters": [],
"suggestions": [
{
"query": "Switch to a line chart to better visualize the trend of total returns over the months.",
"label": "Line Chart for Trends"
},
{
"query": "Filter the data to show total returns for this year only.",
"label": "This Year's Returns"
}
]
}
21 changes: 21 additions & 0 deletions integration_tests/expected/headline_count_of_order.json
@@ -0,0 +1,21 @@
{
"id": "number_of_order_ids",
"title": "Number of Order IDs",
"visualizationType": "HEADLINE",
"metrics": [
{
"id": "order_id",
"type": "attribute",
"title": "Number of Order IDs",
"aggFunction": "COUNT"
}
],
"dimensionality": [],
"filters": [],
"suggestions": [
{
"query": "Show the number of orders by year",
"label": "Show by Year"
}
]
}
10 changes: 10 additions & 0 deletions integration_tests/fixtures/ai_questions.json
@@ -0,0 +1,10 @@
[
{
"question": "What is number of order id, show as HEADLINE chart?",
"expected_objects_file": "headline_count_of_order.json"
},
{
"question": "What is total returns per month? show as COLUMN chart",
"expected_objects_file": "column_total_returns_by_month.json"
}
]
1 change: 1 addition & 0 deletions integration_tests/scenarios/__init__.py
@@ -0,0 +1 @@
# (C) 2021 GoodData Corporation
56 changes: 56 additions & 0 deletions integration_tests/scenarios/aiChat.py
@@ -0,0 +1,56 @@
# (C) 2024 GoodData Corporation
import os
import sys
from pprint import pprint

import pytest
from dotenv import load_dotenv
from gooddata_sdk import GoodDataSdk

SCRIPTS_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(SCRIPTS_DIR)

# Load environment variables from the .env file
load_dotenv()

# Create the test_config dictionary with the loaded environment variables
test_config = {"host": os.getenv("HOST"), "token": os.getenv("TOKEN")}
workspace_id = os.getenv("WORKSPACE_ID")

questions = ["What is number of order line id ?"]
sdk = GoodDataSdk.create(host_=test_config["host"], token_=test_config["token"])


def test_reset_chat_history():
sdk.compute.ai_chat_history_reset(workspace_id)


@pytest.mark.parametrize("question", questions)
def test_ask_ai(question):
chat_ai_res = sdk.compute.ai_chat(workspace_id, question=question)
pprint(chat_ai_res.to_dict())
assert chat_ai_res["created_visualizations"] is not None, "Created visualizations should not be None"
assert chat_ai_res["routing"] is not None, "Routing should not be None"


def test_ai_chat_history():
chat_ai_res = sdk.compute.ai_chat(workspace_id, question="show me a headline generating net sales and net order")
chat_history_interaction_id = chat_ai_res["chat_history_interaction_id"]
pprint(chat_history_interaction_id)
chat_history_res = sdk.compute.ai_chat_history(workspace_id, chat_history_interaction_id)
sdk.compute.ai_chat_history_user_feedback(workspace_id, chat_history_interaction_id, "POSITIVE")
pprint(chat_history_res.to_dict())


def test_get_chat_history():
chat_history_res = sdk.compute.ai_chat_history(workspace_id)
pprint(chat_history_res.to_dict())
assert chat_history_res["interactions"] is not None, "Interactions should not be None"
assert (
chat_history_res["interactions"][0]["question"] == "What is number of order line id ?"
), "First interaction question should match"


if __name__ == "__main__":
pytest.main()
97 changes: 97 additions & 0 deletions integration_tests/scenarios/chatHistory.py
@@ -0,0 +1,97 @@
# (C) 2024 GoodData Corporation

import os
from pathlib import Path
from pprint import pprint

import gooddata_api_client
import pytest
from dotenv import load_dotenv
from gooddata_api_client.api import smart_functions_api
from gooddata_api_client.model.chat_history_request import ChatHistoryRequest
from gooddata_api_client.model.chat_request import ChatRequest

from integration_tests.scenarios.utils import compare_and_print_diff, load_json, normalize_metrics

_current_dir = Path(__file__).parent.absolute()
parent_dir = _current_dir.parent
expected_object_dir = parent_dir / "expected"
questions_list_path = parent_dir / "fixtures" / "ai_questions.json"

# Load environment variables from the .env file
load_dotenv()


@pytest.fixture(scope="module")
def test_config():
return {
"host": os.getenv("HOST"),
"token": os.getenv("TOKEN"),
"workspace_id": os.getenv("WORKSPACE_ID"),
"llm_token": os.getenv("LLM_TOKEN"),
}


@pytest.fixture(scope="module")
def api_client(test_config):
configuration = gooddata_api_client.Configuration(host=test_config["host"])
api_client = gooddata_api_client.ApiClient(configuration)
api_client.default_headers["Authorization"] = f"Bearer {test_config['token']}"
return api_client


def validate_response(actual_response, expected_response):
actual_metrics = normalize_metrics(
actual_response["created_visualizations"]["objects"][0]["metrics"], exclude_keys=["title"]
)
expected_metrics = normalize_metrics(expected_response["metrics"], exclude_keys=["title"])
compare_and_print_diff(actual_metrics, expected_metrics, "Metrics")
actual_visualization_type = actual_response["created_visualizations"]["objects"][0]["visualization_type"]
expected_visualization_type = expected_response["visualizationType"]
compare_and_print_diff(actual_visualization_type, expected_visualization_type, "Visualization type")
actual_dimensionality = actual_response["created_visualizations"]["objects"][0]["dimensionality"]
expected_dimensionality = expected_response["dimensionality"]
compare_and_print_diff(actual_dimensionality, expected_dimensionality, "Dimensionality")
actual_filters = actual_response["created_visualizations"]["objects"][0]["filters"]
expected_filters = expected_response["filters"]
compare_and_print_diff(actual_filters, expected_filters, "Filters")


def test_ai_chat_history_reset(api_client, test_config):
api_instance = smart_functions_api.SmartFunctionsApi(api_client)
chat_history_request = ChatHistoryRequest(reset=True)
try:
api_response = api_instance.ai_chat_history(test_config["workspace_id"], chat_history_request)
pprint(api_response)
except gooddata_api_client.ApiException as e:
pytest.fail(f"API exception: {e}")
except Exception as e:
pytest.fail(f"Unexpected error: {e}")


questions_list = load_json(questions_list_path)


@pytest.mark.parametrize(
"question, expected_file",
[(item["question"], item["expected_objects_file"]) for item in questions_list],
ids=[item["question"] for item in questions_list],
)
def test_ai_chat(api_client, test_config, question, expected_file):
expected_objects = load_json(os.path.join(expected_object_dir, expected_file))
api_instance = smart_functions_api.SmartFunctionsApi(api_client)
try:
api_response = api_instance.ai_chat(test_config["workspace_id"], ChatRequest(question=question))
print("\napi_response", api_response.created_visualizations.objects[0])
print("\nexpected_file", expected_objects)

validate_response(api_response.to_dict(), expected_objects)

except gooddata_api_client.ApiException as e:
pytest.fail(f"API exception: {e}")
except Exception as e:
pytest.fail(f"Unexpected error: {e}")


if __name__ == "__main__":
pytest.main(["-s", __file__])