Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Qa 7229 Request for performance testing on the new module badges feature for BHA #377

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions LocustScripts/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,3 +3,4 @@ lxml>5.0.0
PyYAML>6.0.0
pydantic>2.0.0
pytest>=8.1.1
coloredlogs
146 changes: 146 additions & 0 deletions LocustScripts/update-scripts/commcarehq-badge-test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,146 @@
import logging

import time
from datetime import datetime

from locust import SequentialTaskSet, between, task, tag, events
from locust.exception import InterruptTaskSet

from user.models import UserDetails, BaseLoginCommCareUser
from common.args import file_path
from common.utils import RandomItems, load_json_data
import coloredlogs

# Module logger, colorized per severity by coloredlogs using the styles below.
logger = logging.getLogger(__name__)
level_styles = {
    'critical': {'color': 'red', 'bold': True},
    'error': {'color': 'red'},
    'warning': {'color': 'yellow'},
    'debug': {'color': 'green', 'bold': True},
    'notset': {'color': 'cyan'},
    'info': {'color': 'white'}
}
coloredlogs.install(logger=logger, level='DEBUG', level_styles=level_styles)  # install a colored handler on this module's logger (not the root logger) at DEBUG level

@events.init_command_line_parser.add_listener
def _(parser):
# Use below command to execute these tests:
# locust -f .\LocustScripts\update-scripts\commcarehq-badge-test.py --domain co-carecoordination-perf --build-id 36f4769e96a5a95048857850a17fa99f --app-id f22041c733f14f9b89723a9358a92a35 --app-config .\LocustScripts\update-scripts\project-config\co-carecoordination-perf\app_config_badge_test.json --user-details .\LocustScripts\update-scripts\project-config\co-carecoordination-perf\mobile_worker_credentials_badge.json

parser.add_argument("--domain", help="CommCare domain", required=True, env_var="COMMCARE_DOMAIN")
parser.add_argument("--build-id", help="CommCare build id", required=True, env_var="COMMCARE_APP_ID")
parser.add_argument("--app-id", help="CommCare app id", required=True, env_var="COMMCARE_APP_ID")
parser.add_argument("--app-config", help="Configuration of CommCare app", required=True)
parser.add_argument("--user-details", help="Path to user details file", required=True)


# Shared state populated once by the locust `init` listener below:
# APP_CONFIG holds the parsed --app-config JSON; USERS_DETAILS holds the
# pool of mobile-worker credentials drawn from --user-details.
APP_CONFIG = {}
USERS_DETAILS = RandomItems()


@events.init.add_listener
def _(environment, **kw):
    """Load the app configuration and the mobile-worker pool once at test start.

    Aborts the run (InterruptTaskSet) if either file cannot be loaded.
    """
    try:
        config = load_json_data(file_path(environment.parsed_options.app_config))
        APP_CONFIG.update(config)
        logger.info("Loaded app config")
    except Exception as e:
        logger.error("Error loading app config: %s", e)
        raise InterruptTaskSet from e

    try:
        raw_users = load_json_data(file_path(environment.parsed_options.user_details))["user"]
        USERS_DETAILS.set([UserDetails(**entry) for entry in raw_users])
        logger.info("Loaded %s users", len(USERS_DETAILS.items))
    except Exception as e:
        logger.error("Error loading users: %s", e)
        raise InterruptTaskSet from e


class WorkloadModelSteps(SequentialTaskSet):
    """Sequential journey through the module-badge screens.

    Order: home screen -> 'Counts' menu -> 'Clinic Counts' menu ->
    submit the clinic-count form -> re-open the 'Counts' menu (timed,
    to observe badge behavior after a submission).
    """

    wait_time = between(3, 7)

    # Menu reloads slower than this many seconds are logged as warnings
    # in count_menu_again (previously a hard-coded 3).
    SLOW_LOAD_THRESHOLD_SECONDS = 3

    def on_start(self):
        # Cache the per-screen expectations loaded from --app-config at init.
        self.FUNC_HOME_SCREEN = APP_CONFIG['FUNC_HOME_SCREEN']
        self.FUNC_COUNT = APP_CONFIG['FUNC_COUNT']
        self.FUNC_CLINIC_COUNT = APP_CONFIG['FUNC_CLINIC_COUNT']
        self.FUNC_CLINIC_COUNT_FORM_SUBMIT = APP_CONFIG['FUNC_CLINIC_COUNT_FORM_SUBMIT']

    @tag('home_screen')
    @task
    def home_screen(self):
        """Open the app home screen and verify its title."""
        self.user.hq_user.navigate_start(expected_title=self.FUNC_HOME_SCREEN['title'])
        logger.info("Open Home Screen for mobile worker %s", self.user.user_detail.username)

    @tag('count_menu')
    @task
    def count_menu(self):
        """Open the 'Counts' menu (first, untimed visit)."""
        self.user.hq_user.navigate(
            "Open 'Counts' Menu",
            data={"selections": [self.FUNC_COUNT['selections']]},
            expected_title=self.FUNC_COUNT['title']
        )
        logger.info("Open 'Counts' Menu for mobile worker %s", self.user.user_detail.username)

    @tag('clinic_count_menu')
    @task
    def clinic_count_menu(self):
        """Open the 'Clinic Counts' menu and remember its session id for the form submit."""
        data = self.user.hq_user.navigate(
            "Open 'Clinic Counts' Menu",
            data={"selections": self.FUNC_CLINIC_COUNT['selections']},
            expected_title=self.FUNC_CLINIC_COUNT['title']
        )
        self.session_id = data['session_id']
        # BUGFIX: this log call previously sat *after* the `return`,
        # so it was unreachable and never executed.
        logger.info(
            "Open 'Clinic Counts' Menu with session id: %s for mobile worker %s",
            self.session_id, self.user.user_detail.username
        )
        return self.session_id

    @tag('submit_clinic_count_form')
    @task
    def submit_clinic_count_form(self):
        """Submit the clinic-count form using the session captured by clinic_count_menu."""
        extra_json = {
            "answers": {0: "OK"},
            "prevalidated": True,
            "debuggerEnabled": True,
            "session_id": self.session_id,
        }
        self.user.hq_user.submit_all(
            "Submit Clinic Count Form",
            extra_json,
            expected_response_message=self.FUNC_CLINIC_COUNT_FORM_SUBMIT['submitResponseMessage']
        )
        logger.info(
            "Clinic Count Form submitted successfully - user:%s and session id: %s ; request: submit_all",
            self.user.user_detail.username, self.session_id
        )

    @tag('count_menu_again')
    @task
    def count_menu_again(self):
        """Re-open the 'Counts' menu after the submission and time the load.

        Loads within SLOW_LOAD_THRESHOLD_SECONDS are logged at DEBUG;
        slower ones at WARNING.
        """
        start_time = datetime.now()
        self.user.hq_user.navigate(
            "Open 'Counts' Menu After Form Submission",
            data={"selections": [self.FUNC_COUNT['selections']]},
            expected_title=self.FUNC_COUNT['title'],
            commands_list=self.FUNC_COUNT['commands']
        )
        total_time = (datetime.now() - start_time).total_seconds()
        log = logger.debug if total_time <= self.SLOW_LOAD_THRESHOLD_SECONDS else logger.warning
        log(
            "Open 'Counts' Menu load time for mobile worker %s is : %s seconds",
            self.user.user_detail.username, total_time
        )

class LoginCommCareHQWithUniqueUsers(BaseLoginCommCareUser):
    """Locust user that logs in as a unique mobile worker and runs the badge workload."""

    tasks = [WorkloadModelSteps]
    wait_time = between(3, 7)

    def on_start(self):
        # Hand the parsed CLI options and the shared credential pool
        # to the base login flow.
        opts = self.environment.parsed_options
        super().on_start(
            domain=opts.domain,
            host=opts.host,
            user_details=USERS_DETAILS,
            build_id=opts.build_id,
            app_id=opts.app_id,
        )
61 changes: 37 additions & 24 deletions LocustScripts/update-scripts/formplayer.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,10 @@
import logging
from dataclasses import dataclass, field
from typing import List, Dict, Optional
from typing import List, Dict, Optional, Union
import coloredlogs

logger = logging.getLogger(__name__)
coloredlogs.install(isatty=True, logger=logger, level='DEBUG')

def post(command, client, app_details, user_details, extra_json=None, name=None, validation=None):
formplayer_host = "/formplayer"
Expand All @@ -26,47 +29,57 @@ def post(command, client, app_details, user_details, extra_json=None, name=None,
catch_response=True
) as response:
if command == 'submit-all':
logging.info(f"{formplayer_host}/{command}/")
logging.info("json submitted: "+ str(data))
logging.info("response"+str(response.json()))
logger.info(f"{formplayer_host}/{command}/")
# logger.info("json submitted: "+ str(data))
# logger.info("response"+str(response.json()))
if validation:
validate_response(response, validation)
return response.json()


@dataclass
class ValidationCriteria:
    """Expected checks to run against a formplayer JSON response.

    key_value_pairs: expected key -> value (or, for 'commands'-style keys,
    a list of expected command dicts) to look for in the response.
    length_check: key -> expected element count of the response value.
    """
    # BUGFIX: the field was declared twice (the narrower Dict-only form
    # immediately followed by the widened Union form); the first
    # declaration was dead code, overridden by the second. Keep only the
    # widened form — default behavior is unchanged (empty dict).
    key_value_pairs: Optional[Union[Dict[str, Optional[str]], List[Dict[str, Optional[str]]]]] = field(default_factory=dict)
    length_check: Optional[Dict[str, int]] = field(default_factory=dict)


def validate_response(response, validation: ValidationCriteria):
data = response.json()
for checkKey, checkValue in validation.key_value_pairs.items():
checkLen = validation.length_check.get(checkKey, None)
if "notification" in data and data["notification"]:
if data["notification"]["type"] == "error":
msg = "ERROR::-" + data["notification"]["message"]
if "commands" in checkKey:
if isinstance(checkValue, dict):
data_command = data["commands"]
for dicts in data_command:
all(item in checkValue for item in dicts)
else:
msg = "ERROR::- mismatch in values" + checkValue + " and " + data["commands"]
response.failure(msg)
raise FormplayerResponseError("ERROR::-" + data["notification"]["message"])
if "exception" in data:
msg = "ERROR::exception error--" + data['exception']
response.failure(msg)
raise FormplayerResponseError(msg)
elif checkKey and checkKey not in data:
msg = "error::" + checkKey + " not in data"
response.failure(msg)
raise FormplayerResponseError(msg)
elif checkKey and checkLen:
if len(data[checkKey]) != checkLen:
msg = "ERROR::len(data['" + checkKey + "']) != " + checkLen
raise FormplayerResponseError("ERROR::- mismatch in values" + checkValue + " and " + data["commands"])
else:
checkLen = validation.length_check.get(checkKey, None)
if "notification" in data and data["notification"]:
if data["notification"]["type"] == "error":
msg = "ERROR::-" + data["notification"]["message"]
response.failure(msg)
raise FormplayerResponseError("ERROR::-" + data["notification"]["message"])
if "exception" in data:
msg = "ERROR::exception error--" + data['exception']
response.failure(msg)
raise FormplayerResponseError(msg)
elif checkKey and checkValue:
if checkValue not in data[checkKey]:
msg = "ERROR::data['" + checkKey + "'], " + data[checkKey] + " does not have " + checkValue
elif checkKey and checkKey not in data:
msg = "error::" + checkKey + " not in data"
response.failure(msg)
raise FormplayerResponseError(msg)
elif checkKey and checkLen:
if len(data[checkKey]) != checkLen:
msg = "ERROR::len(data['" + checkKey + "']) != " + checkLen
response.failure(msg)
raise FormplayerResponseError(msg)
elif checkKey and checkValue:
if checkValue not in data[checkKey]:
msg = "ERROR::data['" + checkKey + "'], " + data[checkKey] + " does not have " + checkValue
response.failure(msg)
raise FormplayerResponseError(msg)


class FormplayerResponseError(Exception):
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
{
"FUNC_HOME_SCREEN": {
"title":"Badge test"
},
"FUNC_COUNT": {
"selections":"0", "title":"counts",
"commands": [
{
"index": 0,
"displayText": "clinic count",
"navigationState": "JUMP",
"badgeText": "1850"
},
{
"index": 1,
"displayText": "referal count",
"navigationState": "JUMP",
"badgeText": "132965"
},
{
"index": 2,
"displayText": "referal count one clinic",
"navigationState": "JUMP",
"badgeText": "2"
},
{
"index": 3,
"displayText": "referal count multiple clinics",
"navigationState": "JUMP",
"badgeText": "1035"
},
{
"index": 4,
"displayText": "referal count multiple clinics and open time",
"navigationState": "JUMP",
"badgeText": "28"
},
{
"index": 5,
"displayText": "user clinic count",
"navigationState": "JUMP",
"badgeText": "2"
}
]
},
"FUNC_CLINIC_COUNT": {
"selections":["0","0"], "title":"clinic count"
},
"FUNC_CLINIC_COUNT_FORM_SUBMIT": {
"submitResponseMessage":"'clinic count' successfully saved!"
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
host: https://www.commcarehq.org
domain: co-carecoordination-perf
app_id: f22041c733f14f9b89723a9358a92a35
build_id: 36f4769e96a5a95048857850a17fa99f
domain_user_credential: LocustScripts/update-scripts/project-config/co-carecoordination-perf/mobile_worker_credentials_badge.json
owner_id: 874a15d630924c63bd364d300fc14059
app_config_bed_tracking_tool: LocustScripts/update-scripts/project-config/co-carecoordination-perf/app_config_badge_test.json
Loading
Loading