DAOS-16217 test: Update run_local(). #14748

Merged
merged 18 commits into from Jul 30, 2024
Changes from 10 commits
84 changes: 69 additions & 15 deletions src/tests/ftest/harness/unit.py
@@ -16,8 +16,8 @@
:avocado: recursive
"""

def _verify_command_result(self, result, passed, expected, timeout, homogeneous, passed_hosts,

GitHub Actions / Pylint check warning on line 19 in src/tests/ftest/harness/unit.py: too-many-arguments, Too many arguments (12/10)
failed_hosts, all_stdout, all_stderr):
failed_hosts, all_stdout, all_stderr, join_stdout, join_stderr):
"""Verify a CommandResult object.

Args:
@@ -30,6 +30,8 @@
failed_hosts (NodeSet): expected set of hosts on which the command failed
all_stdout (dict): expected stdout str per host key
all_stderr (dict): expected stderr str per host key
join_stdout (str): expected all stdout joined into one string
join_stderr (str): expected all stderr joined into one string
"""
self.assertEqual(passed, result.passed, 'Incorrect CommandResult.passed')
self.assertEqual(len(expected), len(result.output), 'Incorrect CommandResult.output count')
@@ -45,6 +47,8 @@
self.assertEqual(failed_hosts, result.failed_hosts, 'Incorrect CommandResult.failed_hosts')
self.assertEqual(all_stdout, result.all_stdout, 'Incorrect CommandResult.all_stdout')
self.assertEqual(all_stderr, result.all_stderr, 'Incorrect CommandResult.all_stderr')
self.assertEqual(join_stdout, result.joined_stdout, 'Incorrect CommandResult.joined_stdout')
self.assertEqual(join_stderr, result.joined_stderr, 'Incorrect CommandResult.joined_stderr')
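Note on the new assertions: judging from the expectations asserted in the tests below, joined_stdout and joined_stderr flatten the per-host output into a single newline-separated string, skipping hosts with empty output. A minimal conceptual sketch (hypothetical host names; not the run_utils implementation):

    # Conceptual model of CommandResult.joined_stdout / joined_stderr.
    # One entry per host, newline-separated, empty entries dropped.
    per_host_stdout = {'host1': 'GNU/Linux', 'host2': 'GNU/Linux'}
    per_host_stderr = {'host1': '', 'host2': ''}
    joined_stdout = '\n'.join(text for text in per_host_stdout.values() if text)
    joined_stderr = '\n'.join(text for text in per_host_stderr.values() if text)
    assert joined_stdout == 'GNU/Linux\nGNU/Linux'
    assert joined_stderr == ''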

def test_harness_unit_list_unique(self):
"""Verify list_unique().
@@ -249,7 +253,35 @@
passed_hosts=host,
failed_hosts=NodeSet(),
all_stdout={str(host): 'GNU/Linux'},
all_stderr={str(host): ''}
all_stderr={str(host): ''},
join_stdout='GNU/Linux',
join_stderr='',
)
self.log_step('Unit Test Passed')

def test_harness_unit_run_local_separated(self):
"""Verify run_local() with separate stdout and stderr.

:avocado: tags=all
:avocado: tags=vm
:avocado: tags=harness,run_utils
:avocado: tags=HarnessUnitTest,test_harness_unit_run_local_separated
"""
host = get_local_host()
command = 'echo stdout; echo stderr 1>&2'
self.log_step('Verify run_local() w/ separate stdout and stderr')
self._verify_command_result(
result=run_local(self.log, command, stderr=True),
passed=True,
expected=[ResultData(command, 0, host, ['stdout'], ['stderr'], False)],
timeout=False,
homogeneous=True,
passed_hosts=host,
failed_hosts=NodeSet(),
all_stdout={str(host): 'stdout'},
all_stderr={str(host): 'stderr'},
join_stdout='stdout',
join_stderr='stderr',
)
self.log_step('Unit Test Passed')
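Usage note for this new test (behavior inferred from the tests in this file, not from run_utils itself): by default run_local() appears to fold stderr into the stdout stream, while passing stderr=True keeps the two streams separate. A rough standard-library analogy, assuming a POSIX shell:

    # Illustration only -- run_local's actual plumbing may differ.
    import subprocess

    cmd = 'echo stdout; echo stderr 1>&2'
    combined = subprocess.run(cmd, shell=True, text=True,
                              stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    separated = subprocess.run(cmd, shell=True, text=True, capture_output=True)
    print(combined.stdout.splitlines())                        # ['stdout', 'stderr']
    print(separated.stdout.strip(), separated.stderr.strip())  # stdout stderr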

@@ -265,15 +297,17 @@
command = 'echo stderr 1>&2'
self.log_step('Verify run_local() w/ no stdout')
self._verify_command_result(
result=run_local(self.log, command),
result=run_local(self.log, command, stderr=True),
passed=True,
expected=[ResultData(command, 0, host, [], ['stderr'], False)],
timeout=False,
homogeneous=True,
passed_hosts=host,
failed_hosts=NodeSet(),
all_stdout={str(host): ''},
all_stderr={str(host): 'stderr'}
all_stderr={str(host): 'stderr'},
join_stdout='',
join_stderr='stderr',
)
self.log_step('Unit Test Passed')

@@ -291,13 +325,15 @@
self._verify_command_result(
result=run_local(self.log, command),
passed=False,
expected=[ResultData(command, 0, host, ['fail'], [], False)],
expected=[ResultData(command, 1, host, ['fail'], [], False)],
timeout=False,
homogeneous=True,
passed_hosts=NodeSet(),
failed_hosts=host,
all_stdout={str(host): 'fail'},
all_stderr={str(host): ''}
all_stderr={str(host): ''},
join_stdout='fail',
join_stderr='',
)
self.log_step('Unit Test Passed')

@@ -321,7 +357,9 @@
passed_hosts=NodeSet(),
failed_hosts=host,
all_stdout={str(host): 'wait'},
all_stderr={str(host): ''}
all_stderr={str(host): ''},
join_stdout='wait',
join_stderr='',
)
self.log_step('Unit Test Passed')

@@ -345,7 +383,9 @@
passed_hosts=NodeSet(hosts[0]),
failed_hosts=NodeSet(),
all_stdout={hosts[0]: 'GNU/Linux'},
all_stderr={hosts[0]: ''}
all_stderr={hosts[0]: ''},
join_stdout='GNU/Linux',
join_stderr='',
)
self.log_step('Unit Test Passed')

@@ -369,7 +409,9 @@
passed_hosts=hosts,
failed_hosts=NodeSet(),
all_stdout={str(hosts): 'GNU/Linux'},
all_stderr={str(hosts): ''}
all_stderr={str(hosts): ''},
join_stdout='\n'.join(['GNU/Linux'] * len(hosts)),
join_stderr='',
)
self.log_step('Unit Test Passed')

@@ -403,6 +445,8 @@
hosts[0]: '',
hosts[1]: ''
},
join_stdout='\n'.join(hosts),
join_stderr='',
)
self.log_step('Unit Test Passed')

@@ -435,7 +479,9 @@
all_stderr={
hosts[0]: '',
hosts[1]: ''
}
},
join_stdout='stdout\nstdout\nstderr',
join_stderr='',
)
self.log_step('Unit Test Passed')

@@ -468,7 +514,9 @@
all_stderr={
hosts[0]: '',
hosts[1]: 'stderr'
}
},
join_stdout='stdout\nstdout',
join_stderr='stderr',
)
self.log_step('Unit Test Passed')

@@ -501,7 +549,9 @@
all_stderr={
hosts[0]: '',
hosts[1]: 'stderr'
}
},
join_stdout='',
join_stderr='stderr',
)
self.log_step('Unit Test Passed')

@@ -534,7 +584,9 @@
all_stderr={
hosts[0]: '',
hosts[1]: ''
}
},
join_stdout='pass\nfail',
join_stderr='',
)
self.log_step('Unit Test Passed')

@@ -554,7 +606,7 @@
passed=False,
expected=[
ResultData(command, 0, NodeSet(hosts[0]), ['pass'], [], False),
ResultData(command, 1, NodeSet(hosts[1]), ['wait'], [], True),
ResultData(command, 124, NodeSet(hosts[1]), ['wait'], [], True),
],
timeout=True,
homogeneous=False,
@@ -567,6 +619,8 @@
all_stderr={
hosts[0]: '',
hosts[1]: ''
}
},
join_stdout='pass\nwait',
join_stderr='',
)
self.log_step('Unit Test Passed')
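Side note on the corrected return-code expectations in the hunks above: a command ending in exit 1 reports status 1, and 124 is the conventional status for a command killed by a timeout (the same value coreutils timeout uses); how run_utils derives the timeout status is not shown in this diff. A quick sanity check on Linux:

    # Requires a POSIX shell and coreutils `timeout`.
    import subprocess

    failed = subprocess.run('echo fail; exit 1', shell=True, capture_output=True)
    print(failed.returncode)     # 1
    timed_out = subprocess.run('timeout 1 sleep 5', shell=True, capture_output=True)
    print(timed_out.returncode)  # 124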
4 changes: 1 addition & 3 deletions src/tests/ftest/process_core_files.py
@@ -279,7 +279,7 @@ def install_debuginfo_packages(self):
cmds.append(["sudo", "rm", "-f", path])

if self.USE_DEBUGINFO_INSTALL:
dnf_args = ["--exclude", "ompi-debuginfo"]
dnf_args = ["--nobest", "--exclude", "ompi-debuginfo"]
if os.getenv("TEST_RPMS", 'false') == 'true':
if "suse" in self.distro_info.name.lower():
dnf_args.extend(["libpmemobj1", "python3", "openmpi3"])
@@ -382,8 +382,6 @@ def resolve_debuginfo(self, pkg):
command = " ".join(
["rpm", "-q", "--qf", "'%{name} %{version} %{release} %{epoch}'", pkg])
result = run_local(self.log, command)
if not result.passed:
raise RunException(f"Error running {command}")
Contributor:

If this fails, name, version, release, epoch = result.joined_stdout.split() would probably be wrong and this function would still return package_info with weird data

Contributor Author:

Thanks, code updated.

name, version, release, epoch = result.joined_stdout.split()

debuginfo_map = {"glibc": "glibc-debuginfo-common"}
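A hypothetical guard illustrating the concern raised in the review thread above (helper name and return value are assumptions, not the actual follow-up commit):

    # Hypothetical: validate the rpm query result before unpacking it.
    def parse_rpm_query(result):
        """Return (name, version, release, epoch), or None if the query failed."""
        if not result.passed:
            return None
        fields = result.joined_stdout.split()
        if len(fields) != 4:
            return None
        return tuple(fields)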
4 changes: 1 addition & 3 deletions src/tests/ftest/util/collection_utils.py
@@ -16,7 +16,7 @@
# pylint: disable=import-error,no-name-in-module
from util.environment_utils import TestEnvironment
from util.host_utils import get_local_host
from util.run_utils import RunException, find_command, run_local, run_remote, stop_processes

GitHub Actions / Flake8 check failure on line 19 in src/tests/ftest/util/collection_utils.py: F401 'util.run_utils.RunException' imported but unused

GitHub Actions / Pylint check warning on line 19 in src/tests/ftest/util/collection_utils.py: unused-import, Unused RunException imported from util.run_utils
from util.user_utils import get_chown_command
from util.yaml_utils import get_test_category

@@ -709,9 +709,7 @@
return 1024

# Remove latest symlink directory to avoid inclusion in the Jenkins build artifacts
try:
run_local(logger, f"rm -fr '{test_logs_lnk}'")
except RunException:
if not run_local(logger, f"rm -fr '{test_logs_lnk}'").passed:
message = f"Error removing {test_logs_lnk}"
test_result.fail_test(logger, "Process", message, sys.exc_info())
return 1024
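The same pattern recurs throughout this PR: run_local() now reports failure through the returned CommandResult instead of raising RunException, so callers switch from try/except to checking .passed. A simplified before/after sketch (report_failure is a stand-in for the real error handling):

    # Old style: failures surfaced as exceptions.
    #     try:
    #         run_local(logger, command)
    #     except RunException:
    #         report_failure()
    # New style: inspect the returned result object.
    #     if not run_local(logger, command).passed:
    #         report_failure()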
19 changes: 9 additions & 10 deletions src/tests/ftest/util/launch_utils.py
@@ -391,22 +391,22 @@ def execute(self, logger, test, repeat, number, sparse, fail_fast):
start_time = int(time.time())
result = run_local(logger, " ".join(command))
end_time = int(time.time())

return_code = result.output[0].returncode
if not result.passed:
message = f"Error executing {test} on repeat {repeat}"
self.test_result.fail_test(logger, "Execute", message, sys.exc_info())
return_code = 1

if return_code == 0:
logger.debug("All avocado test variants passed")
elif return_code & 2 == 2:
elif return_code & 1 == 1:
logger.debug("At least one avocado test variant failed")
elif return_code & 2 == 2:
logger.debug("At least one avocado job failed")
elif return_code & 4 == 4:
message = "Failed avocado commands detected"
self.test_result.fail_test(logger, "Execute", message)
elif return_code & 8 == 8:
logger.debug("At least one avocado test variant was interrupted")
else:
message = f"Unhandled rc={return_code} while executing {test} on repeat {repeat}"
self.test_result.fail_test(logger, "Execute", message, sys.exc_info())
return_code = 1
if return_code:
self._collect_crash_files(logger)
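For readers unfamiliar with the bitmask decoded by the elif chain above, a small sketch (bit meanings follow the branches in this diff and reflect the harness's interpretation of the avocado exit status; unlike the elif chain, this lists every set bit):

    # Decode an exit status whose failure kinds are OR'ed into individual bits.
    FAIL_BITS = {
        1: "at least one test variant failed",
        2: "at least one avocado job failed",
        4: "failed avocado commands detected",
        8: "at least one test variant was interrupted",
    }

    def describe(return_code):
        """Return a message for every bit set in return_code."""
        if return_code == 0:
            return ["all avocado test variants passed"]
        return [msg for bit, msg in FAIL_BITS.items() if return_code & bit]

    print(describe(0))  # ['all avocado test variants passed']
    print(describe(9))  # bits 1 and 8 -> two messages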

@@ -927,9 +927,8 @@ def list_tests(self, logger, verbose):
logger.info("Detecting tests matching tags: %s", " ".join(command))
result = run_local(logger, " ".join(command))
if not result.passed:
raise RunException("Error running avocaod list")
unique_test_files = set(
re.findall(self._avocado.get_list_regex(), result.joined_stdout.splitlines()))
raise RunException("Error running avocado list")
unique_test_files = set(re.findall(self._avocado.get_list_regex(), result.joined_stdout))
for index, test_file in enumerate(unique_test_files):
self.tests.append(TestInfo(test_file, index + 1, self._yaml_extension))
logger.info(" %s", self.tests[-1])
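The rewritten line also fixes a type issue: re.findall() expects a string, and splitlines() returns a list, so matching against joined_stdout directly is what makes the search work. A small illustration with a hypothetical listing and pattern (the real pattern comes from self._avocado.get_list_regex()):

    import re

    joined_stdout = ("INSTRUMENTED tests/ftest/harness/unit.py\n"
                     "INSTRUMENTED tests/ftest/harness/basic.py")
    list_regex = r"INSTRUMENTED\s+(\S+)"

    # re.findall(list_regex, joined_stdout.splitlines()) raises TypeError:
    # expected string or bytes-like object
    unique_test_files = set(re.findall(list_regex, joined_stdout))
    print(sorted(unique_test_files))
    # ['tests/ftest/harness/basic.py', 'tests/ftest/harness/unit.py']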