diff --git a/spec/plans/discover.fmf b/spec/plans/discover.fmf
index 5ce770fcd1..18a7bb8655 100644
--- a/spec/plans/discover.fmf
+++ b/spec/plans/discover.fmf
@@ -30,6 +30,7 @@ description: |
            result: respect
            tag: [tag]
            tier: 1
+           serialnumber: 1
 
        - name: /test/two
          summary: Short test summary.
diff --git a/tests/discover/main.fmf b/tests/discover/main.fmf
index 9c6aaa869b..8ebc12fa2a 100644
--- a/tests/discover/main.fmf
+++ b/tests/discover/main.fmf
@@ -56,3 +56,7 @@
 /force:
     summary: Force run of discover finds tests
     test: ./force.sh
+
+/serial-number:
+    summary: Test test serial number assignment
+    test: ./serial-number.sh
diff --git a/tests/discover/order.sh b/tests/discover/order.sh
index 33eabb47a6..5faf98f885 100755
--- a/tests/discover/order.sh
+++ b/tests/discover/order.sh
@@ -6,14 +6,14 @@
 function assert_execution_order(){
     local input_file="$tmp/run/plans${plan}/data/execution_order"
     sed -n 's;^.*execute/data\(.\+\)/data;\1;p' "$input_file" > $tmp/outcome
-    rlRun "diff -pu $tmp/outcome $tmp/EXPECTED"
+    rlRun "diff -pu $tmp/outcome $tmp/EXPECTED-EXECUTION" 0 "Verify execution order"
 }
 
 function assert_discover_order(){
     local input_file="$rlRun_LOG"
     # Discovered tests are printed with leading spaces
     sed -n 's;^ \+\(/.\+\);\1;p' "$input_file" > $tmp/outcome
-    rlRun "diff -pu $tmp/outcome $tmp/EXPECTED"
+    rlRun "diff -pu $tmp/outcome $tmp/EXPECTED-DISCOVERY" 0 "Verify discovery order"
 }
 
 function run_test(){
@@ -43,55 +43,83 @@ rlJournalStart
     ### New test begins
     plan="/single-without-order-tag"
-    cat > $tmp/EXPECTED < $tmp/EXPECTED-DISCOVERY < $tmp/EXPECTED-EXECUTION < $tmp/EXPECTED < $tmp/EXPECTED-DISCOVERY < $tmp/EXPECTED-EXECUTION < $tmp/EXPECTED < $tmp/EXPECTED-DISCOVERY < $tmp/EXPECTED-EXECUTION < $tmp/EXPECTED < $tmp/EXPECTED-DISCOVERY < $tmp/EXPECTED-EXECUTION < $tmp/EXPECTED < $tmp/EXPECTED-DISCOVERY < $tmp/EXPECTED-EXECUTION < $tmp/EXPECTED < $tmp/EXPECTED-DISCOVERY < $tmp/EXPECTED-EXECUTION < $tmp/EXPECTED < $tmp/EXPECTED-DISCOVERY < $tmp/EXPECTED-EXECUTION <
&1 >/dev/null" 2 "Test does not provide 'results.yaml' file"
-        rlAssertGrep "custom results file '/tmp/.*/plans/default/execute/data/test/missing-custom-results/data/results.yaml' not found" $rlRun_LOG
+        rlAssertGrep "custom results file '/tmp/.*/plans/default/execute/data/test/missing-custom-results-1/data/results.yaml' not found" $rlRun_LOG
     rlPhaseEnd
 
     testName="/test/empty-custom-results-file"
diff --git a/tests/execute/upgrade/ignore-test.sh b/tests/execute/upgrade/ignore-test.sh
index c256af7898..bcae98c3d8 100755
--- a/tests/execute/upgrade/ignore-test.sh
+++ b/tests/execute/upgrade/ignore-test.sh
@@ -20,10 +20,10 @@ rlJournalStart
        rlAssertGrep "3 tests passed" $rlRun_LOG
        # Check that the IN_PLACE_UPGRADE variable was set
        data="$run/plan/no-path/execute/data"
-       rlAssertGrep "IN_PLACE_UPGRADE=old" "$data/old/test/output.txt"
-       rlAssertGrep "IN_PLACE_UPGRADE=new" "$data/new/test/output.txt"
+       rlAssertGrep "IN_PLACE_UPGRADE=old" "$data/old/test-1/output.txt"
+       rlAssertGrep "IN_PLACE_UPGRADE=new" "$data/new/test-1/output.txt"
        # No upgrade path -> no environment variable
-       rlAssertNotGrep "VERSION_ID=$PREVIOUS_VERSION" "$data/upgrade/tasks/prepare/output.txt"
+       rlAssertNotGrep "VERSION_ID=$PREVIOUS_VERSION" "$data/upgrade/tasks/prepare-0/output.txt"
    rlPhaseEnd
 
    rlPhaseStartCleanup
tests passed" $rlRun_LOG # Check that the IN_PLACE_UPGRADE variable was set data="$run/plan/path/execute/data" - rlAssertGrep "IN_PLACE_UPGRADE=old" "$data/old/test/output.txt" - rlAssertGrep "IN_PLACE_UPGRADE=new" "$data/new/test/output.txt" + rlAssertGrep "IN_PLACE_UPGRADE=old" "$data/old/test-1/output.txt" + rlAssertGrep "IN_PLACE_UPGRADE=new" "$data/new/test-1/output.txt" # Environment of plan was passed - rlAssertGrep "VERSION_ID=$PREVIOUS_VERSION" "$data/upgrade/tasks/prepare/output.txt" + rlAssertGrep "VERSION_ID=$PREVIOUS_VERSION" "$data/upgrade/tasks/prepare-0/output.txt" rlPhaseEnd done diff --git a/tests/execute/upgrade/simple.sh b/tests/execute/upgrade/simple.sh index b27ea014d2..b98695c78f 100755 --- a/tests/execute/upgrade/simple.sh +++ b/tests/execute/upgrade/simple.sh @@ -20,10 +20,10 @@ rlJournalStart rlAssertGrep "3 tests passed" $rlRun_LOG # Check that the IN_PLACE_UPGRADE variable was set data="$run/plan/no-path/execute/data" - rlAssertGrep "IN_PLACE_UPGRADE=old" "$data/old/test/output.txt" - rlAssertGrep "IN_PLACE_UPGRADE=new" "$data/new/test/output.txt" + rlAssertGrep "IN_PLACE_UPGRADE=old" "$data/old/test-1/output.txt" + rlAssertGrep "IN_PLACE_UPGRADE=new" "$data/new/test-1/output.txt" # No upgrade path -> no environment variable - rlAssertNotGrep "VERSION_ID=$PREVIOUS_VERSION" "$data/upgrade/tasks/prepare/output.txt" + rlAssertNotGrep "VERSION_ID=$PREVIOUS_VERSION" "$data/upgrade/tasks/prepare-0/output.txt" rlPhaseEnd rlPhaseStartCleanup diff --git a/tests/finish/prune/test.sh b/tests/finish/prune/test.sh index f7b6d3672e..277b0918e3 100755 --- a/tests/finish/prune/test.sh +++ b/tests/finish/prune/test.sh @@ -14,7 +14,7 @@ rlJournalStart rlAssertNotExists $tmp1/plan/discover/default-0 rlAssertNotExists $tmp1/plan/discover/default-1 rlAssertExists $tmp1/plan/data/out-plan.txt - rlAssertExists $tmp1/plan/execute/data/default-2/write/test-data/data/out-test.txt + rlAssertExists $tmp1/plan/execute/data/default-2/write/test-data-3/data/out-test.txt for step in discover execute finish prepare provision report; do rlAssertExists $tmp1/plan/$step/step.yaml done @@ -37,7 +37,7 @@ rlJournalStart rlAssertExists $tmp2/plan/discover/default-0 rlAssertExists $tmp2/plan/discover/default-1 rlAssertExists $tmp2/plan/data/out-plan.txt - rlAssertExists $tmp2/plan/execute/data/default-2/write/test-data/data/out-test.txt + rlAssertExists $tmp2/plan/execute/data/default-2/write/test-data-3/data/out-test.txt for step in discover execute finish prepare provision report; do rlAssertExists $tmp2/plan/$step/step.yaml done diff --git a/tests/pull/results/test.sh b/tests/pull/results/test.sh index c3babf9aaa..1132c412d7 100755 --- a/tests/pull/results/test.sh +++ b/tests/pull/results/test.sh @@ -15,10 +15,10 @@ rlJournalStart # Check output and extra logs in the test data directory data="$run/plan/execute/data" - rlAssertGrep "ok" "$data/test/good/output.txt" - rlAssertGrep "ko" "$data/test/bad/output.txt" - rlAssertGrep "extra good" "$data/test/good/data/extra.log" - rlAssertGrep "extra bad" "$data/test/bad/data/extra.log" + rlAssertGrep "ok" "$data/test/good-3/output.txt" + rlAssertGrep "ko" "$data/test/bad-1/output.txt" + rlAssertGrep "extra good" "$data/test/good-3/data/extra.log" + rlAssertGrep "extra bad" "$data/test/bad-1/data/extra.log" # Check logs in the plan data directory rlAssertGrep "common good" "$run/plan/data/log.txt" @@ -31,14 +31,14 @@ rlJournalStart # Check beakerlib's backup directory pull if [[ "$method" =~ local|container ]]; then # No pull happened so it shoud be present 
- rlAssertExists "$data/test/beakerlib/backup" - rlAssertExists "$data/test/beakerlib/backup-NS1" - rlAssertNotEquals "any backup dir is present" "$(eval 'echo $data/test/beakerlib/backup*')" "$data/test/beakerlib/backup*" + rlAssertExists "$data/test/beakerlib-2/backup" + rlAssertExists "$data/test/beakerlib-2/backup-NS1" + rlAssertNotEquals "any backup dir is present" "$(eval 'echo $data/test/beakerlib-2/backup*')" "$data/test/beakerlib-2/backup*" else # Should be ignored - rlAssertNotExists "$data/test/beakerlib/backup" - rlAssertNotExists "$data/test/beakerlib/backup-NS1" - rlAssertEquals "no backup dir is present" "$(eval 'echo $data/test/beakerlib/backup*')" "$data/test/beakerlib/backup*" + rlAssertNotExists "$data/test/beakerlib-2/backup" + rlAssertNotExists "$data/test/beakerlib-2/backup-NS1" + rlAssertEquals "no backup dir is present" "$(eval 'echo $data/test/beakerlib-2/backup*')" "$data/test/beakerlib-2/backup*" fi rlPhaseEnd done diff --git a/tests/unit/test_report_junit.py b/tests/unit/test_report_junit.py index 3179691239..9e22dfeba7 100644 --- a/tests/unit/test_report_junit.py +++ b/tests/unit/test_report_junit.py @@ -128,7 +128,7 @@ def assert_xml(actual_filepath, expected): class TestStateMapping: def test_pass(self, report_fix): report, results, out_file_path = report_fix - results.append(Result(result=ResultOutcome.PASS, name="/pass")) + results.append(Result(result=ResultOutcome.PASS, name="/pass", serialnumber=1)) report.go() @@ -142,7 +142,7 @@ def test_pass(self, report_fix): def test_info(self, report_fix): report, results, out_file_path = report_fix - results.append(Result(result=ResultOutcome.INFO, name="/info")) + results.append(Result(result=ResultOutcome.INFO, name="/info", serialnumber=1)) report.go() assert_xml(out_file_path, """ @@ -157,7 +157,7 @@ def test_info(self, report_fix): def test_warn(self, report_fix): report, results, out_file_path = report_fix - results.append(Result(result=ResultOutcome.WARN, name="/warn")) + results.append(Result(result=ResultOutcome.WARN, name="/warn", serialnumber=1)) report.go() assert_xml(out_file_path, """ @@ -172,7 +172,7 @@ def test_warn(self, report_fix): def test_error(self, report_fix): report, results, out_file_path = report_fix - results.append(Result(result=ResultOutcome.ERROR, name="/error")) + results.append(Result(result=ResultOutcome.ERROR, name="/error", serialnumber=1)) report.go() assert_xml(out_file_path, """ @@ -187,7 +187,7 @@ def test_error(self, report_fix): def test_fail(self, report_fix): report, results, out_file_path = report_fix - results.append(Result(result=ResultOutcome.FAIL, name="/fail")) + results.append(Result(result=ResultOutcome.FAIL, name="/fail", serialnumber=1)) report.go() assert_xml(out_file_path, """ diff --git a/tmt/base.py b/tmt/base.py index b3c02c1178..78144937ba 100644 --- a/tmt/base.py +++ b/tmt/base.py @@ -3,14 +3,15 @@ import copy import dataclasses import enum +import itertools import os import re import shutil import sys import time from typing import (TYPE_CHECKING, Any, Callable, ClassVar, Dict, Generator, - Iterable, List, Optional, Sequence, Tuple, TypeVar, Union, - cast) + Iterable, Iterator, List, Optional, Sequence, Tuple, + TypeVar, Union, cast) import fmf import fmf.base @@ -702,6 +703,8 @@ class Test(Core, tmt.export.Exportable['Test']): duration: str = DEFAULT_TEST_DURATION_L1 result: str = 'respect' + serialnumber: int = 0 + returncode: Optional[int] = None real_duration: Optional[str] = None _reboot_count: int = 0 @@ -1177,6 +1180,22 @@ def 
diff --git a/tmt/base.py b/tmt/base.py
index b3c02c1178..78144937ba 100644
--- a/tmt/base.py
+++ b/tmt/base.py
@@ -3,14 +3,15 @@
 import copy
 import dataclasses
 import enum
+import itertools
 import os
 import re
 import shutil
 import sys
 import time
 from typing import (TYPE_CHECKING, Any, Callable, ClassVar, Dict, Generator,
-                    Iterable, List, Optional, Sequence, Tuple, TypeVar, Union,
-                    cast)
+                    Iterable, Iterator, List, Optional, Sequence, Tuple,
+                    TypeVar, Union, cast)
 
 import fmf
 import fmf.base
@@ -702,6 +703,8 @@ class Test(Core, tmt.export.Exportable['Test']):
     duration: str = DEFAULT_TEST_DURATION_L1
     result: str = 'respect'
 
+    serialnumber: int = 0
+
     returncode: Optional[int] = None
     real_duration: Optional[str] = None
     _reboot_count: int = 0
@@ -1177,6 +1180,22 @@ def _expand_node_data(self, data: T, fmf_context: Dict[str, str]) -> T:
                 data[i] = self._expand_node_data(item, fmf_context)
         return data
 
+    # TODO: better, more elaborate ways of assigning serial numbers to tests
+    # can be devised - for now we start with a really trivial one: each test
+    # gets one, starting with `1`.
+    #
+    # For now, the test itself is not important, and it's part of the method
+    # signature to leave the door open for more sophisticated methods that
+    # might depend on the actual test properties. Our simple "increment by 1"
+    # method does not need it.
+    _test_serial_number_generator: Optional[Iterator[int]] = None
+
+    def draw_test_serial_number(self, test: Test) -> int:
+        if self._test_serial_number_generator is None:
+            self._test_serial_number_generator = itertools.count(start=1, step=1)
+
+        return next(self._test_serial_number_generator)
+
     @property
     def environment(self) -> EnvironmentType:
         """ Return combined environment from plan data and command line """
diff --git a/tmt/result.py b/tmt/result.py
index af75c64961..2875072fc2 100644
--- a/tmt/result.py
+++ b/tmt/result.py
@@ -76,6 +76,7 @@ class Result(tmt.utils.SerializableContainer):
     """ Describes what tmt knows about a single test result """
 
     name: str
+    serialnumber: int = 0
     result: ResultOutcome = field(
         default=ResultOutcome.PASS,
         serialize=lambda result: result.value,
@@ -141,6 +142,7 @@ def from_test(
 
         _result = Result(
             name=test.name,
+            serialnumber=test.serialnumber,
             result=result,
             note=note,
             duration=duration,
diff --git a/tmt/steps/discover/__init__.py b/tmt/steps/discover/__init__.py
index 00bf37bc8a..382c491563 100644
--- a/tmt/steps/discover/__init__.py
+++ b/tmt/steps/discover/__init__.py
@@ -279,6 +279,9 @@ def go(self) -> None:
             else:
                 raise GeneralError(f'Unexpected phase in discover step: {phase}')
 
+        for test in self._tests:
+            test.serialnumber = self.plan.draw_test_serial_number(test)
+
         # Show fmf identifiers for tests discovered in plan
         # TODO: This part should go into the 'fmf.py' module
         if self.opt('fmf_id'):
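The serial numbers drawn above are nothing more than consecutive integers handed out per plan, starting at 1 for the first discovered test. A minimal standalone sketch of the same counting scheme (illustrative names only, not the real tmt classes):

    import itertools
    from typing import Iterator, Optional

    class PlanSketch:
        """Hypothetical stand-in for tmt.base.Plan, for illustration only."""

        _serial_number_generator: Optional[Iterator[int]] = None

        def draw_test_serial_number(self, test_name: str) -> int:
            # Create the counter lazily; the first test gets 1, the next 2, ...
            if self._serial_number_generator is None:
                self._serial_number_generator = itertools.count(start=1, step=1)
            return next(self._serial_number_generator)

    plan = PlanSketch()
    print([plan.draw_test_serial_number(name) for name in ('/test/one', '/test/two')])
    # [1, 2]

Because the generator lives on the plan, every plan numbers its tests independently while the numbers stay unique within a single plan.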
diff --git a/tmt/steps/execute/__init__.py b/tmt/steps/execute/__init__.py
index 90d07fc1c9..2ff510d606 100644
--- a/tmt/steps/execute/__init__.py
+++ b/tmt/steps/execute/__init__.py
@@ -200,8 +200,10 @@ def data_path(
            filename not provided) or to the given data file otherwise.
        """
        # Prepare directory path, create if requested
-       assert self.step.workdir is not None
-       directory = self.step.workdir / TEST_DATA / test.name.lstrip('/')
+       assert self.step.workdir is not None  # narrow type
+       directory = self.step.workdir \
+           / TEST_DATA \
+           / f'{test.name.lstrip("/") or "default"}-{test.serialnumber}'
        if create and not directory.is_dir():
            directory.joinpath(TEST_DATA).mkdir(parents=True)
        if not filename:
@@ -393,15 +395,33 @@ def check_custom_results(self, test: "tmt.Test") -> List["tmt.Result"]:
        custom_results = []
        for partial_result_data in results:
            partial_result = tmt.Result.from_serialized(partial_result_data)
+
            # Name '/' means the test itself
            if partial_result.name == '/':
                partial_result.name = test.name
            else:
                partial_result.name = test.name + partial_result.name
+
            # Fix log paths as user provides relative path to TMT_TEST_DATA
            # but Result has to point relative to the execute workdir
            log_path_base = self.data_path(
                test, full=False, filename=tmt.steps.execute.TEST_DATA)
            partial_result.log = [log_path_base / log for log in partial_result.log]
+
+           # TODO: this might need more care: the test has been assigned a serial
+           # number, which is now part of its data directory path. Now the test
+           # produced custom results, possibly many of them. What is the serial
+           # number of the test they belong to?
+           #
+           # A naive implementation assigns them the serial number of the test
+           # that spawned them, but it can be argued the test may effectively
+           # create results for virtual tests - would they deserve their own
+           # serial numbers? On the other hand, there is no risk of on-disk
+           # collision as these tests do not really exist: they have no data
+           # directories of their own, they are all confined to the parent
+           # test's directory. And the serial number correspondence in
+           # results.yaml can be useful for grouping results that belong to
+           # the same test.
+           partial_result.serialnumber = test.serialnumber
+
            custom_results.append(partial_result)
 
        return custom_results
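For reference, the directory naming that the updated shell tests assert follows directly from the data_path() change above: the per-test data directory is the test name without the leading slash (or 'default' when the name is empty), with the serial number appended to the last component. A rough sketch of just that path construction, with a hypothetical helper name and simplified workdir handling:

    from pathlib import Path

    def test_data_directory(step_workdir: Path, test_name: str, serial_number: int) -> Path:
        # Empty names fall back to 'default'; the serial number suffix helps
        # avoid collisions when the same test name shows up more than once
        # within a single plan.
        return step_workdir / 'data' / f'{test_name.lstrip("/") or "default"}-{serial_number}'

    print(test_data_directory(Path('run/plan/execute'), '/test/good', 3))
    # run/plan/execute/data/test/good-3

This matches assertions such as "$data/test/good-3/output.txt" in tests/pull/results/test.sh, where the suffix reflects the serial number the test received during discovery.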