
Merge branch 'ci/remove_gitlab_api_while_generating_test_pipeline' into 'master'

CI: remove GitLab API call while generating test pipeline

See merge request espressif/esp-idf!34397
hfudev committed Oct 25, 2024
2 parents 27f11f8 + 502749d commit 9106c43
Showing 3 changed files with 58 additions and 80 deletions.
16 changes: 10 additions & 6 deletions tools/ci/dynamic_pipelines/report.py
@@ -11,6 +11,7 @@

import yaml
from artifacts_handler import ArtifactType
from gitlab import GitlabUpdateError
from gitlab_api import Gitlab
from idf_build_apps import App
from idf_build_apps.constants import BuildStatus
@@ -254,6 +255,10 @@ def _update_mr_comment(self, comment: str, print_retry_jobs_message: bool) -> None
)
del_retry_job_pic_pattern = re.escape(RETRY_JOB_TITLE) + r'.*?' + re.escape(f'{RETRY_JOB_PICTURE_PATH})')

new_comment = f'{COMMENT_START_MARKER}\n\n{comment}'
if print_retry_jobs_message:
new_comment += retry_job_picture_comment

for note in self.mr.notes.list(iterator=True):
if note.body.startswith(COMMENT_START_MARKER):
updated_str = self._get_updated_comment(note.body, comment)
@@ -264,14 +269,13 @@ def _update_mr_comment(self, comment: str, print_retry_jobs_message: bool) -> None
updated_str += retry_job_picture_comment

note.body = updated_str
note.save()
try:
note.save()
except GitlabUpdateError:
print('Failed to update MR comment, creating a new comment')
self.mr.notes.create({'body': new_comment})
break
else:
# Create a new comment if no existing comment is found
new_comment = f'{COMMENT_START_MARKER}\n\n{comment}'
if print_retry_jobs_message:
new_comment += retry_job_picture_comment

self.mr.notes.create({'body': new_comment})

def _get_updated_comment(self, existing_comment: str, new_comment: str) -> str:
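For reference, the comment-update flow introduced in report.py amounts to the following pattern (a minimal sketch; the `mr` object is a python-gitlab merge request, and the function name and `marker` argument are illustrative rather than taken from the source):

```python
# Hedged sketch of the new fallback behaviour: update the existing bot comment,
# and post a fresh one if the update is rejected or no comment exists yet.
from gitlab import GitlabUpdateError


def update_or_create_mr_comment(mr, comment: str, marker: str) -> None:
    new_comment = f'{marker}\n\n{comment}'
    for note in mr.notes.list(iterator=True):
        if note.body.startswith(marker):
            note.body = new_comment
            try:
                note.save()  # may raise if this note cannot be edited
            except GitlabUpdateError:
                # fall back to posting a fresh comment instead of failing the job
                mr.notes.create({'body': new_comment})
            break
    else:
        # no existing bot comment found -> create one
        mr.notes.create({'body': new_comment})
```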
@@ -8,7 +8,6 @@
"""
import argparse
import glob
import logging
import os
import typing as t
from collections import Counter
@@ -21,16 +20,15 @@
from dynamic_pipelines.constants import DEFAULT_TARGET_TEST_CHILD_PIPELINE_FILEPATH
from dynamic_pipelines.constants import DEFAULT_TARGET_TEST_CHILD_PIPELINE_NAME
from dynamic_pipelines.constants import DEFAULT_TEST_PATHS
from dynamic_pipelines.constants import KNOWN_GENERATE_TEST_CHILD_PIPELINE_WARNINGS_FILEPATH
from dynamic_pipelines.constants import (
KNOWN_GENERATE_TEST_CHILD_PIPELINE_WARNINGS_FILEPATH,
)
from dynamic_pipelines.models import EmptyJob
from dynamic_pipelines.models import Job
from dynamic_pipelines.models import TargetTestJob
from dynamic_pipelines.utils import dump_jobs_to_yaml
from gitlab.v4.objects import Project
from gitlab_api import Gitlab
from idf_build_apps import App
from idf_ci.app import import_apps_from_txt
from idf_ci_utils import IDF_PATH
from idf_pytest.script import get_pytest_cases


@@ -50,47 +48,38 @@ def get_tags_with_amount(s: str) -> t.List[str]:


def get_target_test_jobs(
project: Project, paths: str, apps: t.List[App]
) -> t.Tuple[t.List[Job], t.List[str], t.Dict[str, t.List[str]]]:
paths: str, apps: t.List[App], exclude_runner_tags: t.Set[str]
) -> t.Tuple[t.List[Job], t.List[str], t.List[str]]:
"""
Return the target test jobs and the extra yaml files to include
"""
issues: t.Dict[str, t.List[str]] = {
'no_env_marker_test_cases': [],
'no_runner_tags': [],
}

if mr_labels := os.getenv('CI_MERGE_REQUEST_LABELS'):
print(f'MR labels: {mr_labels}')

if BUILD_ONLY_LABEL in mr_labels.split(','):
print('MR has build only label, skip generating target test child pipeline')
return [EmptyJob()], [], issues
return [EmptyJob()], [], []

pytest_cases = get_pytest_cases(
paths,
apps=apps,
marker_expr='not host_test', # since it's generating target-test child pipeline
)

no_env_marker_test_cases: t.List[str] = []
res = defaultdict(list)
for case in pytest_cases:
if not case.env_markers:
issues['no_env_marker_test_cases'].append(case.item.nodeid)
no_env_marker_test_cases.append(case.item.nodeid)
continue

res[(case.target_selector, tuple(sorted(case.env_markers)))].append(case)

target_test_jobs: t.List[Job] = []
for (target_selector, env_markers), cases in res.items():
runner_tags = get_tags_with_amount(target_selector) + list(env_markers)
# we don't need to get all runner, as long as we get one runner, it's fine
runner_list = project.runners.list(status='online', tag_list=','.join(runner_tags), get_all=False)
if not runner_list:
issues['no_runner_tags'].append(','.join(runner_tags))
logging.warning(f'No runner found for {",".join(runner_tags)}, required by cases:')
for case in cases:
logging.warning(f' - {case.item.nodeid}')
if ','.join(runner_tags) in exclude_runner_tags:
print('WARNING: excluding test cases with runner tags:', runner_tags)
continue

target_test_job = TargetTestJob(
@@ -115,63 +104,54 @@ def generate_target_test_child_pipeline(
if fast_pipeline_flag:
extra_include_yml = ['tools/ci/dynamic_pipelines/templates/fast_pipeline.yml']

issues['no_env_marker_test_cases'] = sorted(issues['no_env_marker_test_cases'])
issues['no_runner_tags'] = sorted(issues['no_runner_tags'])

return target_test_jobs, extra_include_yml, issues
no_env_marker_test_cases.sort()
return target_test_jobs, extra_include_yml, no_env_marker_test_cases


def generate_target_test_child_pipeline(
project: Project,
paths: str,
apps: t.List[App],
output_filepath: str,
) -> None:
target_test_jobs, extra_include_yml, issues = get_target_test_jobs(project, paths, apps)

with open(KNOWN_GENERATE_TEST_CHILD_PIPELINE_WARNINGS_FILEPATH) as fr:
known_warnings_dict = yaml.safe_load(fr) or dict()

exclude_runner_tags_set = set(known_warnings_dict.get('no_runner_tags', []))
# EXCLUDE_RUNNER_TAGS is a string separated by ';'
# like 'esp32,generic;esp32c3,wifi'
if exclude_runner_tags := os.getenv('EXCLUDE_RUNNER_TAGS'):
exclude_runner_tags_set.update(exclude_runner_tags.split(';'))

target_test_jobs, extra_include_yml, no_env_marker_test_cases = get_target_test_jobs(
paths=paths,
apps=apps,
exclude_runner_tags=exclude_runner_tags_set,
)

known_no_env_marker_test_cases = set(known_warnings_dict.get('no_env_marker_test_cases', []))
no_env_marker_test_cases = set(issues['no_env_marker_test_cases'])
no_env_marker_test_cases_set = set(no_env_marker_test_cases)

no_env_marker_test_cases_fail = False
if no_env_marker_test_cases - known_no_env_marker_test_cases:
if no_env_marker_test_cases_set - known_no_env_marker_test_cases:
print('ERROR: NEW "no_env_marker_test_cases" detected:')
for case in no_env_marker_test_cases - known_no_env_marker_test_cases:
for case in no_env_marker_test_cases_set - known_no_env_marker_test_cases:
print(f' - {case}')
no_env_marker_test_cases_fail = True

print('Please add at least one environment marker to the test cases listed above. '
'You may check all the env markers here: tools/ci/idf_pytest/constants.py')

known_no_runner_tags = set(known_warnings_dict.get('no_runner_tags', []))
no_runner_tags = set(issues['no_runner_tags'])

no_runner_tags_fail = False
if no_runner_tags - known_no_runner_tags:
print('ERROR: NEW "no_runner_tags" detected:')
for tag in no_runner_tags - known_no_runner_tags:
print(f' - {tag}')
no_runner_tags_fail = True

print(
'- If you\'re the owner of the missing runners, '
'please make sure the runners are online and have the required tags.\n'
'- If you\'re the owner of the test cases that require the missing tags, '
'please add at least one runner with the required tags.\n'
'- For other users, please contact the runner owner first, '
'or report this issue in our internal CI channel.\n'
'If the issue cannot be solved in a short time, '
'please add the missing tags to the "no_runner_tags" section '
'under the file inside ESP-IDF repo: '
f'{os.path.relpath(KNOWN_GENERATE_TEST_CHILD_PIPELINE_WARNINGS_FILEPATH, IDF_PATH)}.'
'Please add at least one environment marker to the test cases listed above. '
'You may check all the env markers here: tools/ci/idf_pytest/constants.py'
)

if no_env_marker_test_cases_fail or no_runner_tags_fail:
if no_env_marker_test_cases_fail:
raise SystemExit('Failed to generate target test child pipeline.')

dump_jobs_to_yaml(target_test_jobs, output_filepath, DEFAULT_TARGET_TEST_CHILD_PIPELINE_NAME, extra_include_yml)
dump_jobs_to_yaml(
target_test_jobs,
output_filepath,
DEFAULT_TARGET_TEST_CHILD_PIPELINE_NAME,
extra_include_yml,
)
print(f'Generate child pipeline yaml file {output_filepath} with {sum(j.parallel for j in target_test_jobs)} jobs')


@@ -187,18 +167,6 @@ def generate_target_test_child_pipeline(
default=DEFAULT_TEST_PATHS,
help='Paths to the apps to build.',
)
parser.add_argument(
'--project-id',
type=int,
default=os.getenv('CI_PROJECT_ID'),
help='Project ID',
)
parser.add_argument(
'--pipeline-id',
type=int,
default=os.getenv('PARENT_PIPELINE_ID'),
help='Pipeline ID',
)
parser.add_argument(
'-o',
'--output',
@@ -215,15 +183,12 @@

args = parser.parse_args()

gl_project = Gitlab(args.project_id).project

apps = []
for f in glob.glob(args.app_info_filepattern):
apps.extend(import_apps_from_txt(f))

generate_target_test_child_pipeline(
gl_project,
args.paths,
apps,
args.output,
paths=args.paths,
apps=apps,
output_filepath=args.output,
)
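The runner-availability check that previously queried the GitLab API is replaced here by a static exclusion set built from the known-warnings file plus an environment variable. Roughly (a minimal sketch; the yaml path is passed in explicitly instead of being imported from dynamic_pipelines.constants, and the helper name is illustrative):

```python
import os
import typing as t

import yaml


def build_exclude_runner_tags(known_warnings_filepath: str) -> t.Set[str]:
    """Collect runner-tag combinations whose target test jobs should be skipped."""
    with open(known_warnings_filepath) as fr:
        known_warnings = yaml.safe_load(fr) or {}

    exclude = set(known_warnings.get('no_runner_tags', []))

    # EXCLUDE_RUNNER_TAGS is ';'-separated, e.g. 'esp32,generic;esp32c3,wifi'
    if env_tags := os.getenv('EXCLUDE_RUNNER_TAGS'):
        exclude.update(env_tags.split(';'))

    return exclude


# Inside get_target_test_jobs(), a job is then dropped when its comma-joined
# runner tags match an entry:
#     if ','.join(runner_tags) in exclude_runner_tags:
#         continue
```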
@@ -1,3 +1,6 @@
# This is the file that contains the known warnings for the generate_test_child_pipeline.py script.
# no_env_marker_test_cases: List of test cases that do not have environment markers.
# each item shall be the test node id, you may check the error message to get the node id.
no_env_marker_test_cases:
- components/nvs_flash/test_apps/pytest_nvs_flash.py::test_nvs_flash[default]
- components/vfs/test_apps/pytest_vfs.py::test_vfs_ccomp[ccomp]
@@ -7,6 +10,12 @@ no_env_marker_test_cases:
- examples/storage/nvs_rw_value/pytest_nvs_rw_value.py::test_examples_nvs_rw_value
- examples/storage/nvs_rw_value_cxx/pytest_nvs_rw_value_cxx.py::test_examples_nvs_rw_value_cxx
- examples/storage/wear_levelling/pytest_wear_levelling_example.py::test_wear_levelling_example

# no_runner_tags: List of runner tags that have no test runner set.
# each item shall be a comma separated list of runner tags.
# NOTE:
# 1. for multi dut tests, the runner tag shall be <target>_<count>, e.g. esp32_2 instead of esp32,esp32
# 2. don't have spaces in the comma separated list.
no_runner_tags:
- esp32,ip101
- esp32c2,jtag,xtal_40mhz
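To illustrate the tag convention described in the NOTE above (a hedged example: only 'esp32,ip101' comes from the file, the other entries and tag lists are invented):

```python
# Multi-DUT cases are keyed as '<target>_<count>', so a two-device esp32 job
# matches 'esp32_2,...' rather than 'esp32,esp32,...'.
exclude_runner_tags = {'esp32,ip101', 'esp32_2,wifi_two_dut'}

print(','.join(['esp32', 'ip101']) in exclude_runner_tags)                   # True  -> skipped
print(','.join(['esp32_2', 'wifi_two_dut']) in exclude_runner_tags)          # True  -> skipped
print(','.join(['esp32', 'esp32', 'wifi_two_dut']) in exclude_runner_tags)   # False -> wrong key format
```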
