diff --git a/github_tests_validator_app/bin/github_repo_validation.py b/github_tests_validator_app/bin/github_repo_validation.py
index 40b1afc..4399ae7 100644
--- a/github_tests_validator_app/bin/github_repo_validation.py
+++ b/github_tests_validator_app/bin/github_repo_validation.py
@@ -10,8 +10,15 @@
     GH_WORKFLOWS_FOLDER_NAME,
     commit_ref_path,
     default_message,
+    base_tokens,
+    required_checks
 )
-from github_tests_validator_app.lib.utils import pull_requested_test_results
+
+from github_tests_validator_app.lib.utils import (
+    pull_requested_test_results,
+    validate_and_assign_token
+)
+
 from github_tests_validator_app.lib.connectors.github_client import GitHubConnector
 from github_tests_validator_app.lib.connectors.sqlalchemy_client import SQLAlchemyConnector, User
@@ -162,11 +169,7 @@ def validate_github_repo(
         github_event=event,
         user_github_connector=user_github_connector
     )
-    logging.info(f"failed_test : {failed_tests[1]}")
     pytest_result_conclusion = "failure" if failed_tests[1] > 0 else "success"
-    logging.info(f"pytest_result_conclusion 01 = {pytest_result_conclusion}")
-
-    logging.info(f"pytest_result_conclusion = {pytest_result_conclusion}")
 
     sql_client.add_new_repository_validation(
@@ -185,44 +188,67 @@ def validate_github_repo(
         default_message["valid_repository"]["tests"][str(tests_havent_changed)],
     )
+
     if event == "pull_request":
-        # Create a Check Run with detailed test results in case of failure
-        user_github_connector.repo.create_check_run(
-            name="[Integrity] Test Folder Validation",
-            head_sha=payload["pull_request"]["head"]["sha"],
-            status="completed",
-            conclusion=tests_conclusion,
-            output={
-                "title": "Test Folder Validation Result",
-                "summary": tests_message,
-            }
-        )
-        user_github_connector.repo.create_check_run(
-            name="[Integrity] Workflow Folder Validation",
-            head_sha=payload["pull_request"]["head"]["sha"],
-            status="completed",
-            conclusion=workflows_conclusion,
-            output={
-                "title": "Workflow Folder Validation Result",
-                "summary": workflows_message,
-            }
-        )
-        pytest_result_message = pull_requested_test_results(
-            tests_results_json=pytests_results_json,
-            payload=payload,
-            github_event=event,
-            user_github_connector=user_github_connector
-        )
-        user_github_connector.repo.create_check_run(
-            name="[Pytest] Pytest Result Validation",
-            head_sha=payload["pull_request"]["head"]["sha"],
-            status="completed",
-            conclusion=pytest_result_conclusion,
-            output={
-                "title": "Pytest Validation Result",
-                "summary": pytest_result_message[0],
-            }
-        )
+        try:
+            # Create a Check Run with detailed test results in case of failure
+            user_github_connector.repo.create_check_run(
+                name="[Integrity] Test Folder Validation",
+                head_sha=payload["pull_request"]["head"]["sha"],
+                status="completed",
+                conclusion=tests_conclusion,
+                output={
+                    "title": "Test Folder Validation Result",
+                    "summary": tests_message,
+                }
+            )
+            user_github_connector.repo.create_check_run(
+                name="[Integrity] Workflow Folder Validation",
+                head_sha=payload["pull_request"]["head"]["sha"],
+                status="completed",
+                conclusion=workflows_conclusion,
+                output={
+                    "title": "Workflow Folder Validation Result",
+                    "summary": workflows_message,
+                }
+            )
+            pytest_result_message = pull_requested_test_results(
+                tests_results_json=pytests_results_json,
+                payload=payload,
+                github_event=event,
+                user_github_connector=user_github_connector
+            )
+            user_github_connector.repo.create_check_run(
+                name="[Pytest] Pytest Result Validation",
+                head_sha=payload["pull_request"]["head"]["sha"],
+                status="completed",
+                conclusion=pytest_result_conclusion,
+                output={
+                    "title": "Pytest Validation Result",
+                    "summary": pytest_result_message[0],
+                }
+            )
+        except Exception as e:
+            logging.error(f"Error creating check run: {e}")
+
+        # A token is issued only once every exercise in the part is validated
+        part, token = validate_and_assign_token(sha=payload["pull_request"]["head"]["sha"],
+                                                tokens=base_tokens,
+                                                user_github_connector=user_github_connector,
+                                                required_checks=required_checks)
+
+        if part and token:
+            pr_number = payload["pull_request"]["number"]
+            comment_message = (
+                f"🎉 Congratulations! You've validated all exercises for {part}. "
+                f"Here is your token: `{token}`"
+            )
+            # Post a comment in the PR
+            user_github_connector.repo.get_pull(pr_number).create_issue_comment(comment_message)
+            logging.info(f"Posted comment to PR #{pr_number}: {comment_message}")
+        else:
+            logging.info("Not all exercises are validated. No token assigned.")
+
     elif event == "pusher":
         # Check if there is already an open PR
         gh_branch = payload["ref"].replace("refs/heads/", "")
@@ -230,44 +256,74 @@ def validate_github_repo(
             state="open",
             head=f"{user_github_connector.repo.owner.login}:{gh_branch}"
         )
+        logging.info(f"HEAD SHA for check runs: {payload['after']}")
+
         if gh_prs.totalCount > 0:
             gh_pr = gh_prs[0]  # Get first matching PR
             if gh_pr.head.sha == payload["after"]:
-                return
+                logging.info("Pushed SHA matches the head of an open PR; refreshing its check runs.")
 
-        user_github_connector.repo.create_check_run(
-            name="[Integrity] Test Folder Validation",
-            head_sha=payload["after"],
-            status="completed",
-            conclusion=tests_conclusion,
-            output={
-                "title": "Test Folder Validation Result",
-                "summary": tests_message,
-            }
-        )
-        user_github_connector.repo.create_check_run(
-            name="[Integrity] Workflow Folder Validation",
-            head_sha=payload["after"],
-            status="completed",
-            conclusion=workflows_conclusion,
-            output={
-                "title": "Workflow Folder Validation Result",
-                "summary": workflows_message,
-            }
-        )
-        pytest_result_message = pull_requested_test_results(
-            tests_results_json=pytests_results_json,
-            payload=payload,
-            github_event=event,
-            user_github_connector=user_github_connector
-        )
-        user_github_connector.repo.create_check_run(
-            name="[Pytest] Pytest Result Validation",
-            head_sha=payload["after"],
-            status="completed",
-            conclusion=pytest_result_conclusion,
-            output={
-                "title": "Pytest Validation Result",
-                "summary": pytest_result_message[0],
-            }
-        )
+        try:
+            user_github_connector.repo.create_check_run(
+                name="[Integrity] Test Folder Validation",
+                head_sha=payload["after"],
+                status="completed",
+                conclusion=tests_conclusion,
+                output={
+                    "title": "Test Folder Validation Result",
+                    "summary": tests_message,
+                }
+            )
+            user_github_connector.repo.create_check_run(
+                name="[Integrity] Workflow Folder Validation",
+                head_sha=payload["after"],
+                status="completed",
+                conclusion=workflows_conclusion,
+                output={
+                    "title": "Workflow Folder Validation Result",
+                    "summary": workflows_message,
+                }
+            )
+            pytest_result_message = pull_requested_test_results(
+                tests_results_json=pytests_results_json,
+                payload=payload,
+                github_event=event,
+                user_github_connector=user_github_connector
+            )
+            user_github_connector.repo.create_check_run(
+                name="[Pytest] Pytest Result Validation",
+                head_sha=payload["after"],
+                status="completed",
+                conclusion=pytest_result_conclusion,
+                output={
+                    "title": "Pytest Validation Result",
+                    "summary": pytest_result_message[0],
+                }
+            )
+        except Exception as e:
+            logging.error(f"Error creating check run: {e}")
+
+        # A token is issued only once every exercise in the part is validated
+        part, token = validate_and_assign_token(sha=payload["after"],
+                                                tokens=base_tokens,
+                                                user_github_connector=user_github_connector,
+                                                required_checks=required_checks)
+
+        if part and token:
+            branch_ref = payload["ref"]  # e.g., 'refs/heads/branch_name'
+
+            # Retrieve the pull request associated with the branch (the `head` filter expects the short branch name)
+            open_prs = user_github_connector.repo.get_pulls(state="open", head=f"{user_github_connector.repo.owner.login}:{gh_branch}")
+            if open_prs.totalCount == 1:
+                pr_number = open_prs[0].number
+                comment_message = (
+                    f"🎉 Congratulations! You've validated all exercises for {part}. "
+                    f"Here is your token: `{token}`"
+                )
+                # Post a comment in the PR
+                open_prs[0].create_issue_comment(comment_message)
+                logging.info(f"Posted comment to PR #{pr_number}: {comment_message}")
+            else:
+                logging.warning(f"Could not determine a single open PR for branch {branch_ref}.")
+        else:
+            logging.info("Not all exercises are validated. No token assigned.")
\ No newline at end of file
diff --git a/github_tests_validator_app/bin/user_pytest_summaries_validation.py b/github_tests_validator_app/bin/user_pytest_summaries_validation.py
index 5cec09d..be5808c 100644
--- a/github_tests_validator_app/bin/user_pytest_summaries_validation.py
+++ b/github_tests_validator_app/bin/user_pytest_summaries_validation.py
@@ -107,7 +107,7 @@ def send_user_pytest_summaries(
 
     # Get user artifact
     artifact = get_user_artifact(user_github_connector, sql_client, all_user_artifact, payload)
-    logging.info(f"User artifact: {artifact}")
+    # logging.info(f"User artifact: {artifact}")
     if not artifact:
         logging.info("[ERROR]: Cannot get user artifact.")
         return
diff --git a/github_tests_validator_app/config.py b/github_tests_validator_app/config.py
index b699976..eed47e5 100644
--- a/github_tests_validator_app/config.py
+++ b/github_tests_validator_app/config.py
@@ -58,3 +58,50 @@
         },
     },
 }
+
+part_map = {
+    "Part 1": [
+        "validation_tests/test_01_git_101.py",
+        "validation_tests/test_02_notebooks_to_scripts.py",
+        "validation_tests/test_03_linting_and_formatting.py",
+        "validation_tests/test_04_continuous_integration.py",
+        "validation_tests/test_05_unit_testing.py",
+        "validation_tests/test_06_code_documentation.py",
+        "validation_tests/test_07_dependencies_and_venv.py"
+        # "validation_tests/test_08_packaging.py"
+    ],
+    "Part 2": [
+        "validation_tests/test_09_ci_cd_pipelines.py",
+        "validation_tests/test_10_monitoring_and_alerting.py",
+        "validation_tests/test_11_logging.py",
+        "validation_tests/test_12_security_testing.py",
+        "validation_tests/test_13_performance_optimization.py"
+    ]
+}
+
+base_tokens = {
+    "Part 1": [
+        "FsQyRcFCNNzlUZpZ", "FyxfmqtAc8HCRLpx", "4VPhvLsrhJwfU3ee",
+        "V9D2DaQgesfMs9Fc", "CJCIPgYQud6Io1jD", "mLFjjkXsxbTb0VCw",
+        "9B8rMKEeR0p3gsJD", "i4M9CZuJQiwf8TKL", "JDn5CficECTa4JBN",
+        "AjMJBlYQyA2bxuXg", "NZ2BNJDcUQ8BZYxX", "eb8YEgo8yoTenrVS",
+        "WJbcGDT2Y7VjxNrZ", "oTzPvOupEY1eA3O9", "M0zAppk75VZEWAIx",
+        "HjzUp5L9IzYhRzdj", "A04FC2reSxdIgHaK", "DXM297sx4alfByVx",
+        "1G03TRqIamYRRNTF", "q78NrY9cESJESCBL"
+    ],
+    "Part 2": [
+        "Kh6MYcXAaQtWjKqn", "Diu0KzPzOU6Reced", "GB8DrumrkguJYDbm",
+        "9Saz9603Gv7fQxh9", "4toXjiNOa2jQmveY", "S798d7fOXpExDtpR",
+        "l3H2IALb5PziNqwZ", "1kaPHmA1I5o6fjyd", "032IKBCVNiWQdRYT",
+        "TL2kMMZL5aK8j6rW", "gPdv1ahPY1Pd8Q9T", "4XdS6r53tJ01vFOa",
+        "afHAgMIzQQI0HyDX", "SmINslEsh2OgAGGu", "RFFR6Z0Fmvsu5poQ",
+        "hTfmdvk9uilnlkIN", "chyPPvztjsZiYgYE", "bxp7NA9uFwCtdtRL",
+        "CzeYlSWdW2PgPpzk", "bcaTLaZQaCZx1zyJ"
+    ]
+}
+
+required_checks = [
+    "[Pytest] Pytest Result Validation",
Validation", + "[Integrity] Test Folder Validation", + "[Integrity] Workflow Folder Validation" +] \ No newline at end of file diff --git a/github_tests_validator_app/lib/connectors/github_client.py b/github_tests_validator_app/lib/connectors/github_client.py index 64a4c6a..660961f 100644 --- a/github_tests_validator_app/lib/connectors/github_client.py +++ b/github_tests_validator_app/lib/connectors/github_client.py @@ -1,5 +1,6 @@ from typing import Any, Dict, List, Union +import os import io import json import time @@ -113,9 +114,9 @@ def get_all_artifacts(self) -> Union[requests.models.Response, Any]: for attempt in range(max_retries): try: response = self._request_data(url, headers=headers) - logging.info(f"Artifacts response: {response} from {url}") + # logging.info(f"Artifacts response: {response} from {url}") if response and response.get("artifacts"): - logging.info(f"Artifacts fetched successfully on attempt {attempt+1}: {response}") + logging.info(f"Artifacts fetched successfully on attempt {attempt+1}") return response logging.warning(f"No artifacts found on attempt {attempt+1}/{max_retries}. Retrying in {delay}s...") time.sleep(delay) @@ -163,8 +164,7 @@ def get_artifact(self, artifact_info: Dict[str, Any]) -> Union[requests.models.R def _get_headers(self) -> Dict[str, str]: - if not self.ACCESS_TOKEN: - self.set_access_token(self.REPO_NAME) + self.ACCESS_TOKEN = os.getenv("GH_PAT") return { "Accept": "application/vnd.github+json", diff --git a/github_tests_validator_app/lib/connectors/sqlalchemy_client.py b/github_tests_validator_app/lib/connectors/sqlalchemy_client.py index 4bfc094..0b9d797 100644 --- a/github_tests_validator_app/lib/connectors/sqlalchemy_client.py +++ b/github_tests_validator_app/lib/connectors/sqlalchemy_client.py @@ -117,7 +117,7 @@ def add_new_pytest_summary( branch: str, info: str, ) -> None: - logging.info(f"Adding new pytest summary: {artifact}") + logging.info(f"Adding new pytest summary ...") pytest_summary = WorkflowRun( id=workflow_run_id, organization_or_user=user_data["organization_or_user"], @@ -147,7 +147,7 @@ def add_new_pytest_detail( results: List[Dict[str, Any]], workflow_run_id: int, ) -> None: - logging.info(f"Adding new pytest details: {results}") + logging.info(f"Adding new pytest details...") with Session(self.engine) as session: try: for test in results: diff --git a/github_tests_validator_app/lib/utils.py b/github_tests_validator_app/lib/utils.py index 34b8b0d..84e8a47 100644 --- a/github_tests_validator_app/lib/utils.py +++ b/github_tests_validator_app/lib/utils.py @@ -3,9 +3,10 @@ import re import hashlib import logging +import random from datetime import datetime -from github import ContentFile +from github import ContentFile, GithubException from github_tests_validator_app.lib.connectors.sqlalchemy_client import User @@ -69,37 +70,75 @@ def pull_requested_test_results( # Extract the PR name and determine the test prefix match = re.match(r"(\d+):", pull_request_title) - if match: - test_prefix = f"validation_tests/test_{match.group(1)}" - else: + if not match: return "No matching test prefix found for this PR." 
+    current_exercice = int(match.group(1))
+    test_prefixes = [
+        f"validation_tests/test_{str(i).zfill(2)}" for i in range(current_exercice + 1)
+    ]
+
     # Filter and format test results specific to the PR name
     test_failed = 0
     filtered_messages = []
     for test in tests_results_json.get("tests", []):
         nodeid = test.get("nodeid", "Unknown test")
-        if test_prefix and nodeid.startswith(test_prefix):
-            outcome = test.get("outcome", "unknown")
-            if outcome == "failed":
-                test_failed += 1
-            message = test.get("call", {}).get("crash", {}).get("message", "No message available\n")
-            traceback = test.get("call", {}).get("crash", {}).get("traceback", "No traceback available\n")
-            filtered_messages.append(
-                f"- **{nodeid}**:\n\n"
-                f"  - **Outcome**: {outcome}\n"
-                f"  - **Message**:\n```\n{message}```\n"
-                f"  - **Traceback**:\n```\n{traceback}```\n"
-            )
+        for prefix in test_prefixes:
+            if nodeid.startswith(prefix):
+                outcome = test.get("outcome", "unknown")
+                if outcome == "failed":
+                    test_failed += 1
+                message = test.get("call", {}).get("crash", {}).get("message", "No message available\n")
+                traceback = test.get("call", {}).get("crash", {}).get("traceback", "No traceback available\n")
+                filtered_messages.append(
+                    f"- **{nodeid}**:\n\n"
+                    f"  - **Outcome**: {outcome}\n"
+                    f"  - **Message**:\n```\n{message}```\n"
+                    f"  - **Traceback**:\n```\n{traceback}```\n"
+                )
+                break
 
     if filtered_messages:
         return "\n".join(filtered_messages), test_failed
     else:
         return "No matching test results for this PR.", test_failed
-
+
 
 def strip_ansi_escape_codes(text: str) -> str:
     """
     Removes ANSI escape codes from the given text.
     """
     ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
-    return ansi_escape.sub('', text)
\ No newline at end of file
+    return ansi_escape.sub('', text)
+
+
+def validate_and_assign_token(sha, tokens, user_github_connector, required_checks):
+    try:
+        # Fetch check runs for the given SHA
+        check_runs = user_github_connector.repo.get_commit(sha).get_check_runs()
+        for check_name in required_checks:
+            # Filter for matching check runs
+            matching_checks = [
+                check for check in check_runs if check.name == check_name
+            ]
+            if not matching_checks:
+                logging.warning(f"Required check '{check_name}' not found.")
+                return None, None
+
+            for check in matching_checks:
+                logging.info(f"Validating check '{check.name}' with status: {check.conclusion}")
+                if check.conclusion != "success":
+                    logging.warning(f"Check '{check.name}' did not pass.")
+                    return None, None
+
+        # If all checks are successful, assign a token (from the first part that still has tokens)
+        for part, token_list in tokens.items():
+            if token_list:
+                token = random.choice(token_list)
+                return part, token
+
+    except GithubException as e:
+        logging.error(f"Error fetching check runs for SHA {sha}: {e}")
+        return None, None
+
+    return None, None
\ No newline at end of file
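
Note (illustration, not part of the patch): a minimal sketch of how the new `validate_and_assign_token` helper is exercised, assuming a connector that exposes a PyGithub `repo` and the `base_tokens` / `required_checks` values added to `config.py`; `issue_token_if_complete` is a hypothetical wrapper mirroring the two call sites in `github_repo_validation.py`:

    from github_tests_validator_app.config import base_tokens, required_checks
    from github_tests_validator_app.lib.utils import validate_and_assign_token

    def issue_token_if_complete(user_github_connector, head_sha: str) -> None:
        # Returns (part, token) only when every required check run on
        # `head_sha` concluded "success"; otherwise returns (None, None).
        part, token = validate_and_assign_token(
            sha=head_sha,
            tokens=base_tokens,
            user_github_connector=user_github_connector,
            required_checks=required_checks,
        )
        if part and token:
            print(f"All exercises validated for {part}; token: {token}")
        else:
            print("Not all exercises are validated; no token assigned.")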