Pass pyupgrade, ruff, isort, black checks
Add pre-commit hooks and check files
crhf committed Apr 15, 2024
1 parent 91bc9a4 commit 81e7bad
Showing 21 changed files with 297 additions and 253 deletions.
27 changes: 27 additions & 0 deletions .pre-commit-config.yaml
@@ -0,0 +1,27 @@
default_language_version:
  python: python3.11

repos:
  - repo: https://github.com/asottile/pyupgrade
    rev: v3.15.2
    hooks:
      - id: pyupgrade
        args: ["--py311-plus"]

  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.3.7
    hooks:
      - id: ruff
        args: ["--fix"]

  - repo: https://github.com/pycqa/isort
    rev: 5.13.2
    hooks:
      - id: isort
        name: isort (python)
        args: ["--profile", "black"]

  - repo: https://github.com/psf/black
    rev: 24.4.0
    hooks:
      - id: black
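
The `--py311-plus` flag asks pyupgrade to rewrite pre-3.11 idioms into their modern equivalents, which is what most of the hunks below do. A minimal before/after sketch (the function and names are made up for illustration, not taken from this repository):

```python
from typing import Dict, List, Tuple


# Before: typing aliases and a redundant open() mode (pre-PEP 585 style).
def load_scores_old(path: str) -> Dict[str, List[Tuple[int, float]]]:
    with open(path, "r") as f:
        return {path: [(i, 0.0) for i, _ in enumerate(f)]}


# After `pyupgrade --py311-plus`: builtin generics and a bare open().
def load_scores_new(path: str) -> dict[str, list[tuple[int, float]]]:
    with open(path) as f:
        return {path: [(i, 0.0) for i, _ in enumerate(f)]}
```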
36 changes: 18 additions & 18 deletions app/analysis/sbfl.py
@@ -13,16 +13,16 @@
import math
import os
import re
from collections.abc import Mapping
from dataclasses import dataclass
from functools import cache
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Mapping, Tuple

from coverage.sqldata import CoverageData


def canonicalize_testname_sympy_bin_test(testname: str) -> Tuple[str, str]:
def canonicalize_testname_sympy_bin_test(testname: str) -> tuple[str, str]:
"""
The sympy version, which executes tests with bin/test
@@ -31,7 +31,7 @@ def canonicalize_testname_sympy_bin_test(testname: str) -> Tuple[str, str]:
return "", testname


def canonicalize_testname_django_runner(testname: str) -> Tuple[str, str]:
def canonicalize_testname_django_runner(testname: str) -> tuple[str, str]:
"""
Same as canonicalize_testname_pytest, but for django test runner.
Need to deal with them separately because the test name formats are different.
@@ -57,7 +57,7 @@ def canonicalize_testname_django_runner(testname: str) -> Tuple[str, str]:
return file_name, full_name


def canonicalize_testname_pytest(testname: str) -> Tuple[str, str]:
def canonicalize_testname_pytest(testname: str) -> tuple[str, str]:
"""
Unify the test names in tasks_map.json and pytest-cov.
@@ -75,7 +75,7 @@ def canonicalize_testname_pytest(testname: str) -> Tuple[str, str]:
return file_name, testname


def canonicalize_testname(task_id: str, testname: str) -> Tuple[str, str]:
def canonicalize_testname(task_id: str, testname: str) -> tuple[str, str]:
if "django" in task_id:
return canonicalize_testname_django_runner(testname)
elif "sympy" in task_id:
@@ -84,11 +84,11 @@ def canonicalize_testname(task_id: str, testname: str) -> Tuple[str, str]:
return canonicalize_testname_pytest(testname)


class FileExecStats(object):
class FileExecStats:
def __init__(self, filename: str):
self.filename = filename
# line number -> (pass_count, fail_count)
self.line_stats: Dict[int, Tuple[int, int]] = dict()
self.line_stats: dict[int, tuple[int, int]] = dict()

def incre_pass_count(self, line_no: int):
if line_no in self.line_stats:
@@ -113,10 +113,10 @@ def __repr__(self) -> str:
return self.__str__()


class ExecStats(object):
class ExecStats:
def __init__(self):
# file name -> FileExecStats
self.file_stats: Dict[str, FileExecStats] = dict()
self.file_stats: dict[str, FileExecStats] = dict()

def add_file(self, file_exec_stats: FileExecStats):
self.file_stats[file_exec_stats.filename] = file_exec_stats
@@ -183,7 +183,7 @@ def rank_lines(
return lines_with_scores
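
For readers unfamiliar with spectrum-based fault localization: `rank_lines` turns the per-line `(pass_count, fail_count)` tallies collected by `FileExecStats` into a suspiciousness score and returns the lines sorted by it. The exact formula is not visible in this diff; the Ochiai metric below is a common choice and is shown only as an illustration, not as the repository's implementation:

```python
import math


def ochiai(pass_count: int, fail_count: int, total_failing: int) -> float:
    # Ochiai suspiciousness for one line, given how many passing/failing tests
    # covered it and how many failing tests exist overall. Illustrative only;
    # rank_lines() may use a different formula.
    covered = pass_count + fail_count
    if covered == 0 or total_failing == 0:
        return 0.0
    return fail_count / math.sqrt(total_failing * covered)


# A line covered by both failing tests and no passing test is maximally
# suspicious; a line covered only by passing tests scores 0.
print(ochiai(pass_count=0, fail_count=2, total_failing=2))  # 1.0
print(ochiai(pass_count=5, fail_count=0, total_failing=2))  # 0.0
```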


def helper_remove_dup_and_empty(lst: List[str]) -> List[str]:
def helper_remove_dup_and_empty(lst: list[str]) -> list[str]:
"""
Remove duplicates and empty strings from the list.
"""
@@ -203,7 +203,7 @@ def helper_two_tests_match(test_one: str, test_two: str) -> bool:
return test_one.endswith(test_two) or test_two.endswith(test_one)


def helper_test_match_any(test: str, candidates: List[str]) -> bool:
def helper_test_match_any(test: str, candidates: list[str]) -> bool:
"""
Check if the test matches any of the candidates.
"""
@@ -216,8 +216,8 @@ def helper_test_match_any(test: str, candidates: List[str]) -> bool:


def run(
pass_tests: List[str], fail_tests: List[str], cov_file: str, task_id: str
) -> tuple[list[str], list[Tuple[str, int, float]]]:
pass_tests: list[str], fail_tests: list[str], cov_file: str, task_id: str
) -> tuple[list[str], list[tuple[str, int, float]]]:
"""
Run SBFL analysis on the given coverage data file.
At the same time, collect the test file names.
@@ -290,8 +290,8 @@ def run(


def collate_results(
ranked_lines: List[Tuple[str, int, float]], test_file_names: List[str]
) -> List[Tuple[str, int, int, float]]:
ranked_lines: list[tuple[str, int, float]], test_file_names: list[str]
) -> list[tuple[str, int, int, float]]:
"""
From the ranked lines, perform filtering (for lines that are likely to be in the test files),
as well as merging (since multiple ranked lines can be adjacent to each other).
@@ -300,7 +300,7 @@ def collate_results(
- list of (file, start_line_no, end_line_no, score), sorted
"""
# (1) remove lines with non positive score
positive_lines = [l for l in ranked_lines if l[2] > 0]
positive_lines = [line for line in ranked_lines if line[2] > 0]
# (2) remove lines that are in test files
survived_lines = []
for file, line_no, score in positive_lines:
@@ -310,7 +310,7 @@ def collate_results(
survived_lines.append((file, line_no, score))

# (3) convert survived lines into dict, key is filename, value is list of (line_no, score)
file_line_score: Mapping[str, List[Tuple[int, float]]] = dict()
file_line_score: Mapping[str, list[tuple[int, float]]] = dict()
for file, line_no, score in survived_lines:
if file not in file_line_score:
file_line_score[file] = []
@@ -322,7 +322,7 @@ def collate_results(

# (4) merge adjacent lines, the new dict value list is a list of (start_line_no, end_line_no, score)
# note that end_line_no is inclusive
merged_file_line_score: Mapping[str, List[Tuple[int, int, float]]] = dict()
merged_file_line_score: Mapping[str, list[tuple[int, int, float]]] = dict()
for file, line_score in file_line_score.items():
merged_line_score = []
# indexes into the line_score
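
The diff is cut off before the merge loop of step (4), so here is a small sketch of the idea the comments describe: adjacent suspicious lines are folded into inclusive `(start_line_no, end_line_no, score)` ranges. The helper name and the `max()` rule for combining scores are assumptions, not the repository's code:

```python
# Illustrative sketch of step (4): merge adjacent line numbers into inclusive
# (start_line_no, end_line_no, score) ranges. The score-combination rule is an
# assumption; the real merge loop is not shown in this diff.
def merge_adjacent_lines(
    line_score: list[tuple[int, float]],
) -> list[tuple[int, int, float]]:
    merged: list[tuple[int, int, float]] = []
    for line_no, score in sorted(line_score):
        if merged and line_no == merged[-1][1] + 1:
            start, _end, best = merged[-1]
            merged[-1] = (start, line_no, max(best, score))
        else:
            merged.append((line_no, line_no, score))
    return merged


print(merge_adjacent_lines([(3, 0.5), (4, 0.7), (10, 0.2)]))
# [(3, 4, 0.7), (10, 10, 0.2)]
```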
3 changes: 1 addition & 2 deletions app/api/agent_write_patch.py
@@ -5,7 +5,6 @@
import json
from copy import deepcopy
from os.path import join as pjoin
from typing import Tuple

from app import globals
from app.analysis.sbfl import MethodId
@@ -61,7 +60,7 @@ def run_with_retries(
testcases_passing,
testcases_failing,
retries=3,
) -> Tuple[str, float, int, int]:
) -> tuple[str, float, int, int]:
"""
Since the agent may not always write an applicable patch, we allow for retries.
This is a wrapper around the actual run.
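
As the docstring notes, a generated patch may not apply, so `run_with_retries` wraps the single attempt in a bounded retry loop. The loop body is not shown in this diff; the sketch below only illustrates the general pattern, with `write_patch_once` standing in for the real per-attempt call:

```python
# Illustrative retry pattern only; run_with_retries' real body is not part of
# this diff, and write_patch_once is a hypothetical stand-in callable.
def retry_patch_attempts(write_patch_once, retries: int = 3) -> str | None:
    for attempt in range(1, retries + 1):
        patch, applicable = write_patch_once(attempt)
        if applicable:
            return patch  # stop at the first patch that applies cleanly
    return None  # every attempt produced an inapplicable patch
```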
16 changes: 11 additions & 5 deletions app/api/eval_helper.py
@@ -194,7 +194,7 @@ def get_logs_eval(repo_name: str, log_file_path: str):
Parse a log file to get test status for each test case.
"""
log_parser = MAP_REPO_TO_PARSER[repo_name]
with open(log_file_path, "r") as f:
with open(log_file_path) as f:
content = f.read()
if TESTS_ERROR in content or TESTS_TIMEOUT in content:
# something went wrong and there is no test status to parse
@@ -203,11 +203,17 @@
return log_parser(content), True


test_passed = lambda case, sm: case in sm and sm[case] == TestStatus.PASSED.value
def test_passed(case, sm):
return case in sm and sm[case] == TestStatus.PASSED.value

test_failed = lambda case, sm: case not in sm or any(
[sm[case] == status for status in [TestStatus.FAILED.value, TestStatus.ERROR.value]]
)

def test_failed(case, sm):
return case not in sm or any(
[
sm[case] == status
for status in [TestStatus.FAILED.value, TestStatus.ERROR.value]
]
)


# Result Categories
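
The two `def`s above replace the earlier lambdas without changing behaviour. A self-contained sketch of that behaviour, including the detail that a test missing from the status map counts as failed (the `TestStatus` enum below is a stand-in with assumed values, since the real one is imported elsewhere and is not part of this diff):

```python
from enum import Enum


class TestStatus(Enum):
    # Stand-in for the project's real TestStatus enum; the values are assumptions.
    PASSED = "PASSED"
    FAILED = "FAILED"
    ERROR = "ERROR"


def test_passed(case, sm):
    return case in sm and sm[case] == TestStatus.PASSED.value


def test_failed(case, sm):
    return case not in sm or any(
        [
            sm[case] == status
            for status in [TestStatus.FAILED.value, TestStatus.ERROR.value]
        ]
    )


status_map = {"test_ok": "PASSED", "test_broken": "ERROR"}
assert test_passed("test_ok", status_map)
assert test_failed("test_broken", status_map)
assert test_failed("test_never_ran", status_map)  # absent cases count as failed
```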
19 changes: 5 additions & 14 deletions app/api/execution.py
@@ -3,23 +3,16 @@
"""

import os
import shutil
from os import PathLike
from os.path import join as pjoin
from pprint import pprint
from typing import Tuple
import subprocess

from app import log
from app import globals, log
from app import utils as app_utils
from app.api.eval_helper import (
ResolvedStatus,
get_eval_report,
get_logs_eval,
get_resolution_status,
)
from app import globals



def run_test_suite_for_correctness(
@@ -32,7 +25,7 @@ def run_test_suite_for_correctness(
testcases_failing,
run_test_suite_log_file,
logger,
) -> Tuple[bool, str]:
) -> tuple[bool, str]:
"""
Run the developer test suite, and record pass/fail results.
The goal is to check correctness of a patched program, while returning
@@ -91,7 +84,7 @@ def run_test_suite_for_correctness(

if not parse_ok:
# log file says test execution has error
with open(run_test_suite_log_file, "r") as f:
with open(run_test_suite_log_file) as f:
error_message = f.read()
return False, error_message

@@ -104,15 +97,13 @@ def run_test_suite_for_correctness(
logger, f"[Run test-suite] Resolution status: {resolution_status}"
)
if resolution_status == ResolvedStatus.FULL:
log.log_and_print(
logger, f"[Run test-suite] Returning True since all resolved."
)
log.log_and_print(logger, "[Run test-suite] Returning True since all resolved.")
return True, ""

else:
# FIXME: The current failure message is simple; maybe can add failure reasons to it
log.log_and_print(
logger, f"[Run test-suite] Returning False since some tests failed."
logger, "[Run test-suite] Returning False since some tests failed."
)
error_message = "Some tests have failed."
return False, error_message
Diffs for the remaining 16 changed files are not shown here.