Skip to content

Commit

Permalink
Add problem validation for dmoj-cli; #335
Browse files Browse the repository at this point in the history
  • Loading branch information
kiritofeng committed Mar 4, 2021
1 parent 47c08c1 commit 3d11bef
Show file tree
Hide file tree
Showing 4 changed files with 114 additions and 18 deletions.
2 changes: 2 additions & 0 deletions dmoj/commands/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
from dmoj.commands.show import ShowCommand
from dmoj.commands.submissions import ListSubmissionsCommand
from dmoj.commands.submit import SubmitCommand
from dmoj.commands.test import TestCommand

all_commands: List[Type[Command]] = [
ListProblemsCommand,
Expand All @@ -18,6 +19,7 @@
ResubmitCommand,
RejudgeCommand,
DifferenceCommand,
TestCommand,
ShowCommand,
HelpCommand,
QuitCommand,
Expand Down
87 changes: 87 additions & 0 deletions dmoj/commands/test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
import sys
import traceback
from operator import itemgetter

from dmoj import executors
from dmoj.commands.base_command import Command
from dmoj.error import InvalidCommandException
from dmoj.judgeenv import get_problem_root, get_supported_problems
from dmoj.problem import Problem
from dmoj.testsuite import Tester
from dmoj.utils.ansi import ansi_style, print_ansi

all_executors = executors.executors


class ProblemTester(Tester):
    """Tester that runs the validation tests declared in a problem's own config."""

    def test_problem(self, problem_id):
        """Run every test declared for *problem_id* and return the failure count.

        Tests come from the problem config's ``tests`` list; entries without a
        ``source`` key are ignored (there is no way to identify them).
        """
        self.output(ansi_style('Testing problem #ansi[%s](cyan|bold)...') % problem_id)
        fails = 0

        config = Problem.get_config(problem_id)

        if not config or 'tests' not in config or not config['tests']:
            self.output(ansi_style('\t#ansi[Skipped](magenta|bold) - No tests found'))
            # Bug fix: the original fell through to the loop below, which
            # crashes when config is falsy or has no 'tests' entry.
            return 0

        for test in config['tests']:
            # Do this check here as we need some way to identify the test
            if 'source' not in test:
                continue

            test_name = test.get('label', test['source'])
            self.output(ansi_style('\tRunning test #ansi[%s](yellow|bold)') % test_name)
            try:
                test_fails = self.run_test(problem_id, test)
            except Exception:
                fails += 1
                self.output(ansi_style('\t#ansi[Test failed with exception:](red|bold)'))
                self.output(traceback.format_exc())
            else:
                self.output(ansi_style('\tResult of test #ansi[%s](yellow|bold): ') % test_name +
                            ansi_style(['#ansi[Failed](red|bold)', '#ansi[Success](green|bold)'][not test_fails]))
                fails += test_fails

        return fails

    def _check_targets(self, targets):
        """Return True if the current platform matches one of *targets*.

        Bug fix: the original definition omitted ``self`` even though the
        method is invoked as ``self._check_targets(...)``, which raised a
        TypeError whenever a test declared 'targets'.
        """
        if 'posix' in targets:
            return True
        if 'freebsd' in sys.platform:
            if 'freebsd' in targets:
                return True
            if not sys.platform.startswith('freebsd') and 'kfreebsd' in targets:
                return True
        elif sys.platform.startswith('linux') and 'linux' in targets:
            return True
        return False

    def run_test(self, problem_id, config):
        """Run a single test entry; returns its failure count (0 if skipped).

        A test restricted by 'targets' is skipped on non-matching platforms.
        """
        if 'targets' in config and not self._check_targets(config['targets']):
            return 0

        return self._run_test_case(problem_id, get_problem_root(problem_id), config)


class TestCommand(Command):
    """Judge CLI command that validates a problem by running its declared tests."""

    name = 'test'
    help = 'Runs tests on a problem.'

    def _populate_parser(self):
        # Single positional argument: the problem to validate.
        self.arg_parser.add_argument('problem_id', help='id of problem to test')

    def execute(self, line):
        args = self.arg_parser.parse_args(line)
        problem_id = args.problem_id

        # Reject ids the judge does not know about before doing any work.
        known = (entry[0] for entry in get_supported_problems())
        if all(candidate != problem_id for candidate in known):
            raise InvalidCommandException("unknown problem '%s'" % problem_id)

        fail_count = ProblemTester().test_problem(problem_id)
        print()
        print('Test complete')
        if fail_count:
            print_ansi('#ansi[A total of %d test(s) failed](red|bold)' % fail_count)
        else:
            print_ansi('#ansi[All tests passed.](green|bold)')
4 changes: 4 additions & 0 deletions dmoj/problem.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,10 @@ def __init__(self, problem_id, time_limit, memory_limit, meta):
self.problem_data.archive = self._resolve_archive_files()
self._resolve_test_cases()

@classmethod
def get_config(cls, problem_id):
    """Return the parsed problem configuration for *problem_id*.

    Constructs a throwaway Problem with placeholder time/memory limits and
    empty meta purely to reuse its config-loading machinery, then returns
    its ``config`` attribute. NOTE(review): Problem.__init__ also resolves
    archive files and test cases, so this does more work than a pure config
    read — confirm that is acceptable for callers.
    """
    return Problem(problem_id, None, None, {}).config

def _match_test_cases(self, filenames, input_case_pattern, output_case_pattern, case_points):
def try_match_int(match, group):
try:
Expand Down
39 changes: 21 additions & 18 deletions dmoj/testsuite.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,12 +23,12 @@ def fail(self, message):
self.output('\t\t' + message.replace('\r\n', '\n').replace('\n', '\r\n\t\t'))
self.failed = True

def set_expected(self, codes_all, codes_cases, points_all, points_cases, feedback_all, feedback_cases):
def set_expected(self, codes_all, codes_cases, score_all, score_cases, feedback_all, feedback_cases):
self.failed = False
self.codes_all = codes_all
self.codes_cases = codes_cases
self.points_all = points_all
self.points_cases = points_cases
self.score_all = score_all
self.score_cases = score_cases
self.feedback_all = feedback_all
self.feedback_cases = feedback_cases

Expand All @@ -49,15 +49,15 @@ def test_case_status_packet(self, position, result):
elif code not in self.codes_all:
self.fail('Unexpected global code: %s, expecting %s' % (code, ', '.join(self.codes_all)))

if position in self.points_cases:
if result.points not in self.points_cases[position]:
if position in self.score_cases:
if result.points not in self.score_cases[position]:
self.fail(
'Unexpected points for case %d: %s, expecting %s'
% (position, result.points, ', '.join(self.points_cases[position]))
'Unexpected score for case %d: %s, expecting %s'
% (position, result.points, ', '.join(self.score_cases[position]))
)
elif self.points_all is not None and result.points not in self.points_all:
self.fail('Unexpected global points: %s, expecting %s' % (
result.points, ', '.join(map(str, self.points_all))))
elif self.score_all is not None and result.points not in self.score_all:
self.fail('Unexpected global score: %s, expecting %s' % (
result.points, ', '.join(map(str, self.score_all))))

feedback = self.feedback_all
if position in self.feedback_cases:
Expand Down Expand Up @@ -194,6 +194,9 @@ def run_test_case(self, problem, case, case_dir):
self.output(ansi_style('\t\t#ansi[Skipped](magenta|bold) - No usable test.yml'))
return 0

return self._run_test_case(problem, case_dir, config)

def _run_test_case(self, problem, case_dir, config):
if 'skip' in config and config['skip']:
self.output(ansi_style('\t\t#ansi[Skipped](magenta|bold) - Unsupported on current platform'))
return 0
Expand All @@ -215,8 +218,8 @@ def run_test_case(self, problem, case, case_dir):
codes_all, codes_cases = self.parse_expect(
config.get('expect', 'AC'), config.get('cases', {}), self.parse_expected_codes
)
points_all, points_cases = self.parse_expect(
config.get('points'), config.get('points_cases', {}), self.parse_points
score_all, score_cases = self.parse_expect(
config.get('score'), config.get('score_cases', {}), self.parse_score
)
feedback_all, feedback_cases = self.parse_expect(
config.get('feedback'), config.get('feedback_cases', {}), self.parse_feedback
Expand All @@ -228,7 +231,7 @@ def output_case(data):
fails = 0
for source in sources:
self.sub_id += 1
self.manager.set_expected(codes_all, codes_cases, points_all, points_cases, feedback_all, feedback_cases)
self.manager.set_expected(codes_all, codes_cases, score_all, score_cases, feedback_all, feedback_cases)
self.judge.begin_grading(
Submission(self.sub_id, problem, language, source, time, memory, False, {}),
blocking=True,
Expand Down Expand Up @@ -257,13 +260,13 @@ def parse_expected_codes(self, codes):
assert not (result - self.all_codes)
return result

def parse_score(self, score):
    """Parse an expected-score spec into a set of ints, or None for wildcard.

    :param score: None or ``'*'`` (match any score), a single int or
        numeric string, or an iterable of such values.
    :return: None for the wildcard forms, otherwise a set of ints.

    Fix: the scraped diff left the pre-rename ``parse_points`` lines
    interleaved with the renamed ``parse_score`` ones; only the renamed
    side is kept, matching the rest of this change set.
    """
    if score is None or score == '*':
        return None
    elif isinstance(score, (str, int)):
        return {int(score)}
    else:
        return set(map(int, score))

def parse_feedback(self, feedback):
if feedback is None or feedback == '*':
Expand Down

0 comments on commit 3d11bef

Please sign in to comment.