From 852f8b2f571c4f1d0f82614abd5229f9caefb906 Mon Sep 17 00:00:00 2001
From: kiritofeng
Date: Thu, 26 Dec 2019 03:13:35 -0500
Subject: [PATCH] Add problem validation for `dmoj-cli`; #335

---
 dmoj/commands/__init__.py |   5 +-
 dmoj/commands/test.py     | 107 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 110 insertions(+), 2 deletions(-)
 create mode 100644 dmoj/commands/test.py

diff --git a/dmoj/commands/__init__.py b/dmoj/commands/__init__.py
index 64141fb7e..36c948668 100644
--- a/dmoj/commands/__init__.py
+++ b/dmoj/commands/__init__.py
@@ -10,8 +10,9 @@
 from dmoj.commands.show import ShowCommand
 from dmoj.commands.submissions import ListSubmissionsCommand
 from dmoj.commands.submit import SubmitCommand
+from dmoj.commands.test import TestCommand
 
 all_commands: List[Type[Command]] = [
-    ListProblemsCommand, ListSubmissionsCommand, SubmitCommand, ResubmitCommand, RejudgeCommand, DifferenceCommand,
-    ShowCommand, HelpCommand, QuitCommand,
+    ListProblemsCommand, ListSubmissionsCommand, SubmitCommand, ResubmitCommand, RejudgeCommand, TestCommand,
+    DifferenceCommand, ShowCommand, HelpCommand, QuitCommand,
 ]
diff --git a/dmoj/commands/test.py b/dmoj/commands/test.py
new file mode 100644
index 000000000..609faa374
--- /dev/null
+++ b/dmoj/commands/test.py
@@ -0,0 +1,107 @@
+import os
+import traceback
+from operator import itemgetter
+
+import yaml
+
+from dmoj import executors
+from dmoj.commands.base_command import Command
+from dmoj.error import InvalidCommandException
+from dmoj.judgeenv import get_problem_root, get_supported_problems
+from dmoj.testsuite import Tester
+from dmoj.utils.ansi import ansi_style, print_ansi
+
+all_executors = executors.executors
+
+
+class ProblemTester(Tester):
+    def test_problem(self, problem):
+        self.output(ansi_style('Testing problem #ansi[%s](cyan|bold)...') % problem)
+        fails = 0
+        with open(os.path.join(get_problem_root(problem), 'init.yml'), 'r') as f:
+            config = yaml.safe_load(f.read())
+
+        if not config or 'tests' not in config or not config['tests']:
+            self.output(ansi_style('\t#ansi[Skipped](magenta|bold) - No tests found'))
+            return 0
+
+        for test in config['tests']:
+            # Do this check here as we need some way to identify the test
+            if 'source' not in test:
+                continue
+
+            test_name = test.get('label', test['source'])
+            self.output(ansi_style('\tRunning test #ansi[%s](yellow|bold)') % test_name)
+            try:
+                test_fails = self.run_test(problem, test)
+            except Exception:
+                fails += 1
+                self.output(ansi_style('\t#ansi[Test failed with exception:](red|bold)'))
+                self.output(traceback.format_exc())
+            else:
+                self.output(ansi_style('\tResult of test #ansi[%s](yellow|bold): ') % test_name +
+                            ansi_style(['#ansi[Failed](red|bold)', '#ansi[Success](green|bold)'][not test_fails]))
+                fails += test_fails
+
+        return fails
+
+    def run_test(self, problem, config):
+        if 'skip' in config and config['skip']:
+            self.output(ansi_style('\t\t#ansi[Skipped](magenta|bold) - Test skipped'))
+            return 0
+
+        try:
+            language = config['lang']
+            if language not in all_executors:
+                self.output(ansi_style('\t\t#ansi[Skipped](magenta|bold) - Language not supported'))
+                return 0
+
+            time = config['timelimit']
+            memory = config['memlimit']
+        except KeyError:
+            self.output(ansi_style('\t\t#ansi[Skipped](magenta|bold) - Invalid configuration'))
+            return 0
+
+        with open(os.path.join(get_problem_root(problem), config['source'])) as f:
+            source = f.read()
+
+        codes_all, codes_cases = self.parse_expect(config.get('expect', 'AC'),
+                                                   config.get('cases', {}),
+                                                   self.parse_expected_codes)
+        feedback_all, feedback_cases = self.parse_expect(config.get('feedback'),
+                                                         config.get('feedback_cases', {}),
+                                                         self.parse_feedback)
+
+        def output_case(data):
+            self.output('\t\t' + data.strip())
+
+        self.sub_id += 1
+        self.manager.set_expected(codes_all, codes_cases, feedback_all, feedback_cases)
+        self.judge.begin_grading(self.sub_id, problem, language, source, time, memory, False, False, blocking=True,
+                                 report=output_case)
+        return self.manager.failed
+
+
+class TestCommand(Command):
+    name = 'test'
+    help = 'Runs tests on a problem.'
+
+    def _populate_parser(self):
+        self.arg_parser.add_argument('problem_id', help='id of problem to test')
+
+    def execute(self, line):
+        args = self.arg_parser.parse_args(line)
+
+        problem_id = args.problem_id
+
+        if problem_id not in map(itemgetter(0), get_supported_problems()):
+            raise InvalidCommandException("unknown problem '%s'" % problem_id)
+
+        tester = ProblemTester()
+        fails = tester.test_problem(problem_id)
+        print()
+        print('Test complete')
+        if fails:
+            print_ansi('#ansi[A total of %d test(s) failed](red|bold)' % fails)
+        else:
+            print_ansi('#ansi[All tests passed.](green|bold)')
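
The `tests` block that this command reads from a problem's init.yml uses the
keys consumed above: `source` (required, since it also identifies the test),
plus optional `label`, `lang`, `timelimit`, `memlimit`, `skip`, and the
expected results via `expect`/`cases` and `feedback`/`feedback_cases`. As a
sketch, a minimal stanza that run_test() would accept might look like the
following; the file name, executor id, and limits are illustrative, not taken
from any real problem:

    tests:
      - source: tests/solution.cpp   # path relative to the problem root
        label: reference-ac          # optional; defaults to the source path
        lang: CPP17                  # must name an installed executor
        timelimit: 2                 # illustrative limits
        memlimit: 262144
        expect: AC                   # expected verdict; defaults to AC

With such a problem available under the configured problem root, running
`test <problem_id>` at the dmoj-cli prompt grades each listed test, prints a
per-test verdict, and reports the total failure count at the end.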