Add problem validation for dmoj-cli; #335
kiritofeng committed Jan 25, 2020
1 parent 1277bad commit 852f8b2
Showing 2 changed files with 109 additions and 2 deletions.
dmoj/commands/__init__.py (3 additions & 2 deletions)
@@ -10,8 +10,9 @@
 from dmoj.commands.show import ShowCommand
 from dmoj.commands.submissions import ListSubmissionsCommand
 from dmoj.commands.submit import SubmitCommand
+from dmoj.commands.test import TestCommand

 all_commands: List[Type[Command]] = [
-    ListProblemsCommand, ListSubmissionsCommand, SubmitCommand, ResubmitCommand, RejudgeCommand, DifferenceCommand,
-    ShowCommand, HelpCommand, QuitCommand,
+    ListProblemsCommand, ListSubmissionsCommand, SubmitCommand, ResubmitCommand, RejudgeCommand, TestCommand,
+    DifferenceCommand, ShowCommand, HelpCommand, QuitCommand,
 ]
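
For context: each entry in all_commands is a Command subclass, presumably dispatched by the dmoj-cli shell via its name attribute. A minimal sketch of the interface these commands share, inferred entirely from TestCommand later in this commit; EchoCommand itself is hypothetical and not part of the diff:

# Hypothetical command mirroring the hooks TestCommand defines below.
from dmoj.commands.base_command import Command

class EchoCommand(Command):
    name = 'echo'  # the token typed at the dmoj-cli prompt
    help = 'Prints its argument back.'

    def _populate_parser(self):
        # arg_parser is assumed to come from the Command base class,
        # since TestCommand uses it without defining it.
        self.arg_parser.add_argument('text', help='text to print back')

    def execute(self, line):
        args = self.arg_parser.parse_args(line)
        print(args.text)

Per this diff, registering such a class in all_commands is all that is needed to expose it in the CLI.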
dmoj/commands/test.py (new file, 106 additions)
@@ -0,0 +1,106 @@
import os
import traceback
from operator import itemgetter

import yaml

from dmoj import executors
from dmoj.commands.base_command import Command
from dmoj.error import InvalidCommandException
from dmoj.judgeenv import get_problem_root, get_supported_problems
from dmoj.testsuite import Tester
from dmoj.utils.ansi import ansi_style, print_ansi

all_executors = executors.executors


class ProblemTester(Tester):
    def test_problem(self, problem):
        self.output(ansi_style('Testing problem #ansi[%s](cyan|bold)...') % problem)
        fails = 0
        with open(os.path.join(get_problem_root(problem), 'init.yml'), 'r') as f:
            config = yaml.safe_load(f.read())

        if not config or 'tests' not in config or not config['tests']:
            self.output(ansi_style('\t#ansi[Skipped](magenta|bold) - No tests found'))
            return 0  # without this, the loop below would crash when no tests exist

        for test in config['tests']:
            # Do this check here as we need some way to identify the test
            if 'source' not in test:
                continue

            test_name = test.get('label', test['source'])
            self.output(ansi_style('\tRunning test #ansi[%s](yellow|bold)') % test_name)
            try:
                test_fails = self.run_test(problem, test)
            except Exception:
                fails += 1
                self.output(ansi_style('\t#ansi[Test failed with exception:](red|bold)'))
                self.output(traceback.format_exc())
            else:
                self.output(ansi_style('\tResult of test #ansi[%s](yellow|bold): ') % test_name +
                            ansi_style(['#ansi[Failed](red|bold)', '#ansi[Success](green|bold)'][not test_fails]))
                fails += test_fails

        return fails

    def run_test(self, problem, config):
        if 'skip' in config and config['skip']:
            self.output(ansi_style('\t\t#ansi[Skipped](magenta|bold) - Test skipped'))
            return 0

        try:
            language = config['lang']
            if language not in all_executors:
                self.output(ansi_style('\t\t#ansi[Skipped](magenta|bold) - Language not supported'))
                return 0

            time = config['timelimit']
            memory = config['memlimit']
        except KeyError:
            self.output(ansi_style('\t\t#ansi[Skipped](magenta|bold) - Invalid configuration'))
            return 0

        with open(os.path.join(get_problem_root(problem), config['source'])) as f:
            source = f.read()

        # Expected verdicts and feedback, overall and per-case.
        codes_all, codes_cases = self.parse_expect(config.get('expect', 'AC'),
                                                   config.get('cases', {}),
                                                   self.parse_expected_codes)
        feedback_all, feedback_cases = self.parse_expect(config.get('feedback'),
                                                         config.get('feedback_cases', {}),
                                                         self.parse_feedback)

        def output_case(data):
            self.output('\t\t' + data.strip())

        self.sub_id += 1
        self.manager.set_expected(codes_all, codes_cases, feedback_all, feedback_cases)
        self.judge.begin_grading(self.sub_id, problem, language, source, time, memory, False, False, blocking=True,
                                 report=output_case)
        return self.manager.failed


class TestCommand(Command):
    name = 'test'
    help = 'Runs tests on a problem.'

    def _populate_parser(self):
        self.arg_parser.add_argument('problem_id', help='id of problem to test')

    def execute(self, line):
        args = self.arg_parser.parse_args(line)

        problem_id = args.problem_id

        if problem_id not in map(itemgetter(0), get_supported_problems()):
            raise InvalidCommandException("unknown problem '%s'" % problem_id)

        tester = ProblemTester()
        fails = tester.test_problem(problem_id)
        print()
        print('Test complete')
        if fails:
            print_ansi('#ansi[A total of %d test(s) failed](red|bold)' % fails)
        else:
            print_ansi('#ansi[All tests passed.](green|bold)')
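
For reference, a sketch of what ProblemTester.test_problem expects to find when it loads a problem's init.yml, shown as the Python dict that yaml.safe_load would return. Only the key names (tests, label, source, lang, timelimit, memlimit, expect, cases, feedback, feedback_cases, skip) come from the code above; every value here is invented:

# Hypothetical parsed init.yml; key names come from test.py above, values are made up.
config = {
    'tests': [
        {
            'label': 'model solution',      # optional; the source path is used when absent
            'source': 'tests/solution.py',  # resolved relative to get_problem_root(problem)
            'lang': 'PY3',                  # must be a key in executors.executors
            'timelimit': 2.0,
            'memlimit': 65536,
            'expect': 'AC',                 # default when omitted
        },
        {
            'source': 'tests/too_slow.py',
            'lang': 'PY3',
            'timelimit': 1.0,
            'memlimit': 65536,
            'expect': 'TLE',
            'skip': False,                  # True would skip this test
        },
    ],
}

In the dmoj-cli shell the new command is then invoked as test <problem_id>: an unknown id raises InvalidCommandException, and a completed run prints 'Test complete' followed by a red failure count or a green 'All tests passed.'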
