chore: migrate to python v3
Murf-y committed Apr 13, 2023
1 parent 0902024 commit 7709945
Showing 60 changed files with 2,035 additions and 1,140 deletions.
2 changes: 1 addition & 1 deletion VERSION
@@ -1 +1 @@
-v1.002
+v1.004
Binary file added __pycache__/game.cpython-39.pyc (binary file not shown)
Binary file added __pycache__/ghostAgents.cpython-39.pyc (binary file not shown)
Binary file added __pycache__/graphicsDisplay.cpython-39.pyc (binary file not shown)
Binary file added __pycache__/graphicsUtils.cpython-39.pyc (binary file not shown)
Binary file added __pycache__/keyboardAgents.cpython-39.pyc (binary file not shown)
Binary file added __pycache__/layout.cpython-39.pyc (binary file not shown)
Binary file added __pycache__/multiAgents.cpython-39.pyc (binary file not shown)
Binary file added __pycache__/pacman.cpython-39.pyc (binary file not shown)
Binary file added __pycache__/util.cpython-39.pyc (binary file not shown)
170 changes: 89 additions & 81 deletions autograder.py
@@ -4,7 +4,7 @@
 # educational purposes provided that (1) you do not distribute or publish
 # solutions, (2) you retain this notice, and (3) you provide clear
 # attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
 #
 # Attribution Information: The Pacman AI projects were developed at UC Berkeley.
 # The core projects and autograders were primarily created by John DeNero
 # (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
@@ -17,80 +17,87 @@
 import imp
 import optparse
 import os
+import pprint
 import re
 import sys
 import projectParams
 import random
 random.seed(0)
 try:
     from pacman import GameState
 except:
     pass

 # register arguments and set default values
 def readCommand(argv):
-    parser = optparse.OptionParser(description = 'Run public tests on student code')
-    parser.set_defaults(generateSolutions=False, edxOutput=False, muteOutput=False, printTestCase=False, noGraphics=False)
+    parser = optparse.OptionParser(
+        description='Run public tests on student code')
+    parser.set_defaults(generateSolutions=False, edxOutput=False, gsOutput=False,
+                        muteOutput=False, printTestCase=False, noGraphics=False)
     parser.add_option('--test-directory',
-                      dest = 'testRoot',
-                      default = 'test_cases',
-                      help = 'Root test directory which contains subdirectories corresponding to each question')
+                      dest='testRoot',
+                      default='test_cases',
+                      help='Root test directory which contains subdirectories corresponding to each question')
     parser.add_option('--student-code',
-                      dest = 'studentCode',
-                      default = projectParams.STUDENT_CODE_DEFAULT,
-                      help = 'comma separated list of student code files')
+                      dest='studentCode',
+                      default=projectParams.STUDENT_CODE_DEFAULT,
+                      help='comma separated list of student code files')
     parser.add_option('--code-directory',
-                      dest = 'codeRoot',
-                      default = "",
-                      help = 'Root directory containing the student and testClass code')
+                      dest='codeRoot',
+                      default="",
+                      help='Root directory containing the student and testClass code')
     parser.add_option('--test-case-code',
-                      dest = 'testCaseCode',
-                      default = projectParams.PROJECT_TEST_CLASSES,
-                      help = 'class containing testClass classes for this project')
+                      dest='testCaseCode',
+                      default=projectParams.PROJECT_TEST_CLASSES,
+                      help='class containing testClass classes for this project')
     parser.add_option('--generate-solutions',
-                      dest = 'generateSolutions',
-                      action = 'store_true',
-                      help = 'Write solutions generated to .solution file')
+                      dest='generateSolutions',
+                      action='store_true',
+                      help='Write solutions generated to .solution file')
     parser.add_option('--edx-output',
-                      dest = 'edxOutput',
-                      action = 'store_true',
-                      help = 'Generate edX output files')
+                      dest='edxOutput',
+                      action='store_true',
+                      help='Generate edX output files')
+    parser.add_option('--gradescope-output',
+                      dest='gsOutput',
+                      action='store_true',
+                      help='Generate GradeScope output files')
     parser.add_option('--mute',
-                      dest = 'muteOutput',
-                      action = 'store_true',
-                      help = 'Mute output from executing tests')
+                      dest='muteOutput',
+                      action='store_true',
+                      help='Mute output from executing tests')
     parser.add_option('--print-tests', '-p',
-                      dest = 'printTestCase',
-                      action = 'store_true',
-                      help = 'Print each test case before running them.')
+                      dest='printTestCase',
+                      action='store_true',
+                      help='Print each test case before running them.')
     parser.add_option('--test', '-t',
-                      dest = 'runTest',
-                      default = None,
-                      help = 'Run one particular test. Relative to test root.')
+                      dest='runTest',
+                      default=None,
+                      help='Run one particular test. Relative to test root.')
     parser.add_option('--question', '-q',
-                      dest = 'gradeQuestion',
-                      default = None,
-                      help = 'Grade one particular question.')
+                      dest='gradeQuestion',
+                      default=None,
+                      help='Grade one particular question.')
     parser.add_option('--no-graphics',
-                      dest = 'noGraphics',
-                      action = 'store_true',
-                      help = 'No graphics display for pacman games.')
+                      dest='noGraphics',
+                      action='store_true',
+                      help='No graphics display for pacman games.')
     (options, args) = parser.parse_args(argv)
     return options


 # confirm we should author solution files
 def confirmGenerate():
-    print 'WARNING: this action will overwrite any solution files.'
-    print 'Are you sure you want to proceed? (yes/no)'
+    print('WARNING: this action will overwrite any solution files.')
+    print('Are you sure you want to proceed? (yes/no)')
     while True:
         ans = sys.stdin.readline().strip()
         if ans == 'yes':
             break
         elif ans == 'no':
             sys.exit(0)
         else:
-            print 'please answer either "yes" or "no"'
+            print('please answer either "yes" or "no"')


 # TODO: Fix this so that it tracebacks work correctly
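
The hunk above is the mechanical core of the 2-to-3 migration: print statements become calls to the print() builtin, and the accompanying autopep8-style reformat drops the spaces around '=' in keyword arguments and wraps long calls. A minimal runnable sketch of the print pattern (the first string comes from confirmGenerate above; the rest is invented for illustration):

    # Python 2: print was a statement, so this line is a SyntaxError on 3.x:
    #     print 'WARNING: this action will overwrite any solution files.'
    # Python 3: print is a builtin function, so the arguments are parenthesized.
    print('WARNING: this action will overwrite any solution files.')

    # Multiple arguments are joined with a single space, which is why later
    # hunks can rewrite `print " |", line` as print(" |", line) without
    # changing the output.
    line = 'example'    # invented value for the sketch
    print(" |", line)   # prints: " | example"
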
@@ -103,14 +110,15 @@ def setModuleName(module, filename):

     for i in dir(module):
         o = getattr(module, i)
-        if hasattr(o, '__file__'): continue
+        if hasattr(o, '__file__'):
+            continue

         if type(o) == functionType:
             setattr(o, '__file__', filename)
         elif type(o) == classType:
             setattr(o, '__file__', filename)
             # TODO: assign member __file__'s?
-        #print i, type(o)
+        # print i, type(o)


 #from cStringIO import StringIO
@@ -122,11 +130,10 @@ def loadModuleString(moduleSource):
     #f = StringIO(moduleCodeDict[k])
     #tmp = imp.load_module(k, f, k, (".py", "r", imp.PY_SOURCE))
     tmp = imp.new_module(k)
-    exec moduleCodeDict[k] in tmp.__dict__
+    exec(moduleCodeDict[k], tmp.__dict__)
     setModuleName(tmp, k)
     return tmp

-import py_compile

 def loadModuleFile(moduleName, filePath):
     with open(filePath, 'r') as f:
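
The exec change is the other syntax-level rewrite: Python 2's statement form `exec code in namespace` becomes the builtin call exec(code, namespace). A small self-contained sketch of the dynamic-module trick loadModuleString uses, with an invented module name and source string; note that the imp module this file keeps using still works on Python 3.9 but has been deprecated in favor of importlib since Python 3.4:

    import imp  # deprecated since Python 3.4, but still what autograder.py uses

    source = "def greet():\n    return 'hello'"
    mod = imp.new_module('generated')   # 'generated' is an arbitrary name
    # Python 2 spelling was:  exec source in mod.__dict__
    exec(source, mod.__dict__)          # Python 3 spelling
    print(mod.greet())                  # prints: hello

    # A modern replacement (not what this commit does) would look like:
    #     import importlib.util
    #     spec = importlib.util.spec_from_loader('generated', loader=None)
    #     mod = importlib.util.module_from_spec(spec)
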
@@ -145,18 +152,18 @@ def readFile(path, root=""):

 # TODO: use these
 ERROR_HINT_MAP = {
-  'q1': {
-    "<type 'exceptions.IndexError'>": """
+    'q1': {
+        "<type 'exceptions.IndexError'>": """
       We noticed that your project threw an IndexError on q1.
       While many things may cause this, it may have been from
       assuming a certain number of successors from a state space
       or assuming a certain number of actions available from a given
       state. Try making your code more general (no hardcoded indices)
       and submit again!
       """
-  },
-  'q3': {
-    "<type 'exceptions.AttributeError'>": """
+    },
+    'q3': {
+        "<type 'exceptions.AttributeError'>": """
       We noticed that your project threw an AttributeError on q3.
       While many things may cause this, it may have been from assuming
       a certain size or structure to the state space. For example, if you have
@@ -165,10 +172,9 @@ def readFile(path, root=""):
       making your code more general and submit again!
       """
     }
-  }
+}

-import pprint

 def splitStrings(d):
     d2 = dict(d)
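
One thing the migration leaves untouched: the keys of ERROR_HINT_MAP are Python 2 type-repr strings, and Python 3 renders exception types differently, so any future code that acts on the "TODO: use these" comment and looks hints up via str(type(e)) would never match. A quick check:

    try:
        [][0]                    # deliberately raise IndexError
    except Exception as e:
        print(str(type(e)))      # Python 3 prints: <class 'IndexError'>
        # Python 2 printed:      <type 'exceptions.IndexError'>
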
@@ -183,12 +189,12 @@ def splitStrings(d):

 def printTest(testDict, solutionDict):
     pp = pprint.PrettyPrinter(indent=4)
-    print "Test case:"
+    print("Test case:")
     for line in testDict["__raw_lines__"]:
-        print " |", line
-    print "Solution:"
+        print(" |", line)
+    print("Solution:")
     for line in solutionDict["__raw_lines__"]:
-        print " |", line
+        print(" |", line)


 def runTest(testName, moduleDict, printTestCase=False, display=None):
@@ -211,14 +217,15 @@ def runTest(testName, moduleDict, printTestCase=False, display=None):
         printTest(testDict, solutionDict)

     # This is a fragile hack to create a stub grades object
-    grades = grading.Grades(projectParams.PROJECT_NAME, [(None,0)])
+    grades = grading.Grades(projectParams.PROJECT_NAME, [(None, 0)])
     testCase.execute(grades, moduleDict, solutionDict)


 # returns all the tests you need to run in order to run question
 def getDepends(testParser, testRoot, question):
     allDeps = [question]
-    questionDict = testParser.TestParser(os.path.join(testRoot, question, 'CONFIG')).parse()
+    questionDict = testParser.TestParser(
+        os.path.join(testRoot, question, 'CONFIG')).parse()
     if 'depends' in questionDict:
         depends = questionDict['depends'].split()
         for d in depends:
@@ -228,20 +235,23 @@ def getDepends(testParser, testRoot, question):

 # get list of questions to grade
 def getTestSubdirs(testParser, testRoot, questionToGrade):
-    problemDict = testParser.TestParser(os.path.join(testRoot, 'CONFIG')).parse()
+    problemDict = testParser.TestParser(
+        os.path.join(testRoot, 'CONFIG')).parse()
     if questionToGrade != None:
         questions = getDepends(testParser, testRoot, questionToGrade)
         if len(questions) > 1:
-            print 'Note: due to dependencies, the following tests will be run: %s' % ' '.join(questions)
+            print('Note: due to dependencies, the following tests will be run: %s' %
+                  ' '.join(questions))
         return questions
     if 'order' in problemDict:
         return problemDict['order'].split()
     return sorted(os.listdir(testRoot))


 # evaluate student code
-def evaluate(generateSolutions, testRoot, moduleDict, exceptionMap=ERROR_HINT_MAP, edxOutput=False, muteOutput=False,
-             printTestCase=False, questionToGrade=None, display=None):
+def evaluate(generateSolutions, testRoot, moduleDict, exceptionMap=ERROR_HINT_MAP,
+             edxOutput=False, muteOutput=False, gsOutput=False,
+             printTestCase=False, questionToGrade=None, display=None):
     # imports of testbench code. note that the testClasses import must follow
     # the import of student code due to dependencies
     import testParser
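
The evaluate signature gains gsOutput with a False default, so call sites that predate the Gradescope option keep working unchanged; only the new path in __main__ passes it explicitly. A tiny illustration of that pattern with a hypothetical function (report and its flags are invented for this sketch):

    def report(score, edxOutput=False, gsOutput=False):
        # New flags default to off, so pre-existing calls are unaffected.
        parts = ['score=%d' % score]
        if edxOutput:
            parts.append('edx')
        if gsOutput:
            parts.append('gradescope')
        return ' '.join(parts)

    print(report(10))                  # old call site, still valid
    print(report(10, gsOutput=True))   # new opt-in behavior
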
@@ -258,14 +268,16 @@ def evaluate(generateSolutions, testRoot, moduleDict, exceptionMap=ERROR_HINT_MAP,
             continue

         # create a question object
-        questionDict = testParser.TestParser(os.path.join(subdir_path, 'CONFIG')).parse()
+        questionDict = testParser.TestParser(
+            os.path.join(subdir_path, 'CONFIG')).parse()
         questionClass = getattr(testClasses, questionDict['class'])
         question = questionClass(questionDict, display)
         questionDicts[q] = questionDict

         # load test cases into question
-        tests = filter(lambda t: re.match('[^#~.].*\.test\Z', t), os.listdir(subdir_path))
-        tests = map(lambda t: re.match('(.*)\.test\Z', t).group(1), tests)
+        tests = [t for t in os.listdir(
+            subdir_path) if re.match('[^#~.].*\.test\Z', t)]
+        tests = [re.match('(.*)\.test\Z', t).group(1) for t in tests]
         for t in sorted(tests):
             test_file = os.path.join(subdir_path, '%s.test' % t)
             solution_file = os.path.join(subdir_path, '%s.solution' % t)
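
On Python 3, filter and map return lazy one-shot iterators instead of lists, so the migration rewrites them as list comprehensions to preserve list semantics (len, indexing, iterating more than once). A self-contained sketch of the rewrite above, using invented file names:

    import re

    names = ['basic.test', 'basic.solution', '#junk.test', 'deep.test']

    # Python 2: filter/map returned lists directly.
    # Python 3: materialize explicitly with comprehensions.
    tests = [t for t in names if re.match(r'[^#~.].*\.test\Z', t)]
    tests = [re.match(r'(.*)\.test\Z', t).group(1) for t in tests]
    print(sorted(tests))   # prints: ['basic', 'deep']
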
@@ -276,6 +288,7 @@ def evaluate(generateSolutions, testRoot, moduleDict, exceptionMap=ERROR_HINT_MAP,
             testDict['test_out_file'] = test_out_file
             testClass = getattr(projectTestClasses, testDict['class'])
             testCase = testClass(question, testDict)
+
             def makefun(testCase, solution_file):
                 if generateSolutions:
                     # write solution file to disk
@@ -296,17 +309,17 @@ def makefun(question):
         setattr(sys.modules[__name__], q, makefun(question))
         questions.append((q, question.getMaxPoints()))

-    grades = grading.Grades(projectParams.PROJECT_NAME, questions, edxOutput=edxOutput, muteOutput=muteOutput)
+    grades = grading.Grades(projectParams.PROJECT_NAME, questions,
+                            gsOutput=gsOutput, edxOutput=edxOutput, muteOutput=muteOutput)
     if questionToGrade == None:
         for q in questionDicts:
             for prereq in questionDicts[q].get('depends', '').split():
                 grades.addPrereq(q, prereq)

-    grades.grade(sys.modules[__name__], bonusPic = projectParams.BONUS_PIC)
+    grades.grade(sys.modules[__name__], bonusPic=projectParams.BONUS_PIC)
     return grades.points


-
 def getDisplay(graphicsByDefault, options=None):
     graphics = graphicsByDefault
     if options is not None and options.noGraphics:
@@ -321,31 +334,26 @@ def getDisplay(graphicsByDefault, options=None):
         return textDisplay.NullGraphics()


-
-
 if __name__ == '__main__':
     options = readCommand(sys.argv)
     if options.generateSolutions:
         confirmGenerate()
     codePaths = options.studentCode.split(',')
     # moduleCodeDict = {}
     # for cp in codePaths:
     #     moduleName = re.match('.*?([^/]*)\.py', cp).group(1)
     #     moduleCodeDict[moduleName] = readFile(cp, root=options.codeRoot)
     # moduleCodeDict['projectTestClasses'] = readFile(options.testCaseCode, root=options.codeRoot)
     # moduleDict = loadModuleDict(moduleCodeDict)

     moduleDict = {}
     for cp in codePaths:
         moduleName = re.match('.*?([^/]*)\.py', cp).group(1)
-        moduleDict[moduleName] = loadModuleFile(moduleName, os.path.join(options.codeRoot, cp))
+        moduleDict[moduleName] = loadModuleFile(
+            moduleName, os.path.join(options.codeRoot, cp))
     moduleName = re.match('.*?([^/]*)\.py', options.testCaseCode).group(1)
-    moduleDict['projectTestClasses'] = loadModuleFile(moduleName, os.path.join(options.codeRoot, options.testCaseCode))
+    moduleDict['projectTestClasses'] = loadModuleFile(
+        moduleName, os.path.join(options.codeRoot, options.testCaseCode))

     if options.runTest != None:
-        runTest(options.runTest, moduleDict, printTestCase=options.printTestCase, display=getDisplay(True, options))
+        runTest(options.runTest, moduleDict, printTestCase=options.printTestCase,
+                display=getDisplay(True, options))
     else:
         evaluate(options.generateSolutions, options.testRoot, moduleDict,
-                 edxOutput=options.edxOutput, muteOutput=options.muteOutput, printTestCase=options.printTestCase,
-                 questionToGrade=options.gradeQuestion, display=getDisplay(options.gradeQuestion!=None, options))
+                 gsOutput=options.gsOutput,
+                 edxOutput=options.edxOutput, muteOutput=options.muteOutput, printTestCase=options.printTestCase,
+                 questionToGrade=options.gradeQuestion, display=getDisplay(options.gradeQuestion != None, options))
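
End to end, the new --gradescope-output flag threads from readCommand through evaluate into grading.Grades. The real Grades class lives in grading.py, presumably updated elsewhere in this 60-file commit but not shown in this excerpt; the stub below is a hypothetical stand-in just to make the plumbing concrete:

    class Grades:
        # Hypothetical stand-in for grading.Grades, invented for illustration.
        def __init__(self, project, questions,
                     gsOutput=False, edxOutput=False, muteOutput=False):
            self.project = project
            self.questions = questions
            self.gsOutput = gsOutput

        def finish(self):
            # The flag only selects an extra output format at the end of a run.
            if self.gsOutput:
                print('would write a Gradescope results file here')

    g = Grades('pacman-autograder', [('q1', 4)], gsOutput=True)
    g.finish()
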