diff --git a/src/python/WMComponent/DBS3Buffer/DBSBufferUtil.py b/src/python/WMComponent/DBS3Buffer/DBSBufferUtil.py
index 78e1cac8da..fa8f63c9b0 100644
--- a/src/python/WMComponent/DBS3Buffer/DBSBufferUtil.py
+++ b/src/python/WMComponent/DBS3Buffer/DBSBufferUtil.py
@@ -6,6 +6,8 @@
 """
 from __future__ import print_function
 
+from future.utils import viewitems
+
 import threading
 
 from collections import defaultdict
@@ -53,14 +55,14 @@ def loadDBSBufferFilesBulk(self, fileObjs):
             dbsFiles.append(dbsfile)
 
         for dbsfile in dbsFiles:
-            if 'runInfo' in dbsfile.keys():
+            if 'runInfo' in dbsfile:
                 # Then we have to replace it with a real run
-                for r in dbsfile['runInfo'].keys():
+                for r in dbsfile['runInfo']:
                     run = Run(runNumber=r)
                     run.extend(dbsfile['runInfo'][r])
                     dbsfile.addRun(run)
                 del dbsfile['runInfo']
-            if 'parentLFNs' in dbsfile.keys():
+            if 'parentLFNs' in dbsfile:
                 # Then we have some parents
                 for lfn in dbsfile['parentLFNs']:
                     newFile = DBSBufferFile(lfn=lfn)
@@ -139,14 +141,14 @@ def findUploadableFilesByDAS(self, datasetpath):
             dbsFiles.append(dbsfile)
 
         for dbsfile in dbsFiles:
-            if 'runInfo' in dbsfile.keys():
+            if 'runInfo' in dbsfile:
                 # Then we have to replace it with a real run
-                for r in dbsfile['runInfo'].keys():
+                for r in dbsfile['runInfo']:
                     run = Run(runNumber=r)
                     run.extendLumis(dbsfile['runInfo'][r])
                     dbsfile.addRun(run)
                 del dbsfile['runInfo']
-            if 'parentLFNs' in dbsfile.keys():
+            if 'parentLFNs' in dbsfile:
                 # Then we have some parents
                 for lfn in dbsfile['parentLFNs']:
                     newFile = DBSBufferFile(lfn=lfn)
@@ -173,14 +175,14 @@ def loadFilesByBlock(self, blockname):
             dbsFiles.append(dbsfile)
 
         for dbsfile in dbsFiles:
-            if 'runInfo' in dbsfile.keys():
+            if 'runInfo' in dbsfile:
                 # Then we have to replace it with a real run
-                for r in dbsfile['runInfo'].keys():
+                for r in dbsfile['runInfo']:
                     run = Run(runNumber=r)
                     run.extendLumis(dbsfile['runInfo'][r])
                     dbsfile.addRun(run)
                 del dbsfile['runInfo']
-            if 'parentLFNs' in dbsfile.keys():
+            if 'parentLFNs' in dbsfile:
                 # Then we have some parents
                 for lfn in dbsfile['parentLFNs']:
                     newFile = DBSBufferFile(lfn=lfn)
@@ -220,7 +222,7 @@ def summaryPhEDExDBSStatus(self, data):
         returns dictionary with kew as workflow and containing dbs/phedex upload status
         """
         summary = defaultdict(dict)
-        for workflow, value in data.iteritems():
+        for workflow, value in viewitems(data):
             # only getting completed workflows
             summary[workflow]["Completed"] = True
 
diff --git a/src/python/WMCore/ACDC/CouchFileset.py b/src/python/WMCore/ACDC/CouchFileset.py
index 66d8206c79..be1e47979e 100644
--- a/src/python/WMCore/ACDC/CouchFileset.py
+++ b/src/python/WMCore/ACDC/CouchFileset.py
@@ -6,6 +6,9 @@
 Created by Dave Evans on 2010-03-19.
 Copyright (c) 2010 Fermilab. All rights reserved.
 """
+
+from future.utils import viewvalues
+
 import time
 
 import WMCore.Database.CMSCouch as CMSCouch
@@ -185,7 +188,7 @@ def listFiles(self):
             raise RuntimeError(msg)
 
         files = doc["files"]
-        for d in files.values():
+        for d in viewvalues(files):
             yield d
 
     @connectToCouch
diff --git a/src/python/WMCore/ACDC/CouchService.py b/src/python/WMCore/ACDC/CouchService.py
index f7548bd24e..cba83863a2 100644
--- a/src/python/WMCore/ACDC/CouchService.py
+++ b/src/python/WMCore/ACDC/CouchService.py
@@ -6,6 +6,7 @@
 Created by Dave Evans on 2010-04-20.
 Copyright (c) 2010 Fermilab. All rights reserved.
 """
+from builtins import object
 from time import time
 
 import WMCore.Database.CouchUtils as CouchUtils
diff --git a/src/python/WMCore/Database/MySQLCore.py b/src/python/WMCore/Database/MySQLCore.py
index b0b83eeb05..2db9a838ea 100644
--- a/src/python/WMCore/Database/MySQLCore.py
+++ b/src/python/WMCore/Database/MySQLCore.py
@@ -73,7 +73,7 @@ def substitute(self, origSQL, origBindsList):
         # variables: RELEASE_VERSION and RELEASE_VERSION_ID the former will
         # match against the latter, causing problems. We'll sort the variable
         # names by length to guard against this.
-        bindVarNames = origBind.keys()
+        bindVarNames = list(origBind)
         bindVarNames.sort(stringLengthCompare)
 
         bindPositions = {}
diff --git a/src/python/WMCore/JobSplitting/JobFactory.py b/src/python/WMCore/JobSplitting/JobFactory.py
index d004cdc60a..d610e9395e 100644
--- a/src/python/WMCore/JobSplitting/JobFactory.py
+++ b/src/python/WMCore/JobSplitting/JobFactory.py
@@ -4,6 +4,8 @@
 
 """
 
+from builtins import range
+
 import logging
 import threading
 
@@ -88,13 +90,13 @@ def __call__(self, jobtype="Job", grouptype="JobGroup", *args, **kwargs):
             module = __import__(module, globals(), locals(), [grouptype])
             self.groupInstance = getattr(module, grouptype.split('.')[-1])
 
-        list(map(lambda x: x.start(), self.generators))
+        list([x.start() for x in self.generators])
 
         self.limit = int(kwargs.get("file_load_limit", self.limit))
 
         self.algorithm(*args, **kwargs)
         self.commit()
-        list(map(lambda x: x.finish(), self.generators))
+        list([x.finish() for x in self.generators])
         return self.jobGroups
 
     def algorithm(self, *args, **kwargs):
@@ -115,7 +117,7 @@ def newGroup(self):
         """
         self.appendJobGroup()
         self.currentGroup = self.groupInstance(subscription=self.subscription)
-        list(map(lambda x: x.startGroup(self.currentGroup), self.generators))
+        list([x.startGroup(self.currentGroup) for x in self.generators])
         return
 
     def newJob(self, name=None, files=None, failedJob=False, failedReason=None):
@@ -157,7 +159,7 @@ def appendJobGroup(self):
         """
 
         if self.currentGroup:
-            list(map(lambda x: x.finishGroup(self.currentGroup), self.generators))
+            list([x.finishGroup(self.currentGroup) for x in self.generators])
         if self.currentGroup:
             self.jobGroups.append(self.currentGroup)
             self.currentGroup = None
@@ -328,7 +330,7 @@ def loadFiles(self, size=10):
             if isinstance(resultProxy.keys, list):
                 keys = resultProxy.keys
             else:
-                keys = resultProxy.keys()
+                keys = resultProxy.keys()  # do not futurize this!
             if isinstance(keys, set):
                 # If it's a set, handle it
                 keys = list(keys)
@@ -463,7 +465,7 @@ def getFilesSortedByLocation(self, eventsPerJob):
 
         if self.checkForAmountOfWork():
             # first, check whether we have enough files to reach the desired events_per_job
-            for sites in lDict.keys():
+            for sites in list(lDict):  # lDict changes size during for loop!
                 availableEventsPerLocation = sum([f['events'] for f in lDict[sites]])
                 if eventsPerJob > availableEventsPerLocation:
                     # then we don't split these files for the moment
diff --git a/src/python/WMCore/JobSplitting/MinFileBased.py b/src/python/WMCore/JobSplitting/MinFileBased.py
index db224f1fb4..73130e80cc 100644
--- a/src/python/WMCore/JobSplitting/MinFileBased.py
+++ b/src/python/WMCore/JobSplitting/MinFileBased.py
@@ -39,7 +39,7 @@ def algorithm(self, *args, **kwargs):
         #Get a dictionary of sites, files
         locationDict = self.sortByLocation()
 
-        for location in locationDict.keys():
+        for location in locationDict:
             #Now we have all the files in a certain location
             fileList = locationDict[location]
             filesInJob = 0
diff --git a/src/python/WMCore/JobSplitting/ParentlessMergeBySize.py b/src/python/WMCore/JobSplitting/ParentlessMergeBySize.py
index a18e522518..318c8a72b1 100644
--- a/src/python/WMCore/JobSplitting/ParentlessMergeBySize.py
+++ b/src/python/WMCore/JobSplitting/ParentlessMergeBySize.py
@@ -4,6 +4,8 @@
 
 WMBS merging that ignores file parents.
 """
+from __future__ import division
+
 import time
 import threading
 
@@ -203,11 +205,11 @@ def algorithm(self, *args, **kwargs):
 
         groupedFiles = self.defineFileGroups(mergeableFiles)
 
-        for pnn in groupedFiles.keys():
+        for pnn in groupedFiles:
             if self.mergeAcrossRuns:
                 self.defineMergeJobs(groupedFiles[pnn])
             else:
-                for runNumber in groupedFiles[pnn].keys():
+                for runNumber in groupedFiles[pnn]:
                     self.defineMergeJobs(groupedFiles[pnn][runNumber])
 
         return
diff --git a/src/python/WMCore/JobSplitting/RunBased.py b/src/python/WMCore/JobSplitting/RunBased.py
index b7a1d1571a..d95f9d8bfd 100644
--- a/src/python/WMCore/JobSplitting/RunBased.py
+++ b/src/python/WMCore/JobSplitting/RunBased.py
@@ -13,6 +13,7 @@
 
 
 
+from builtins import range
 from WMCore.JobSplitting.JobFactory import JobFactory
 from WMCore.DataStructs.Fileset import Fileset
 from WMCore.Services.UUIDLib import makeUUID
@@ -48,7 +49,7 @@ def algorithm(self, *args, **kwargs):
 
         locationDict = self.sortByLocation()
 
-        for location in locationDict.keys():
+        for location in locationDict:
             fileList = locationDict[location]
 
             for f in fileList:
@@ -69,13 +70,13 @@ def algorithm(self, *args, **kwargs):
                 run = min(runList)
 
                 #If we don't have the run, we need to add it
-                if not run in runDict.keys():
+                if run not in runDict:
                     runDict[run] = []
 
                 runDict[run].append(f)
 
 
 
-        for run in runDict.keys():
+        for run in runDict:
             #Find the runs in the dictionary we assembled and split the files in them
             self.newGroup()
diff --git a/src/python/WMCore/JobSplitting/SiblingProcessingBased.py b/src/python/WMCore/JobSplitting/SiblingProcessingBased.py
index 14becc46cc..41307251dc 100644
--- a/src/python/WMCore/JobSplitting/SiblingProcessingBased.py
+++ b/src/python/WMCore/JobSplitting/SiblingProcessingBased.py
@@ -61,7 +61,7 @@ def algorithm(self, *args, **kwargs):
 
             fileSites[completeFile["pnn"]].append(completeFile)
 
-        for siteName in fileSites.keys():
+        for siteName in fileSites:
             if len(fileSites[siteName]) < filesPerJob and not filesetClosed:
                 continue
 
diff --git a/src/python/WMCore/JobSplitting/SizeBased.py b/src/python/WMCore/JobSplitting/SizeBased.py
index 7c8911f92a..b437e07cfb 100644
--- a/src/python/WMCore/JobSplitting/SizeBased.py
+++ b/src/python/WMCore/JobSplitting/SizeBased.py
@@ -30,7 +30,7 @@ def algorithm(self, *args, **kwargs):
         sizePerJob = int(kwargs.get("size_per_job", 1000))
         locationDict = self.sortByLocation()
 
-        for location in locationDict.keys():
+        for location in locationDict:
             self.newGroup()
             fileList = locationDict[location]
             self.newJob(name = makeUUID())
diff --git a/src/python/WMCore/JobSplitting/SplitFileBased.py b/src/python/WMCore/JobSplitting/SplitFileBased.py
index 05641d32ec..17a7d4706f 100644
--- a/src/python/WMCore/JobSplitting/SplitFileBased.py
+++ b/src/python/WMCore/JobSplitting/SplitFileBased.py
@@ -105,7 +105,7 @@ def defineMergeUnits(self, mergeableFiles):
         for mergeableFile in mergeableFiles:
             newMergeFile = {}
 
-            for key in mergeableFile.keys():
+            for key in mergeableFile:
                 newMergeFile[key] = mergeableFile[key]
 
             if newMergeFile["file_run"] not in mergeUnits:
@@ -170,7 +170,7 @@ def algorithm(self, *args, **kwargs):
         mergeableFiles = mergeDAO.execute(self.subscription["id"])
 
         mergeUnits = self.defineMergeUnits(mergeableFiles)
 
-        for runNumber in mergeUnits.keys():
+        for runNumber in mergeUnits:
             mergeUnits[runNumber].sort(mergeUnitCompare)
             self.createProcJobs(mergeUnits[runNumber])
diff --git a/src/python/WMCore/JobSplitting/TwoFileBased.py b/src/python/WMCore/JobSplitting/TwoFileBased.py
index 1a663a14b0..db70b89c91 100644
--- a/src/python/WMCore/JobSplitting/TwoFileBased.py
+++ b/src/python/WMCore/JobSplitting/TwoFileBased.py
@@ -74,7 +74,7 @@ def algorithm(self, *args, **kwargs):
         #Get a dictionary of sites, files
         locationDict = self.sortByLocation()
 
-        for location in locationDict.keys():
+        for location in locationDict:
             #Now we have all the files in a certain location
             fileList = locationDict[location]
             filesInJob = 0
diff --git a/src/python/WMCore/JobSplitting/WMBSMergeBySize.py b/src/python/WMCore/JobSplitting/WMBSMergeBySize.py
index 55d6372675..2d3a46896b 100644
--- a/src/python/WMCore/JobSplitting/WMBSMergeBySize.py
+++ b/src/python/WMCore/JobSplitting/WMBSMergeBySize.py
@@ -103,7 +103,7 @@ def defineMergeUnits(self, mergeableFiles):
         for mergeableFile in mergeableFiles:
             newMergeFile = {}
 
-            for key in mergeableFile.keys():
+            for key in mergeableFile:
                 newMergeFile[key] = mergeableFile[key]
 
             if newMergeFile["pnn"] not in mergeUnits:
@@ -243,8 +243,8 @@ def algorithm(self, *args, **kwargs):
 
         mergeUnits = self.defineMergeUnits(mergeableFiles)
 
-        for pnn in mergeUnits.keys():
-            for runNumber in mergeUnits[pnn].keys():
+        for pnn in mergeUnits:
+            for runNumber in mergeUnits[pnn]:
                 self.defineMergeJobs(mergeUnits[pnn][runNumber])
 
         return
diff --git a/src/python/WMCore/Storage/DeleteMgr.py b/src/python/WMCore/Storage/DeleteMgr.py
index a854ce909d..ff3df65bb8 100644
--- a/src/python/WMCore/Storage/DeleteMgr.py
+++ b/src/python/WMCore/Storage/DeleteMgr.py
@@ -9,6 +9,9 @@
 """
 from __future__ import print_function
 
+from builtins import object
+from future.utils import viewitems
+
 import logging
 
 from WMCore.Storage.Registry import retrieveStageOutImpl
@@ -34,7 +37,7 @@ def __init__(self, message, **data):
         self.data.setdefault("ErrorType", self.__class__.__name__)
 
 
-class DeleteMgr:
+class DeleteMgr(object):
     """
     _DeleteMgr_
 
@@ -150,7 +153,7 @@ def initialiseOverride(self):
             overrideParams['option'] = ""
 
         msg = "=======Delete Override Initialised:================\n"
-        for key, val in overrideParams.items():
+        for key, val in viewitems(overrideParams):
            msg += " %s : %s\n" % (key, val)
         msg += "=====================================================\n"
         self.logger.info(msg)
diff --git a/src/python/WMCore/WMBS/Job.py b/src/python/WMCore/WMBS/Job.py
index e6e051d4b0..2fc7216a12 100644
--- a/src/python/WMCore/WMBS/Job.py
+++ b/src/python/WMCore/WMBS/Job.py
@@ -16,6 +16,8 @@
 
 from __future__ import print_function
 
+from builtins import int, str, bytes
+
 from WMCore.DataStructs.Job import Job as WMJob
 from WMCore.DataStructs.Mask import Mask as WMMask
 from WMCore.Services.UUIDLib import makeUUID
@@ -423,7 +425,7 @@ def setFWJRPath(self, fwjrPath=None):
         """
 
         if not fwjrPath:
-            if 'fwjr' in self.keys():
+            if 'fwjr' in self:
                 fwjrPath = self['fwjr']
             else:
                 return None
@@ -443,16 +445,15 @@ def getDataStructsJob(self):
         job = WMJob(name=self['name'])
 
         # Transfer all simple keys
-        for key in self.keys():
-            keyType = type(self.get(key))
-            if keyType in [str, long, int, float]:
+        for key in self:
+            if isinstance(self.get(key), (str, bytes, int, float)):
                 job[key] = self[key]
 
         for fileObj in self['input_files']:
             job['input_files'].append(fileObj.returnDataStructsFile())
 
         job['mask'] = WMMask()
-        for key in self["mask"].keys():
+        for key in self["mask"]:
             job["mask"][key] = self["mask"][key]
 
         job.baggage = self.baggage
diff --git a/src/python/WMCore/WMBS/Subscription.py b/src/python/WMCore/WMBS/Subscription.py
index c115588321..6d3b435fe6 100644
--- a/src/python/WMCore/WMBS/Subscription.py
+++ b/src/python/WMCore/WMBS/Subscription.py
@@ -10,6 +10,8 @@
 """
 from __future__ import print_function
 
+from future.utils import listvalues
+
 import logging
 
 from collections import Counter
@@ -200,7 +202,7 @@ def filesOfStatus(self, status, loadChecksums=True, doingJobSplitting=False):
                 if loadChecksums:
                     fl.loadChecksum()
                 fl.update(fileInfoDict[f['file']])
-                if 'locations' in f.keys():
+                if 'locations' in f:
                     fl.setLocation(f['locations'], immediateSave=False)
                 files.add(fl)
 
@@ -324,9 +326,9 @@ def getNumberOfJobsPerSite(self, location, state):
         """
         jobLocate = self.daofactory(classname="Subscriptions.GetNumberOfJobsPerSite")
 
-        result = jobLocate.execute(location=location,
-                                   subscription=self['id'],
-                                   state=state).values()[0]
+        result = listvalues(jobLocate.execute(location=location,
+                                              subscription=self['id'],
+                                              state=state))[0]
         return result
 
     def getJobGroups(self):
@@ -622,7 +624,7 @@ def bulkCommit(self, jobGroups):
         maskList = []
         for job in jobList:
             mask = job['mask']
-            if len(mask['runAndLumis'].keys()) > 0:
+            if len(list(mask['runAndLumis'].keys())) > 0:
                 # Then we have multiple binds
                 binds = mask.produceCommitBinds(jobID=job['id'])
                 maskList.extend(binds)
diff --git a/test/python/WMComponent_t/DBS3Buffer_t/DBSBufferUtil_t.py b/test/python/WMComponent_t/DBS3Buffer_t/DBSBufferUtil_t.py
index f3938c2e1d..b1f8e4ddcd 100644
--- a/test/python/WMComponent_t/DBS3Buffer_t/DBSBufferUtil_t.py
+++ b/test/python/WMComponent_t/DBS3Buffer_t/DBSBufferUtil_t.py
@@ -5,6 +5,7 @@
 Unit tests for DBSBufferUtil class
 """
+from builtins import range
 import unittest
 import threading
 
 
diff --git a/test/python/WMCore_t/ACDC_t/CouchCollection_t.py b/test/python/WMCore_t/ACDC_t/CouchCollection_t.py
index f426251ad1..39631ce271 100644
--- a/test/python/WMCore_t/ACDC_t/CouchCollection_t.py
+++ b/test/python/WMCore_t/ACDC_t/CouchCollection_t.py
@@ -6,6 +6,7 @@
 
 Copyright (c) 2010 Fermilab. All rights reserved.
 """
+from builtins import range
 import unittest
 import random
 
@@ -113,10 +114,10 @@ def testCreatePopulateDrop(self):
             if fileset["name"] == "TestFilesetC":
                 testFiles.extend(testFilesB)
 
-            self.assertEqual(len(testFiles), len(fileset.files.keys()),
+            self.assertEqual(len(testFiles), len(fileset.files),
                              "Error: Wrong number of files in fileset.")
             for testFile in testFiles:
-                self.assertTrue(testFile["lfn"] in fileset.files.keys(),
+                self.assertTrue(testFile["lfn"] in fileset.files,
                                 "Error: File is missing.")
                 self.assertEqual(testFile["events"],
                                  fileset.files[testFile["lfn"]]["events"],
diff --git a/test/python/WMCore_t/ACDC_t/CouchFileset_t.py b/test/python/WMCore_t/ACDC_t/CouchFileset_t.py
index 43632ecc33..cfecdc0851 100644
--- a/test/python/WMCore_t/ACDC_t/CouchFileset_t.py
+++ b/test/python/WMCore_t/ACDC_t/CouchFileset_t.py
@@ -6,6 +6,7 @@
 
 Copyright (c) 2010 Fermilab. All rights reserved.
 """
+from builtins import range
 import unittest
 import random
 
@@ -125,7 +126,7 @@ def testListFiles(self):
             testFileset.add([testFile])
 
         for file in testFileset.listFiles():
-            self.assertTrue(file["lfn"] in testFiles.keys(),
+            self.assertTrue(file["lfn"] in testFiles,
                             "Error: File missing.")
             self.assertEqual(file["events"], testFiles[file["lfn"]]["events"],
                              "Error: Wrong number of events.")
@@ -157,7 +158,7 @@ def testFileset(self):
             testFileset.add([testFile])
 
         for file in testFileset.fileset().files:
-            self.assertTrue(file["lfn"] in testFiles.keys(),
+            self.assertTrue(file["lfn"] in testFiles,
                             "Error: File missing.")
             self.assertEqual(file["events"], testFiles[file["lfn"]]["events"],
                              "Error: Wrong number of events.")
diff --git a/test/python/WMCore_t/ACDC_t/CouchService_t.py b/test/python/WMCore_t/ACDC_t/CouchService_t.py
index 6f024dc7e1..1bb59302b5 100644
--- a/test/python/WMCore_t/ACDC_t/CouchService_t.py
+++ b/test/python/WMCore_t/ACDC_t/CouchService_t.py
@@ -7,6 +7,7 @@
 
 Copyright (c) 2010 Fermilab. All rights reserved.
 """
+from builtins import range
 import unittest
 import random
 import time
diff --git a/test/python/WMCore_t/JobSplitting_t/FixedDelay_t.py b/test/python/WMCore_t/JobSplitting_t/FixedDelay_t.py
index b80bb8b67d..3a25bef52a 100644
--- a/test/python/WMCore_t/JobSplitting_t/FixedDelay_t.py
+++ b/test/python/WMCore_t/JobSplitting_t/FixedDelay_t.py
@@ -4,6 +4,9 @@
 
 Fixed Delay splitting test.
 """
+from __future__ import division
+
+from builtins import range
 
 import unittest
 
@@ -39,7 +42,7 @@ def setUp(self):
         self.multipleFileLumiset = Fileset(name = "TestFileset3")
         for i in range(10):
             newFile = File(makeUUID(), size = 1000, events = 100)
-            newFile.addRun(Run(1, *[45+i/3]))
+            newFile.addRun(Run(1, *[45 + i // 3]))
             self.multipleFileLumiset.addFile(newFile)
 
         self.singleLumiFileset = Fileset(name = "TestFileset4")
diff --git a/test/python/WMCore_t/JobSplitting_t/RunBased_t.py b/test/python/WMCore_t/JobSplitting_t/RunBased_t.py
index 5775dcdbe3..b83fc0036d 100644
--- a/test/python/WMCore_t/JobSplitting_t/RunBased_t.py
+++ b/test/python/WMCore_t/JobSplitting_t/RunBased_t.py
@@ -4,9 +4,9 @@
 
 RunBased splitting test.
 """
+from __future__ import division
 
-
-
+from builtins import range
 
 import unittest
 
@@ -43,7 +43,7 @@ def setUp(self):
         self.multipleFileRunset = Fileset(name = "TestFileset3")
         for i in range(10):
             newFile = File(makeUUID(), size = 1000, events = 100, locations = set(["somese.cern.ch"]))
-            newFile.addRun(Run(i/3, *[45]))
+            newFile.addRun(Run(i//3, *[45]))
             self.multipleFileRunset.addFile(newFile)
 
         self.singleRunFileset = Fileset(name = "TestFileset4")
diff --git a/test/python/WMCore_t/JobSplitting_t/SizeBased_t.py b/test/python/WMCore_t/JobSplitting_t/SizeBased_t.py
index 837ec37a12..587dedeff2 100644
--- a/test/python/WMCore_t/JobSplitting_t/SizeBased_t.py
+++ b/test/python/WMCore_t/JobSplitting_t/SizeBased_t.py
@@ -8,6 +8,7 @@
 
 
 
+from builtins import range
 import unittest
 
 from WMCore.DataStructs.File import File
diff --git a/test/python/WMCore_t/WMBS_t/Job_t.py b/test/python/WMCore_t/WMBS_t/Job_t.py
index d9abba4077..61d4b4c93a 100644
--- a/test/python/WMCore_t/WMBS_t/Job_t.py
+++ b/test/python/WMCore_t/WMBS_t/Job_t.py
@@ -6,6 +6,7 @@
 """
 
 from __future__ import absolute_import
+from builtins import str
 
 import threading
 import unittest
@@ -908,7 +909,7 @@ def testGetOutputMapDAO(self):
         outputMapAction = self.daoFactory(classname="Jobs.GetOutputMap")
         outputMap = outputMapAction.execute(jobID=testJob["id"])
 
-        assert len(outputMap.keys()) == 3, \
+        assert len(outputMap) == 3, \
             "Error: Wrong number of outputs for primary workflow."
 
         goldenMap = {"output": (recoOutputFileset.id,
@@ -918,7 +919,7 @@ def testGetOutputMapDAO(self):
                      "DQM": (dqmOutputFileset.id,
                              mergedDqmOutputFileset.id)}
 
-        for outputID in outputMap.keys():
+        for outputID in outputMap:
             for outputFilesets in outputMap[outputID]:
                 if outputFilesets["merged_output_fileset"] is None:
                     self.assertEqual(outputFilesets["output_fileset"],
@@ -926,7 +927,7 @@ def testGetOutputMapDAO(self):
                                      "Error: Cleanup fileset is wrong.")
                     continue
 
-                self.assertTrue(outputID in goldenMap.keys(),
+                self.assertTrue(outputID in goldenMap,
                                 "Error: Output identifier is missing.")
                 self.assertEqual(outputFilesets["output_fileset"],
                                  goldenMap[outputID][0],
@@ -936,7 +937,7 @@ def testGetOutputMapDAO(self):
                                  "Error: Merged output fileset is wrong.")
                 del goldenMap[outputID]
 
-        self.assertEqual(len(goldenMap.keys()), 0,
+        self.assertEqual(len(goldenMap), 0,
                          "Error: Missing output maps.")
 
         return
@@ -982,7 +983,7 @@ def testGetDataStructsJob(self):
         testJob.baggage.TestSection.test = 100
 
        finalJob = testJob.getDataStructsJob()
-        for key in finalJob.keys():
+        for key in finalJob:
             if key == 'input_files':
                 for inputFile in testJob['input_files']:
                     self.assertEqual(inputFile.returnDataStructsFile() in finalJob['input_files'], True)