diff --git a/Configuration/Applications/python/ConfigBuilder.py b/Configuration/Applications/python/ConfigBuilder.py index ee88eae5b9749..42fc1e941f1bf 100644 --- a/Configuration/Applications/python/ConfigBuilder.py +++ b/Configuration/Applications/python/ConfigBuilder.py @@ -1,6 +1,5 @@ #! /usr/bin/env python3 -from __future__ import print_function __version__ = "$Revision: 1.19 $" __source__ = "$Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v $" @@ -62,7 +61,7 @@ class Options: defaultOptions.eventcontent = None defaultOptions.datatier = None defaultOptions.inlineEventContent = True -defaultOptions.inlineObjets ='' +defaultOptions.inlineObjects ='' defaultOptions.hideGen=False from Configuration.StandardSequences.VtxSmeared import VtxSmearedDefaultKey,VtxSmearedHIDefaultKey defaultOptions.beamspot=None @@ -87,10 +86,10 @@ class Options: defaultOptions.runsScenarioForMCIntegerWeights = None defaultOptions.runUnscheduled = False defaultOptions.timeoutOutput = False -defaultOptions.nThreads = '1' -defaultOptions.nStreams = '0' -defaultOptions.nConcurrentLumis = '0' -defaultOptions.nConcurrentIOVs = '0' +defaultOptions.nThreads = 1 +defaultOptions.nStreams = 0 +defaultOptions.nConcurrentLumis = 0 +defaultOptions.nConcurrentIOVs = 0 defaultOptions.accelerators = None # some helper routines @@ -425,9 +424,9 @@ def addCommon(self): def addMaxEvents(self): """Here we decide how many evts will be processed""" - self.process.maxEvents.input = int(self._options.number) + self.process.maxEvents.input = self._options.number if self._options.number_out: - self.process.maxEvents.output = int(self._options.number_out) + self.process.maxEvents.output = self._options.number_out self.addedObjects.append(("","maxEvents")) def addSource(self): @@ -794,7 +793,7 @@ def addStandardSequences(self): #the file is local self.process.load(mixingDict['file']) print("inlining mixing module configuration") - self._options.inlineObjets+=',mix' + self._options.inlineObjects+=',mix' else: self.loadAndRemember(mixingDict['file']) @@ -1352,8 +1351,8 @@ def prepare_ALCA(self, stepSpec = None, workflow = 'full'): if shortName in alcaList and isinstance(alcastream,cms.FilteredStream): if shortName in AlCaNoConcurrentLumis: print("Setting numberOfConcurrentLuminosityBlocks=1 because of AlCa sequence {}".format(shortName)) - self._options.nConcurrentLumis = "1" - self._options.nConcurrentIOVs = "1" + self._options.nConcurrentLumis = 1 + self._options.nConcurrentIOVs = 1 output = self.addExtraStream(name,alcastream, workflow = workflow) self.executeAndRemember('process.ALCARECOEventContent.outputCommands.extend(process.OutALCARECO'+shortName+'_noDrop.outputCommands)') self.AlCaPaths.append(shortName) @@ -1404,9 +1403,9 @@ def prepare_LHE(self, stepSpec = None): __import__(loadFragment) self.process.load(loadFragment) ##inline the modules - self._options.inlineObjets+=','+stepSpec + self._options.inlineObjects+=','+stepSpec - getattr(self.process,stepSpec).nEvents = int(self._options.number) + getattr(self.process,stepSpec).nEvents = self._options.number #schedule it self.process.lhe_step = cms.Path( getattr( self.process,stepSpec) ) @@ -1453,13 +1452,13 @@ def prepare_GEN(self, stepSpec = None): for name in genModules: theObject = getattr(generatorModule,name) if isinstance(theObject, cmstypes._Module): - self._options.inlineObjets=name+','+self._options.inlineObjets + self._options.inlineObjects=name+','+self._options.inlineObjects if theObject.type_() in noConcurrentLumiGenerators: 
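A minimal, self-contained sketch (not part of the patch) of why the int() casts above become unnecessary: once the thread, stream, and event-count options are declared with type=int on the argparse side, ConfigBuilder receives integers directly. The tiny parser below is hypothetical; only the option names mirror this file.

from argparse import ArgumentParser

sketch = ArgumentParser()
sketch.add_argument("-n", "--number", type=int, default=1)
sketch.add_argument("--nThreads", type=int, default=1)

opts = sketch.parse_args(["-n", "100", "--nThreads", "4"])

# optparse handed back strings, so the old code needed int(opts.number) and
# string concatenation when emitting the config; argparse already returns ints.
assert isinstance(opts.number, int)
cfg_line = "process.options.numberOfThreads = {}\n".format(opts.nThreads)
overrideThreads = (opts.nThreads != 1)   # integer comparison, not != "1"
print(cfg_line.strip(), overrideThreads)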
print("Setting numberOfConcurrentLuminosityBlocks=1 because of generator {}".format(theObject.type_())) - self._options.nConcurrentLumis = "1" - self._options.nConcurrentIOVs = "1" + self._options.nConcurrentLumis = 1 + self._options.nConcurrentIOVs = 1 elif isinstance(theObject, cms.Sequence) or isinstance(theObject, cmstypes.ESProducer): - self._options.inlineObjets+=','+name + self._options.inlineObjects+=','+name if stepSpec == self.GENDefaultSeq or stepSpec == 'pgen_genonly' or stepSpec == 'pgen_smear': if 'ProductionFilterSequence' in genModules and ('generator' in genModules): @@ -1709,8 +1708,8 @@ def leave(self,v): pass expander=PrintAllModules() getattr(self.process,filterSeq).visit( expander ) - self._options.inlineObjets+=','+expander.inliner - self._options.inlineObjets+=','+filterSeq + self._options.inlineObjects+=','+expander.inliner + self._options.inlineObjects+=','+filterSeq ## put the filtering path in the schedule self.scheduleSequence(filterSeq,'filtering_step') @@ -2286,7 +2285,7 @@ def prepare(self, doChecking = False): self.pythonCfgCode += command + "\n" #comma separated list of objects that deserve to be inlined in the configuration (typically from a modified config deep down) - for object in self._options.inlineObjets.split(','): + for object in self._options.inlineObjects.split(','): if not object: continue if not hasattr(self.process,object): @@ -2345,7 +2344,7 @@ def prepare(self, doChecking = False): self.pythonCfgCode+="from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask\n" self.pythonCfgCode+="associatePatAlgosToolsTask(process)\n" - overrideThreads = (self._options.nThreads != "1") + overrideThreads = (self._options.nThreads != 1) overrideConcurrentLumis = (self._options.nConcurrentLumis != defaultOptions.nConcurrentLumis) overrideConcurrentIOVs = (self._options.nConcurrentIOVs != defaultOptions.nConcurrentIOVs) @@ -2353,16 +2352,16 @@ def prepare(self, doChecking = False): self.pythonCfgCode +="\n" self.pythonCfgCode +="#Setup FWK for multithreaded\n" if overrideThreads: - self.pythonCfgCode +="process.options.numberOfThreads = "+self._options.nThreads+"\n" - self.pythonCfgCode +="process.options.numberOfStreams = "+self._options.nStreams+"\n" - self.process.options.numberOfThreads = int(self._options.nThreads) - self.process.options.numberOfStreams = int(self._options.nStreams) + self.pythonCfgCode +="process.options.numberOfThreads = {}\n".format(self._options.nThreads) + self.pythonCfgCode +="process.options.numberOfStreams = {}\n".format(self._options.nStreams) + self.process.options.numberOfThreads = self._options.nThreads + self.process.options.numberOfStreams = self._options.nStreams if overrideConcurrentLumis: - self.pythonCfgCode +="process.options.numberOfConcurrentLuminosityBlocks = "+self._options.nConcurrentLumis+"\n" - self.process.options.numberOfConcurrentLuminosityBlocks = int(self._options.nConcurrentLumis) + self.pythonCfgCode +="process.options.numberOfConcurrentLuminosityBlocks = {}\n".format(self._options.nConcurrentLumis) + self.process.options.numberOfConcurrentLuminosityBlocks = self._options.nConcurrentLumis if overrideConcurrentIOVs: - self.pythonCfgCode +="process.options.eventSetup.numberOfConcurrentIOVs = "+self._options.nConcurrentIOVs+"\n" - self.process.options.eventSetup.numberOfConcurrentIOVs = int(self._options.nConcurrentIOVs) + self.pythonCfgCode +="process.options.eventSetup.numberOfConcurrentIOVs = {}\n".format(self._options.nConcurrentIOVs) + 
self.process.options.eventSetup.numberOfConcurrentIOVs = self._options.nConcurrentIOVs if self._options.accelerators is not None: accelerators = self._options.accelerators.split(',') diff --git a/Configuration/Applications/python/Options.py b/Configuration/Applications/python/Options.py index fde1e9e4b9113..5abc482d9d7bb 100644 --- a/Configuration/Applications/python/Options.py +++ b/Configuration/Applications/python/Options.py @@ -2,7 +2,7 @@ # A Pyrelval Wrapper -import optparse +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter import sys import os import re @@ -11,416 +11,467 @@ import traceback # Prepare a parser to read the options usage=\ -"""%prog [options]. +"""%(prog)s [options]. Example: -%prog reco -s RAW2DIGI,RECO --conditions STARTUP_V4::All --eventcontent RECOSIM +%(prog)s reco -s RAW2DIGI,RECO --conditions STARTUP_V4::All --eventcontent RECOSIM """ -parser = optparse.OptionParser(usage) - -expertSettings = optparse.OptionGroup(parser, '===============\n Expert Options', 'Caution: please use only if you know what you are doing.') -famosSettings = optparse.OptionGroup(parser, '===============\n FastSimulation options', '') -parser.add_option_group(expertSettings) - -threeValued=[] -parser.add_option("-s", "--step", - help="The desired step. The possible values are: "+\ - "GEN,SIM,DIGI,L1,DIGI2RAW,HLT,RAW2DIGI,RECO,POSTRECO,DQM,ALCA,VALIDATION,HARVESTING, NONE or ALL.", - default="ALL", - dest="step") - -parser.add_option("--conditions", - help="What conditions to use. This has to be specified", - default=None, - dest="conditions") - -parser.add_option("--eventcontent", - help="What event content to write out. Default=FEVTDEBUG, or FEVT (for cosmics)", - default='RECOSIM', - dest="eventcontent") - -parser.add_option("--filein", - help="The infile name.", - default="",#to be changed in the default form later - dest="filein") - -parser.add_option("--fileout", - help="The outfile name. If absent a default value is assigned", - default="", #to be changed in the default form later - dest="fileout") - -parser.add_option("--filetype", - help="The type of the infile (EDM, LHE or MCDB).", - default=defaultOptions.filetype, - dest="filetype", - choices=['EDM','DAT','LHE','MDCB','DQM','DQMDAQ'] +parser = ArgumentParser(usage=usage, formatter_class=ArgumentDefaultsHelpFormatter) + +expertSettings = parser.add_argument_group('===============\n Expert Options', 'Caution: please use only if you know what you are doing.') + +parser.add_argument("evt_type", metavar="TYPE", nargs='?', type=str) +parser.add_argument("-s", "--step", + help="The desired step. The possible values are: "+\ + "GEN,SIM,DIGI,L1,DIGI2RAW,HLT,RAW2DIGI,RECO,POSTRECO,DQM,ALCA,VALIDATION,HARVESTING, NONE or ALL.", + default="ALL", + type=str, + dest="step") + +parser.add_argument("--conditions", + help="What conditions to use (required; provide value 'help' to get list of options)", + required=True, + type=str, + dest="conditions") + +parser.add_argument("--eventcontent", + help="What event content to write out", + default='RECOSIM', + type=str, + dest="eventcontent") + +parser.add_argument("--filein", + help="The infile name.", + default="", #to be changed in the default form later + type=str, + dest="filein") + +parser.add_argument("--fileout", + help="The outfile name. 
If absent a default value is assigned", + default="", #to be changed in the default form later + type=str, + dest="fileout") + +parser.add_argument("--filetype", + help="The type of the infile", + default=defaultOptions.filetype, + type=str, + dest="filetype", + choices=['EDM','DAT','LHE','MDCB','DQM','DQMDAQ'] ) -parser.add_option("-n", "--number", - help="The number of events. The default is 1.", - default="1", - dest="number") -parser.add_option("-o", "--number_out", - help="The number of events in output. The default is not set", - default=None, - dest="number_out") - -parser.add_option("--mc", - help="Specify that simulation is to be processed (default = guess based on options", - action="store_true", - default=False, - dest="isMC") - -parser.add_option("--data", - help="Specify that data is to be processed (default = guess based on options", - action="store_true", - default=False, - dest="isData") - - -parser.add_option("--no_exec", - help="Do not exec cmsRun. Just prepare the python config file.", - action="store_true", - default=False, - dest="no_exec_flag") -parser.add_option("--fast", - help="Specify that the configuration is for FASTSIM", - action="store_true", - default=False) - -parser.add_option("--runsAndWeightsForMC", - help="Assign run numbers to MC source according to relative weights. [(run1,weight1),...,(runN,weightN)])", - default=None, - dest="runsAndWeightsForMC") - -parser.add_option("--runsScenarioForMC", - help="Load a scenario to set run numbers in MC.)", - default=None, - dest="runsScenarioForMC") - -parser.add_option("--runsAndWeightsForMCIntegerWeights", - help="Assign run numbers to MC source according to relative weights where weighting is determined by the number of times the run number appears. [(run1,run2,...,runN)])", - default=None, - dest="runsAndWeightsForMCIntegerWeights") - -parser.add_option("--runsScenarioForMCIntegerWeights", - help="Load a scenario to set run numbers in MC with integer IOV weights.)", - default=None, - dest="runsScenarioForMCIntegerWeights") - -parser.add_option("--runUnscheduled", - help="Automatically convert configuration to run unscheduled the EDProducers/EDFilters that were scheduled", - action="store_true", - default=False, - dest="runUnscheduled") +parser.add_argument("-n", "--number", + help="The number of events.", + default=1, + type=int, + dest="number") + +parser.add_argument("-o", "--number_out", + help="The number of events in output.", + default=None, + type=int, + dest="number_out") + +parser.add_argument("--mc", + help="Specify that simulation is to be processed (default = guess based on options)", + action="store_true", + default=False, + dest="isMC") + +parser.add_argument("--data", + help="Specify that data is to be processed (default = guess based on options)", + action="store_true", + default=False, + dest="isData") + +parser.add_argument("--no_exec", + help="Do not exec cmsRun. Just prepare the python config file.", + action="store_true", + default=False, + dest="no_exec_flag") + +parser.add_argument("--fast", + help="Specify that the configuration is for FASTSIM", + action="store_true", + default=False) + +parser.add_argument("--runsAndWeightsForMC", + help="Assign run numbers to MC source according to relative weights. 
[(run1,weight1),...,(runN,weightN)])", + default=None, + dest="runsAndWeightsForMC") + +parser.add_argument("--runsScenarioForMC", + help="Load a scenario to set run numbers in MC.)", + default=None, + dest="runsScenarioForMC") + +parser.add_argument("--runsAndWeightsForMCIntegerWeights", + help="Assign run numbers to MC source according to relative weights where weighting is determined by the number of times the run number appears. [(run1,run2,...,runN)])", + default=None, + dest="runsAndWeightsForMCIntegerWeights") + +parser.add_argument("--runsScenarioForMCIntegerWeights", + help="Load a scenario to set run numbers in MC with integer IOV weights.", + default=None, + dest="runsScenarioForMCIntegerWeights") + +parser.add_argument("--runUnscheduled", + help="Automatically convert configuration to run unscheduled the EDProducers/EDFilters that were scheduled", + action="store_true", + default=False, + dest="runUnscheduled") # expert settings -expertSettings.add_option("--beamspot", - help="What beam spot to use (from Configuration/StandardSequences). Default depends on scenario", - default=None, - dest="beamspot") - -expertSettings.add_option("--customise", - help="Specify the file where the code to modify the process object is stored.", - default=[], - action="append", - dest="customisation_file") -expertSettings.add_option("--customise_unsch", - help="Specify the file where the code to modify the process object is stored.", - default=[], - action="append", - dest="customisation_file_unsch") -expertSettings.add_option("--customise_commands", - help="Specify a string of commands", - default="", - dest="customise_commands") - -expertSettings.add_option("--inline_custom", - help="inline the customisation file", - default=False, - action="store_true", - dest="inline_custom") - -expertSettings.add_option("--datatier", - help="What data tier to use.", - default='', - dest="datatier") - -expertSettings.add_option( "--dirin", - help="The infile directory.", - default="", - dest="dirin") - -expertSettings.add_option( "--dirout", - help="The outfile directory.", - default="", - dest="dirout") - -expertSettings.add_option("--filtername", - help="What filter name to specify in output module", - default="", - dest="filtername") - -expertSettings.add_option("--geometry", - help="What simulation geometry to use. Default="+defaultOptions.geometry+". Coma separated SimGeometry,RecoGeometry is supported.", - default=defaultOptions.geometry, - dest="geometry") - -expertSettings.add_option("--magField", - help="What magnetic field to use (from Configuration/StandardSequences).", - default=defaultOptions.magField, - dest="magField") - -expertSettings.add_option("--no_output", - help="Do not write anything to disk. 
This is for "+\ - "benchmarking purposes.", - action="store_true", - default=False, - dest="no_output_flag") - -expertSettings.add_option("--prefix", - help="Specify a prefix to the cmsRun command.", - default="", - dest="prefix") - -expertSettings.add_option("--suffix", - help="Specify a suffix to the cmsRun command.", - default="", - dest="suffix") - -expertSettings.add_option("--relval", - help="Set total number of events and events per job.", #this does not get used but get parsed in the command by DatOps - default="", - dest="relval") - -expertSettings.add_option("--dump_python", - help="Dump the config file in python "+\ - "and do a full expansion of imports.", - action="store_true", - default=False, - dest="dump_python") - -expertSettings.add_option("--pileup", - help="What pileup config to use. Default="+defaultOptions.pileup, - default=defaultOptions.pileup, - dest="pileup") +expertSettings.add_argument("--beamspot", + help="What beam spot to use (from Configuration/StandardSequences). Default depends on scenario", + default=None, + type=str, + dest="beamspot") + +expertSettings.add_argument("--customise", + help="Specify the file where the code to modify the process object is stored.", + default=[], + action="append", + type=str, + dest="customisation_file") + +expertSettings.add_argument("--customise_unsch", + help="Specify the file where the code to modify the process object is stored.", + default=[], + action="append", + type=str, + dest="customisation_file_unsch") + +expertSettings.add_argument("--customise_commands", + help="Specify a string of commands", + default="", + type=str, + dest="customise_commands") + +expertSettings.add_argument("--inline_custom", + help="inline the customisation file", + default=False, + action="store_true", + dest="inline_custom") + +expertSettings.add_argument("--datatier", + help="What data tier to use.", + default='', + type=str, + dest="datatier") + +expertSettings.add_argument( "--dirin", + help="The infile directory.", + default="", + type=str, + dest="dirin") + +expertSettings.add_argument( "--dirout", + help="The outfile directory.", + default="", + type=str, + dest="dirout") + +expertSettings.add_argument("--filtername", + help="What filter name to specify in output module", + default="", + type=str, + dest="filtername") + +expertSettings.add_argument("--geometry", + help="What simulation geometry to use. Comma-separated SimGeometry,RecoGeometry is supported.", + default=defaultOptions.geometry, + type=str, + dest="geometry") + +expertSettings.add_argument("--magField", + help="What magnetic field to use (from Configuration/StandardSequences).", + default=defaultOptions.magField, + type=str, + dest="magField") + +expertSettings.add_argument("--no_output", + help="Do not write anything to disk. 
This is for "+\ + "benchmarking purposes.", + action="store_true", + default=False, + dest="no_output_flag") + +expertSettings.add_argument("--prefix", + help="Specify a prefix to the cmsRun command.", + default="", + type=str, + dest="prefix") + +expertSettings.add_argument("--suffix", + help="Specify a suffix to the cmsRun command.", + default="", + type=str, + dest="suffix") + +expertSettings.add_argument("--relval", + help="Set total number of events and events per job.", #this does not get used but get parsed in the command by DataOps + default="", + dest="relval") + +expertSettings.add_argument("--dump_python", + help="Dump the config file in python "+\ + "and do a full expansion of imports.", + action="store_true", + default=False, + dest="dump_python") + +expertSettings.add_argument("--pileup", + help="What pileup config to use", + default=defaultOptions.pileup, + type=str, + dest="pileup") -expertSettings.add_option("--pileup_input", - help="define the pile up files to mix with", - default=None, - dest="pileup_input") - -expertSettings.add_option("--pileup_dasoption", - help="Additional option for DAS query of pile up", - default="", - dest="pileup_dasoption") - -expertSettings.add_option("--datamix", - help="What datamix config to use. Default=DataOnSim.", - default=defaultOptions.datamix, - dest="datamix") - -expertSettings.add_option("--gflash", - help="Run the FULL SIM using the GFlash parameterization.", - action="store_true", - default=defaultOptions.gflash, - dest="gflash") - -expertSettings.add_option("--python_filename", - help="Change the name of the created config file ", - default='', - dest="python_filename") - -expertSettings.add_option("--secondfilein", - help="The secondary infile name."+\ +expertSettings.add_argument("--pileup_input", + help="define the pile up files to mix with", + default=None, + type=str, + dest="pileup_input") + +expertSettings.add_argument("--pileup_dasoption", + help="Additional option for DAS query of pile up", + default="", + type=str, + dest="pileup_dasoption") + +expertSettings.add_argument("--datamix", + help="What datamix config to use", + default=defaultOptions.datamix, + type=str, + dest="datamix") + +expertSettings.add_argument("--gflash", + help="Run the FULL SIM using the GFlash parameterization.", + action="store_true", + default=defaultOptions.gflash, + dest="gflash") + +expertSettings.add_argument("--python_filename", + help="Change the name of the created config file", + default='', + type=str, + dest="python_filename") + +expertSettings.add_argument("--secondfilein", + help="The secondary infile name."+\ "for the two-file solution. 
Default is no file", - default="",#to be changed in the default form later - dest="secondfilein") - -expertSettings.add_option("--processName", - help="set process name explicitly", - default = None, - dest="name" - ) - -expertSettings.add_option("--triggerResultsProcess", - help="for splitting jobs specify from which process to take edm::TriggerResults", - default = None, - dest="triggerResultsProcess" - ) - -expertSettings.add_option("--hltProcess", - help="modify the DQM sequence to look for HLT trigger results with the specified process name", - default = None, - dest="hltProcess" - ) - -expertSettings.add_option("--scenario", - help="Select scenario overriding standard settings (available:"+str(defaultOptions.scenarioOptions)+")", - default='pp', - dest="scenario", - choices=defaultOptions.scenarioOptions) - -expertSettings.add_option("--harvesting", - help="What harvesting to use (from Configuration/StandardSequences). Default=AtRunEnd", - default=defaultOptions.harvesting, - dest="harvesting") - -expertSettings.add_option("--particle_table", - help="Which particle properties table is loaded. Default=pythia", - default=defaultOptions.particleTable, - dest="particleTable") - -expertSettings.add_option("--dasquery", - help="Allow to define the source.fileNames from the das search command", - default='', - dest="dasquery") - -expertSettings.add_option("--dasoption", - help="Additional option for DAS query", - default='', - dest="dasoption") - -expertSettings.add_option("--dbsquery", - help="Deprecated. Please use dasquery option. Functions for backward compatibility", - default='', - dest="dasquery") - -expertSettings.add_option("--lazy_download", - help="Enable lazy downloading of input files", - action="store_true", - default=False, - dest="lazy_download") - -expertSettings.add_option("--repacked", - help="When the input file is a file with repacked raw data with label rawDataRepacker", - action="store_true", - default=False, - dest="isRepacked" - ) - -expertSettings.add_option("--custom_conditions", - help="Allow to give a few overriding tags for the GT", - default='', - dest='custom_conditions') - -expertSettings.add_option("--inline_eventcontent", - help="expand event content definitions", - action="store_true", - default=False, - dest="inlineEventContent") - - -expertSettings.add_option("--inline_object", - help="expand explicitely the definition of a list of objects", - default='', - dest="inlineObjets") - -expertSettings.add_option("--hideGen", - help="do not inline the generator information, just load it", - default=False, - action="store_true") -expertSettings.add_option("--output", - help="specify the list of output modules using dict", - default='', - dest="outputDefinition") - -expertSettings.add_option("--inputCommands", - help="specify the input commands; i.e dropping products", - default=None, - dest="inputCommands") -expertSettings.add_option("--outputCommands", - help="specify the extra output commands;", - default=None, - dest="outputCommands") - -expertSettings.add_option("--inputEventContent", - help="specify the input event content", - default=defaultOptions.inputEventContent, - dest="inputEventContent") - -expertSettings.add_option("--dropDescendant", - help="allow to drop descendant on input", - default=defaultOptions.dropDescendant, - action="store_true") - -expertSettings.add_option("--donotDropOnInput", - help="when using reSTEP, prevent the automatic product dropping on input", - default=defaultOptions.donotDropOnInput - ) - 
-expertSettings.add_option("--restoreRNDSeeds", - help="restore the random number engine state", - default=False, - ) -threeValued.append( ('--restoreRNDSeeds',True) ) - - -expertSettings.add_option("--era", - help="Specify which era to use (e.g. \"run2\")", - default=None, - dest="era") - -expertSettings.add_option("--procModifiers", - help="Specify any process Modifiers to include (in Configuration/ProcessModiers) - comma separated list", - default=[], - action="append", - dest="procModifiers") - -expertSettings.add_option("--evt_type", - help="specify the gen fragment", - default=None, - dest="evt_type") - -expertSettings.add_option("--profile", - help="add the IgprofService with the parameter provided PROFILER:START:STEP:PEREVENOUTPUTFORMAT:ENDOFJOBOUTPUTFORMAT", - default=None, - dest="profile") - -expertSettings.add_option("--heap_profile", - help="add the JeProfService with the parameter provided PROFILER:START:STEP:PEREVENOUTPUTFORMAT:ENDOFJOBOUTPUTFORMAT", - default=None, - dest="heap_profile") - -expertSettings.add_option("--io", - help="Create a json file with io informations", - default=None, - dest="io") - -expertSettings.add_option("--lumiToProcess", - help="specify a certification json file in input to run on certified data", - default=None, - dest='lumiToProcess' - ) - -expertSettings.add_option("--timeoutOutput", - help="use a TimeoutPoolOutputModule instead of a PoolOutputModule (needed for evt. display)", - default=False, - dest='timeoutOutput' - ) - -expertSettings.add_option("--nThreads", - help="How many threads should CMSSW use (default is 1)", - default=defaultOptions.nThreads, - dest='nThreads' - ) -expertSettings.add_option("--nStreams", - help="How many streams should CMSSW use if nThreads > 1 (default is 0 which makes it same as nThreads)", - default=defaultOptions.nStreams, - dest='nStreams' - ) -expertSettings.add_option("--nConcurrentLumis", - help="How many concurrent LuminosityBlocks should CMSSW use if nThreads > 1 (default is 0 which means 1 for 1 stream and 2 for >= 2 streams)", - default=defaultOptions.nConcurrentLumis, - dest='nConcurrentLumis' - ) -expertSettings.add_option("--nConcurrentIOVs", - help="How many concurrent IOVs should CMSSW use if nThreads > 1 (default is 1)", - default=defaultOptions.nConcurrentIOVs, - dest='nConcurrentIOVs' - ) -expertSettings.add_option("--accelerators", - help="Comma-separated list of accelerators to enable; if 'cpu' is not included, the job will fail if none of the accelerators is available (default is not set, enabling all available accelerators, including the cpu)", - default=None, - dest='accelerators' - ) + default="", #to be changed in the default form later + type=str, + dest="secondfilein") + +expertSettings.add_argument("--processName", + help="set process name explicitly", + default = None, + type=str, + dest="name") + +expertSettings.add_argument("--triggerResultsProcess", + help="for splitting jobs specify from which process to take edm::TriggerResults", + default = None, + type=str, + dest="triggerResultsProcess") + +expertSettings.add_argument("--hltProcess", + help="modify the DQM sequence to look for HLT trigger results with the specified process name", + default = None, + type=str, + dest="hltProcess") + +expertSettings.add_argument("--scenario", + help="Select scenario overriding standard settings", + default='pp', + type=str, + dest="scenario", + choices=defaultOptions.scenarioOptions) + +expertSettings.add_argument("--harvesting", + help="What harvesting to use (from 
Configuration/StandardSequences)", + default=defaultOptions.harvesting, + type=str, + dest="harvesting") + +expertSettings.add_argument("--particle_table", + help="Which particle properties table is loaded", + default=defaultOptions.particleTable, + type=str, + dest="particleTable") + +expertSettings.add_argument("--dasquery", + help="Allow to define the source.fileNames from the das search command", + default='', + type=str, + dest="dasquery") + +expertSettings.add_argument("--dasoption", + help="Additional option for DAS query", + default='', + type=str, + dest="dasoption") + +expertSettings.add_argument("--dbsquery", + help="Deprecated. Please use dasquery option. Functions for backward compatibility", + default='', + type=str, + dest="dasquery") + +expertSettings.add_argument("--lazy_download", + help="Enable lazy downloading of input files", + action="store_true", + default=False, + dest="lazy_download") + +expertSettings.add_argument("--repacked", + help="When the input file is a file with repacked raw data with label rawDataRepacker", + action="store_true", + default=False, + dest="isRepacked") + +expertSettings.add_argument("--custom_conditions", + help="Allow to give a few overriding tags for the GT", + default='', + type=str, + dest='custom_conditions') + +expertSettings.add_argument("--inline_eventcontent", + help="expand event content definitions", + action="store_true", + default=False, + dest="inlineEventContent") + +expertSettings.add_argument("--inline_object", + help="expand explicitly the definition of a list of objects", + default='', + type=str, + dest="inlineObjects") + +expertSettings.add_argument("--hideGen", + help="do not inline the generator information, just load it", + default=False, + action="store_true") + +expertSettings.add_argument("--output", + help="specify the list of output modules using dict", + default='', + type=str, + dest="outputDefinition") + +expertSettings.add_argument("--inputCommands", + help="specify the input commands; i.e dropping products", + default=None, + type=str, + dest="inputCommands") + +expertSettings.add_argument("--outputCommands", + help="specify the extra output commands;", + default=None, + type=str, + dest="outputCommands") + +expertSettings.add_argument("--inputEventContent", + help="specify the input event content", + default=defaultOptions.inputEventContent, + type=str, + dest="inputEventContent") + +expertSettings.add_argument("--dropDescendant", + help="allow to drop descendant on input", + default=defaultOptions.dropDescendant, + action="store_true") + +expertSettings.add_argument("--donotDropOnInput", + help="when using reSTEP, prevent the automatic product dropping on input", + default=defaultOptions.donotDropOnInput, + type=str) + +# specifying '--restoreRNDSeeds' results in 'options.restoreRNDSeeds = True' +# specifying '--restoreRNDSeeds arg' results in 'options.restoreRNDSeeds = arg' +expertSettings.add_argument("--restoreRNDSeeds", + help="restore the random number engine state", + default=False, + const=True, + type=str, + nargs='?') + +expertSettings.add_argument("--era", + help="Specify which era to use (e.g. 
\"run2\")", + default=None, + type=str, + dest="era") + +expertSettings.add_argument("--procModifiers", + help="Specify any process Modifiers to include (in Configuration/ProcessModiers) - comma separated list", + default=[], + action="append", + type=str, + dest="procModifiers") + +expertSettings.add_argument("--evt_type", + help="specify the gen fragment", + default=None, + type=str, + dest="evt_type") + +expertSettings.add_argument("--profile", + help="add the IgprofService with the parameter provided PROFILER:START:STEP:PEREVENOUTPUTFORMAT:ENDOFJOBOUTPUTFORMAT", + default=None, + type=str, + dest="profile") + +expertSettings.add_argument("--heap_profile", + help="add the JeProfService with the parameter provided PROFILER:START:STEP:PEREVENOUTPUTFORMAT:ENDOFJOBOUTPUTFORMAT", + default=None, + type=str, + dest="heap_profile") + +expertSettings.add_argument("--io", + help="Create a json file with io informations", + default=None, + type=str, + dest="io") + +expertSettings.add_argument("--lumiToProcess", + help="specify a certification json file in input to run on certified data", + default=None, + type=str, + dest='lumiToProcess') + +expertSettings.add_argument("--timeoutOutput", + help="use a TimeoutPoolOutputModule instead of a PoolOutputModule (needed for evt. display)", + default=False, + action="store_true", + dest='timeoutOutput') + +expertSettings.add_argument("--nThreads", + help="How many threads should CMSSW use", + default=defaultOptions.nThreads, + type=int, + dest='nThreads') + +expertSettings.add_argument("--nStreams", + help="How many streams should CMSSW use if nThreads > 1 (default is 0 which makes it same as nThreads)", + default=defaultOptions.nStreams, + type=int, + dest='nStreams') + +expertSettings.add_argument("--nConcurrentLumis", + help="How many concurrent LuminosityBlocks should CMSSW use if nThreads > 1 (default is 0 which means 1 for 1 stream and 2 for >= 2 streams)", + default=defaultOptions.nConcurrentLumis, + type=int, + dest='nConcurrentLumis') + +expertSettings.add_argument("--nConcurrentIOVs", + help="How many concurrent IOVs should CMSSW use if nThreads > 1", + default=defaultOptions.nConcurrentIOVs, + type=int, + dest='nConcurrentIOVs') + +expertSettings.add_argument("--accelerators", + help="Comma-separated list of accelerators to enable; if 'cpu' is not included, the job will fail if none of the accelerators is available (default is not set, enabling all available accelerators, including the cpu)", + default=None, + type=str, + dest='accelerators') diff --git a/Configuration/Applications/python/cmsDriverOptions.py b/Configuration/Applications/python/cmsDriverOptions.py index 286278df5aefc..eaf702762de64 100755 --- a/Configuration/Applications/python/cmsDriverOptions.py +++ b/Configuration/Applications/python/cmsDriverOptions.py @@ -2,7 +2,6 @@ # A Pyrelval Wrapper -from __future__ import print_function import optparse import sys import os @@ -41,21 +40,16 @@ def OptionsFromCommandLine(): def OptionsFromItems(items): import sys - from Configuration.Applications.Options import parser,threeValued - #three valued options - for (index,item) in enumerate(items): - for (opt,value) in threeValued: - if (str(item) in opt) and (index==len(items)-1 or items[index+1].startswith('-')): - items.insert(index+1,value) - - (options,args) = parser.parse_args(items) - - if not options.conditions or options.conditions=="help": + from Configuration.Applications.Options import parser + + options = parser.parse_args(items) + + if options.conditions=="help": from 
Configuration.AlCa import autoCond possible="" for k in autoCond.autoCond: possible+="\nauto:"+k+" -> "+str(autoCond.autoCond[k]) - raise Exception("the --conditions option is mandatory. Possibilities are: "+possible) + parser.error("Possibilities for the --conditions option: "+possible) ################################# diff --git a/Configuration/Applications/scripts/cmsDriver.py b/Configuration/Applications/scripts/cmsDriver.py index 90009fe476106..b4e4f0f89b785 100755 --- a/Configuration/Applications/scripts/cmsDriver.py +++ b/Configuration/Applications/scripts/cmsDriver.py @@ -2,7 +2,6 @@ # A Pyrelval Wrapper -from __future__ import print_function def run(): import sys import os @@ -14,17 +13,6 @@ def run(): # after cleanup of all config parameters pass it to the ConfigBuilder configBuilder = ConfigBuilder(options, with_output = True, with_input = True) - # Switch on any eras that have been specified. This is not required to create - # the file, it is only relevant if dump_python is set. It does have to be done - # before the prepare() call though. If not, then the config files will be loaded - # without applying the era changes. This doesn't affect the config file written, - # but when the dump_python branch uses execfile to read it back in it doesn't - # reload the modules - it picks up a reference to the already loaded ones. - if hasattr( options, "era" ) and options.era is not None : - from Configuration.StandardSequences.Eras import eras - for eraName in options.era.split(',') : - getattr( eras, eraName )._setChosen() - configBuilder.prepare() # fetch the results and write it to file config = open(options.python_filename,"w") diff --git a/FWCore/ParameterSet/scripts/edmPythonConfigToCppValidation b/FWCore/ParameterSet/scripts/edmPythonConfigToCppValidation index f68b3a623b86d..28b704406ed7a 100755 --- a/FWCore/ParameterSet/scripts/edmPythonConfigToCppValidation +++ b/FWCore/ParameterSet/scripts/edmPythonConfigToCppValidation @@ -1,6 +1,5 @@ #! /usr/bin/env python3 -from __future__ import print_function from builtins import str from FWCore.ParameterSet.Modules import _TypedParameterizable from FWCore.ParameterSet.Mixins import _ValidatingParameterListBase @@ -219,15 +218,12 @@ def printParameterSet(spacing, psetName, pset, depth): print(spacing+psetName+".add"+trackiness+"Parameter<"+t+'>("'+l+'", '+c(p)+");") -import optparse -usage = "%prog [configuration file name]" -parser = optparse.OptionParser(usage=usage) +from argparse import ArgumentParser +parser = ArgumentParser() +parser.add_argument("config_file",type=str) +options = parser.parse_args() -(options, args) = parser.parse_args() -if len(args) != 1: - parser.error("wrong number of arguments") - -filename = args[0] +filename = options.config_file f = open(filename,'r').read() diff --git a/FWCore/ParameterSet/scripts/edmPythonSearch b/FWCore/ParameterSet/scripts/edmPythonSearch index 505c507bbeb26..e383476e17c76 100755 --- a/FWCore/ParameterSet/scripts/edmPythonSearch +++ b/FWCore/ParameterSet/scripts/edmPythonSearch @@ -1,36 +1,29 @@ #! 
/usr/bin/env python3 -from __future__ import print_function from FWCore.ParameterSet.TreeCrawler import getImportTree, Color import sys, os -import optparse -usage = "%prog [searchString] [configFile]" -parser = optparse.OptionParser(usage) - -(options, args) = parser.parse_args() - -if len(args) != 2: - parser.error("wrong number of arguments") -else: - filename = args[1] - pattern = args[0] +from argparse import ArgumentParser +parser = ArgumentParser() +parser.add_argument("searchString",type=str) +parser.add_argument("configFile",type=str) +options = parser.parse_args() sys.path.append(os.environ["PWD"]) path = sys.path[:] # get the import tree -importTree = getImportTree(filename, path) +importTree = getImportTree(options.configFile, path) # search the tree result = [] -importTree.search(pattern,result) +importTree.search(options.searchString,result) # sort the output by file name -result.sort(key= lambda x: x.filename) +result.sort(key= lambda x: x.options.configFile) dumpStack = True # dump to screen for item in result: - print(item.line.replace(pattern,Color.hilight+pattern+Color.none)) + print(item.line.replace(options.searchString,Color.hilight+options.searchString+Color.none)) print("%s (line: %s)" %(item.filename, item.number)) if dumpStack and hasattr(item, 'stacks'): # make a set of strings, so it's unique @@ -39,10 +32,3 @@ for item in result: froms.add('From ' + ' -> '.join(stack)) print('\n'.join(froms)) print('\n') - - - - - - - diff --git a/FWCore/ParameterSet/scripts/edmPythonTree b/FWCore/ParameterSet/scripts/edmPythonTree index bad33eec3089b..5de6a30da8e1e 100755 --- a/FWCore/ParameterSet/scripts/edmPythonTree +++ b/FWCore/ParameterSet/scripts/edmPythonTree @@ -3,27 +3,16 @@ from FWCore.ParameterSet.TreeCrawler import getImportTree import sys, os -import optparse -usage = "%prog [configuration file name]" -parser = optparse.OptionParser(usage=usage) - -(options, args) = parser.parse_args() -if len(args) != 1: - parser.error("wrong number of arguments") - -filename = args[0] +from argparse import ArgumentParser +parser = ArgumentParser() +parser.add_argument("config_file",type=str) +options = parser.parse_args() sys.path.append(os.environ["PWD"]) path = sys.path[:] # get the dependencies -importTree = getImportTree(filename, path) +importTree = getImportTree(options.config_file, path) # finally dump the tree importTree.dump(0) - - - - - - diff --git a/FWCore/ParameterSet/test/comparePythonOutput.py b/FWCore/ParameterSet/test/comparePythonOutput.py index be0c5637e6e38..98571397bb3c4 100755 --- a/FWCore/ParameterSet/test/comparePythonOutput.py +++ b/FWCore/ParameterSet/test/comparePythonOutput.py @@ -3,16 +3,18 @@ # ignoring trivial differences like order within # dictionaries or escape characters -from __future__ import print_function -from sys import argv -from sys import exit -if len(argv) < 3: - print("usage: ",argv[0]," ") - exit(-1) -cfg1 = eval(file(argv[1]).read()) -cfg2 = eval(file(argv[2]).read()) +import sys +from argparse import ArgumentParser + +parser = ArgumentParser() +parser.add_argument("file1",type=str) +parser.add_argument("file2",type=str) +options = parser.parse_args() + +cfg1 = eval(file(options.file1).read()) +cfg2 = eval(file(options.file2).read()) if cfg1 != cfg2: - print(argv[1], " and ", argv[2], " do not match") + print(options.file1, " and ", options.file2, " do not match") k1 = set(cfg1.keys()) k2 = set(cfg2.keys()) if k1-k2 : @@ -28,5 +30,5 @@ else: print("The value of key ", key , " does not match") - exit(-1) + sys.exit(-1) 
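The script conversions above (edmPythonConfigToCppValidation, edmPythonSearch, edmPythonTree, comparePythonOutput.py) follow one pattern: the manual len(args) checks are dropped because argparse validates positional arguments itself. A generic sketch of that pattern; the config_file name comes from the patch, the example file name is made up.

from argparse import ArgumentParser

sketch = ArgumentParser()
sketch.add_argument("config_file", type=str)   # one required positional argument

opts = sketch.parse_args(["myConfig_cfg.py"])
print(opts.config_file)
# sketch.parse_args([]) or sketch.parse_args(["a.py", "b.py"]) would exit with a
# usage message, so the explicit parser.error("wrong number of arguments")
# branches removed above are no longer needed.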
print("matched") diff --git a/FWCore/PythonUtilities/scripts/compareJSON.py b/FWCore/PythonUtilities/scripts/compareJSON.py index eccec5aae98be..2b260ec38fd2b 100755 --- a/FWCore/PythonUtilities/scripts/compareJSON.py +++ b/FWCore/PythonUtilities/scripts/compareJSON.py @@ -1,55 +1,55 @@ #!/usr/bin/env python3 -from __future__ import print_function import sys -import optparse +from argparse import ArgumentParser from FWCore.PythonUtilities.LumiList import LumiList if __name__ == '__main__': - parser = optparse.OptionParser ("Usage: %prog --command [--options] alpha.json beta.json [output.json]") + parser = ArgumentParser() # required parameters - cmdGroup = optparse.OptionGroup (parser, "Command Options ") - cmdGroup.add_option ('--and', dest='command', action='store_const', - const='and', - help = '"and" (i.e., take intersection) of two files') - cmdGroup.add_option ('--or', dest='command', action='store_const', - const='or', - help = '"or" (i.e., take union) of two files') - cmdGroup.add_option ('--sub', dest='command', action='store_const', - const='sub', - help = '"subtraction" (i.e., lumi sections in alpha not in beta) of two files') - cmdGroup.add_option ('--diff', dest='command', action='store_const', - const='diff', - help = '"All differences (i.e., alpha - beta AND beta - alpha) of two files. Output will only be to screen (not proper JSON format).') - parser.add_option_group (cmdGroup) - (options, args) = parser.parse_args() - if len (args) < 2 or len (args) > 3: - raise RuntimeError("Two input filenames with one optional output filename must be provided.") + cmdGroupTitle = parser.add_argument_group("Command Options") + cmdGroup = cmdGroupTitle.add_mutually_exclusive_group(required=True) + cmdGroup.add_argument('--and', dest='command', action='store_const', + const='and', + help = '"and" (i.e., take intersection) of two files') + cmdGroup.add_argument('--or', dest='command', action='store_const', + const='or', + help = '"or" (i.e., take union) of two files') + cmdGroup.add_argument('--sub', dest='command', action='store_const', + const='sub', + help = '"subtraction" (i.e., lumi sections in alpha not in beta) of two files') + cmdGroup.add_argument('--diff', dest='command', action='store_const', + const='diff', + help = '"All differences" (i.e., alpha - beta AND beta - alpha) of two files. Output will only be to screen (not proper JSON format).') + parser.add_argument("alpha", metavar="alpha.json", type=str) + parser.add_argument("beta", metavar="beta.json", type=str) + parser.add_argument("output", metavar="output.json", type=str, nargs='?', default=None) + options = parser.parse_args() if not options.command: - raise RuntimeError("Exactly one command option must be specified") + parser.error("Exactly one command option must be specified") - alphaList = LumiList (filename = args[0]) # Read in first JSON file - betaList = LumiList (filename = args[1]) # Read in second JSON file + alphaList = LumiList (filename = options.alpha) # Read in first JSON file + betaList = LumiList (filename = options.beta) # Read in second JSON file ################## ## Diff Command ## ################## if options.command == 'diff': - if len (args) >= 3: + if options.output is not None: raise RuntimeError("Can not output to file with '--diff' option. The output is not standard JSON.") firstOnly = alphaList - betaList secondOnly = betaList - alphaList if not firstOnly and not secondOnly: - print("Files '%s' and '%s' are the same." % (args[0], args[1])) + print("Files '%s' and '%s' are the same." 
% (options.alpha, options.beta)) sys.exit() - print("'%s'-only lumis:" % args[0]) + print("'%s'-only lumis:" % options.alpha) if firstOnly: print(firstOnly) else: print("None") - print("\n'%s'-only lumis:" % args[1]) + print("\n'%s'-only lumis:" % options.beta) if secondOnly: print(secondOnly) else: @@ -68,8 +68,8 @@ if options.command == 'sub': outputList = alphaList - betaList - if len (args) >= 3: - outputList.writeJSON (args[2]) + if options.output is not None: + outputList.writeJSON(options.output) else: # print to screen print(outputList) diff --git a/FWCore/PythonUtilities/scripts/csv2json.py b/FWCore/PythonUtilities/scripts/csv2json.py index 755b6079fe21b..9d1eaec3f933f 100755 --- a/FWCore/PythonUtilities/scripts/csv2json.py +++ b/FWCore/PythonUtilities/scripts/csv2json.py @@ -1,31 +1,27 @@ #!/usr/bin/env python3 -from __future__ import print_function import sys -import optparse +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter import re from FWCore.PythonUtilities.LumiList import LumiList - if __name__ == '__main__': - parser = optparse.OptionParser ("Usage: %prog input.csv") - parser.add_option ('--output', dest='output', type='string', - help='Save output to file OUTPUT') - parser.add_option ('--runIndex', dest='runIndex', type='int', - default = 0, - help='column to be converted to run number (default %default)') - parser.add_option ('--lumiIndex', dest='lumiIndex', type='int', - default = 1, - help='column to be converted to lumi section number (default %default)') - # required parameters - (options, args) = parser.parse_args() - if len (args) != 1: - raise RuntimeError("Must provide exactly one input file") + parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) + parser.add_argument('--output', dest='output', type=str, + help='Save output to file OUTPUT') + parser.add_argument('--runIndex', dest='runIndex', type=int, + default = 0, + help='column to be converted to run number') + parser.add_argument('--lumiIndex', dest='lumiIndex', type=int, + default = 1, + help='column to be converted to lumi section number') + parser.add_argument("input", metavar="input.csv", type=str) + options = parser.parse_args() sepRE = re.compile (r'[\s,;:]+') runLumiDict = {} - events = open (args[0], 'r') + events = open (options.input, 'r') runIndex, lumiIndex = options.runIndex, options.lumiIndex minPieces = max (runIndex, lumiIndex) + 1 for line in events: diff --git a/FWCore/PythonUtilities/scripts/edmDumpEventContent b/FWCore/PythonUtilities/scripts/edmDumpEventContent index e1c0f3897f496..2f741eba8ef64 100755 --- a/FWCore/PythonUtilities/scripts/edmDumpEventContent +++ b/FWCore/PythonUtilities/scripts/edmDumpEventContent @@ -1,11 +1,10 @@ #! 
/usr/bin/env python3 -from __future__ import print_function from builtins import object import os import sys import copy -import optparse +from argparse import ArgumentParser import re import copy import subprocess @@ -159,41 +158,34 @@ class Branch (object): #branches = '' if __name__ == "__main__": - parser = optparse.OptionParser \ - ("usage: %prog [options] templates.root" \ - "\nPrints out info on edm file.") - parser.add_option ('--name', dest='name', action='store_true', - help='print out only branch names') - parser.add_option ('--all', dest='all', action='store_true', - help='Print out everything: type, module, label, '\ - 'process, and branch name') - parser.add_option ('--lfn', dest='lfn', action='store_true', - help="Force LFN2PFN translation (usually not necessary)") - parser.add_option ('--lumi', dest='lumi', action='store_true', - help="Look at 'lumi' tree") - parser.add_option ('--run', dest='run', action='store_true', - help="Look at 'run' tree") - parser.add_option ("--regex", dest='regex', action="append", - type="string", default=[], - help="Filter results based on regex") - parser.add_option ('--skipping', dest='skipping', action='store_true', - help="Print out branches being skipped") - parser.add_option ('--forceColumns', dest='forceColumns', - action='store_true', - help="Forces printouts to be in nice columns") - options, args = parser.parse_args() - if not args: - print(parser.print_usage()) - sys.exit() - filename = expandFilename (args[0], options) + parser = ArgumentParser(description="Prints out info on edm file.") + nameAllGroup = parser.add_mutually_exclusive_group() + nameAllGroup.add_argument('--name', dest='name', action='store_true', + help='print out only branch names') + nameAllGroup.add_argument('--all', dest='all', action='store_true', + help='Print out everything: type, module, label, '\ + 'process, and branch name') + parser.add_argument('--lfn', dest='lfn', action='store_true', + help="Force LFN2PFN translation (usually not necessary)") + lumiRunGroup = parser.add_mutually_exclusive_group() + lumiRunGroup.add_argument('--lumi', dest='lumi', action='store_true', + help="Look at 'lumi' tree") + lumiRunGroup.add_argument('--run', dest='run', action='store_true', + help="Look at 'run' tree") + parser.add_argument("--regex", dest='regex', action="append", + type=str, default=[], + help="Filter results based on regex") + parser.add_argument('--skipping', dest='skipping', action='store_true', + help="Print out branches being skipped") + parser.add_argument('--forceColumns', dest='forceColumns', + action='store_true', + help="Forces printouts to be in nice columns") + parser.add_argument("templates", metavar="templates.root", type=str) + options = parser.parse_args() + filename = expandFilename (options.templates, options) ################### # process options # ################### - # check for illegal combinations - if options.name and options.all: - raise RuntimeError("Can notuse '--name' and '--all' options together.") - if options.lumi and options.run: - raise RuntimeError("Can not use '--lumi' and '--run' options together.") if options.name: Branch.mode = 'name' elif options.all: @@ -211,14 +203,12 @@ if __name__ == "__main__": regexList.append( re.compile( regexString, re.IGNORECASE ) ) # Because PyRoot is, well, greedy, we want to make sure we have # setup and parsed the command line options before importing ROOT - # otherwise ROOT will try and do this for us. This "feature" can - # be turned of in Root 5.24 or later, but CMSSW uses 5.22. 
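In edmDumpEventContent above (and similarly in compareJSON.py earlier in this patch), the removed runtime checks such as "Can not use '--name' and '--all' options together" are now enforced by argparse itself. A minimal sketch of the mutually-exclusive-group mechanism, using only option names taken from the patch:

from argparse import ArgumentParser

sketch = ArgumentParser()
group = sketch.add_mutually_exclusive_group()
group.add_argument("--name", action="store_true", help="print out only branch names")
group.add_argument("--all", action="store_true", help="print out everything")

print(sketch.parse_args(["--name"]).name)
# sketch.parse_args(["--name", "--all"]) exits at parse time with an error
# along the lines of "argument --all: not allowed with argument --name".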
- import PhysicsTools.PythonAnalysis as cmstools + # otherwise ROOT will try and do this for us. import ROOT ROOT.gROOT.SetBatch() # setting batch mode - # Here we turn of stderr so that we don't get all of the errors + # Here we turn off stderr so that we don't get all of the errors # saying we don't have a dictionary for all of the objects in the - # root file. When we've loaded the file, we're turn it back + # root file. When we've loaded the file, we turn it back on oldStderr = os.dup( sys.stderr.fileno() ) newStderr = open ( '/dev/null', 'w') os.dup2( newStderr.fileno(), sys.stderr.fileno() ) diff --git a/FWCore/PythonUtilities/scripts/filterCSVwithJSON.py b/FWCore/PythonUtilities/scripts/filterCSVwithJSON.py index d3d67c9a1b475..e5ad653470bb7 100755 --- a/FWCore/PythonUtilities/scripts/filterCSVwithJSON.py +++ b/FWCore/PythonUtilities/scripts/filterCSVwithJSON.py @@ -1,35 +1,34 @@ #!/usr/bin/env python3 -from __future__ import print_function import sys -import optparse +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter import re from FWCore.PythonUtilities.LumiList import LumiList - if __name__ == '__main__': - parser = optparse.OptionParser ("Usage: %prog input.json input.csv output.csv") - parser.add_option ('--output', dest='output', type='string', - help='Save output to file OUTPUT') - parser.add_option ('--runIndex', dest='runIndex', type='int', - default = 0, - help='column to be converted to run number (default %default)') - parser.add_option ('--lumiIndex', dest='lumiIndex', type='int', - default = 1, - help='column to be converted to lumi section number (default %default)') - parser.add_option ('--noWarnings', dest='noWarnings', action='store_true', - help='do not print warnings about lines not matching run, lumi numbers') - # required parameters - (options, args) = parser.parse_args() - if len (args) != 3: - raise RuntimeError("Must provide an input JSON file, an input CSV file, and an output CSV file") + parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) + parser.add_argument('--output', dest='output', type=str, + help='Save output to file OUTPUT') + parser.add_argument('--runIndex', dest='runIndex', type=int, + default = 0, + help='column to be converted to run number') + parser.add_argument('--lumiIndex', dest='lumiIndex', type=int, + default = 1, + help='column to be converted to lumi section number') + parser.add_argument('--noWarnings', dest='noWarnings', action='store_true', + default = False, + help='do not print warnings about lines not matching run, lumi numbers') + parser.add_argument("input_json", metavar="input.json", type=str) + parser.add_argument("input_csv", metavar="input.csv", type=str) + parser.add_argument("output_csv", metavar="output.csv", type=str) + options = parser.parse_args() sepRE = re.compile (r'[\s,;:]+') runLumiDict = {} - jsonList = LumiList (args[0]) - source = open (args[1], 'r') - target = open (args[2], 'w') + jsonList = LumiList(options.input_json) + source = open(options.input_csv, 'r') + target = open(options.output_csv, 'w') runIndex, lumiIndex = options.runIndex, options.lumiIndex minPieces = max (runIndex, lumiIndex) + 1 for line in source: diff --git a/FWCore/PythonUtilities/scripts/filterJSON.py b/FWCore/PythonUtilities/scripts/filterJSON.py index 237fdf256bd76..b7e6387fbba28 100755 --- a/FWCore/PythonUtilities/scripts/filterJSON.py +++ b/FWCore/PythonUtilities/scripts/filterJSON.py @@ -1,28 +1,25 @@ #!/usr/bin/env python3 -from __future__ import print_function import sys -import optparse 
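csv2json.py and filterCSVwithJSON.py above switch to ArgumentDefaultsHelpFormatter, which is why the literal "(default %default)" fragments disappear from their help strings: argparse appends the default automatically. A short sketch, assuming only the standard library and reusing the --runIndex option from the patch:

from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter

sketch = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
sketch.add_argument("--runIndex", type=int, default=0,
                    help="column to be converted to run number")
sketch.print_help()   # the help line for --runIndex now ends with "(default: 0)"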
+from argparse import ArgumentParser import re from FWCore.PythonUtilities.LumiList import LumiList - if __name__ == '__main__': - parser = optparse.OptionParser ("Usage: %prog alpha.json") - parser.add_option ('--max', dest='max', type='int', default=0, - help='maximum run to keep in output') - parser.add_option ('--min', dest='min', type='int', default=0, - help='minimum run to keep in output') - parser.add_option ('--runs', dest='runs', type='string', - action='append', default = [], - help='runs to remove from JSON file') - parser.add_option ('--output', dest='output', type='string', - help='Save output to file OUTPUT') + parser = ArgumentParser() + parser.add_argument('--max', dest='max', type=int, default=0, + help='maximum run to keep in output') + parser.add_argument('--min', dest='min', type=int, default=0, + help='minimum run to keep in output') + parser.add_argument('--runs', dest='runs', type=str, + action='append', default = [], + help='runs to remove from JSON file') + parser.add_argument('--output', dest='output', type=str, + help='Save output to file OUTPUT') + parser.add_argument("alpha", metavar="alpha.json", type=str) # required parameters - (options, args) = parser.parse_args() - if len (args) != 1: - raise RuntimeError("Must provide exactly one input file") + options = parser.parse_args() if options.min and options.max and options.min > options.max: raise RuntimeError("Minimum value (%d) is greater than maximum value (%d)" % (options.min, options.max)) @@ -33,7 +30,7 @@ runs = commaRE.split (chunk) runsToRemove.extend (runs) - alphaList = LumiList (filename = args[0]) # Read in first JSON file + alphaList = LumiList (filename = options.alpha) # Read in first JSON file allRuns = alphaList.getRuns() for run in allRuns: if options.min and int(run) < options.min: diff --git a/FWCore/PythonUtilities/scripts/fjr2json.py b/FWCore/PythonUtilities/scripts/fjr2json.py index c8d0a40afa02c..121ff5383da32 100755 --- a/FWCore/PythonUtilities/scripts/fjr2json.py +++ b/FWCore/PythonUtilities/scripts/fjr2json.py @@ -1,26 +1,23 @@ #! /usr/bin/env python3 -from __future__ import print_function from FWCore.PythonUtilities.XML2Python import xml2obj from FWCore.PythonUtilities.LumiList import LumiList from pprint import pprint import ast -import optparse +from argparse import ArgumentParser import sys - if __name__ == '__main__': - parser = optparse.OptionParser ("Usage: %prog [--options] job1.fjr [job2.fjr...]") - parser.add_option ('--output', dest='output', type='string', - help='Save output to file OUTPUT') - (options, args) = parser.parse_args() - if not args: - raise RuntimeError("Must provide at least one input file") + parser = ArgumentParser() + parser.add_argument('--output', dest='output', type=str, + help='Save output to file OUTPUT') + parser.add_argument("job_fjr", metavar="job.fjr", nargs='+', type=str) + options = parser.parse_args() runsLumisDict = {} - for fjr in args: + for fjr in options.job_fjr: try: obj = xml2obj (filename=fjr) except: diff --git a/FWCore/PythonUtilities/scripts/generateEDF.py b/FWCore/PythonUtilities/scripts/generateEDF.py index 6091c826ffac5..938308f8f0ab7 100755 --- a/FWCore/PythonUtilities/scripts/generateEDF.py +++ b/FWCore/PythonUtilities/scripts/generateEDF.py @@ -1,14 +1,12 @@ #! 
diff --git a/FWCore/PythonUtilities/scripts/generateEDF.py b/FWCore/PythonUtilities/scripts/generateEDF.py
index 6091c826ffac5..938308f8f0ab7 100755
--- a/FWCore/PythonUtilities/scripts/generateEDF.py
+++ b/FWCore/PythonUtilities/scripts/generateEDF.py
@@ -1,14 +1,12 @@
 #! /usr/bin/env python3

-from __future__ import print_function
-from __future__ import division
 from builtins import zip
 from builtins import object
 from past.utils import old_div
 from builtins import range
 import sys
 import re
-import optparse
+from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
 from pprint import pprint
 import array
 import ROOT
@@ -558,74 +556,68 @@ def makeEDFplot (lumiCont, eventsDict, totalWeight, outputFile, options):
     ## command line options ##
     ##########################
     allowedEDF = ['time', 'instLum', 'instIntLum']
-    parser = optparse.OptionParser ("Usage: %prog [options] lumi.csv events.txt output.png", description='Script for generating EDF curves. See https://twiki.cern.ch/twiki/bin/viewauth/CMS/SWGuideGenerateEDF for more details.')
-    plotGroup = optparse.OptionGroup (parser, "Plot Options")
-    rangeGroup = optparse.OptionGroup (parser, "Range Options")
-    inputGroup = optparse.OptionGroup (parser, "Input Options")
-    modeGroup = optparse.OptionGroup (parser, "Mode Options")
-    plotGroup.add_option ('--title', dest='title', type='string',
-                          default = 'Empirical Distribution Function',
-                          help = 'title of plot (default %default)')
-    plotGroup.add_option ('--predicted', dest='pred', type='float',
-                          default = 0,
-                          help = 'factor by which predicted curve is greater than observed')
-    plotGroup.add_option ('--predLabel', dest='predLabel', type='string',
-                          default = 'Predicted',
-                          help = 'label of predicted in legend')
-    plotGroup.add_option ('--noDataPoints', dest='noDataPoints',
-                          action='store_true',
-                          help="Draw lines but no points for data")
-    rangeGroup.add_option ('--minRun', dest='minRun', type='int', default=0,
-                           help='Minimum run number to consider')
-    rangeGroup.add_option ('--maxRun', dest='maxRun', type='int', default=0,
-                           help='Maximum run number to consider')
-    rangeGroup.add_option ('--minIntLum', dest='minIntLum', type='float', default=0,
-                           help='Minimum integrated luminosity to consider')
-    rangeGroup.add_option ('--maxIntLum', dest='maxIntLum', type='float', default=0,
-                           help='Maximum integrated luminosity to consider')
-    rangeGroup.add_option ('--resetExpected', dest='resetExpected',
-                           action='store_true',
-                           help='Reset expected from total yield to highest point considered')
-    rangeGroup.add_option ('--breakExpectedIntLum', dest='breakExpectedIntLum',
-                           type='string', action='append', default=[],
-                           help='Break expected curve into pieces at integrated luminosity boundaries')
-    inputGroup.add_option ('--ignoreNoLumiEvents', dest='ignore',
-                           action='store_true',
-                           help = 'Ignore (with a warning) events that do not have a lumi section')
-    inputGroup.add_option ('--noWarnings', dest='noWarnings',
-                           action='store_true',
-                           help = 'Do not print warnings about missing luminosity information')
-    inputGroup.add_option ('--runEventLumi', dest='relOrder',
-                           action='store_true',
-                           help = 'Parse event list assuming Run, Event #, Lumi# order')
-    inputGroup.add_option ('--weights', dest='weights', action='store_true',
-                           help = 'Read fourth column as a weight')
-    modeGroup.add_option ('--print', dest='printValues', action='store_true',
-                          help = 'Print X and Y values of EDF plot')
-    modeGroup.add_option ('--runsWithLumis', dest='runsWithLumis',
-                          type='string',action='append', default=[],
-                          help='Print out run and lumi sections corresponding to integrated luminosities provided and then exits')
-    modeGroup.add_option ('--edfMode', dest='edfMode', type='string',
-                          default='time',
-                          help="EDF Mode %s (default '%%default')" % allowedEDF)
-    parser.add_option_group (plotGroup)
-    parser.add_option_group (rangeGroup)
-    parser.add_option_group (inputGroup)
-    parser.add_option_group (modeGroup)
-    (options, args) = parser.parse_args()
-
-    if options.edfMode not in allowedEDF:
-        raise RuntimeError("edfMode (currently '%s') must be one of %s" \
-              % (options.edfMode, allowedEDF))
-
-    if len (args) != 3 and not (options.runsWithLumis and len(args) >= 1):
-        raise RuntimeError("Must provide lumi.csv, events.txt, and output.png")
-
+    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter, usage='%(prog)s [options] lumi.csv events.txt output.png', description='Script for generating EDF curves. See https://twiki.cern.ch/twiki/bin/viewauth/CMS/SWGuideGenerateEDF for more details.')
+    plotGroup = parser.add_argument_group("Plot Options")
+    rangeGroup = parser.add_argument_group("Range Options")
+    inputGroup = parser.add_argument_group("Input Options")
+    modeGroup = parser.add_argument_group("Mode Options")
+    plotGroup.add_argument('--title', dest='title', type=str,
+                           default = 'Empirical Distribution Function',
+                           help = 'title of plot')
+    plotGroup.add_argument('--predicted', dest='pred', type=float,
+                           default = 0,
+                           help = 'factor by which predicted curve is greater than observed')
+    plotGroup.add_argument('--predLabel', dest='predLabel', type=str,
+                           default = 'Predicted',
+                           help = 'label of predicted in legend')
+    plotGroup.add_argument('--noDataPoints', dest='noDataPoints',
+                           default = False, action='store_true',
+                           help="Draw lines but no points for data")
+    rangeGroup.add_argument('--minRun', dest='minRun', type=int, default=0,
+                            help='Minimum run number to consider')
+    rangeGroup.add_argument('--maxRun', dest='maxRun', type=int, default=0,
+                            help='Maximum run number to consider')
+    rangeGroup.add_argument('--minIntLum', dest='minIntLum', type=float, default=0,
+                            help='Minimum integrated luminosity to consider')
+    rangeGroup.add_argument('--maxIntLum', dest='maxIntLum', type=float, default=0,
+                            help='Maximum integrated luminosity to consider')
+    rangeGroup.add_argument('--resetExpected', dest='resetExpected',
+                            default = False, action='store_true',
+                            help='Reset expected from total yield to highest point considered')
+    rangeGroup.add_argument('--breakExpectedIntLum', dest='breakExpectedIntLum',
+                            type=str, action='append', default=[],
+                            help='Break expected curve into pieces at integrated luminosity boundaries')
+    inputGroup.add_argument('--ignoreNoLumiEvents', dest='ignore',
+                            default = False, action='store_true',
+                            help = 'Ignore (with a warning) events that do not have a lumi section')
+    inputGroup.add_argument('--noWarnings', dest='noWarnings',
+                            default = False,action='store_true',
+                            help = 'Do not print warnings about missing luminosity information')
+    inputGroup.add_argument('--runEventLumi', dest='relOrder',
+                            default = False, action='store_true',
+                            help = 'Parse event list assuming Run, Event #, Lumi# order')
+    inputGroup.add_argument('--weights', dest='weights', default = False, action='store_true',
+                            help = 'Read fourth column as a weight')
+    modeGroup.add_argument('--print', dest='printValues', default = False, action='store_true',
+                           help = 'Print X and Y values of EDF plot')
+    modeGroup.add_argument('--runsWithLumis', dest='runsWithLumis',
+                           type=str,action='append', default=[],
+                           help='Print out run and lumi sections corresponding to integrated luminosities provided and then exits')
+    modeGroup.add_argument('--edfMode', dest='edfMode', type=str,
+                           default='time',
+                           help="EDF Mode", choices=allowedEDF)
parser.add_argument("lumi_csv", metavar="lumi.csv", type=str) + parser.add_argument("events_txt", metavar="events.txt", type=str, nargs='?') + parser.add_argument("output_png", metavar="output.png", type=str, nargs='?') + options = parser.parse_args() + + if not options.runsWithLumis and (options.events_txt is None or options.output_png is None): + parser.error("Must provide lumi.csv, events.txt, and output.png") ########################## ## load Luminosity info ## ########################## - cont = LumiInfoCont (args[0], **options.__dict__) + cont = LumiInfoCont (options.lumi_csv, **options.__dict__) cont.minRun = options.minRun cont.maxRun = options.maxRun cont.minIntLum = options.minIntLum @@ -683,6 +675,6 @@ def makeEDFplot (lumiCont, eventsDict, totalWeight, outputFile, options): ## make EDF plots ## #################### if options.edfMode != 'time' and not cont.xingInfo: - raise RuntimeError("'%s' does not have Xing info" % args[0]) - eventsDict, totalWeight = loadEvents (args[1], cont, options) - makeEDFplot (cont, eventsDict, totalWeight, args[2], options) + raise RuntimeError("'%s' does not have Xing info" % options.lumi_csv) + eventsDict, totalWeight = loadEvents (options.events_txt, cont, options) + makeEDFplot (cont, eventsDict, totalWeight, options.output_png, options) diff --git a/FWCore/PythonUtilities/scripts/mergeJSON.py b/FWCore/PythonUtilities/scripts/mergeJSON.py index 49028d8872ff4..cad95cf4db915 100755 --- a/FWCore/PythonUtilities/scripts/mergeJSON.py +++ b/FWCore/PythonUtilities/scripts/mergeJSON.py @@ -1,12 +1,10 @@ #!/usr/bin/env python3 -from __future__ import print_function import sys -import optparse +from argparse import ArgumentParser import re from FWCore.PythonUtilities.LumiList import LumiList - def filterRuns (lumiList, minRun, maxRun): allRuns = lumiList.getRuns() runsToRemove = [] @@ -16,23 +14,20 @@ def filterRuns (lumiList, minRun, maxRun): if maxRun and int(run) > maxRun: runsToRemove.append (run) lumiList.removeRuns (runsToRemove) - - if __name__ == '__main__': - parser = optparse.OptionParser ("Usage: %prog alpha1.json [alpha2.json:142300-145900]") - parser.add_option ('--output', dest='output', type='string', - help='Save output to file OUTPUT') + parser = ArgumentParser() + parser.add_argument('--output', dest='output', type=str, + help='Save output to file OUTPUT') + parser.add_argument("alpha_json", metavar="alpha.json[:142300-145900]", type=str, nargs='+') # required parameters - (options, args) = parser.parse_args() - if not len (args): - raise RuntimeError("Must provide at least one input file") + options = parser.parse_args() minMaxRE = re.compile (r'(\S+):(\d+)-(\d*)') finalList = LumiList() - for filename in args: + for filename in options.alpha_json: minRun = maxRun = 0 match = minMaxRE.search (filename) if match: diff --git a/FWCore/PythonUtilities/scripts/printJSON.py b/FWCore/PythonUtilities/scripts/printJSON.py index 07333a3db31f8..e13f6e0da0503 100755 --- a/FWCore/PythonUtilities/scripts/printJSON.py +++ b/FWCore/PythonUtilities/scripts/printJSON.py @@ -1,22 +1,18 @@ #!/usr/bin/env python3 -from __future__ import print_function import sys -import optparse +from argparse import ArgumentParser from FWCore.PythonUtilities.LumiList import LumiList - if __name__ == '__main__': - parser = optparse.OptionParser ("Usage: %prog alpha.json") - parser.add_option ('--range', dest='range', action='store_true', - help='Print out run range only') - # required parameters - (options, args) = parser.parse_args() - if len (args) != 1: - raise 
RuntimeError("Must provide exactly one input file") + parser = ArgumentParser() + parser.add_argument('--range', dest='range', default=False, action='store_true', + help='Print out run range only') + parser.add_argument("alpha_json", metavar="alpha.json", type=str) + options = parser.parse_args() - alphaList = LumiList (filename = args[0]) # Read in first JSON file + alphaList = LumiList (filename = options.alpha_json) # Read in first JSON file if options.range: keys = alphaList.compactList.keys() minRun = min (keys) diff --git a/FWCore/Utilities/scripts/edmAddClassVersion b/FWCore/Utilities/scripts/edmAddClassVersion index 168e012f4f6d1..a00ce356fc717 100755 --- a/FWCore/Utilities/scripts/edmAddClassVersion +++ b/FWCore/Utilities/scripts/edmAddClassVersion @@ -1,6 +1,5 @@ #! /usr/bin/env python3 import string, os -from optparse import OptionParser class ClassesDefXmlParser(object): """Parses a classes_def.xml file looking for class declarations that do not contain @@ -36,9 +35,9 @@ class ClassesDefXmlParser(object): if c == '"' : q1 = not q1 if c == "'" : q2 = not q2 try : p.Parse(nxml) - except xml.parsers.expat.ExpatError, e : - print '--->> edmCheckClassVersion: ERROR: parsing selection file ',self._file - print '--->> edmCheckClassVersion: ERROR: Error is:', e + except xml.parsers.expat.ExpatError as e : + print('--->> edmAddClassVersion: ERROR: parsing selection file ',self._file) + print('--->> edmAddClassVersion: ERROR: Error is:', e) raise f.close() def start_element(self,name,attrs): @@ -101,9 +100,9 @@ class GccXmlOutputParser(object): p.EndElementHandler = self.end_element f = open(self._fileName) try : p.Parse(f.read()) - except xml.parsers.expat.ExpatError, e : - print '--->> edmAddClassVersion: ERROR: parsing selection file ',self._file - print '--->> edmAddClassVersion: ERROR: Error is:', e + except xml.parsers.expat.ExpatError as e : + print('--->> edmAddClassVersion: ERROR: parsing selection file ',self._file) + print('--->> edmAddClassVersion: ERROR: Error is:', e) raise f.close() def start_element(self,name,attrs): @@ -166,7 +165,7 @@ def getNameTreeFromGccXml(headerFileName,gccxml,gccxmlopt,cppopt): cmd = '%s %s "%s" -fxml=%s %s -D__REFLEX__' %(gccxml, gccxmlopt, headerFileName, xmlfile, cppopt) status = os.system(cmd) if status : - print '\n--->> edmAddClassVersion: ERROR: processing file with gccxml. Command failed.' + print('\n--->> edmAddClassVersion: ERROR: processing file with gccxml. Command failed.') exit(1) import subprocess, sys @@ -180,8 +179,8 @@ def getNameTreeFromGccXml(headerFileName,gccxml,gccxmlopt,cppopt): sout = out.read() serr = err.read() if serr : - print '--->> genreflex: WARNING: Could not invoke %s --print' % self.gccxml - print '--->> genreflex: WARNING: %s' % serr + print('--->> genreflex: WARNING: Could not invoke %s --print' % self.gccxml) + print('--->> genreflex: WARNING: %s' % serr) return s try: p = GccXmlOutputParser(xmlfile) @@ -212,16 +211,16 @@ def checkIfTypedefOfTemplate(nameList,nameTree): if __name__ == '__main__': #Setup the options - from optparse import OptionParser - oparser = OptionParser() - oparser.add_option("-l","--lib", dest="library", - help="specify the library to load. 
-                   help="specify the library to load. If not set classes are found using the PluginManager")
-    oparser.add_option("-d","--dir", dest="filedir",default=".",
-                   help="the directory holding both the classes_def.xml and classes.h files to read")
-    oparser.add_option("-c","--cppopt",dest="cppopt", default="",
-                   help="specify the C++ compiler options to pass to gccxml")
+    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
+    oparser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
+    oparser.add_argument("-l","--lib", dest="library", type=str,
+                   help="specify the library to load. If not set classes are found using the PluginManager")
+    oparser.add_argument("-d","--dir", dest="filedir",default=".", type=str,
+                   help="the directory holding both the classes_def.xml and classes.h files to read")
+    oparser.add_argument("-c","--cppopt",dest="cppopt", default="", type=str,
+                   help="specify the C++ compiler options to pass to gccxml")

-    (options,args)=oparser.parse_args()
+    options=oparser.parse_args()

     #Need to not have ROOT load .rootlogon.(C|py) since it can cause interference.
     import ROOT
@@ -245,7 +244,7 @@ if __name__ == '__main__':
     sout = out.read()
     serr = err.read()
     if serr :
-        print '--->> edmAddClassVerion: WARNING: Could not invoke "scram tool info gccxml"'
+        print('--->> edmAddClassVersion: WARNING: Could not invoke "scram tool info gccxml"')
         exit(1)
     gccxmlpath = ''
     for l in sout.split("\n"):
@@ -254,7 +253,7 @@ if __name__ == '__main__':
             gccxmlpath = name_value[1]
             break
     if not gccxmlpath:
-        print '--->> edmAddClassVerion: WARNING: Could not find path to gccxml executable'
+        print('--->> edmAddClassVersion: WARNING: Could not find path to gccxml executable')
         exit(1)

@@ -276,22 +275,22 @@ if __name__ == '__main__':
     #' -I/uscms_data/d2/cdj/build/temp/classVersion/CMSSW_4_3_0_pre6/src -I/uscms_data/d2/cdj/build/temp/classVersion/CMSSW_4_3_0_pre6/include -I/uscmst1/prod/sw/cms/slc5_amd64_gcc434/cms/cmssw/CMSSW_4_3_0_pre6/src -I/uscmst1/prod/sw/cms/slc5_amd64_gcc434/cms/cmssw/CMSSW_4_3_0_pre6/include -I/uscmst1/prod/sw/cms/slc5_amd64_gcc434/external/boost/1.44.0-cms3/include -I/uscmst1/prod/sw/cms/slc5_amd64_gcc434/external/uuid/1.38-cms2/include -I/uscmst1/prod/sw/cms/slc5_amd64_gcc434/lcg/root/5.27.06b-cms18/cint -I/uscmst1/prod/sw/cms/slc5_amd64_gcc434/external/pcre/4.4-cms/include -I/uscmst1/prod/sw/cms/slc5_amd64_gcc434/external/zlib/1.2.3-cms/include -I/uscmst1/prod/sw/cms/slc5_amd64_gcc434/lcg/root/5.27.06b-cms18/include'
     incfile = options.filedir+"/classes.h"
     nameTree = getNameTreeFromGccXml(incfile,gccxml,gccxmlopt,cppopt)
-    #print typedefToClasses
+    #print(typedefToClasses)
     #exit(0)
     xmlfile = options.filedir+"/classes_def.xml"
     p = ClassesDefXmlParser(xmlfile)
-    #print p.classes
+    #print(p.classes)
     classesToModify = [x[0] for x in p.classes.items() if not checkIfTypedefOfTemplate([y for y in x[0].split(':') if y != ''],nameTree)]
     classesWithChecksum = dict([(x,checksumForClass(x)) for x in classesToModify])
-    print 'Found the following non-templated classes which will be assigned a ClassVersion and the following checksum'
+    print('Found the following non-templated classes which will be assigned a ClassVersion and the following checksum')
     for name,checksum in classesWithChecksum.items():
-        print name,checksum
+        print(name,checksum)
     #exit(0)

     #Now create the updated classes_def.xml file
-    print "creating a './classes_def.xml.generated' file which you should use to replace",xmlfile
+    print("creating a './classes_def.xml.generated' file which you should use to replace",xmlfile)
     f = open(xmlfile)
     outFile = open('classes_def.xml.generated','w')
     out = ''
diff --git a/FWCore/Utilities/scripts/edmCheckClassTransients b/FWCore/Utilities/scripts/edmCheckClassTransients
index 918609688c4ab..dbd10c15e86ca 100755
--- a/FWCore/Utilities/scripts/edmCheckClassTransients
+++ b/FWCore/Utilities/scripts/edmCheckClassTransients
@@ -1,5 +1,4 @@
 #! /usr/bin/env python3
-from __future__ import print_function
 import string
 import re
 import collections
@@ -50,14 +49,14 @@ def checkTrans(templname,name):
     return nerrs

 #Setup the options
-from optparse import OptionParser
-oparser = OptionParser()
-oparser.add_option("-l","--lib", dest="library",
-                  help="specify the library to load")
-oparser.add_option("-f","--rootmap", dest="rmfiles", action="append", default=[],
-                  help="specify the rootmap file(s) to read")
+from argparse import ArgumentParser
+oparser = ArgumentParser()
+oparser.add_argument("-l","--lib", dest="library", type=str,
+                  help="specify the library to load")
+oparser.add_argument("-f","--rootmap", dest="rmfiles", action="append", type=str, default=[],
+                  help="specify the rootmap file(s) to read")

-(options,args)=oparser.parse_args()
+options=oparser.parse_args()

 #Need to not have ROOT load .rootlogon.(C|py) since it can cause interference.
 import ROOT
diff --git a/FWCore/Utilities/scripts/edmCheckClassVersion b/FWCore/Utilities/scripts/edmCheckClassVersion
index 897e4ea2e4ee8..8737d180d203c 100755
--- a/FWCore/Utilities/scripts/edmCheckClassVersion
+++ b/FWCore/Utilities/scripts/edmCheckClassVersion
@@ -1,6 +1,4 @@
 #! /usr/bin/env python3
-from __future__ import print_function
-from optparse import OptionParser
 from sys import version_info
 if version_info[0] > 2:
     atol = int
@@ -153,18 +151,18 @@ def checkClass(name,version,versionsToChecksums):
     return (noError,classVersion,classChecksum)

 #Setup the options
-from optparse import OptionParser
-oparser = OptionParser()
-oparser.add_option("-d","--check_dictionaries", dest="checkdict",action="store_true",default=False,
-                   help="check that all required dictionaries are loaded")
-oparser.add_option("-l","--lib", dest="library",
-                   help="specify the library to load. If not set classes are found using the PluginManager")
-oparser.add_option("-x","--xml_file", dest="xmlfile",default="./classes_def.xml",
-                   help="the classes_def.xml file to read")
-oparser.add_option("-g","--generate_new",dest="generate", action="store_true",default=False,
-                   help="instead of issuing errors, generate a new classes_def.xml file.")
-
-(options,args)=oparser.parse_args()
+from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
+oparser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
+oparser.add_argument("-d","--check_dictionaries", dest="checkdict",action="store_true",default=False,
+                   help="check that all required dictionaries are loaded")
+oparser.add_argument("-l","--lib", dest="library", type=str,
+                   help="specify the library to load. If not set classes are found using the PluginManager")
+oparser.add_argument("-x","--xml_file", dest="xmlfile",default="./classes_def.xml", type=str,
+                   help="the classes_def.xml file to read")
+oparser.add_argument("-g","--generate_new",dest="generate", action="store_true",default=False,
+                   help="instead of issuing errors, generate a new classes_def.xml file.")
+
+options=oparser.parse_args()

 #Need to not have ROOT load .rootlogon.(C|py) since it can cause interference.
 import ROOT