[Test] Simplify setup of "test_problems" regression tests
- Use more standard approach to handling constructor kwargs
- Provide default values for additional arguments
- Integrate handling of programs used for multiple tests
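
A minimal, self-contained sketch of the pattern the first two bullets describe, in plain Python rather than the SConscript itself (the class and parameter names here are hypothetical, chosen only for illustration):

# Before: options arrive through **kwargs, are validated against a
# whitelist, and are pulled out one at a time with fallback defaults.
class OldStyleTest(object):
    _validArgs = {'arguments', 'options', 'tolerance'}

    def __init__(self, name, **kwargs):
        assert set(kwargs) <= self._validArgs, kwargs.keys()
        self.name = name
        self.arguments = kwargs.get('arguments') or []
        self.options = kwargs.get('options') or ''
        self.tolerance = kwargs.get('tolerance') or 1e-5

# After: the same options become explicit keyword parameters with default
# values, so the signature documents itself and an unknown keyword fails
# immediately with a TypeError instead of an assertion.
class NewStyleTest(object):
    def __init__(self, name, arguments=(), options='', tolerance=1e-5):
        self.name = name
        self.arguments = list(arguments)
        self.options = options
        self.tolerance = tolerance

# Both styles accept the same call site:
t = NewStyleTest('DH_graph_dilute', arguments=['DH_NaCl_dilute.xml'])

The third bullet shows up in the diff below as the self.program attribute: a program built by CompileAndTest is kept on the instance, so follow-on Test instances (for example DH_graph_acommon or VCS-LiSi-verbose) reuse the already-built executable instead of rebuilding it.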
speth committed Jun 11, 2020
1 parent ceeee5c commit ac11e7c
Showing 1 changed file with 80 additions and 122 deletions.
202 changes: 80 additions & 122 deletions test_problems/SConscript
@@ -35,36 +35,43 @@ PASSED_FILES = {}


class Test(object):
_validArgs = set(['arguments', 'options', 'artifacts', 'comparisons',
'tolerance', 'threshold', 'ignoreLines', 'extensions',
'dependencies'])

def __init__(self, testName, subdir, programName, blessedName, **kwargs):
assert set(kwargs.keys()) <= self._validArgs, kwargs.keys()
self.subdir = subdir
self.programName = programName
arguments = kwargs.get('arguments') or []
def __init__(self, testName, subdir=None, programName=None,
blessedName='output_blessed.txt', arguments=(), options='',
artifacts=(), comparisons=(), tolerance=1e-5, threshold=1e-14,
dependencies=(), source_files=()):
self.subdir = subdir or testName
self.programName = programName or testName
if isinstance(arguments, str):
arguments = [arguments]
self.arguments = arguments # file arguments
self.options = kwargs.get('options') or ''
self.blessedName = blessedName
self.artifacts = kwargs.get('artifacts') or ()
self.artifacts = artifacts
if isinstance(self.artifacts, str):
self.artifacts = [self.artifacts]
self.comparisons = kwargs.get('comparisons') or ()
self.tolerance = kwargs.get('tolerance') or 1e-5 # error tolerance for CSV comparison
self.threshold = kwargs.get('threshold') or 1e-14 # error threshold for CSV comparison

# ignore lines starting with specified strings when comparing output files
self.ignoreLines = kwargs.get('ignoreLines') or []
self.comparisons = comparisons
self.tolerance = tolerance # error tolerance for CSV comparison
self.threshold = threshold # error threshold for CSV comparison
self.source_files = source_files

self.testName = testName
self.passedFile = '.passed-%s' % testName
PASSED_FILES[self.testName] = pjoin(self.subdir, self.passedFile)

testResults.tests[self.testName] = self
run = self.run(localenv)

if source_files:
self.program = localenv.Program(
pjoin(self.subdir, self.programName), source_files,
LIBS=self.libs or cantera_libs)
else:
self.program = [self.programName]
command = self.program + [pjoin(self.subdir, arg) for arg in arguments]

run = localenv.RegressionTest(pjoin(self.subdir, self.passedFile),
command, active_test_name=testName,
test_blessed_file=blessedName, test_command_options=options,
test_comparisons=comparisons, test_csv_threshold=threshold,
test_csv_tolerance=tolerance, test_ignoreLines=())

localenv.Depends(env['test_results'], run)
localenv.Depends(run, env['build_targets'])
localenv.Alias('test-clean', self.clean(localenv))
@@ -74,42 +81,25 @@ class Test(object):
# reset: just delete the ".passed" file so that this test will be re-run
localenv.Alias('test-reset', self.reset(localenv))

for dep in kwargs.get('dependencies', []):
for dep in dependencies:
localenv.Depends(run, dep)
env.Depends(run, localenv.get('cantera_shlib', ()))

def run(self, env, *args):
source = list(args)
if not source:
source.append(self.programName)

source.extend(pjoin(self.subdir, arg) for arg in self.arguments)

test = env.RegressionTest(pjoin(self.subdir, self.passedFile), source,
active_test_name=self.testName,
test_blessed_file=self.blessedName,
test_command_options=self.options,
test_comparisons=self.comparisons,
test_csv_threshold=self.threshold,
test_csv_tolerance=self.tolerance,
test_ignoreLines=self.ignoreLines)
return test

def reset(self, env, **kwargs):
def reset(self, env):
f = pjoin(os.getcwd(), self.subdir, self.passedFile)
if os.path.exists(f):
uniqueName = 'reset-%s' % self.testName
target = env.Command(uniqueName, [], [Delete(f)])
return target

def clean(self, env, **kwargs):
def clean(self, env, files=None):
# Name used for the output file
if self.blessedName is not None and 'blessed' in self.blessedName:
outName = self.blessedName.replace('blessed', 'output')
else:
outName = 'test_output.txt'

files = kwargs.get('files') or []
files = files or []
files += [self.passedFile, outName]
files += list(self.artifacts)
files += [comp[1] for comp in self.comparisons]
@@ -122,96 +112,75 @@ class Test(object):
return target

class CompileAndTest(Test):
def __init__(self, testName, subdir, programName, blessedName, libs=(),
def __init__(self, testName, subdir=None, programName=None,
blessedName='output_blessed.txt', libs=(), extensions=('cpp',),
**kwargs):
self.extensions = kwargs.get('extensions') or ('cpp',)
self.libs = list(libs)
self.sources = mglob(env, subdir, *self.extensions)
Test.__init__(self, testName, subdir, programName, blessedName, **kwargs)

def run(self, env):
prog = env.Program(pjoin(self.subdir, self.programName),
self.sources,
LIBS=self.libs or cantera_libs)
source = [prog]
return Test.run(self, env, *source)
sources = mglob(env, subdir or testName, *extensions)
Test.__init__(self, testName, subdir, programName, blessedName,
source_files=sources, **kwargs)

def clean(self, env):
basenames = [os.path.splitext(f.name)[0] for f in self.sources]
basenames = [os.path.splitext(f.name)[0] for f in self.source_files]
basenames.append(self.programName)
exts = ['', '.o', '.exe', '.exe.manifest', '.ilk', '.obj', '.pdb', '.obj.pdb']
files = [name + ext for name in basenames for ext in exts]
files = [name + ext for name in set(basenames) for ext in exts]
return Test.clean(self, env, files=files)


dhGraph = localenv.Program('cathermo/DH_graph_1/DH_graph_1',
mglob(env, 'cathermo/DH_graph_1', 'cpp'),
LIBS=cantera_libs)
dhGraph_name = dhGraph[0].name

Test('DH_graph_dilute', 'cathermo/DH_graph_1',
dhGraph, 'DH_NaCl_dilute_blessed.csv',
artifacts=['DH_graph_1.log', dhGraph_name],
arguments='DH_NaCl_dilute.xml')
dhGraph = CompileAndTest('DH_graph_dilute', 'cathermo/DH_graph_1', 'DH_graph_1',
'DH_NaCl_dilute_blessed.csv',
artifacts=['DH_graph_1.log'],
arguments='DH_NaCl_dilute.xml')
Test('DH_graph_acommon', 'cathermo/DH_graph_1',
dhGraph, 'DH_NaCl_acommon_blessed.csv',
artifacts=['DH_graph_1.log', dhGraph_name],
dhGraph.program, 'DH_NaCl_acommon_blessed.csv',
artifacts=['DH_graph_1.log'],
arguments='DH_NaCl_acommon.xml')
Test('DH_graph_bdotak', 'cathermo/DH_graph_1',
dhGraph, 'DH_NaCl_bdotak_blessed.csv',
artifacts=['DH_graph_1.log', dhGraph_name],
dhGraph.program, 'DH_NaCl_bdotak_blessed.csv',
artifacts=['DH_graph_1.log'],
arguments='DH_NaCl_bdotak.xml')
Test('DH_graph_NM', 'cathermo/DH_graph_1',
dhGraph, 'DH_NaCl_NM_blessed.csv',
artifacts=['DH_graph_1.log', dhGraph_name],
dhGraph.program, 'DH_NaCl_NM_blessed.csv',
artifacts=['DH_graph_1.log'],
arguments='DH_NaCl_NM.xml')
Test('DH_graph_Pitzer', 'cathermo/DH_graph_1',
dhGraph, 'DH_NaCl_Pitzer_blessed.csv',
artifacts=['DH_graph_1.log', dhGraph_name],
dhGraph.program, 'DH_NaCl_Pitzer_blessed.csv',
artifacts=['DH_graph_1.log'],
arguments='DH_NaCl_Pitzer.xml')

CompileAndTest('HMW_graph_CpvT', 'cathermo/HMW_graph_CpvT',
'HMW_graph_CpvT', 'output_blessed.txt',
extensions=['^HMW_graph_CpvT.cpp'],
arguments=File('#test/data/HMW_NaCl_sp1977_alt.xml').abspath)
CompileAndTest('HMW_graph_GvI', 'cathermo/HMW_graph_GvI',
'HMW_graph_GvI', None,
blessedName=None,
comparisons=[('T298_blessed.csv', 'T298.csv'),
('T523_blessed.csv', 'T523.csv')],
artifacts=['T373.csv','T423.csv','T473.csv',
'T548.csv','T573.csv'])
CompileAndTest('HMW_graph_GvT', 'cathermo/HMW_graph_GvT',
'HMW_graph_GvT', 'output_blessed.txt',
extensions=['^HMW_graph_GvT.cpp'],
arguments=File('#test/data/HMW_NaCl_sp1977_alt.xml').abspath)
CompileAndTest('HMW_graph_HvT', 'cathermo/HMW_graph_HvT',
'HMW_graph_HvT', 'output_blessed.txt',
extensions=['^HMW_graph_HvT.cpp'],
arguments=File('#test/data/HMW_NaCl_sp1977_alt.xml').abspath)
CompileAndTest('HMW_graph_VvT', 'cathermo/HMW_graph_VvT',
'HMW_graph_VvT', 'output_blessed.txt',
extensions=['^HMW_graph_VvT.cpp'],
arguments=File('#test/data/HMW_NaCl_sp1977_alt.xml').abspath)
CompileAndTest('HMW_test_1', 'cathermo/HMW_test_1',
'HMW_test_1', 'output_noD_blessed.txt')
blessedName='output_noD_blessed.txt')
CompileAndTest('HMW_test_3', 'cathermo/HMW_test_3',
'HMW_test_3', 'output_noD_blessed.txt')
CompileAndTest('IMSTester', 'cathermo/ims', 'IMSTester', 'output_blessed.txt')
CompileAndTest('ISSPTester', 'cathermo/issp', 'ISSPTester', 'output_blessed.txt')
CompileAndTest('stoichSub', 'cathermo/stoichSub',
'stoichSub', 'output_blessed.txt')
CompileAndTest('WaterPDSS', 'cathermo/testWaterPDSS',
'testWaterPDSS', 'output_blessed.txt')
CompileAndTest('WaterSSTP', 'cathermo/testWaterTP',
'testWaterSSTP', 'output_blessed.txt')
CompileAndTest('ISSPTester2', 'cathermo/VPissp',
'ISSPTester2', 'output_blessed.txt')
blessedName='output_noD_blessed.txt')
CompileAndTest('IMSTester', 'cathermo/ims')
CompileAndTest('ISSPTester', 'cathermo/issp')
CompileAndTest('stoichSub', 'cathermo/stoichSub')
CompileAndTest('WaterPDSS', 'cathermo/testWaterPDSS')
CompileAndTest('WaterSSTP', 'cathermo/testWaterTP')
CompileAndTest('ISSPTester2', 'cathermo/VPissp')
CompileAndTest('ChemEquil_ionizedGas',
'ChemEquil_ionizedGas', 'ionizedGasEquil',
'output_blessed.txt',
comparisons=[('table_blessed.csv', 'table.csv')])
#CompileAndTest('CpJump', 'CpJump', 'CpJump', 'output_blessed.txt')
CompileAndTest('cxx_ex', 'cxx_ex', 'cxx_examples', 'output_blessed.txt',
CompileAndTest('cxx_ex',
comparisons=[('eq1_blessed.csv', 'eq1.csv'),
('kin1_blessed.csv', 'kin1.csv'),
('tr1_blessed.csv', 'tr1.csv'),
@@ -221,27 +190,19 @@ CompileAndTest('cxx_ex', 'cxx_ex', 'cxx_examples', 'output_blessed.txt',
artifacts=['eq1.dat', 'kin1.dat', 'kin2.dat', 'kin3.csv',
'kin3.dat', 'tr1.dat', 'tr2.dat'])

diamond = localenv.Program('diamondSurf/runDiamond',
'diamondSurf/runDiamond.cpp',
LIBS=cantera_libs)
diamond_name = diamond[0].name
Test('diamondSurf-xml', 'diamondSurf', diamond, 'runDiamond_blessed.out',
options='diamond_blessed.xml', artifacts=diamond_name)
Test('diamondSurf-cti', 'diamondSurf', diamond, 'runDiamond_blessed.out',
options='diamond.cti', artifacts=diamond_name)
CompileAndTest('dustyGasTransport', 'dustyGasTransport', 'dustyGasTransport',
'output_blessed.txt')
CompileAndTest('mixGasTransport',
'mixGasTransport', 'mixGasTransport', 'output_blessed.txt')
CompileAndTest('multiGasTransport',
'multiGasTransport', 'multiGasTransport', 'output_blessed.txt')

CompileAndTest('pureFluid', 'pureFluidTest', 'testPureWater', 'output_blessed.txt')
CompileAndTest('rankine_democxx', 'rankine_democxx', 'rankine', 'output_blessed.txt')
CompileAndTest('silane_equil', 'silane_equil', 'silane_equil', 'output_blessed.txt')
CompileAndTest('stoichSolidKinetics', 'stoichSolidKinetics',
'stoichSolidKinetics', 'output_blessed.txt')
CompileAndTest('surfkin', 'surfkin', 'surfdemo', 'output_blessed.txt')
diamond = CompileAndTest('diamondSurf-xml', 'diamondSurf', 'diamondSurf',
'runDiamond_blessed.out', options='diamond_blessed.xml')
Test('diamondSurf-cti', 'diamondSurf', diamond.program, 'runDiamond_blessed.out',
options='diamond.cti')
CompileAndTest('dustyGasTransport')
CompileAndTest('mixGasTransport')
CompileAndTest('multiGasTransport')

CompileAndTest('pureFluid', 'pureFluidTest')
CompileAndTest('rankine_democxx')
CompileAndTest('silane_equil')
CompileAndTest('stoichSolidKinetics')
CompileAndTest('surfkin')
CompileAndTest('surfSolver', 'surfSolverTest', 'surfaceSolver', None,
arguments='haca2.cti',
comparisons=[('results_blessed.txt', 'results.txt')],
@@ -256,18 +217,15 @@ CompileAndTest('VCS-NaCl', 'VCSnonideal/NaCl_equil',
'nacl_equil', 'good_out.txt',
options='-d 3',
artifacts=['vcs_equilibrate_res.csv']), # not testing this file because it's not really csv
vcs_LiSi = localenv.Program('VCSnonideal/LatticeSolid_LiSi/latsol',
'VCSnonideal/LatticeSolid_LiSi/latsol.cpp',
LIBS=cantera_libs)
vcs_LiSi_name = vcs_LiSi[0].name
Test('VCS-LiSi', 'VCSnonideal/LatticeSolid_LiSi', vcs_LiSi, 'output_blessed.txt',
artifacts=['vcs_equilibrate_res.csv', vcs_LiSi_name])
Test('VCS-LiSi-verbose', 'VCSnonideal/LatticeSolid_LiSi', vcs_LiSi, 'verbose_blessed.txt',
options='8',
artifacts=['vcs_equilibrate_res.csv', vcs_LiSi_name])
CompileAndTest('VPsilane_test', 'VPsilane_test', 'VPsilane_test', 'output_blessed.txt')

CompileAndTest('clib', 'clib_test', 'clib_test', 'output_blessed.txt',
vcs_LiSi = CompileAndTest('VCS-LiSi', 'VCSnonideal/LatticeSolid_LiSi', 'latsol',
artifacts=['vcs_equilibrate_res.csv'])
Test('VCS-LiSi-verbose', 'VCSnonideal/LatticeSolid_LiSi', vcs_LiSi.program,
'verbose_blessed.txt', options='8',
artifacts=['vcs_equilibrate_res.csv'])
CompileAndTest('VPsilane_test')

CompileAndTest('clib', 'clib_test', 'clib_test',
extensions=['^clib_test.c'], libs=['cantera_shared'])

# Force explicitly-named tests to run even if SCons thinks they're up to date
