Skip to content

Commit

Permalink
Begin modifying class structure to make a full testsuite
Browse files Browse the repository at this point in the history
  • Loading branch information
Phil Tooley committed May 3, 2019
1 parent 7dad1b5 commit 536773e
Show file tree
Hide file tree
Showing 4 changed files with 215 additions and 42 deletions.
88 changes: 57 additions & 31 deletions benchmarking/pfire_benchmarking/application_routines.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,63 @@
# Filename used for a generated default mask when the config supplies none
# -- NOTE(review): consumer not visible in this chunk, confirm against callers
default_mask_name = "default_mask.mask"
# Fallback values merged underneath user-supplied config settings
config_defaults = {"mask": None}


class pFIRERunnerMixin:
    """ Mixin class to provide a pFIRE runner interface.

    Gives subclasses a `run_pfire` method which executes the `pfire`
    binary against a config file and records the input/output paths as
    attributes for later analysis.
    """

    def __init__(self, *args, **kwargs):
        # Cooperative init so the mixin composes with other base classes
        super(pFIRERunnerMixin, self).__init__(*args, **kwargs)
        self.pfire_fixed_path = None
        self.pfire_moved_path = None
        self.pfire_mask_path = None
        self.pfire_reg_path = None
        self.pfire_map_path = None
        # Fix: previously only assigned inside run_pfire, so reading it
        # before a run raised AttributeError (all sibling attributes above
        # are initialized here)
        self.pfire_logfile = None


    def run_pfire(self, config_path, comm_size=1):
        """ Run pFIRE using provided config file.

        :param config_path: path to the pFIRE config file
        :param comm_size: MPI communicator size (only 1 is supported)
        :raises RuntimeError: if pFIRE fails, or result paths cannot be
            extracted from its log
        """
        if comm_size != 1:
            raise RuntimeError("MPI pFIRE runs not yet supported")

        pfire_workdir, pfire_config = [os.path.normpath(x) for x in
                                       os.path.split(config_path)]
        config = ConfigObj(config_path)
        print("Running pFIRE on {}".format(pfire_config))

        self.pfire_fixed_path = os.path.join(pfire_workdir, config['fixed'])
        self.pfire_moved_path = os.path.join(pfire_workdir, config['moved'])
        try:
            self.pfire_mask_path = os.path.join(pfire_workdir, config['mask'])
        except KeyError:
            # Mask is optional in the config
            pass

        self.pfire_logfile = "{}_pfire.log".format(os.path.splitext(pfire_config)[0])
        with open(self.pfire_logfile, 'w') as logfile:
            pfire_args = ['pfire', pfire_config]
            print(config_path)
            res = sp.run(pfire_args, cwd=pfire_workdir, stdout=logfile,
                         stderr=logfile)

        if res.returncode != 0:
            raise RuntimeError("Failed to run pFIRE, check log for details: {}"
                               "".format(self.pfire_logfile))

        # Scrape the registered-image and map paths from the pFIRE log
        with open(self.pfire_logfile, 'r') as logfile:
            for line in logfile:
                if line.startswith("Saved registered image to "):
                    reg_path = line.replace("Saved registered image to ",
                                            "").strip()
                    self.pfire_reg_path = os.path.join(pfire_workdir, reg_path)
                elif line.startswith("Saved map to "):
                    map_path = line.replace("Saved map to ", "").strip()
                    self.pfire_map_path = os.path.join(pfire_workdir, map_path)

        if not (self.pfire_reg_path or self.pfire_map_path):
            raise RuntimeError("Failed to extract result path(s) from log")


class ResultObject:
"""
Small object to hold registration result info
Expand All @@ -36,37 +93,6 @@ def build_shirt_config(config_file):
defaults.merge(config)
return defaults

def run_pfire(config_file, comm_size=1):
    """ Run pFIRE using provided config file.

    :param config_file: path to the pFIRE config file
    :param comm_size: MPI communicator size (only 1 is supported)
    :returns: ResultObject describing the run's inputs and outputs
    :raises RuntimeError: on launch failure or unparseable log
    """
    # Fix: comm_size was previously accepted but silently ignored; reject
    # unsupported parallel runs up front (matches pFIRERunnerMixin.run_pfire)
    if comm_size != 1:
        raise RuntimeError("MPI pFIRE runs not yet supported")

    config = ConfigObj(config_file)
    print("Running pFIRE on {}".format(config_file))
    pfire_args = ['pfire', config_file]

    logfile_name = "{}_pfire.log".format(os.path.splitext(config_file)[0])
    with open(logfile_name, 'w') as logfile:
        res = sp.run(pfire_args, stdout=logfile, stderr=logfile)

    if res.returncode != 0:
        raise RuntimeError("Failed to run pFIRE, check log for details: {}"
                           "".format(logfile_name))

    # Scrape result paths from the pFIRE log output
    reg_path = str()
    map_path = str()
    with open(logfile_name, 'r') as logfile:
        for line in logfile:
            if line.startswith("Saving registered image to "):
                reg_path = line.replace("Saving registered image to ", "").strip()
            if line.startswith("Saving map to "):
                map_path = line.replace("Saving map to ", "").strip()
    if not reg_path:
        raise RuntimeError("Failed to extract registered image path from log")
    if not map_path:
        raise RuntimeError("Failed to extract map path from log")

    return ResultObject(reg_path, map_path, logfile_name, config['fixed'],
                        config['moved'])


def run_shirt(config_file):
Expand Down
58 changes: 47 additions & 11 deletions benchmarking/pfire_benchmarking/regression_validate.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
#!/usr/bin/env python3

import argparse
import os
import sys

from .application_routines import run_pfire
from .analysis_routines import compare_image_results
from .testinstance import TestInstance
from .application_routines import pFIRERunnerMixin

def parse_args():
    """ Parse command line arguments for the regression validator.

    Accepts the pFIRE config path plus optional accepted image/map paths.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument("pfire_config")
    parser.add_argument("--accepted_image", type=str)
    parser.add_argument("--accepted_map", type=str)

    return parser.parse_args()

def main():
    """ Entry point: run a single comparison test from the command line.
    """

    args = parse_args()

    # Fix: accepted_map previously received args.accepted_image, so the
    # map comparison silently compared against the wrong accepted file
    test = ComparisonTest(args.pfire_config,
                          accepted_image=args.accepted_image,
                          accepted_map=args.accepted_map)

    test.run()
    test.generate_report()


class ComparisonTest(TestInstance, pFIRERunnerMixin):
    """ Test by comparing with an accepted result image/map.

    At least one of accepted_image or accepted_map must be supplied.
    """

    def __init__(self, pfire_config, name=None, accepted_image=None,
                 accepted_map=None):
        # Check this before we go anywhere else
        if not (accepted_map or accepted_image):
            raise ValueError("At least one of accepted_image or accepted_map "
                             "must be provided")
        super().__init__(pfire_config, name=name)
        # Fix: TestInstance.__init__ does not chain to super(), so the
        # cooperative chain never reached pFIRERunnerMixin and the pfire_*
        # attributes (pfire_reg_path etc.) were never created. Initialize
        # the mixin explicitly so run()/generate_report() can rely on them.
        pFIRERunnerMixin.__init__(self)

        self.accepted_image_path = accepted_image
        self.accepted_map_path = accepted_map
        # Human-readable reason for a failed run(), None until a failure
        self.run_errstring = None

    def run(self):
        """ Run pfire against provided config.

        :returns: True on success, False on failure (the error message is
            stored in self.run_errstring)
        """
        try:
            self.run_pfire(self.pfire_config)
        except RuntimeError as err:
            self.run_errstring = str(err)
            return False

        return True

    def generate_report(self):
        """ Compare the registered image against the accepted result.

        NOTE(review): the comparison result is currently discarded --
        report generation is work in progress.
        """
        image_comparison = compare_image_results(self.pfire_fixed_path,
                                                 self.pfire_moved_path,
                                                 self.pfire_reg_path,
                                                 self.accepted_image_path)

# Allow direct execution of this module as a script
if __name__ == "__main__":
    main()
83 changes: 83 additions & 0 deletions benchmarking/pfire_benchmarking/testdespatcher.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
#!/usr/bin/env python3


import os
from textwrap import dedent
from docutils.core import publish_string
from configobj import ConfigObj

from . import RegressionTest, ComparisonTest

class TestDespatcher:
    """ Aggregates and runs a collection of tests.

    Tests are registered from .testconf files, run in bulk, and an
    aggregate report page is written linking the individual reports.
    """

    # Maps the 'type' key of a testconf file to the implementing class
    test_types = {"regression": RegressionTest,
                  "comparison": ComparisonTest}

    def __init__(self):
        # Instantiated test objects, in the order they were added
        self.tests = []

    def add_test(self, testconfig):
        """ Add a new test by parsing configfile.

        :param testconfig: path to a .testconf file
        :raises ValueError: if the config's 'type' is missing or unknown
        """
        with open(testconfig, 'r') as fh:
            config = ConfigObj(fh)

        # Fix: resolve the test class first -- previously the constructor
        # call was inside the try block, so a KeyError raised while
        # building the test was misreported as an unknown test type.
        try:
            test_class = self.test_types[config['type'].lower()]
        except KeyError:
            types_str = ", ".join(self.test_types.keys())
            raise ValueError("Test type must be one of \"{}\""
                             "".format(types_str))

        self.tests.append(test_class(config))


    def find_tests(self, search_dir):
        """ Find all tests in a directory tree.

        Any file ending in .testconf is treated as a test definition.
        """
        for dirname, _, fnames in os.walk(search_dir):
            for fname in fnames:
                if fname.endswith(".testconf"):
                    self.add_test(os.path.join(dirname, fname))

    def run_tests(self):
        """ Run all tests producing report files
        """
        for test in self.tests:
            test.run()
            test.generate_report()


    def create_aggregate_report(self):
        """ Create summary report page linking to individual tests.

        Writes index.rst and a rendered index.html to the current
        working directory.
        """
        result_files = {}
        for test in self.tests:
            result_files[test.name] = test.report_filename

        index_rst = []

        index_rst.append(dedent("""\
            =================
            Testsuite Results
            =================
            """))

        # Fix: a reST hyperlink target must be separated from the
        # preceding paragraph by a blank line, otherwise docutils cannot
        # parse the explicit markup
        for test_name, result_file in result_files.items():
            index_rst.append(dedent("""\
                `{0}`_

                .. _{0}: {1}
                """.format(test_name, result_file)))

        index_rst = "\n".join(index_rst)

        with open("index.rst", 'wt') as fh:
            fh.write(index_rst)

        index_html = publish_string(index_rst, writer_name='html5')

        with open("index.html", 'wt') as fh:
            fh.write(index_html.decode())
28 changes: 28 additions & 0 deletions benchmarking/pfire_benchmarking/testinstance.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
#!/usr/bin/env python3

import os

from configobj import ConfigObj

class TestInstance(object):
    """ Base class for specific test instances.

    Holds the test name (derived from the config filename unless given
    explicitly) and the path to the pFIRE config to run.
    """

    def __init__(self, pfire_config, name=None):
        # Fix: chain to super() so this class cooperates in multiple
        # inheritance (e.g. ComparisonTest also inherits pFIRERunnerMixin,
        # whose __init__ must run to create the pfire_* attributes)
        super().__init__()

        if name is not None:
            self.name = name
        else:
            # Default the test name to the config filename sans extension
            self.name = os.path.splitext(os.path.basename(pfire_config))[0]

        self.pfire_config = pfire_config

    def run(self):
        """ Execute the test -- must be provided by subclasses. """
        raise NotImplementedError("testinstance should be subclassed")


    def generate_report(self):
        """ Produce a result report -- must be provided by subclasses. """
        raise NotImplementedError("testinstance should be subclassed")




0 comments on commit 536773e

Please sign in to comment.