Minor refactoring in benchmark runner #694

Merged · 12 commits · Aug 12, 2018
79 changes: 45 additions & 34 deletions asv/benchmarks.py
@@ -73,6 +73,41 @@ def __init__(self, conf, benchmarks, regex=None):
if not regex or any(re.search(reg, benchmark['name']) for reg in regex):
self[benchmark['name']] = benchmark

@property
def benchmark_selection(self):
"""
Active sets of parameterized benchmarks.
"""
return self._benchmark_selection

@property
def benchmark_dir(self):
"""
Benchmark directory.
"""
return self._benchmark_dir

def filter_out(self, skip):
"""
Return a new Benchmarks object, with some benchmarks filtered out.
"""
benchmarks = super(Benchmarks, self).__new__(self.__class__)
benchmarks._conf = self._conf
benchmarks._benchmark_dir = self._benchmark_dir
benchmarks._all_benchmarks = self._all_benchmarks

selected_idx = {}

for name, benchmark in six.iteritems(self):
if name not in skip:
benchmarks[name] = benchmark
if name in self._benchmark_selection:
selected_idx[name] = self._benchmark_selection[name]

benchmarks._benchmark_selection = selected_idx

return benchmarks

@classmethod
def discover(cls, conf, repo, environments, commit_hash, regex=None):
"""
@@ -248,7 +283,7 @@ def load(cls, conf):
"regenerate benchmarks.json".format(str(err)))

def run_benchmarks(self, env, show_stderr=False, quick=False, profile=False,
skip=None, extra_params=None):
extra_params=None):
"""
Run all of the benchmarks in the given `Environment`.

@@ -270,9 +305,6 @@ def run_benchmarks(self, env, show_stderr=False, quick=False, profile=False,
When `True`, run the benchmark through the `cProfile`
profiler.

skip : set, optional
Benchmark names to skip.

extra_params : dict, optional
Override values for benchmark attributes.

@@ -298,29 +330,12 @@ def run_benchmarks(self, env, show_stderr=False, quick=False, profile=False,
- `profile`: If `profile` is `True`, this key will exist,
and be a byte string containing the cProfile data.
"""
log.info("Benchmarking {0}".format(env.name))

benchmarks = sorted(list(six.iteritems(self)))

# Remove skipped benchmarks
if skip:
benchmarks = [
(name, benchmark) for (name, benchmark) in
benchmarks if name not in skip]

# Setup runner and run benchmarks
times = {}
benchmark_runner = runner.BenchmarkRunner(benchmarks,
self._benchmark_dir,
show_stderr=show_stderr,
quick=quick,
extra_params=extra_params,
profile=profile,
selected_idx=self._benchmark_selection)
jobs = benchmark_runner.plan()
times = benchmark_runner.run(jobs, env)

return times
return runner.BenchmarkRunner(self,
show_stderr=show_stderr,
quick=quick,
extra_params=extra_params,
profile=profile).run(env)

def skip_benchmarks(self, env):
"""
Expand All @@ -329,14 +344,10 @@ def skip_benchmarks(self, env):
log.warn("Skipping {0}".format(env.name))
with log.indent():
times = {}
for name in self:
for name, benchmark in six.iteritems(self):
log.step()
log.warn('Benchmark {0} skipped'.format(name))
timestamp = datetime.datetime.utcnow()
times[name] = {'result': None,
'samples': None,
'stats': None,
'params': [],
'started_at': timestamp,
'ended_at': timestamp}
times[name] = runner.get_failed_benchmark_result(
name, benchmark, self._benchmark_selection.get(name))

return times
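
The skip keyword is gone from run_benchmarks: callers now build a trimmed benchmark set up front with filter_out and let the new runner module handle scheduling. A minimal sketch of the resulting call pattern, mirroring the asv/commands/run.py change further down in this diff (the set of names is illustrative, not taken from the PR):

    # Sketch of the new call pattern; `benchmarks` is a Benchmarks mapping
    # and `env` an Environment, as elsewhere in asv.
    skipped_names = {'benchmarks.TimeSuite.time_example'}  # hypothetical names

    benchmark_set = benchmark_suite.filter_out(skipped_names)  # new Benchmarks object
    results = benchmark_set.run_benchmarks(env, show_stderr=True, quick=False)

The original Benchmarks object is left untouched; filter_out copies the configuration, benchmark directory, and any active parameter selection into the new instance.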
2 changes: 1 addition & 1 deletion asv/commands/find.py
@@ -123,7 +123,7 @@ def do_benchmark(i):
env.install_project(conf, repo, commit_hash)
x = benchmarks.run_benchmarks(
env, show_stderr=show_stderr)
result = list(x.values())[0]['result']
result = list(x.values())[0].result

results[i] = result

2 changes: 1 addition & 1 deletion asv/commands/profiling.py
@@ -185,7 +185,7 @@ def run(cls, conf, benchmark, revision=None, gui=None, output=None,
results = benchmarks.run_benchmarks(
env, show_stderr=True, quick=False, profile=True)

profile_data = results[benchmark]['profile']
profile_data = results[benchmark].profile

if gui is not None:
log.debug("Opening gui {0}".format(gui))
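
Both find.py and profiling.py now read attributes off the returned result objects instead of indexing dicts. The result class itself lives in asv/runner.py, which is not part of this excerpt, so the stand-in below is only an assumption inferred from the attributes used across this diff (result, samples, stats, params, profile, started_at, ended_at); the real definition may differ.

    # Assumed shape of the per-benchmark result object returned by
    # run_benchmarks(); the actual class is defined in asv/runner.py.
    import collections

    BenchmarkResult = collections.namedtuple(
        'BenchmarkResult',
        ['result', 'samples', 'stats', 'params', 'profile',
         'started_at', 'ended_at'])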
15 changes: 7 additions & 8 deletions asv/commands/run.py
@@ -287,13 +287,14 @@ def run(cls, conf, range_spec=None, steps=None, bench=None, attribute=None, para
params['python'] = env.python
params.update(env.requirements)

benchmark_set = benchmarks.filter_out(skipped_benchmarks[env.name])

if success:
results = benchmarks.run_benchmarks(
results = benchmark_set.run_benchmarks(
env, show_stderr=show_stderr, quick=quick,
profile=profile, skip=skipped_benchmarks[env.name],
extra_params=attribute)
profile=profile, extra_params=attribute)
else:
results = benchmarks.skip_benchmarks(env)
results = benchmark_set.skip_benchmarks(env)

if dry_run or isinstance(env, environment.ExistingEnvironment):
continue
@@ -307,10 +308,8 @@ def run(cls, conf, range_spec=None, steps=None, bench=None, attribute=None, para
env.name)

for benchmark_name, d in six.iteritems(results):
if not record_samples:
d['samples'] = None

benchmark_version = benchmarks[benchmark_name]['version']
result.add_result(benchmark_name, d, benchmark_version)
result.add_result(benchmark_name, d, benchmark_version,
record_samples=record_samples)

result.update_save(conf.results_dir)
72 changes: 62 additions & 10 deletions asv/console.py
@@ -21,6 +21,9 @@
import six
from six.moves import xrange, input

from . import util


WIN = (os.name == "nt")


@@ -278,7 +281,9 @@ def _stream_formatter(self, record):
color_print('·' * self._indent, end='')
color_print(' ', end='')

if record.levelno < logging.DEBUG:
if hasattr(record, 'color'):
color = record.color
elif record.levelno < logging.DEBUG:
color = 'default'
elif record.levelno < logging.INFO:
color = 'default'
@@ -325,17 +330,30 @@ def dot(self):

def set_nitems(self, n):
"""
Set the number of items in a lengthy process. Each of these
Set the number of remaining items to process. Each of these
steps should be incremented through using `step`.

Can be called multiple times. The progress percentage is ensured
to be non-decreasing, except if 100% was already reached in which
case it is restarted from 0%.
"""
self._total = n
try:
# Ensure count/total is nondecreasing
self._total = util.ceildiv(n * self._total, self._total - self._count)
self._count = self._total - n
except ZeroDivisionError:
# Reset counting from start
self._total = n
self._count = 0

def step(self):
"""
Write that a step has been completed. A percentage is
displayed along with it.

If we are stepping beyond the number of items, stop counting.
"""
self._count += 1
self._count = min(self._total, self._count + 1)

def enable(self, verbose=False):
sh = logging.StreamHandler()
@@ -358,21 +376,54 @@ def set_level(self, level):
def is_debug_enabled(self):
return self._logger.getEffectiveLevel() <= logging.DEBUG

def _message(self, routine, message, reserve_space=False, color=None):
kwargs = {}
if color is not None:
kwargs['extra'] = dict(color=color)

if reserve_space:
max_width = max(16, util.get_terminal_width() - 33)
message = truncate_left(message, max_width)
self._prev_message = message

routine(message, **kwargs)

def info(self, *args, **kwargs):
self._logger.info(*args, **kwargs)
self._message(self._logger.info, *args, **kwargs)

def warn(self, *args, **kwargs):
self._logger.warn(*args, **kwargs)
self._message(self._logger.warn, *args, **kwargs)

def debug(self, *args, **kwargs):
self._logger.debug(*args, **kwargs)
self._message(self._logger.debug, *args, **kwargs)

def error(self, *args, **kwargs):
self._logger.error(*args, **kwargs)
self._message(self._logger.error, *args, **kwargs)

def add(self, msg):
_write_with_fallback(msg, sys.stdout.write, sys.stdout)
sys.stdout.flush()
if self._needs_newline:
_write_with_fallback(msg, sys.stdout.write, sys.stdout)
sys.stdout.flush()
else:
self.info(msg)

def add_padded(self, msg):
"""
Final part of two-part info message.
Should be preceded by a call to info/warn/...(msg, reserve_space=True)
"""
if self._prev_message is None:
# No previous part: print as an info message
self.info(msg)
return

padding_length = util.get_terminal_width() - len(self._prev_message) - 14 - 1 - len(msg)
if WIN:
padding_length -= 1
padding = " "*padding_length

self._prev_message = None
self.add(" {0}{1}".format(padding, msg))

def flush(self):
"""
Expand All @@ -384,4 +435,5 @@ def flush(self):
self._needs_newline = False
sys.stdout.flush()


log = Log()
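
The new set_nitems arithmetic keeps the displayed percentage from going backwards when the number of remaining items changes mid-run. A small worked example of the same computation, outside asv (ceildiv here is a local stand-in for the util.ceildiv helper the patch imports):

    # Reproduces the progress bookkeeping in Log.set_nitems.
    def ceildiv(a, b):
        return -(-a // b)

    total, count = 10, 4                         # 4 of 10 steps done: 40%
    n = 3                                        # 3 remaining items announced
    total = ceildiv(n * total, total - count)    # ceil(30 / 6) = 5
    count = total - n                            # 5 - 3 = 2 -> still 40%
    # Three further step() calls now take the bar from 40% to 100%.
    # If count == total (100% already shown), the division above raises
    # ZeroDivisionError and the counter restarts at 0 of n.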
27 changes: 15 additions & 12 deletions asv/results.py
@@ -381,7 +381,8 @@ def remove_result(self, key):
# Remove version (may be missing)
self._benchmark_version.pop(key, None)

def add_result(self, benchmark_name, result, benchmark_version):
def add_result(self, benchmark_name, result, benchmark_version,
record_samples=False):
"""
Add benchmark result.

@@ -390,21 +391,23 @@ def add_result(self, benchmark_name, result, benchmark_version):
benchmark_name : str
Name of benchmark

result : dict
Result of the benchmark, as returned by `benchmarks.run_benchmark`.
result : runner.BenchmarkResult
Result of the benchmark.

"""
self._results[benchmark_name] = result['result']
self._samples[benchmark_name] = result['samples']
self._stats[benchmark_name] = result['stats']
self._benchmark_params[benchmark_name] = result['params']
self._started_at[benchmark_name] = util.datetime_to_js_timestamp(result['started_at'])
self._ended_at[benchmark_name] = util.datetime_to_js_timestamp(result['ended_at'])
self._results[benchmark_name] = result.result
if record_samples:
self._samples[benchmark_name] = result.samples
else:
self._samples[benchmark_name] = None
self._stats[benchmark_name] = result.stats
self._benchmark_params[benchmark_name] = result.params
self._started_at[benchmark_name] = util.datetime_to_js_timestamp(result.started_at)
self._ended_at[benchmark_name] = util.datetime_to_js_timestamp(result.ended_at)
self._benchmark_version[benchmark_name] = benchmark_version

if 'profile' in result and result['profile']:
profile_data = base64.b64encode(
zlib.compress(result['profile']))
if result.profile:
profile_data = base64.b64encode(zlib.compress(result.profile))
if sys.version_info[0] >= 3:
profile_data = profile_data.decode('ascii')
self._profiles[benchmark_name] = profile_data
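
Whether samples are kept is now decided at storage time instead of by the caller blanking out the result dict. A hypothetical call site for the extended signature (the benchmark name and version string are made up; res stands for a runner result object as sketched above):

    # Hypothetical usage of the new add_result signature on a Results object.
    result.add_result('benchmarks.TimeSuite.time_example', res,
                      benchmark_version='1',
                      record_samples=False)   # samples are dropped on save

    # The stored profile string can later be recovered with the inverse
    # transform: zlib.decompress(base64.b64decode(profile_data))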