Skip to content

Commit

Permalink
Fix skipping to operate on a per-environment basis
Browse files — browse the repository at this point in the history
  • Loading branch information
astrofrog committed Jan 12, 2018
1 parent 6f5af72 commit 1fa2cc9
Show file tree
Hide file tree
Showing 2 changed files with 25 additions and 12 deletions.
31 changes: 20 additions & 11 deletions asv/commands/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@

import logging
import traceback
from collections import defaultdict

from . import Command
from ..benchmarks import Benchmarks
Expand Down Expand Up @@ -209,15 +210,15 @@ def run(cls, conf, range_spec=None, steps=None, bench=None, parallel=1,
_returns['machine_params'] = machine_params.__dict__

for commit_hash in commit_hashes:
skipped_benchmarks = set()
skipped_benchmarks = defaultdict(lambda: set())

if skip_successful or skip_failed or skip_existing_commits:
try:
for result in iter_results_for_machine_and_hash(
conf.results_dir, machine_params.machine, commit_hash):

if skip_existing_commits:
skipped_benchmarks.update(benchmarks)
skipped_benchmarks[result.env_name].update(benchmarks)
break

for key in result.get_result_keys(benchmarks):
Expand All @@ -229,29 +230,37 @@ def run(cls, conf, range_spec=None, steps=None, bench=None, parallel=1,
failed = value is None or (isinstance(value, list) and None in value)

if skip_failed and failed:
skipped_benchmarks.add(key)
skipped_benchmarks[result.env_name].add(key)
if skip_successful and not failed:
skipped_benchmarks.add(key)
skipped_benchmarks[result.env_name].add(key)
except IOError:
pass

for env in environments:
for bench in benchmarks:
if bench in skipped_benchmarks:
if bench in skipped_benchmarks[env.name]:
log.step()

if not set(six.iterkeys(benchmarks)).difference(skipped_benchmarks):
continue

if commit_hash:
log.info(
"For {0} commit hash {1}:".format(
conf.project, commit_hash[:8]))

with log.indent():
for subenv in util.iter_chunks(environments, parallel):
log.info("Building for {0}".format(
', '.join([x.name for x in subenv])))

subenv_name = ', '.join([x.name for x in subenv])

# If there is only one sub-environment and all the
# benchmarks can be skipped, then no need to continue
for env in subenv:
if set(six.iterkeys(benchmarks)).difference(skipped_benchmarks[env.name]):
break
else:
log.info("No benchmarks to run for {0}".format(subenv_name))
continue

log.info("Building for {0}".format(subenv_name))

with log.indent():
args = [(env, conf, repo, commit_hash) for env in subenv]
Expand All @@ -274,7 +283,7 @@ def run(cls, conf, range_spec=None, steps=None, bench=None, parallel=1,
if success:
results = benchmarks.run_benchmarks(
env, show_stderr=show_stderr, quick=quick,
profile=profile, skip=skipped_benchmarks)
profile=profile, skip=skipped_benchmarks[env.name])
else:
results = benchmarks.skip_benchmarks(env)

Expand Down
6 changes: 5 additions & 1 deletion asv/results.py
Original file line number Diff line number Diff line change
Expand Up @@ -301,7 +301,7 @@ def get_result_value(self, key, params):
Returns
-------
value : {float, list of float}
Benchmark result value. If the benchmark is parameterized, return
Benchmark result value. If the benchmark is parameterized, return
a list of values.
"""
return _compatible_results(self._results[key],
Expand Down Expand Up @@ -580,3 +580,7 @@ def rm(self, result_dir):
@classmethod
def update(cls, path):
    """Migrate the results file at *path* to the current on-disk format.

    Delegates to ``util.update_json``, passing this class and its
    ``api_version`` — presumably so the helper can detect an older
    version number in the JSON and rewrite the file in place.
    (``util.update_json`` is defined elsewhere in the project;
    NOTE(review): exact migration semantics not visible here — confirm.)

    Parameters
    ----------
    path : str
        Path to the results JSON file to update.
    """
    util.update_json(cls, path, cls.api_version)

@property
def env_name(self):
    """str: Name of the environment these results were recorded under.

    Read-only view of the private ``_env_name`` attribute; used by
    callers to key per-environment data (e.g. skipped-benchmark sets)
    by environment name.
    """
    return getattr(self, "_env_name")

0 comments on commit 1fa2cc9

Please sign in to comment.