Fix skipping to operate on a per-environment basis #603

Merged
2 commits merged on Jan 20, 2018
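The core of the change below is that `skipped_benchmarks` becomes a mapping from environment name to a set of skipped benchmark keys, instead of one set shared by every environment. A minimal sketch of the idea, separate from the diff itself (the environment and benchmark names are hypothetical, and `defaultdict(set)` is equivalent to the `defaultdict(lambda: set())` used in the patch):

# Sketch only: per-environment skip sets built with defaultdict.
# "conda-py2.7", "conda-py3.6" and "time_sort" are made-up names.
from collections import defaultdict

skipped_benchmarks = defaultdict(set)

# A result recorded for one environment only marks that environment's set.
skipped_benchmarks["conda-py2.7"].add("time_sort")

# An environment with no recorded results transparently yields an empty set,
# so membership tests like `bench in skipped_benchmarks[env.name]` never raise.
assert "time_sort" in skipped_benchmarks["conda-py2.7"]
assert "time_sort" not in skipped_benchmarks["conda-py3.6"]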
30 changes: 19 additions & 11 deletions asv/commands/run.py
@@ -8,6 +8,7 @@

import logging
import traceback
from collections import defaultdict

from . import Command
from ..benchmarks import Benchmarks
@@ -209,15 +210,15 @@ def run(cls, conf, range_spec=None, steps=None, bench=None, parallel=1,
_returns['machine_params'] = machine_params.__dict__

for commit_hash in commit_hashes:
skipped_benchmarks = set()
skipped_benchmarks = defaultdict(lambda: set())

if skip_successful or skip_failed or skip_existing_commits:
try:
for result in iter_results_for_machine_and_hash(
conf.results_dir, machine_params.machine, commit_hash):

if skip_existing_commits:
skipped_benchmarks.update(benchmarks)
skipped_benchmarks[result.env_name].update(benchmarks)
break

for key in result.get_result_keys(benchmarks):
@@ -229,29 +230,36 @@ def run(cls, conf, range_spec=None, steps=None, bench=None, parallel=1,
failed = value is None or (isinstance(value, list) and None in value)

if skip_failed and failed:
skipped_benchmarks.add(key)
skipped_benchmarks[result.env_name].add(key)
if skip_successful and not failed:
skipped_benchmarks.add(key)
skipped_benchmarks[result.env_name].add(key)
except IOError:
pass

for env in environments:
for bench in benchmarks:
if bench in skipped_benchmarks:
if bench in skipped_benchmarks[env.name]:
log.step()

if not set(six.iterkeys(benchmarks)).difference(skipped_benchmarks):
continue

if commit_hash:
log.info(
"For {0} commit hash {1}:".format(
conf.project, commit_hash[:8]))

with log.indent():
for subenv in util.iter_chunks(environments, parallel):
log.info("Building for {0}".format(
', '.join([x.name for x in subenv])))

subenv_name = ', '.join([x.name for x in subenv])
@pv (Collaborator) commented on Jan 13, 2018:

I think the right thing to do here is to do

active_environments = [env for env in environments 
    if set(six.iterkeys(benchmarks)).difference(skipped_benchmarks[env.name])]

if not active_environments:
    continue

if commit_hash:
...
for subenv in util.iter_chunks(active_environments, parallel):
...

The PR author (Contributor) replied:

I agree, this is cleaner - done!


# If all the benchmarks can be skipped, no need to continue
for env in subenv:
if set(six.iterkeys(benchmarks)).difference(skipped_benchmarks[env.name]):
break
else:
log.info("No benchmarks to run for {0}".format(subenv_name))
continue

log.info("Building for {0}".format(subenv_name))

with log.indent():
args = [(env, conf, repo, commit_hash) for env in subenv]
@@ -274,7 +282,7 @@ def run(cls, conf, range_spec=None, steps=None, bench=None, parallel=1,
if success:
results = benchmarks.run_benchmarks(
env, show_stderr=show_stderr, quick=quick,
profile=profile, skip=skipped_benchmarks)
profile=profile, skip=skipped_benchmarks[env.name])
else:
results = benchmarks.skip_benchmarks(env)

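For the situation raised in the review comment above — an environment whose benchmarks are all already skipped — the reviewer's suggestion is to filter the environment list up front. A rough, self-contained sketch of that filtering (not the actual asv code; the Env class, the environment names, and the literal data are stand-ins):

# Sketch of the active-environment filtering suggested by @pv above.
class Env:
    def __init__(self, name):
        self.name = name

environments = [Env("conda-py2.7"), Env("conda-py3.6")]
benchmarks = {"time_sort": None, "mem_list": None}
skipped_benchmarks = {
    "conda-py2.7": {"time_sort", "mem_list"},   # everything already recorded
    "conda-py3.6": {"time_sort"},               # one benchmark left to run
}

# Keep only environments that still have at least one benchmark to run.
active_environments = [
    env for env in environments
    if set(benchmarks).difference(skipped_benchmarks.get(env.name, set()))
]

assert [env.name for env in active_environments] == ["conda-py3.6"]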
6 changes: 5 additions & 1 deletion asv/results.py
@@ -301,7 +301,7 @@ def get_result_value(self, key, params):
Returns
-------
value : {float, list of float}
Benchmark result value. If the benchmark is parameterized, return
Benchmark result value. If the benchmark is parameterized, return
a list of values.
"""
return _compatible_results(self._results[key],
@@ -580,3 +580,7 @@ def rm(self, result_dir):
@classmethod
def update(cls, path):
util.update_json(cls, path, cls.api_version)

@property
def env_name(self):
return self._env_name
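
The new env_name property is what lets run.py key its skip sets by environment: while iterating stored results, each result's keys are grouped under result.env_name. A simplified paraphrase of that loop (get_result_keys and env_name are the calls used in the diff; the helper function itself is only illustrative, and the skip_failed / skip_successful filtering from the diff is omitted):

# Illustrative only: building per-environment skip sets from stored results.
from collections import defaultdict

def collect_skipped(results, benchmarks):
    """Group already-recorded result keys by the environment that produced them."""
    skipped = defaultdict(set)
    for result in results:  # e.g. results yielded by iter_results_for_machine_and_hash
        for key in result.get_result_keys(benchmarks):
            skipped[result.env_name].add(key)
    return skipped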