-
-
Notifications
You must be signed in to change notification settings - Fork 2.7k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
refactor mark evaluators #2773
refactor mark evaluators #2773
Changes from all commits
667e70f
a336509
9ad2b75
8480075
e3b7368
459cc40
8a6bdb2
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -60,22 +60,31 @@ def nop(*args, **kwargs): | |
) | ||
|
||
|
||
class MarkEvaluator: | ||
class MarkEvaluator(object): | ||
def __init__(self, item, name): | ||
self.item = item | ||
self.name = name | ||
|
||
@property | ||
def holder(self): | ||
return self.item.keywords.get(self.name) | ||
self._marks = None | ||
self._mark = None | ||
self._mark_name = name | ||
|
||
def __bool__(self): | ||
return bool(self.holder) | ||
self._marks = self._get_marks() | ||
return bool(self._marks) | ||
__nonzero__ = __bool__ | ||
|
||
def wasvalid(self): | ||
return not hasattr(self, 'exc') | ||
|
||
def _get_marks(self): | ||
|
||
keyword = self.item.keywords.get(self._mark_name) | ||
if isinstance(keyword, MarkDecorator): | ||
return [keyword.mark] | ||
elif isinstance(keyword, MarkInfo): | ||
return [x.combined for x in keyword] | ||
else: | ||
return [] | ||
|
||
def invalidraise(self, exc): | ||
raises = self.get('raises') | ||
if not raises: | ||
|
@@ -95,7 +104,7 @@ def istrue(self): | |
fail("Error evaluating %r expression\n" | ||
" %s\n" | ||
"%s" | ||
% (self.name, self.expr, "\n".join(msg)), | ||
% (self._mark_name, self.expr, "\n".join(msg)), | ||
pytrace=False) | ||
|
||
def _getglobals(self): | ||
|
@@ -107,40 +116,45 @@ def _getglobals(self): | |
def _istrue(self): | ||
if hasattr(self, 'result'): | ||
return self.result | ||
if self.holder: | ||
if self.holder.args or 'condition' in self.holder.kwargs: | ||
self.result = False | ||
# "holder" might be a MarkInfo or a MarkDecorator; only | ||
# MarkInfo keeps track of all parameters it received in an | ||
# _arglist attribute | ||
marks = getattr(self.holder, '_marks', None) \ | ||
or [self.holder.mark] | ||
for _, args, kwargs in marks: | ||
if 'condition' in kwargs: | ||
args = (kwargs['condition'],) | ||
for expr in args: | ||
self._marks = self._get_marks() | ||
|
||
if self._marks: | ||
self.result = False | ||
for mark in self._marks: | ||
self._mark = mark | ||
if 'condition' in mark.kwargs: | ||
args = (mark.kwargs['condition'],) | ||
else: | ||
args = mark.args | ||
|
||
for expr in args: | ||
self.expr = expr | ||
if isinstance(expr, six.string_types): | ||
d = self._getglobals() | ||
result = cached_eval(self.item.config, expr, d) | ||
else: | ||
if "reason" not in mark.kwargs: | ||
# XXX better be checked at collection time | ||
msg = "you need to specify reason=STRING " \ | ||
"when using booleans as conditions." | ||
fail(msg) | ||
result = bool(expr) | ||
if result: | ||
self.result = True | ||
self.reason = mark.kwargs.get('reason', None) | ||
self.expr = expr | ||
if isinstance(expr, six.string_types): | ||
d = self._getglobals() | ||
result = cached_eval(self.item.config, expr, d) | ||
else: | ||
if "reason" not in kwargs: | ||
# XXX better be checked at collection time | ||
msg = "you need to specify reason=STRING " \ | ||
"when using booleans as conditions." | ||
fail(msg) | ||
result = bool(expr) | ||
if result: | ||
self.result = True | ||
self.reason = kwargs.get('reason', None) | ||
self.expr = expr | ||
return self.result | ||
else: | ||
self.result = True | ||
return getattr(self, 'result', False) | ||
return self.result | ||
|
||
if not args: | ||
self.result = True | ||
self.reason = mark.kwargs.get('reason', None) | ||
return self.result | ||
return False | ||
|
||
def get(self, attr, default=None): | ||
return self.holder.kwargs.get(attr, default) | ||
if self._mark is None: | ||
return default | ||
return self._mark.kwargs.get(attr, default) | ||
|
||
def getexplanation(self): | ||
expl = getattr(self, 'reason', None) or self.get('reason', None) | ||
|
@@ -155,17 +169,17 @@ def getexplanation(self): | |
@hookimpl(tryfirst=True) | ||
def pytest_runtest_setup(item): | ||
# Check if skip or skipif are specified as pytest marks | ||
|
||
item._skipped_by_mark = False | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Should we make this part of the public API? Often users want to know if a test has been skipped in some hook, and the only way to do it currently is using the awkward: was_skipped = hasattr(item, "_evalskip") and item._evalskip.istrue() We might as well make it There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. While on that, it might also be useful to have the reason attached to it. If so, perhaps we should use a frozen item.skip_outcome.skipped == True
item.skip_outcome.reason == "skipped for some reason" There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I'm not going to introduce a new API in a refactoring/fixup attempt, There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I understand the sentiment, but by renaming it to _skipped_by_mark we break plugins that rely on the old attribute. Just pointing out we will bring a lot of pain to those people. Perhaps we can brainstorm a little on how the "skip" plugin decorates items with skip information? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. It's a private attribute and stays a private attribute There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. OK then, I respect your decision |
||
skipif_info = item.keywords.get('skipif') | ||
if isinstance(skipif_info, (MarkInfo, MarkDecorator)): | ||
eval_skipif = MarkEvaluator(item, 'skipif') | ||
if eval_skipif.istrue(): | ||
item._evalskip = eval_skipif | ||
item._skipped_by_mark = True | ||
skip(eval_skipif.getexplanation()) | ||
|
||
skip_info = item.keywords.get('skip') | ||
if isinstance(skip_info, (MarkInfo, MarkDecorator)): | ||
item._evalskip = True | ||
item._skipped_by_mark = True | ||
if 'reason' in skip_info.kwargs: | ||
skip(skip_info.kwargs['reason']) | ||
elif skip_info.args: | ||
|
@@ -212,7 +226,6 @@ def pytest_runtest_makereport(item, call): | |
outcome = yield | ||
rep = outcome.get_result() | ||
evalxfail = getattr(item, '_evalxfail', None) | ||
evalskip = getattr(item, '_evalskip', None) | ||
# unitttest special case, see setting of _unexpectedsuccess | ||
if hasattr(item, '_unexpectedsuccess') and rep.when == "call": | ||
from _pytest.compat import _is_unittest_unexpected_success_a_failure | ||
|
@@ -248,7 +261,7 @@ def pytest_runtest_makereport(item, call): | |
else: | ||
rep.outcome = "passed" | ||
rep.wasxfail = explanation | ||
elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple: | ||
elif item._skipped_by_mark and rep.skipped and type(rep.longrepr) is tuple: | ||
# skipped by mark.skipif; change the location of the failure | ||
# to point to the item definition, otherwise it will display | ||
# the location of where the skip exception was raised within pytest | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1 @@ | ||
Remove the internal multi-typed attribute ``Node._evalskip`` and replace it with the boolean ``Node._skipped_by_mark``. |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1 @@ | ||
* remove unnecessary mark evaluator in unittest plugin |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Shouldn't this loop over all marks trying to get the
attr
(which is really a kwarg,
actually), returning default
if no marks have it? Can we rename it to
find_kwarg
instead for clarity btw? To actually understand what this does I had to add an assert 0
and run the tests to figure out where it is called (because you can't actually do a search for "get"). There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
To exemplify:
With this we can even get rid of
self._mark
. There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
If one mark has run=True and another one doesn't, we get wrong results — welcome to why marks are so horrific.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I see, thanks for the explanation.
In that case, I suggest we change this to return all the kwarg values, and then the caller has responsibility to decide what to do with them.
I don't like how implicit this is. If you don't like this suggestion, at least I would rename
self._mark
to something clear albeit ugly likeself._first_true_mark
or something.There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Well, it started out as something much worse — I'll try to whip it into shape more.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
And about this suggestion @RonnyPfannschmidt?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
@nicoddemus that breaks current usage inside pytest at least
So, in order to go there, I'd prefer to keep that until we iterate it again.