diff --git a/_pytest/skipping.py b/_pytest/skipping.py
index ef9f601ca2e..11ae2d03df4 100644
--- a/_pytest/skipping.py
+++ b/_pytest/skipping.py
@@ -60,22 +60,31 @@ def nop(*args, **kwargs):
                             )
 
 
-class MarkEvaluator:
+class MarkEvaluator(object):
     def __init__(self, item, name):
         self.item = item
-        self.name = name
-
-    @property
-    def holder(self):
-        return self.item.keywords.get(self.name)
+        self._marks = None
+        self._mark = None
+        self._mark_name = name
 
     def __bool__(self):
-        return bool(self.holder)
+        self._marks = self._get_marks()
+        return bool(self._marks)
     __nonzero__ = __bool__
 
     def wasvalid(self):
         return not hasattr(self, 'exc')
 
+    def _get_marks(self):
+
+        keyword = self.item.keywords.get(self._mark_name)
+        if isinstance(keyword, MarkDecorator):
+            return [keyword.mark]
+        elif isinstance(keyword, MarkInfo):
+            return [x.combined for x in keyword]
+        else:
+            return []
+
     def invalidraise(self, exc):
         raises = self.get('raises')
         if not raises:
@@ -95,7 +104,7 @@ def istrue(self):
             fail("Error evaluating %r expression\n"
                  "    %s\n"
                  "%s"
-                 % (self.name, self.expr, "\n".join(msg)),
+                 % (self._mark_name, self.expr, "\n".join(msg)),
                  pytrace=False)
 
     def _getglobals(self):
@@ -107,40 +116,45 @@ def _getglobals(self):
     def _istrue(self):
         if hasattr(self, 'result'):
             return self.result
-        if self.holder:
-            if self.holder.args or 'condition' in self.holder.kwargs:
-                self.result = False
-                # "holder" might be a MarkInfo or a MarkDecorator; only
-                # MarkInfo keeps track of all parameters it received in an
-                # _arglist attribute
-                marks = getattr(self.holder, '_marks', None) \
-                    or [self.holder.mark]
-                for _, args, kwargs in marks:
-                    if 'condition' in kwargs:
-                        args = (kwargs['condition'],)
-                    for expr in args:
+        self._marks = self._get_marks()
+
+        if self._marks:
+            self.result = False
+            for mark in self._marks:
+                self._mark = mark
+                if 'condition' in mark.kwargs:
+                    args = (mark.kwargs['condition'],)
+                else:
+                    args = mark.args
+
+                for expr in args:
+                    self.expr = expr
+                    if isinstance(expr, six.string_types):
+                        d = self._getglobals()
+                        result = cached_eval(self.item.config, expr, d)
+                    else:
+                        if "reason" not in mark.kwargs:
+                            # XXX better be checked at collection time
+                            msg = "you need to specify reason=STRING " \
+                                  "when using booleans as conditions."
+                            fail(msg)
+                        result = bool(expr)
+                    if result:
+                        self.result = True
+                        self.reason = mark.kwargs.get('reason', None)
                         self.expr = expr
-                        if isinstance(expr, six.string_types):
-                            d = self._getglobals()
-                            result = cached_eval(self.item.config, expr, d)
-                        else:
-                            if "reason" not in kwargs:
-                                # XXX better be checked at collection time
-                                msg = "you need to specify reason=STRING " \
-                                      "when using booleans as conditions."
-                                fail(msg)
-                            result = bool(expr)
-                        if result:
-                            self.result = True
-                            self.reason = kwargs.get('reason', None)
-                            self.expr = expr
-                            return self.result
-            else:
-                self.result = True
-        return getattr(self, 'result', False)
+                        return self.result
+
+                if not args:
+                    self.result = True
+                    self.reason = mark.kwargs.get('reason', None)
+                    return self.result
+        return False
 
     def get(self, attr, default=None):
-        return self.holder.kwargs.get(attr, default)
+        if self._mark is None:
+            return default
+        return self._mark.kwargs.get(attr, default)
 
     def getexplanation(self):
         expl = getattr(self, 'reason', None) or self.get('reason', None)
@@ -155,17 +169,17 @@ def getexplanation(self):
 @hookimpl(tryfirst=True)
 def pytest_runtest_setup(item):
     # Check if skip or skipif are specified as pytest marks
-
+    item._skipped_by_mark = False
     skipif_info = item.keywords.get('skipif')
     if isinstance(skipif_info, (MarkInfo, MarkDecorator)):
         eval_skipif = MarkEvaluator(item, 'skipif')
         if eval_skipif.istrue():
-            item._evalskip = eval_skipif
+            item._skipped_by_mark = True
             skip(eval_skipif.getexplanation())
 
     skip_info = item.keywords.get('skip')
     if isinstance(skip_info, (MarkInfo, MarkDecorator)):
-        item._evalskip = True
+        item._skipped_by_mark = True
         if 'reason' in skip_info.kwargs:
             skip(skip_info.kwargs['reason'])
         elif skip_info.args:
@@ -212,7 +226,6 @@ def pytest_runtest_makereport(item, call):
     outcome = yield
     rep = outcome.get_result()
     evalxfail = getattr(item, '_evalxfail', None)
-    evalskip = getattr(item, '_evalskip', None)
     # unitttest special case, see setting of _unexpectedsuccess
    if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
         from _pytest.compat import _is_unittest_unexpected_success_a_failure
@@ -248,7 +261,7 @@ def pytest_runtest_makereport(item, call):
         else:
             rep.outcome = "passed"
             rep.wasxfail = explanation
-    elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
+    elif item._skipped_by_mark and rep.skipped and type(rep.longrepr) is tuple:
         # skipped by mark.skipif; change the location of the failure
         # to point to the item definition, otherwise it will display
         # the location of where the skip exception was raised within pytest
diff --git a/_pytest/unittest.py b/_pytest/unittest.py
index 585f8147298..7842d16580e 100644
--- a/_pytest/unittest.py
+++ b/_pytest/unittest.py
@@ -9,7 +9,6 @@
 from _pytest.config import hookimpl
 from _pytest.outcomes import fail, skip, xfail
 from _pytest.python import transfer_markers, Class, Module, Function
-from _pytest.skipping import MarkEvaluator
 
 
 def pytest_pycollect_makeitem(collector, name, obj):
@@ -134,8 +133,7 @@ def addSkip(self, testcase, reason):
         try:
             skip(reason)
         except skip.Exception:
-            self._evalskip = MarkEvaluator(self, 'SkipTest')
-            self._evalskip.result = True
+            self._skipped_by_mark = True
             self._addexcinfo(sys.exc_info())
 
     def addExpectedFailure(self, testcase, rawexcinfo, reason=""):
diff --git a/changelog/2767.removal b/changelog/2767.removal
new file mode 100644
index 00000000000..b9c3984cdbb
--- /dev/null
+++ b/changelog/2767.removal
@@ -0,0 +1 @@
+Remove the internal multi-typed attribute ``Node._evalskip`` and replace it with the boolean ``Node._skipped_by_mark``.
\ No newline at end of file
diff --git a/changelog/2767.trivial b/changelog/2767.trivial
new file mode 100644
index 00000000000..c42a06e07dc
--- /dev/null
+++ b/changelog/2767.trivial
@@ -0,0 +1 @@
+* remove unnecessary mark evaluator in unittest plugin
\ No newline at end of file
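Note for reviewers: a minimal sketch, not part of the patch, of the two skipif usages whose behaviour the rewritten MarkEvaluator._istrue() must preserve. The test names below are made up for illustration; a string condition is evaluated via cached_eval() against the item's globals, while a plain boolean condition requires an explicit reason= keyword, as enforced in the code above.

import sys

import pytest


# String condition: evaluated by cached_eval() inside MarkEvaluator._istrue().
@pytest.mark.skipif("sys.platform == 'win32'")
def test_string_condition():
    assert True


# Boolean condition: must carry reason=..., otherwise _istrue() calls
# fail("you need to specify reason=STRING when using booleans as conditions.").
@pytest.mark.skipif(sys.platform == 'win32', reason="does not run on windows")
def test_boolean_condition():
    assert True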