Don't override pytest's default protocol #32

Merged
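
The gist of the change: instead of re-implementing the setup/call/teardown phases and report generation inside its own pytest_runtest_protocol hook, the plugin now wraps item.runtest with the instrumentation and returns None, deferring to pytest's default protocol so output capture, reporting, and other plugins (e.g. pytest-xdist) behave normally. A minimal sketch of the pattern, with a hypothetical instrument() placeholder standing in for the plugin's real instrumentation wrapper:

import functools

import pytest


def instrument(fn):
    # Hypothetical stand-in for the plugin's real wrapper, which pauses GC
    # and collects callgrind stats around the call.
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        return fn(*args, **kwargs)

    return wrapped


@pytest.hookimpl(tryfirst=True)
def pytest_runtest_protocol(item, nextitem):
    # Only the test body is wrapped; returning None defers to pytest's
    # default protocol, so capture, reporting, and other plugins run as usual.
    item.runtest = instrument(item.runtest)
    return None

Because the hook returns None, pytest falls through to its standard run protocol and the wrapped runtest is invoked during the normal call phase.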
1 change: 1 addition & 0 deletions .pre-commit-config.yaml
@@ -16,4 +16,5 @@ repos:
rev: v0.3.3
hooks:
- id: ruff
args: [--fix]
- id: ruff-format
1 change: 1 addition & 0 deletions src/pytest_codspeed/_wrapper/.gitignore
@@ -1 +1,2 @@
dist_callgrind_wrapper.*
build.lock
114 changes: 51 additions & 63 deletions src/pytest_codspeed/plugin.py
@@ -1,5 +1,6 @@
from __future__ import annotations

import functools
import gc
import os
import pkgutil
@@ -16,11 +17,12 @@
from ._wrapper import get_lib

if TYPE_CHECKING:
from typing import Any, Callable, TypeVar
from typing import Any, Callable, ParamSpec, TypeVar

from ._wrapper import LibType

T = TypeVar("T")
P = ParamSpec("P")

IS_PYTEST_BENCHMARK_INSTALLED = pkgutil.find_loader("pytest_benchmark") is not None
SUPPORTS_PERF_TRAMPOLINE = sys.version_info >= (3, 12)
@@ -172,86 +174,72 @@ def pytest_collection_modifyitems(

def _run_with_instrumentation(
lib: LibType,
nodeId: str,
nodeid: str,
config: pytest.Config,
fn: Callable[..., Any],
*args,
**kwargs,
):
fn: Callable[P, T],
*args: P.args,
**kwargs: P.kwargs,
) -> T:
is_gc_enabled = gc.isenabled()
if is_gc_enabled:
gc.collect()
gc.disable()

result = None

def __codspeed_root_frame__():
nonlocal result
result = fn(*args, **kwargs)

if SUPPORTS_PERF_TRAMPOLINE:
# Warmup CPython performance map cache
__codspeed_root_frame__()
lib.zero_stats()
lib.start_instrumentation()
__codspeed_root_frame__()
lib.stop_instrumentation()
uri = get_git_relative_uri(nodeId, config.rootpath)
lib.dump_stats_at(uri.encode("ascii"))
if is_gc_enabled:
gc.enable()
def __codspeed_root_frame__() -> T:
return fn(*args, **kwargs)

try:
if SUPPORTS_PERF_TRAMPOLINE:
# Warmup CPython performance map cache
__codspeed_root_frame__()

lib.zero_stats()
lib.start_instrumentation()
try:
return __codspeed_root_frame__()
finally:
# Ensure instrumentation is stopped even if the test failed
lib.stop_instrumentation()
uri = get_git_relative_uri(nodeid, config.rootpath)
lib.dump_stats_at(uri.encode("ascii"))
finally:
# Ensure GC is re-enabled even if the test failed
if is_gc_enabled:
gc.enable()


def wrap_runtest(
lib: LibType,
nodeid: str,
config: pytest.Config,
fn: Callable[P, T],
) -> Callable[P, T]:
@functools.wraps(fn)
def wrapped(*args: P.args, **kwargs: P.kwargs) -> T:
return _run_with_instrumentation(lib, nodeid, config, fn, *args, **kwargs)

return result
return wrapped


@pytest.hookimpl(tryfirst=True)
def pytest_runtest_protocol(item: pytest.Item, nextitem: pytest.Item | None):
plugin = get_plugin(item.config)
if not plugin.is_codspeed_enabled or not should_benchmark_item(item):
return (
None # Defer to the default test protocol since no benchmarking is needed
)
# Defer to the default test protocol since no benchmarking is needed
return None

if has_benchmark_fixture(item):
return None # Instrumentation is handled by the fixture
# Instrumentation is handled by the fixture
return None

plugin.benchmark_count += 1
if not plugin.should_measure:
return None # Benchmark counted but will be run in the default protocol

# Setup phase
reports = []
ihook = item.ihook
ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
setup_call = pytest.CallInfo.from_call(
lambda: ihook.pytest_runtest_setup(item=item, nextitem=nextitem), "setup"
)
setup_report = ihook.pytest_runtest_makereport(item=item, call=setup_call)
ihook.pytest_runtest_logreport(report=setup_report)
reports.append(setup_report)
# Run phase
if setup_report.passed and not item.config.getoption("setuponly"):
assert plugin.lib is not None
runtest_call = pytest.CallInfo.from_call(
lambda: _run_with_instrumentation(
plugin.lib, item.nodeid, item.config, item.runtest
),
"call",
)
runtest_report = ihook.pytest_runtest_makereport(item=item, call=runtest_call)
ihook.pytest_runtest_logreport(report=runtest_report)
reports.append(runtest_report)

# Teardown phase
teardown_call = pytest.CallInfo.from_call(
lambda: ihook.pytest_runtest_teardown(item=item, nextitem=nextitem), "teardown"
)
teardown_report = ihook.pytest_runtest_makereport(item=item, call=teardown_call)
ihook.pytest_runtest_logreport(report=teardown_report)
reports.append(teardown_report)
ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
if not plugin.should_measure or not plugin.lib:
# Benchmark counted but will be run in the default protocol
return None

return reports # Deny further protocol hooks execution
# Wrap runtest and defer to default protocol
item.runtest = wrap_runtest(plugin.lib, item.nodeid, item.config, item.runtest)
return None


class BenchmarkFixture:
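
The try/finally layering in _run_with_instrumentation above is the key robustness fix: instrumentation is always stopped and stats are always dumped, and the garbage collector is always restored, even when the benchmarked test raises. A standalone, illustrative restatement of the GC part (a sketch for clarity, not part of the plugin's API):

import contextlib
import gc


@contextlib.contextmanager
def gc_paused():
    # Pause the garbage collector for the duration of the block and always
    # restore its previous state, mirroring the GC handling above.
    was_enabled = gc.isenabled()
    if was_enabled:
        gc.collect()  # flush pending garbage before disabling the collector
        gc.disable()
    try:
        yield
    finally:
        if was_enabled:
            gc.enable()

With such a helper, the measured call would simply run inside with gc_paused(): fn(*args, **kwargs).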
6 changes: 6 additions & 0 deletions tests/conftest.py
@@ -37,3 +37,9 @@
skip_with_perf_trampoline = pytest.mark.skipif(
IS_PERF_TRAMPOLINE_SUPPORTED, reason="perf trampoline is supported"
)

IS_PYTEST_XDIST_INSTALLED = importlib.util.find_spec("pytest_xdist") is not None
skip_without_pytest_xdist = pytest.mark.skipif(
not IS_PYTEST_XDIST_INSTALLED,
reason="pytest_xdist not installed",
)
55 changes: 53 additions & 2 deletions tests/test_pytest_plugin.py
@@ -8,6 +8,7 @@
skip_with_pytest_benchmark,
skip_without_perf_trampoline,
skip_without_pytest_benchmark,
skip_without_pytest_xdist,
skip_without_valgrind,
)

@@ -289,12 +290,12 @@ def test_perf_maps_generation(pytester: pytest.Pytester, codspeed_env) -> None:

@pytest.mark.benchmark
def test_some_addition_marked():
return 1 + 1
assert 1 + 1

def test_some_addition_fixtured(benchmark):
@benchmark
def fixtured_child():
return 1 + 1
assert 1 + 1
"""
)
with codspeed_env():
@@ -324,6 +325,7 @@ def fixtured_child():

@skip_without_valgrind
@skip_with_pytest_benchmark
@skip_without_pytest_xdist
def test_pytest_xdist_concurrency_compatibility(
pytester: pytest.Pytester, codspeed_env
) -> None:
@@ -346,3 +348,52 @@ def test_my_stuff(benchmark, i):
result = pytester.runpytest("--codspeed", "-n", "128")
assert result.ret == 0, "the run should have succeeded"
result.stdout.fnmatch_lines(["*256 passed*"])


@skip_without_valgrind
def test_print(pytester: pytest.Pytester, codspeed_env) -> None:
"""Test print statements are captured by pytest (i.e., not printed to terminal in
the middle of the progress bar) and only displayed after test run (on failures)."""
pytester.makepyfile(
"""
import pytest, sys

@pytest.mark.benchmark
def test_print():
print("print to stdout")
print("print to stderr", file=sys.stderr)
"""
)
with codspeed_env():
result = pytester.runpytest("--codspeed")
assert result.ret == 0, "the run should have succeeded"
result.stdout.fnmatch_lines(["*1 benchmarked*"])
result.stdout.no_fnmatch_line("*print to stdout*")
result.stderr.no_fnmatch_line("*print to stderr*")


@skip_without_valgrind
def test_capsys(pytester: pytest.Pytester, codspeed_env) -> None:
"""Test print statements are captured by capsys (i.e., not printed to terminal in
the middle of the progress bar) and can be inspected within test."""
pytester.makepyfile(
"""
import pytest, sys

@pytest.mark.benchmark
def test_capsys(capsys):
print("print to stdout")
print("print to stderr", file=sys.stderr)

stdout, stderr = capsys.readouterr()

assert stdout == "print to stdout\\n"
assert stderr == "print to stderr\\n"
"""
)
with codspeed_env():
result = pytester.runpytest("--codspeed")
assert result.ret == 0, "the run should have succeeded"
result.stdout.fnmatch_lines(["*1 benchmarked*"])
result.stdout.no_fnmatch_line("*print to stdout*")
result.stderr.no_fnmatch_line("*print to stderr*")