src/pytest_codspeed/config.py (63 changes: 51 additions & 12 deletions)
@@ -1,9 +1,14 @@
 from __future__ import annotations
 
-from dataclasses import dataclass
-from typing import TYPE_CHECKING
+import dataclasses
+from dataclasses import dataclass, field
+from typing import TYPE_CHECKING, Generic, TypeVar
+
+T = TypeVar("T")
 
 if TYPE_CHECKING:
     from typing import Any, Callable
 
     import pytest

@@ -64,17 +69,51 @@ def from_pytest_item(cls, item: pytest.Item) -> BenchmarkMarkerOptions:
             raise ValueError(
                 "Positional arguments are not allowed in the benchmark marker"
             )
-        options = cls(
-            group=marker.kwargs.pop("group", None),
-            min_time=marker.kwargs.pop("min_time", None),
-            max_time=marker.kwargs.pop("max_time", None),
-            max_rounds=marker.kwargs.pop("max_rounds", None),
-        )
+        kwargs = marker.kwargs
 
-        if len(marker.kwargs) > 0:
+        unknown_kwargs = set(kwargs.keys()) - {
+            field.name for field in dataclasses.fields(cls)
+        }
+        if unknown_kwargs:
             raise ValueError(
                 "Unknown kwargs passed to benchmark marker: "
-                + ", ".join(marker.kwargs.keys())
+                + ", ".join(sorted(unknown_kwargs))
             )
-        return options
+
+        return cls(**kwargs)
+
+
+@dataclass(frozen=True)
+class PedanticOptions(Generic[T]):
+    """Parameters for running a benchmark using the pedantic fixture API."""
+
+    target: Callable[..., T]
+    setup: Callable[[], Any | None] | None
+    teardown: Callable[..., Any | None] | None
+    rounds: int
+    warmup_rounds: int
+    iterations: int
+    args: tuple[Any, ...] = field(default_factory=tuple)
+    kwargs: dict[str, Any] = field(default_factory=dict)
+
+    def __post_init__(self) -> None:
+        if self.rounds < 0:
+            raise ValueError("rounds must be positive")
+        if self.warmup_rounds < 0:
+            raise ValueError("warmup_rounds must be non-negative")
+        if self.iterations <= 0:
+            raise ValueError("iterations must be positive")
+        if self.iterations > 1 and self.setup is not None:
+            raise ValueError(
+                "setup cannot be used with multiple iterations, use multiple rounds"
+            )
+
+    def setup_and_get_args_kwargs(self) -> tuple[tuple[Any, ...], dict[str, Any]]:
+        if self.setup is None:
+            return self.args, self.kwargs
+        maybe_result = self.setup(*self.args, **self.kwargs)
+        if maybe_result is not None:
+            if len(self.args) > 0 or len(self.kwargs) > 0:
+                raise ValueError("setup cannot return a value when args are provided")
+            return maybe_result
+        return self.args, self.kwargs
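
For context on the new dataclass above: a setup callable may either return None, in which case the stored args/kwargs are used as-is, or return an (args, kwargs) pair for the target, in which case explicit args/kwargs are rejected and iterations must stay at 1. Below is a minimal sketch of that contract; it is not part of the diff, and the target and setup functions are invented for illustration.

from pytest_codspeed.config import PedanticOptions


def target(data: bytes) -> int:
    return len(data)


def setup() -> tuple[tuple, dict]:
    # A setup callable may return the (args, kwargs) for the target.
    return (b"x" * 1024,), {}


opts = PedanticOptions(
    target=target,
    setup=setup,
    teardown=None,
    rounds=5,
    warmup_rounds=1,
    iterations=1,  # setup requires iterations == 1, enforced in __post_init__
)
args, kwargs = opts.setup_and_get_args_kwargs()
assert target(*args, **kwargs) == 1024

# Rejected at construction time: setup combined with iterations > 1.
# PedanticOptions(target=target, setup=setup, teardown=None,
#                 rounds=5, warmup_rounds=0, iterations=2)  # ValueError

Since the dataclass is frozen and validates in __post_init__, an invalid pedantic call fails before any instrumentation starts.
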
src/pytest_codspeed/instruments/__init__.py (20 changes: 14 additions & 6 deletions)
@@ -5,15 +5,14 @@
 from typing import TYPE_CHECKING
 
 if TYPE_CHECKING:
-    from typing import Any, Callable, ClassVar, ParamSpec, TypeVar
+    from typing import Any, Callable, ClassVar, TypeVar
 
     import pytest
 
-    from pytest_codspeed.config import BenchmarkMarkerOptions
+    from pytest_codspeed.config import BenchmarkMarkerOptions, PedanticOptions
     from pytest_codspeed.plugin import CodSpeedConfig
 
     T = TypeVar("T")
-    P = ParamSpec("P")
 
 
 class Instrument(metaclass=ABCMeta):
@@ -31,9 +30,18 @@ def measure(
         marker_options: BenchmarkMarkerOptions,
         name: str,
         uri: str,
-        fn: Callable[P, T],
-        *args: P.args,
-        **kwargs: P.kwargs,
+        fn: Callable[..., T],
+        *args: tuple,
+        **kwargs: dict[str, Any],
     ) -> T: ...
+
+    @abstractmethod
+    def measure_pedantic(
+        self,
+        marker_options: BenchmarkMarkerOptions,
+        pedantic_options: PedanticOptions[T],
+        name: str,
+        uri: str,
+    ) -> T: ...
 
     @abstractmethod
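
Every instrument now has to provide the new abstract hook. The stub below is a hypothetical sketch of a conforming measure_pedantic, not code from this PR: WallClockStub and its timing are invented, and the ABC's other abstract members are omitted. The real implementation, like the Valgrind one below, drives its instrumentation around the same setup/target/teardown loop.

import time
from typing import TypeVar

from pytest_codspeed.config import BenchmarkMarkerOptions, PedanticOptions

T = TypeVar("T")


class WallClockStub:
    """Hypothetical, instrument-shaped stub; not part of pytest-codspeed."""

    def measure_pedantic(
        self,
        marker_options: BenchmarkMarkerOptions,
        pedantic_options: PedanticOptions[T],
        name: str,
        uri: str,
    ) -> T:
        # Warmup rounds: fresh setup/teardown around each call, untimed.
        for _ in range(pedantic_options.warmup_rounds):
            args, kwargs = pedantic_options.setup_and_get_args_kwargs()
            pedantic_options.target(*args, **kwargs)
            if pedantic_options.teardown is not None:
                pedantic_options.teardown(*args, **kwargs)

        # Timed rounds: iterations of the target inside each round; at least
        # one round runs so the returned result is always defined.
        start = time.perf_counter()
        for _ in range(max(pedantic_options.rounds, 1)):
            args, kwargs = pedantic_options.setup_and_get_args_kwargs()
            for _ in range(pedantic_options.iterations):
                out = pedantic_options.target(*args, **kwargs)
            if pedantic_options.teardown is not None:
                pedantic_options.teardown(*args, **kwargs)
        print(f"{name} ({uri}): {time.perf_counter() - start:.6f}s total")
        return out
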
src/pytest_codspeed/instruments/valgrind.py (56 changes: 52 additions & 4 deletions)
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import sys
+import warnings
 from typing import TYPE_CHECKING
 
 from pytest_codspeed import __semver_version__
@@ -12,7 +13,8 @@

     from pytest import Session
 
-    from pytest_codspeed.instruments import P, T
+    from pytest_codspeed.config import PedanticOptions
+    from pytest_codspeed.instruments import T
     from pytest_codspeed.plugin import BenchmarkMarkerOptions, CodSpeedConfig
 
 SUPPORTS_PERF_TRAMPOLINE = sys.version_info >= (3, 12)
@@ -52,9 +54,9 @@ def measure(
         marker_options: BenchmarkMarkerOptions,
         name: str,
         uri: str,
-        fn: Callable[P, T],
-        *args: P.args,
-        **kwargs: P.kwargs,
+        fn: Callable[..., T],
+        *args: tuple,
+        **kwargs: dict[str, Any],
     ) -> T:
         self.benchmark_count += 1
 
@@ -78,8 +80,54 @@ def __codspeed_root_frame__() -> T:
             self.instrument_hooks.lib.callgrind_stop_instrumentation()
             self.instrument_hooks.set_executed_benchmark(uri)
 
+    def measure_pedantic(
+        self,
+        marker_options: BenchmarkMarkerOptions,
+        pedantic_options: PedanticOptions[T],
+        name: str,
+        uri: str,
+    ) -> T:
+        if pedantic_options.rounds != 1 or pedantic_options.iterations != 1:
+            warnings.warn(
+                "Valgrind instrument ignores rounds and iterations settings "
+                "in pedantic mode"
+            )
+        if not self.instrument_hooks:
+            args, kwargs = pedantic_options.setup_and_get_args_kwargs()
+            out = pedantic_options.target(*args, **kwargs)
+            if pedantic_options.teardown is not None:
+                pedantic_options.teardown(*args, **kwargs)
+            return out
+
+        def __codspeed_root_frame__(*args, **kwargs) -> T:
+            return pedantic_options.target(*args, **kwargs)
+
+        # Warmup
+        warmup_rounds = max(
+            pedantic_options.warmup_rounds, 1 if SUPPORTS_PERF_TRAMPOLINE else 0
+        )
+        for _ in range(warmup_rounds):
+            args, kwargs = pedantic_options.setup_and_get_args_kwargs()
+            __codspeed_root_frame__(*args, **kwargs)
+            if pedantic_options.teardown is not None:
+                pedantic_options.teardown(*args, **kwargs)
+
+        # Compute the actual result of the function
+        args, kwargs = pedantic_options.setup_and_get_args_kwargs()
+        self.instrument_hooks.lib.callgrind_start_instrumentation()
+        try:
+            out = __codspeed_root_frame__(*args, **kwargs)
+        finally:
+            self.instrument_hooks.lib.callgrind_stop_instrumentation()
+            self.instrument_hooks.set_executed_benchmark(uri)
+        if pedantic_options.teardown is not None:
+            pedantic_options.teardown(*args, **kwargs)
+
+        return out
+
     def report(self, session: Session) -> None:
         reporter = session.config.pluginmanager.get_plugin("terminalreporter")
         assert reporter is not None, "terminalreporter not found"
         count_suffix = "benchmarked" if self.should_measure else "benchmark tested"
         reporter.write_sep(
             "=",
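
Taken together, these changes wire pedantic runs end to end: the fixture builds a PedanticOptions, the plugin dispatches to the instrument's measure_pedantic, and the Valgrind backend instruments exactly one setup/target/teardown cycle after the warmups. Below is a sketch of how a test might exercise this path, assuming the fixture exposes a pytest-benchmark-style pedantic API; the test body is invented for illustration.

import time


def test_sleep_pedantic(benchmark):
    def setup():
        # setup returns the (args, kwargs) handed to the target
        return (0.0001,), {}

    # Under the Valgrind instrument, rounds != 1 triggers the warning added
    # above, and a single instrumented round is measured after the warmups.
    benchmark.pedantic(time.sleep, setup=setup, rounds=3, warmup_rounds=1)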