Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ Mainnet:
Frontier: 0
Homestead: 1150000
DAOFork: 1920000
Tangerine: 2463000
TangerineWhistle: 2463000
SpuriousDragon: 2675000
Byzantium: 4370000
Constantinople: 7280000
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -293,7 +293,7 @@
Frontier: 0
Homestead: 1150000
DAOFork: 1920000
Tangerine: 2463000
TangerineWhistle: 2463000
SpuriousDragon: 2675000
Byzantium: 4370000
Constantinople: 7280000
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1208,3 +1208,54 @@ def parametrize_fork(
metafunc.parametrize(
param_names, param_values, scope="function", indirect=indirect
)


def pytest_collection_modifyitems(
    config: pytest.Config, items: List[pytest.Item]
) -> None:
    """
    Filter collected tests based on param-level validity markers.

    The ``pytest_generate_tests`` hook only considers function-level
    validity markers. This hook runs after parametrization, when
    param-level markers (attached via ``pytest.param(..., marks=...)``)
    are visible on each item, and drops items whose ``fork`` parameter
    falls outside the fork set derived from all markers combined.

    Deselected items are reported through the standard
    ``pytest_deselected`` hook so pytest's summary line stays accurate.
    """
    kept: List[pytest.Item] = []
    deselected: List[pytest.Item] = []

    for item in items:
        # The fork parameter lives on `callspec` for parametrized tests;
        # fall back to a bare `params` attribute when present.
        params = None
        if hasattr(item, "callspec"):
            params = item.callspec.params
        elif hasattr(item, "params"):
            params = item.params

        if not params or params.get("fork") is None:
            kept.append(item)
            continue

        fork: Fork = params["fork"]

        # iter_markers() yields all markers, including param-level ones.
        markers = item.iter_markers()

        # Calculate the valid fork set from all markers. If this raises
        # (e.g., duplicate markers from combining function-level and
        # param-level ones), exit immediately with a usage error rather
        # than a confusing traceback.
        try:
            valid_fork_set = ValidityMarker.get_test_fork_set_from_markers(
                markers
            )
        except Exception as e:
            pytest.exit(
                f"Error in test '{item.name}': {e}",
                returncode=pytest.ExitCode.USAGE_ERROR,
            )

        if fork in valid_fork_set:
            kept.append(item)
        else:
            deselected.append(item)

    if deselected:
        # Notify pytest so the "N deselected" summary is correct, then
        # update `items` in place as this hook requires.
        config.hook.pytest_deselected(items=deselected)
        items[:] = kept
Original file line number Diff line number Diff line change
Expand Up @@ -236,3 +236,73 @@ def test_invalid_validity_markers(
errors=1,
)
assert error_string in "\n".join(result.stdout.lines)


# --- Tests for param-level marker errors --- #


param_level_marker_error_test_cases = (
(
"param_level_valid_from_with_function_level_valid_from",
(
"""
import pytest
@pytest.mark.parametrize(
"value",
[
pytest.param(True, marks=pytest.mark.valid_from("Paris")),
],
)
@pytest.mark.valid_from("Berlin")
def test_case(state_test, value):
assert 1
""",
"Too many 'valid_from' markers applied to test",
),
),
(
"param_level_valid_until_with_function_level_valid_until",
(
"""
import pytest
@pytest.mark.parametrize(
"value",
[
pytest.param(True, marks=pytest.mark.valid_until("Cancun")),
],
)
@pytest.mark.valid_until("Prague")
def test_case(state_test, value):
assert 1
""",
"Too many 'valid_until' markers applied to test",
),
),
)


@pytest.mark.parametrize(
    "test_function, error_string",
    [case for _, case in param_level_marker_error_test_cases],
    ids=[case_id for case_id, _ in param_level_marker_error_test_cases],
)
def test_param_level_marker_errors(
    pytester: pytest.Pytester, error_string: str, test_function: str
) -> None:
    """
    Verify that mixing function-level and param-level validity markers of
    the same kind is rejected.

    Function-level marker errors surface during test generation; the
    param-level variants are only detected at collection time, where they
    abort the whole run via ``pytest.exit`` with no per-test outcomes.
    """
    pytester.makepyfile(test_function)
    pytester.copy_example(
        name="src/execution_testing/cli/pytest_commands/pytest_ini_files/pytest-fill.ini"
    )
    result = pytester.runpytest("-c", "pytest-fill.ini")

    # Only the exit status and error text can be asserted on, since the
    # run terminates before producing test outcomes.
    assert result.ret != 0, "Expected non-zero exit code"
    combined_output = "\n".join(result.stdout.lines)
    assert error_string in combined_output, f"Expected '{error_string}' in output"
Original file line number Diff line number Diff line change
Expand Up @@ -41,11 +41,14 @@ def test_all_forks({StateTest.pytest_parameter_name()}):
forks_under_test = forks_from_until(all_forks[0], all_forks[-1])
expected_skipped = 2 # eels doesn't support Constantinople
expected_passed = (
len(forks_under_test) * len(StateTest.supported_fixture_formats)
len([f for f in forks_under_test if not f.ignore()])
* len(StateTest.supported_fixture_formats)
- expected_skipped
)
stdout = "\n".join(result.stdout.lines)
for test_fork in forks_under_test:
if test_fork.ignore():
continue
for fixture_format in StateTest.supported_fixture_formats:
if isinstance(fixture_format, LabeledFixtureFormat):
fixture_format_label = fixture_format.label
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -222,3 +222,182 @@ def test_fork_markers(
*pytest_args,
)
result.assert_outcomes(**outcomes)


# --- Tests for param-level validity markers --- #


def generate_param_level_marker_test() -> str:
"""Generate a test function with param-level fork validity markers."""
return """
import pytest

@pytest.mark.parametrize(
"value",
[
pytest.param(
True,
id="from_tangerine",
marks=pytest.mark.valid_from("TangerineWhistle"),
),
pytest.param(
False,
id="from_paris",
marks=pytest.mark.valid_from("Paris"),
),
],
)
@pytest.mark.state_test_only
def test_param_level_valid_from(state_test, value):
pass
"""


def generate_param_level_valid_until_test() -> str:
"""Generate a test function with param-level valid_until markers."""
return """
import pytest

@pytest.mark.parametrize(
"value",
[
pytest.param(
True,
id="until_cancun",
marks=pytest.mark.valid_until("Cancun"),
),
pytest.param(
False,
id="until_paris",
marks=pytest.mark.valid_until("Paris"),
),
],
)
@pytest.mark.state_test_only
def test_param_level_valid_until(state_test, value):
pass
"""


def generate_param_level_mixed_test() -> str:
"""Generate a test with both function-level and param-level markers."""
return """
import pytest

@pytest.mark.parametrize(
"value",
[
pytest.param(
True,
id="all_forks",
marks=pytest.mark.valid_from("TangerineWhistle"),
),
pytest.param(
False,
id="paris_only",
marks=pytest.mark.valid_from("Paris"),
),
],
)
@pytest.mark.valid_until("Cancun")
@pytest.mark.state_test_only
def test_mixed_function_and_param_markers(state_test, value):
pass
"""


@pytest.mark.parametrize(
    "test_function,pytest_args,outcomes",
    [
        pytest.param(
            generate_param_level_marker_test(),
            ["--from=Paris", "--until=Cancun"],
            # Both params cover Paris/Shanghai/Cancun: 3 + 3 = 6 tests.
            {"passed": 6, "failed": 0, "skipped": 0, "errors": 0},
            id="param_level_valid_from_paris_to_cancun",
        ),
        pytest.param(
            generate_param_level_marker_test(),
            ["--from=Berlin", "--until=Shanghai"],
            # from_tangerine spans Berlin/London/Paris/Shanghai (4);
            # from_paris only Paris/Shanghai (2): 6 tests total.
            {"passed": 6, "failed": 0, "skipped": 0, "errors": 0},
            id="param_level_valid_from_berlin_to_shanghai",
        ),
        pytest.param(
            generate_param_level_marker_test(),
            ["--from=Berlin", "--until=London"],
            # from_tangerine: Berlin/London (2); from_paris starts after
            # London so it is filtered out entirely: 2 tests total.
            {"passed": 2, "failed": 0, "skipped": 0, "errors": 0},
            id="param_level_valid_from_berlin_to_london",
        ),
        pytest.param(
            generate_param_level_valid_until_test(),
            ["--from=Paris", "--until=Prague"],
            # until_cancun: Paris/Shanghai/Cancun (3); until_paris: Paris
            # only (1): 4 tests total.
            {"passed": 4, "failed": 0, "skipped": 0, "errors": 0},
            id="param_level_valid_until_paris_to_prague",
        ),
        pytest.param(
            generate_param_level_valid_until_test(),
            ["--from=Shanghai", "--until=Prague"],
            # until_cancun: Shanghai/Cancun (2); until_paris lies entirely
            # before the range: 2 tests total.
            {"passed": 2, "failed": 0, "skipped": 0, "errors": 0},
            id="param_level_valid_until_shanghai_to_prague",
        ),
        pytest.param(
            generate_param_level_mixed_test(),
            ["--from=Berlin", "--until=Prague"],
            # Function-level valid_until("Cancun") caps both params:
            # all_forks gets Berlin..Cancun (5), paris_only gets
            # Paris..Cancun (3): 8 tests total.
            {"passed": 8, "failed": 0, "skipped": 0, "errors": 0},
            id="mixed_markers_berlin_to_prague",
        ),
        pytest.param(
            generate_param_level_mixed_test(),
            ["--from=Paris", "--until=Shanghai"],
            # --until=Shanghai is stricter than the function-level cap,
            # so each param covers Paris/Shanghai (2 each): 4 tests total.
            {"passed": 4, "failed": 0, "skipped": 0, "errors": 0},
            id="mixed_markers_paris_to_shanghai",
        ),
    ],
)
def test_param_level_validity_markers(
    pytester: pytest.Pytester,
    test_function: str,
    outcomes: dict,
    pytest_args: List[str],
) -> None:
    """
    Exercise param-level validity markers attached via ``pytest.param``.

    After parametrization, ``pytest_collection_modifyitems`` filters items
    using the param-level markers, so two params of the same test can
    carry different fork validity ranges.
    """
    pytester.makepyfile(test_function)
    pytester.copy_example(
        name="src/execution_testing/cli/pytest_commands/pytest_ini_files/pytest-fill.ini"
    )
    run_args = ["-c", "pytest-fill.ini", "-v", *pytest_args]
    result = pytester.runpytest(*run_args)
    result.assert_outcomes(**outcomes)
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,12 @@ class EvmOneTransitionTool(TransitionTool):
supports_opcode_count: ClassVar[bool] = True
supports_blob_params: ClassVar[bool] = True

# evmone uses space-separated fork names for some forks
fork_name_map: ClassVar[Dict[str, str]] = {
"TangerineWhistle": "Tangerine Whistle",
"SpuriousDragon": "Spurious Dragon",
}

def __init__(
self,
*,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -145,6 +145,7 @@ class TransitionTool(EthereumCLI):

supports_xdist: ClassVar[bool] = True
supports_blob_params: ClassVar[bool] = False
fork_name_map: ClassVar[Dict[str, str]] = {}

@abstractmethod
def __init__(
Expand Down Expand Up @@ -326,13 +327,19 @@ def _evaluate_filesystem(
}
output_paths["body"] = os.path.join("output", "txs.rlp")

# Get fork name and apply any tool-specific mapping
fork_name = (
# NOTE (PR review comment, author): "This fixed CI against evmone for both
# forks that have a space in their name. Even though I re-ignored them, this
# is a nice addition, since re-enabling those forks only requires toggling
# the ignore on/off."

t8n_data.fork_name_if_supports_blob_params
if self.supports_blob_params
else t8n_data.fork_name
)
fork_name = self.fork_name_map.get(fork_name, fork_name)

# Construct args for evmone-t8n binary
args = [
str(self.binary),
"--state.fork",
t8n_data.fork_name_if_supports_blob_params
if self.supports_blob_params
else t8n_data.fork_name,
fork_name,
"--input.alloc",
input_paths["alloc"],
"--input.env",
Expand Down
Loading
Loading