14 changes: 7 additions & 7 deletions .github/configs/feature.yaml
@@ -1,27 +1,27 @@
 # Unless filling for special features, all features should fill for previous forks (starting from Frontier) too
 stable:
   evm-type: stable
-  fill-params: --until=Prague --fill-static-tests --ignore=tests/static/state_tests/stQuadraticComplexityTest
+  fill-params: --no-html --until=Prague --fill-static-tests --ignore=tests/static/state_tests/stQuadraticComplexityTest

 develop:
-  evm-type: develop
-  fill-params: --until=BPO4 --fill-static-tests --ignore=tests/static/state_tests/stQuadraticComplexityTest
+  evm-type: develop
+  fill-params: --no-html --until=BPO4 --fill-static-tests --ignore=tests/static/state_tests/stQuadraticComplexityTest

 benchmark:
   evm-type: benchmark
-  fill-params: --fork=Prague --gas-benchmark-values 1,5,10,30,60,100,150 -m benchmark ./tests/benchmark
+  fill-params: --no-html --fork=Prague --gas-benchmark-values 1,5,10,30,60,100,150 -m benchmark ./tests/benchmark

 benchmark_develop:
   evm-type: benchmark
-  fill-params: --fork=Osaka --gas-benchmark-values 1,5,10,30,60,100,150 -m "benchmark" ./tests/benchmark
+  fill-params: --no-html --fork=Osaka --gas-benchmark-values 1,5,10,30,60,100,150 -m "benchmark" ./tests/benchmark
   feature_only: true

 benchmark_fast:
   evm-type: benchmark
-  fill-params: --fork=Prague --gas-benchmark-values 100 -m "benchmark" ./tests/benchmark
+  fill-params: --no-html --fork=Prague --gas-benchmark-values 100 -m "benchmark" ./tests/benchmark
   feature_only: true

 bal:
   evm-type: develop
-  fill-params: --fork=Amsterdam --fill-static-tests
+  fill-params: --no-html --fork=Amsterdam --fill-static-tests
   feature_only: true
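
Each fill-params value is a flat argument string for the fill command, and the --no-html flag added to every entry presumably disables HTML report generation. A hypothetical sketch of how a CI step might expand one entry into an invocation; the workflow code that consumes this config is not part of the diff:

# Hypothetical CI-side expansion of a feature.yaml entry.
import shlex
import subprocess

import yaml

with open(".github/configs/feature.yaml") as f:
    features = yaml.safe_load(f)

entry = features["stable"]
# Becomes: fill --no-html --until=Prague --fill-static-tests --ignore=...
cmd = ["fill", *shlex.split(entry["fill-params"])]
subprocess.run(cmd, check=True)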
16 changes: 11 additions & 5 deletions packages/testing/src/conftest.py
@@ -8,6 +8,7 @@
 from execution_testing.client_clis import (
     BesuTransitionTool,
     ExecutionSpecsTransitionTool,
+    GethTransitionTool,
     TransitionTool,
 )

@@ -49,11 +50,16 @@ def installed_transition_tool_instances() -> Generator[


 @pytest.fixture(
-    params=INSTALLED_TRANSITION_TOOLS,
-    ids=[
-        transition_tool_class.__name__
-        for transition_tool_class in INSTALLED_TRANSITION_TOOLS
-    ],
+    params=[
+        pytest.param(
+            transition_tool,
+            marks=[pytest.mark.xfail(reason="Geth t8n needs update")]
+            if transition_tool == GethTransitionTool
+            else [],
+            id=transition_tool.__name__,
+        )
+        for transition_tool in INSTALLED_TRANSITION_TOOLS
+    ]
 )
 def installed_t8n(
     request: pytest.FixtureRequest,
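
The rewritten parametrization wraps each installed tool in pytest.param so a single backend can carry an xfail mark while the others run normally. A self-contained sketch of the same pattern; GoodTool and FlakyTool are illustrative stand-ins, not names from the PR:

import pytest


# Illustrative stand-ins; the real PR parametrizes transition-tool classes.
class GoodTool:
    pass


class FlakyTool:
    pass


INSTALLED_TOOLS = [GoodTool, FlakyTool]


@pytest.fixture(
    params=[
        pytest.param(
            tool,
            # Only the known-broken backend gets an xfail mark.
            marks=[pytest.mark.xfail(reason="backend needs update")]
            if tool is FlakyTool
            else [],
            id=tool.__name__,
        )
        for tool in INSTALLED_TOOLS
    ]
)
def tool(request: pytest.FixtureRequest):
    return request.param()


def test_tool_instantiates(tool):
    assert tool is not None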
2 changes: 2 additions & 0 deletions packages/testing/src/execution_testing/__init__.py
@@ -71,6 +71,7 @@
     TestParameterGroup,
     TestPhaseManager,
     Transaction,
+    TransactionLog,
     TransactionReceipt,
     TransactionType,
     Withdrawal,
@@ -186,6 +187,7 @@
     "TestPrivateKey2",
     "Transaction",
     "TransactionException",
+    "TransactionLog",
     "TransactionReceipt",
     "TransactionTest",
     "TransactionTestFiller",
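
With the re-export in place, downstream code can import the new type from the package root; a hypothetical usage site (TransactionLog's definition is not shown in this diff):

# Hypothetical import site; demonstrates only the package-root re-export.
from execution_testing import Transaction, TransactionLog, TransactionReceipt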
22 changes: 18 additions & 4 deletions packages/testing/src/execution_testing/base_types/serialization.py
@@ -1,9 +1,10 @@
 """Ethereum test types for serialization and encoding."""

-from typing import Any, ClassVar, List
+from typing import Any, ClassVar, List, Self, Sequence

 import ethereum_rlp as eth_rlp
 from ethereum_types.numeric import Uint
+from trie import HexaryTrie

 from execution_testing.base_types import Bytes

@@ -33,6 +34,7 @@ class RLPSerializable:
     signable: ClassVar[bool] = False
     rlp_fields: ClassVar[List[str]]
     rlp_signing_fields: ClassVar[List[str]]
+    rlp_exclude_none: ClassVar[bool] = False

     def get_rlp_fields(self) -> List[str]:
         """
@@ -102,9 +104,10 @@ def to_list_from_fields(self, fields: List[str]) -> List[Any]:
                     f'in object type "{self.__class__.__name__}"'
                 )
             try:
-                values_list.append(
-                    to_serializable_element(getattr(self, field))
-                )
+                value = getattr(self, field)
+                if self.rlp_exclude_none and value is None:
+                    continue
+                values_list.append(to_serializable_element(value))
             except Exception as e:
                 raise Exception(
                     f'Unable to rlp serialize field "{field}" '
@@ -151,6 +154,17 @@ def rlp(self) -> Bytes:
             self.get_rlp_prefix() + eth_rlp.encode(self.to_list(signing=False))
         )

+    @classmethod
+    def list_root(cls, element_list: Sequence[Self]) -> bytes:
+        """Return the root of a list of the given type."""
+        t = HexaryTrie(db={})
+        for i, e in enumerate(element_list):
+            t.set(
+                eth_rlp.encode(Uint(i)),
+                e.rlp(),
+            )
+        return t.root_hash
+

 class SignableRLPSerializable(RLPSerializable):
     """
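
The new list_root classmethod builds a HexaryTrie keyed by the RLP encoding of each element's list index, the same construction Ethereum uses for roots such as the transactions root. A minimal standalone sketch using the same py-trie and ethereum-rlp APIs as the method above; the payload bytes are dummies:

import ethereum_rlp as eth_rlp
from ethereum_types.numeric import Uint
from trie import HexaryTrie


def list_root(encoded_elements: list) -> bytes:
    """Root of a trie mapping rlp(index) -> element bytes."""
    t = HexaryTrie(db={})
    for i, encoded in enumerate(encoded_elements):
        t.set(eth_rlp.encode(Uint(i)), encoded)
    return t.root_hash


# Two already-RLP-encoded stand-in elements (dummy payloads).
print(list_root([b"\x01\x02", b"\x03\x04"]).hex())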
74 changes: 74 additions & 0 deletions packages/testing/src/execution_testing/cli/gen_index.py
@@ -226,5 +226,79 @@ def generate_fixtures_index(
         f.write(index.model_dump_json(exclude_none=False, indent=2))


+def merge_partial_indexes(output_dir: Path, quiet_mode: bool = False) -> None:
+    """
+    Merge partial index files from all workers into final index.json.
+
+    This is called by pytest_sessionfinish on the master process after all
+    workers have finished and written their partial indexes.
+
+    Partial indexes use JSONL format (one JSON object per line) for efficient
+    append-only writes during fill. Entries are validated with Pydantic here.
+
+    Args:
+        output_dir: The fixture output directory.
+        quiet_mode: If True, don't print status messages.
+
+    """
+    meta_dir = output_dir / ".meta"
+    partial_files = list(meta_dir.glob("partial_index*.jsonl"))
+
+    if not partial_files:
+        raise Exception("No partial indexes found.")
+
+    # Merge all partial indexes (JSONL format: one entry per line)
+    # Read as raw dicts — the data was already validated when collected
+    # from live Pydantic fixture objects in add_fixture().
+    all_raw_entries: list[dict] = []
+    all_forks: set = set()
+    all_formats: set = set()
+
+    for partial_file in partial_files:
+        with open(partial_file) as f:
+            for line in f:
+                line = line.strip()
+                if not line:
+                    continue
+                entry_data = json.loads(line)
+                all_raw_entries.append(entry_data)
+                # Collect forks and formats from raw strings
+                if entry_data.get("fork"):
+                    all_forks.add(entry_data["fork"])
+                if entry_data.get("format"):
+                    all_formats.add(entry_data["format"])
+
+    # Compute root hash from raw dicts (no Pydantic needed)
+    root_hash = HashableItem.from_raw_entries(all_raw_entries).hash()
+
+    # Build final index — Pydantic validates the entire structure once
+    # via model_validate(), not 96k individual model_validate() calls.
+    index = IndexFile.model_validate(
+        {
+            "test_cases": all_raw_entries,
+            "root_hash": HexNumber(root_hash),
+            "created_at": datetime.datetime.now(),
+            "test_count": len(all_raw_entries),
+            "forks": list(all_forks),
+            "fixture_formats": list(all_formats),
+        }
+    )
+
+    # Write final index
+    index_path = meta_dir / "index.json"
+    index_path.parent.mkdir(parents=True, exist_ok=True)
+    index_path.write_text(index.model_dump_json(exclude_none=True, indent=2))
+
+    if not quiet_mode:
+        rich.print(
+            f"[green]Merged {len(partial_files)} partial indexes "
+            f"({len(all_raw_entries)} test cases) into {index_path}[/]"
+        )
+
+    # Cleanup partial files
+    for partial_file in partial_files:
+        partial_file.unlink()
+
+
 if __name__ == "__main__":
     generate_fixtures_index_cli()
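
The worker-side write path (add_fixture) is not shown in this diff; a hypothetical sketch of the append-only JSONL write that merge_partial_indexes expects, with field names taken from the keys it reads:

# Hypothetical worker-side append; the real add_fixture() is not in the diff.
import json
from pathlib import Path


def append_partial_entry(meta_dir: Path, worker_id: str, entry: dict) -> None:
    """Append one index entry as a single JSON line (JSONL)."""
    partial_file = meta_dir / f"partial_index_{worker_id}.jsonl"
    with open(partial_file, "a") as f:
        f.write(json.dumps(entry) + "\n")


append_partial_entry(
    Path("fixtures/.meta"),
    "gw0",
    {
        "id": "test_example[fork_Prague]",
        "json_path": "state_tests/example.json",
        "fixture_hash": "0x" + "00" * 32,
        "fork": "Prague",
        "format": "state_test",
    },
)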
105 changes: 104 additions & 1 deletion packages/testing/src/execution_testing/cli/hasher.py
@@ -1,17 +1,22 @@
 """Simple CLI tool to hash a directory of JSON fixtures."""

+from __future__ import annotations
+
 import hashlib
 import json
 import sys
 from dataclasses import dataclass, field
 from enum import IntEnum, auto
 from pathlib import Path
-from typing import Any, Callable, Dict, List, Optional, TypeVar
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, TypeVar

 import click
 from rich.console import Console
 from rich.markup import escape as rich_escape
+
+if TYPE_CHECKING:
+    from execution_testing.fixtures.consume import TestCaseIndexFile


 class HashableItemType(IntEnum):
     """Represents the type of a hashable item."""
@@ -145,6 +150,104 @@ def from_folder(
             items[file_path.name] = item
         return cls(type=HashableItemType.FOLDER, items=items, parents=parents)

+    @classmethod
+    def from_index_entries(
+        cls, entries: List["TestCaseIndexFile"]
+    ) -> "HashableItem":
+        """
+        Create a hashable item tree from index entries (no file I/O).
+
+        This produces the same hash as from_folder() but uses pre-collected
+        fixture hashes instead of reading files from disk.
+
+        Optimized to O(n) using a trie-like structure built in a single pass,
+        avoiding repeated path operations and iterations.
+        """
+        raw = [
+            {
+                "id": e.id,
+                "json_path": str(e.json_path),
+                "fixture_hash": str(e.fixture_hash)
+                if e.fixture_hash
+                else None,
+            }
+            for e in entries
+        ]
+        return cls.from_raw_entries(raw)
+
+    @classmethod
+    def from_raw_entries(cls, entries: List[Dict]) -> "HashableItem":
+        """
+        Create a hashable item tree from raw entry dicts (no file I/O).
+
+        Accepts dicts with "id", "json_path", and "fixture_hash" keys.
+        This avoids Pydantic overhead entirely — only plain string/int
+        operations are used to build the hash tree.
+
+        Produces the same hash as from_folder() and from_index_entries().
+        """
+        # Build a trie where each node is either:
+        # - A dict (folder node) containing child nodes
+        # - A list of (test_id, hash_bytes) tuples (file node marker)
+        #
+        # Structure: {folder: {folder: {file.json: [(id, hash), ...]}}}
+        root_trie: dict = {}
+
+        # Single pass: insert all entries into trie
+        for entry in entries:
+            fixture_hash = entry.get("fixture_hash")
+            if not fixture_hash:
+                continue
+
+            # Navigate/create path to file node
+            path_parts = Path(entry["json_path"]).parts
+            current = root_trie
+
+            # Navigate to parent folder, creating nodes as needed
+            for part in path_parts[:-1]:
+                if part not in current:
+                    current[part] = {}
+                current = current[part]
+
+            # Add test entry to file node
+            file_name = path_parts[-1]
+            if file_name not in current:
+                current[file_name] = []
+
+            # Convert hex string to 32-byte hash
+            hash_bytes = int(fixture_hash, 16).to_bytes(32, "big")
+            current[file_name].append((entry["id"], hash_bytes))
+
+        # Convert trie to HashableItem tree (single recursive pass)
+        def trie_to_hashable(node: dict) -> Dict[str, "HashableItem"]:
+            """Convert a trie node to HashableItem dict."""
+            items: Dict[str, HashableItem] = {}
+
+            for name, child in node.items():
+                if isinstance(child, list):
+                    # File node: child is list of (test_id, hash_bytes)
+                    test_items = {
+                        test_id: cls(
+                            type=HashableItemType.TEST, root=hash_bytes
+                        )
+                        for test_id, hash_bytes in child
+                    }
+                    items[name] = cls(
+                        type=HashableItemType.FILE, items=test_items
+                    )
+                else:
+                    # Folder node: recurse
+                    items[name] = cls(
+                        type=HashableItemType.FOLDER,
+                        items=trie_to_hashable(child),
+                    )
+
+            return items
+
+        return cls(
+            type=HashableItemType.FOLDER, items=trie_to_hashable(root_trie)
+        )
+

 def render_hash_report(
     folder: Path,
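
A short usage sketch of the new in-memory hashing path, assuming the module is importable as execution_testing.cli.hasher per the diff header; entry values are dummies:

from execution_testing.cli.hasher import HashableItem

entries = [
    {
        "id": "test_a[fork_Prague]",
        "json_path": "state_tests/example.json",
        "fixture_hash": "0x" + "11" * 32,
    },
    {
        "id": "test_b[fork_Prague]",
        "json_path": "state_tests/example.json",
        "fixture_hash": "0x" + "22" * 32,
    },
]

# Builds the same folder -> file -> test tree as hashing files on disk,
# but from pre-collected index entries, then combines into one digest.
root = HashableItem.from_raw_entries(entries).hash()
print(root)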