From 6ecfea65bf1ae3dbd81554b7856902b13943eaa3 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 31 Oct 2024 23:21:32 +0700 Subject: [PATCH] Interop: local devnet (#11590) * local interop devnet * interop-devnet: experimental op-geth docker image, connect with op-supervisor * interop-devnet: port and path fixes * interop-devnet: datadir fix * interop-local: more fixes * interop-devnet: connect op-supervisor to L2 EL nodes using RPC * more fixes * ops-bedrock: fix l2 op geth dockerfile for interop * interop-devnet: fix supervisor RPC add workaround * interop-devnet: implement review suggestions * fixes from run-testing * Add op-deployer to dockerignore exceptions * use latest geth rc * use RW Locks in Update Functions * add log for new cross-safe head * make updates much more frequent * use LocalDB for LastDerivedFrom * Add log message for finalization update * op-supervisor: fix db locking, fix crossdb usage * interop-devnet: use chain IDs as chain indices, since it's not translated everywhere yet * op-supervisor: cross-derived-from RPC method * Work Process ErrFuture to Debug Log --------- Co-authored-by: axelKingsley --- interop-devnet/create-chains.sh | 77 ++++ interop-devnet/depset.json | 14 + interop-devnet/docker-compose.yml | 354 ++++++++++++++++++ interop-devnet/justfile | 33 ++ op-chain-ops/devkeys/devkeys.go | 30 ++ op-node/cmd/interop/interop.go | 299 +++++++++++++++ op-node/cmd/main.go | 2 + op-node/flags/flags.go | 2 +- op-node/rollup/interop/interop.go | 6 +- op-service/ioutil/streams.go | 16 + op-service/jsonutil/json.go | 1 + op-service/locks/rwmap.go | 48 +++ op-service/locks/rwmap_test.go | 52 +++ op-service/locks/rwvalue.go | 24 ++ op-service/locks/rwvalue_test.go | 16 + op-service/sources/supervisor_client.go | 4 +- op-service/testutils/fake_interop_backend.go | 2 +- op-service/testutils/mock_interop_backend.go | 6 +- op-supervisor/supervisor/backend/backend.go | 4 +- .../backend/cross/safe_start_test.go | 8 +- .../supervisor/backend/cross/worker.go | 9 +- op-supervisor/supervisor/backend/db/db.go | 77 ++-- .../backend/db/fromda/update_test.go | 2 +- op-supervisor/supervisor/backend/db/query.go | 116 ++---- op-supervisor/supervisor/backend/db/update.go | 44 +-- op-supervisor/supervisor/backend/mock.go | 2 +- op-supervisor/supervisor/frontend/frontend.go | 6 +- op-supervisor/supervisor/service.go | 11 + ops-bedrock/l2-op-geth-interop.Dockerfile | 10 + .../op-stack-go/Dockerfile.dockerignore | 1 + 30 files changed, 1091 insertions(+), 185 deletions(-) create mode 100755 interop-devnet/create-chains.sh create mode 100644 interop-devnet/depset.json create mode 100644 interop-devnet/docker-compose.yml create mode 100644 interop-devnet/justfile create mode 100644 op-node/cmd/interop/interop.go create mode 100644 op-service/locks/rwmap.go create mode 100644 op-service/locks/rwmap_test.go create mode 100644 op-service/locks/rwvalue.go create mode 100644 op-service/locks/rwvalue_test.go create mode 100644 ops-bedrock/l2-op-geth-interop.Dockerfile diff --git a/interop-devnet/create-chains.sh b/interop-devnet/create-chains.sh new file mode 100755 index 000000000000..506682099154 --- /dev/null +++ b/interop-devnet/create-chains.sh @@ -0,0 +1,77 @@ +#!/bin/bash + +set -eu + +# Run this with workdir set as root of the repo +if [ -f "../versions.json" ]; then + echo "Running create-chains script." +else + echo "Cannot run create-chains script, must be in interop-devnet dir, but currently in:" + pwd + exit 1 +fi + +# Navigate to repository root +cd .. 
+ +# Check if already created +if [ -d ".devnet-interop" ]; then + echo "Already created chains." + exit 1 +else + echo "Creating new interop devnet chain configs" +fi + +export OP_INTEROP_MNEMONIC="test test test test test test test test test test test junk" + +go run ./op-node/cmd interop dev-setup \ + --artifacts-dir=packages/contracts-bedrock/forge-artifacts \ + --foundry-dir=packages/contracts-bedrock \ + --l1.chainid=900100 \ + --l2.chainids=900200,900201 \ + --out-dir=".devnet-interop" \ + --log.format=logfmt \ + --log.level=info + +# create L1 CL genesis +eth2-testnet-genesis deneb \ + --config=./ops-bedrock/beacon-data/config.yaml \ + --preset-phase0=minimal \ + --preset-altair=minimal \ + --preset-bellatrix=minimal \ + --preset-capella=minimal \ + --preset-deneb=minimal \ + --eth1-config=.devnet-interop/genesis/l1/genesis.json \ + --state-output=.devnet-interop/genesis/l1/beaconstate.ssz \ + --tranches-dir=.devnet-interop/genesis/l1/tranches \ + --mnemonics=./ops-bedrock/mnemonics.yaml \ + --eth1-withdrawal-address=0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \ + --eth1-match-genesis-time + +echo "Writing env files now..." + +# write env files for each L2 service + +chain_env=".devnet-interop/env/l2/900200" +mkdir -p "$chain_env" +key_cmd="go run ./op-node/cmd interop devkey secret --domain=chain-operator --chainid=900200" +# op-node +echo "OP_NODE_P2P_SEQUENCER_KEY=$($key_cmd --name=sequencer-p2p)" >> "$chain_env/op-node.env" +# proposer +echo "OP_PROPOSER_PRIVATE_KEY=$($key_cmd --name=proposer)" >> "$chain_env/op-proposer.env" +echo "OP_PROPOSER_GAME_FACTORY_ADDRESS=$(jq -r .DisputeGameFactoryProxy .devnet-interop/deployments/l2/900200/addresses.json)" >> "$chain_env/op-proposer.env" +# batcher +echo "OP_BATCHER_PRIVATE_KEY=$($key_cmd --name=batcher)" >> "$chain_env/op-batcher.env" + +chain_env=".devnet-interop/env/l2/900201" +mkdir -p "$chain_env" +key_cmd="go run ./op-node/cmd interop devkey secret --domain=chain-operator --chainid=900201" +# op-node +echo "OP_NODE_P2P_SEQUENCER_KEY=$($key_cmd --name=sequencer-p2p)" >> "$chain_env/op-node.env" +# proposer +echo "OP_PROPOSER_PRIVATE_KEY=$($key_cmd --name=proposer)" >> "$chain_env/op-proposer.env" +echo "OP_PROPOSER_GAME_FACTORY_ADDRESS=$(jq -r .DisputeGameFactoryProxy .devnet-interop/deployments/l2/900201/addresses.json)" >> "$chain_env/op-proposer.env" +# batcher +echo "OP_BATCHER_PRIVATE_KEY=$($key_cmd --name=batcher)" >> "$chain_env/op-batcher.env" + +echo "Interop devnet setup is complete!" diff --git a/interop-devnet/depset.json b/interop-devnet/depset.json new file mode 100644 index 000000000000..6f3600b1d296 --- /dev/null +++ b/interop-devnet/depset.json @@ -0,0 +1,14 @@ +{ + "dependencies": { + "900200": { + "chainIndex": "900200", + "activationTime": 0, + "historyMinTime": 0 + }, + "900201": { + "chainIndex": "900201", + "activationTime": 0, + "historyMinTime": 0 + } + } +} diff --git a/interop-devnet/docker-compose.yml b/interop-devnet/docker-compose.yml new file mode 100644 index 000000000000..97ddcbda655f --- /dev/null +++ b/interop-devnet/docker-compose.yml @@ -0,0 +1,354 @@ +# This Compose file is expected to be used with the devnet-up.sh script. +# The volumes below mount the configs generated by the script into each +# service. 
+ +volumes: + l1_data: + l1_bn_data: + l1_vc_data: + l2_a_data: + safedb_a_data: + l2_b_data: + safedb_b_data: + supervisor_data: + op_log_a: + op_log_b: + +services: + + l1: + build: + context: ../ops-bedrock + dockerfile: l1-geth.Dockerfile + ports: + - "8545:8545" + - "8546:8546" + - "7060:6060" + volumes: + - "l1_data:/db" + - "${PWD}/../.devnet-interop/genesis/l1/genesis.json:/genesis.json" + - "${PWD}/../ops-bedrock/test-jwt-secret.txt:/config/jwt-secret.txt" + environment: + GETH_MINER_RECOMMIT: 100ms + + l1-bn: + depends_on: + - l1 + build: + context: ../ops-bedrock + dockerfile: l1-lighthouse.Dockerfile + ports: + - "9000:9000" + - "5052:5052" + volumes: + - "l1_bn_data:/db" + - "${PWD}/../ops-bedrock/test-jwt-secret.txt:/config/jwt-secret.txt" + - "${PWD}/../ops-bedrock/beacon-data/config.yaml:/genesis/config.yaml" + - "${PWD}/../ops-bedrock/beacon-data/deposit_contract_block.txt:/genesis/deposit_contract_block.txt" + - "${PWD}/../.devnet-interop/genesis/l1/beaconstate.ssz:/genesis/genesis.ssz" + environment: + LH_EXECUTION_ENDPOINT: "http://l1:8551" + entrypoint: + - "/bin/sh" + - "/entrypoint-bn.sh" + + l1-vc: + depends_on: + - l1 + - l1-bn + build: + context: ../ops-bedrock + dockerfile: l1-lighthouse.Dockerfile + volumes: + - "l1_vc_data:/db" + - "${PWD}/../ops-bedrock/beacon-data/data/keys:/validator_setup/validators" + - "${PWD}/../ops-bedrock/beacon-data/data/secrets:/validator_setup/secrets" + - "${PWD}/../ops-bedrock/beacon-data/config.yaml:/genesis/config.yaml" + - "${PWD}/../ops-bedrock/beacon-data/deposit_contract_block.txt:/genesis/deposit_contract_block.txt" + - "${PWD}/../.devnet-interop/genesis/l1/beaconstate.ssz:/genesis/genesis.ssz" + environment: + LH_BEACON_NODES: "http://l1-bn:5052/" + entrypoint: + - "/bin/sh" + - "/entrypoint-vc.sh" + + op-supervisor: + depends_on: + - l1 + build: + context: ../ + dockerfile: ops/docker/op-stack-go/Dockerfile + target: op-supervisor-target + ports: + - "9045:8545" + volumes: + - "supervisor_data:/db" + - "./depset.json:/depset.json" + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-supervisor:devnet + command: > + op-supervisor + --datadir="/db" + --dependency-set="/depset.json" + --l2-rpcs="" + --rpc.addr="0.0.0.0" + --rpc.port=8545 + --rpc.enable-admin + --l2-rpcs="ws://l2-a:8546,ws://l2-b:8546" + + l2-a: + depends_on: + - op-supervisor + build: + context: ../ops-bedrock/ + dockerfile: l2-op-geth-interop.Dockerfile + ports: + - "9145:8545" + - "8160:6060" + volumes: + - "l2_a_data:/db" + - "${PWD}/../.devnet-interop/genesis/l2/900200/genesis.json:/genesis.json" + - "${PWD}/../ops-bedrock/test-jwt-secret.txt:/config/jwt-secret.txt" + entrypoint: # pass the L2 specific flags by overriding the entry-point and adding extra arguments + - "/bin/sh" + - "/entrypoint.sh" + environment: + GETH_MINER_RECOMMIT: 100ms + GETH_ROLLUP_INTEROPRPC: "ws://op-supervisor:8545" + + l2-b: + depends_on: + - op-supervisor + build: + context: ../ops-bedrock/ + dockerfile: l2-op-geth-interop.Dockerfile + ports: + - "9245:8545" + - "8260:6060" + volumes: + - "l2_b_data:/db" + - "${PWD}/../.devnet-interop/genesis/l2/900201/genesis.json:/genesis.json" + - "${PWD}/../ops-bedrock/test-jwt-secret.txt:/config/jwt-secret.txt" + entrypoint: # pass the L2 specific flags by overriding the entry-point and adding extra arguments + - "/bin/sh" + - "/entrypoint.sh" + environment: + GETH_MINER_RECOMMIT: 100ms + GETH_ROLLUP_INTEROPRPC: "ws://op-supervisor:8545" + + op-node-a: + depends_on: + - l1 + - l1-bn + - l1-vc + - l2-a + - op-supervisor + build: + 
context: ../ + dockerfile: ops/docker/op-stack-go/Dockerfile + target: op-node-target + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:devnet + command: > + op-node + --l1=ws://l1:8546 + --l1.beacon=http://l1-bn:5052 + --l1.epoch-poll-interval=12s + --l1.http-poll-interval=6s + --l2=http://l2-a:8551 + --l2.jwt-secret=/config/jwt-secret.txt + --supervisor=http://op-supervisor:8545 + --sequencer.enabled + --sequencer.l1-confs=0 + --verifier.l1-confs=0 + --rollup.config=/rollup.json + --rpc.addr=0.0.0.0 + --rpc.port=8545 + --p2p.listen.ip=0.0.0.0 + --p2p.listen.tcp=9003 + --p2p.listen.udp=9003 + --p2p.scoring.peers=light + --p2p.ban.peers=true + --metrics.enabled + --metrics.addr=0.0.0.0 + --metrics.port=7300 + --pprof.enabled + --rpc.enable-admin + --safedb.path=/db + ports: + - "7145:8545" + - "9103:9003" + - "7100:7300" + - "6160:6060" + volumes: + - "safedb_a_data:/db" + - "${PWD}/../ops-bedrock/test-jwt-secret.txt:/config/jwt-secret.txt" + - "${PWD}/../.devnet-interop/genesis/l2/900200/rollup.json:/rollup.json" + - op_log_a:/op_log + env_file: + - "${PWD}/../.devnet-interop/env/l2/900200/op-node.env" + + op-node-b: + depends_on: + - l1 + - l1-bn + - l1-vc + - l2-b + - op-supervisor + build: + context: ../ + dockerfile: ops/docker/op-stack-go/Dockerfile + target: op-node-target + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:devnet + command: > + op-node + --l1=ws://l1:8546 + --l1.beacon=http://l1-bn:5052 + --l1.epoch-poll-interval=12s + --l1.http-poll-interval=6s + --l2=http://l2-b:8551 + --l2.jwt-secret=/config/jwt-secret.txt + --supervisor=http://op-supervisor:8545 + --sequencer.enabled + --sequencer.l1-confs=0 + --verifier.l1-confs=0 + --rollup.config=/rollup.json + --rpc.addr=0.0.0.0 + --rpc.port=8545 + --p2p.listen.ip=0.0.0.0 + --p2p.listen.tcp=9003 + --p2p.listen.udp=9003 + --p2p.scoring.peers=light + --p2p.ban.peers=true + --metrics.enabled + --metrics.addr=0.0.0.0 + --metrics.port=7300 + --pprof.enabled + --rpc.enable-admin + --safedb.path=/db + ports: + - "7245:8545" + - "9203:9003" + - "7200:7300" + - "6260:6060" + volumes: + - "safedb_b_data:/db" + - "${PWD}/../ops-bedrock/test-jwt-secret.txt:/config/jwt-secret.txt" + - "${PWD}/../.devnet-interop/genesis/l2/900201/rollup.json:/rollup.json" + - op_log_b:/op_log + env_file: + - "${PWD}/../.devnet-interop/env/l2/900201/op-node.env" + + op-proposer-a: + depends_on: + - l1 + - op-node-a + build: + context: ../ + dockerfile: ops/docker/op-stack-go/Dockerfile + target: op-proposer-target + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-proposer:devnet + ports: + - "6162:6060" + - "7102:7300" + - "6146:8545" + environment: + OP_PROPOSER_L1_ETH_RPC: http://l1:8545 + OP_PROPOSER_ROLLUP_RPC: http://op-node-a:8545 + OP_PROPOSER_POLL_INTERVAL: 1s + OP_PROPOSER_NUM_CONFIRMATIONS: 1 + OP_PROPOSER_GAME_TYPE: "254" + OP_PROPOSER_PROPOSAL_INTERVAL: "12s" + OP_PROPOSER_PPROF_ENABLED: "true" + OP_PROPOSER_METRICS_ENABLED: "true" + OP_PROPOSER_ALLOW_NON_FINALIZED: "true" + OP_PROPOSER_RPC_ENABLE_ADMIN: "true" + env_file: + - "${PWD}/../.devnet-interop/env/l2/900200/op-proposer.env" + + op-proposer-b: + depends_on: + - l1 + - op-node-b + build: + context: ../ + dockerfile: ops/docker/op-stack-go/Dockerfile + target: op-proposer-target + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-proposer:devnet + ports: + - "6262:6060" + - "7202:7300" + - "6246:8545" + environment: + OP_PROPOSER_L1_ETH_RPC: http://l1:8545 + OP_PROPOSER_ROLLUP_RPC: http://op-node-b:8545 + OP_PROPOSER_POLL_INTERVAL: 1s + 
OP_PROPOSER_NUM_CONFIRMATIONS: 1 + OP_PROPOSER_GAME_TYPE: "254" + OP_PROPOSER_PROPOSAL_INTERVAL: "12s" + OP_PROPOSER_PPROF_ENABLED: "true" + OP_PROPOSER_METRICS_ENABLED: "true" + OP_PROPOSER_ALLOW_NON_FINALIZED: "true" + OP_PROPOSER_RPC_ENABLE_ADMIN: "true" + env_file: + - "${PWD}/../.devnet-interop/env/l2/900201/op-proposer.env" + + op-batcher-a: + depends_on: + - l1 + - l2-a + - op-node-a + build: + context: ../ + dockerfile: ops/docker/op-stack-go/Dockerfile + target: op-batcher-target + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-batcher:devnet + ports: + - "6161:6060" + - "7101:7300" + - "6145:8545" + environment: + OP_BATCHER_L1_ETH_RPC: http://l1:8545 + OP_BATCHER_L2_ETH_RPC: http://l2-a:8545 + OP_BATCHER_ROLLUP_RPC: http://op-node-a:8545 + OP_BATCHER_MAX_CHANNEL_DURATION: 2 + OP_BATCHER_SUB_SAFETY_MARGIN: 4 # SWS is 15, ChannelTimeout is 40 + OP_BATCHER_POLL_INTERVAL: 1s + OP_BATCHER_NUM_CONFIRMATIONS: 1 + OP_BATCHER_PPROF_ENABLED: "true" + OP_BATCHER_METRICS_ENABLED: "true" + OP_BATCHER_RPC_ENABLE_ADMIN: "true" + OP_BATCHER_BATCH_TYPE: + # uncomment to use blobs + # OP_BATCHER_DATA_AVAILABILITY_TYPE: blobs + env_file: + - "${PWD}/../.devnet-interop/env/l2/900200/op-batcher.env" + + op-batcher-b: + depends_on: + - l1 + - l2-b + - op-node-b + build: + context: ../ + dockerfile: ops/docker/op-stack-go/Dockerfile + target: op-batcher-target + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-batcher:devnet + ports: + - "6261:6060" + - "7201:7300" + - "6245:8545" + environment: + OP_BATCHER_L1_ETH_RPC: http://l1:8545 + OP_BATCHER_L2_ETH_RPC: http://l2-b:8545 + OP_BATCHER_ROLLUP_RPC: http://op-node-b:8545 + OP_BATCHER_MAX_CHANNEL_DURATION: 2 + OP_BATCHER_SUB_SAFETY_MARGIN: 4 # SWS is 15, ChannelTimeout is 40 + OP_BATCHER_POLL_INTERVAL: 1s + OP_BATCHER_NUM_CONFIRMATIONS: 1 + OP_BATCHER_PPROF_ENABLED: "true" + OP_BATCHER_METRICS_ENABLED: "true" + OP_BATCHER_RPC_ENABLE_ADMIN: "true" + OP_BATCHER_BATCH_TYPE: + # uncomment to use blobs + # OP_BATCHER_DATA_AVAILABILITY_TYPE: blobs + env_file: + - "${PWD}/../.devnet-interop/env/l2/900201/op-batcher.env" diff --git a/interop-devnet/justfile b/interop-devnet/justfile new file mode 100644 index 000000000000..7aa215dbbd7f --- /dev/null +++ b/interop-devnet/justfile @@ -0,0 +1,33 @@ + +devnet-setup: + bash create-chains.sh + +devnet-build-images: + PWD="$(pwd)" DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 \ + docker compose build --progress plain \ + --build-arg GIT_COMMIT={git_commit} \ + --build-arg GIT_DATE={git_date} + +devnet-up: + docker compose up -d l1 l1-bn l1-vc + + docker compose up -d \ + op-supervisor \ + op-node-a op-batcher-a op-proposer-a \ + op-node-b op-batcher-b op-proposer-b + +devnet-down: + # stops services, does not remove containers/networks + docker compose stop + +devnet-clean: + rm -rf ../.devnet-interop + # Stops services, and removes containers/networks + docker compose down + # Now manually clean up the related images and volumes + # Note: `justfile` interprets the curly brackets. So we escape them, by wrapping it with more, as a string, like Jinja2. 
+ docker image ls 'interop-devnet*' --format='{{ '{{.Repository}}' }}' | xargs -r docker rmi + docker volume ls --filter name=interop-devnet --format='{{ '{{.Name}}' }}' | xargs -r docker volume rm + +devnet-logs: + docker compose logs -f diff --git a/op-chain-ops/devkeys/devkeys.go b/op-chain-ops/devkeys/devkeys.go index d0526c8c1d3a..fdb151e4e6fc 100644 --- a/op-chain-ops/devkeys/devkeys.go +++ b/op-chain-ops/devkeys/devkeys.go @@ -93,6 +93,21 @@ func (role SuperchainOperatorRole) Key(chainID *big.Int) Key { } } +func (role *SuperchainOperatorRole) UnmarshalText(data []byte) error { + v := string(data) + for i := SuperchainOperatorRole(0); i < 20; i++ { + if i.String() == v { + *role = i + return nil + } + } + return fmt.Errorf("unknown superchain operator role %q", v) +} + +func (role *SuperchainOperatorRole) MarshalText() ([]byte, error) { + return []byte(role.String()), nil +} + // SuperchainOperatorKey is an account specific to an OperationRole of a given OP-Stack chain. type SuperchainOperatorKey struct { ChainID *big.Int @@ -181,6 +196,21 @@ func (role ChainOperatorRole) Key(chainID *big.Int) Key { } } +func (role *ChainOperatorRole) UnmarshalText(data []byte) error { + v := string(data) + for i := ChainOperatorRole(0); i < 20; i++ { + if i.String() == v { + *role = i + return nil + } + } + return fmt.Errorf("unknown chain operator role %q", v) +} + +func (role *ChainOperatorRole) MarshalText() ([]byte, error) { + return []byte(role.String()), nil +} + // ChainOperatorKey is an account specific to an OperationRole of a given OP-Stack chain. type ChainOperatorKey struct { ChainID *big.Int diff --git a/op-node/cmd/interop/interop.go b/op-node/cmd/interop/interop.go new file mode 100644 index 000000000000..3e6f75d530bb --- /dev/null +++ b/op-node/cmd/interop/interop.go @@ -0,0 +1,299 @@ +package interop + +import ( + "fmt" + "math/big" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/urfave/cli/v2" + + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" + "github.com/ethereum-optimism/optimism/op-chain-ops/interopgen" + op_service "github.com/ethereum-optimism/optimism/op-service" + "github.com/ethereum-optimism/optimism/op-service/cliapp" + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum-optimism/optimism/op-service/jsonutil" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum/go-ethereum/crypto" +) + +var EnvPrefix = "OP_INTEROP" + +var ( + l1ChainIDFlag = &cli.Uint64Flag{ + Name: "l1.chainid", + Value: 900100, + EnvVars: op_service.PrefixEnvVar(EnvPrefix, "L1_CHAINID"), + } + l2ChainIDsFlag = &cli.Uint64SliceFlag{ + Name: "l2.chainids", + Value: cli.NewUint64Slice(900200, 900201), + EnvVars: op_service.PrefixEnvVar(EnvPrefix, "L2_CHAINIDS"), + } + timestampFlag = &cli.Uint64Flag{ + Name: "timestamp", + Value: 0, + EnvVars: op_service.PrefixEnvVar(EnvPrefix, "TIMESTAMP"), + Usage: "Will use current timestamp, plus 5 seconds, if not set", + } + artifactsDirFlag = &cli.StringFlag{ + Name: "artifacts-dir", + Value: "packages/contracts-bedrock/forge-artifacts", + EnvVars: op_service.PrefixEnvVar(EnvPrefix, "ARTIFACTS_DIR"), + } + foundryDirFlag = &cli.StringFlag{ + Name: "foundry-dir", + Value: "packages/contracts-bedrock", + EnvVars: op_service.PrefixEnvVar(EnvPrefix, "FOUNDRY_DIR"), + Usage: "Optional, for source-map info during genesis generation", + } + outDirFlag = &cli.StringFlag{ + Name: "out-dir", + Value: 
".interop-devnet", + EnvVars: op_service.PrefixEnvVar(EnvPrefix, "OUT_DIR"), + } + // used in both dev-setup and devkey commands + mnemonicFlag = &cli.StringFlag{ + Name: "mnemonic", + Value: devkeys.TestMnemonic, + EnvVars: op_service.PrefixEnvVar(EnvPrefix, "MNEMONIC"), + } + // for devkey command + devkeyDomainFlag = &cli.StringFlag{ + Name: "domain", + Value: "chain-operator", + EnvVars: op_service.PrefixEnvVar(EnvPrefix, "DEVKEY_DOMAIN"), + } + devkeyChainIdFlag = &cli.Uint64Flag{ + Name: "chainid", + Value: 0, + EnvVars: op_service.PrefixEnvVar(EnvPrefix, "DEVKEY_CHAINID"), + } + devkeyNameFlag = &cli.StringFlag{ + Name: "name", + EnvVars: op_service.PrefixEnvVar(EnvPrefix, "DEVKEY_NAME"), + } +) + +var InteropDevSetup = &cli.Command{ + Name: "dev-setup", + Usage: "Generate devnet genesis configs with one L1 and multiple L2s", + Flags: cliapp.ProtectFlags(append([]cli.Flag{ + l1ChainIDFlag, + l2ChainIDsFlag, + timestampFlag, + mnemonicFlag, + artifactsDirFlag, + foundryDirFlag, + outDirFlag, + }, oplog.CLIFlags(EnvPrefix)...)), + Action: func(cliCtx *cli.Context) error { + logCfg := oplog.ReadCLIConfig(cliCtx) + logger := oplog.NewLogger(cliCtx.App.Writer, logCfg) + + recipe := &interopgen.InteropDevRecipe{ + L1ChainID: cliCtx.Uint64(l1ChainIDFlag.Name), + L2ChainIDs: cliCtx.Uint64Slice(l2ChainIDsFlag.Name), + GenesisTimestamp: cliCtx.Uint64(timestampFlag.Name), + } + if recipe.GenesisTimestamp == 0 { + recipe.GenesisTimestamp = uint64(time.Now().Unix() + 5) + } + mnemonic := strings.TrimSpace(cliCtx.String(mnemonicFlag.Name)) + if mnemonic == devkeys.TestMnemonic { + logger.Warn("Using default test mnemonic!") + } + keys, err := devkeys.NewMnemonicDevKeys(mnemonic) + if err != nil { + return fmt.Errorf("failed to setup dev keys from mnemonic: %w", err) + } + worldCfg, err := recipe.Build(keys) + if err != nil { + return fmt.Errorf("failed to build deploy configs from interop recipe: %w", err) + } + if err := worldCfg.Check(logger); err != nil { + return fmt.Errorf("invalid deploy configs: %w", err) + } + artifactsDir := cliCtx.String(artifactsDirFlag.Name) + af := foundry.OpenArtifactsDir(artifactsDir) + var srcFs *foundry.SourceMapFS + if cliCtx.IsSet(foundryDirFlag.Name) { + srcDir := cliCtx.String(foundryDirFlag.Name) + srcFs = foundry.NewSourceMapFS(os.DirFS(srcDir)) + } + worldDeployment, worldOutput, err := interopgen.Deploy(logger, af, srcFs, worldCfg) + if err != nil { + return fmt.Errorf("failed to deploy interop dev setup: %w", err) + } + outDir := cliCtx.String(outDirFlag.Name) + // Write deployments + { + deploymentsDir := filepath.Join(outDir, "deployments") + l1Dir := filepath.Join(deploymentsDir, "l1") + if err := writeJson(filepath.Join(l1Dir, "common.json"), worldDeployment.L1); err != nil { + return fmt.Errorf("failed to write L1 deployment data: %w", err) + } + if err := writeJson(filepath.Join(l1Dir, "superchain.json"), worldDeployment.Superchain); err != nil { + return fmt.Errorf("failed to write Superchain deployment data: %w", err) + } + l2sDir := filepath.Join(deploymentsDir, "l2") + for id, dep := range worldDeployment.L2s { + l2Dir := filepath.Join(l2sDir, id) + if err := writeJson(filepath.Join(l2Dir, "addresses.json"), dep); err != nil { + return fmt.Errorf("failed to write L2 %s deployment data: %w", id, err) + } + } + } + // write genesis + { + genesisDir := filepath.Join(outDir, "genesis") + l1Dir := filepath.Join(genesisDir, "l1") + if err := writeJson(filepath.Join(l1Dir, "genesis.json"), worldOutput.L1.Genesis); err != nil { + return 
fmt.Errorf("failed to write L1 genesis data: %w", err) + } + l2sDir := filepath.Join(genesisDir, "l2") + for id, dep := range worldOutput.L2s { + l2Dir := filepath.Join(l2sDir, id) + if err := writeJson(filepath.Join(l2Dir, "genesis.json"), dep.Genesis); err != nil { + return fmt.Errorf("failed to write L2 %s genesis config: %w", id, err) + } + if err := writeJson(filepath.Join(l2Dir, "rollup.json"), dep.RollupCfg); err != nil { + return fmt.Errorf("failed to write L2 %s rollup config: %w", id, err) + } + } + } + return nil + }, +} + +func writeJson(path string, content any) error { + return jsonutil.WriteJSON[any](content, ioutil.ToBasicFile(path, 0o755)) +} + +var DevKeySecretCmd = &cli.Command{ + Name: "secret", + Usage: "Retrieve devkey secret, by specifying domain, chain ID, name.", + Flags: cliapp.ProtectFlags([]cli.Flag{ + mnemonicFlag, + devkeyDomainFlag, + devkeyChainIdFlag, + devkeyNameFlag, + }), + Action: func(context *cli.Context) error { + mnemonic := context.String(mnemonicFlag.Name) + domain := context.String(devkeyDomainFlag.Name) + chainID := context.Uint64(devkeyChainIdFlag.Name) + chainIDBig := new(big.Int).SetUint64(chainID) + name := context.String(devkeyNameFlag.Name) + k, err := parseKey(domain, chainIDBig, name) + if err != nil { + return err + } + mnemonicKeys, err := devkeys.NewMnemonicDevKeys(mnemonic) + if err != nil { + return err + } + secret, err := mnemonicKeys.Secret(k) + if err != nil { + return err + } + secretBin := crypto.FromECDSA(secret) + _, err = fmt.Fprintf(context.App.Writer, "%x", secretBin) + if err != nil { + return fmt.Errorf("failed to output secret key: %w", err) + } + return nil + }, +} + +var DevKeyAddressCmd = &cli.Command{ + Name: "address", + Usage: "Retrieve devkey address, by specifying domain, chain ID, name.", + Flags: cliapp.ProtectFlags([]cli.Flag{ + mnemonicFlag, + devkeyDomainFlag, + devkeyChainIdFlag, + devkeyNameFlag, + }), + Action: func(context *cli.Context) error { + mnemonic := context.String(mnemonicFlag.Name) + domain := context.String(devkeyDomainFlag.Name) + chainID := context.Uint64(devkeyChainIdFlag.Name) + chainIDBig := new(big.Int).SetUint64(chainID) + name := context.String(devkeyNameFlag.Name) + k, err := parseKey(domain, chainIDBig, name) + if err != nil { + return err + } + mnemonicKeys, err := devkeys.NewMnemonicDevKeys(mnemonic) + if err != nil { + return err + } + addr, err := mnemonicKeys.Address(k) + if err != nil { + return err + } + _, err = fmt.Fprintf(context.App.Writer, "%s", addr) + if err != nil { + return fmt.Errorf("failed to output address: %w", err) + } + return nil + }, +} + +var DevKeyCmd = &cli.Command{ + Name: "devkey", + Usage: "Retrieve devkey secret or address", + Subcommands: cli.Commands{ + DevKeySecretCmd, + DevKeyAddressCmd, + }, +} + +func parseKey(domain string, chainID *big.Int, name string) (devkeys.Key, error) { + switch domain { + case "user": + index, err := strconv.ParseUint(name, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse user index: %w", err) + } + return devkeys.ChainUserKey{ + ChainID: chainID, + Index: index, + }, nil + case "chain-operator": + var role devkeys.ChainOperatorRole + if err := role.UnmarshalText([]byte(name)); err != nil { + return nil, fmt.Errorf("failed to parse chain operator role: %w", err) + } + return devkeys.ChainOperatorKey{ + ChainID: chainID, + Role: role, + }, nil + case "superchain-operator": + var role devkeys.SuperchainOperatorRole + if err := role.UnmarshalText([]byte(name)); err != nil { + return nil, 
fmt.Errorf("failed to parse chain operator role: %w", err) + } + return devkeys.SuperchainOperatorKey{ + ChainID: chainID, + Role: role, + }, nil + default: + return nil, fmt.Errorf("unknown devkey domain %q", domain) + } +} + +var InteropCmd = &cli.Command{ + Name: "interop", + Usage: "Experimental tools for OP-Stack interop networks.", + Subcommands: cli.Commands{ + InteropDevSetup, + DevKeyCmd, + }, +} diff --git a/op-node/cmd/main.go b/op-node/cmd/main.go index 8f6688b51cbf..b82b3f6babce 100644 --- a/op-node/cmd/main.go +++ b/op-node/cmd/main.go @@ -12,6 +12,7 @@ import ( opnode "github.com/ethereum-optimism/optimism/op-node" "github.com/ethereum-optimism/optimism/op-node/chaincfg" "github.com/ethereum-optimism/optimism/op-node/cmd/genesis" + "github.com/ethereum-optimism/optimism/op-node/cmd/interop" "github.com/ethereum-optimism/optimism/op-node/cmd/networks" "github.com/ethereum-optimism/optimism/op-node/cmd/p2p" "github.com/ethereum-optimism/optimism/op-node/flags" @@ -62,6 +63,7 @@ func main() { Name: "networks", Subcommands: networks.Subcommands, }, + interop.InteropCmd, } ctx := ctxinterrupt.WithSignalWaiterMain(context.Background()) diff --git a/op-node/flags/flags.go b/op-node/flags/flags.go index 54334c150296..de94b59c0e04 100644 --- a/op-node/flags/flags.go +++ b/op-node/flags/flags.go @@ -73,6 +73,7 @@ var ( EnvVars: prefixEnvVars("L1_BEACON"), Category: RollupCategory, } + /* Optional Flags */ SupervisorAddr = &cli.StringFlag{ Name: "supervisor", Usage: "RPC address of interop supervisor service for cross-chain safety verification." + @@ -80,7 +81,6 @@ var ( Hidden: true, // hidden for now during early testing. EnvVars: prefixEnvVars("SUPERVISOR"), } - /* Optional Flags */ BeaconHeader = &cli.StringFlag{ Name: "l1.beacon-header", Usage: "Optional HTTP header to add to all requests to the L1 Beacon endpoint. 
Format: 'X-Key: Value'", diff --git a/op-node/rollup/interop/interop.go b/op-node/rollup/interop/interop.go index c929c8f0d01d..a4342b6a19f6 100644 --- a/op-node/rollup/interop/interop.go +++ b/op-node/rollup/interop/interop.go @@ -27,7 +27,7 @@ type InteropBackend interface { SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error) Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error) - DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.L1BlockRef, error) + CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.L1BlockRef, error) UpdateLocalUnsafe(ctx context.Context, chainID types.ChainID, head eth.BlockRef) error UpdateLocalSafe(ctx context.Context, chainID types.ChainID, derivedFrom eth.L1BlockRef, lastDerived eth.BlockRef) error @@ -232,10 +232,11 @@ func (d *InteropDeriver) onCrossSafeUpdateEvent(x engine.CrossSafeUpdateEvent) e Hash: result.Cross.Hash, Number: result.Cross.Number, } - derivedFrom, err := d.backend.DerivedFrom(ctx, d.chainID, derived) + derivedFrom, err := d.backend.CrossDerivedFrom(ctx, d.chainID, derived) if err != nil { return fmt.Errorf("failed to get derived-from of %s: %w", result.Cross, err) } + d.log.Info("New cross-safe block", "block", result.Cross.Number) ref, err := d.l2.L2BlockRefByHash(ctx, result.Cross.Hash) if err != nil { return fmt.Errorf("failed to get block ref of %s: %w", result.Cross, err) @@ -272,6 +273,7 @@ func (d *InteropDeriver) onFinalizedUpdate(x engine.FinalizedUpdateEvent) error if err != nil { return fmt.Errorf("failed to get block ref of %s: %w", finalized, err) } + d.log.Info("New finalized block from supervisor", "block", finalized.Number) d.emitter.Emit(engine.PromoteFinalizedEvent{ Ref: ref, }) diff --git a/op-service/ioutil/streams.go b/op-service/ioutil/streams.go index 91f122906db0..c35aefa202ef 100644 --- a/op-service/ioutil/streams.go +++ b/op-service/ioutil/streams.go @@ -1,8 +1,10 @@ package ioutil import ( + "fmt" "io" "os" + "path/filepath" ) var ( @@ -21,6 +23,20 @@ func NoOutputStream() OutputTarget { } } +func ToBasicFile(path string, perm os.FileMode) OutputTarget { + return func() (io.Writer, io.Closer, Aborter, error) { + outDir := filepath.Dir(path) + if err := os.MkdirAll(outDir, perm); err != nil { + return nil, nil, nil, fmt.Errorf("failed to create dir %q: %w", outDir, err) + } + f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, perm) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to open %q: %w", path, err) + } + return f, f, func() {}, nil + } +} + func ToAtomicFile(path string, perm os.FileMode) OutputTarget { return func() (io.Writer, io.Closer, Aborter, error) { f, err := NewAtomicWriterCompressed(path, perm) diff --git a/op-service/jsonutil/json.go b/op-service/jsonutil/json.go index 5993138595b4..8549c170d42a 100644 --- a/op-service/jsonutil/json.go +++ b/op-service/jsonutil/json.go @@ -67,6 +67,7 @@ type jsonEncoder struct { func newJSONEncoder(w io.Writer) Encoder { e := json.NewEncoder(w) e.SetIndent("", " ") + e.SetEscapeHTML(false) return &jsonEncoder{ e: e, } diff --git a/op-service/locks/rwmap.go b/op-service/locks/rwmap.go new file mode 100644 index 000000000000..779e3554c824 --- /dev/null +++ b/op-service/locks/rwmap.go @@ -0,0 +1,48 @@ +package locks + +import "sync" + +// RWMap is a simple wrapper around a map, with global Read-Write protection. 
+// For many concurrent reads/writes a sync.Map may be more performant, +// although it does not utilize Go generics. +// The RWMap does not have to be initialized, +// it is immediately ready for reads/writes. +type RWMap[K comparable, V any] struct { + inner map[K]V + mu sync.RWMutex +} + +func (m *RWMap[K, V]) Has(key K) (ok bool) { + m.mu.RLock() + defer m.mu.RUnlock() + _, ok = m.inner[key] + return +} + +func (m *RWMap[K, V]) Get(key K) (value V, ok bool) { + m.mu.RLock() + defer m.mu.RUnlock() + value, ok = m.inner[key] + return +} + +func (m *RWMap[K, V]) Set(key K, value V) { + m.mu.Lock() + defer m.mu.Unlock() + if m.inner == nil { + m.inner = make(map[K]V) + } + m.inner[key] = value +} + +// Range calls f sequentially for each key and value present in the map. +// If f returns false, range stops the iteration. +func (m *RWMap[K, V]) Range(f func(key K, value V) bool) { + m.mu.RLock() + defer m.mu.RUnlock() + for k, v := range m.inner { + if !f(k, v) { + break + } + } +} diff --git a/op-service/locks/rwmap_test.go b/op-service/locks/rwmap_test.go new file mode 100644 index 000000000000..c78fab97034b --- /dev/null +++ b/op-service/locks/rwmap_test.go @@ -0,0 +1,52 @@ +package locks + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestRWMap(t *testing.T) { + m := &RWMap[uint64, int64]{} + + // get on new map + v, ok := m.Get(123) + require.False(t, ok) + require.Equal(t, int64(0), v) + + // set a value + m.Set(123, 42) + v, ok = m.Get(123) + require.True(t, ok) + require.Equal(t, int64(42), v) + + // overwrite a value + m.Set(123, -42) + v, ok = m.Get(123) + require.True(t, ok) + require.Equal(t, int64(-42), v) + + // add a value + m.Set(10, 100) + + // range over values + got := make(map[uint64]int64) + m.Range(func(key uint64, value int64) bool { + if _, ok := got[key]; ok { + panic("duplicate") + } + got[key] = value + return true + }) + require.Len(t, got, 2) + require.Equal(t, int64(100), got[uint64(10)]) + require.Equal(t, int64(-42), got[uint64(123)]) + + // range and stop early + clear(got) + m.Range(func(key uint64, value int64) bool { + got[key] = value + return false + }) + require.Len(t, got, 1, "stop early") +} diff --git a/op-service/locks/rwvalue.go b/op-service/locks/rwvalue.go new file mode 100644 index 000000000000..12ca65e61d73 --- /dev/null +++ b/op-service/locks/rwvalue.go @@ -0,0 +1,24 @@ +package locks + +import "sync" + +// RWValue is a simple container struct, to deconflict reads/writes of the value, +// without locking up a bigger structure in the caller. +// It exposes the underlying RWLock and Value for direct access where needed. 
+type RWValue[E any] struct { + sync.RWMutex + Value E +} + +func (c *RWValue[E]) Get() (out E) { + c.RLock() + defer c.RUnlock() + out = c.Value + return +} + +func (c *RWValue[E]) Set(v E) { + c.Lock() + defer c.Unlock() + c.Value = v +} diff --git a/op-service/locks/rwvalue_test.go b/op-service/locks/rwvalue_test.go new file mode 100644 index 000000000000..f99d9345a1cc --- /dev/null +++ b/op-service/locks/rwvalue_test.go @@ -0,0 +1,16 @@ +package locks + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestRWValue(t *testing.T) { + v := &RWValue[uint64]{} + require.Equal(t, uint64(0), v.Get()) + v.Set(123) + require.Equal(t, uint64(123), v.Get()) + v.Set(42) + require.Equal(t, uint64(42), v.Get()) +} diff --git a/op-service/sources/supervisor_client.go b/op-service/sources/supervisor_client.go index ba8663c335ce..d6191b9cfb20 100644 --- a/op-service/sources/supervisor_client.go +++ b/op-service/sources/supervisor_client.go @@ -114,12 +114,12 @@ func (cl *SupervisorClient) Finalized(ctx context.Context, chainID types.ChainID return result, err } -func (cl *SupervisorClient) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.BlockRef, error) { +func (cl *SupervisorClient) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.BlockRef, error) { var result eth.BlockRef err := cl.client.CallContext( ctx, &result, - "supervisor_derivedFrom", + "supervisor_crossDerivedFrom", chainID, derived) return result, err diff --git a/op-service/testutils/fake_interop_backend.go b/op-service/testutils/fake_interop_backend.go index 4ef439a9a3bc..4c8624d53552 100644 --- a/op-service/testutils/fake_interop_backend.go +++ b/op-service/testutils/fake_interop_backend.go @@ -29,7 +29,7 @@ func (m *FakeInteropBackend) Finalized(ctx context.Context, chainID types.ChainI return m.FinalizedFn(ctx, chainID) } -func (m *FakeInteropBackend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.L1BlockRef, error) { +func (m *FakeInteropBackend) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.L1BlockRef, error) { return m.DerivedFromFn(ctx, chainID, derived) } diff --git a/op-service/testutils/mock_interop_backend.go b/op-service/testutils/mock_interop_backend.go index 6724acedd43f..af6762c204cd 100644 --- a/op-service/testutils/mock_interop_backend.go +++ b/op-service/testutils/mock_interop_backend.go @@ -58,13 +58,13 @@ func (m *MockInteropBackend) ExpectFinalized(chainID types.ChainID, result eth.B m.Mock.On("Finalized", chainID).Once().Return(result, &err) } -func (m *MockInteropBackend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.L1BlockRef, error) { - result := m.Mock.MethodCalled("DerivedFrom", chainID, derived) +func (m *MockInteropBackend) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.L1BlockRef, error) { + result := m.Mock.MethodCalled("CrossDerivedFrom", chainID, derived) return result.Get(0).(eth.L1BlockRef), *result.Get(1).(*error) } func (m *MockInteropBackend) ExpectDerivedFrom(chainID types.ChainID, derived eth.BlockID, result eth.L1BlockRef, err error) { - m.Mock.On("DerivedFrom", chainID, derived).Once().Return(result, &err) + m.Mock.On("CrossDerivedFrom", chainID, derived).Once().Return(result, &err) } func (m *MockInteropBackend) UpdateLocalUnsafe(ctx context.Context, chainID types.ChainID, head eth.BlockRef) error { diff --git 
a/op-supervisor/supervisor/backend/backend.go b/op-supervisor/supervisor/backend/backend.go index ff5d5575cd31..d4328f6932c7 100644 --- a/op-supervisor/supervisor/backend/backend.go +++ b/op-supervisor/supervisor/backend/backend.go @@ -402,11 +402,11 @@ func (su *SupervisorBackend) Finalized(ctx context.Context, chainID types.ChainI return v.ID(), nil } -func (su *SupervisorBackend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) { +func (su *SupervisorBackend) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) { su.mu.RLock() defer su.mu.RUnlock() - v, err := su.chainDBs.DerivedFrom(chainID, derived) + v, err := su.chainDBs.CrossDerivedFromBlockRef(chainID, derived) if err != nil { return eth.BlockRef{}, err } diff --git a/op-supervisor/supervisor/backend/cross/safe_start_test.go b/op-supervisor/supervisor/backend/cross/safe_start_test.go index cb6bd4757214..1a5924e06963 100644 --- a/op-supervisor/supervisor/backend/cross/safe_start_test.go +++ b/op-supervisor/supervisor/backend/cross/safe_start_test.go @@ -223,7 +223,7 @@ func TestCrossSafeHazards(t *testing.T) { require.ErrorContains(t, err, "some error") require.Empty(t, hazards) }) - t.Run("timestamp is less, DerivedFrom returns error", func(t *testing.T) { + t.Run("timestamp is less, CrossDerivedFrom returns error", func(t *testing.T) { ssd := &mockSafeStartDeps{} sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})} ssd.checkFn = func() (includedIn types.BlockSeal, err error) { @@ -245,7 +245,7 @@ func TestCrossSafeHazards(t *testing.T) { require.ErrorContains(t, err, "some error") require.Empty(t, hazards) }) - t.Run("timestamp is less, DerivedFrom Number is greater", func(t *testing.T) { + t.Run("timestamp is less, CrossDerivedFrom Number is greater", func(t *testing.T) { ssd := &mockSafeStartDeps{} sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})} ssd.checkFn = func() (includedIn types.BlockSeal, err error) { @@ -268,7 +268,7 @@ func TestCrossSafeHazards(t *testing.T) { require.ErrorIs(t, err, types.ErrOutOfScope) require.Empty(t, hazards) }) - t.Run("timestamp is less, DerivedFrom Number less", func(t *testing.T) { + t.Run("timestamp is less, CrossDerivedFrom Number less", func(t *testing.T) { ssd := &mockSafeStartDeps{} sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})} ssd.checkFn = func() (includedIn types.BlockSeal, err error) { @@ -291,7 +291,7 @@ func TestCrossSafeHazards(t *testing.T) { require.NoError(t, err) require.Empty(t, hazards) }) - t.Run("timestamp is less, DerivedFrom Number equal", func(t *testing.T) { + t.Run("timestamp is less, CrossDerivedFrom Number equal", func(t *testing.T) { ssd := &mockSafeStartDeps{} sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})} ssd.checkFn = func() (includedIn types.BlockSeal, err error) { diff --git a/op-supervisor/supervisor/backend/cross/worker.go b/op-supervisor/supervisor/backend/cross/worker.go index 1342d7048fab..b80c78442c6b 100644 --- a/op-supervisor/supervisor/backend/cross/worker.go +++ b/op-supervisor/supervisor/backend/cross/worker.go @@ -6,6 +6,7 @@ import ( "sync" "time" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum/go-ethereum/log" ) @@ -37,7 +38,7 @@ func NewWorker(log log.Logger, workFn workFn) *Worker { log: log, poke: make(chan struct{}, 1), // 
The data may have changed, and we may have missed a poke, so re-attempt regularly. - pollDuration: time.Second * 4, + pollDuration: 250 * time.Millisecond, ctx: ctx, cancel: cancel, } @@ -69,7 +70,11 @@ func (s *Worker) worker() { if errors.Is(err, s.ctx.Err()) { return } - s.log.Error("Failed to process work", "err", err) + if errors.Is(err, types.ErrFuture) { + s.log.Debug("Failed to process work", "err", err) + } else { + s.log.Warn("Failed to process work", "err", err) + } } // await next time we process, or detect shutdown diff --git a/op-supervisor/supervisor/backend/db/db.go b/op-supervisor/supervisor/backend/db/db.go index 922f849dea05..b667718759b7 100644 --- a/op-supervisor/supervisor/backend/db/db.go +++ b/op-supervisor/supervisor/backend/db/db.go @@ -4,12 +4,12 @@ import ( "errors" "fmt" "io" - "sync" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/locks" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/fromda" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" @@ -73,28 +73,23 @@ var _ LogStorage = (*logs.DB)(nil) // ChainsDB is a database that stores logs and derived-from data for multiple chains. // it implements the LogStorage interface, as well as several DB interfaces needed by the cross package. type ChainsDB struct { - // RW mutex: - // Read = chains can be read / mutated. - // Write = set of chains is changing. - mu sync.RWMutex - // unsafe info: the sequence of block seals and events - logDBs map[types.ChainID]LogStorage + logDBs locks.RWMap[types.ChainID, LogStorage] // cross-unsafe: how far we have processed the unsafe data. // If present but set to a zeroed value the cross-unsafe will fallback to cross-safe. - crossUnsafe map[types.ChainID]types.BlockSeal + crossUnsafe locks.RWMap[types.ChainID, *locks.RWValue[types.BlockSeal]] // local-safe: index of what we optimistically know about L2 blocks being derived from L1 - localDBs map[types.ChainID]LocalDerivedFromStorage + localDBs locks.RWMap[types.ChainID, LocalDerivedFromStorage] // cross-safe: index of L2 blocks we know to only have cross-L2 valid dependencies - crossDBs map[types.ChainID]CrossDerivedFromStorage + crossDBs locks.RWMap[types.ChainID, CrossDerivedFromStorage] // finalized: the L1 finality progress. This can be translated into what may be considered as finalized in L2. // It is initially zeroed, and the L2 finality query will return // an error until it has this L1 finality to work with. - finalizedL1 eth.L1BlockRef + finalizedL1 locks.RWValue[eth.L1BlockRef] // depSet is the dependency set, used to determine what may be tracked, // what is missing, and to provide it to DB users. 
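Aside on the locking refactor above: ChainsDB drops its struct-wide sync.RWMutex in favor of the locks.RWMap and locks.RWValue primitives added in op-service/locks earlier in this patch. Below is a minimal usage sketch of that pattern; the registry, chainID and logStorage names are illustrative stand-ins (not part of this change) for ChainsDB, types.ChainID and LogStorage.

package locksexample

import "github.com/ethereum-optimism/optimism/op-service/locks"

// chainID and logStorage are simplified stand-ins for types.ChainID and the LogStorage interface.
type chainID string

type logStorage interface {
	Close() error
}

// registry mirrors the ChainsDB pattern: no struct-wide mutex, each field
// carries its own read/write protection.
type registry struct {
	logDBs      locks.RWMap[chainID, logStorage]
	finalizedL1 locks.RWValue[uint64]
}

func (r *registry) add(id chainID, db logStorage) {
	// RWMap.Set lazily initializes the inner map, so the zero value needs no constructor.
	r.logDBs.Set(id, db)
}

func (r *registry) closeAll() error {
	var result error
	// Range holds the read-lock for the whole iteration; returning true continues.
	r.logDBs.Range(func(id chainID, db logStorage) bool {
		if err := db.Close(); err != nil && result == nil {
			result = err
		}
		return true
	})
	return result
}

func (r *registry) markFinalized(num uint64) {
	// RWValue guards a single value without locking unrelated state.
	r.finalizedL1.Set(num)
}

This per-field locking is what lets the query.go and update.go hunks below drop the db.mu acquisitions: each lookup now goes through the map's own lock instead of one global mutex.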
@@ -105,78 +100,62 @@ type ChainsDB struct { func NewChainsDB(l log.Logger, depSet depset.DependencySet) *ChainsDB { return &ChainsDB{ - logDBs: make(map[types.ChainID]LogStorage), - logger: l, - localDBs: make(map[types.ChainID]LocalDerivedFromStorage), - crossDBs: make(map[types.ChainID]CrossDerivedFromStorage), - crossUnsafe: make(map[types.ChainID]types.BlockSeal), - depSet: depSet, + logger: l, + depSet: depSet, } } func (db *ChainsDB) AddLogDB(chainID types.ChainID, logDB LogStorage) { - db.mu.Lock() - defer db.mu.Unlock() - - if _, ok := db.logDBs[chainID]; ok { + if db.logDBs.Has(chainID) { db.logger.Warn("overwriting existing log DB for chain", "chain", chainID) } - db.logDBs[chainID] = logDB + db.logDBs.Set(chainID, logDB) } func (db *ChainsDB) AddLocalDerivedFromDB(chainID types.ChainID, dfDB LocalDerivedFromStorage) { - db.mu.Lock() - defer db.mu.Unlock() - - if _, ok := db.localDBs[chainID]; ok { + if db.localDBs.Has(chainID) { db.logger.Warn("overwriting existing local derived-from DB for chain", "chain", chainID) } - db.localDBs[chainID] = dfDB + db.localDBs.Set(chainID, dfDB) } func (db *ChainsDB) AddCrossDerivedFromDB(chainID types.ChainID, dfDB CrossDerivedFromStorage) { - db.mu.Lock() - defer db.mu.Unlock() - - if _, ok := db.crossDBs[chainID]; ok { + if db.crossDBs.Has(chainID) { db.logger.Warn("overwriting existing cross derived-from DB for chain", "chain", chainID) } - db.crossDBs[chainID] = dfDB + db.crossDBs.Set(chainID, dfDB) } func (db *ChainsDB) AddCrossUnsafeTracker(chainID types.ChainID) { - db.mu.Lock() - defer db.mu.Unlock() - - if _, ok := db.crossUnsafe[chainID]; ok { + if db.crossUnsafe.Has(chainID) { db.logger.Warn("overwriting existing cross-unsafe tracker for chain", "chain", chainID) } - db.crossUnsafe[chainID] = types.BlockSeal{} + db.crossUnsafe.Set(chainID, &locks.RWValue[types.BlockSeal]{}) } // ResumeFromLastSealedBlock prepares the chains db to resume recording events after a restart. // It rewinds the database to the last block that is guaranteed to have been fully recorded to the database, // to ensure it can resume recording from the first log of the next block. 
func (db *ChainsDB) ResumeFromLastSealedBlock() error { - db.mu.RLock() - defer db.mu.RUnlock() - - for chain, logStore := range db.logDBs { + var result error + db.logDBs.Range(func(chain types.ChainID, logStore LogStorage) bool { headNum, ok := logStore.LatestSealedBlockNum() if !ok { // db must be empty, nothing to rewind to db.logger.Info("Resuming, but found no DB contents", "chain", chain) - continue + return true } db.logger.Info("Resuming, starting from last sealed block", "head", headNum) if err := logStore.Rewind(headNum); err != nil { - return fmt.Errorf("failed to rewind chain %s to sealed block %d", chain, headNum) + result = fmt.Errorf("failed to rewind chain %s to sealed block %d", chain, headNum) + return false } - } - return nil + return true + }) + return result } func (db *ChainsDB) DependencySet() depset.DependencySet { @@ -184,14 +163,12 @@ func (db *ChainsDB) DependencySet() depset.DependencySet { } func (db *ChainsDB) Close() error { - db.mu.Lock() - defer db.mu.Unlock() - var combined error - for id, logDB := range db.logDBs { + db.logDBs.Range(func(id types.ChainID, logDB LogStorage) bool { if err := logDB.Close(); err != nil { combined = errors.Join(combined, fmt.Errorf("failed to close log db for chain %v: %w", id, err)) } - } + return true + }) return combined } diff --git a/op-supervisor/supervisor/backend/db/fromda/update_test.go b/op-supervisor/supervisor/backend/db/fromda/update_test.go index 41be48ae0815..10b6b3bdd053 100644 --- a/op-supervisor/supervisor/backend/db/fromda/update_test.go +++ b/op-supervisor/supervisor/backend/db/fromda/update_test.go @@ -64,7 +64,7 @@ func TestBadUpdates(t *testing.T) { assertFn: noChange, }, { - name: "DerivedFrom with conflicting parent root, same L1 height, new L2: accepted, L1 parent-hash is used only on L1 increments.", + name: "CrossDerivedFrom with conflicting parent root, same L1 height, new L2: accepted, L1 parent-hash is used only on L1 increments.", setupFn: func(t *testing.T, db *DB, m *stubMetrics) { require.NoError(t, db.AddDerived(toRef(dDerivedFrom, common.Hash{0x42}), toRef(eDerived, dDerived.Hash)), types.ErrConflict) }, diff --git a/op-supervisor/supervisor/backend/db/query.go b/op-supervisor/supervisor/backend/db/query.go index 01e5ef0171c6..867e4e71955b 100644 --- a/op-supervisor/supervisor/backend/db/query.go +++ b/op-supervisor/supervisor/backend/db/query.go @@ -12,10 +12,7 @@ import ( ) func (db *ChainsDB) FindSealedBlock(chain types.ChainID, number uint64) (seal types.BlockSeal, err error) { - db.mu.RLock() - defer db.mu.RUnlock() - - logDB, ok := db.logDBs[chain] + logDB, ok := db.logDBs.Get(chain) if !ok { return types.BlockSeal{}, fmt.Errorf("%w: %v", types.ErrUnknownChain, chain) } @@ -26,10 +23,7 @@ func (db *ChainsDB) FindSealedBlock(chain types.ChainID, number uint64) (seal ty // for the given chain. It does not contain safety guarantees. // The block number might not be available (empty database, or non-existent chain). 
func (db *ChainsDB) LatestBlockNum(chain types.ChainID) (num uint64, ok bool) { - db.mu.RLock() - defer db.mu.RUnlock() - - logDB, knownChain := db.logDBs[chain] + logDB, knownChain := db.logDBs.Get(chain) if !knownChain { return 0, false } @@ -37,16 +31,15 @@ func (db *ChainsDB) LatestBlockNum(chain types.ChainID) (num uint64, ok bool) { } func (db *ChainsDB) IsCrossUnsafe(chainID types.ChainID, block eth.BlockID) error { - db.mu.RLock() - defer db.mu.RUnlock() - v, ok := db.crossUnsafe[chainID] + v, ok := db.crossUnsafe.Get(chainID) if !ok { return types.ErrUnknownChain } - if v == (types.BlockSeal{}) { + crossUnsafe := v.Get() + if crossUnsafe == (types.BlockSeal{}) { return types.ErrFuture } - if block.Number > v.Number { + if block.Number > crossUnsafe.Number { return types.ErrFuture } // TODO(#11693): make cross-unsafe reorg safe @@ -54,9 +47,7 @@ func (db *ChainsDB) IsCrossUnsafe(chainID types.ChainID, block eth.BlockID) erro } func (db *ChainsDB) ParentBlock(chainID types.ChainID, parentOf eth.BlockID) (parent eth.BlockID, err error) { - db.mu.RLock() - defer db.mu.RUnlock() - logDB, ok := db.logDBs[chainID] + logDB, ok := db.logDBs.Get(chainID) if !ok { return eth.BlockID{}, types.ErrUnknownChain } @@ -72,9 +63,7 @@ func (db *ChainsDB) ParentBlock(chainID types.ChainID, parentOf eth.BlockID) (pa } func (db *ChainsDB) IsLocalUnsafe(chainID types.ChainID, block eth.BlockID) error { - db.mu.RLock() - defer db.mu.RUnlock() - logDB, ok := db.logDBs[chainID] + logDB, ok := db.logDBs.Get(chainID) if !ok { return types.ErrUnknownChain } @@ -89,10 +78,7 @@ func (db *ChainsDB) IsLocalUnsafe(chainID types.ChainID, block eth.BlockID) erro } func (db *ChainsDB) LocalUnsafe(chainID types.ChainID) (types.BlockSeal, error) { - db.mu.RLock() - defer db.mu.RUnlock() - - eventsDB, ok := db.logDBs[chainID] + eventsDB, ok := db.logDBs.Get(chainID) if !ok { return types.BlockSeal{}, types.ErrUnknownChain } @@ -104,29 +90,24 @@ func (db *ChainsDB) LocalUnsafe(chainID types.ChainID) (types.BlockSeal, error) } func (db *ChainsDB) CrossUnsafe(chainID types.ChainID) (types.BlockSeal, error) { - db.mu.RLock() - defer db.mu.RUnlock() - - result, ok := db.crossUnsafe[chainID] + result, ok := db.crossUnsafe.Get(chainID) if !ok { return types.BlockSeal{}, types.ErrUnknownChain } + crossUnsafe := result.Get() // Fall back to cross-safe if cross-unsafe is not known yet - if result == (types.BlockSeal{}) { + if crossUnsafe == (types.BlockSeal{}) { _, crossSafe, err := db.CrossSafe(chainID) if err != nil { return types.BlockSeal{}, fmt.Errorf("no cross-unsafe known for chain %s, and failed to fall back to cross-safe value: %w", chainID, err) } return crossSafe, nil } - return result, nil + return crossUnsafe, nil } func (db *ChainsDB) LocalSafe(chainID types.ChainID) (derivedFrom types.BlockSeal, derived types.BlockSeal, err error) { - db.mu.RLock() - defer db.mu.RUnlock() - - localDB, ok := db.localDBs[chainID] + localDB, ok := db.localDBs.Get(chainID) if !ok { return types.BlockSeal{}, types.BlockSeal{}, types.ErrUnknownChain } @@ -134,10 +115,7 @@ func (db *ChainsDB) LocalSafe(chainID types.ChainID) (derivedFrom types.BlockSea } func (db *ChainsDB) CrossSafe(chainID types.ChainID) (derivedFrom types.BlockSeal, derived types.BlockSeal, err error) { - db.mu.RLock() - defer db.mu.RUnlock() - - crossDB, ok := db.crossDBs[chainID] + crossDB, ok := db.crossDBs.Get(chainID) if !ok { return types.BlockSeal{}, types.BlockSeal{}, types.ErrUnknownChain } @@ -145,10 +123,7 @@ func (db *ChainsDB) CrossSafe(chainID 
types.ChainID) (derivedFrom types.BlockSea } func (db *ChainsDB) Finalized(chainID types.ChainID) (types.BlockSeal, error) { - db.mu.RLock() - defer db.mu.RUnlock() - - finalizedL1 := db.finalizedL1 + finalizedL1 := db.finalizedL1.Get() if finalizedL1 == (eth.L1BlockRef{}) { return types.BlockSeal{}, errors.New("no finalized L1 signal, cannot determine L2 finality yet") } @@ -160,26 +135,25 @@ func (db *ChainsDB) Finalized(chainID types.ChainID) (types.BlockSeal, error) { } func (db *ChainsDB) LastDerivedFrom(chainID types.ChainID, derivedFrom eth.BlockID) (derived types.BlockSeal, err error) { - crossDB, ok := db.crossDBs[chainID] + crossDB, ok := db.crossDBs.Get(chainID) if !ok { return types.BlockSeal{}, types.ErrUnknownChain } return crossDB.LastDerivedAt(derivedFrom) } -func (db *ChainsDB) DerivedFrom(chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) { - db.mu.RLock() - defer db.mu.RUnlock() - - localDB, ok := db.localDBs[chainID] +// CrossDerivedFromBlockRef returns the block that the given block was derived from, if it exists in the cross derived-from storage. +// This includes the parent-block lookup. Use CrossDerivedFrom if no parent-block info is needed. +func (db *ChainsDB) CrossDerivedFromBlockRef(chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) { + xdb, ok := db.crossDBs.Get(chainID) if !ok { return eth.BlockRef{}, types.ErrUnknownChain } - res, err := localDB.DerivedFrom(derived) + res, err := xdb.DerivedFrom(derived) if err != nil { return eth.BlockRef{}, err } - parent, err := localDB.PreviousDerivedFrom(res.ID()) + parent, err := xdb.PreviousDerivedFrom(res.ID()) if err != nil { return eth.BlockRef{}, err } @@ -189,10 +163,7 @@ func (db *ChainsDB) DerivedFrom(chainID types.ChainID, derived eth.BlockID) (der // Check calls the underlying logDB to determine if the given log entry exists at the given location. // If the block-seal of the block that includes the log is known, it is returned. It is fully zeroed otherwise, if the block is in-progress. func (db *ChainsDB) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (includedIn types.BlockSeal, err error) { - db.mu.RLock() - defer db.mu.RUnlock() - - logDB, ok := db.logDBs[chain] + logDB, ok := db.logDBs.Get(chain) if !ok { return types.BlockSeal{}, fmt.Errorf("%w: %v", types.ErrUnknownChain, chain) } @@ -202,10 +173,7 @@ func (db *ChainsDB) Check(chain types.ChainID, blockNum uint64, logIdx uint32, l // OpenBlock returns the Executing Messages for the block at the given number on the given chain. // it routes the request to the appropriate logDB. func (db *ChainsDB) OpenBlock(chainID types.ChainID, blockNum uint64) (seal eth.BlockRef, logCount uint32, execMsgs map[uint32]*types.ExecutingMessage, err error) { - db.mu.RLock() - defer db.mu.RUnlock() - - logDB, ok := db.logDBs[chainID] + logDB, ok := db.logDBs.Get(chainID) if !ok { return eth.BlockRef{}, 0, nil, types.ErrUnknownChain } @@ -215,10 +183,7 @@ func (db *ChainsDB) OpenBlock(chainID types.ChainID, blockNum uint64) (seal eth. // LocalDerivedFrom returns the block that the given block was derived from, if it exists in the local derived-from storage. // it routes the request to the appropriate localDB. 
 func (db *ChainsDB) LocalDerivedFrom(chain types.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) {
-	db.mu.RLock()
-	defer db.mu.RUnlock()
-
-	lDB, ok := db.localDBs[chain]
+	lDB, ok := db.localDBs.Get(chain)
 	if !ok {
 		return types.BlockSeal{}, types.ErrUnknownChain
 	}
@@ -228,10 +193,7 @@ func (db *ChainsDB) LocalDerivedFrom(chain types.ChainID, derived eth.BlockID) (
 // CrossDerivedFrom returns the block that the given block was derived from, if it exists in the cross derived-from storage.
 // it routes the request to the appropriate crossDB.
 func (db *ChainsDB) CrossDerivedFrom(chain types.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) {
-	db.mu.RLock()
-	defer db.mu.RUnlock()
-
-	xDB, ok := db.crossDBs[chain]
+	xDB, ok := db.crossDBs.Get(chain)
 	if !ok {
 		return types.BlockSeal{}, types.ErrUnknownChain
 	}
@@ -247,15 +209,12 @@ func (db *ChainsDB) CrossDerivedFrom(chain types.ChainID, derived eth.BlockID) (
 // Or ErrOutOfScope, with non-zero derivedFromScope,
 // if additional L1 data is needed to cross-verify the candidate L2 block.
 func (db *ChainsDB) CandidateCrossSafe(chain types.ChainID) (derivedFromScope, crossSafe eth.BlockRef, err error) {
-	db.mu.RLock()
-	defer db.mu.RUnlock()
-
-	xDB, ok := db.crossDBs[chain]
+	xDB, ok := db.crossDBs.Get(chain)
 	if !ok {
 		return eth.BlockRef{}, eth.BlockRef{}, types.ErrUnknownChain
 	}
 
-	lDB, ok := db.localDBs[chain]
+	lDB, ok := db.localDBs.Get(chain)
 	if !ok {
 		return eth.BlockRef{}, eth.BlockRef{}, types.ErrUnknownChain
 	}
@@ -323,9 +282,7 @@ func (db *ChainsDB) CandidateCrossSafe(chain types.ChainID) (derivedFromScope, c
 }
 
 func (db *ChainsDB) PreviousDerived(chain types.ChainID, derived eth.BlockID) (prevDerived types.BlockSeal, err error) {
-	db.mu.RLock()
-	defer db.mu.RUnlock()
-	lDB, ok := db.localDBs[chain]
+	lDB, ok := db.localDBs.Get(chain)
 	if !ok {
 		return types.BlockSeal{}, types.ErrUnknownChain
 	}
@@ -333,9 +290,7 @@ func (db *ChainsDB) PreviousDerived(chain types.ChainID, derived eth.BlockID) (p
 }
 
 func (db *ChainsDB) PreviousDerivedFrom(chain types.ChainID, derivedFrom eth.BlockID) (prevDerivedFrom types.BlockSeal, err error) {
-	db.mu.RLock()
-	defer db.mu.RUnlock()
-	lDB, ok := db.localDBs[chain]
+	lDB, ok := db.localDBs.Get(chain)
 	if !ok {
 		return types.BlockSeal{}, types.ErrUnknownChain
 	}
@@ -343,9 +298,7 @@ func (db *ChainsDB) PreviousDerivedFrom(chain types.ChainID, derivedFrom eth.Blo
 }
 
 func (db *ChainsDB) NextDerivedFrom(chain types.ChainID, derivedFrom eth.BlockID) (after eth.BlockRef, err error) {
-	db.mu.RLock()
-	defer db.mu.RUnlock()
-	lDB, ok := db.localDBs[chain]
+	lDB, ok := db.localDBs.Get(chain)
 	if !ok {
 		return eth.BlockRef{}, types.ErrUnknownChain
 	}
@@ -360,9 +313,6 @@ func (db *ChainsDB) NextDerivedFrom(chain types.ChainID, derivedFrom eth.BlockID
 // it assumes the log entry has already been checked and is valid, this function only checks safety levels.
 // Safety levels are assumed to graduate from LocalUnsafe to LocalSafe to CrossUnsafe to CrossSafe, with Finalized as the strongest.
 func (db *ChainsDB) Safest(chainID types.ChainID, blockNum uint64, index uint32) (safest types.SafetyLevel, err error) {
-	db.mu.RLock()
-	defer db.mu.RUnlock()
-
 	if finalized, err := db.Finalized(chainID); err == nil {
 		if finalized.Number >= blockNum {
 			return types.Finalized, nil
@@ -395,7 +345,7 @@ func (db *ChainsDB) Safest(chainID types.ChainID, blockNum uint64, index uint32)
 }
 
 func (db *ChainsDB) IteratorStartingAt(chain types.ChainID, sealedNum uint64, logIndex uint32) (logs.Iterator, error) {
-	logDB, ok := db.logDBs[chain]
+	logDB, ok := db.logDBs.Get(chain)
 	if !ok {
 		return nil, fmt.Errorf("%w: %v", types.ErrUnknownChain, chain)
 	}
diff --git a/op-supervisor/supervisor/backend/db/update.go b/op-supervisor/supervisor/backend/db/update.go
index e45fd3f469a5..932b19875575 100644
--- a/op-supervisor/supervisor/backend/db/update.go
+++ b/op-supervisor/supervisor/backend/db/update.go
@@ -15,10 +15,7 @@ func (db *ChainsDB) AddLog(
 	parentBlock eth.BlockID,
 	logIdx uint32,
 	execMsg *types.ExecutingMessage) error {
-	db.mu.RLock()
-	defer db.mu.RUnlock()
-
-	logDB, ok := db.logDBs[chain]
+	logDB, ok := db.logDBs.Get(chain)
 	if !ok {
 		return fmt.Errorf("cannot AddLog: %w: %v", types.ErrUnknownChain, chain)
 	}
@@ -26,10 +23,7 @@ func (db *ChainsDB) AddLog(
 }
 
 func (db *ChainsDB) SealBlock(chain types.ChainID, block eth.BlockRef) error {
-	db.mu.RLock()
-	defer db.mu.RUnlock()
-
-	logDB, ok := db.logDBs[chain]
+	logDB, ok := db.logDBs.Get(chain)
 	if !ok {
 		return fmt.Errorf("cannot SealBlock: %w: %v", types.ErrUnknownChain, chain)
 	}
@@ -42,10 +36,7 @@ func (db *ChainsDB) SealBlock(chain types.ChainID, block eth.BlockRef) error {
 }
 
 func (db *ChainsDB) Rewind(chain types.ChainID, headBlockNum uint64) error {
-	db.mu.RLock()
-	defer db.mu.RUnlock()
-
-	logDB, ok := db.logDBs[chain]
+	logDB, ok := db.logDBs.Get(chain)
 	if !ok {
 		return fmt.Errorf("cannot Rewind: %w: %s", types.ErrUnknownChain, chain)
 	}
@@ -53,10 +44,7 @@ func (db *ChainsDB) Rewind(chain types.ChainID, headBlockNum uint64) error {
 }
 
 func (db *ChainsDB) UpdateLocalSafe(chain types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error {
-	db.mu.RLock()
-	defer db.mu.RUnlock()
-
-	localDB, ok := db.localDBs[chain]
+	localDB, ok := db.localDBs.Get(chain)
 	if !ok {
 		return fmt.Errorf("cannot UpdateLocalSafe: %w: %v", types.ErrUnknownChain, chain)
 	}
@@ -65,22 +53,17 @@ func (db *ChainsDB) UpdateLocalSafe(chain types.ChainID, derivedFrom eth.BlockRe
 }
 
 func (db *ChainsDB) UpdateCrossUnsafe(chain types.ChainID, crossUnsafe types.BlockSeal) error {
-	db.mu.RLock()
-	defer db.mu.RUnlock()
-
-	if _, ok := db.crossUnsafe[chain]; !ok {
+	v, ok := db.crossUnsafe.Get(chain)
+	if !ok {
 		return fmt.Errorf("cannot UpdateCrossUnsafe: %w: %s", types.ErrUnknownChain, chain)
 	}
 	db.logger.Debug("Updating cross unsafe", "chain", chain, "crossUnsafe", crossUnsafe)
-	db.crossUnsafe[chain] = crossUnsafe
+	v.Set(crossUnsafe)
 	return nil
 }
 
 func (db *ChainsDB) UpdateCrossSafe(chain types.ChainID, l1View eth.BlockRef, lastCrossDerived eth.BlockRef) error {
-	db.mu.RLock()
-	defer db.mu.RUnlock()
-
-	crossDB, ok := db.crossDBs[chain]
+	crossDB, ok := db.crossDBs.Get(chain)
 	if !ok {
 		return fmt.Errorf("cannot UpdateCrossSafe: %w: %s", types.ErrUnknownChain, chain)
 	}
@@ -89,13 +72,14 @@ func (db *ChainsDB) UpdateCrossSafe(chain types.ChainID, l1View eth.BlockRef, la
 }
 
 func (db *ChainsDB) UpdateFinalizedL1(finalized eth.BlockRef) error {
-	db.mu.RLock()
-	defer db.mu.RUnlock()
+	// Lock, so we avoid race-conditions in-between getting (for comparison) and setting.
+	db.finalizedL1.Lock()
+	defer db.finalizedL1.Unlock()
 
-	if db.finalizedL1.Number > finalized.Number {
-		return fmt.Errorf("cannot rewind finalized L1 head from %s to %s", db.finalizedL1, finalized)
+	if v := db.finalizedL1.Value; v.Number > finalized.Number {
+		return fmt.Errorf("cannot rewind finalized L1 head from %s to %s", v, finalized)
 	}
 	db.logger.Debug("Updating finalized L1", "finalizedL1", finalized)
-	db.finalizedL1 = finalized
+	db.finalizedL1.Value = finalized
 	return nil
 }
diff --git a/op-supervisor/supervisor/backend/mock.go b/op-supervisor/supervisor/backend/mock.go
index c07e74a40013..b40c5209d5ef 100644
--- a/op-supervisor/supervisor/backend/mock.go
+++ b/op-supervisor/supervisor/backend/mock.go
@@ -62,7 +62,7 @@ func (m *MockBackend) Finalized(ctx context.Context, chainID types.ChainID) (eth
 	return eth.BlockID{}, nil
 }
 
-func (m *MockBackend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) {
+func (m *MockBackend) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) {
 	return eth.BlockRef{}, nil
 }
 
diff --git a/op-supervisor/supervisor/frontend/frontend.go b/op-supervisor/supervisor/frontend/frontend.go
index 26e751200a5a..0a5b70a4799a 100644
--- a/op-supervisor/supervisor/frontend/frontend.go
+++ b/op-supervisor/supervisor/frontend/frontend.go
@@ -17,7 +17,7 @@ type AdminBackend interface {
 type QueryBackend interface {
 	CheckMessage(identifier types.Identifier, payloadHash common.Hash) (types.SafetyLevel, error)
 	CheckMessages(messages []types.Message, minSafety types.SafetyLevel) error
-	DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error)
+	CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error)
 	UnsafeView(ctx context.Context, chainID types.ChainID, unsafe types.ReferenceView) (types.ReferenceView, error)
 	SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error)
 	Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error)
@@ -67,8 +67,8 @@ func (q *QueryFrontend) Finalized(ctx context.Context, chainID types.ChainID) (e
 	return q.Supervisor.Finalized(ctx, chainID)
 }
 
-func (q *QueryFrontend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) {
-	return q.Supervisor.DerivedFrom(ctx, chainID, derived)
+func (q *QueryFrontend) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) {
+	return q.Supervisor.CrossDerivedFrom(ctx, chainID, derived)
 }
 
 type AdminFrontend struct {
diff --git a/op-supervisor/supervisor/service.go b/op-supervisor/supervisor/service.go
index 469a80439e34..8bccaf1eb82f 100644
--- a/op-supervisor/supervisor/service.go
+++ b/op-supervisor/supervisor/service.go
@@ -72,6 +72,17 @@ func (su *SupervisorService) initBackend(ctx context.Context, cfg *config.Config
 		su.backend = backend.NewMockBackend()
 		return nil
 	}
+	// the flag is a string slice, which has the potential to have empty strings
+	filterBlank := func(in []string) []string {
+		out := make([]string, 0, len(in))
+		for _, s := range in {
+			if s != "" {
+				out = append(out, s)
+			}
+		}
+		return out
+	}
+	cfg.L2RPCs = filterBlank(cfg.L2RPCs)
 	be, err := backend.NewSupervisorBackend(ctx, su.log, su.metrics, cfg)
 	if err != nil {
 		return fmt.Errorf("failed to create supervisor backend: %w", err)
diff --git a/ops-bedrock/l2-op-geth-interop.Dockerfile b/ops-bedrock/l2-op-geth-interop.Dockerfile
new file mode 100644
index 000000000000..41a667c0fc29
--- /dev/null
+++ b/ops-bedrock/l2-op-geth-interop.Dockerfile
@@ -0,0 +1,10 @@
+FROM us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101411.1-rc.3
+# Note: depend on dev-release for sequencer interop message checks
+
+RUN apk add --no-cache jq
+
+COPY l2-op-geth-entrypoint.sh /entrypoint.sh
+
+VOLUME ["/db"]
+
+ENTRYPOINT ["/bin/sh", "/entrypoint.sh"]
diff --git a/ops/docker/op-stack-go/Dockerfile.dockerignore b/ops/docker/op-stack-go/Dockerfile.dockerignore
index 1c0841df1f33..e5eceaea73b8 100644
--- a/ops/docker/op-stack-go/Dockerfile.dockerignore
+++ b/ops/docker/op-stack-go/Dockerfile.dockerignore
@@ -6,6 +6,7 @@
 !/op-batcher
 !/op-bootnode
 !/op-chain-ops
+!/op-deployer
 !/op-challenger
 !/packages/contracts-bedrock/snapshots
 !/op-dispute-mon
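
A note on the locking pattern used in the query.go and update.go hunks above: the single ChainsDB-wide db.mu RWMutex is replaced by per-field locked containers, so calls read as db.logDBs.Get(chain), db.crossUnsafe.Get(chain) (yielding a value with Get/Set), and db.finalizedL1 with Get, Lock/Unlock and an exported Value field. The helpers themselves are added elsewhere in this patch (op-service/locks); the sketch below is only inferred from those call sites and may differ from the real implementation in method names and details.

// Sketch only, not the patch's actual code: a minimal shape of the locks helpers,
// assuming the Get/Set/Lock/Unlock/Value names seen at the call sites above.
package locks

import "sync"

// RWValue guards a single value with an RWMutex. The mutex is embedded and exported,
// so a caller can hold the write lock across a read-modify-write sequence, the way
// ChainsDB.UpdateFinalizedL1 uses db.finalizedL1.Lock() / .Value / .Unlock().
type RWValue[E any] struct {
	sync.RWMutex
	Value E
}

// Get returns a snapshot of the current value under the read lock.
func (c *RWValue[E]) Get() E {
	c.RLock()
	defer c.RUnlock()
	return c.Value
}

// Set replaces the value under the write lock.
func (c *RWValue[E]) Set(v E) {
	c.Lock()
	defer c.Unlock()
	c.Value = v
}

// RWMap is a read/write-locked map, standing in for the former global mutex around
// maps such as logDBs, localDBs, crossDBs, and crossUnsafe (which holds *RWValue entries).
type RWMap[K comparable, V any] struct {
	mu    sync.RWMutex
	inner map[K]V
}

// Get reads a value under the read lock, reporting whether the key is known.
func (m *RWMap[K, V]) Get(key K) (value V, ok bool) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	value, ok = m.inner[key]
	return
}

// Set writes a value under the write lock, lazily allocating the inner map.
func (m *RWMap[K, V]) Set(key K, value V) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.inner == nil {
		m.inner = make(map[K]V)
	}
	m.inner[key] = value
}

Under this shape, UpdateFinalizedL1 holds the write lock directly instead of calling Set, so the comparison against the old finalized head and the assignment of the new one happen atomically, which is what the added "Lock, so we avoid race-conditions" comment in the hunk is about.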