op-node: Implement fjord RLP & Channel Bank Size limit Increases (#10357)

* op-node: Increase MaxChannelBankSize with Fjord

This also creates a ChainSpec object, which is responsible for returning protocol
parameters. We use an object distinct from rollup.Config because the config is
primarily a disk representation and does not concern itself with protocol
constants (a usage sketch follows below).

* op-node: Increase MaxRLPBytesPerChannel with Fjord
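
Both limits are now resolved through the ChainSpec at a given timestamp rather
than read from package-level constants. A minimal usage sketch (the Fjord
activation time and the timestamps below are hypothetical; assumes the op-node
module is on the import path):

package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
)

func main() {
	// Hypothetical config: only the Fjord activation time matters here.
	fjordTime := uint64(50)
	cfg := &rollup.Config{FjordTime: &fjordTime}

	spec := rollup.NewChainSpec(cfg)
	for _, t := range []uint64{0, 49, 50} {
		// Before t=50: bank=100_000_000, rlp=10_000_000 (Bedrock limits).
		// At and after t=50: bank=1_000_000_000, rlp=100_000_000 (Fjord limits).
		fmt.Printf("t=%d: bank=%d rlp=%d\n",
			t, spec.MaxChannelBankSize(t), spec.MaxRLPBytesPerChannel(t))
	}
}
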
trianglesphere authored May 7, 2024
1 parent 1812f16 commit 4386680
Showing 17 changed files with 237 additions and 65 deletions.

op-batcher/batcher/channel_builder_test.go (2 changes: 1 addition & 1 deletion)

@@ -485,7 +485,7 @@ func TestChannelBuilder_OutputFrames_SpanBatch(t *testing.T) {
 func ChannelBuilder_MaxRLPBytesPerChannel(t *testing.T, batchType uint) {
 	t.Parallel()
 	channelConfig := defaultTestChannelConfig()
-	channelConfig.MaxFrameSize = derive.MaxRLPBytesPerChannel * 2
+	channelConfig.MaxFrameSize = rollup.SafeMaxRLPBytesPerChannel * 2
 	channelConfig.InitNoneCompressor()
 	channelConfig.BatchType = batchType

op-e2e/actions/garbage_channel_out.go (4 changes: 2 additions & 2 deletions)

@@ -159,9 +159,9 @@ func (co *GarbageChannelOut) AddBlock(rollupCfg *rollup.Config, block *types.Blo
 		buf.Reset()
 		buf.Write(bufBytes)
 	}
-	if co.rlpLength+buf.Len() > derive.MaxRLPBytesPerChannel {
+	if co.rlpLength+buf.Len() > rollup.SafeMaxRLPBytesPerChannel {
 		return fmt.Errorf("could not add %d bytes to channel of %d bytes, max is %d. err: %w",
-			buf.Len(), co.rlpLength, derive.MaxRLPBytesPerChannel, derive.ErrTooManyRLPBytes)
+			buf.Len(), co.rlpLength, rollup.SafeMaxRLPBytesPerChannel, derive.ErrTooManyRLPBytes)
 	}
 	co.rlpLength += buf.Len()

op-node/benchmarks/batchbuilding_test.go (4 changes: 2 additions & 2 deletions)

@@ -94,7 +94,7 @@ func (t BatchingBenchmarkTC) String() string {
 // Every Compressor in the compressor map is benchmarked for each test case
 // The results of the Benchmark measure *only* the time to add the final batch to the channel out,
 // not the time to send all the batches through the channel out
-// Hint: Raise the derive.MaxRLPBytesPerChannel to 10_000_000_000 to avoid hitting limits if adding larger test cases
+// Hint: Raise the rollup.MaxRLPBytesPerChannel to 10_000_000_000 to avoid hitting limits if adding larger test cases
 func BenchmarkFinalBatchChannelOut(b *testing.B) {
 	// Targets define the number of batches and transactions per batch to test
 	type target struct{ bs, tpb int }

@@ -203,7 +203,7 @@ func BenchmarkIncremental(b *testing.B) {
 // Every Compressor in the compressor map is benchmarked for each test case
 // The results of the Benchmark measure the time to add *all batches* to the channel out,
 // not the time to send all the batches through the channel out
-// Hint: Raise the derive.MaxRLPBytesPerChannel to 10_000_000_000 to avoid hitting limits
+// Hint: Raise the rollup.MaxRLPBytesPerChannel to 10_000_000_000 to avoid hitting limits
 func BenchmarkAllBatchesChannelOut(b *testing.B) {
 	// Targets define the number of batches and transactions per batch to test
 	type target struct{ bs, tpb int }

op-node/cmd/batch_decoder/main.go (2 changes: 1 addition & 1 deletion)

@@ -161,7 +161,7 @@ func main() {
 				L2GenesisTime: L2GenesisTime,
 				L2BlockTime:   L2BlockTime,
 			}
-			reassemble.Channels(config)
+			reassemble.Channels(config, rollupCfg)
 			return nil
 		},
 	},

op-node/cmd/batch_decoder/reassemble/reassemble.go (10 changes: 6 additions & 4 deletions)

@@ -11,6 +11,7 @@ import (
 	"sort"

 	"github.com/ethereum-optimism/optimism/op-node/cmd/batch_decoder/fetch"
+	"github.com/ethereum-optimism/optimism/op-node/rollup"
 	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
 	"github.com/ethereum-optimism/optimism/op-service/eth"
 	"github.com/ethereum/go-ethereum/common"

@@ -61,7 +62,7 @@ func LoadFrames(directory string, inbox common.Address) []FrameWithMetadata {
 // Channels loads all transactions from the given input directory that are submitted to the
 // specified batch inbox and then re-assembles all channels & writes the re-assembled channels
 // to the out directory.
-func Channels(config Config) {
+func Channels(config Config, rollupCfg *rollup.Config) {
 	if err := os.MkdirAll(config.OutDirectory, 0750); err != nil {
 		log.Fatal(err)
 	}

@@ -71,7 +72,7 @@ func Channels(config Config) {
 		framesByChannel[frame.Frame.ID] = append(framesByChannel[frame.Frame.ID], frame)
 	}
 	for id, frames := range framesByChannel {
-		ch := processFrames(config, id, frames)
+		ch := processFrames(config, rollupCfg, id, frames)
 		filename := path.Join(config.OutDirectory, fmt.Sprintf("%s.json", id.String()))
 		if err := writeChannel(ch, filename); err != nil {
 			log.Fatal(err)

@@ -89,7 +90,8 @@ func writeChannel(ch ChannelWithMetadata, filename string) error {
 	return enc.Encode(ch)
 }

-func processFrames(cfg Config, id derive.ChannelID, frames []FrameWithMetadata) ChannelWithMetadata {
+func processFrames(cfg Config, rollupCfg *rollup.Config, id derive.ChannelID, frames []FrameWithMetadata) ChannelWithMetadata {
+	spec := rollup.NewChainSpec(rollupCfg)
 	ch := derive.NewChannel(id, eth.L1BlockRef{Number: frames[0].InclusionBlock})
 	invalidFrame := false

@@ -109,7 +111,7 @@ func processFrames(cfg Config, id derive.ChannelID, frames []FrameWithMetadata)
 	var batchTypes []int
 	invalidBatches := false
 	if ch.IsReady() {
-		br, err := derive.BatchReader(ch.Reader())
+		br, err := derive.BatchReader(ch.Reader(), spec.MaxRLPBytesPerChannel(ch.HighestBlock().Time))
 		if err == nil {
 			for batchData, err := br(); err != io.EOF; batchData, err = br() {
 				if err != nil {

op-node/rollup/chain_spec.go (new file, 57 additions)

package rollup

// maxChannelBankSize is the amount of memory space, in number of bytes,
// that the channel bank may hold before it is pruned by removing channels,
// starting with the oldest channel. Its value changes with the Fjord
// network upgrade.
const (
	maxChannelBankSizeBedrock = 100_000_000
	maxChannelBankSizeFjord   = 1_000_000_000
)

// maxRLPBytesPerChannel is the maximum number of bytes that will be read from
// a channel. This limit is set when decoding the RLP.
const (
	maxRLPBytesPerChannelBedrock = 10_000_000
	maxRLPBytesPerChannelFjord   = 100_000_000
)

// SafeMaxRLPBytesPerChannel is a limit on RLP bytes per channel that is valid
// across every OP Stack chain. The limit on certain chains at certain times
// may be higher.
// TODO(#10428) Remove this parameter
const SafeMaxRLPBytesPerChannel = maxRLPBytesPerChannelBedrock

type ChainSpec struct {
	config *Config
}

func NewChainSpec(config *Config) *ChainSpec {
	return &ChainSpec{config}
}

// IsCanyon returns true if t >= canyon_time
func (s *ChainSpec) IsCanyon(t uint64) bool {
	return s.config.IsCanyon(t)
}

// MaxChannelBankSize returns the maximum number of bytes that can be allocated
// inside the channel bank before pruning occurs at the given timestamp.
func (s *ChainSpec) MaxChannelBankSize(t uint64) uint64 {
	if s.config.IsFjord(t) {
		return maxChannelBankSizeFjord
	}
	return maxChannelBankSizeBedrock
}

// ChannelTimeout returns the channel timeout constant.
func (s *ChainSpec) ChannelTimeout() uint64 {
	return s.config.ChannelTimeout
}

// MaxRLPBytesPerChannel returns the maximum number of bytes that will be read
// from a channel at a given timestamp.
func (s *ChainSpec) MaxRLPBytesPerChannel(t uint64) uint64 {
	if s.config.IsFjord(t) {
		return maxRLPBytesPerChannelFjord
	}
	return maxRLPBytesPerChannelBedrock
}

op-node/rollup/chain_spec_test.go (new file, 121 additions)

package rollup

import (
	"math/big"
	"testing"

	"github.com/ethereum-optimism/optimism/op-service/eth"
	"github.com/ethereum/go-ethereum/common"
	"github.com/stretchr/testify/require"
)

func u64ptr(n uint64) *uint64 {
	return &n
}

var testConfig = Config{
	Genesis: Genesis{
		L1: eth.BlockID{
			Hash:   common.HexToHash("0x438335a20d98863a4c0c97999eb2481921ccd28553eac6f913af7c12aec04108"),
			Number: 17422590,
		},
		L2: eth.BlockID{
			Hash:   common.HexToHash("0xdbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3"),
			Number: 105235063,
		},
		L2Time: 0,
		SystemConfig: eth.SystemConfig{
			BatcherAddr: common.HexToAddress("0x6887246668a3b87f54deb3b94ba47a6f63f32985"),
			Overhead:    eth.Bytes32(common.HexToHash("0x00000000000000000000000000000000000000000000000000000000000000bc")),
			Scalar:      eth.Bytes32(common.HexToHash("0x00000000000000000000000000000000000000000000000000000000000a6fe0")),
			GasLimit:    30_000_000,
		},
	},
	BlockTime:               2,
	MaxSequencerDrift:       600,
	SeqWindowSize:           3600,
	ChannelTimeout:          300,
	L1ChainID:               big.NewInt(1),
	L2ChainID:               big.NewInt(10),
	RegolithTime:            u64ptr(10),
	CanyonTime:              u64ptr(20),
	DeltaTime:               u64ptr(30),
	EcotoneTime:             u64ptr(40),
	FjordTime:               u64ptr(50),
	InteropTime:             nil,
	BatchInboxAddress:       common.HexToAddress("0xff00000000000000000000000000000000000010"),
	DepositContractAddress:  common.HexToAddress("0xbEb5Fc579115071764c7423A4f12eDde41f106Ed"),
	L1SystemConfigAddress:   common.HexToAddress("0x229047fed2591dbec1eF1118d64F7aF3dB9EB290"),
	ProtocolVersionsAddress: common.HexToAddress("0x8062AbC286f5e7D9428a0Ccb9AbD71e50d93b935"),
	UsePlasma:               false,
}

func TestCanyonForkActivation(t *testing.T) {
	c := NewChainSpec(&testConfig)
	tests := []struct {
		name     string
		blockNum uint64
		isCanyon bool
	}{
		{"Genesis", 0, false},
		{"CanyonTimeMinusOne", 19, false},
		{"CanyonTime", 20, true},
		{"CanyonTimePlusOne", 21, true},
		{"DeltaTime", 30, true},
		{"EcotoneTime", 40, true},
		{"FjordTime", 50, true},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := c.IsCanyon(tt.blockNum)
			require.Equal(t, tt.isCanyon, result, "Block number %d should be Canyon", tt.blockNum)
		})
	}
}

func TestMaxChannelBankSize(t *testing.T) {
	c := NewChainSpec(&testConfig)
	tests := []struct {
		name        string
		blockNum    uint64
		expected    uint64
		description string
	}{
		{"Genesis", 0, uint64(maxChannelBankSizeBedrock), "Before Fjord activation, should use Bedrock size"},
		{"FjordTimeMinusOne", 49, uint64(maxChannelBankSizeBedrock), "Just before Fjord, should still use Bedrock size"},
		{"FjordTime", 50, uint64(maxChannelBankSizeFjord), "At Fjord activation, should switch to Fjord size"},
		{"FjordTimePlusOne", 51, uint64(maxChannelBankSizeFjord), "After Fjord activation, should use Fjord size"},
		{"NextForkTime", 60, uint64(maxChannelBankSizeFjord), "Well after Fjord, should continue to use Fjord size"},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := c.MaxChannelBankSize(tt.blockNum)
			require.Equal(t, tt.expected, result, tt.description)
		})
	}
}

func TestMaxRLPBytesPerChannel(t *testing.T) {
	c := NewChainSpec(&testConfig)
	tests := []struct {
		name        string
		blockNum    uint64
		expected    uint64
		description string
	}{
		{"Genesis", 0, uint64(maxRLPBytesPerChannelBedrock), "Before Fjord activation, should use Bedrock RLP bytes limit"},
		{"FjordTimeMinusOne", 49, uint64(maxRLPBytesPerChannelBedrock), "Just before Fjord, should still use Bedrock RLP bytes limit"},
		{"FjordTime", 50, uint64(maxRLPBytesPerChannelFjord), "At Fjord activation, should switch to Fjord RLP bytes limit"},
		{"FjordTimePlusOne", 51, uint64(maxRLPBytesPerChannelFjord), "After Fjord activation, should use Fjord RLP bytes limit"},
		{"NextForkTime", 60, uint64(maxRLPBytesPerChannelFjord), "Well after Fjord, should continue to use Fjord RLP bytes limit"},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := c.MaxRLPBytesPerChannel(tt.blockNum)
			require.Equal(t, tt.expected, result, tt.description)
		})
	}
}

op-node/rollup/derive/channel.go (9 changes: 7 additions & 2 deletions)

@@ -100,6 +100,11 @@ func (ch *Channel) OpenBlockNumber() uint64 {
 	return ch.openBlock.Number
 }

+// HighestBlock returns the last L1 block which affected this channel
+func (ch *Channel) HighestBlock() eth.L1BlockRef {
+	return ch.highestL1InclusionBlock
+}
+
 // Size returns the current size of the channel including frame overhead.
 // Reading from the channel does not reduce the size as reading is done
 // on uncompressed data while this size is over compressed data.

@@ -146,13 +151,13 @@ func (ch *Channel) Reader() io.Reader {
 // The L1Inclusion block is also provided at creation time.
 // Warning: the batch reader can read every batch-type.
 // The caller of the batch-reader should filter the results.
-func BatchReader(r io.Reader) (func() (*BatchData, error), error) {
+func BatchReader(r io.Reader, maxRLPBytesPerChannel uint64) (func() (*BatchData, error), error) {
 	// Setup decompressor stage + RLP reader
 	zr, err := zlib.NewReader(r)
 	if err != nil {
 		return nil, err
 	}
-	rlpReader := rlp.NewStream(zr, MaxRLPBytesPerChannel)
+	rlpReader := rlp.NewStream(zr, maxRLPBytesPerChannel)
 	// Read each batch iteratively
 	return func() (*BatchData, error) {
 		var batchData BatchData
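
Callers of BatchReader now resolve the limit from the ChainSpec before
decoding. A minimal sketch of the new call-site pattern (the package and the
readBatches helper are hypothetical; the loop mirrors the reassemble.go change
above):

package sketch // hypothetical package, for illustration only

import (
	"io"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
)

// readBatches resolves the fork-aware RLP limit at the channel's highest L1
// inclusion time, then decodes every batch from the channel.
func readBatches(spec *rollup.ChainSpec, ch *derive.Channel) ([]*derive.BatchData, error) {
	limit := spec.MaxRLPBytesPerChannel(ch.HighestBlock().Time)
	br, err := derive.BatchReader(ch.Reader(), limit)
	if err != nil {
		return nil, err
	}
	var batches []*derive.BatchData
	for batch, err := br(); err != io.EOF; batch, err = br() {
		if err != nil {
			return nil, err
		}
		batches = append(batches, batch)
	}
	return batches, nil
}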

op-node/rollup/derive/channel_bank.go (14 changes: 7 additions & 7 deletions)

@@ -31,7 +31,7 @@ type NextFrameProvider interface {
 // ChannelBank buffers channel frames, and emits full channel data
 type ChannelBank struct {
 	log     log.Logger
-	cfg     *rollup.Config
+	spec    *rollup.ChainSpec
 	metrics Metrics

 	channels map[ChannelID]*Channel // channels by ID

@@ -47,7 +47,7 @@ var _ ResettableStage = (*ChannelBank)(nil)
 func NewChannelBank(log log.Logger, cfg *rollup.Config, prev NextFrameProvider, fetcher L1Fetcher, m Metrics) *ChannelBank {
 	return &ChannelBank{
 		log:          log,
-		cfg:          cfg,
+		spec:         rollup.NewChainSpec(cfg),
 		metrics:      m,
 		channels:     make(map[ChannelID]*Channel),
 		channelQueue: make([]ChannelID, 0, 10),

@@ -67,7 +67,7 @@ func (cb *ChannelBank) prune() {
 		totalSize += ch.size
 	}
 	// prune until it is reasonable again. The high-priority channel failed to be read, so we start pruning there.
-	for totalSize > MaxChannelBankSize {
+	for totalSize > cb.spec.MaxChannelBankSize(cb.Origin().Time) {
 		id := cb.channelQueue[0]
 		ch := cb.channels[id]
 		cb.channelQueue = cb.channelQueue[1:]

@@ -98,7 +98,7 @@ func (cb *ChannelBank) IngestFrame(f Frame) {
 	}

 	// check if the channel is not timed out
-	if currentCh.OpenBlockNumber()+cb.cfg.ChannelTimeout < origin.Number {
+	if currentCh.OpenBlockNumber()+cb.spec.ChannelTimeout() < origin.Number {
 		log.Warn("channel is timed out, ignore frame")
 		return
 	}

@@ -125,7 +125,7 @@ func (cb *ChannelBank) Read() (data []byte, err error) {
 	// channels at the head of the queue and we want to remove them all.
 	first := cb.channelQueue[0]
 	ch := cb.channels[first]
-	timedOut := ch.OpenBlockNumber()+cb.cfg.ChannelTimeout < cb.Origin().Number
+	timedOut := ch.OpenBlockNumber()+cb.spec.ChannelTimeout() < cb.Origin().Number
 	if timedOut {
 		cb.log.Info("channel timed out", "channel", first, "frames", len(ch.inputs))
 		cb.metrics.RecordChannelTimedOut()

@@ -139,7 +139,7 @@ func (cb *ChannelBank) Read() (data []byte, err error) {
 	// Post-Canyon we read the entire channelQueue for the first ready channel. If no channel is
 	// available, we return `nil, io.EOF`.
 	// Canyon activates with the first L1 block whose time >= CanyonTime, not on the L2 timestamp.
-	if !cb.cfg.IsCanyon(cb.Origin().Time) {
+	if !cb.spec.IsCanyon(cb.Origin().Time) {
 		return cb.tryReadChannelAtIndex(0)
 	}

@@ -157,7 +157,7 @@ func (cb *ChannelBank) tryReadChannelAtIndex(i int) (data []byte, err error) {
 	chanID := cb.channelQueue[i]
 	ch := cb.channels[chanID]
-	timedOut := ch.OpenBlockNumber()+cb.cfg.ChannelTimeout < cb.Origin().Number
+	timedOut := ch.OpenBlockNumber()+cb.spec.ChannelTimeout() < cb.Origin().Number
 	if timedOut || !ch.IsReady() {
 		return nil, io.EOF
 	}
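
The prune threshold is now a function of the L1 origin timestamp, so identical
bank contents may be pruned before Fjord but kept after it. A standalone toy
illustration of the rule (not op-node code; the channel sizes are hypothetical):

package main

import "fmt"

const (
	maxChannelBankSizeBedrock = 100_000_000   // pre-Fjord limit
	maxChannelBankSizeFjord   = 1_000_000_000 // post-Fjord limit
)

func main() {
	// Hypothetical bank contents: three channels by size, oldest first.
	sizes := []uint64{60_000_000, 50_000_000, 40_000_000}
	for _, limit := range []uint64{maxChannelBankSizeBedrock, maxChannelBankSizeFjord} {
		total := uint64(0)
		for _, s := range sizes {
			total += s
		}
		// Mirror prune(): drop the oldest channel until the total fits.
		pruned := 0
		for total > limit {
			total -= sizes[pruned]
			pruned++
		}
		// Pre-Fjord: one channel is dropped; post-Fjord: nothing is pruned.
		fmt.Printf("limit=%d pruned=%d remaining=%d\n", limit, pruned, total)
	}
}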

(8 more changed files are not shown here.)