Commit e30cb11: unify dummyda
Parent: 5c00612

File tree: 4 files changed (+275, -270 lines)

node/helpers_test.go

Lines changed: 13 additions & 166 deletions
@@ -7,15 +7,14 @@ import (
     "fmt"
     "strings"
     "sync"
-    "sync/atomic"
     "testing"
     "time"

     testutils "github.com/celestiaorg/utils/test"
     "github.com/evstack/ev-node/block"
     coreexecutor "github.com/evstack/ev-node/core/execution"
     coresequencer "github.com/evstack/ev-node/core/sequencer"
-    datypes "github.com/evstack/ev-node/pkg/da/types"
+    "github.com/evstack/ev-node/test/testda"
     "github.com/ipfs/go-datastore"
     dssync "github.com/ipfs/go-datastore/sync"
     "github.com/libp2p/go-libp2p/core/peer"
@@ -43,182 +42,30 @@ const (
     MockExecutorAddress = "127.0.0.1:40041"
 )

-// createTestComponents creates test components for node initialization
-type dummyDAClient struct {
-    backend *dummyDABackend
-}
-
-type dummyDABackend struct {
-    mu         sync.Mutex
-    height     uint64
-    maxBlobSz  uint64
-    blobs      map[uint64]map[string][][]byte // height -> namespace -> blobs
-    failSubmit atomic.Bool
-
-    tickerMu   sync.Mutex
-    tickerStop chan struct{}
-    tickerRefs atomic.Int32
-}
-
+// sharedDummyDA is a shared DummyDA instance for multi-node tests.
 var (
-    sharedDABackend     *dummyDABackend
-    sharedDABackendOnce sync.Once
+    sharedDummyDA     *testda.DummyDA
+    sharedDummyDAOnce sync.Once
 )

-func getSharedDABackend(maxBlobSize uint64) *dummyDABackend {
-    sharedDABackendOnce.Do(func() {
-        sharedDABackend = &dummyDABackend{
-            maxBlobSz: maxBlobSize,
-            blobs:     make(map[uint64]map[string][][]byte),
-        }
+func getSharedDummyDA(maxBlobSize uint64) *testda.DummyDA {
+    sharedDummyDAOnce.Do(func() {
+        sharedDummyDA = testda.New(testda.WithMaxBlobSize(maxBlobSize))
     })
-
-    if maxBlobSize > 0 {
-        sharedDABackend.mu.Lock()
-        if sharedDABackend.maxBlobSz == 0 || maxBlobSize > sharedDABackend.maxBlobSz {
-            sharedDABackend.maxBlobSz = maxBlobSize
-        }
-        sharedDABackend.mu.Unlock()
-    }
-
-    return sharedDABackend
-}
-
-func (b *dummyDABackend) reset() {
-    b.mu.Lock()
-    b.height = 0
-    b.blobs = make(map[uint64]map[string][][]byte)
-    b.failSubmit.Store(false)
-    b.mu.Unlock()
-
-    b.tickerMu.Lock()
-    if b.tickerStop != nil {
-        close(b.tickerStop)
-        b.tickerStop = nil
-    }
-    b.tickerRefs.Store(0)
-    b.tickerMu.Unlock()
+    return sharedDummyDA
 }

 func resetSharedDummyDA() {
-    if sharedDABackend != nil {
-        sharedDABackend.reset()
+    if sharedDummyDA != nil {
+        sharedDummyDA.Reset()
     }
 }

-func newDummyDAClient(maxBlobSize uint64) *dummyDAClient {
+func newDummyDAClient(maxBlobSize uint64) *testda.DummyDA {
     if maxBlobSize == 0 {
-        maxBlobSize = 2 * 1024 * 1024
-    }
-    return &dummyDAClient{backend: getSharedDABackend(maxBlobSize)}
-}
-
-func (d *dummyDAClient) Submit(ctx context.Context, data [][]byte, gasPrice float64, namespace []byte, options []byte) datypes.ResultSubmit {
-    _ = ctx
-    _ = gasPrice
-    _ = options
-    if d.backend.failSubmit.Load() {
-        return datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusError, Message: "simulated DA failure"}}
-    }
-    var blobSz uint64
-    for _, b := range data {
-        if uint64(len(b)) > d.backend.maxBlobSz {
-            return datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusTooBig, Message: datypes.ErrBlobSizeOverLimit.Error()}}
-        }
-        blobSz += uint64(len(b))
-    }
-    d.backend.mu.Lock()
-    d.backend.height++
-    height := d.backend.height
-    if d.backend.blobs[height] == nil {
-        d.backend.blobs[height] = make(map[string][][]byte)
-    }
-    nsKey := string(namespace)
-    d.backend.blobs[height][nsKey] = append(d.backend.blobs[height][nsKey], data...)
-    d.backend.mu.Unlock()
-    return datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Height: height, BlobSize: blobSz, SubmittedCount: uint64(len(data)), Timestamp: time.Now()}}
-}
-
-func (d *dummyDAClient) Retrieve(ctx context.Context, height uint64, namespace []byte) datypes.ResultRetrieve {
-    _ = ctx
-    d.backend.mu.Lock()
-    byHeight := d.backend.blobs[height]
-    var blobs [][]byte
-    if byHeight != nil {
-        blobs = byHeight[string(namespace)]
-    }
-    d.backend.mu.Unlock()
-
-    if len(blobs) == 0 {
-        return datypes.ResultRetrieve{BaseResult: datypes.BaseResult{Code: datypes.StatusNotFound, Height: height, Message: datypes.ErrBlobNotFound.Error(), Timestamp: time.Now()}}
-    }
-
-    ids := make([][]byte, len(blobs))
-    return datypes.ResultRetrieve{
-        BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Height: height, IDs: ids, Timestamp: time.Now()},
-        Data:       blobs,
-    }
-}
-func (d *dummyDAClient) RetrieveHeaders(ctx context.Context, height uint64) datypes.ResultRetrieve {
-    return d.Retrieve(ctx, height, d.GetHeaderNamespace())
-}
-func (d *dummyDAClient) RetrieveData(ctx context.Context, height uint64) datypes.ResultRetrieve {
-    return d.Retrieve(ctx, height, d.GetDataNamespace())
-}
-func (d *dummyDAClient) RetrieveForcedInclusion(ctx context.Context, height uint64) datypes.ResultRetrieve {
-    return d.Retrieve(ctx, height, d.GetForcedInclusionNamespace())
-}
-
-func (d *dummyDAClient) GetHeaderNamespace() []byte          { return []byte("hdr") }
-func (d *dummyDAClient) GetDataNamespace() []byte            { return []byte("data") }
-func (d *dummyDAClient) GetForcedInclusionNamespace() []byte { return nil }
-func (d *dummyDAClient) HasForcedInclusionNamespace() bool   { return false }
-func (d *dummyDAClient) Get(ctx context.Context, ids []datypes.ID, namespace []byte) ([]datypes.Blob, error) {
-    return nil, nil
-}
-func (d *dummyDAClient) GetProofs(ctx context.Context, ids []datypes.ID, namespace []byte) ([]datypes.Proof, error) {
-    return nil, nil
-}
-func (d *dummyDAClient) Validate(ctx context.Context, ids []datypes.ID, proofs []datypes.Proof, namespace []byte) ([]bool, error) {
-    return nil, nil
-}
-
-func (d *dummyDAClient) SetSubmitFailure(shouldFail bool) { d.backend.failSubmit.Store(shouldFail) }
-
-func (d *dummyDAClient) StartHeightTicker(interval time.Duration) func() {
-    d.backend.tickerMu.Lock()
-    if d.backend.tickerStop == nil {
-        d.backend.tickerStop = make(chan struct{})
-        stopCh := d.backend.tickerStop
-        go func() {
-            ticker := time.NewTicker(interval)
-            defer ticker.Stop()
-            for {
-                select {
-                case <-ticker.C:
-                    d.backend.mu.Lock()
-                    d.backend.height++
-                    d.backend.mu.Unlock()
-                case <-stopCh:
-                    return
-                }
-            }
-        }()
-    }
-    d.backend.tickerRefs.Add(1)
-    d.backend.tickerMu.Unlock()
-
-    return func() {
-        if d.backend.tickerRefs.Add(-1) != 0 {
-            return
-        }
-        d.backend.tickerMu.Lock()
-        defer d.backend.tickerMu.Unlock()
-        if d.backend.tickerStop != nil {
-            close(d.backend.tickerStop)
-            d.backend.tickerStop = nil
-        }
+        maxBlobSize = testda.DefaultMaxBlobSize
     }
+    return getSharedDummyDA(maxBlobSize)
 }

 func createTestComponents(t *testing.T, config evconfig.Config) (coreexecutor.Executor, coresequencer.Sequencer, block.DAClient, *p2p.Client, datastore.Batching, *key.NodeKey, func()) {
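
Note: with this change every node test obtains the same shared testda.DummyDA through these helpers. A minimal sketch of the intended usage, under the assumption that tests reset the shared instance between runs (the t.Cleanup wiring below is illustrative, not part of this diff):

    func TestSharedDummyDASketch(t *testing.T) {
        // Passing 0 falls back to testda.DefaultMaxBlobSize before the call
        // is delegated to getSharedDummyDA.
        dummyDA := newDummyDAClient(0)

        // The instance is process-wide; tests reset it rather than recreate it.
        t.Cleanup(resetSharedDummyDA)

        // sync.Once guarantees later calls hand back the same *testda.DummyDA.
        if newDummyDAClient(0) != dummyDA {
            t.Fatal("expected the shared DummyDA instance")
        }
    }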

node/single_sequencer_integration_test.go

Lines changed: 3 additions & 2 deletions
@@ -16,6 +16,7 @@ import (

     coreexecutor "github.com/evstack/ev-node/core/execution"
     evconfig "github.com/evstack/ev-node/pkg/config"
+    "github.com/evstack/ev-node/test/testda"
 )

 // FullNodeTestSuite is a test suite for full node integration tests
@@ -321,8 +322,8 @@ func TestBatchQueueThrottlingWithDAFailure(t *testing.T) {
     require.True(ok, "Expected DummyExecutor implementation")

     // Cast dummyDA to our test double so we can simulate failures
-    dummyDAImpl, ok := dummyDA.(*dummyDAClient)
-    require.True(ok, "Expected dummyDAClient implementation")
+    dummyDAImpl, ok := dummyDA.(*testda.DummyDA)
+    require.True(ok, "Expected testda.DummyDA implementation")

     // Create node with components
     node, cleanup := createNodeWithCustomComponents(t, config, executor, sequencer, dummyDAImpl, p2pClient, ds, func() {})
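
The cast above works because createTestComponents now returns the shared testda.DummyDA behind the block.DAClient interface. A hedged sketch of the failure-simulation pattern this test relies on; SetSubmitFailure is assumed to carry over from the removed dummyDAClient and should be verified against the actual test/testda package:

    func TestSimulateDAFailureSketch(t *testing.T) {
        // Assumption: testda.DummyDA exposes SetSubmitFailure like the old
        // dummyDAClient did; confirm against test/testda before relying on it.
        var daClient block.DAClient = newDummyDAClient(0)

        dummyDAImpl, ok := daClient.(*testda.DummyDA)
        require.True(t, ok, "Expected testda.DummyDA implementation")

        dummyDAImpl.SetSubmitFailure(true) // subsequent Submit calls report an error status
        defer dummyDAImpl.SetSubmitFailure(false)
    }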

sequencers/single/sequencer_test.go

Lines changed: 8 additions & 102 deletions
@@ -4,7 +4,6 @@ import (
     "bytes"
     "context"
     "errors"
-    "sync/atomic"
     "testing"
     "time"

@@ -16,9 +15,9 @@ import (

     "github.com/evstack/ev-node/block"
     coresequencer "github.com/evstack/ev-node/core/sequencer"
-    datypes "github.com/evstack/ev-node/pkg/da/types"
     "github.com/evstack/ev-node/pkg/genesis"
     damocks "github.com/evstack/ev-node/test/mocks"
+    "github.com/evstack/ev-node/test/testda"
 )

 // MockForcedInclusionRetriever is a mock implementation of DARetriever for testing
// MockForcedInclusionRetriever is a mock implementation of DARetriever for testing
@@ -34,101 +33,8 @@ func (m *MockForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Con
3433
return args.Get(0).(*block.ForcedInclusionEvent), args.Error(1)
3534
}
3635

37-
type dummyDA struct {
38-
failSubmit atomic.Bool
39-
daHeight atomic.Uint64
40-
tickerStop chan struct{}
41-
tickerDur time.Duration
42-
maxBlobSize uint64
43-
}
44-
45-
func newDummyDA(maxBlobSize uint64, tick time.Duration) *dummyDA {
46-
return &dummyDA{
47-
tickerStop: make(chan struct{}),
48-
tickerDur: tick,
49-
maxBlobSize: maxBlobSize,
50-
}
51-
}
52-
53-
func (d *dummyDA) Submit(ctx context.Context, data [][]byte, gasPrice float64, namespace []byte, options []byte) datypes.ResultSubmit {
54-
if d.failSubmit.Load() {
55-
return datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusError, Message: "submit failed"}}
56-
}
57-
height := d.daHeight.Load()
58-
return datypes.ResultSubmit{
59-
BaseResult: datypes.BaseResult{
60-
Code: datypes.StatusSuccess,
61-
Height: height,
62-
IDs: [][]byte{},
63-
},
64-
}
65-
}
66-
67-
func (d *dummyDA) Retrieve(ctx context.Context, height uint64, namespace []byte) datypes.ResultRetrieve {
68-
return datypes.ResultRetrieve{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Height: height}}
69-
}
70-
71-
func (d *dummyDA) RetrieveHeaders(ctx context.Context, height uint64) datypes.ResultRetrieve {
72-
return datypes.ResultRetrieve{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Height: height}}
73-
}
74-
75-
func (d *dummyDA) RetrieveData(ctx context.Context, height uint64) datypes.ResultRetrieve {
76-
return datypes.ResultRetrieve{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Height: height}}
77-
}
78-
79-
func (d *dummyDA) RetrieveForcedInclusion(ctx context.Context, height uint64) datypes.ResultRetrieve {
80-
return datypes.ResultRetrieve{BaseResult: datypes.BaseResult{Code: datypes.StatusNotFound, Height: height}}
81-
}
82-
83-
func (d *dummyDA) Get(ctx context.Context, ids []datypes.ID, namespace []byte) ([]datypes.Blob, error) {
84-
return nil, nil
85-
}
86-
87-
func (d *dummyDA) GetProofs(ctx context.Context, ids []datypes.ID, namespace []byte) ([]datypes.Proof, error) {
88-
return nil, nil
89-
}
90-
91-
func (d *dummyDA) Validate(ctx context.Context, ids []datypes.ID, proofs []datypes.Proof, namespace []byte) ([]bool, error) {
92-
res := make([]bool, len(ids))
93-
for i := range res {
94-
res[i] = true
95-
}
96-
return res, nil
97-
}
98-
99-
func (d *dummyDA) GetHeaderNamespace() []byte { return []byte("hdr") }
100-
func (d *dummyDA) GetDataNamespace() []byte { return []byte("data") }
101-
func (d *dummyDA) GetForcedInclusionNamespace() []byte { return nil }
102-
func (d *dummyDA) HasForcedInclusionNamespace() bool { return false }
103-
104-
func (d *dummyDA) StartHeightTicker() {
105-
if d.tickerDur == 0 {
106-
return
107-
}
108-
ticker := time.NewTicker(d.tickerDur)
109-
go func() {
110-
for {
111-
select {
112-
case <-ticker.C:
113-
d.daHeight.Add(1)
114-
case <-d.tickerStop:
115-
ticker.Stop()
116-
return
117-
}
118-
}
119-
}()
120-
}
121-
122-
func (d *dummyDA) StopHeightTicker() {
123-
select {
124-
case <-d.tickerStop:
125-
default:
126-
close(d.tickerStop)
127-
}
128-
}
129-
130-
func (d *dummyDA) SetSubmitFailure(shouldFail bool) {
131-
d.failSubmit.Store(shouldFail)
36+
func newDummyDA(maxBlobSize uint64) *testda.DummyDA {
37+
return testda.New(testda.WithMaxBlobSize(maxBlobSize))
13238
}
13339

13440
// newTestSequencer creates a sequencer for tests that don't need full initialization
@@ -158,7 +64,7 @@ func newTestSequencer(t *testing.T, db ds.Batching, fiRetriever ForcedInclusionR
 }

 func TestSequencer_SubmitBatchTxs(t *testing.T) {
-    dummyDA := newDummyDA(100_000_000, 10*time.Second)
+    dummyDA := newDummyDA(100_000_000)
     db := ds.NewMapDatastore()
     ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
     defer cancel()
@@ -212,7 +118,7 @@ func TestSequencer_SubmitBatchTxs(t *testing.T) {
 }

 func TestSequencer_SubmitBatchTxs_EmptyBatch(t *testing.T) {
-    dummyDA := newDummyDA(100_000_000, 10*time.Second)
+    dummyDA := newDummyDA(100_000_000)
     db := ds.NewMapDatastore()
     ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
     defer cancel()
@@ -937,9 +843,9 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) {
     defer db.Close()

     // Create a dummy DA that we can make fail
-    dummyDA := newDummyDA(100_000, 100*time.Millisecond)
-    dummyDA.StartHeightTicker()
-    defer dummyDA.StopHeightTicker()
+    dummyDA := newDummyDA(100_000)
+    stopTicker := dummyDA.StartHeightTicker(100 * time.Millisecond)
+    defer stopTicker()

     // Create sequencer with small queue size to trigger throttling quickly
     queueSize := 3 // Small for testing
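
With the ticker folded into testda.DummyDA, StartHeightTicker takes the interval directly and returns its own stop function, replacing the old StartHeightTicker/StopHeightTicker pair. A small sketch of a helper a test might build on top of it; the helper name and t.Cleanup wiring are hypothetical, only the testda calls are taken from this diff:

    // newTickingDummyDA is a hypothetical convenience wrapper: it advances the
    // simulated DA height every 100ms and stops the ticker when the test ends.
    func newTickingDummyDA(t *testing.T) *testda.DummyDA {
        dummyDA := testda.New(testda.WithMaxBlobSize(100_000))
        stopTicker := dummyDA.StartHeightTicker(100 * time.Millisecond)
        t.Cleanup(stopTicker)
        return dummyDA
    }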
