Commit 57c3b56

WIP
1 parent e510b43 commit 57c3b56

8 files changed, +129 −61 lines changed

.github/workflows/juno-test.yml

Lines changed: 1 addition & 0 deletions
@@ -18,6 +18,7 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-latest, macos-latest, ubuntu-arm64-4-core]
+        iteration: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
     runs-on: ${{ matrix.os }}
     env:
       VM_DEBUG: true
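Taken together with the test changes below, the new iteration axis looks like a way to run each OS job ten times so that flaky p2p test failures surface reliably in CI; the test environment itself is unchanged.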

consensus/p2p/buffered/buffered_test.go

Lines changed: 62 additions & 20 deletions
@@ -4,11 +4,12 @@ import (
 	"context"
 	"fmt"
 	"maps"
-	"sync"
+	"slices"
 	"testing"
 	"time"
 
 	"github.com/NethermindEth/juno/consensus/p2p/buffered"
+	"github.com/NethermindEth/juno/consensus/p2p/config"
 	"github.com/NethermindEth/juno/p2p/pubsub/testutils"
 	"github.com/NethermindEth/juno/utils"
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
@@ -22,17 +23,22 @@ import (
 )
 
 const (
-	chainID       = "1"
-	protocolID    = "test-buffered-topic-subscription-protocol"
-	topicName     = "test-buffered-topic-subscription-topic"
-	nodeCount     = 20
-	messageCount  = 100
-	logLevel      = zapcore.ErrorLevel
-	retryInterval = 1 * time.Second
+	chainID      = "1"
+	protocolID   = "test-buffered-topic-subscription-protocol"
+	topicName    = "test-buffered-topic-subscription-topic"
+	nodeCount    = 20
+	messageCount = 50
+	logLevel     = zapcore.InfoLevel
+	maxWait      = 5 * time.Second
 )
 
 type TestMessage = consensus.ConsensusStreamId
 
+type origin struct {
+	Source int
+	Index  int
+}
+
 func TestBufferedTopicSubscriptionAndProtoBroadcaster(t *testing.T) {
 	t.Run(fmt.Sprintf("%d nodes, each sending %d messages", nodeCount, messageCount), func(t *testing.T) {
 		logger, err := utils.NewZapLogger(utils.NewLogLevel(logLevel), true)
@@ -42,7 +48,7 @@ func TestBufferedTopicSubscriptionAndProtoBroadcaster(t *testing.T) {
 		topics := nodes.JoinTopic(t, chainID, protocolID, topicName)
 
 		messages := make([][]*TestMessage, nodeCount)
-		allMessages := make(map[string]struct{})
+		allMessages := make(map[string]origin)
 
 		for i := range messages {
 			messages[i] = make([]*TestMessage, messageCount)
@@ -53,46 +59,65 @@ func TestBufferedTopicSubscriptionAndProtoBroadcaster(t *testing.T) {
 				msgBytes, err := proto.Marshal(msg)
 				require.NoError(t, err)
 
-				allMessages[string(msgBytes)] = struct{}{}
+				allMessages[string(msgBytes)] = origin{Source: i, Index: j}
 			}
 		}
 
-		iterator := iter.Iterator[*pubsub.Topic]{MaxGoroutines: len(topics)}
-		wg := sync.WaitGroup{}
-		wg.Add(len(messages))
+		iterator := iter.Iterator[*pubsub.Topic]{MaxGoroutines: nodeCount}
+		finished := make(chan struct{}, nodeCount)
+		liveness := make(chan struct{}, 1)
 
 		go func() {
 			iterator.ForEachIdx(topics, func(i int, destination **pubsub.Topic) {
 				logger := &utils.ZapLogger{SugaredLogger: logger.Named(fmt.Sprintf("destination-%d", i))}
 				pending := maps.Clone(allMessages)
+
+				// Ignore the messages we are broadcasting
+				for _, message := range messages[i] {
+					msgBytes, err := proto.Marshal(message)
+					require.NoError(t, err)
+					delete(pending, string(msgBytes))
+				}
+
 				subscription := buffered.NewTopicSubscription(logger, nodeCount*messageCount, func(ctx context.Context, msg *pubsub.Message) {
-					if len(pending) == 0 {
+					msgStr := string(msg.Message.Data)
+					if _, ok := pending[msgStr]; !ok {
 						return
 					}
 
-					delete(pending, string(msg.Message.Data))
+					select {
+					case liveness <- struct{}{}:
+					default:
+					}
+
+					delete(pending, msgStr)
+
 					if len(pending) == 0 {
-						wg.Done()
+						finished <- struct{}{}
 						logger.Info("all messages received")
 					}
 					logger.Debugw("received", "message", string(msg.Message.Data), "pending", len(pending))
 				})
 
 				subscription.Loop(t.Context(), *destination)
+				if len(pending) > 0 {
+					logger.Infow("missing messages", "pending", slices.Collect(maps.Values(pending)))
+				}
 			})
 		}()
 
 		go func() {
-			time.Sleep(1 * time.Second)
 			iterator.ForEachIdx(topics, func(i int, source **pubsub.Topic) {
 				logger := &utils.ZapLogger{SugaredLogger: logger.Named(fmt.Sprintf("source-%d", i))}
+				rebroadcastInterval := config.DefaultBufferSizes.RebroadcastInterval
+
 				var rebroadcastStrategy buffered.RebroadcastStrategy[*TestMessage]
 				if i%2 == 0 {
-					rebroadcastStrategy = buffered.NewRebroadcastStrategy(retryInterval, func(msg *TestMessage) uint64 {
+					rebroadcastStrategy = buffered.NewRebroadcastStrategy(rebroadcastInterval, func(msg *TestMessage) uint64 {
						return msg.BlockNumber
					})
				}
-				broadcaster := buffered.NewProtoBroadcaster(logger, messageCount, retryInterval, rebroadcastStrategy)
+				broadcaster := buffered.NewProtoBroadcaster(logger, messageCount, rebroadcastInterval, rebroadcastStrategy)
 				go broadcaster.Loop(t.Context(), *source)
 				for _, message := range messages[i] {
 					logger.Debugw("publishing", "message", message)
@@ -101,7 +126,9 @@ func TestBufferedTopicSubscriptionAndProtoBroadcaster(t *testing.T) {
 			})
 		}()
 
-		wg.Wait()
+		for range nodeCount {
+			wait(t, liveness, finished)
+		}
 	})
 
 	t.Run("canceled context", func(t *testing.T) {
@@ -128,6 +155,21 @@ func TestBufferedTopicSubscriptionAndProtoBroadcaster(t *testing.T) {
 	})
 }
 
+func wait(t *testing.T, liveness, finished chan struct{}) {
+	t.Helper()
+	for {
+		select {
+		case <-finished:
+			return
+		case <-liveness:
+			continue
+		case <-time.After(maxWait):
+			require.FailNow(t, "liveness check failed")
+			return
+		}
+	}
+}
+
 func getTestMessage(node, messageIndex int) *TestMessage {
 	return &TestMessage{
 		Nonce: uint64(node*messageCount + messageIndex),
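The test swaps the old sync.WaitGroup for a pair of channels: each handler pushes a non-blocking signal onto liveness whenever it removes a pending message and onto finished once its pending set drains, and the wait helper only fails after maxWait passes with neither signal. Below is a minimal, self-contained sketch of that watchdog pattern; the worker, item counts, and timings are made up for illustration and are not part of the Juno code.

package main

import (
	"fmt"
	"time"
)

const (
	workerCount = 3
	maxWait     = 5 * time.Second
)

// worker simulates a subscriber: it reports progress on liveness
// (non-blocking, so a slow reader never stalls it) and signals
// finished once all of its expected items have arrived.
func worker(id, items int, liveness, finished chan struct{}) {
	for i := 0; i < items; i++ {
		time.Sleep(10 * time.Millisecond) // stand-in for receiving a message
		select {
		case liveness <- struct{}{}:
		default: // a progress signal is already pending; drop this one
		}
	}
	finished <- struct{}{}
	fmt.Printf("worker %d done\n", id)
}

// wait blocks until one worker finishes, tolerating any number of
// progress signals, and gives up only after maxWait of total silence.
func wait(liveness, finished chan struct{}) error {
	for {
		select {
		case <-finished:
			return nil
		case <-liveness:
			continue // progress was made; the timeout starts over
		case <-time.After(maxWait):
			return fmt.Errorf("no progress within %s", maxWait)
		}
	}
}

func main() {
	finished := make(chan struct{}, workerCount)
	liveness := make(chan struct{}, 1)

	for id := range workerCount {
		go worker(id, 10, liveness, finished)
	}

	// One wait per worker, mirroring the `for range nodeCount` loop in the test.
	for range workerCount {
		if err := wait(liveness, finished); err != nil {
			panic(err)
		}
	}
}

Compared with wg.Wait(), this bounds the silence between messages rather than the total test time, so a stalled network fails fast instead of hanging until the CI job times out.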

consensus/p2p/buffered/proto_broadcaster.go

Lines changed: 1 addition & 0 deletions
@@ -42,6 +42,7 @@ func (b ProtoBroadcaster[M]) Broadcast(ctx context.Context, msg M) {
 func (b ProtoBroadcaster[M]) Loop(ctx context.Context, topic *pubsub.Topic) {
 	readinessOpt := pubsub.WithReadiness(pubsub.MinTopicSize(1))
 	var rebroadcasted rebroadcastMessages
+
 	for {
 		select {
 		case <-ctx.Done():

consensus/p2p/p2p.go

Lines changed: 14 additions & 18 deletions
@@ -25,12 +25,10 @@ import (
 type topicName string
 
 const (
-	protocolPrefix = "starknet"
 	chainID             = "1" // TODO: Make this configurable
 	consensusProtocolID = "consensus"
 	proposalTopicName   topicName = "consensus_proposals"
 	voteTopicName       topicName = "consensus_votes"
-	gossipSubHistory = 60
 )
 
 type P2P[V types.Hashable[H], H types.Hash, A types.Addr] interface {
@@ -122,14 +120,6 @@ func New(
 	}
 }
 
-func (p *p2p[V, H, A]) getGossipSubOptions() libp2p.Option {
-	params := libp2p.DefaultGossipSubParams()
-	params.HistoryLength = gossipSubHistory
-	params.HistoryGossip = gossipSubHistory
-
-	return libp2p.WithGossipSubParams(params)
-}
-
 func (p *p2p[V, H, A]) Run(ctx context.Context) error {
 	gossipSub, err := pubsub.Run(
 		ctx,
@@ -138,36 +128,42 @@ func (p *p2p[V, H, A]) Run(ctx context.Context) error {
 		p.host,
 		p.pubSubQueueSize,
 		p.bootstrapPeersFn,
-		p.getGossipSubOptions(),
 	)
 	if err != nil {
 		return fmt.Errorf("unable to create gossipsub with error: %w", err)
 	}
 
-	topics := make(map[topicName]*libp2p.Topic)
+	topics := make([]*libp2p.Topic, 0, len(p.topicAttachment))
+	relayCancels := make([]func(), 0, len(p.topicAttachment))
 	defer func() {
+		for _, cancel := range relayCancels {
+			cancel()
+		}
 		for _, topic := range topics {
 			topic.Close()
 		}
 	}()
 
 	wg := conc.NewWaitGroup()
+	defer wg.Wait()
 
-	for topicName := range p.topicAttachment {
-		if topics[topicName], err = gossipSub.Join(string(topicName)); err != nil {
+	for topicName, services := range p.topicAttachment {
+		topic, relayCancel, err := pubsub.JoinTopic(gossipSub, string(topicName))
+		if err != nil {
 			return fmt.Errorf("unable to join topic %s with error: %w", topicName, err)
 		}
-	}
 
-	for topicName, services := range p.topicAttachment {
+		topics = append(topics, topic)
+		relayCancels = append(relayCancels, relayCancel)
+
 		for _, service := range services {
 			wg.Go(func() {
-				service.Loop(ctx, topics[topicName])
+				service.Loop(ctx, topic)
			})
		}
	}

-	wg.Wait()
+	<-ctx.Done()
 	return nil
 }
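In Run, the old two-pass join (join every topic, then attach services) becomes a single loop over pubsub.JoinTopic, which, judging by the call sites in this commit, returns the topic plus a relay-cancel function; both are collected so the deferred cleanup can cancel relays before closing topics, and the method now parks on <-ctx.Done() with wg.Wait() deferred. A rough, self-contained sketch of that cleanup shape follows; the joinTopic stub and topic type are placeholders, not Juno's actual pubsub API.

package main

import (
	"context"
	"fmt"
	"time"
)

// topic and joinTopic stand in for juno's pubsub types; the real
// pubsub.JoinTopic appears to return (*pubsub.Topic, relay-cancel func, error).
type topic struct{ name string }

func (t *topic) Close() { fmt.Println("closed", t.name) }

func joinTopic(name string) (*topic, func(), error) {
	relayCancel := func() { fmt.Println("relay canceled for", name) }
	return &topic{name: name}, relayCancel, nil
}

func run(ctx context.Context, topicNames []string) error {
	topics := make([]*topic, 0, len(topicNames))
	relayCancels := make([]func(), 0, len(topicNames))
	// On the way out, undo the relays first, then close the topics.
	defer func() {
		for _, cancel := range relayCancels {
			cancel()
		}
		for _, t := range topics {
			t.Close()
		}
	}()

	for _, name := range topicNames {
		t, relayCancel, err := joinTopic(name)
		if err != nil {
			return fmt.Errorf("unable to join topic %s with error: %w", name, err)
		}
		topics = append(topics, t)
		relayCancels = append(relayCancels, relayCancel)
		// Services attached to the topic would be started here (wg.Go in the real code).
	}

	<-ctx.Done() // block until shutdown instead of waiting on the services directly
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	if err := run(ctx, []string{"consensus_proposals", "consensus_votes"}); err != nil {
		panic(err)
	}
}

Because deferred calls run last-in-first-out, the real method drains the service goroutines via the deferred wg.Wait() first, then cancels the relays and closes the topics, the reverse of construction order.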

mempool/p2p/p2p.go

Lines changed: 2 additions & 1 deletion
@@ -61,10 +61,11 @@ func (p *P2P) Run(ctx context.Context) error {
 		return fmt.Errorf("unable to create gossipsub with error: %w", err)
 	}
 
-	topic, err := gossipSub.Join(transactionTopicName)
+	topic, relayCancel, err := pubsub.JoinTopic(gossipSub, transactionTopicName)
 	if err != nil {
 		return fmt.Errorf("unable to join topic %s with error: %w", transactionTopicName, err)
 	}
+	defer relayCancel()
 	defer topic.Close()
 
 	wg := conc.NewWaitGroup()

mempool/p2p/p2p_broadcasters_listeners_test.go

Lines changed: 21 additions & 9 deletions
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"maps"
+	"slices"
 	"testing"
 	"time"
 
@@ -20,12 +21,17 @@ import (
 )
 
 const (
-	logLevel  = zapcore.DebugLevel
-	nodeCount = 3
-	txCount   = 300
+	logLevel  = zapcore.InfoLevel
+	nodeCount = 20
+	txCount   = 50
 	maxWait   = 5 * time.Second
 )
 
+type origin struct {
+	Source int
+	Index  int
+}
+
 // mockMempool implements mempool.Pool for testing
 type mockMempool chan<- *mempool.BroadcastedTransaction
 
@@ -45,10 +51,10 @@ func TestMempoolBroadcastersAndListeners(t *testing.T) {
 
 	nodes := pubsubtestutils.BuildNetworks(t, pubsubtestutils.LineNetworkConfig(nodeCount))
 
-	txSet := make(map[string]struct{})
+	txSet := make(map[string]origin)
 	for node := range nodeCount {
-		for _, tx := range transactions[node] {
-			txSet[tx.Transaction.Hash().String()] = struct{}{}
+		for index, tx := range transactions[node] {
+			txSet[tx.Transaction.Hash().String()] = origin{Source: node, Index: index}
 		}
 	}
 
@@ -75,15 +81,23 @@ func TestMempoolBroadcastersAndListeners(t *testing.T) {
 
 		transactionWait.Go(func() {
 			pending := maps.Clone(txSet)
+
+			// Ignore the transactions we are broadcasting
+			for _, transaction := range transactions[index] {
+				delete(pending, transaction.Transaction.Hash().String())
+			}
+
 			for {
 				select {
 				case transaction := <-received:
 					delete(pending, transaction.Transaction.Hash().String())
 					logger.Debugw("pending", "count", len(pending))
 					if len(pending) == 0 {
+						logger.Infow("all transactions received")
 						return
 					}
 				case <-time.After(maxWait):
+					logger.Infow("missing transactions", "pending", slices.Collect(maps.Values(pending)))
 					require.FailNow(t, "timed out waiting for transactions")
 				}
 			}
@@ -96,9 +110,7 @@ func TestMempoolBroadcastersAndListeners(t *testing.T) {
 
 func getRandomTransactions(t *testing.T) []mempool.BroadcastedTransaction {
 	transactions := make([]mempool.BroadcastedTransaction, txCount)
-	transactions[0], _ = testutils.TransactionBuilder.GetTestDeclareTransaction(t)
-	transactions[1], _ = testutils.TransactionBuilder.GetTestDeployAccountTransaction(t)
-	for i := 2; i < txCount; i++ {
+	for i := range txCount {
 		transactions[i], _ = testutils.TransactionBuilder.GetTestInvokeTransaction(t)
 	}
 	return transactions
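Both test files now track pending messages in a map keyed by payload (or transaction hash) with an origin{Source, Index} value, and each receiver deletes its own broadcasts up front, so a timeout can report exactly which sender/index pairs never arrived instead of just a count. A small sketch of that bookkeeping, using plain strings where the real tests use marshaled protos and transaction hashes:

package main

import (
	"fmt"
	"maps"
	"slices"
)

// origin records which node produced a message and its position in that
// node's send order, so missing messages can be reported precisely.
type origin struct {
	Source int
	Index  int
}

func main() {
	// messages[i][j] is the j-th payload broadcast by node i.
	messages := [][]string{
		{"a0", "a1"},
		{"b0", "b1"},
		{"c0", "c1"},
	}

	// Index every message in the network by payload.
	allMessages := make(map[string]origin)
	for i, node := range messages {
		for j, payload := range node {
			allMessages[payload] = origin{Source: i, Index: j}
		}
	}

	// Each receiver clones the full set and removes its own messages,
	// since a node does not need to hear its own broadcasts back.
	self := 1
	pending := maps.Clone(allMessages)
	for _, payload := range messages[self] {
		delete(pending, payload)
	}

	// Simulate receiving everything from node 0 but nothing from node 2.
	for _, payload := range messages[0] {
		delete(pending, payload)
	}

	if len(pending) > 0 {
		// On timeout the tests log these origins instead of opaque bytes.
		fmt.Println("missing:", slices.Collect(maps.Values(pending)))
	}
}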
