@@ -26,19 +26,19 @@ import (
 )

 const (
-	topicName           = "test-buffered-topic-subscription"
-	discoveryServiceTag = "test-buffered-topic-subscription-discovery"
-	nodeCount           = 20
-	messageCount        = 100
-	throttledRate       = 5 * nodeCount * time.Millisecond
-	logLevel            = zapcore.ErrorLevel
-	retryInterval       = 1 * time.Second
+	topicName     = "test-buffered-topic-subscription"
+	nodeCount     = 20
+	messageCount  = 100
+	logLevel      = zapcore.ErrorLevel
+	retryInterval = 1 * time.Second
 )

+type TestMessage = consensus.ConsensusStreamId
+
 type node struct {
 	host     host.Host
 	topic    *pubsub.Topic
-	messages []proto.Message
+	messages []*TestMessage
 }

 func TestBufferedTopicSubscriptionAndProtoBroadcaster(t *testing.T) {
@@ -67,7 +67,7 @@ func TestBufferedTopicSubscriptionAndProtoBroadcaster(t *testing.T) {
 	allMessages := make(map[string]struct{})

 	for i := range nodes {
-		nodes[i].messages = make([]proto.Message, messageCount)
+		nodes[i].messages = make([]*TestMessage, messageCount)
 		for j := range nodes[i].messages {
 			msg := getTestMessage(i, j)
 			nodes[i].messages[j] = msg
@@ -88,6 +88,10 @@ func TestBufferedTopicSubscriptionAndProtoBroadcaster(t *testing.T) {
 		logger := &utils.ZapLogger{SugaredLogger: logger.Named(fmt.Sprintf("destination-%d", i))}
 		pending := maps.Clone(allMessages)
 		subscription := buffered.NewTopicSubscription(logger, nodeCount*messageCount, func(ctx context.Context, msg *pubsub.Message) {
+			if len(pending) == 0 {
+				return
+			}
+
 			delete(pending, string(msg.Message.Data))
 			if len(pending) == 0 {
 				wg.Done()
@@ -104,12 +108,17 @@ func TestBufferedTopicSubscriptionAndProtoBroadcaster(t *testing.T) {
 		time.Sleep(1 * time.Second)
 		iterator.ForEachIdx(nodes, func(i int, source *node) {
 			logger := &utils.ZapLogger{SugaredLogger: logger.Named(fmt.Sprintf("source-%d", i))}
-			broadcaster := buffered.NewProtoBroadcaster(logger, messageCount, retryInterval)
+			var rebroadcastStrategy buffered.RebroadcastStrategy[*TestMessage] = buffered.NoRebroadcast[*TestMessage]{}
+			if i%2 == 0 {
+				rebroadcastStrategy = buffered.NewRebroadcastLatest(retryInterval, func(msg *TestMessage) uint64 {
+					return msg.BlockNumber
+				})
+			}
+			broadcaster := buffered.NewProtoBroadcaster(logger, messageCount, retryInterval, rebroadcastStrategy)
 			go broadcaster.Loop(t.Context(), source.topic)
 			for _, message := range source.messages {
 				logger.Debugw("publishing", "message", message)
 				broadcaster.Broadcast(t.Context(), message)
-				time.Sleep(throttledRate)
 			}
 		})
 	}()
@@ -138,8 +147,8 @@ func TestBufferedTopicSubscriptionAndProtoBroadcaster(t *testing.T) {
 	})
 }

-func getTestMessage(node, messageIndex int) proto.Message {
-	return &consensus.ConsensusStreamId{
+func getTestMessage(node, messageIndex int) *TestMessage {
+	return &TestMessage{
 		Nonce: uint64(node*messageCount + messageIndex),
 	}
 }
@@ -158,7 +167,12 @@ func getNode(t *testing.T) node {
 	)
 	require.NoError(t, err)

-	gossipSub, err := pubsub.NewGossipSub(t.Context(), host)
+	gossipSub, err := pubsub.NewGossipSub(
+		t.Context(),
+		host,
+		pubsub.WithValidateQueueSize(nodeCount*messageCount),
+		pubsub.WithPeerOutboundQueueSize(nodeCount*messageCount),
+	)
 	require.NoError(t, err)

 	topic, err := gossipSub.Join(topicName)
@@ -171,7 +185,7 @@ func getNode(t *testing.T) node {
 	return node{
 		host:     host,
 		topic:    topic,
-		messages: make([]proto.Message, messageCount),
+		messages: make([]*TestMessage, messageCount),
 	}
 }
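The test relies on two rebroadcast helpers from the repo's buffered package, NoRebroadcast and NewRebroadcastLatest, whose implementation is not part of this diff. The sketch below is only an illustration of the shape the call sites seem to assume; the Track and Pending methods, the rebroadcastLatest struct, and the rebroadcastLoop helper are hypothetical names, not the package's actual API.

// Hypothetical sketch: a rebroadcast strategy consistent with how the test
// constructs NoRebroadcast[*TestMessage]{} and NewRebroadcastLatest(retryInterval, keyFn).
package sketch

import (
	"context"
	"sync"
	"time"
)

// RebroadcastStrategy decides which previously published message, if any,
// should be re-sent when the retry interval elapses.
type RebroadcastStrategy[T any] interface {
	Track(msg T)        // record a message that was just broadcast
	Pending() (T, bool) // message to rebroadcast, and whether one exists
}

// NoRebroadcast never re-sends anything.
type NoRebroadcast[T any] struct{}

func (NoRebroadcast[T]) Track(T) {}

func (NoRebroadcast[T]) Pending() (T, bool) {
	var zero T
	return zero, false
}

// rebroadcastLatest keeps only the message with the highest key (for example a
// block number) and offers it for re-sending on every tick.
type rebroadcastLatest[T any] struct {
	mu       sync.Mutex
	interval time.Duration
	key      func(T) uint64
	latest   T
	have     bool
}

func NewRebroadcastLatest[T any](interval time.Duration, key func(T) uint64) *rebroadcastLatest[T] {
	return &rebroadcastLatest[T]{interval: interval, key: key}
}

func (r *rebroadcastLatest[T]) Track(msg T) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if !r.have || r.key(msg) >= r.key(r.latest) {
		r.latest, r.have = msg, true
	}
}

func (r *rebroadcastLatest[T]) Pending() (T, bool) {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.latest, r.have
}

// rebroadcastLoop shows how a broadcaster could drive a strategy: on each tick
// it re-publishes whatever the strategy currently reports as pending.
func rebroadcastLoop[T any](ctx context.Context, interval time.Duration, s RebroadcastStrategy[T], publish func(T)) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if msg, ok := s.Pending(); ok {
				publish(msg)
			}
		}
	}
}

Read against the diff, a shape like this would make the even/odd split meaningful: even-indexed sources keep re-offering their highest-BlockNumber message on every retryInterval tick, odd-indexed sources publish each message exactly once, and the subscription callback's early return tolerates the resulting duplicates once all expected payloads have been seen.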