Skip to content

Commit c374c39

Browse files
authored
Add ping uptimes test (ava-labs#1550)
1 parent eaf5256 commit c374c39

File tree

1 file changed

+140
-9
lines changed

1 file changed

+140
-9
lines changed

network/peer/peer_test.go

+140-9
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ import (
2121
"github.com/ava-labs/avalanchego/proto/pb/p2p"
2222
"github.com/ava-labs/avalanchego/snow/networking/router"
2323
"github.com/ava-labs/avalanchego/snow/networking/tracker"
24+
"github.com/ava-labs/avalanchego/snow/uptime"
2425
"github.com/ava-labs/avalanchego/snow/validators"
2526
"github.com/ava-labs/avalanchego/staking"
2627
"github.com/ava-labs/avalanchego/utils/constants"
@@ -60,7 +61,7 @@ func newMessageCreator(t *testing.T) message.Creator {
6061
return mc
6162
}
6263

63-
func makeRawTestPeers(t *testing.T) (*rawTestPeer, *rawTestPeer) {
64+
func makeRawTestPeers(t *testing.T, trackedSubnets set.Set[ids.ID]) (*rawTestPeer, *rawTestPeer) {
6465
t.Helper()
6566
require := require.New(t)
6667

@@ -98,7 +99,8 @@ func makeRawTestPeers(t *testing.T) (*rawTestPeer, *rawTestPeer) {
9899
Log: logging.NoLog{},
99100
InboundMsgThrottler: throttling.NewNoInboundThrottler(),
100101
VersionCompatibility: version.GetCompatibility(constants.LocalID),
101-
MySubnets: set.Set[ids.ID]{},
102+
MySubnets: trackedSubnets,
103+
UptimeCalculator: uptime.NoOpCalculator,
102104
Beacons: validators.NewSet(),
103105
NetworkID: constants.LocalID,
104106
PingFrequency: constants.DefaultPingFrequency,
@@ -146,8 +148,8 @@ func makeRawTestPeers(t *testing.T) (*rawTestPeer, *rawTestPeer) {
146148
return peer0, peer1
147149
}
148150

149-
func makeTestPeers(t *testing.T) (*testPeer, *testPeer) {
150-
rawPeer0, rawPeer1 := makeRawTestPeers(t)
151+
func makeTestPeers(t *testing.T, trackedSubnets set.Set[ids.ID]) (*testPeer, *testPeer) {
152+
rawPeer0, rawPeer1 := makeRawTestPeers(t, trackedSubnets)
151153

152154
peer0 := &testPeer{
153155
Peer: Start(
@@ -182,11 +184,11 @@ func makeTestPeers(t *testing.T) (*testPeer, *testPeer) {
182184
return peer0, peer1
183185
}
184186

185-
func makeReadyTestPeers(t *testing.T) (*testPeer, *testPeer) {
187+
func makeReadyTestPeers(t *testing.T, trackedSubnets set.Set[ids.ID]) (*testPeer, *testPeer) {
186188
t.Helper()
187189
require := require.New(t)
188190

189-
peer0, peer1 := makeTestPeers(t)
191+
peer0, peer1 := makeTestPeers(t, trackedSubnets)
190192

191193
err := peer0.AwaitReady(context.Background())
192194
require.NoError(err)
@@ -204,8 +206,7 @@ func makeReadyTestPeers(t *testing.T) (*testPeer, *testPeer) {
204206
func TestReady(t *testing.T) {
205207
require := require.New(t)
206208

207-
rawPeer0, rawPeer1 := makeRawTestPeers(t)
208-
209+
rawPeer0, rawPeer1 := makeRawTestPeers(t, set.Set[ids.ID]{})
209210
peer0 := Start(
210211
rawPeer0.config,
211212
rawPeer0.conn,
@@ -255,7 +256,7 @@ func TestReady(t *testing.T) {
255256
func TestSend(t *testing.T) {
256257
require := require.New(t)
257258

258-
peer0, peer1 := makeReadyTestPeers(t)
259+
peer0, peer1 := makeReadyTestPeers(t, set.Set[ids.ID]{})
259260
mc := newMessageCreator(t)
260261

261262
outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, ids.Empty, p2p.EngineType_ENGINE_TYPE_SNOWMAN)
@@ -273,3 +274,133 @@ func TestSend(t *testing.T) {
273274
err = peer1.AwaitClosed(context.Background())
274275
require.NoError(err)
275276
}
277+
278+
// TestPingUptimes verifies that a peer records the uptimes carried in an
// inbound Ping message: the primary-network uptime is always recorded, a
// tracked subnet's uptime is recorded when present, and a Ping that reports
// an uptime for a subnet we do not track causes the receiving peer to
// disconnect.
func TestPingUptimes(t *testing.T) {
	trackedSubnetID := ids.GenerateTestID()
	untrackedSubnetID := ids.GenerateTestID()

	trackedSubnets := set.NewSet[ids.ID](1)
	trackedSubnets.Add(trackedSubnetID)

	mc := newMessageCreator(t)

	testCases := []struct {
		name string
		// msg is the Ping message sent from peer0 to peer1.
		msg message.OutboundMessage
		// shouldClose marks a Ping that is expected to make the remote
		// peer disconnect. Because peers are reused across subtests, only
		// the final test case may set this.
		shouldClose bool
		// assertFn checks the uptimes peer1 observed after handling msg.
		assertFn func(*require.Assertions, *testPeer)
	}{
		{
			name: "primary network only",
			msg: func() message.OutboundMessage {
				// nil subnet uptimes: only the primary-network uptime (1)
				// is reported.
				pingMsg, err := mc.Ping(1, nil)
				require.NoError(t, err)
				return pingMsg
			}(),
			assertFn: func(require *require.Assertions, peer *testPeer) {
				uptime, ok := peer.ObservedUptime(constants.PrimaryNetworkID)
				require.True(ok)
				require.Equal(uint32(1), uptime)

				// No subnet uptime was sent, so none should be recorded.
				uptime, ok = peer.ObservedUptime(trackedSubnetID)
				require.False(ok)
				require.Zero(uptime)
			},
		},
		{
			name: "primary network and subnet",
			msg: func() message.OutboundMessage {
				pingMsg, err := mc.Ping(
					1,
					[]*p2p.SubnetUptime{
						{
							SubnetId: trackedSubnetID[:],
							Uptime:   1,
						},
					},
				)
				require.NoError(t, err)
				return pingMsg
			}(),
			assertFn: func(require *require.Assertions, peer *testPeer) {
				uptime, ok := peer.ObservedUptime(constants.PrimaryNetworkID)
				require.True(ok)
				require.Equal(uint32(1), uptime)

				// The tracked subnet's uptime should now be recorded too.
				uptime, ok = peer.ObservedUptime(trackedSubnetID)
				require.True(ok)
				require.Equal(uint32(1), uptime)
			},
		},
		{
			name: "primary network and non tracked subnet",
			msg: func() message.OutboundMessage {
				pingMsg, err := mc.Ping(
					1,
					[]*p2p.SubnetUptime{
						{
							// Providing the untrackedSubnetID here should cause
							// the remote peer to disconnect from us.
							SubnetId: untrackedSubnetID[:],
							Uptime:   1,
						},
						{
							SubnetId: trackedSubnetID[:],
							Uptime:   1,
						},
					},
				)
				require.NoError(t, err)
				return pingMsg
			}(),
			shouldClose: true,
		},
	}

	// Note: we reuse peers across tests because makeReadyTestPeers takes a
	// while to run.
	peer0, peer1 := makeReadyTestPeers(t, trackedSubnets)
	defer func() {
		peer1.StartClose()
		peer0.StartClose()
		require.NoError(t, peer0.AwaitClosed(context.Background()))
		require.NoError(t, peer1.AwaitClosed(context.Background()))
	}()

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			require := require.New(t)

			require.True(peer0.Send(context.Background(), tc.msg))

			// Note: shouldClose can only be `true` for the last test because
			// we reuse peers across tests.
			if tc.shouldClose {
				require.NoError(peer1.AwaitClosed(context.Background()))
				return
			}

			// we send Get message after ping to ensure Ping is handled by the
			// time Get is handled. This is because Get is routed to the handler
			// whereas Ping is handled by the peer directly. We have no way to
			// know when the peer has handled the Ping message.
			sendAndFlush(t, peer0, peer1)

			tc.assertFn(require, peer1)
		})
	}
}
393+
394+
// Helper to send a message from sender to receiver and assert that the
395+
// receiver receives the message. This can be used to test a prior message
396+
// was handled by the peer.
397+
func sendAndFlush(t *testing.T, sender *testPeer, receiver *testPeer) {
398+
t.Helper()
399+
mc := newMessageCreator(t)
400+
outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, ids.Empty, p2p.EngineType_ENGINE_TYPE_SNOWMAN)
401+
require.NoError(t, err)
402+
sent := sender.Send(context.Background(), outboundGetMsg)
403+
require.True(t, sent)
404+
inboundGetMsg := <-receiver.inboundMsgChan
405+
require.Equal(t, message.GetOp, inboundGetMsg.Op())
406+
}

0 commit comments

Comments
 (0)