Avoid using peers returning empty responses, not sending skeleton requests (erigontech#6281)

Co-authored-by: Alexey Sharp <alexeysharp@Alexeys-iMac.local>
AlexeyAkhunov and Alexey Sharp authored Dec 11, 2022
1 parent ec582dd commit 1c3c486
Showing 5 changed files with 40 additions and 8 deletions.
26 changes: 24 additions & 2 deletions cmd/sentry/sentry/sentry_grpc_server.go
@@ -55,6 +55,7 @@ type PeerInfo struct {
peer *p2p.Peer
lock sync.RWMutex
deadlines []time.Time // Request deadlines
+ uselessTill time.Time
latestDealine time.Time
height uint64
rw p2p.MsgReadWriter
@@ -151,6 +152,12 @@ func (pi *PeerInfo) AddDeadline(deadline time.Time) {
pi.latestDealine = deadline
}

+ func (pi *PeerInfo) UselessTill(deadline time.Time) {
+ pi.lock.Lock()
+ defer pi.lock.Unlock()
+ pi.uselessTill = deadline
+ }

func (pi *PeerInfo) Height() uint64 {
return atomic.LoadUint64(&pi.height)
}
@@ -184,6 +191,12 @@ func (pi *PeerInfo) ClearDeadlines(now time.Time, givePermit bool) int {
return len(pi.deadlines)
}

+ func (pi *PeerInfo) IsUseless(now time.Time) bool {
+ pi.lock.RLock()
+ defer pi.lock.RUnlock()
+ return now.Before(pi.uselessTill)
+ }

func (pi *PeerInfo) LatestDeadline() time.Time {
pi.lock.RLock()
defer pi.lock.RUnlock()
@@ -736,6 +749,15 @@ func (ss *GrpcServer) PeerMinBlock(_ context.Context, req *proto_sentry.PeerMinB
return &emptypb.Empty{}, nil
}

+ func (ss *GrpcServer) PeerUseless(_ context.Context, req *proto_sentry.PeerUselessRequest) (*emptypb.Empty, error) {
+ peerID := ConvertH512ToPeerID(req.PeerId)
+ peerInfo := ss.getPeer(peerID)
+ if peerInfo != nil {
+ peerInfo.UselessTill(time.Now().Add(10 * time.Minute))
+ }
+ return &emptypb.Empty{}, nil
+ }

func (ss *GrpcServer) findBestPeersWithPermit(peerCount int) []*PeerInfo {
// Choose peer(s) that we can send this request to, with maximum number of permits
now := time.Now()
@@ -746,7 +768,7 @@ func (ss *GrpcServer) findBestPeersWithPermit(peerCount int) []*PeerInfo {
deadlines := peerInfo.ClearDeadlines(now, false /* givePermit */)
height := peerInfo.Height()
//fmt.Printf("%d deadlines for peer %s\n", deadlines, peerID)
- if deadlines < maxPermitsPerPeer {
+ if deadlines < maxPermitsPerPeer && !peerInfo.IsUseless(now) {
heap.Push(&byMinBlock, PeerRef{pi: peerInfo, height: height})
if byMinBlock.Len() > peerCount {
// Remove the worst peer
@@ -782,7 +804,7 @@ func (ss *GrpcServer) findPeerByMinBlock(minBlock uint64) (*PeerInfo, bool) {
if peerInfo.Height() >= minBlock {
deadlines := peerInfo.ClearDeadlines(now, false /* givePermit */)
//fmt.Printf("%d deadlines for peer %s\n", deadlines, peerID)
- if deadlines < maxPermitsPerPeer {
+ if deadlines < maxPermitsPerPeer && !peerInfo.IsUseless(now) {
permits := maxPermitsPerPeer - deadlines
if permits > maxPermits {
maxPermits = permits
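
The new UselessTill/IsUseless pair amounts to a temporary penalty box: once a peer is marked, both findBestPeersWithPermit and findPeerByMinBlock skip it until the recorded timestamp passes. Below is a minimal, self-contained sketch of that pattern (not the Erigon code; peerInfo and its methods are cut-down stand-ins for PeerInfo):

package main

import (
	"fmt"
	"sync"
	"time"
)

// peerInfo is a cut-down stand-in for sentry's PeerInfo, keeping only the
// penalty-window state added by this commit.
type peerInfo struct {
	lock        sync.RWMutex
	uselessTill time.Time
}

// markUseless records the time until which the peer should be skipped.
func (pi *peerInfo) markUseless(till time.Time) {
	pi.lock.Lock()
	defer pi.lock.Unlock()
	pi.uselessTill = till
}

// isUseless reports whether the peer is still inside its penalty window.
func (pi *peerInfo) isUseless(now time.Time) bool {
	pi.lock.RLock()
	defer pi.lock.RUnlock()
	return now.Before(pi.uselessTill)
}

func main() {
	pi := &peerInfo{}
	// Same 10-minute window that PeerUseless applies in sentry_grpc_server.go.
	pi.markUseless(time.Now().Add(10 * time.Minute))
	fmt.Println(pi.isUseless(time.Now()))                       // true: peer selection skips it
	fmt.Println(pi.isUseless(time.Now().Add(11 * time.Minute))) // false: eligible again
}
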
10 changes: 10 additions & 0 deletions cmd/sentry/sentry/sentry_multi_client.go
@@ -390,6 +390,16 @@ func (cs *MultiClient) blockHeaders66(ctx context.Context, in *proto_sentry.Inbo
}

func (cs *MultiClient) blockHeaders(ctx context.Context, pkt eth.BlockHeadersPacket, rlpStream *rlp.Stream, peerID *proto_types.H512, sentry direct.SentryClient) error {
+ if len(pkt) == 0 {
+ outreq := proto_sentry.PeerUselessRequest{
+ PeerId: peerID,
+ }
+ if _, err := sentry.PeerUseless(ctx, &outreq, &grpc.EmptyCallOption{}); err != nil {
+ return fmt.Errorf("sending peer useless request: %v", err)
+ }
+ // No point processing empty response
+ return nil
+ }
// Stream is at the BlockHeadersPacket, which is list of headers
if _, err := rlpStream.List(); err != nil {
return fmt.Errorf("decode 2 BlockHeadersPacket66: %w", err)
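
The multi-client change reports the sending peer and returns before any RLP decoding when a BlockHeaders packet arrives empty. The sketch below mirrors that control flow under simplified assumptions: header, sentryReporter, handleBlockHeaders and loggingSentry are hypothetical stand-ins for the eth/proto_sentry/direct types used in the real handler:

package main

import (
	"context"
	"fmt"
)

// header is a minimal placeholder for an eth.BlockHeadersPacket entry.
type header struct{ Number uint64 }

// sentryReporter abstracts the single call this sketch needs from the sentry client.
type sentryReporter interface {
	PeerUseless(ctx context.Context, peerID string) error
}

// handleBlockHeaders mirrors the shape of the new early return: an empty
// packet gets the peer reported as useless and is dropped without decoding.
func handleBlockHeaders(ctx context.Context, pkt []header, peerID string, sentry sentryReporter) error {
	if len(pkt) == 0 {
		if err := sentry.PeerUseless(ctx, peerID); err != nil {
			return fmt.Errorf("sending peer useless request: %w", err)
		}
		return nil // no point processing an empty response
	}
	// ... normal header decoding and processing would follow here ...
	return nil
}

// loggingSentry is a trivial sentryReporter used only to exercise the sketch.
type loggingSentry struct{}

func (loggingSentry) PeerUseless(_ context.Context, peerID string) error {
	fmt.Println("reporting useless peer:", peerID)
	return nil
}

func main() {
	_ = handleBlockHeaders(context.Background(), nil, "peer-1", loggingSentry{})
}
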
2 changes: 1 addition & 1 deletion go.mod
@@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon
go 1.18

require (
- github.com/ledgerwatch/erigon-lib v0.0.0-20221211145319-874c497dda90
+ github.com/ledgerwatch/erigon-lib v0.0.0-20221211222033-d70b55bc1a66
github.com/ledgerwatch/erigon-snapshot v1.1.1-0.20221117232719-cf68648bf146
github.com/ledgerwatch/log/v3 v3.6.0
github.com/ledgerwatch/secp256k1 v1.0.0
4 changes: 2 additions & 2 deletions go.sum
@@ -561,8 +561,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
- github.com/ledgerwatch/erigon-lib v0.0.0-20221211145319-874c497dda90 h1:ZsCnxEiUWDgp9ed/TeAnF15nVha4TRiL/1RGRecgs/c=
- github.com/ledgerwatch/erigon-lib v0.0.0-20221211145319-874c497dda90/go.mod h1:Cy/yMqN6ufAXayVYLEIKnRoYz25PK9Uxmp1yil+wA/A=
+ github.com/ledgerwatch/erigon-lib v0.0.0-20221211222033-d70b55bc1a66 h1:6BpdJ+kx5pAKkVnET5jPTvbP9ymooENJ4k2WYW2LzTY=
+ github.com/ledgerwatch/erigon-lib v0.0.0-20221211222033-d70b55bc1a66/go.mod h1:JmIvswKJVoUFNWHzMdszyBdTxboIM4dVs0u5dE6YZmI=
github.com/ledgerwatch/erigon-snapshot v1.1.1-0.20221117232719-cf68648bf146 h1:BBoJuTSC1Z41wvz26l+HBJCXEBrPh3LXjkJd4CT6n1E=
github.com/ledgerwatch/erigon-snapshot v1.1.1-0.20221117232719-cf68648bf146/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo=
github.com/ledgerwatch/log/v3 v3.6.0 h1:JBUSK1epPyutUrz7KYDTcJtQLEHnehECRpKbM1ugy5M=
6 changes: 3 additions & 3 deletions turbo/stages/headerdownload/header_algos.go
@@ -440,7 +440,7 @@ func (hd *HeaderDownload) requestMoreHeadersForPOS(currentTime time.Time) (timeo
Anchor: anchor,
Hash: anchor.parentHash,
Number: anchor.blockHeight - 1,
- Length: 192,
+ Length: 128,
Skip: 0,
Reverse: true,
}
@@ -456,7 +456,7 @@ func (hd *HeaderDownload) UpdateStats(req *HeaderRequest, skeleton bool) {
hd.stats.SkeletonReqMinBlock = req.Number
}
if req.Number+req.Length*req.Skip > hd.stats.SkeletonReqMaxBlock {
- hd.stats.SkeletonReqMaxBlock = req.Number + req.Length*req.Skip
+ hd.stats.SkeletonReqMaxBlock = req.Number + req.Length*(req.Skip+1)
}
} else {
hd.stats.Requests++
@@ -489,7 +489,7 @@ func (hd *HeaderDownload) RequestSkeleton() *HeaderRequest {
hd.lock.RLock()
defer hd.lock.RUnlock()
log.Debug("[Downloader] Request skeleton", "anchors", len(hd.anchors), "top seen height", hd.topSeenHeightPoW, "highestInDb", hd.highestInDb)
- stride := uint64(8 * 192)
+ stride := uint64(1) // Fix for BSC, for some reason most peers cannot response to the skeleton requests with non-zero strides anymore, so we are getting stuck very frequently
strideHeight := hd.highestInDb + stride
var length uint64 = 192
return &HeaderRequest{Number: strideHeight, Length: length, Skip: stride - 1, Reverse: false}
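
The header_algos.go edits are mostly arithmetic: RequestSkeleton drops to stride 1 (so Skip is 0 and the request asks for consecutive headers), and the stats bound becomes Number + Length*(Skip+1) instead of Number + Length*Skip. A small worked example, assuming the usual GetBlockHeaders interpretation that a request covers Number, Number+(Skip+1), ..., Number+(Length-1)*(Skip+1):

package main

import "fmt"

func main() {
	// Parameters used by RequestSkeleton after this commit: stride = 1, so Skip = 0
	// and the request asks for 192 consecutive headers.
	number, length, skip := uint64(1000), uint64(192), uint64(0)

	last := number + (length-1)*(skip+1) // highest header actually requested: 1191
	bound := number + length*(skip+1)    // value tracked as SkeletonReqMaxBlock: 1192

	fmt.Println("last requested header:", last)
	fmt.Println("tracked max-block bound:", bound)

	// The old expression collapses to the start height when Skip is 0.
	fmt.Println("old formula Number + Length*Skip:", number+length*skip) // 1000
}
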
