txPool: propagate on peer connect (erigontech#2335)
AskAlexSharov authored Jul 11, 2021
1 parent e1c17e0 commit 188dfb1
Showing 17 changed files with 501 additions and 96 deletions.
99 changes: 92 additions & 7 deletions cmd/sentry/download/broadcast.go
@@ -3,8 +3,10 @@ package download
import (
	"context"
	"math/big"
	"strings"

	proto_sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry"
	types2 "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
	"github.com/ledgerwatch/erigon/common"
	"github.com/ledgerwatch/erigon/core/types"
	"github.com/ledgerwatch/erigon/eth/protocols/eth"
@@ -101,6 +103,9 @@ func (cs *ControlServerImpl) BroadcastNewBlock(ctx context.Context, block *types
			}

			if _, err = sentry.SendMessageToRandomPeers(ctx, req65, &grpc.EmptyCallOption{}); err != nil {
				if isPeerNotFoundErr(err) {
					continue
				}
				log.Error("broadcastNewBlock", "error", err)
			}

@@ -115,29 +120,35 @@ func (cs *ControlServerImpl) BroadcastNewBlock(ctx context.Context, block *types
				}
			}
			if _, err = sentry.SendMessageToRandomPeers(ctx, req66, &grpc.EmptyCallOption{}); err != nil {
				if isPeerNotFoundErr(err) {
					continue
				}
				log.Error("broadcastNewBlock", "error", err)
			}
			continue
		}
	}
}

func (cs *ControlServerImpl) BroadcastNewTxs(ctx context.Context, txs []types.Transaction) {
func (cs *ControlServerImpl) BroadcastPooledTxs(ctx context.Context, txs []common.Hash) {
	if len(txs) == 0 {
		return
	}

	cs.lock.RLock()
	defer cs.lock.RUnlock()

	for len(txs) > 0 {

		pendingLen := maxTxPacketSize / common.HashLength
		pending := make([]common.Hash, 0, pendingLen)

		for i := 0; i < pendingLen && i < len(txs); i++ {
			pending = append(pending, txs[i].Hash())
			pending = append(pending, txs[i])
		}
		txs = txs[len(pending):]

		data, err := rlp.EncodeToBytes(eth.NewPooledTransactionHashesPacket(pending))
		if err != nil {
			log.Error("broadcastNewBlock", "error", err)
			log.Error("BroadcastPooledTxs", "error", err)
		}
		var req66, req65 *proto_sentry.SendMessageToRandomPeersRequest
		for _, sentry := range cs.sentries {
@@ -158,7 +169,10 @@ func (cs *ControlServerImpl) BroadcastNewTxs(ctx context.Context, txs []types.Tr
				}

				if _, err = sentry.SendMessageToRandomPeers(ctx, req65, &grpc.EmptyCallOption{}); err != nil {
					log.Error("broadcastNewBlock", "error", err)
					if isPeerNotFoundErr(err) {
						continue
					}
					log.Error("BroadcastPooledTxs", "error", err)
				}

			case eth.ETH66:
@@ -172,10 +186,81 @@ func (cs *ControlServerImpl) BroadcastNewTxs(ctx context.Context, txs []types.Tr
					}
				}
				if _, err = sentry.SendMessageToRandomPeers(ctx, req66, &grpc.EmptyCallOption{}); err != nil {
					log.Error("broadcastNewBlock", "error", err)
					if isPeerNotFoundErr(err) {
						continue
					}
					log.Error("BroadcastPooledTxs", "error", err)
				}
				continue
			}
		}
	}
}

func (cs *ControlServerImpl) PropagatePooledTxsToPeersList(ctx context.Context, peers []*types2.H512, txs []common.Hash) {
	if len(txs) == 0 {
		return
	}

	cs.lock.RLock()
	defer cs.lock.RUnlock()
	for len(txs) > 0 {

		pendingLen := maxTxPacketSize / common.HashLength
		pending := make([]common.Hash, 0, pendingLen)

		for i := 0; i < pendingLen && i < len(txs); i++ {
			pending = append(pending, txs[i])
		}
		txs = txs[len(pending):]

		data, err := rlp.EncodeToBytes(eth.NewPooledTransactionHashesPacket(pending))
		if err != nil {
			log.Error("PropagatePooledTxsToPeersList", "error", err)
		}
		for _, sentry := range cs.sentries {
			if !sentry.Ready() {
				continue
			}

			for _, peer := range peers {
				switch sentry.Protocol() {
				case eth.ETH65:
					req65 := &proto_sentry.SendMessageByIdRequest{
						PeerId: peer,
						Data: &proto_sentry.OutboundMessageData{
							Id: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_65,
							Data: data,
						},
					}

					if _, err = sentry.SendMessageById(ctx, req65, &grpc.EmptyCallOption{}); err != nil {
						if isPeerNotFoundErr(err) {
							continue
						}
						log.Error("broadcastNewBlock", "error", err)
					}

				case eth.ETH66:
					req66 := &proto_sentry.SendMessageByIdRequest{
						PeerId: peer,
						Data: &proto_sentry.OutboundMessageData{
							Id: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66,
							Data: data,
						},
					}
					if _, err = sentry.SendMessageById(ctx, req66, &grpc.EmptyCallOption{}); err != nil {
						if isPeerNotFoundErr(err) {
							continue
						}
						log.Error("PropagatePooledTxsToPeersList", "error", err)
					}
				}
			}
		}
	}
}

func isPeerNotFoundErr(err error) bool {
	return strings.Contains(err.Error(), "peer not found")
}
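
Both BroadcastPooledTxs and PropagatePooledTxsToPeersList batch announcements so that no single NewPooledTransactionHashes message exceeds maxTxPacketSize: each pass of the outer loop takes at most maxTxPacketSize / common.HashLength hashes and re-slices txs until it is drained. The following standalone sketch isolates that batching pattern; the 100 KiB cap and the chunkHashes helper are illustrative assumptions, not code from this commit.

package main

import "fmt"

const (
	maxTxPacketSize = 100 * 1024 // assumed soft cap in bytes, mirroring the eth protocol constant
	hashLength      = 32         // common.HashLength
)

type hash [hashLength]byte

// chunkHashes splits hashes into batches that each fit one announcement
// packet, the same re-slicing the broadcast loops above perform on txs.
func chunkHashes(hashes []hash) [][]hash {
	batchLen := maxTxPacketSize / hashLength // 3200 hashes per packet
	var batches [][]hash
	for len(hashes) > 0 {
		n := batchLen
		if n > len(hashes) {
			n = len(hashes)
		}
		batches = append(batches, hashes[:n])
		hashes = hashes[n:]
	}
	return batches
}

func main() {
	for _, batch := range chunkHashes(make([]hash, 7000)) {
		fmt.Println(len(batch)) // 3200, 3200, 600
	}
}

Bounding the announcement size on the send side means a freshly connected peer can be brought up to date with the pool through several bounded messages rather than one oversized packet.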
8 changes: 6 additions & 2 deletions cmd/sentry/download/downloader.go
@@ -154,7 +154,6 @@ func RecvMessageLoop(ctx context.Context,
		}
		if err := RecvMessage(ctx, sentry, cs.HandleInboundMessage, wg); err != nil {
			log.Error("[RecvMessage]", "err", err)

		}
	}
}
@@ -631,6 +630,9 @@ func (cs *ControlServerImpl) getBlockHeaders66(ctx context.Context, inreq *proto
	}
	_, err = sentry.SendMessageById(ctx, &outreq, &grpc.EmptyCallOption{})
	if err != nil {
		if !isPeerNotFoundErr(err) {
			return fmt.Errorf("send header response 65: %v", err)
		}
		return fmt.Errorf("send header response 66: %v", err)
	}
	//log.Info(fmt.Sprintf("[%s] GetBlockHeaderMsg{hash=%x, number=%d, amount=%d, skip=%d, reverse=%t, responseLen=%d}", string(gointerfaces.ConvertH512ToBytes(inreq.PeerId)), query.Origin.Hash, query.Origin.Number, query.Amount, query.Skip, query.Reverse, len(b)))
@@ -666,7 +668,9 @@ func (cs *ControlServerImpl) getBlockHeaders65(ctx context.Context, inreq *proto
	}
	_, err = sentry.SendMessageById(ctx, &outreq, &grpc.EmptyCallOption{})
	if err != nil {
		return fmt.Errorf("send header response 65: %v", err)
		if !isPeerNotFoundErr(err) {
			return fmt.Errorf("send header response 65: %v", err)
		}
	}
	//log.Info(fmt.Sprintf("[%s] GetBlockHeaderMsg{hash=%x, number=%d, amount=%d, skip=%d, reverse=%t, responseLen=%d}", string(gointerfaces.ConvertH512ToBytes(inreq.PeerId)), query.Origin.Hash, query.Origin.Number, query.Amount, query.Skip, query.Reverse, len(b)))
	return nil
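
The downloader.go hunks extend the same treatment to the header-reply path: in getBlockHeaders65 the error return is now wrapped in !isPeerNotFoundErr, so a peer that disconnected between request and reply no longer aborts the handler (the getBlockHeaders66 hunk keeps its unconditional return after the new check). A minimal sketch of the 65-style control flow, where sendToPeer is a hypothetical stand-in for sentry.SendMessageById:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// isPeerNotFoundErr mirrors the helper added in broadcast.go: sentry
// surfaces a vanished peer only via the error string, so the check is textual.
func isPeerNotFoundErr(err error) bool {
	return strings.Contains(err.Error(), "peer not found")
}

// sendToPeer is an illustrative stand-in that always reports a vanished peer.
func sendToPeer(peerID string) error {
	return errors.New("peer not found")
}

// replyHeaders propagates real send failures but swallows peer-not-found,
// the control flow the getBlockHeaders65 hunk arrives at.
func replyHeaders(peerID string) error {
	if err := sendToPeer(peerID); err != nil {
		if !isPeerNotFoundErr(err) {
			return fmt.Errorf("send header response 65: %w", err)
		}
		// Peer disconnected before the reply could be delivered; not an error.
	}
	return nil
}

func main() {
	fmt.Println(replyHeaders("deadbeef")) // <nil>: a vanished peer is ignored
}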