diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go
index 6f104c48c8..979f73f7ab 100644
--- a/dot/sync/chain_sync.go
+++ b/dot/sync/chain_sync.go
@@ -37,6 +37,13 @@ const (
 	tip
 )
 
+type blockOrigin byte
+
+const (
+	networkInitialSync blockOrigin = iota
+	networkBroadcast
+)
+
 func (s chainSyncState) String() string {
 	switch s {
 	case bootstrap:
@@ -126,6 +133,7 @@ type chainSync struct {
 	telemetry          Telemetry
 	badBlocks          []string
 	requestMaker       network.RequestMaker
+	waitPeersDuration  time.Duration
 }
 
 type chainSyncConfig struct {
@@ -142,6 +150,7 @@ type chainSyncConfig struct {
 	blockImportHandler BlockImportHandler
 	telemetry          Telemetry
 	badBlocks          []string
+	waitPeersDuration  time.Duration
 }
 
 func newChainSync(cfg chainSyncConfig) *chainSync {
@@ -166,26 +175,40 @@ func newChainSync(cfg chainSyncConfig) *chainSync {
 		workerPool:         newSyncWorkerPool(cfg.net, cfg.requestMaker),
 		badBlocks:          cfg.badBlocks,
 		requestMaker:       cfg.requestMaker,
+		waitPeersDuration:  cfg.waitPeersDuration,
 	}
 }
 
-func (cs *chainSync) start() {
-	// since the default status from sync mode is syncMode(tip)
-	isSyncedGauge.Set(1)
+func (cs *chainSync) waitEnoughPeersAndTarget() {
+	waitPeersTimer := time.NewTimer(cs.waitPeersDuration)
 
-	// wait until we have a minimal workers in the sync worker pool
 	for {
+		cs.workerPool.useConnectedPeers()
+		_, err := cs.getTarget()
+
 		totalAvailable := cs.workerPool.totalWorkers()
-		if totalAvailable >= uint(cs.minPeers) {
-			break
+		if totalAvailable >= uint(cs.minPeers) && err == nil {
+			return
 		}
 
-		// TODO: https://github.com/ChainSafe/gossamer/issues/3402
-		time.Sleep(time.Second)
+		select {
+		case <-waitPeersTimer.C:
+			waitPeersTimer.Reset(cs.waitPeersDuration)
+		case <-cs.stopCh:
+			return
+		}
 	}
+}
+
+func (cs *chainSync) start() {
+	// since the default status from sync mode is syncMode(tip)
+	isSyncedGauge.Set(1)
 
 	cs.wg.Add(1)
 	go cs.pendingBlocks.run(cs.finalisedCh, cs.stopCh, &cs.wg)
+
+	// wait until we have a minimal workers in the sync worker pool
+	cs.waitEnoughPeersAndTarget()
 }
 
 func (cs *chainSync) stop() error {
@@ -265,7 +288,7 @@ func (cs *chainSync) bootstrapSync() {
 		if isFarFromTarget {
 			cs.workerPool.useConnectedPeers()
-			err = cs.requestMaxBlocksFrom(bestBlockHeader)
+			err = cs.requestMaxBlocksFrom(bestBlockHeader, networkInitialSync)
 			if err != nil {
 				logger.Errorf("requesting max blocks from best block header: %s", err)
 			}
@@ -424,7 +447,7 @@ func (cs *chainSync) requestChainBlocks(announcedHeader, bestBlockHeader *types.
 	resultsQueue := make(chan *syncTaskResult)
 	cs.workerPool.submitRequest(request, &peerWhoAnnounced, resultsQueue)
 
-	err := cs.handleWorkersResults(resultsQueue, startAtBlock, totalBlocks)
+	err := cs.handleWorkersResults(resultsQueue, networkBroadcast, startAtBlock, totalBlocks)
 	if err != nil {
 		return fmt.Errorf("while handling workers results: %w", err)
 	}
@@ -462,7 +485,7 @@ func (cs *chainSync) requestForkBlocks(bestBlockHeader, highestFinalizedHeader,
 	resultsQueue := make(chan *syncTaskResult)
 	cs.workerPool.submitRequest(request, &peerWhoAnnounced, resultsQueue)
 
-	err = cs.handleWorkersResults(resultsQueue, startAtBlock, gapLength)
+	err = cs.handleWorkersResults(resultsQueue, networkBroadcast, startAtBlock, gapLength)
 	if err != nil {
 		return fmt.Errorf("while handling workers results: %w", err)
 	}
@@ -490,7 +513,7 @@ func (cs *chainSync) requestPendingBlocks(highestFinalizedHeader *types.Header)
 		}
 
 		if parentExists {
-			err := cs.handleReadyBlock(pendingBlock.toBlockData())
+			err := cs.handleReadyBlock(pendingBlock.toBlockData(), networkBroadcast)
 			if err != nil {
 				return fmt.Errorf("handling ready block: %w", err)
 			}
@@ -515,7 +538,7 @@ func (cs *chainSync) requestPendingBlocks(highestFinalizedHeader *types.Header)
 		// TODO: we should handle the requests concurrently
 		// a way of achieve that is by constructing a new `handleWorkersResults` for
 		// handling only tip sync requests
-		err = cs.handleWorkersResults(resultsQueue, startAtBlock, *descendingGapRequest.Max)
+		err = cs.handleWorkersResults(resultsQueue, networkBroadcast, startAtBlock, *descendingGapRequest.Max)
 		if err != nil {
 			return fmt.Errorf("while handling workers results: %w", err)
 		}
@@ -524,7 +547,7 @@ func (cs *chainSync) requestPendingBlocks(highestFinalizedHeader *types.Header)
 	return nil
 }
 
-func (cs *chainSync) requestMaxBlocksFrom(bestBlockHeader *types.Header) error {
+func (cs *chainSync) requestMaxBlocksFrom(bestBlockHeader *types.Header, origin blockOrigin) error { //nolint:unparam
 	startRequestAt := bestBlockHeader.Number + 1
 
 	// targetBlockNumber is the virtual target we will request, however
@@ -551,7 +574,7 @@ func (cs *chainSync) requestMaxBlocksFrom(bestBlockHeader *types.Header) error {
 	}
 
 	resultsQueue := cs.workerPool.submitRequests(requests)
-	err = cs.handleWorkersResults(resultsQueue, startRequestAt, expectedAmountOfBlocks)
+	err = cs.handleWorkersResults(resultsQueue, origin, startRequestAt, expectedAmountOfBlocks)
 	if err != nil {
 		return fmt.Errorf("while handling workers results: %w", err)
 	}
@@ -589,7 +612,7 @@ func (cs *chainSync) getTarget() (uint, error) {
 // in the queue and wait for it to completes
 // TODO: handle only justification requests
 func (cs *chainSync) handleWorkersResults(
-	workersResults chan *syncTaskResult, startAtBlock uint, expectedSyncedBlocks uint32) error {
+	workersResults chan *syncTaskResult, origin blockOrigin, startAtBlock uint, expectedSyncedBlocks uint32) error {
 	startTime := time.Now()
 	defer func() {
@@ -739,7 +762,7 @@ taskResultLoop:
 		// response was validated! place into ready block queue
 		for _, bd := range syncingChain {
 			// block is ready to be processed!
-			if err := cs.handleReadyBlock(bd); err != nil {
+			if err := cs.handleReadyBlock(bd, origin); err != nil {
 				return fmt.Errorf("while handling ready block: %w", err)
 			}
 		}
@@ -747,7 +770,7 @@ taskResultLoop:
 	return nil
 }
 
-func (cs *chainSync) handleReadyBlock(bd *types.BlockData) error {
+func (cs *chainSync) handleReadyBlock(bd *types.BlockData, origin blockOrigin) error {
 	// if header was not requested, get it from the pending set
 	// if we're expecting headers, validate should ensure we have a header
 	if bd.Header == nil {
@@ -779,7 +802,7 @@ func (cs *chainSync) handleReadyBlock(bd *types.BlockData) error {
 		bd.Header = block.header
 	}
 
-	err := cs.processBlockData(*bd)
+	err := cs.processBlockData(*bd, origin)
 	if err != nil {
 		// depending on the error, we might want to save this block for later
 		logger.Errorf("block data processing for block with hash %s failed: %s", bd.Hash, err)
@@ -793,7 +816,7 @@ func (cs *chainSync) handleReadyBlock(bd *types.BlockData) error {
 // processBlockData processes the BlockData from a BlockResponse and
 // returns the index of the last BlockData it handled on success,
 // or the index of the block data that errored on failure.
-func (cs *chainSync) processBlockData(blockData types.BlockData) error {
+func (cs *chainSync) processBlockData(blockData types.BlockData, origin blockOrigin) error {
 	// while in bootstrap mode we don't need to broadcast block announcements
 	announceImportedBlock := cs.getSyncMode() == tip
 	var blockDataJustification []byte
@@ -808,7 +831,7 @@ func (cs *chainSync) processBlockData(blockData types.BlockData) error {
 	}
 
 	if blockData.Body != nil {
-		err = cs.processBlockDataWithHeaderAndBody(blockData, announceImportedBlock)
+		err = cs.processBlockDataWithHeaderAndBody(blockData, origin, announceImportedBlock)
 		if err != nil {
 			return fmt.Errorf("processing block data with header and body: %w", err)
 		}
 	}
@@ -842,10 +865,13 @@ func (cs *chainSync) verifyJustification(headerHash common.Hash, justification [
 }
 
 func (cs *chainSync) processBlockDataWithHeaderAndBody(blockData types.BlockData,
-	announceImportedBlock bool) (err error) {
-	err = cs.babeVerifier.VerifyBlock(blockData.Header)
-	if err != nil {
-		return fmt.Errorf("babe verifying block: %w", err)
+	origin blockOrigin, announceImportedBlock bool) (err error) {
+
+	if origin != networkInitialSync {
+		err = cs.babeVerifier.VerifyBlock(blockData.Header)
+		if err != nil {
+			return fmt.Errorf("babe verifying block: %w", err)
+		}
 	}
 
 	cs.handleBody(blockData.Body)
diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go
index 27f8bb5845..af5a82fd8c 100644
--- a/dot/sync/chain_sync_test.go
+++ b/dot/sync/chain_sync_test.go
@@ -185,7 +185,7 @@ func Test_chainSync_onBlockAnnounce(t *testing.T) {
 			const announceBlock = true
 			ensureSuccessfulBlockImportFlow(t, block1AnnounceHeader, mockedBlockResponse.BlockData,
 				blockStateMock, babeVerifierMock, storageStateMock, importHandlerMock, telemetryMock,
-				announceBlock)
+				networkBroadcast, announceBlock)
 
 			workerPool := newSyncWorkerPool(networkMock, requestMaker)
 			// include the peer who announced the block in the pool
@@ -306,10 +306,10 @@ func Test_chainSync_onBlockAnnounceHandshake_tipModeNeedToCatchup(t *testing.T)
 	ensureSuccessfulBlockImportFlow(t, block1AnnounceHeader, firstMockedResponse.BlockData,
 		blockStateMock, babeVerifierMock, storageStateMock, importHandlerMock, telemetryMock,
-		announceBlock)
+		networkInitialSync, announceBlock)
 
 	ensureSuccessfulBlockImportFlow(t, latestItemFromMockedResponse.Header, secondMockedResponse.BlockData,
 		blockStateMock, babeVerifierMock, storageStateMock, importHandlerMock, telemetryMock,
-		announceBlock)
+		networkInitialSync, announceBlock)
 
 	state := atomic.Value{}
 	state.Store(tip)
@@ -535,7 +535,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) {
 	const announceBlock = false
 	// setup mocks for new synced blocks that doesn't exists in our local database
 	ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, totalBlockResponse.BlockData, mockedBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
 
 	// setup a chain sync which holds in its peer view map
 	// 3 peers, each one announce block X as its best block number.
@@ -555,7 +555,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) {
 	// the worker pool executes the workers management
 	cs.workerPool.fromBlockAnnounce(peer.ID("noot"))
 
-	err = cs.requestMaxBlocksFrom(mockedGenesisHeader)
+	err = cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
 	require.NoError(t, err)
 
 	err = cs.workerPool.stop()
@@ -591,7 +591,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) {
 	// the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow
 	// will setup the expectations starting from the genesis header until block 128
 	ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
 
 	worker2Response := &network.BlockResponseMessage{
 		BlockData: blockResponse.BlockData[128:],
@@ -600,7 +600,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) {
 	// will setup the expectations starting from block 128, from previous worker, until block 256
 	parent := worker1Response.BlockData[len(worker1Response.BlockData)-1]
 	ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
 
 	// we use gomock.Any since I cannot guarantee which peer picks which request
 	// but the first call to DoBlockRequest will return the first set and the second
@@ -641,7 +641,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) {
 	cs.workerPool.fromBlockAnnounce(peer.ID("noot"))
 	cs.workerPool.fromBlockAnnounce(peer.ID("noot2"))
 
-	err = cs.requestMaxBlocksFrom(mockedGenesisHeader)
+	err = cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
 	require.NoError(t, err)
 
 	err = cs.workerPool.stop()
@@ -678,7 +678,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing.
 	// the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow
 	// will setup the expectations starting from the genesis header until block 128
 	ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
 
 	worker2Response := &network.BlockResponseMessage{
 		BlockData: blockResponse.BlockData[128:],
@@ -687,7 +687,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing.
 	// will setup the expectations starting from block 128, from previous worker, until block 256
 	parent := worker1Response.BlockData[len(worker1Response.BlockData)-1]
 	ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
 
 	// we use gomock.Any since I cannot guarantee which peer picks which request
 	// but the first call to DoBlockRequest will return the first set and the second
@@ -735,7 +735,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing.
 	cs.workerPool.fromBlockAnnounce(peer.ID("alice"))
 	cs.workerPool.fromBlockAnnounce(peer.ID("bob"))
 
-	err = cs.requestMaxBlocksFrom(mockedGenesisHeader)
+	err = cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
 	require.NoError(t, err)
 
 	err = cs.workerPool.stop()
@@ -772,7 +772,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *test
 	// the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow
 	// will setup the expectations starting from the genesis header until block 128
 	ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
 
 	worker2Response := &network.BlockResponseMessage{
 		BlockData: blockResponse.BlockData[128:],
@@ -781,7 +781,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *test
 	// will setup the expectations starting from block 128, from previous worker, until block 256
 	parent := worker1Response.BlockData[len(worker1Response.BlockData)-1]
 	ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
 
 	// we use gomock.Any since I cannot guarantee which peer picks which request
 	// but the first call to DoBlockRequest will return the first set and the second
@@ -835,7 +835,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *test
 	cs.workerPool.fromBlockAnnounce(peer.ID("alice"))
 	cs.workerPool.fromBlockAnnounce(peer.ID("bob"))
 
-	err = cs.requestMaxBlocksFrom(mockedGenesisHeader)
+	err = cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
 	require.NoError(t, err)
 
 	err = cs.workerPool.stop()
@@ -872,7 +872,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testi
 	// the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow
 	// will setup the expectations starting from the genesis header until block 128
 	ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
 
 	worker2Response := &network.BlockResponseMessage{
 		BlockData: blockResponse.BlockData[128:],
@@ -881,7 +881,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testi
 	// will setup the expectations starting from block 128, from previous worker, until block 256
 	parent := worker1Response.BlockData[127]
 	ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
 
 	// we use gomock.Any since I cannot guarantee which peer picks which request
 	// but the first call to DoBlockRequest will return the first set and the second
@@ -937,7 +937,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testi
 	cs.workerPool.fromBlockAnnounce(peer.ID("alice"))
 	cs.workerPool.fromBlockAnnounce(peer.ID("bob"))
 
-	err = cs.requestMaxBlocksFrom(mockedGenesisHeader)
+	err = cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
 	require.NoError(t, err)
 
 	err = cs.workerPool.stop()
@@ -974,7 +974,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testi
 	// the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow
 	// will setup the expectations starting from the genesis header until block 128
 	ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
 
 	worker2Response := &network.BlockResponseMessage{
 		BlockData: blockResponse.BlockData[128:],
@@ -983,7 +983,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testi
 	// will setup the expectations starting from block 128, from previous worker, until block 256
 	parent := worker1Response.BlockData[127]
 	ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
 
 	// we use gomock.Any since I cannot guarantee which peer picks which request
 	// but the first call to DoBlockRequest will return the first set and the second
@@ -1035,7 +1035,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testi
 	cs.workerPool.fromBlockAnnounce(peer.ID("alice"))
 	cs.workerPool.fromBlockAnnounce(peer.ID("bob"))
 
-	err = cs.requestMaxBlocksFrom(mockedGenesisHeader)
+	err = cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
 	require.NoError(t, err)
 
 	err = cs.workerPool.stop()
@@ -1072,7 +1072,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing.
 	// the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow
 	// will setup the expectations starting from the genesis header until block 128
 	ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
 
 	worker2Response := &network.BlockResponseMessage{
 		BlockData: blockResponse.BlockData[128:],
@@ -1081,7 +1081,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing.
 	// will setup the expectations starting from block 128, from previous worker, until block 256
 	parent := worker1Response.BlockData[len(worker1Response.BlockData)-1]
 	ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
 
 	fakeBadBlockHash := common.MustHexToHash("0x18767cb4bb4cc13bf119f6613aec5487d4c06a2e453de53d34aea6f3f1ee9855")
@@ -1149,7 +1149,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing.
 	cs.workerPool.fromBlockAnnounce(peer.ID("alice"))
 	cs.workerPool.fromBlockAnnounce(peer.ID("bob"))
 
-	err = cs.requestMaxBlocksFrom(mockedGenesisHeader)
+	err = cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
 	require.NoError(t, err)
 
 	err = cs.workerPool.stop()
@@ -1190,7 +1190,7 @@ func TestChainSync_BootstrapSync_SucessfulSync_ReceivedPartialBlockData(t *testi
 	// the first peer will respond the from the block 1 to 96 so the ensureBlockImportFlow
 	// will setup the expectations starting from the genesis header until block 96
 	ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
 
 	worker1MissingBlocksResponse := &network.BlockResponseMessage{
 		BlockData: blockResponse.BlockData[97:],
@@ -1199,7 +1199,7 @@ func TestChainSync_BootstrapSync_SucessfulSync_ReceivedPartialBlockData(t *testi
 	// last item from the previous response
 	parent := worker1Response.BlockData[96]
 	ensureSuccessfulBlockImportFlow(t, parent.Header, worker1MissingBlocksResponse.BlockData, mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
 
 	doBlockRequestCount := 0
 	mockRequestMaker.EXPECT().
@@ -1231,7 +1231,7 @@ func TestChainSync_BootstrapSync_SucessfulSync_ReceivedPartialBlockData(t *testi
 	cs.workerPool.fromBlockAnnounce(peer.ID("alice"))
 
-	err = cs.requestMaxBlocksFrom(mockedGenesisHeader)
+	err = cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
 	require.NoError(t, err)
 
 	err = cs.workerPool.stop()
@@ -1284,11 +1284,13 @@ func createSuccesfullBlockResponse(t *testing.T, parentHeader common.Hash,
 func ensureSuccessfulBlockImportFlow(t *testing.T, parentHeader *types.Header,
 	blocksReceived []*types.BlockData, mockBlockState *MockBlockState,
 	mockBabeVerifier *MockBabeVerifier, mockStorageState *MockStorageState,
-	mockImportHandler *MockBlockImportHandler, mockTelemetry *MockTelemetry, announceBlock bool) {
+	mockImportHandler *MockBlockImportHandler, mockTelemetry *MockTelemetry, origin blockOrigin, announceBlock bool) {
 	t.Helper()
 
 	for idx, blockData := range blocksReceived {
-		mockBabeVerifier.EXPECT().VerifyBlock(blockData.Header).Return(nil)
+		if origin != networkInitialSync {
+			mockBabeVerifier.EXPECT().VerifyBlock(blockData.Header).Return(nil)
+		}
 
 		var previousHeader *types.Header
 		if idx == 0 {
@@ -1710,7 +1712,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithInvalidJusticationBlock(t *t
 	// the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow
 	// will setup the expectations starting from the genesis header until block 128
 	ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData[:90], mockBlockState,
-		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)
 
 	errVerifyBlockJustification := errors.New("VerifyBlockJustification mock error")
 	mockFinalityGadget.EXPECT().
@@ -1754,7 +1756,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithInvalidJusticationBlock(t *t
 	cs.workerPool.fromBlockAnnounce(peer.ID("alice"))
 	//cs.workerPool.fromBlockAnnounce(peer.ID("bob"))
 
-	err = cs.requestMaxBlocksFrom(mockedGenesisHeader)
+	err = cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
 	require.ErrorIs(t, err, errVerifyBlockJustification)
 
 	err = cs.workerPool.stop()
diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go
index 32ec786f99..c511f2644c 100644
--- a/dot/sync/syncer.go
+++ b/dot/sync/syncer.go
@@ -64,6 +64,7 @@ func NewService(cfg *Config) (*Service, error) {
 		telemetry:          cfg.Telemetry,
 		badBlocks:          cfg.BadBlocks,
 		requestMaker:       cfg.RequestMaker,
+		waitPeersDuration:  100 * time.Millisecond,
 	}
 
 	chainSync := newChainSync(csCfg)