Computing uptimes if they are not up to date (#1081)
* testing

* working

* lint

* testing

* lint

* Make code more similar to master

Co-authored-by: Mariano Cortesi <mariano@celo.org>
mrsmkl and Mariano Cortesi authored Jul 2, 2020
1 parent 4c86d8d commit 93fa73a
Showing 2 changed files with 74 additions and 20 deletions.
69 changes: 51 additions & 18 deletions eth/downloader/downloader.go
@@ -28,6 +28,7 @@ import (

"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/istanbul"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
@@ -66,11 +67,11 @@ var (
reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection
reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs

fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync
fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected
fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it
fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download
- fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in fast sync
+ fsMinFullBlocks uint64 = 64 // Number of blocks to retrieve fully even in fast sync
)

var (
@@ -484,13 +485,11 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
// Ensure our origin point is below any fast sync pivot point
pivot := uint64(0)
if d.Mode == FastSync {
- if height <= uint64(fsMinFullBlocks) {
- origin = 0
- } else {
- pivot = height - uint64(fsMinFullBlocks)
- if pivot <= origin {
- origin = pivot - 1
- }
- }
+ pivot = d.calcPivot(height)
+ if pivot == 0 {
+ origin = 0
+ } else if pivot <= origin {
+ origin = pivot - 1
+ }
}
d.committed = 1
@@ -1703,6 +1702,42 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error {
return nil
}

func max(a uint64, b uint64) uint64 {
if a < b {
return b
}
return a
}

func computePivot(height uint64, epochSize uint64) uint64 {
if height <= fsMinFullBlocks {
return 0
}
target := height - fsMinFullBlocks
targetEpoch := istanbul.GetEpochNumber(target, epochSize)

// if target is on first epoch start on genesis
if targetEpoch <= 1 {
return 0
}

// else start on first block of the epoch
pivot, _ := istanbul.GetEpochFirstBlockNumber(targetEpoch, epochSize)
return pivot

}

func (d *Downloader) calcPivot(height uint64) uint64 {
// If epoch is not set (not IBFT) use old logic
if d.epoch == 0 {
if fsMinFullBlocks > height {
return 0
}
return height - fsMinFullBlocks
}
return computePivot(height, d.epoch)
}

// processFastSyncContent takes fetch results from the queue and writes them to the
// database. It also controls the synchronisation of state nodes of the pivot block.
func (d *Downloader) processFastSyncContent(latest *types.Header) error {
@@ -1718,10 +1753,7 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error {
go closeOnErr(sync)
// Figure out the ideal pivot block. Note, that this goalpost may move if the
// sync takes long enough for the chain head to move significantly.
- pivot := uint64(0)
- if height := latest.Number.Uint64(); height > uint64(fsMinFullBlocks) {
- pivot = height - uint64(fsMinFullBlocks)
- }
+ pivot := d.calcPivot(latest.Number.Uint64())
// To cater for moving pivot points, track the pivot block and subsequently
// accumulated download results separately.
var (
@@ -1754,9 +1786,10 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error {
// Split around the pivot block and process the two sides via fast/full sync
if atomic.LoadInt32(&d.committed) == 0 {
latest = results[len(results)-1].Header
- if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) {
- log.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks))
- pivot = height - uint64(fsMinFullBlocks)
+ if height := latest.Number.Uint64(); height > pivot+2*max(d.epoch, fsMinFullBlocks) {
+ newPivot := d.calcPivot(height)
+ log.Warn("Pivot became stale, moving", "old", pivot, "new", newPivot)
+ pivot = newPivot
}
}
P, beforeP, afterP := splitAroundPivot(pivot, results)
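
For context, here is a minimal, self-contained sketch of the pivot selection this commit introduces — not part of the commit itself. The helpers epochNumber and epochFirstBlock are simplified stand-ins for istanbul.GetEpochNumber and istanbul.GetEpochFirstBlockNumber, assuming epoch e spans blocks (e-1)*epochSize+1 through e*epochSize (genesis is epoch 0); the example heights and expected pivots come from the TestPivot cases added in eth/downloader/downloader_test.go below.

package main

import "fmt"

// fsMinFullBlocks mirrors the constant in the diff above: the number of
// blocks that are always retrieved and processed fully, even in fast sync.
const fsMinFullBlocks uint64 = 64

// epochNumber and epochFirstBlock are simplified stand-ins for the
// istanbul epoch helpers, under the epoch layout assumed above.
func epochNumber(block, epochSize uint64) uint64 {
    if block == 0 {
        return 0
    }
    return (block-1)/epochSize + 1
}

func epochFirstBlock(epoch, epochSize uint64) uint64 {
    return (epoch-1)*epochSize + 1
}

// computePivot follows the function added in this commit: take the usual
// fast-sync target (head - fsMinFullBlocks) and snap it back to the first
// block of the epoch that contains it, so full block processing starts on
// an epoch boundary.
func computePivot(height, epochSize uint64) uint64 {
    if height <= fsMinFullBlocks {
        return 0
    }
    target := height - fsMinFullBlocks
    targetEpoch := epochNumber(target, epochSize)
    if targetEpoch <= 1 {
        return 0 // target still in the first epoch: start from genesis
    }
    return epochFirstBlock(targetEpoch, epochSize)
}

func main() {
    epochSize := uint64(17280) // the epoch size used by the new TestPivot cases
    for _, height := range []uint64{172, 17280, 17280*10 + 10, 17280*10 + 1000} {
        fmt.Printf("height %d -> pivot %d\n", height, computePivot(height, epochSize))
    }
}

Run with go run on any recent Go toolchain; the printed pivots (0, 0, 17280*9+1, 17280*10+1) match the expected values in the TestPivot table.
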
25 changes: 23 additions & 2 deletions eth/downloader/downloader_test.go
@@ -917,7 +917,7 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
defer tester.terminate()

// Create a small enough block chain to download
- targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
+ targetBlocks := 3*fsHeaderSafetyNet + 256 + int(fsMinFullBlocks)
chain := testChainBase.shorten(targetBlocks)

// Attempt to sync with an attacker that feeds junk during the fast sync phase.
@@ -1558,7 +1558,7 @@ func testCheckpointEnforcement(t *testing.T, protocol int, mode SyncMode) {
tester := newTester()
defer tester.terminate()

- tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
+ tester.downloader.checkpoint = fsMinFullBlocks + 256
chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)

// Attempt to sync with the peer and validate the result
@@ -1577,3 +1577,24 @@ func testCheckpointEnforcement(t *testing.T, protocol int, mode SyncMode) {
assertOwnChain(t, tester, chain.len())
}
}

func TestPivot(t *testing.T) {
testCases := []struct {
height uint64
epoch uint64
expected uint64
}{
{0, 0, 0},
{172, 17280, 0},
{17280, 17280, 0},
{17280*10 + 1000, 17280, 17280*10 + 1},
{17280*10 + 10, 17280, 17280*9 + 1},
{17280 * 10, 17280, 17280*9 + 1},
{17280*10 - 1000, 17280, 17280*9 + 1},
}
for _, tt := range testCases {
if res := computePivot(tt.height, tt.epoch); res != tt.expected {
t.Errorf("Got %v expected %v for value %v", res, tt.expected, tt.height)
}
}
}
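
To exercise just the new test locally, the standard Go tooling should work from the repository root (assuming a checked-out tree): go test ./eth/downloader -run TestPivot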
