
Commit

merge devel
AskAlexSharov committed Apr 10, 2024
2 parents e08f3ec + 56cf84b commit 65b467f
Showing 171 changed files with 9,902 additions and 1,871 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/qa-tip-tracking.yml
@@ -87,7 +87,7 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: test-results
- path: $ERIGON_QA_PATH/test_system/qa-tests/tip-tracking/result.json
+ path: ${{ github.workspace }}/result.json

- name: Action for Success
if: steps.test_step.outputs.TEST_RESULT == 'success'
8 changes: 4 additions & 4 deletions README.md
@@ -54,13 +54,13 @@ in `erigon --help`). We don't allow change this flag after first start.
System Requirements
===================

- * For an Archive node of Ethereum Mainnet we recommend >=3.5TB storage space: 2.2TB state (as of December 2023),
- 470GB snapshots (can symlink or mount folder `<datadir>/snapshots` to another disk), 200GB temp files (can symlink or mount folder `<datadir>/temp` to another disk). Ethereum Mainnet Full node (
- see `--prune*` flags): 400Gb (April 2022).
+ * For an Archive node of Ethereum Mainnet we recommend >=3.5TB storage space: 2.3TiB state (as of March 2024),
+ 643GiB snapshots (can symlink or mount folder `<datadir>/snapshots` to another disk), 200GB temp files (can symlink or mount folder `<datadir>/temp` to another disk). Ethereum Mainnet Full node (
+ see `--prune*` flags): 1.1TiB (March 2024).

* Goerli Full node (see `--prune*` flags): 189GB on Beta, 114GB on Alpha (April 2022).

- * Gnosis Chain Archive: 600GB (October 2023).
+ * Gnosis Chain Archive: 1.7TiB (March 2024). Gnosis Chain Full node (`--prune=hrtc` flag): 530GiB (March 2024).

* Polygon Mainnet Archive: 8.5TiB (December 2023). `--prune.*.older 15768000`: 5.1Tb (September 2023). Polygon Mumbai Archive:
1TB. (April 2022).
69 changes: 69 additions & 0 deletions cl/aggregation/mock/pool.go

Some generated files are not rendered by default.

12 changes: 12 additions & 0 deletions cl/aggregation/pool.go
@@ -0,0 +1,12 @@
package aggregation

import (
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
)

type AggregationPool interface {
AddAttestation(att *solid.Attestation) error
//GetAggregatations(slot uint64, committeeIndex uint64) ([]*solid.Attestation, error)
GetAggregatationByRoot(root common.Hash) *solid.Attestation
}
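
The pool_impl.go and pool_test.go files below fill in this interface. As a quick orientation, here is a minimal caller-side sketch (not part of the commit; it reuses NewAggregationPool and the solid helpers from those files, and passes nil configs the same way the unit tests do):

package main

import (
	"context"
	"log"

	"github.com/ledgerwatch/erigon/cl/aggregation"
	"github.com/ledgerwatch/erigon/cl/cltypes/solid"
)

func main() {
	// The pool's background sweep goroutine exits when this context is cancelled.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// nil configs mirror pool_test.go; a real caller passes genesis/beacon/network configs.
	pool := aggregation.NewAggregationPool(ctx, nil, nil, nil)

	// Build a dummy attestation the same way the tests below do.
	attData := solid.NewAttestionDataFromParameters(1, 1, [32]byte{},
		solid.NewCheckpointFromParameters([32]byte{}, 4),
		solid.NewCheckpointFromParameters([32]byte{}, 4))
	att := solid.NewAttestionFromParameters([]byte{0b00000001, 0, 0, 0}, attData, [96]byte{})

	if err := pool.AddAttestation(att); err != nil {
		log.Fatal(err)
	}

	// Look up the (possibly merged) aggregate by the attestation-data root; nil if none is pooled.
	root, _ := attData.HashSSZ()
	if agg := pool.GetAggregatationByRoot(root); agg != nil {
		log.Printf("aggregation bits: %08b", agg.AggregationBits())
	}
}

Note that GetAggregatationByRoot returns a clone, so callers may mutate the result without affecting the pooled aggregate.
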
131 changes: 131 additions & 0 deletions cl/aggregation/pool_impl.go
@@ -0,0 +1,131 @@
package aggregation

import (
"context"
"fmt"
"sync"
"time"

"github.com/Giulio2002/bls"
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/utils"
)

var (
blsAggregate = bls.AggregateSignatures
)

type aggregationPoolImpl struct {
// config
genesisConfig *clparams.GenesisConfig
beaconConfig *clparams.BeaconChainConfig
netConfig *clparams.NetworkConfig
aggregatesLock sync.RWMutex
aggregates map[common.Hash]*solid.Attestation
}

func NewAggregationPool(
ctx context.Context,
genesisConfig *clparams.GenesisConfig,
beaconConfig *clparams.BeaconChainConfig,
netConfig *clparams.NetworkConfig,
) AggregationPool {
p := &aggregationPoolImpl{
genesisConfig: genesisConfig,
beaconConfig: beaconConfig,
netConfig: netConfig,
aggregatesLock: sync.RWMutex{},
aggregates: make(map[common.Hash]*solid.Attestation),
}
go p.sweepStaleAtt(ctx)
return p
}

func (p *aggregationPoolImpl) AddAttestation(inAtt *solid.Attestation) error {
// use hash of attestation data as key
hashRoot, err := inAtt.AttestantionData().HashSSZ()
if err != nil {
return err
}

p.aggregatesLock.Lock()
defer p.aggregatesLock.Unlock()
att, ok := p.aggregates[hashRoot]
if !ok {
p.aggregates[hashRoot] = inAtt.Clone().(*solid.Attestation)
return nil
}

if utils.IsSupersetBitlist(att.AggregationBits(), inAtt.AggregationBits()) {
// no need to merge existing signatures
return nil
}

// merge signature
baseSig := att.Signature()
inSig := inAtt.Signature()
merged, err := blsAggregate([][]byte{baseSig[:], inSig[:]})
if err != nil {
return err
}
if len(merged) > 96 {
return fmt.Errorf("merged signature is too long")
}
var mergedSig [96]byte
copy(mergedSig[:], merged)

// merge aggregation bits
mergedBits := make([]byte, len(att.AggregationBits()))
for i := range att.AggregationBits() {
mergedBits[i] = att.AggregationBits()[i] | inAtt.AggregationBits()[i]
}

// update attestation
p.aggregates[hashRoot] = solid.NewAttestionFromParameters(
mergedBits,
inAtt.AttestantionData(),
mergedSig,
)
return nil
}

func (p *aggregationPoolImpl) GetAggregatationByRoot(root common.Hash) *solid.Attestation {
p.aggregatesLock.RLock()
defer p.aggregatesLock.RUnlock()
att := p.aggregates[root]
if att == nil {
return nil
}
return att.Clone().(*solid.Attestation)
}

func (p *aggregationPoolImpl) sweepStaleAtt(ctx context.Context) {
ticker := time.NewTicker(time.Minute)
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
p.aggregatesLock.Lock()
toRemoves := make([][32]byte, 0)
for hashRoot := range p.aggregates {
att := p.aggregates[hashRoot]
if p.slotIsStale(att.AttestantionData().Slot()) {
toRemoves = append(toRemoves, hashRoot)
}
}
// remove stale attestation
for _, hashRoot := range toRemoves {
delete(p.aggregates, hashRoot)
}
p.aggregatesLock.Unlock()
}
}
}

func (p *aggregationPoolImpl) slotIsStale(targetSlot uint64) bool {
curSlot := utils.GetCurrentSlot(p.genesisConfig.GenesisTime, p.beaconConfig.SecondsPerSlot)
return curSlot-targetSlot > p.netConfig.AttestationPropagationSlotRange
}
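
As shown above, when two attestations for the same data overlap only partially, AddAttestation ORs their aggregation bitlists byte by byte and BLS-aggregates the two signatures. A standalone sketch of just the bitlist step, using the same (hypothetical) bit patterns as att1_2, att1_3 and att1_4 in the test file below:

package main

import "fmt"

// orBits ORs two equal-length aggregation bitlists, mirroring the merge loop in AddAttestation.
func orBits(a, b []byte) []byte {
	out := make([]byte, len(a))
	for i := range a {
		out[i] = a[i] | b[i]
	}
	return out
}

func main() {
	att12 := []byte{0b00001011, 0b00000000, 0, 0}
	att13 := []byte{0b00000100, 0b00000011, 0, 0}
	att14 := []byte{0b00111010, 0b00000000, 0, 0}

	merged := orBits(orBits(att12, att13), att14)
	fmt.Printf("%08b\n", merged) // [00111111 00000011 00000000 00000000], as the last test case expects
}
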
121 changes: 121 additions & 0 deletions cl/aggregation/pool_test.go
@@ -0,0 +1,121 @@
package aggregation

import (
"context"
"log"
"testing"

"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/stretchr/testify/suite"
)

var (
// mock attestations with attestation data 1
attData1 = solid.NewAttestionDataFromParameters(1, 1, [32]byte{0, 4, 2, 6},
solid.NewCheckpointFromParameters([32]byte{0}, 4),
solid.NewCheckpointFromParameters([32]byte{0}, 4))
attData1Root, _ = attData1.HashSSZ()

att1_1 = solid.NewAttestionFromParameters(
[]byte{0b00000001, 0, 0, 0},
attData1,
[96]byte{'a', 'b', 'c', 'd', 'e', 'f'},
)
att1_2 = solid.NewAttestionFromParameters(
[]byte{0b00001011, 0, 0, 0},
attData1,
[96]byte{'d', 'e', 'f', 'g', 'h', 'i'},
)
att1_3 = solid.NewAttestionFromParameters(
[]byte{0b00000100, 0b00000011, 0, 0},
attData1,
[96]byte{'g', 'h', 'i', 'j', 'k', 'l'},
)
att1_4 = solid.NewAttestionFromParameters(
[]byte{0b00111010, 0, 0, 0},
attData1,
[96]byte{'m', 'n', 'o', 'p', 'q', 'r'},
)
// mock attestations with attestation data 2
attData2 = solid.NewAttestionDataFromParameters(3, 1, [32]byte{5, 5, 6, 6},
solid.NewCheckpointFromParameters([32]byte{0}, 4),
solid.NewCheckpointFromParameters([32]byte{0}, 4))
att2_1 = solid.NewAttestionFromParameters(
[]byte{0b00000001, 0, 0, 0},
attData2,
[96]byte{'t', 'e', 's', 't', 'i', 'n'},
)

mockAggrResult = [96]byte{'m', 'o', 'c', 'k'}
)

type PoolTestSuite struct {
suite.Suite
}

func (t *PoolTestSuite) SetupTest() {
blsAggregate = func(sigs [][]byte) ([]byte, error) {
ret := make([]byte, 96)
copy(ret, mockAggrResult[:])
return ret, nil
}
}

func (t *PoolTestSuite) TearDownTest() {
}

func (t *PoolTestSuite) TestAddAttestation() {
testcases := []struct {
name string
atts []*solid.Attestation
hashRoot [32]byte
expect *solid.Attestation
}{
{
name: "simple, different hashRoot",
atts: []*solid.Attestation{
att1_1,
att2_1,
},
hashRoot: attData1Root,
expect: att1_1,
},
{
name: "att1_2 is a super set of att1_1. skip att1_1",
atts: []*solid.Attestation{
att1_2,
att1_1,
att2_1, // none of its business
},
hashRoot: attData1Root,
expect: att1_2,
},
{
name: "merge att1_2, att1_3, att1_4",
atts: []*solid.Attestation{
att1_2,
att1_3,
att1_4,
},
hashRoot: attData1Root,
expect: solid.NewAttestionFromParameters(
[]byte{0b00111111, 0b00000011, 0, 0}, // merge of att1_2, att1_3 and att1_4
attData1,
mockAggrResult),
},
}

for _, tc := range testcases {
log.Printf("test case: %s", tc.name)
pool := NewAggregationPool(context.Background(), nil, nil, nil)
for _, att := range tc.atts {
pool.AddAttestation(att)
}
att := pool.GetAggregatationByRoot(tc.hashRoot)
t.Equal(tc.expect, att, tc.name)
}
}

func TestPool(t *testing.T) {
suite.Run(t, new(PoolTestSuite))
}
4 changes: 2 additions & 2 deletions cl/antiquary/antiquary.go
@@ -244,7 +244,7 @@ func (a *Antiquary) antiquate(from, to uint64) error {
}

log.Info("[Antiquary]: Antiquating", "from", from, "to", to)
- if err := freezeblocks.DumpBeaconBlocks(a.ctx, a.mainDB, from, to, a.dirs.Tmp, a.dirs.Snap, 1, log.LvlDebug, a.logger); err != nil {
+ if err := freezeblocks.DumpBeaconBlocks(a.ctx, a.mainDB, from, to, a.sn.Salt, a.dirs, 1, log.LvlDebug, a.logger); err != nil {
return err
}
tx, err := a.mainDB.BeginRw(a.ctx)
@@ -332,7 +332,7 @@ func (a *Antiquary) antiquateBlobs() error {
roTx.Rollback()
a.logger.Info("[Antiquary]: Antiquating blobs", "from", currentBlobsProgress, "to", to)
// now, we need to retire the blobs
- if err := freezeblocks.DumpBlobsSidecar(a.ctx, a.blobStorage, a.mainDB, currentBlobsProgress, to, a.dirs.Tmp, a.dirs.Snap, 1, log.LvlDebug, a.logger); err != nil {
+ if err := freezeblocks.DumpBlobsSidecar(a.ctx, a.blobStorage, a.mainDB, currentBlobsProgress, to, a.sn.Salt, a.dirs, 1, log.LvlDebug, a.logger); err != nil {
return err
}
to = (to / snaptype.Erigon2MergeLimit) * snaptype.Erigon2MergeLimit
