2 changes: 1 addition & 1 deletion beacon-chain/core/blocks/eth1_data.go
@@ -60,7 +60,7 @@ func Eth1DataHasEnoughSupport(beaconState state.ReadOnlyBeaconState, data *ethpb
 	voteCount := uint64(0)

 	for _, vote := range beaconState.Eth1DataVotes() {
-		if AreEth1DataEqual(vote, data.Copy()) {
+		if AreEth1DataEqual(vote, data) {
 			voteCount++
 		}
 	}
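Dropping the `Copy()` here is safe because the comparison only reads its arguments, so the old code paid for one defensive copy per recorded vote and got nothing back. Below is a minimal sketch of the idea under the assumption that `AreEth1DataEqual` is a read-only, field-by-field check; the `eth1Data` struct and `areEth1DataEqual` function are illustrative stand-ins, not Prysm's actual implementation:

```go
package eth1

import "bytes"

// eth1Data mirrors the fields of ethpb.Eth1Data that matter for vote counting.
type eth1Data struct {
	DepositRoot  []byte
	DepositCount uint64
	BlockHash    []byte
}

// areEth1DataEqual is a read-only, field-by-field comparison. Because it
// never mutates its arguments, callers can pass the original pointer and
// skip the per-iteration copy the old loop made.
func areEth1DataEqual(a, b *eth1Data) bool {
	if a == nil || b == nil {
		return a == b
	}
	return a.DepositCount == b.DepositCount &&
		bytes.Equal(a.DepositRoot, b.DepositRoot) &&
		bytes.Equal(a.BlockHash, b.BlockHash)
}
```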
119 changes: 64 additions & 55 deletions beacon-chain/state/stategen/migrate.go
@@ -6,6 +6,7 @@ import (
"fmt"

"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/sirupsen/logrus"
@@ -37,76 +38,84 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
 		return nil
 	}

-	// Start at previous finalized slot, stop at current finalized slot (it will be handled in the next migration).
-	// If the slot is on an archived point, save the state of that slot to the DB.
-	for slot := oldFSlot; slot < fSlot; slot++ {
+	// Calculate the first archived point slot >= oldFSlot (but > 0).
+	// This avoids iterating through every slot and only visits archived points directly.
+	var startSlot primitives.Slot
+	if oldFSlot == 0 {
+		startSlot = s.slotsPerArchivedPoint
+	} else {
+		// Round up to the next archived point.
+		startSlot = (oldFSlot + s.slotsPerArchivedPoint - 1) / s.slotsPerArchivedPoint * s.slotsPerArchivedPoint
+	}
+
+	// Start at the first archived point after the old finalized slot, stop before the current finalized slot.
+	// Jump directly between archived points.
+	for slot := startSlot; slot < fSlot; slot += s.slotsPerArchivedPoint {
 		if ctx.Err() != nil {
 			return ctx.Err()
 		}

-		if slot%s.slotsPerArchivedPoint == 0 && slot != 0 {
-			cached, exists, err := s.epochBoundaryStateCache.getBySlot(slot)
-			if err != nil {
-				return fmt.Errorf("could not get epoch boundary state for slot %d", slot)
-			}
+		cached, exists, err := s.epochBoundaryStateCache.getBySlot(slot)
+		if err != nil {
+			return fmt.Errorf("could not get epoch boundary state for slot %d", slot)
+		}

-			var aRoot [32]byte
-			var aState state.BeaconState
+		var aRoot [32]byte
+		var aState state.BeaconState

-			// When the epoch boundary state is not in cache due to a skip slot scenario,
-			// we have to regenerate the state which will represent the epoch boundary.
-			// By finding the highest available block below the epoch boundary slot, we
-			// generate the state for that block root.
-			if exists {
-				aRoot = cached.root
-				aState = cached.state
-			} else {
-				_, roots, err := s.beaconDB.HighestRootsBelowSlot(ctx, slot)
-				if err != nil {
-					return err
-				}
-				// Given the block has been finalized, the db should not have more than one block in a given slot.
-				// We should error out when this happens.
-				if len(roots) != 1 {
-					return errUnknownBlock
-				}
-				aRoot = roots[0]
-				// There's no need to generate the state if the state already exists in the DB.
-				// We can skip saving the state.
-				if !s.beaconDB.HasState(ctx, aRoot) {
-					aState, err = s.StateByRoot(ctx, aRoot)
-					if err != nil {
-						return err
-					}
-				}
-			}
+		// When the epoch boundary state is not in cache due to a skip slot scenario,
+		// we have to regenerate the state which will represent the epoch boundary.
+		// By finding the highest available block below the epoch boundary slot, we
+		// generate the state for that block root.
+		if exists {
+			aRoot = cached.root
+			aState = cached.state
+		} else {
+			_, roots, err := s.beaconDB.HighestRootsBelowSlot(ctx, slot)
+			if err != nil {
+				return err
+			}
+			// Given the block has been finalized, the db should not have more than one block in a given slot.
+			// We should error out when this happens.
+			if len(roots) != 1 {
+				return errUnknownBlock
+			}
+			aRoot = roots[0]
+			// There's no need to generate the state if the state already exists in the DB.
+			// We can skip saving the state.
+			if !s.beaconDB.HasState(ctx, aRoot) {
+				aState, err = s.StateByRoot(ctx, aRoot)
+				if err != nil {
+					return err
+				}
+			}
+		}

-			if s.beaconDB.HasState(ctx, aRoot) {
-				// If you are migrating a state and it's already part of the hot state cache saved to the db,
-				// you can just remove it from the hot state cache as it becomes redundant.
-				s.saveHotStateDB.lock.Lock()
-				roots := s.saveHotStateDB.blockRootsOfSavedStates
-				for i := range roots {
-					if aRoot == roots[i] {
-						s.saveHotStateDB.blockRootsOfSavedStates = append(roots[:i], roots[i+1:]...)
-						// There shouldn't be duplicated roots in `blockRootsOfSavedStates`,
-						// so breaking here is ok.
-						break
-					}
-				}
-				s.saveHotStateDB.lock.Unlock()
-				continue
-			}
+		if s.beaconDB.HasState(ctx, aRoot) {
+			// If you are migrating a state and it's already part of the hot state cache saved to the db,
+			// you can just remove it from the hot state cache as it becomes redundant.
+			s.saveHotStateDB.lock.Lock()
+			roots := s.saveHotStateDB.blockRootsOfSavedStates
+			for i := range roots {
+				if aRoot == roots[i] {
+					s.saveHotStateDB.blockRootsOfSavedStates = append(roots[:i], roots[i+1:]...)
+					// There shouldn't be duplicated roots in `blockRootsOfSavedStates`,
+					// so breaking here is ok.
+					break
+				}
+			}
+			s.saveHotStateDB.lock.Unlock()
+			continue
+		}

-			if err := s.beaconDB.SaveState(ctx, aState, aRoot); err != nil {
-				return err
-			}
-			log.WithFields(
-				logrus.Fields{
-					"slot": aState.Slot(),
-					"root": hex.EncodeToString(bytesutil.Trunc(aRoot[:])),
-				}).Info("Saved state in DB")
-		}
+		if err := s.beaconDB.SaveState(ctx, aState, aRoot); err != nil {
+			return err
+		}
+		log.WithFields(
+			logrus.Fields{
+				"slot": aState.Slot(),
+				"root": hex.EncodeToString(bytesutil.Trunc(aRoot[:])),
+			}).Info("Saved state in DB")
 	}

 	// Update finalized info in memory.
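The `startSlot` computation above replaces the old slot-by-slot walk with integer round-up to the next archived point. The arithmetic can be spot-checked in isolation; in this self-contained sketch, `Slot` stands in for `primitives.Slot` and 2048 is an assumed slots-per-archived-point setting:

```go
package main

import "fmt"

// Slot stands in for primitives.Slot in this example.
type Slot uint64

// nextArchivedPoint mirrors the startSlot computation above: round
// oldFSlot up to the next multiple of slotsPerArchivedPoint, and never
// start at slot 0.
func nextArchivedPoint(oldFSlot, slotsPerArchivedPoint Slot) Slot {
	if oldFSlot == 0 {
		return slotsPerArchivedPoint
	}
	return (oldFSlot + slotsPerArchivedPoint - 1) / slotsPerArchivedPoint * slotsPerArchivedPoint
}

func main() {
	const per = Slot(2048) // assumed slots-per-archived-point setting
	fmt.Println(nextArchivedPoint(0, per))    // 2048: slot 0 is never an archived point
	fmt.Println(nextArchivedPoint(2048, per)) // 2048: already on an archived point, no rounding
	fmt.Println(nextArchivedPoint(2049, per)) // 4096: one slot past a point rounds up to the next
}
```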
4 changes: 2 additions & 2 deletions beacon-chain/sync/pending_attestations_queue.go
@@ -265,7 +265,7 @@ func (s *Service) processVerifiedAttestation(
 	if key, err := generateUnaggregatedAttCacheKey(broadcastAtt); err != nil {
 		log.WithError(err).Error("Failed to generate cache key for attestation tracking")
 	} else {
-		s.setSeenUnaggregatedAtt(key)
+		_ = s.setSeenUnaggregatedAtt(key)
 	}

 	valCount, err := helpers.ActiveValidatorCount(ctx, preState, slots.ToEpoch(data.Slot))
@@ -320,7 +320,7 @@ func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAg
 		return
 	}

-	s.setAggregatorIndexEpochSeen(att.GetData().Target.Epoch, aggregate.AggregateAttestationAndProof().GetAggregatorIndex())
+	_ = s.setAggregatorIndexEpochSeen(att.GetData().Target.Epoch, aggregate.AggregateAttestationAndProof().GetAggregatorIndex())

 	if err := s.cfg.p2p.Broadcast(ctx, aggregate); err != nil {
 		log.WithError(err).Debug("Could not broadcast aggregated attestation")
12 changes: 10 additions & 2 deletions beacon-chain/sync/validate_aggregate_proof.go
@@ -137,7 +137,9 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
 		return validationRes, err
 	}

-	s.setAggregatorIndexEpochSeen(data.Target.Epoch, m.AggregateAttestationAndProof().GetAggregatorIndex())
+	if first := s.setAggregatorIndexEpochSeen(data.Target.Epoch, m.AggregateAttestationAndProof().GetAggregatorIndex()); !first {
+		return pubsub.ValidationIgnore, nil
+	}

 	msg.ValidatorData = m

@@ -265,13 +267,19 @@ func (s *Service) hasSeenAggregatorIndexEpoch(epoch primitives.Epoch, aggregator
 }

 // Set the aggregate's aggregator index and target epoch as seen.
-func (s *Service) setAggregatorIndexEpochSeen(epoch primitives.Epoch, aggregatorIndex primitives.ValidatorIndex) {
+// Returns true if this is the first time seeing this aggregator index and epoch.
+func (s *Service) setAggregatorIndexEpochSeen(epoch primitives.Epoch, aggregatorIndex primitives.ValidatorIndex) bool {
 	b := append(bytesutil.Bytes32(uint64(epoch)), bytesutil.Bytes32(uint64(aggregatorIndex))...)

 	s.seenAggregatedAttestationLock.Lock()
 	defer s.seenAggregatedAttestationLock.Unlock()

+	_, seen := s.seenAggregatedAttestationCache.Get(string(b))
+	if seen {
+		return false
+	}
 	s.seenAggregatedAttestationCache.Add(string(b), true)
+	return true
 }

 // This validates that the bitfield is correct and the aggregator's index in state is within the beacon committee.
24 changes: 24 additions & 0 deletions beacon-chain/sync/validate_aggregate_proof_test.go
@@ -801,3 +801,27 @@ func TestValidateAggregateAndProof_RejectWhenAttEpochDoesntEqualTargetEpoch(t *t
 	assert.NotNil(t, err)
 	assert.Equal(t, pubsub.ValidationReject, res)
 }
+
+func Test_SetAggregatorIndexEpochSeen(t *testing.T) {
+	db := dbtest.SetupDB(t)
+	p := p2ptest.NewTestP2P(t)
+
+	r := &Service{
+		cfg: &config{
+			p2p:      p,
+			beaconDB: db,
+		},
+		seenAggregatedAttestationCache: lruwrpr.New(10),
+	}
+
+	aggIndex := primitives.ValidatorIndex(42)
+	epoch := primitives.Epoch(7)
+
+	require.Equal(t, false, r.hasSeenAggregatorIndexEpoch(epoch, aggIndex))
+	first := r.setAggregatorIndexEpochSeen(epoch, aggIndex)
+	require.Equal(t, true, first)
+	require.Equal(t, true, r.hasSeenAggregatorIndexEpoch(epoch, aggIndex))
+
+	second := r.setAggregatorIndexEpochSeen(epoch, aggIndex)
+	require.Equal(t, false, second)
+}
17 changes: 13 additions & 4 deletions beacon-chain/sync/validate_beacon_attestation.go
@@ -104,7 +104,8 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(
 	}

 	if !s.slasherEnabled {
-		// Verify this is the first attestation received for the participating validator for the slot.
+		// Verify this is the first attestation received for the participating validator for the slot. This verification is here to return early if we've already seen this attestation.
+		// The verification is carried out again later, after all other validations, to avoid TOCTOU issues.
 		if s.hasSeenUnaggregatedAtt(attKey) {
 			return pubsub.ValidationIgnore, nil
 		}
@@ -228,7 +229,10 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(
 		Data: eventData,
 	})

-	s.setSeenUnaggregatedAtt(attKey)
+	if first := s.setSeenUnaggregatedAtt(attKey); !first {
+		// Another concurrent validation processed the same attestation in the meantime.
+		return pubsub.ValidationIgnore, nil
+	}

 	// Attach the final validated attestation to the message for further pipeline use.
 	msg.ValidatorData = attForValidation
@@ -385,11 +389,16 @@ func (s *Service) hasSeenUnaggregatedAtt(key string) bool {
 }

 // Set an incoming attestation as seen for the participating validator for the slot.
-func (s *Service) setSeenUnaggregatedAtt(key string) {
+// Returns false if the attestation was already seen.
+func (s *Service) setSeenUnaggregatedAtt(key string) bool {
 	s.seenUnAggregatedAttestationLock.Lock()
 	defer s.seenUnAggregatedAttestationLock.Unlock()

+	_, seen := s.seenUnAggregatedAttestationCache.Get(key)
+	if seen {
+		return false
+	}
 	s.seenUnAggregatedAttestationCache.Add(key, true)
+	return true
 }

 // hasBlockAndState returns true if the beacon node knows about a block and associated state in the
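The comments above describe the pattern this PR adopts: a cheap advisory `hasSeen` check up front to bail out early, then an authoritative check-and-set at commit time so that two concurrent validations of the same attestation cannot both succeed. A minimal sketch of that pattern follows; the names `seenCache` and `markSeen` are invented for illustration, and a plain map plus mutex stands in for Prysm's LRU cache guarded by `seenUnAggregatedAttestationLock`:

```go
package main

import (
	"fmt"
	"sync"
)

// seenCache is a minimal stand-in for the LRU-backed seen-attestation cache.
type seenCache struct {
	mu   sync.Mutex
	seen map[string]bool
}

// hasSeen is the cheap early check: callers use it to bail out fast,
// but its answer may be stale by the time validation finishes.
func (c *seenCache) hasSeen(key string) bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.seen[key]
}

// markSeen atomically records the key and reports whether this caller
// was first. Doing the check and the insert under one lock is what
// closes the TOCTOU window between hasSeen and a separate add.
func (c *seenCache) markSeen(key string) (first bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.seen[key] {
		return false
	}
	c.seen[key] = true
	return true
}

func main() {
	c := &seenCache{seen: make(map[string]bool)}
	var wg sync.WaitGroup
	winners := make(chan string, 2)
	for _, v := range []string{"validator-a", "validator-b"} {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			if c.hasSeen("att-key") { // early, advisory check
				return
			}
			// ... expensive validation would run here ...
			if c.markSeen("att-key") { // authoritative check-and-set
				winners <- name
			}
		}(v)
	}
	wg.Wait()
	close(winners)
	for w := range winners {
		fmt.Println("processed by:", w) // exactly one line prints
	}
}
```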
51 changes: 43 additions & 8 deletions beacon-chain/sync/validate_beacon_attestation_test.go
@@ -499,33 +499,50 @@ func TestService_setSeenUnaggregatedAtt(t *testing.T) {
 		Data:            &ethpb.AttestationData{Slot: 2, CommitteeIndex: 0},
 		AggregationBits: bitfield.Bitlist{0b1001},
 	}
+	s3c0a0 := &ethpb.Attestation{
+		Data:            &ethpb.AttestationData{Slot: 3, CommitteeIndex: 0},
+		AggregationBits: bitfield.Bitlist{0b1001},
+	}

 	t.Run("empty cache", func(t *testing.T) {
 		key := generateKey(t, s0c0a0)
 		assert.Equal(t, false, s.hasSeenUnaggregatedAtt(key))
 	})
 	t.Run("ok", func(t *testing.T) {
 		key := generateKey(t, s0c0a0)
-		s.setSeenUnaggregatedAtt(key)
+		first := s.setSeenUnaggregatedAtt(key)
 		assert.Equal(t, true, s.hasSeenUnaggregatedAtt(key))
+		assert.Equal(t, true, first)
 	})
+	t.Run("already seen", func(t *testing.T) {
+		key := generateKey(t, s3c0a0)
+		first := s.setSeenUnaggregatedAtt(key)
+		assert.Equal(t, true, s.hasSeenUnaggregatedAtt(key))
+		assert.Equal(t, true, first)
+		first = s.setSeenUnaggregatedAtt(key)
+		assert.Equal(t, true, s.hasSeenUnaggregatedAtt(key))
+		assert.Equal(t, false, first)
+	})
 	t.Run("different slot", func(t *testing.T) {
 		key1 := generateKey(t, s1c0a0)
 		key2 := generateKey(t, s2c0a0)
-		s.setSeenUnaggregatedAtt(key1)
+		first := s.setSeenUnaggregatedAtt(key1)
 		assert.Equal(t, false, s.hasSeenUnaggregatedAtt(key2))
+		assert.Equal(t, true, first)
 	})
 	t.Run("different committee index", func(t *testing.T) {
 		key1 := generateKey(t, s0c1a0)
 		key2 := generateKey(t, s0c2a0)
-		s.setSeenUnaggregatedAtt(key1)
+		first := s.setSeenUnaggregatedAtt(key1)
 		assert.Equal(t, false, s.hasSeenUnaggregatedAtt(key2))
+		assert.Equal(t, true, first)
 	})
 	t.Run("different bit", func(t *testing.T) {
 		key1 := generateKey(t, s0c0a1)
 		key2 := generateKey(t, s0c0a2)
-		s.setSeenUnaggregatedAtt(key1)
+		first := s.setSeenUnaggregatedAtt(key1)
 		assert.Equal(t, false, s.hasSeenUnaggregatedAtt(key2))
+		assert.Equal(t, true, first)
 	})
 	t.Run("0 bits set is considered not seen", func(t *testing.T) {
 		a := &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b1000}}
@@ -576,33 +593,51 @@ func TestService_setSeenUnaggregatedAtt(t *testing.T) {
 		CommitteeId:   0,
 		AttesterIndex: 0,
 	}
+	s3c0a0 := &ethpb.SingleAttestation{
+		Data:          &ethpb.AttestationData{Slot: 3},
+		CommitteeId:   0,
+		AttesterIndex: 0,
+	}

 	t.Run("empty cache", func(t *testing.T) {
 		key := generateKey(t, s0c0a0)
 		assert.Equal(t, false, s.hasSeenUnaggregatedAtt(key))
 	})
 	t.Run("ok", func(t *testing.T) {
 		key := generateKey(t, s0c0a0)
-		s.setSeenUnaggregatedAtt(key)
+		first := s.setSeenUnaggregatedAtt(key)
 		assert.Equal(t, true, s.hasSeenUnaggregatedAtt(key))
+		assert.Equal(t, true, first)
 	})
 	t.Run("different slot", func(t *testing.T) {
 		key1 := generateKey(t, s1c0a0)
 		key2 := generateKey(t, s2c0a0)
-		s.setSeenUnaggregatedAtt(key1)
+		first := s.setSeenUnaggregatedAtt(key1)
 		assert.Equal(t, false, s.hasSeenUnaggregatedAtt(key2))
+		assert.Equal(t, true, first)
 	})
+	t.Run("already seen", func(t *testing.T) {
+		key := generateKey(t, s3c0a0)
+		first := s.setSeenUnaggregatedAtt(key)
+		assert.Equal(t, true, s.hasSeenUnaggregatedAtt(key))
+		assert.Equal(t, true, first)
+		first = s.setSeenUnaggregatedAtt(key)
+		assert.Equal(t, true, s.hasSeenUnaggregatedAtt(key))
+		assert.Equal(t, false, first)
+	})
 	t.Run("different committee index", func(t *testing.T) {
 		key1 := generateKey(t, s0c1a0)
 		key2 := generateKey(t, s0c2a0)
-		s.setSeenUnaggregatedAtt(key1)
+		first := s.setSeenUnaggregatedAtt(key1)
 		assert.Equal(t, false, s.hasSeenUnaggregatedAtt(key2))
+		assert.Equal(t, true, first)
 	})
 	t.Run("different attester", func(t *testing.T) {
 		key1 := generateKey(t, s0c0a1)
 		key2 := generateKey(t, s0c0a2)
-		s.setSeenUnaggregatedAtt(key1)
+		first := s.setSeenUnaggregatedAtt(key1)
 		assert.Equal(t, false, s.hasSeenUnaggregatedAtt(key2))
+		assert.Equal(t, true, first)
 	})
 	t.Run("single attestation is considered not seen", func(t *testing.T) {
 		a := &ethpb.AttestationElectra{}
3 changes: 3 additions & 0 deletions changelog/potuz_check_twice_attseen.md
@@ -0,0 +1,3 @@
+### Fixed
+
+- Fixed a possible race when validating two attestations at the same time.
3 changes: 3 additions & 0 deletions changelog/satushh-eth1copy.md
@@ -0,0 +1,3 @@
+### Removed
+
+- Removed an unnecessary copy from `Eth1DataHasEnoughSupport`.