@@ -10,6 +10,7 @@ import (

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/verify"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
	"github.com/sirupsen/logrus"

@@ -60,7 +61,7 @@ type dataColumnSampler1D struct {
	// peerFromColumn maps a column to the peer responsible for custody.
	peerFromColumn map[uint64]map[peer.ID]bool
	// columnVerifier verifies a column according to the specified requirements.
-	columnVerifier verification.NewColumnVerifier
+	columnVerifier verification.NewDataColumnsVerifier
}

// newDataColumnSampler1D creates a new 1D data column sampler.
@@ -69,7 +70,7 @@ func newDataColumnSampler1D(
	clock *startup.Clock,
	ctxMap ContextByteVersions,
	stateNotifier statefeed.Notifier,
-	colVerifier verification.NewColumnVerifier,
+	colVerifier verification.NewDataColumnsVerifier,
) *dataColumnSampler1D {
	numColumns := params.BeaconConfig().NumberOfColumns
	peerFromColumn := make(map[uint64]map[peer.ID]bool, numColumns)
@@ -265,7 +266,7 @@ func (d *dataColumnSampler1D) handleStateNotification(ctx context.Context, event
	samplesCount := min(params.BeaconConfig().SamplesPerSlot, uint64(len(d.nonCustodyColumns))-params.BeaconConfig().NumberOfColumns/2)

	// TODO: Use the first output of `incrementalDAS` as input of the fork choice rule.
-	_, _, err = d.incrementalDAS(ctx, data.BlockRoot, randomizedColumns, samplesCount)
+	_, _, err = d.incrementalDAS(ctx, data, randomizedColumns, samplesCount)
	if err != nil {
		log.WithError(err).Error("Failed to run incremental DAS")
	}
@@ -276,21 +277,22 @@ func (d *dataColumnSampler1D) handleStateNotification(ctx context.Context, event
// According to https://github.com/ethereum/consensus-specs/issues/3825, we're going to select query samples exclusively from the non custody columns.
func (d *dataColumnSampler1D) incrementalDAS(
	ctx context.Context,
-	root [fieldparams.RootLength]byte,
+	blockProcessedData *statefeed.BlockProcessedData,
	columns []uint64,
	sampleCount uint64,
) (bool, []roundSummary, error) {
	allowedFailures := uint64(0)
	firstColumnToSample, extendedSampleCount := uint64(0), peerdas.ExtendedSampleCount(sampleCount, allowedFailures)
	roundSummaries := make([]roundSummary, 0, 1) // We optimistically allocate only one round summary.
+	blockRoot := blockProcessedData.BlockRoot

	start := time.Now()

	for round := 1; ; /*No exit condition*/ round++ {
		if extendedSampleCount > uint64(len(columns)) {
			// We already tried to sample all possible columns, this is the unhappy path.
			log.WithFields(logrus.Fields{
-				"root":  fmt.Sprintf("%#x", root),
+				"root":  fmt.Sprintf("%#x", blockRoot),
				"round": round - 1,
			}).Warning("Some columns are still missing after trying to sample all possible columns")
			return false, roundSummaries, nil
@@ -301,13 +303,13 @@ func (d *dataColumnSampler1D) incrementalDAS(
		columnsToSampleCount := extendedSampleCount - firstColumnToSample

		log.WithFields(logrus.Fields{
-			"root":    fmt.Sprintf("%#x", root),
+			"root":    fmt.Sprintf("%#x", blockRoot),
			"columns": columnsToSample,
			"round":   round,
		}).Debug("Start data columns sampling")

		// Sample data columns from peers in parallel.
-		retrievedSamples := d.sampleDataColumns(ctx, root, columnsToSample)
+		retrievedSamples := d.sampleDataColumns(ctx, blockProcessedData, columnsToSample)

		missingSamples := make(map[uint64]bool)
		for _, column := range columnsToSample {
@@ -325,7 +327,7 @@ func (d *dataColumnSampler1D) incrementalDAS(
		if retrievedSampleCount == columnsToSampleCount {
			// All columns were correctly sampled, this is the happy path.
			log.WithFields(logrus.Fields{
-				"root":         fmt.Sprintf("%#x", root),
+				"root":         fmt.Sprintf("%#x", blockRoot),
				"neededRounds": round,
				"duration":     time.Since(start),
			}).Debug("All columns were successfully sampled")
@@ -344,7 +346,7 @@ func (d *dataColumnSampler1D) incrementalDAS(
		extendedSampleCount = peerdas.ExtendedSampleCount(sampleCount, allowedFailures)

		log.WithFields(logrus.Fields{
-			"root":                fmt.Sprintf("%#x", root),
+			"root":                fmt.Sprintf("%#x", blockRoot),
			"round":               round,
			"missingColumnsCount": allowedFailures,
			"currentSampleIndex":  oldExtendedSampleCount,
@@ -355,7 +357,7 @@ func (d *dataColumnSampler1D) incrementalDAS(

func (d *dataColumnSampler1D) sampleDataColumns(
	ctx context.Context,
-	root [fieldparams.RootLength]byte,
+	blockProcessedData *statefeed.BlockProcessedData,
	columns []uint64,
) map[uint64]bool {
	// distribute samples to peer
@@ -365,10 +367,12 @@ func (d *dataColumnSampler1D) sampleDataColumns(
		mu sync.Mutex
		wg sync.WaitGroup
	)
+
	res := make(map[uint64]bool)
+
	sampleFromPeer := func(pid peer.ID, cols map[uint64]bool) {
		defer wg.Done()
-		retrieved := d.sampleDataColumnsFromPeer(ctx, pid, root, cols)
+		retrieved := d.sampleDataColumnsFromPeer(ctx, pid, blockProcessedData, cols)

		mu.Lock()
		for col := range retrieved {
@@ -414,15 +418,15 @@ func (d *dataColumnSampler1D) distributeSamplesToPeer(
func (d *dataColumnSampler1D) sampleDataColumnsFromPeer(
	ctx context.Context,
	pid peer.ID,
-	root [fieldparams.RootLength]byte,
+	blockProcessedData *statefeed.BlockProcessedData,
	requestedColumns map[uint64]bool,
) map[uint64]bool {
	retrievedColumns := make(map[uint64]bool)

	req := make(types.DataColumnSidecarsByRootReq, 0)
	for col := range requestedColumns {
		req = append(req, &eth.DataColumnIdentifier{
-			BlockRoot:   root[:],
+			BlockRoot:   blockProcessedData.BlockRoot[:],
			ColumnIndex: col,
		})
	}
@@ -434,22 +438,23 @@ func (d *dataColumnSampler1D) sampleDataColumnsFromPeer(
		return nil
	}

+	// TODO: Once peer sampling is used, we should verify all sampled data columns in a single batch instead of looping over columns.
	for _, roDataColumn := range roDataColumns {
-		if verifyColumn(roDataColumn, root, pid, requestedColumns, d.columnVerifier) {
+		if verifyColumn(roDataColumn, blockProcessedData, pid, requestedColumns, d.columnVerifier) {
			retrievedColumns[roDataColumn.ColumnIndex] = true
		}
	}

	if len(retrievedColumns) == len(requestedColumns) {
		log.WithFields(logrus.Fields{
			"peerID":           pid,
-			"root":             fmt.Sprintf("%#x", root),
+			"root":             fmt.Sprintf("%#x", blockProcessedData.BlockRoot),
			"requestedColumns": sortedSliceFromMap(requestedColumns),
		}).Debug("Sampled columns from peer successfully")
	} else {
		log.WithFields(logrus.Fields{
			"peerID":           pid,
-			"root":             fmt.Sprintf("%#x", root),
+			"root":             fmt.Sprintf("%#x", blockProcessedData.BlockRoot),
			"requestedColumns": sortedSliceFromMap(requestedColumns),
			"retrievedColumns": sortedSliceFromMap(retrievedColumns),
		}).Debug("Sampled columns from peer with some errors")
@@ -506,20 +511,22 @@ func selectRandomPeer(peers map[peer.ID]bool) peer.ID {
// the KZG inclusion and the KZG proof.
func verifyColumn(
	roDataColumn blocks.RODataColumn,
-	root [32]byte,
+	blockProcessedData *statefeed.BlockProcessedData,
	pid peer.ID,
	requestedColumns map[uint64]bool,
-	columnVerifier verification.NewColumnVerifier,
+	dataColumnsVerifier verification.NewDataColumnsVerifier,
) bool {
	retrievedColumn := roDataColumn.ColumnIndex

	// Filter out columns with incorrect root.
-	actualRoot := roDataColumn.BlockRoot()
-	if actualRoot != root {
+	columnRoot := roDataColumn.BlockRoot()
+	blockRoot := blockProcessedData.BlockRoot
+
+	if columnRoot != blockRoot {
		log.WithFields(logrus.Fields{
			"peerID":        pid,
-			"requestedRoot": fmt.Sprintf("%#x", root),
-			"actualRoot":    fmt.Sprintf("%#x", actualRoot),
+			"requestedRoot": fmt.Sprintf("%#x", blockRoot),
+			"columnRoot":    fmt.Sprintf("%#x", columnRoot),
		}).Debug("Retrieved root does not match requested root")

		return false
@@ -538,25 +545,18 @@ func verifyColumn(
		return false
	}

-	vf := columnVerifier(roDataColumn, verification.SamplingColumnSidecarRequirements)
-	// Filter out columns which did not pass the KZG inclusion proof verification.
-	if err := vf.SidecarInclusionProven(); err != nil {
-		log.WithFields(logrus.Fields{
-			"peerID": pid,
-			"root":   fmt.Sprintf("%#x", root),
-			"index":  retrievedColumn,
-		}).WithError(err).Debug("Failed to verify KZG inclusion proof for retrieved column")
-		return false
+	roBlock := blockProcessedData.SignedBlock.Block()
+
+	wrappedBlockDataColumns := []verify.WrappedBlockDataColumn{
+		{
+			ROBlock:      roBlock,
+			RODataColumn: roDataColumn,
+		},
	}

-	// Filter out columns which did not pass the KZG proof verification.
-	if err := vf.SidecarKzgProofVerified(); err != nil {
-		log.WithFields(logrus.Fields{
-			"peerID": pid,
-			"root":   fmt.Sprintf("%#x", root),
-			"index":  retrievedColumn,
-		}).WithError(err).Debug("Failed to verify KZG proof for retrieved column")
+	if err := verify.DataColumnsAlignWithBlock(wrappedBlockDataColumns, dataColumnsVerifier); err != nil {
		return false
	}
+
	return true
}
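
Note on the new TODO in sampleDataColumnsFromPeer: once peer sampling is wired in, sampled columns are meant to be verified in one batch rather than one column at a time. A minimal sketch of what that batching could look like, using only the verify API that appears in this diff (WrappedBlockDataColumn, DataColumnsAlignWithBlock); the helper name verifySampledColumns is hypothetical and not part of this change:

// Hypothetical helper (not in this PR): batch-verify all sampled columns
// against the block with a single DataColumnsAlignWithBlock call, as the
// TODO above suggests. Field names are assumed to match this diff.
func verifySampledColumns(
	blockProcessedData *statefeed.BlockProcessedData,
	roDataColumns []blocks.RODataColumn,
	dataColumnsVerifier verification.NewDataColumnsVerifier,
) error {
	roBlock := blockProcessedData.SignedBlock.Block()

	// Wrap every sampled column with the block it must align with.
	wrapped := make([]verify.WrappedBlockDataColumn, 0, len(roDataColumns))
	for _, roDataColumn := range roDataColumns {
		wrapped = append(wrapped, verify.WrappedBlockDataColumn{
			ROBlock:      roBlock,
			RODataColumn: roDataColumn,
		})
	}

	// One call covers the per-column inclusion-proof and KZG-proof checks
	// that this diff removes from verifyColumn.
	return verify.DataColumnsAlignWithBlock(wrapped, dataColumnsVerifier)
}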