1
- use std:: sync:: Arc ;
1
+ use std:: collections:: HashMap ;
2
+ use std:: sync:: { Arc , Mutex } ;
3
+ use std:: time:: Duration ;
2
4
3
5
use alloy:: primitives:: U256 ;
6
+ use itertools:: Itertools ;
4
7
use mempool_test_utils:: in_ci;
5
8
use mempool_test_utils:: starknet_api_test_utils:: DEFAULT_ANVIL_L1_ACCOUNT_ADDRESS ;
6
9
use papyrus_base_layer:: ethereum_base_layer_contract:: {
@@ -12,17 +15,26 @@ use papyrus_base_layer::test_utils::{
12
15
anvil_instance_from_config,
13
16
ethereum_base_layer_config_for_anvil,
14
17
} ;
18
+ use starknet_api:: block:: BlockNumber ;
15
19
use starknet_api:: contract_address;
16
20
use starknet_api:: core:: { EntryPointSelector , Nonce } ;
17
21
use starknet_api:: executable_transaction:: L1HandlerTransaction as ExecutableL1HandlerTransaction ;
18
22
use starknet_api:: hash:: StarkHash ;
19
23
use starknet_api:: transaction:: fields:: { Calldata , Fee } ;
20
24
use starknet_api:: transaction:: { L1HandlerTransaction , TransactionHasher , TransactionVersion } ;
21
- use starknet_l1_provider_types:: Event ;
25
+ use starknet_l1_provider_types:: { Event , L1ProviderClient } ;
26
+ use starknet_sequencer_infra:: trace_util:: configure_tracing;
27
+ use starknet_state_sync_types:: communication:: MockStateSyncClient ;
28
+ use starknet_state_sync_types:: state_sync_types:: SyncBlock ;
22
29
23
- use crate :: event_identifiers_to_track ;
30
+ use crate :: l1_provider :: create_l1_provider ;
24
31
use crate :: l1_scraper:: { L1Scraper , L1ScraperConfig } ;
25
32
use crate :: test_utils:: FakeL1ProviderClient ;
33
+ use crate :: { event_identifiers_to_track, L1ProviderConfig } ;
34
+
35
+ const fn height_add ( block_number : BlockNumber , k : u64 ) -> BlockNumber {
36
+ BlockNumber ( block_number. 0 + k)
37
+ }
26
38
27
39
// TODO(Gilad): Replace EthereumBaseLayerContract with a mock that has a provider initialized with
28
40
// `with_recommended_fillers`, in order to be able to create txs from non-default users.
@@ -123,6 +135,288 @@ async fn txs_happy_flow() {
123
135
fake_client. assert_add_events_received_with ( & [ ] ) ;
124
136
}
125
137
138
+ // TODO(Gilad): figure out how To setup anvil on a specific L1 block (through genesis.json?) and
139
+ // with a specified L2 block logged to L1 (hopefully without having to use real backup).
140
+ /// This test simulates a bootstrapping flow, in which 3 blocks are synced from L2, during which two
141
+ /// new blocks from past the catch-up height arrive. The expected behavior is that the synced
142
+ /// commit_blocks are processed as they come, and the two new blocks are backlogged until the synced
143
+ /// blocks are processed, after which they are processed in order.
144
+ #[ tokio:: test]
145
+ #[ ignore = "stale and broken, will be fixed in next PR" ]
146
+ async fn bootstrap_e2e ( ) {
147
+ if !in_ci ( ) {
148
+ return ;
149
+ }
150
+ configure_tracing ( ) . await ;
151
+
152
+ // Setup.
153
+
154
+ let l1_provider_client = Arc :: new ( FakeL1ProviderClient :: default ( ) ) ;
155
+ let startup_height = BlockNumber ( 2 ) ;
156
+ let catch_up_height = BlockNumber ( 5 ) ;
157
+
158
+ // Make the mocked sync client try removing from a hashmap as a response to get block.
159
+ let mut sync_client = MockStateSyncClient :: default ( ) ;
160
+ let sync_response = Arc :: new ( Mutex :: new ( HashMap :: < BlockNumber , SyncBlock > :: new ( ) ) ) ;
161
+ let mut sync_response_clone = sync_response. lock ( ) . unwrap ( ) . clone ( ) ;
162
+ sync_client. expect_get_block ( ) . returning ( move |input| Ok ( sync_response_clone. remove ( & input) ) ) ;
163
+
164
+ let config = L1ProviderConfig {
165
+ bootstrap_catch_up_height_override : Some ( catch_up_height) ,
166
+ startup_sync_sleep_retry_interval : Duration :: from_millis ( 10 ) ,
167
+ ..Default :: default ( )
168
+ } ;
169
+ let mut l1_provider = create_l1_provider (
170
+ config,
171
+ l1_provider_client. clone ( ) ,
172
+ Arc :: new ( sync_client) ,
173
+ startup_height,
174
+ ) ;
175
+
176
+ // Test.
177
+
178
+ // Trigger the bootstrapper: this will trigger the sync task to start trying to fetch blocks
179
+ // from the sync client, which will always return nothing since the hash map above is still
180
+ // empty. The sync task will busy-wait on the height until we feed the hashmap.
181
+ // TODO(Gilad): Consider adding txs here and in the commit blocks, might make the test harder to
182
+ // understand though.
183
+ let scraped_l1_handler_txs = vec ! [ ] ; // No txs to scrape in this test.
184
+ l1_provider. initialize ( scraped_l1_handler_txs) . await . unwrap ( ) ;
185
+
186
+ // Load first **Sync** response: the initializer task will pick it up within the specified
187
+ // interval.
188
+ sync_response. lock ( ) . unwrap ( ) . insert ( startup_height, SyncBlock :: default ( ) ) ;
189
+ tokio:: time:: sleep ( config. startup_sync_sleep_retry_interval ) . await ;
190
+
191
+ // **Commit** 2 blocks past catchup height, should be received after the previous sync.
192
+ let no_txs_committed = vec ! [ ] ; // Not testing txs in this test.
193
+ l1_provider_client. commit_block ( no_txs_committed. clone ( ) , catch_up_height) . await . unwrap ( ) ;
194
+ tokio:: time:: sleep ( config. startup_sync_sleep_retry_interval ) . await ;
195
+ l1_provider_client
196
+ . commit_block ( no_txs_committed, height_add ( catch_up_height, 1 ) )
197
+ . await
198
+ . unwrap ( ) ;
199
+ tokio:: time:: sleep ( config. startup_sync_sleep_retry_interval ) . await ;
200
+
201
+ // Feed sync task the remaining blocks, will be received after the commits above.
202
+ sync_response. lock ( ) . unwrap ( ) . insert ( height_add ( startup_height, 1 ) , SyncBlock :: default ( ) ) ;
203
+ sync_response. lock ( ) . unwrap ( ) . insert ( height_add ( startup_height, 2 ) , SyncBlock :: default ( ) ) ;
204
+ tokio:: time:: sleep ( 2 * config. startup_sync_sleep_retry_interval ) . await ;
205
+
206
+ // Assert that initializer task has received the stubbed responses from the sync client and sent
207
+ // the corresponding commit blocks to the provider, in the order implied to by the test
208
+ // structure.
209
+ let mut commit_blocks = l1_provider_client. commit_blocks_received . lock ( ) . unwrap ( ) ;
210
+ let received_order = commit_blocks. iter ( ) . map ( |block| block. height ) . collect_vec ( ) ;
211
+ let expected_order =
212
+ vec ! [ BlockNumber ( 2 ) , BlockNumber ( 5 ) , BlockNumber ( 6 ) , BlockNumber ( 3 ) , BlockNumber ( 4 ) ] ;
213
+ assert_eq ! (
214
+ received_order, expected_order,
215
+ "Sanity check failed: commit block order mismatch. Expected {:?}, got {:?}" ,
216
+ expected_order, received_order
217
+ ) ;
218
+
219
+ // Apply commit blocks and assert that correct height commit_blocks are applied, but commit
220
+ // blocks past catch_up_height are backlogged.
221
+ // TODO(Gilad): once we are able to create clients on top of channels, this manual'ness won't
222
+ // be necessary. Right now we cannot create clients without spinning up all servers, so we have
223
+ // to use a mock.
224
+
225
+ let mut commit_blocks = commit_blocks. drain ( ..) ;
226
+
227
+ // Apply height 2.
228
+ let next_block = commit_blocks. next ( ) . unwrap ( ) ;
229
+ l1_provider. commit_block ( & next_block. committed_txs , next_block. height ) . unwrap ( ) ;
230
+ assert_eq ! ( l1_provider. current_height, BlockNumber ( 3 ) ) ;
231
+
232
+ // Backlog height 5.
233
+ let next_block = commit_blocks. next ( ) . unwrap ( ) ;
234
+ l1_provider. commit_block ( & next_block. committed_txs , next_block. height ) . unwrap ( ) ;
235
+ // Assert that this didn't affect height; this commit block is too high so is backlogged.
236
+ assert_eq ! ( l1_provider. current_height, BlockNumber ( 3 ) ) ;
237
+
238
+ // Backlog height 6.
239
+ let next_block = commit_blocks. next ( ) . unwrap ( ) ;
240
+ l1_provider. commit_block ( & next_block. committed_txs , next_block. height ) . unwrap ( ) ;
241
+ // Assert backlogged, like height 5.
242
+ assert_eq ! ( l1_provider. current_height, BlockNumber ( 3 ) ) ;
243
+
244
+ // Apply height 3
245
+ let next_block = commit_blocks. next ( ) . unwrap ( ) ;
246
+ l1_provider. commit_block ( & next_block. committed_txs , next_block. height ) . unwrap ( ) ;
247
+ assert_eq ! ( l1_provider. current_height, BlockNumber ( 4 ) ) ;
248
+
249
+ // Apply height 4 ==> this triggers committing the backlogged heights 5 and 6.
250
+ let next_block = commit_blocks. next ( ) . unwrap ( ) ;
251
+ l1_provider. commit_block ( & next_block. committed_txs , next_block. height ) . unwrap ( ) ;
252
+ assert_eq ! ( l1_provider. current_height, BlockNumber ( 7 ) ) ;
253
+
254
+ // Assert that the bootstrapper has been dropped.
255
+ assert ! ( !l1_provider. state. is_bootstrapping( ) ) ;
256
+ }
257
+
258
+ #[ tokio:: test]
259
+ async fn bootstrap_delayed_sync_state_with_trivial_catch_up ( ) {
260
+ if !in_ci ( ) {
261
+ return ;
262
+ }
263
+ configure_tracing ( ) . await ;
264
+
265
+ // Setup.
266
+
267
+ let l1_provider_client = Arc :: new ( FakeL1ProviderClient :: default ( ) ) ;
268
+ let startup_height = BlockNumber ( 3 ) ;
269
+
270
+ let mut sync_client = MockStateSyncClient :: default ( ) ;
271
+ // Mock sync response for an arbitrary number of calls to get_latest_block_number.
272
+ // Later in the test we modify it to become something else.
273
+ let sync_height_response = Arc :: new ( Mutex :: new ( None ) ) ;
274
+ let sync_response_clone = sync_height_response. clone ( ) ;
275
+ sync_client
276
+ . expect_get_latest_block_number ( )
277
+ . returning ( move || Ok ( * sync_response_clone. lock ( ) . unwrap ( ) ) ) ;
278
+
279
+ let config = L1ProviderConfig {
280
+ startup_sync_sleep_retry_interval : Duration :: from_millis ( 10 ) ,
281
+ ..Default :: default ( )
282
+ } ;
283
+ let mut l1_provider = create_l1_provider (
284
+ config,
285
+ l1_provider_client. clone ( ) ,
286
+ Arc :: new ( sync_client) ,
287
+ startup_height,
288
+ ) ;
289
+
290
+ // Test.
291
+
292
+ // Start the sync sequence, should busy-wait until the sync height is sent.
293
+ let scraped_l1_handler_txs = [ ] ; // No txs to scrape in this test.
294
+ l1_provider. initialize ( scraped_l1_handler_txs. into ( ) ) . await . unwrap ( ) ;
295
+
296
+ // **Commit** a few blocks. The height starts from the provider's current height, since this
297
+ // is a trivial catchup scenario (nothing to catch up).
298
+ // This checks that the trivial catch_up_height doesn't mess up this flow.
299
+ let no_txs_committed = [ ] ; // Not testing txs in this test.
300
+ l1_provider_client. commit_block ( no_txs_committed. to_vec ( ) , startup_height) . await . unwrap ( ) ;
301
+ l1_provider_client
302
+ . commit_block ( no_txs_committed. to_vec ( ) , height_add ( startup_height, 1 ) )
303
+ . await
304
+ . unwrap ( ) ;
305
+
306
+ // Forward all messages buffered in the client to the provider.
307
+ l1_provider_client. flush_messages ( & mut l1_provider) . await ;
308
+
309
+ // Commit blocks should have been applied.
310
+ let start_height_plus_2 = height_add ( startup_height, 2 ) ;
311
+ assert_eq ! ( l1_provider. current_height, start_height_plus_2) ;
312
+ // Should still be bootstrapping, since catchup height isn't determined yet.
313
+ // Technically we could end bootstrapping at this point, but its simpler to let it
314
+ // terminate gracefully once the the sync is ready.
315
+ assert ! ( l1_provider. state. is_bootstrapping( ) ) ;
316
+
317
+ * sync_height_response. lock ( ) . unwrap ( ) = Some ( BlockNumber ( 2 ) ) ;
318
+
319
+ // Let the sync task continue, it should short circuit.
320
+ tokio:: time:: sleep ( config. startup_sync_sleep_retry_interval ) . await ;
321
+ // Assert height is unchanged from last time, no commit block was called from the sync task.
322
+ assert_eq ! ( l1_provider. current_height, start_height_plus_2) ;
323
+ // Finally, commit a new block to trigger the bootstrapping check, should switch to steady
324
+ // state.
325
+ l1_provider. commit_block ( & no_txs_committed, start_height_plus_2) . unwrap ( ) ;
326
+ assert_eq ! ( l1_provider. current_height, height_add( start_height_plus_2, 1 ) ) ;
327
+ // The new commit block triggered the catch-up check, which ended the bootstrapping phase.
328
+ assert ! ( !l1_provider. state. is_bootstrapping( ) ) ;
329
+ }
330
+
331
+ #[ tokio:: test]
332
+ async fn bootstrap_delayed_sync_state_with_sync_behind_batcher ( ) {
333
+ if !in_ci ( ) {
334
+ return ;
335
+ }
336
+ configure_tracing ( ) . await ;
337
+
338
+ // Setup.
339
+
340
+ let l1_provider_client = Arc :: new ( FakeL1ProviderClient :: default ( ) ) ;
341
+ let startup_height = BlockNumber ( 1 ) ;
342
+ let sync_height = BlockNumber ( 3 ) ;
343
+
344
+ let mut sync_client = MockStateSyncClient :: default ( ) ;
345
+ // Mock sync response for an arbitrary number of calls to get_latest_block_number.
346
+ // Later in the test we modify it to become something else.
347
+ let sync_height_response = Arc :: new ( Mutex :: new ( None ) ) ;
348
+ let sync_response_clone = sync_height_response. clone ( ) ;
349
+ sync_client
350
+ . expect_get_latest_block_number ( )
351
+ . returning ( move || Ok ( * sync_response_clone. lock ( ) . unwrap ( ) ) ) ;
352
+ sync_client. expect_get_block ( ) . returning ( |_| Ok ( Some ( SyncBlock :: default ( ) ) ) ) ;
353
+
354
+ let config = L1ProviderConfig {
355
+ startup_sync_sleep_retry_interval : Duration :: from_millis ( 10 ) ,
356
+ ..Default :: default ( )
357
+ } ;
358
+ let mut l1_provider = create_l1_provider (
359
+ config,
360
+ l1_provider_client. clone ( ) ,
361
+ Arc :: new ( sync_client) ,
362
+ startup_height,
363
+ ) ;
364
+
365
+ // Test.
366
+
367
+ // Start the sync sequence, should busy-wait until the sync height is sent.
368
+ let scraped_l1_handler_txs = [ ] ; // No txs to scrape in this test.
369
+ l1_provider. initialize ( scraped_l1_handler_txs. into ( ) ) . await . unwrap ( ) ;
370
+
371
+ // **Commit** a few blocks. These should get backlogged since they are post-sync-height.
372
+ // Sleeps are sprinkled in to give the async task a couple shots at attempting to get the sync
373
+ // height (see DEBUG log).
374
+ let no_txs_committed = [ ] ; // Not testing txs in this test.
375
+ l1_provider_client
376
+ . commit_block ( no_txs_committed. to_vec ( ) , sync_height. unchecked_next ( ) )
377
+ . await
378
+ . unwrap ( ) ;
379
+ tokio:: time:: sleep ( config. startup_sync_sleep_retry_interval ) . await ;
380
+ l1_provider_client
381
+ . commit_block ( no_txs_committed. to_vec ( ) , sync_height. unchecked_next ( ) . unchecked_next ( ) )
382
+ . await
383
+ . unwrap ( ) ;
384
+
385
+ // Forward all messages buffered in the client to the provider.
386
+ l1_provider_client. flush_messages ( & mut l1_provider) . await ;
387
+ tokio:: time:: sleep ( config. startup_sync_sleep_retry_interval ) . await ;
388
+
389
+ // Assert commit blocks are backlogged (didn't affect start height).
390
+ assert_eq ! ( l1_provider. current_height, startup_height) ;
391
+ // Should still be bootstrapping, since catchup height isn't determined yet.
392
+ assert ! ( l1_provider. state. is_bootstrapping( ) ) ;
393
+
394
+ // Simulate the state sync service finally being ready, and give the async task enough time to
395
+ // pick this up and sync up the provider.
396
+ * sync_height_response. lock ( ) . unwrap ( ) = Some ( sync_height) ;
397
+ tokio:: time:: sleep ( config. startup_sync_sleep_retry_interval ) . await ;
398
+ // Forward all messages buffered in the client to the provider.
399
+ l1_provider_client. flush_messages ( & mut l1_provider) . await ;
400
+
401
+ // Two things happened here: the async task sent 2 commit blocks it got from the sync_client,
402
+ // which bumped the provider height to sync_height+1, then the backlog was applied which bumped
403
+ // it twice again.
404
+ assert_eq ! (
405
+ l1_provider. current_height,
406
+ sync_height. unchecked_next( ) . unchecked_next( ) . unchecked_next( )
407
+ ) ;
408
+ // Sync height was reached, bootstrapping was completed.
409
+ assert ! ( !l1_provider. state. is_bootstrapping( ) ) ;
410
+ }
411
+
412
// Placeholder for a future test; the `ignore` message below describes the intended coverage.
#[test]
#[ignore = "similar to backlog_happy_flow, only shorter, and sprinkle some start_block/get_txs \
    attempts while its bootstrapping (and assert failure on height), then assert that they \
    succeed after bootstrapping ends."]
fn bootstrap_completion() {
    todo!()
}
419
+
126
420
#[ tokio:: test]
127
421
#[ ignore = "Not yet implemented: generate an l1 and an cancel event for that tx, also check an \
128
422
abort for a different tx"]
0 commit comments