-use std::sync::Arc;
+use std::collections::HashMap;
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
 
 use alloy::primitives::U256;
+use itertools::Itertools;
 
 use mempool_test_utils::in_ci;
 use mempool_test_utils::starknet_api_test_utils::DEFAULT_ANVIL_L1_ACCOUNT_ADDRESS;
 use papyrus_base_layer::ethereum_base_layer_contract::{
@@ -12,17 +15,22 @@ use papyrus_base_layer::test_utils::{
     anvil_instance_from_config,
     ethereum_base_layer_config_for_anvil,
 };
+use starknet_api::block::BlockNumber;
 use starknet_api::contract_address;
 use starknet_api::core::{EntryPointSelector, Nonce};
 use starknet_api::executable_transaction::L1HandlerTransaction as ExecutableL1HandlerTransaction;
 use starknet_api::hash::StarkHash;
 use starknet_api::transaction::fields::{Calldata, Fee};
 use starknet_api::transaction::{L1HandlerTransaction, TransactionHasher, TransactionVersion};
-use starknet_l1_provider_types::Event;
+use starknet_l1_provider_types::{Event, L1ProviderClient};
+use starknet_sequencer_infra::trace_util::configure_tracing;
+use starknet_state_sync_types::communication::MockStateSyncClient;
+use starknet_state_sync_types::state_sync_types::SyncBlock;
 
-use crate::event_identifiers_to_track;
+use crate::l1_provider::create_l1_provider;
 use crate::l1_scraper::{L1Scraper, L1ScraperConfig};
 use crate::test_utils::FakeL1ProviderClient;
+use crate::{event_identifiers_to_track, L1ProviderConfig};
 
 // TODO(Gilad): Replace EthereumBaseLayerContract with a mock that has a provider initialized with
 // `with_recommended_fillers`, in order to be able to create txs from non-default users.
@@ -123,6 +131,207 @@ async fn txs_happy_flow() {
     fake_client.assert_add_events_received_with(&[]);
 }
 
+// TODO(Gilad): figure out how to set up anvil on a specific L1 block (through genesis.json?) and
+// with a specified L2 block logged to L1 (hopefully without having to use a real backup).
+/// This test simulates a bootstrapping flow, in which 3 blocks are synced from L2, during which
+/// two new blocks past the catch-up height arrive. The expected behavior is that the synced
+/// commit_blocks are processed as they come, and the two new blocks are backlogged until the
+/// synced blocks are processed, after which they are processed in order.
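+/// Concretely, with startup height 2 and catch-up height 5 (as below): commits for heights 5 and
+/// 6 arriving mid-sync are backlogged until synced heights 2 through 4 are applied, then flushed
+/// in order.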
140
+ #[ tokio:: test]
141
+ #[ ignore = "stale and broken, will be fixed in next PR" ]
142
+ async fn bootstrap_e2e ( ) {
143
+ if !in_ci ( ) {
144
+ return ;
145
+ }
146
+ configure_tracing ( ) . await ;
147
+
148
+ // Setup.
149
+
150
+ let l1_provider_client = Arc :: new ( FakeL1ProviderClient :: default ( ) ) ;
151
+ let startup_height = BlockNumber ( 2 ) ;
152
+ let catch_up_height = BlockNumber ( 5 ) ;
153
+
154
+ // Make the mocked sync client try removing from a hashmap as a response to get block.
155
+ let mut sync_client = MockStateSyncClient :: default ( ) ;
156
+ let sync_response = Arc :: new ( Mutex :: new ( HashMap :: < BlockNumber , SyncBlock > :: new ( ) ) ) ;
157
+ let mut sync_response_clone = sync_response. lock ( ) . unwrap ( ) . clone ( ) ;
158
+ sync_client. expect_get_block ( ) . returning ( move |input| Ok ( sync_response_clone. remove ( & input) ) ) ;
+
+    let config = L1ProviderConfig {
+        bootstrap_catch_up_height_override: Some(catch_up_height),
+        startup_sync_sleep_retry_interval: Duration::from_millis(10),
+        ..Default::default()
+    };
+    let mut l1_provider = create_l1_provider(
+        config,
+        l1_provider_client.clone(),
+        Arc::new(sync_client),
+        startup_height,
+    );
+
+    // Test.
+
+    // Trigger the bootstrapper: this will trigger the sync task to start trying to fetch blocks
+    // from the sync client, which will always return nothing since the hash map above is still
+    // empty. The sync task will busy-wait on the height until we feed the hashmap.
+    // TODO(Gilad): Consider adding txs here and in the commit blocks; it might make the test
+    // harder to understand, though.
+    let scraped_l1_handler_txs = vec![]; // No txs to scrape in this test.
+    l1_provider.initialize(scraped_l1_handler_txs).await.unwrap();
+
+    // Load the first **Sync** response: the initializer task will pick it up within the specified
+    // interval.
+    sync_response.lock().unwrap().insert(startup_height, SyncBlock::default());
+    tokio::time::sleep(config.startup_sync_sleep_retry_interval).await;
+
+    // **Commit** 2 blocks past the catch-up height; they should be received after the previous
+    // sync.
+    let no_txs_committed = vec![]; // Not testing txs in this test.
+    l1_provider_client
+        .commit_block(no_txs_committed.clone(), catch_up_height.unchecked_next())
+        .await
+        .unwrap();
+    tokio::time::sleep(config.startup_sync_sleep_retry_interval).await;
+    l1_provider_client
+        .commit_block(no_txs_committed, catch_up_height.unchecked_next().unchecked_next())
+        .await
+        .unwrap();
+    tokio::time::sleep(config.startup_sync_sleep_retry_interval).await;
+
+    // Feed the sync task the remaining blocks; they will be received after the commits above.
+    sync_response.lock().unwrap().insert(BlockNumber(startup_height.0 + 1), SyncBlock::default());
+    sync_response.lock().unwrap().insert(BlockNumber(startup_height.0 + 2), SyncBlock::default());
+    tokio::time::sleep(2 * config.startup_sync_sleep_retry_interval).await;
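+    // Two intervals: one per newly fed block, giving the sync task time to pick up both.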
+
+    // Assert that the initializer task has received the stubbed responses from the sync client
+    // and sent the corresponding commit blocks to the provider, in the order implied by the test
+    // structure.
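+    // The direct commit_block calls (heights 5 and 6) reach the client the moment they are made,
+    // whereas synced heights 3 and 4 only arrive once fed to the mock, hence 2, 5, 6, 3, 4.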
+    let mut commit_blocks = l1_provider_client.commit_blocks_received.lock().unwrap();
+    let received_order = commit_blocks.iter().map(|block| block.height).collect_vec();
+    let expected_order =
+        vec![BlockNumber(2), BlockNumber(5), BlockNumber(6), BlockNumber(3), BlockNumber(4)];
+    assert_eq!(
+        received_order, expected_order,
+        "Sanity check failed: commit block order mismatch. Expected {:?}, got {:?}",
+        expected_order, received_order
+    );
+
+    // Apply the commit blocks and assert that commit blocks of the correct height are applied,
+    // but commit blocks past catch_up_height are backlogged.
+    // TODO(Gilad): once we are able to create clients on top of channels, this manual'ness won't
+    // be necessary. Right now we cannot create clients without spinning up all servers, so we
+    // have to use a mock.
+
+    let mut commit_blocks = commit_blocks.drain(..);
+
+    // Apply height 2.
+    let next_block = commit_blocks.next().unwrap();
+    l1_provider.commit_block(&next_block.committed_txs, next_block.height).unwrap();
+    assert_eq!(l1_provider.current_height, BlockNumber(3));
+
+    // Backlog height 5.
+    let next_block = commit_blocks.next().unwrap();
+    l1_provider.commit_block(&next_block.committed_txs, next_block.height).unwrap();
+    // Assert that this didn't affect the height; this commit block is too high, so it is
+    // backlogged.
+    assert_eq!(l1_provider.current_height, BlockNumber(3));
+
+    // Backlog height 6.
+    let next_block = commit_blocks.next().unwrap();
+    l1_provider.commit_block(&next_block.committed_txs, next_block.height).unwrap();
+    // Assert backlogged, like height 5.
+    assert_eq!(l1_provider.current_height, BlockNumber(3));
+
+    // Apply height 3.
+    let next_block = commit_blocks.next().unwrap();
+    l1_provider.commit_block(&next_block.committed_txs, next_block.height).unwrap();
+    assert_eq!(l1_provider.current_height, BlockNumber(4));
+
+    // Apply height 4 ==> this triggers committing the backlogged heights 5 and 6.
+    let next_block = commit_blocks.next().unwrap();
+    l1_provider.commit_block(&next_block.committed_txs, next_block.height).unwrap();
+    assert_eq!(l1_provider.current_height, BlockNumber(7));
+
+    // Assert that the bootstrapper has been dropped.
+    assert!(!l1_provider.state.is_bootstrapping());
+}
+
+#[tokio::test]
+async fn bootstrap_delayed_sync_state_with_trivial_catch_up() {
+    if !in_ci() {
+        return;
+    }
+    configure_tracing().await;
+
+    // Setup.
+
+    let l1_provider_client = Arc::new(FakeL1ProviderClient::default());
+    let startup_height = BlockNumber(2);
+
+    let mut sync_client = MockStateSyncClient::default();
+    // Mock the sync response for an arbitrary number of calls to get_latest_block_number.
+    // Later in the test we modify it to return something else.
+    let sync_height_response = Arc::new(Mutex::new(None));
+    let sync_response_clone = sync_height_response.clone();
+    sync_client
+        .expect_get_latest_block_number()
+        .returning(move || Ok(*sync_response_clone.lock().unwrap()));
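+    // While the mocked response is None the catch-up height is still unknown, so the
+    // bootstrapper keeps polling rather than terminating.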
+
+    let config = L1ProviderConfig {
+        startup_sync_sleep_retry_interval: Duration::from_millis(10),
+        ..Default::default()
+    };
+    let mut l1_provider = create_l1_provider(
+        config,
+        l1_provider_client.clone(),
+        Arc::new(sync_client),
+        startup_height,
+    );
+
+    // Test.
+
+    // Start the sync sequence; it should busy-wait until the sync height is sent.
+    let scraped_l1_handler_txs = []; // No txs to scrape in this test.
+    l1_provider.initialize(scraped_l1_handler_txs.into()).await.unwrap();
+
+    // **Commit** a few blocks. The height starts from the provider's current height, since this
+    // is a trivial catch-up scenario (nothing to catch up).
+    // This checks that the trivial catch_up_height doesn't mess up this flow.
+    let no_txs_committed = []; // Not testing txs in this test.
+    l1_provider.commit_block(&no_txs_committed, startup_height).unwrap();
+    tokio::time::sleep(config.startup_sync_sleep_retry_interval).await;
+    l1_provider.commit_block(&no_txs_committed, startup_height.unchecked_next()).unwrap();
+    tokio::time::sleep(config.startup_sync_sleep_retry_interval).await;
+    // Commit blocks should have been applied.
+    let start_height_plus_2 = startup_height.unchecked_next().unchecked_next();
+    assert_eq!(l1_provider.current_height, start_height_plus_2);
+    // Should still be bootstrapping, since the catch-up height isn't determined yet.
+    // Technically we could end bootstrapping at this point, but it's simpler to let it
+    // terminate gracefully once the sync is ready.
+    assert!(l1_provider.state.is_bootstrapping());
+
+    *sync_height_response.lock().unwrap() = Some(BlockNumber(2));
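+    // Sync height 2 is already below the provider's current height, so catch-up is trivially
+    // complete.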
+
+    // Let the sync task continue; it should short-circuit.
+    tokio::time::sleep(config.startup_sync_sleep_retry_interval).await;
+    // Assert the height is unchanged from last time: no commit block was called from the sync
+    // task.
+    assert_eq!(l1_provider.current_height, start_height_plus_2);
+    // Finally, commit a new block to trigger the bootstrapping check; this should switch the
+    // provider to steady state.
+    l1_provider.commit_block(&no_txs_committed, start_height_plus_2).unwrap();
+    assert_eq!(l1_provider.current_height, start_height_plus_2.unchecked_next());
+    // The catch-up height is now known and reached, so bootstrapping should have terminated.
+    assert!(!l1_provider.state.is_bootstrapping());
+}
+
+#[test]
+#[ignore = "similar to backlog_happy_flow, only shorter, and sprinkle some start_block/get_txs \
+            attempts while it's bootstrapping (and assert failure on height), then assert that \
+            they succeed after bootstrapping ends."]
+fn bootstrap_completion() {
+    todo!()
+}
+
 #[tokio::test]
 #[ignore = "Not yet implemented: generate an l1 and a cancel event for that tx, also check an \
             abort for a different tx"]