@@ -10,6 +10,7 @@ import (

	"github.com/matrix-org/complement/internal/b"
	"github.com/matrix-org/complement/internal/client"
+	"github.com/matrix-org/complement/internal/federation"
	"github.com/matrix-org/complement/runtime"
)

@@ -251,6 +252,120 @@ func TestSync(t *testing.T) {
			res, _ := alice.MustSync(t, client.SyncReq{Since: nextBatch})
			usersInPresenceEvents(t, res.Get("presence"), []string{})
		})
+
+		t.Run("sync should succeed even if the sync token points to a redaction of an unknown event", func(t *testing.T) {
+			// this is a regression test for https://github.com/matrix-org/synapse/issues/12864
+			//
+			// The idea here is that we need a sync token which points to a redaction
+			// for an event which doesn't exist. Such a redaction may not be served to
+			// the client. This can lead to server bugs when the server tries to fetch
+			// the event corresponding to the sync token.
+			//
+			// The C-S API does not permit us to generate such a redaction event, so
+			// we have to poke it in from a federated server.
+			//
+			// The situation is complicated further by the fact that we cannot see
+			// the faulty redaction, and therefore cannot tell whether our sync token
+			// includes it or not. The normal trick here would be to send another
+			// (regular) event as a sentinel, and then, if that sentinel is returned by
+			// /sync, we can be sure the faulty event has also been processed. However,
+			// that doesn't work here, because doing so would mean that the sync token
+			// points to the sentinel rather than the redaction, negating the whole
+			// point of the test.
+			//
+			// Instead, as a rough proxy, we send a sentinel in a *different* room.
+			// There is no guarantee that the target server will process the events
+			// in the order we send them, but in practice it seems to get close
+			// enough.
+
+			t.Parallel()
+
+			// alice creates two rooms, which charlie (on our test server) joins
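+			// (the Complement federation server below serves signing keys and accepts
+			// incoming /send transactions, which lets charlie join and send events over
+			// federation)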
+			srv := federation.NewServer(t, deployment,
+				federation.HandleKeyRequests(),
+				federation.HandleTransactionRequests(nil, nil),
+			)
+			cancel := srv.Listen()
+			defer cancel()
+
+			charlie := srv.UserID("charlie")
+
+			redactionRoomID := alice.CreateRoom(t, map[string]interface{}{"preset": "public_chat"})
+			redactionRoom := srv.MustJoinRoom(t, deployment, "hs1", redactionRoomID, charlie)
+
+			sentinelRoomID := alice.CreateRoom(t, map[string]interface{}{"preset": "public_chat"})
+			sentinelRoom := srv.MustJoinRoom(t, deployment, "hs1", sentinelRoomID, charlie)
+
+			// charlie creates a bogus redaction, which he sends out, followed by
+			// a good event - in another room - to act as a sentinel. It's not
+			// guaranteed, but hopefully if the sentinel is received, so was the
+			// redaction.
+			redactionEvent := srv.MustCreateEvent(t, redactionRoom, b.Event{
+				Type:    "m.room.redaction",
+				Sender:  charlie,
+				Content: map[string]interface{}{},
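+				// the redacted event ID below is deliberately one which the target server
+				// has never seen, per the scenario described above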
+				Redacts: "$12345",
+			})
+			redactionRoom.AddEvent(redactionEvent)
+			t.Logf("Created redaction event %s", redactionEvent.EventID())
+			srv.MustSendTransaction(t, deployment, "hs1", []json.RawMessage{redactionEvent.JSON()}, nil)
+
+			sentinelEvent := srv.MustCreateEvent(t, sentinelRoom, b.Event{
+				Type:    "m.room.test",
+				Sender:  charlie,
+				Content: map[string]interface{}{"body": "1234"},
+			})
+			sentinelRoom.AddEvent(sentinelEvent)
+			srv.MustSendTransaction(t, deployment, "hs1", []json.RawMessage{redactionEvent.JSON(), sentinelEvent.JSON()}, nil)
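+			// (note the redaction is included again alongside the sentinel; re-sending the
+			// same PDU is harmless, as homeservers de-duplicate events by event ID)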
+
+			// wait for the sentinel to arrive
+			nextBatch := alice.MustSyncUntil(t, client.SyncReq{}, client.SyncTimelineHasEventID(sentinelRoomID, sentinelEvent.EventID()))
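+			// if the ordering worked out, nextBatch now "points to" the bogus redaction in
+			// redactionRoom: the only later event we know about is the sentinel, which is
+			// in the other room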
+
+			// charlie sends another batch of events to force a gappy sync.
+			// We have to send 11 events to force a gap, since we use a filter with a timeline limit of 10 events.
+			pdus := make([]json.RawMessage, 11)
+			var lastSentEventId string
+			for i := range pdus {
+				ev := srv.MustCreateEvent(t, redactionRoom, b.Event{
+					Type:    "m.room.message",
+					Sender:  charlie,
+					Content: map[string]interface{}{},
+				})
+				redactionRoom.AddEvent(ev)
+				pdus[i] = ev.JSON()
+				lastSentEventId = ev.EventID()
+			}
+			srv.MustSendTransaction(t, deployment, "hs1", pdus, nil)
+			t.Logf("Sent filler events, with final event %s", lastSentEventId)
+
+			// sync, starting from the same ?since each time, until the final message turns up.
+			// This is basically an inlining of MustSyncUntil, with the key difference that we
+			// keep the same ?since each time, instead of incrementally syncing on each pass.
+			numResponsesReturned := 0
+			start := time.Now()
+			for {
+				if time.Since(start) > alice.SyncUntilTimeout {
+					t.Fatalf("%s: timed out after %v. Seen %d /sync responses", alice.UserID, time.Since(start), numResponsesReturned)
+				}
+				// sync, using a filter with a limit smaller than the number of PDUs we sent.
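+				// (filterID is assumed to be the limit-10 timeline filter created earlier
+				// in this test; it isn't defined in this diff)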
+				syncResponse, _ := alice.MustSync(t, client.SyncReq{Filter: filterID, Since: nextBatch})
+				numResponsesReturned += 1
+				timeline := syncResponse.Get("rooms.join." + client.GjsonEscape(redactionRoomID) + ".timeline")
+				timelineEvents := timeline.Get("events").Array()
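+				// (assumes the timeline is non-empty on each pass; the filler events have
+				// already been sent, so the redaction room should have something to return)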
+				lastEventIdInSync := timelineEvents[len(timelineEvents)-1].Get("event_id").String()
+
+				t.Logf("Iteration %d: /sync returned %d events, with final event %s", numResponsesReturned, len(timelineEvents), lastEventIdInSync)
+				if lastEventIdInSync == lastSentEventId {
+					// check we actually got a gappy sync - else this test isn't testing the right thing
+					if !timeline.Get("limited").Bool() {
+						t.Fatalf("Not a gappy sync after redaction")
+					}
+					break
+				}
+			}
+
+			// that's it - we successfully did a gappy sync.
+		})
	})
}
