@@ -7,11 +7,19 @@ import {
   container,
   ErrorCode,
   errors,
-  logger,
+  Logger,
+  logger as defaultLogger,
   ReplicationAssertionError,
   ServiceError
 } from '@powersync/lib-services-framework';
-import { deserializeBson, InternalOpId, SaveOperationTag, storage, utils } from '@powersync/service-core';
+import {
+  BucketStorageMarkRecordUnavailable,
+  deserializeBson,
+  InternalOpId,
+  SaveOperationTag,
+  storage,
+  utils
+} from '@powersync/service-core';
 import * as timers from 'node:timers/promises';
 import { PowerSyncMongo } from './db.js';
 import { CurrentBucket, CurrentDataDocument, SourceKey, SyncRuleDocument } from './models.js';
@@ -46,12 +54,18 @@ export interface MongoBucketBatchOptions {
    * Set to true for initial replication.
    */
   skipExistingRows: boolean;
+
+  markRecordUnavailable: BucketStorageMarkRecordUnavailable | undefined;
+
+  logger?: Logger;
 }
 
 export class MongoBucketBatch
   extends BaseObserver<storage.BucketBatchStorageListener>
   implements storage.BucketStorageBatch
 {
+  private logger: Logger;
+
   private readonly client: mongo.MongoClient;
   public readonly db: PowerSyncMongo;
   public readonly session: mongo.ClientSession;
@@ -65,6 +79,7 @@ export class MongoBucketBatch
 
   private batch: OperationBatch | null = null;
   private write_checkpoint_batch: storage.CustomWriteCheckpointOptions[] = [];
+  private markRecordUnavailable: BucketStorageMarkRecordUnavailable | undefined;
 
   /**
    * Last LSN received associated with a checkpoint.
@@ -86,6 +101,7 @@ export class MongoBucketBatch
 
   constructor(options: MongoBucketBatchOptions) {
     super();
+    this.logger = options.logger ?? defaultLogger;
     this.client = options.db.client;
     this.db = options.db;
     this.group_id = options.groupId;
@@ -96,6 +112,7 @@ export class MongoBucketBatch
     this.sync_rules = options.syncRules;
     this.storeCurrentData = options.storeCurrentData;
     this.skipExistingRows = options.skipExistingRows;
+    this.markRecordUnavailable = options.markRecordUnavailable;
     this.batch = new OperationBatch();
 
     this.persisted_op = options.keepaliveOp ?? null;
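With these constructor changes, the batch resolves its logger once (`options.logger ?? defaultLogger`) and stores the optional resnapshot callback. A hedged caller-side sketch of the new options; `existingOptions` and `streamLogger` are placeholders, not names from this change:

    // Hypothetical caller: supply a per-stream logger so output from
    // concurrent replication streams can be told apart.
    const batch = new MongoBucketBatch({
      ...existingOptions, // the existing required options (db, groupId, syncRules, ...)
      markRecordUnavailable: undefined, // or a resnapshot callback; see the update path below
      logger: streamLogger // omitted => falls back to defaultLogger
    });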
@@ -232,7 +249,9 @@ export class MongoBucketBatch
       current_data_lookup.set(cacheKey(doc._id.t, doc._id.k), doc);
     }
 
-    let persistedBatch: PersistedBatch | null = new PersistedBatch(this.group_id, transactionSize);
+    let persistedBatch: PersistedBatch | null = new PersistedBatch(this.group_id, transactionSize, {
+      logger: this.logger
+    });
 
     for (let op of b) {
       if (resumeBatch) {
@@ -311,11 +330,18 @@ export class MongoBucketBatch
         // Not an error if we re-apply a transaction
         existing_buckets = [];
         existing_lookups = [];
-        // Log to help with debugging if there was a consistency issue
         if (this.storeCurrentData) {
-          logger.warn(
-            `Cannot find previous record for update on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}`
-          );
+          if (this.markRecordUnavailable != null) {
+            // This will trigger a "resnapshot" of the record.
+            // This is not relevant if storeCurrentData is false, since we'll get the full row
+            // directly in the replication stream.
+            this.markRecordUnavailable(record);
+          } else {
+            // Log to help with debugging if there was a consistency issue
+            this.logger.warn(
+              `Cannot find previous record for update on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}`
+            );
+          }
        }
      } else {
        existing_buckets = result.buckets;
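This branch is the core behavioural change: when `storeCurrentData` is enabled and the previous version of an updated row cannot be found, the batch now hands the record to `markRecordUnavailable` (triggering a resnapshot of that row) instead of only logging a warning. A sketch of the callback shape as inferred from this call site; the actual `BucketStorageMarkRecordUnavailable` type exported by `@powersync/service-core` may differ:

    // Inferred shape: the callback receives the save operation whose
    // previous state could not be found.
    type MarkRecordUnavailable = (record: storage.SaveOptions) => void;

    // Hypothetical replicator-side handler: queue the row for a targeted
    // re-snapshot after the current batch. `resnapshotQueue` is illustrative.
    const markRecordUnavailable: MarkRecordUnavailable = (record) => {
      resnapshotQueue.push({ table: record.sourceTable, id: record.before?.id });
    };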
@@ -332,8 +358,8 @@ export class MongoBucketBatch
         existing_buckets = [];
         existing_lookups = [];
         // Log to help with debugging if there was a consistency issue
-        if (this.storeCurrentData) {
-          logger.warn(
+        if (this.storeCurrentData && this.markRecordUnavailable == null) {
+          this.logger.warn(
             `Cannot find previous record for delete on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}`
           );
         }
@@ -430,7 +456,7 @@ export class MongoBucketBatch
         }
       }
     );
-    logger.error(
+    this.logger.error(
       `Failed to evaluate data query on ${record.sourceTable.qualifiedName}.${record.after?.id}: ${error.error}`
     );
   }
@@ -470,7 +496,7 @@ export class MongoBucketBatch
         }
       }
     );
-    logger.error(
+    this.logger.error(
       `Failed to evaluate parameter query on ${record.sourceTable.qualifiedName}.${after.id}: ${error.error}`
     );
   }
@@ -524,7 +550,7 @@ export class MongoBucketBatch
       if (e instanceof mongo.MongoError && e.hasErrorLabel('TransientTransactionError')) {
         // Likely write conflict caused by concurrent write stream replicating
       } else {
-        logger.warn('Transaction error', e as Error);
+        this.logger.warn('Transaction error', e as Error);
       }
       await timers.setTimeout(Math.random() * 50);
       throw e;
@@ -549,7 +575,7 @@ export class MongoBucketBatch
     await this.withTransaction(async () => {
       flushTry += 1;
       if (flushTry % 10 == 0) {
-        logger.info(`${this.slot_name} ${description} - try ${flushTry}`);
+        this.logger.info(`${description} - try ${flushTry}`);
       }
       if (flushTry > 20 && Date.now() > lastTry) {
         throw new ServiceError(ErrorCode.PSYNC_S1402, 'Max transaction tries exceeded');
@@ -619,13 +645,13 @@ export class MongoBucketBatch
     if (this.last_checkpoint_lsn != null && lsn < this.last_checkpoint_lsn) {
       // When re-applying transactions, don't create a new checkpoint until
       // we are past the last transaction.
-      logger.info(`Re-applied transaction ${lsn} - skipping checkpoint`);
+      this.logger.info(`Re-applied transaction ${lsn} - skipping checkpoint`);
       // Cannot create a checkpoint yet - return false
       return false;
     }
     if (lsn < this.no_checkpoint_before_lsn) {
       if (Date.now() - this.lastWaitingLogThottled > 5_000) {
-        logger.info(
+        this.logger.info(
           `Waiting until ${this.no_checkpoint_before_lsn} before creating checkpoint, currently at ${lsn}. Persisted op: ${this.persisted_op}`
         );
         this.lastWaitingLogThottled = Date.now();
@@ -677,7 +703,8 @@ export class MongoBucketBatch
         _id: this.group_id
       },
       {
-        $set: update
+        $set: update,
+        $unset: { snapshot_lsn: 1 }
       },
       { session: this.session }
     );
@@ -699,7 +726,7 @@ export class MongoBucketBatch
     if (this.persisted_op != null) {
       // The commit may have been skipped due to "no_checkpoint_before_lsn".
       // Apply it now if relevant
-      logger.info(`Commit due to keepalive at ${lsn} / ${this.persisted_op}`);
+      this.logger.info(`Commit due to keepalive at ${lsn} / ${this.persisted_op}`);
       return await this.commit(lsn);
     }
 
@@ -713,7 +740,8 @@ export class MongoBucketBatch
           snapshot_done: true,
           last_fatal_error: null,
           last_keepalive_ts: new Date()
-        }
+        },
+        $unset: { snapshot_lsn: 1 }
       },
       { session: this.session }
     );
@@ -722,6 +750,22 @@ export class MongoBucketBatch
     return true;
   }
 
+  async setSnapshotLsn(lsn: string): Promise<void> {
+    const update: Partial<SyncRuleDocument> = {
+      snapshot_lsn: lsn
+    };
+
+    await this.db.sync_rules.updateOne(
+      {
+        _id: this.group_id
+      },
+      {
+        $set: update
+      },
+      { session: this.session }
+    );
+  }
+
   async save(record: storage.SaveOptions): Promise<storage.FlushedResult | null> {
     const { after, before, sourceTable, tag } = record;
     for (const event of this.getTableEvents(sourceTable)) {
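Together with the `$unset: { snapshot_lsn: 1 }` added to `commit()` and the keepalive path above, the new `setSnapshotLsn()` gives `snapshot_lsn` a clear lifecycle: persisted while an initial snapshot is in progress, cleared once a consistent checkpoint is reached. A hedged sketch of the intended call order; `getCurrentLsn` and `copyTables` are illustrative stand-ins for replicator-side logic, not part of this change:

    // Hypothetical replicator flow around an initial snapshot:
    const lsn = await getCurrentLsn();         // capture the source position first
    await batch.setSnapshotLsn(lsn);           // persisted; survives a restart mid-snapshot
    await copyTables(batch);                   // chunked table copy via batch.save(...)
    await batch.markSnapshotDone(tables, lsn);
    await batch.commit(lsn);                   // also clears snapshot_lsn via $unset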
@@ -746,7 +790,7 @@ export class MongoBucketBatch
       return null;
     }
 
-    logger.debug(`Saving ${record.tag}:${record.before?.id}/${record.after?.id}`);
+    this.logger.debug(`Saving ${record.tag}:${record.before?.id}/${record.after?.id}`);
 
     this.batch ??= new OperationBatch();
     this.batch.push(new RecordOperation(record));
@@ -817,7 +861,7 @@ export class MongoBucketBatch
       session: session
     });
     const batch = await cursor.toArray();
-    const persistedBatch = new PersistedBatch(this.group_id, 0);
+    const persistedBatch = new PersistedBatch(this.group_id, 0, { logger: this.logger });
 
     for (let value of batch) {
       persistedBatch.saveBucketData({
@@ -847,6 +891,37 @@ export class MongoBucketBatch
     return last_op!;
   }
 
+  async updateTableProgress(
+    table: storage.SourceTable,
+    progress: Partial<storage.TableSnapshotStatus>
+  ): Promise<storage.SourceTable> {
+    const copy = table.clone();
+    const snapshotStatus = {
+      totalEstimatedCount: progress.totalEstimatedCount ?? copy.snapshotStatus?.totalEstimatedCount ?? 0,
+      replicatedCount: progress.replicatedCount ?? copy.snapshotStatus?.replicatedCount ?? 0,
+      lastKey: progress.lastKey ?? copy.snapshotStatus?.lastKey ?? null
+    };
+    copy.snapshotStatus = snapshotStatus;
+
+    await this.withTransaction(async () => {
+      await this.db.source_tables.updateOne(
+        { _id: table.id },
+        {
+          $set: {
+            snapshot_status: {
+              last_key: snapshotStatus.lastKey == null ? null : new bson.Binary(snapshotStatus.lastKey),
+              total_estimated_count: snapshotStatus.totalEstimatedCount,
+              replicated_count: snapshotStatus.replicatedCount
+            }
+          }
+        },
+        { session: this.session }
+      );
+    });
+
+    return copy;
+  }
+
   async markSnapshotDone(tables: storage.SourceTable[], no_checkpoint_before_lsn: string) {
     const session = this.session;
     const ids = tables.map((table) => table.id);
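The new `updateTableProgress()` merges partial progress into the stored `snapshot_status` (the `??` fallbacks keep prior values when a field is omitted), so a chunked snapshot can resume from `lastKey` after a restart; the `$unset` in the next hunk clears the status once the snapshot completes. A hypothetical usage sketch; `readTableInChunks` and the chunk fields are assumptions:

    // Hypothetical chunked-snapshot loop reporting progress as it goes:
    for await (const chunk of readTableInChunks(table)) {
      // ... persist chunk rows via batch.save(...) ...
      table = await batch.updateTableProgress(table, {
        replicatedCount: (table.snapshotStatus?.replicatedCount ?? 0) + chunk.rowCount,
        lastKey: chunk.lastKey // serialized key of the last row copied
      });
    }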
@@ -857,6 +932,9 @@ export class MongoBucketBatch
       {
         $set: {
           snapshot_done: true
+        },
+        $unset: {
+          snapshot_status: 1
         }
       },
       { session }
@@ -880,17 +958,8 @@ export class MongoBucketBatch
       }
     });
     return tables.map((table) => {
-      const copy = new storage.SourceTable(
-        table.id,
-        table.connectionTag,
-        table.objectId,
-        table.schema,
-        table.table,
-        table.replicaIdColumns,
-        table.snapshotComplete
-      );
-      copy.syncData = table.syncData;
-      copy.syncParameters = table.syncParameters;
+      const copy = table.clone();
+      copy.snapshotComplete = true;
       return copy;
     });
   }