@@ -270,7 +270,6 @@ type cache struct {
 	buf  []byte
 	wp   int
 	idx  []uint32
-	lrl  uint32
 	fseq uint64
 	nra  bool
 }
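What the removed field did: `lrl` evidently held the last record's length, consulted when `slotInfo` was asked about the final slot. The remaining fields suffice, since the last record necessarily extends to the end of `buf`. For orientation, a paraphrase of the struct after this change, with comments inferred from usage elsewhere in this diff (not authoritative):

package fscache // illustrative package; the real type lives in filestore.go

type cache struct {
	buf  []byte   // raw message records, appended back to back
	wp   int      // write position into buf
	idx  []uint32 // byte offset of each slot's record in buf; high bits carry flags
	fseq uint64   // sequence number of the first message the cache covers
	nra  bool     // controls buffer recycling (exact semantics not shown in this diff)
}

With those offsets, the final record's length is simply `uint32(len(buf))` minus the final masked offset, which is exactly the fallback the reworked slotInfo below computes.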
@@ -5071,7 +5070,10 @@ func (fs *fileStore) removeMsg(seq uint64, secure, viaLimits, needFSLock bool) (
 	// If erase but block is empty, we can simply remove the block later.
 	if secure && !isEmpty {
 		// Grab record info, but use the pre-computed record length.
-		ri, _, _, _ := mb.slotInfo(int(seq - mb.cache.fseq))
+		ri, _, _, err := mb.slotInfo(int(seq - mb.cache.fseq))
+		if err != nil {
+			return false, err
+		}
 		if err := mb.eraseMsg(seq, int(ri), int(msz), isLastBlock); err != nil {
 			mb.finishedWithCache()
 			return false, err
@@ -5316,7 +5318,14 @@ func (mb *msgBlock) compactWithFloor(floor uint64) {
 // Grab info from a slot.
 // Lock should be held.
 func (mb *msgBlock) slotInfo(slot int) (uint32, uint32, bool, error) {
-	if slot < 0 || mb.cache == nil || slot >= len(mb.cache.idx) {
+	switch {
+	case mb.cache == nil: // Shouldn't be possible, but check it anyway.
+		return 0, 0, false, errNoCache
+	case slot < 0:
+		mb.fs.warn("Partial cache: offset slot index %d is less than zero", slot)
+		return 0, 0, false, errPartialCache
+	case slot >= len(mb.cache.idx):
+		mb.fs.warn("Partial cache: offset slot index %d is greater than index len %d", slot, len(mb.cache.idx))
 		return 0, 0, false, errPartialCache
 	}
 
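Splitting the combined bounds check into a switch does two things: a nil cache now returns the distinct `errNoCache` sentinel instead of masquerading as a partial cache, and each out-of-range case logs which invariant broke. A minimal sketch of how a caller could use that distinction; the sentinel names come from the diff, but the messages and the reload-and-retry logic below are illustrative, not the filestore's actual recovery path:

package main

import (
	"errors"
	"fmt"
)

var (
	errNoCache      = errors.New("no message block cache")      // illustrative message
	errPartialCache = errors.New("partial message block cache") // illustrative message
)

// lookupWithReload retries a lookup once after reloading when the error
// says the cache was missing or incomplete.
func lookupWithReload(get, load func() error) error {
	err := get()
	if errors.Is(err, errNoCache) || errors.Is(err, errPartialCache) {
		if lerr := load(); lerr != nil {
			return lerr
		}
		return get()
	}
	return err
}

func main() {
	calls := 0
	get := func() error {
		if calls++; calls == 1 {
			return errPartialCache // first attempt sees an incomplete cache
		}
		return nil
	}
	load := func() error { return nil } // pretend to reload the block
	fmt.Println(lookupWithReload(get, load)) // <nil> after one reload and retry
}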
@@ -5330,24 +5339,20 @@ func (mb *msgBlock) slotInfo(slot int) (uint32, uint32, bool, error) {
 
 	// Determine record length
 	var rl uint32
-	if slot >= len(mb.cache.idx) {
-		rl = mb.cache.lrl
-	} else {
-		// Need to account for dbit markers in idx.
-		// So we will walk until we find valid idx slot to calculate rl.
-		for i := 1; slot+i < len(mb.cache.idx); i++ {
-			ni := mb.cache.idx[slot+i] &^ cbit
-			if ni == dbit {
-				continue
-			}
-			rl = ni - ri
-			break
-		}
-		// check if we had all trailing dbits.
-		// If so use len of cache buf minus ri.
-		if rl == 0 {
-			rl = uint32(len(mb.cache.buf)) - ri
+	// Need to account for dbit markers in idx.
+	// So we will walk until we find valid idx slot to calculate rl.
+	for i := 1; slot+i < len(mb.cache.idx); i++ {
+		ni := mb.cache.idx[slot+i] &^ cbit
+		if ni == dbit {
+			continue
 		}
+		rl = ni - ri
+		break
+	}
+	// check if we had all trailing dbits.
+	// If so use len of cache buf minus ri.
+	if rl == 0 {
+		rl = uint32(len(mb.cache.buf)) - ri
 	}
 	if rl < msgHdrSize {
 		return 0, 0, false, errBadMsg{mb.mfn, fmt.Sprintf("length too short for slot %d", slot)}
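With the `lrl` shortcut gone, this walk over `idx` is the single code path for record length. From the code, `dbit` entries appear to be tombstones for deleted slots, and `cbit` a flag bit masked off before an entry is used as an offset. A toy version of the walk, runnable in isolation; the cbit/dbit values are stand-ins, the real constants are defined elsewhere in filestore.go:

package main

import "fmt"

const (
	cbit = 1 << 31 // stand-in: flag bit masked off every idx entry
	dbit = 1 << 30 // stand-in: entry equal to dbit marks a deleted slot
)

// recordLen mirrors slotInfo's loop: skip dbit tombstones, subtract this
// record's offset from the next live one, and if every trailing slot was
// deleted, run to the end of the buffer.
func recordLen(idx []uint32, bufLen, slot int) uint32 {
	ri := idx[slot] &^ cbit
	var rl uint32
	for i := 1; slot+i < len(idx); i++ {
		ni := idx[slot+i] &^ cbit
		if ni == dbit {
			continue
		}
		rl = ni - ri
		break
	}
	if rl == 0 {
		rl = uint32(bufLen) - ri
	}
	return rl
}

func main() {
	idx := []uint32{0, dbit, 50}       // slot 1 was deleted
	fmt.Println(recordLen(idx, 80, 0)) // 50: tombstone skipped
	fmt.Println(recordLen(idx, 80, 2)) // 30: last record runs to end of buf
}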
@@ -5772,10 +5777,10 @@ func (mb *msgBlock) tryForceExpireCache() {
 
 // We will attempt to force expire this by temporarily clearing the last load time.
 func (mb *msgBlock) tryForceExpireCacheLocked() {
-	llts := mb.llts
-	mb.llts = 0
+	llts, lwts := mb.llts, mb.lwts
+	mb.llts, mb.lwts = 0, 0
 	mb.expireCacheLocked()
-	mb.llts, mb.lwts = llts, lwts
+	mb.llts, mb.lwts = llts, lwts
 }
 
 // This is for expiration of the write cache, which will be partial with fip.
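The surrounding code suggests `expireCacheLocked` treats a recent last-write time (`lwts`) like a recent last-load time (`llts`): either keeps the cache alive, since a write-through cache with unflushed data must not be dropped. Clearing only `llts` therefore could not force expiry on a block with recent writes. The save/zero/restore pattern in isolation, a sketch under that assumption with toy fields rather than the filestore's:

package main

import "fmt"

type block struct {
	llts, lwts int64 // last load / last write timestamps (nanos)
}

// expire drops the cache only if the block looks idle on both axes.
// (The real expireCacheLocked compares against the current time; this
// toy just checks for zero.)
func (b *block) expire() {
	if b.llts == 0 && b.lwts == 0 {
		fmt.Println("cache expired")
	} else {
		fmt.Println("cache kept: recent activity")
	}
}

// forceExpire blanks both timestamps, expires, then restores them so
// normal expiry accounting resumes afterwards.
func (b *block) forceExpire() {
	llts, lwts := b.llts, b.lwts
	b.llts, b.lwts = 0, 0
	b.expire()
	b.llts, b.lwts = llts, lwts
}

func main() {
	b := &block{llts: 0, lwts: 12345} // recently written, never loaded
	b.expire()      // cache kept: recent activity
	b.forceExpire() // cache expired
}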
@@ -5850,6 +5855,7 @@ func (mb *msgBlock) expireCacheLocked() {
 		recycleMsgBlockBuf(mb.cache.buf)
 	}
 	mb.cache.buf = nil
+	mb.cache.idx = mb.cache.idx[:0]
 	mb.cache.wp = 0
 }
 
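Expiring the cache now also empties the index, so no code path can observe populated offsets alongside a nil buffer. Note the choice of `idx[:0]` over `nil`: it truncates in place and keeps the backing array, so the next index rebuild appends into the existing allocation instead of reallocating. The slice semantics, for reference:

package main

import "fmt"

func main() {
	idx := make([]uint32, 3, 8)
	fmt.Println(len(idx), cap(idx)) // 3 8

	// Truncate: length drops to zero, backing array (capacity) is kept,
	// so subsequent appends reuse the same allocation.
	idx = idx[:0]
	fmt.Println(len(idx), cap(idx)) // 0 8

	idx = append(idx, 42)           // no reallocation needed
	fmt.Println(len(idx), cap(idx)) // 1 8
}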
@@ -6342,7 +6348,6 @@ func (mb *msgBlock) writeMsgRecordLocked(rl, seq uint64, subj string, mhdr, msg
 	// Update write through cache.
 	// Write to msg record.
 	mb.cache.buf = append(mb.cache.buf, checksum...)
-	mb.cache.lrl = uint32(rl)
 
 	// Set cache timestamp for last store.
 	mb.lwts = ts
@@ -7051,7 +7056,6 @@ func (mb *msgBlock) indexCacheBuf(buf []byte) error {
 		}
 		// Add to our index.
 		idx = append(idx, index)
-		mb.cache.lrl = uint32(rl)
 		// Adjust if we guessed wrong.
 		if seq != 0 && seq < fseq {
 			fseq = seq
@@ -7599,7 +7603,7 @@ func (mb *msgBlock) cacheLookupEx(seq uint64, sm *StoreMsg, doCopy bool) (*Store
 	}
 	// Check partial cache status.
 	if seq < mb.cache.fseq {
-		mb.fs.warn("Cache lookup detected partial cache: seq %d vs cache fseq %d", seq, mb.cache.fseq)
+		mb.fs.warn("Partial cache: seq %d is less than cache fseq %d", seq, mb.cache.fseq)
 		return nil, errPartialCache
 	}
 
@@ -7613,6 +7617,7 @@ func (mb *msgBlock) cacheLookupEx(seq uint64, sm *StoreMsg, doCopy bool) (*Store
 
 	li := int(bi)
 	if li >= len(mb.cache.buf) {
+		mb.fs.warn("Partial cache: slot index %d is greater than or equal to cache buffer len %d", li, len(mb.cache.buf))
 		return nil, errPartialCache
 	}
 	buf := mb.cache.buf[li:]
@@ -7635,8 +7640,7 @@ func (mb *msgBlock) cacheLookupEx(seq uint64, sm *StoreMsg, doCopy bool) (*Store
 	}
 
 	if seq != fsm.seq { // See TestFileStoreInvalidIndexesRebuilt.
-		recycleMsgBlockBuf(mb.cache.buf)
-		mb.cache.buf = nil
+		mb.tryForceExpireCacheLocked()
 		return nil, fmt.Errorf("sequence numbers for cache load did not match, %d vs %d", seq, fsm.seq)
 	}
 
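The old mismatch path recycled and nilled the buffer by hand but left `idx` and the cache timestamps untouched; with the `expireCacheLocked` change above, routing through `tryForceExpireCacheLocked` gives one teardown path that also truncates the index. The general shape of the fix, sketched with toy types rather than the filestore's:

package main

import "fmt"

type cache struct {
	buf []byte
	idx []uint32
}

// reset is the single teardown path; any new invalidation invariant
// (like truncating idx) added here holds for every caller.
func (c *cache) reset() {
	c.buf = nil
	c.idx = c.idx[:0]
}

func main() {
	c := &cache{buf: make([]byte, 64), idx: []uint32{0, 16, 32}}
	// On detecting corruption, drop the whole cache in one call rather
	// than clearing fields piecemeal at each error site.
	c.reset()
	fmt.Println(len(c.buf), len(c.idx)) // 0 0
}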