@@ -2,7 +2,15 @@ use crate::Error;
2
2
use lru:: LruCache ;
3
3
use std:: collections:: { BTreeMap , HashMap , HashSet } ;
4
4
use std:: num:: NonZeroUsize ;
5
- use types:: { BeaconState , EthSpec , Hash256 , Slot } ;
5
+ use types:: { BeaconState , Epoch , EthSpec , Hash256 , Slot } ;
6
+
7
/// Fraction of the LRU cache to leave intact during culling.
///
/// The exempt portion is `CULL_EXEMPT_NUMERATOR / CULL_EXEMPT_DENOMINATOR` of the cache's
/// current length, with a floor of one state (see `cull`).
// NOTE(review): the exempt states are the ones at the front of `LruCache::iter` — presumably
// the most-recently-used end; confirm against the `lru` crate version in use.
const CULL_EXEMPT_NUMERATOR: usize = 1;
const CULL_EXEMPT_DENOMINATOR: usize = 10;

/// States that are less than or equal to this many epochs old *could* become finalized and will not
/// be culled from the cache.
const EPOCH_FINALIZATION_LIMIT: u64 = 4;
6
14
7
15
#[ derive( Debug ) ]
8
16
pub struct FinalizedState < E : EthSpec > {
@@ -27,6 +35,8 @@ pub struct StateCache<E: EthSpec> {
27
35
finalized_state : Option < FinalizedState < E > > ,
28
36
states : LruCache < Hash256 , BeaconState < E > > ,
29
37
block_map : BlockMap ,
38
+ capacity : NonZeroUsize ,
39
+ max_epoch : Epoch ,
30
40
}
31
41
32
42
#[ derive( Debug ) ]
@@ -42,6 +52,8 @@ impl<E: EthSpec> StateCache<E> {
42
52
finalized_state : None ,
43
53
states : LruCache :: new ( capacity) ,
44
54
block_map : BlockMap :: default ( ) ,
55
+ capacity,
56
+ max_epoch : Epoch :: new ( 0 ) ,
45
57
}
46
58
}
47
59
@@ -115,6 +127,14 @@ impl<E: EthSpec> StateCache<E> {
115
127
} ) ;
116
128
}
117
129
130
+ // Update the cache's idea of the max epoch.
131
+ self . max_epoch = std:: cmp:: max ( state. current_epoch ( ) , self . max_epoch ) ;
132
+
133
+ // If the cache is full, use the custom cull routine to make room.
134
+ if let Some ( over_capacity) = self . len ( ) . checked_sub ( self . capacity . get ( ) ) {
135
+ self . cull ( over_capacity + 1 ) ;
136
+ }
137
+
118
138
// Insert the full state into the cache.
119
139
self . states . put ( state_root, state. clone ( ) ) ;
120
140
@@ -166,6 +186,60 @@ impl<E: EthSpec> StateCache<E> {
166
186
}
167
187
}
168
188
}
189
+
190
+ /// Cull approximately `count` states from the cache.
191
+ ///
192
+ /// States are culled LRU, with the following extra order imposed:
193
+ ///
194
+ /// - Advanced states.
195
+ /// - Mid-epoch unadvanced states.
196
+ /// - Epoch-boundary states that are too old to be finalized.
197
+ /// - Epoch-boundary states that could be finalized.
198
+ pub fn cull ( & mut self , count : usize ) {
199
+ let cull_exempt = std:: cmp:: max (
200
+ 1 ,
201
+ self . len ( ) * CULL_EXEMPT_NUMERATOR / CULL_EXEMPT_DENOMINATOR ,
202
+ ) ;
203
+
204
+ // Stage 1: gather states to cull.
205
+ let mut advanced_state_roots = vec ! [ ] ;
206
+ let mut mid_epoch_state_roots = vec ! [ ] ;
207
+ let mut old_boundary_state_roots = vec ! [ ] ;
208
+ let mut good_boundary_state_roots = vec ! [ ] ;
209
+ for ( & state_root, state) in self . states . iter ( ) . skip ( cull_exempt) {
210
+ let is_advanced = state. slot ( ) > state. latest_block_header ( ) . slot ;
211
+ let is_boundary = state. slot ( ) % E :: slots_per_epoch ( ) == 0 ;
212
+ let could_finalize =
213
+ ( self . max_epoch - state. current_epoch ( ) ) <= EPOCH_FINALIZATION_LIMIT ;
214
+
215
+ if is_advanced {
216
+ advanced_state_roots. push ( state_root) ;
217
+ } else if !is_boundary {
218
+ mid_epoch_state_roots. push ( state_root) ;
219
+ } else if !could_finalize {
220
+ old_boundary_state_roots. push ( state_root) ;
221
+ } else {
222
+ good_boundary_state_roots. push ( state_root) ;
223
+ }
224
+
225
+ // Terminate early in the common case where we've already found enough junk to cull.
226
+ if advanced_state_roots. len ( ) == count {
227
+ break ;
228
+ }
229
+ }
230
+
231
+ // Stage 2: delete.
232
+ // This could probably be more efficient in how it interacts with the block map.
233
+ for state_root in advanced_state_roots
234
+ . iter ( )
235
+ . chain ( mid_epoch_state_roots. iter ( ) )
236
+ . chain ( old_boundary_state_roots. iter ( ) )
237
+ . chain ( good_boundary_state_roots. iter ( ) )
238
+ . take ( count)
239
+ {
240
+ self . delete_state ( state_root) ;
241
+ }
242
+ }
169
243
}
170
244
171
245
impl BlockMap {
0 commit comments