@@ -1252,6 +1252,42 @@ impl<A: Allocator + Clone> RawTableInner<A> {
         }
     }

+    /// Finds the position to insert something in a group.
+    #[inline]
+    unsafe fn find_insert_slot_in_group(
+        &self,
+        group: &Group,
+        probe_seq: &ProbeSeq,
+    ) -> Option<usize> {
+        let bit = group.match_empty_or_deleted().lowest_set_bit();
+
+        if likely(bit.is_some()) {
+            let mut index = (probe_seq.pos + bit.unwrap()) & self.bucket_mask;
+
+            // In tables smaller than the group width, trailing control
+            // bytes outside the range of the table are filled with
+            // EMPTY entries. These will unfortunately trigger a
+            // match, but once masked may point to a full bucket that
+            // is already occupied. We detect this situation here and
+            // perform a second scan starting at the beginning of the
+            // table. This second scan is guaranteed to find an empty
+            // slot (due to the load factor) before hitting the trailing
+            // control bytes (containing EMPTY).
+            if unlikely(is_full(*self.ctrl(index))) {
+                debug_assert!(self.bucket_mask < Group::WIDTH);
+                debug_assert_ne!(probe_seq.pos, 0);
+
+                index = Group::load_aligned(self.ctrl(0))
+                    .match_empty_or_deleted()
+                    .lowest_set_bit_nonzero()
+            }
+
+            Some(index)
+        } else {
+            None
+        }
+    }
+
     /// Searches for an element in the table,
     /// or a potential slot where that element could be inserted.
     #[inline]
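The second-scan branch is the subtle part of the new helper. Below is a small, self-contained sketch of the situation it guards against (plain arrays and hand-picked constants, not hashbrown's `Group` or `RawTableInner` types; the `0x80` "full" test, the 4-bucket layout, and the mirror bytes are illustrative assumptions): a match on a trailing EMPTY padding byte, once masked with `bucket_mask`, can alias onto a bucket that is already full, and an aligned rescan from `ctrl(0)` recovers the genuinely empty slot.

```rust
const GROUP_WIDTH: usize = 16;
const EMPTY: u8 = 0xFF; // high bit set: empty slot (illustrative value)
const FULL: u8 = 0x02; // high bit clear: occupied slot (illustrative value)

fn is_full(ctrl: u8) -> bool {
    ctrl & 0x80 == 0
}

fn main() {
    let bucket_mask = 3usize; // 4-bucket table, smaller than the group width

    // Control bytes: 4 real buckets, 12 EMPTY padding bytes, then a 4-byte
    // mirror of the real buckets so unaligned group loads can wrap around.
    let mut ctrl = [EMPTY; 4 + GROUP_WIDTH];
    for &i in &[0usize, 1, 3] {
        ctrl[i] = FULL; // buckets 0, 1 and 3 are occupied; bucket 2 is empty
        ctrl[i + GROUP_WIDTH] = FULL; // keep the mirror in sync
    }

    // A probe starting at bucket 3 loads the group ctrl[3..19]. Its first
    // EMPTY/DELETED match is the padding byte just past the table.
    let pos = 3;
    let group = &ctrl[pos..pos + GROUP_WIDTH];
    let bit = group.iter().position(|&c| !is_full(c)).unwrap();
    let index = (pos + bit) & bucket_mask;
    assert!(is_full(ctrl[index])); // the masked index aliases onto full bucket 0

    // The second, aligned scan from ctrl(0) finds the genuinely empty slot.
    let rescan = ctrl[..GROUP_WIDTH]
        .iter()
        .position(|&c| !is_full(c))
        .unwrap();
    assert_eq!(rescan, 2);
    println!("padding match aliased to bucket {index}; rescan found bucket {rescan}");
}
```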
@@ -1263,42 +1299,22 @@ impl<A: Allocator + Clone> RawTableInner<A> {
         unsafe {
             let mut tombstone = None;
             self.search(hash, eq, |group, probe_seq| {
-                let bit = group.match_empty_or_deleted().lowest_set_bit();
-
-                if likely(bit.is_some()) {
-                    let mut index = (probe_seq.pos + bit.unwrap()) & self.bucket_mask;
-
-                    // In tables smaller than the group width, trailing control
-                    // bytes outside the range of the table are filled with
-                    // EMPTY entries. These will unfortunately trigger a
-                    // match, but once masked may point to a full bucket that
-                    // is already occupied. We detect this situation here and
-                    // perform a second scan starting at the begining of the
-                    // table. This second scan is guaranteed to find an empty
-                    // slot (due to the load factor) before hitting the trailing
-                    // control bytes (containing EMPTY).
-                    if unlikely(is_full(*self.ctrl(index))) {
-                        debug_assert!(self.bucket_mask < Group::WIDTH);
-                        debug_assert_ne!(probe_seq.pos, 0);
-
-                        index = Group::load_aligned(self.ctrl(0))
-                            .match_empty_or_deleted()
-                            .lowest_set_bit_nonzero()
-                    }
+                let index = self.find_insert_slot_in_group(group, probe_seq);

+                if likely(index.is_some()) {
                     // Only stop the search if the group is empty. The element might be
                     // in a following group.
                     if likely(group.match_empty().any_bit_set()) {
                         // Use a tombstone if we found one
                         if unlikely(tombstone.is_some()) {
                             tombstone
                         } else {
-                            Some(index)
+                            index
                         }
                     } else {
                         // We found a tombstone, record it so we can return it as a potential
                         // insertion location.
-                        tombstone = Some(index);
+                        tombstone = index;
                         None
                     }
                 } else {
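For the closure in `find_or_find_insert_slot`, the important distinction is between `match_empty_or_deleted`, which yields an insertion candidate (possibly a tombstone), and `match_empty`, which alone justifies stopping the probe. The toy sketch below (plain byte arrays and made-up control-byte constants, not hashbrown's `Group` API) illustrates why a group containing only DELETED tombstones must not end the search: the key being looked up could still live in a later group, but the first tombstone is remembered as the preferred insertion slot.

```rust
const EMPTY: u8 = 0xFF; // all ones: empty slot (illustrative value)
const DELETED: u8 = 0x80; // high bit set: a tombstone (illustrative value)
const FULL: u8 = 0x05; // high bit clear: an occupied slot (illustrative value)

// Toy equivalents of Group::match_empty_or_deleted / Group::match_empty.
fn first_empty_or_deleted(group: &[u8]) -> Option<usize> {
    group.iter().position(|&c| c & 0x80 != 0)
}

fn any_empty(group: &[u8]) -> bool {
    group.iter().any(|&c| c == EMPTY)
}

fn main() {
    let only_tombstones = [FULL, DELETED, FULL, FULL];
    let has_empty = [FULL, DELETED, FULL, EMPTY];

    // A tombstone is an insertion candidate but not a reason to stop probing:
    // the key may have been pushed past this group while the slot was full.
    assert_eq!(first_empty_or_deleted(&only_tombstones), Some(1));
    assert!(!any_empty(&only_tombstones));

    // An EMPTY byte proves the key cannot exist in any later group, so the
    // search stops here and the earlier tombstone (if any) is reused.
    assert_eq!(first_empty_or_deleted(&has_empty), Some(1));
    assert!(any_empty(&has_empty));

    println!("tombstone-only group keeps probing; EMPTY ends the search");
}
```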
@@ -1330,27 +1346,10 @@ impl<A: Allocator + Clone> RawTableInner<A> {
         loop {
             unsafe {
                 let group = Group::load(self.ctrl(probe_seq.pos));
-                if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
-                    let result = (probe_seq.pos + bit) & self.bucket_mask;
-
-                    // In tables smaller than the group width, trailing control
-                    // bytes outside the range of the table are filled with
-                    // EMPTY entries. These will unfortunately trigger a
-                    // match, but once masked may point to a full bucket that
-                    // is already occupied. We detect this situation here and
-                    // perform a second scan starting at the begining of the
-                    // table. This second scan is guaranteed to find an empty
-                    // slot (due to the load factor) before hitting the trailing
-                    // control bytes (containing EMPTY).
-                    if unlikely(is_full(*self.ctrl(result))) {
-                        debug_assert!(self.bucket_mask < Group::WIDTH);
-                        debug_assert_ne!(probe_seq.pos, 0);
-                        return Group::load_aligned(self.ctrl(0))
-                            .match_empty_or_deleted()
-                            .lowest_set_bit_nonzero();
-                    }
+                let index = self.find_insert_slot_in_group(&group, &probe_seq);

-                    return result;
+                if likely(index.is_some()) {
+                    return index.unwrap();
                 }
             }
             probe_seq.move_next(self.bucket_mask);
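With the hunk above, `find_insert_slot` reduces to a probe loop around the new helper. The sketch below is a hypothetical, safe-Rust model of that control flow only (wrapped indexing instead of hashbrown's padded control array, a plain linear stride instead of the real `ProbeSeq` stride, and toy control-byte constants): load a group at the current position, ask it for a slot, and otherwise advance; the load-factor invariant guarantees an EMPTY byte exists somewhere, so the loop terminates.

```rust
const EMPTY: u8 = 0xFF; // high bit set: insertable (illustrative value)
const FULL: u8 = 0x07; // high bit clear: occupied (illustrative value)

// Toy stand-in for find_insert_slot_in_group: the first EMPTY/DELETED byte in
// the group starting at `pos`, returned as an absolute bucket index.
fn slot_in_group(ctrl: &[u8], pos: usize, width: usize, mask: usize) -> Option<usize> {
    (0..width)
        .find(|&i| ctrl[(pos + i) & mask] & 0x80 != 0)
        .map(|i| (pos + i) & mask)
}

// Toy stand-in for find_insert_slot: probe group by group until a slot appears.
fn find_insert_slot(ctrl: &[u8], start: usize, width: usize) -> usize {
    let mask = ctrl.len() - 1; // table size is a power of two
    let mut pos = start & mask;
    loop {
        if let Some(index) = slot_in_group(ctrl, pos, width, mask) {
            return index;
        }
        pos = (pos + width) & mask; // stand-in for probe_seq.move_next
    }
}

fn main() {
    // 8-bucket toy table probed in groups of 4; only bucket 6 is free.
    let mut ctrl = [FULL; 8];
    ctrl[6] = EMPTY;
    assert_eq!(find_insert_slot(&ctrl, 1, 4), 6);
    println!("insert slot: {}", find_insert_slot(&ctrl, 1, 4));
}
```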