Commit 8560169

Factor out common code to find_insert_slot_in_group
1 parent ba2db9a · commit 8560169

File tree

1 file changed: +43 −44 lines

src/raw/mod.rs

Lines changed: 43 additions & 44 deletions
@@ -1252,6 +1252,42 @@ impl<A: Allocator + Clone> RawTableInner<A> {
         }
     }

+    /// Finds the position to insert something in a group.
+    #[inline]
+    unsafe fn find_insert_slot_in_group(
+        &self,
+        group: &Group,
+        probe_seq: &ProbeSeq,
+    ) -> Option<usize> {
+        let bit = group.match_empty_or_deleted().lowest_set_bit();
+
+        if likely(bit.is_some()) {
+            let mut index = (probe_seq.pos + bit.unwrap()) & self.bucket_mask;
+
+            // In tables smaller than the group width, trailing control
+            // bytes outside the range of the table are filled with
+            // EMPTY entries. These will unfortunately trigger a
+            // match, but once masked may point to a full bucket that
+            // is already occupied. We detect this situation here and
+            // perform a second scan starting at the begining of the
+            // table. This second scan is guaranteed to find an empty
+            // slot (due to the load factor) before hitting the trailing
+            // control bytes (containing EMPTY).
+            if unlikely(is_full(*self.ctrl(index))) {
+                debug_assert!(self.bucket_mask < Group::WIDTH);
+                debug_assert_ne!(probe_seq.pos, 0);
+
+                index = Group::load_aligned(self.ctrl(0))
+                    .match_empty_or_deleted()
+                    .lowest_set_bit_nonzero()
+            }
+
+            Some(index)
+        } else {
+            None
+        }
+    }
+
     /// Searches for an element in the table,
     /// or a potential slot where that element could be inserted.
     #[inline]
@@ -1263,42 +1299,22 @@ impl<A: Allocator + Clone> RawTableInner<A> {
         unsafe {
             let mut tombstone = None;
             self.search(hash, eq, |group, probe_seq| {
-                let bit = group.match_empty_or_deleted().lowest_set_bit();
-
-                if likely(bit.is_some()) {
-                    let mut index = (probe_seq.pos + bit.unwrap()) & self.bucket_mask;
-
-                    // In tables smaller than the group width, trailing control
-                    // bytes outside the range of the table are filled with
-                    // EMPTY entries. These will unfortunately trigger a
-                    // match, but once masked may point to a full bucket that
-                    // is already occupied. We detect this situation here and
-                    // perform a second scan starting at the begining of the
-                    // table. This second scan is guaranteed to find an empty
-                    // slot (due to the load factor) before hitting the trailing
-                    // control bytes (containing EMPTY).
-                    if unlikely(is_full(*self.ctrl(index))) {
-                        debug_assert!(self.bucket_mask < Group::WIDTH);
-                        debug_assert_ne!(probe_seq.pos, 0);
-
-                        index = Group::load_aligned(self.ctrl(0))
-                            .match_empty_or_deleted()
-                            .lowest_set_bit_nonzero()
-                    }
+                let index = self.find_insert_slot_in_group(group, probe_seq);

+                if likely(index.is_some()) {
                     // Only stop the search if the group is empty. The element might be
                     // in a following group.
                     if likely(group.match_empty().any_bit_set()) {
                         // Use a tombstone if we found one
                         if unlikely(tombstone.is_some()) {
                             tombstone
                         } else {
-                            Some(index)
+                            index
                         }
                     } else {
                         // We found a tombstone, record it so we can return it as a potential
                         // insertion location.
-                        tombstone = Some(index);
+                        tombstone = index;
                         None
                     }
                 } else {
@@ -1330,27 +1346,10 @@ impl<A: Allocator + Clone> RawTableInner<A> {
         loop {
             unsafe {
                 let group = Group::load(self.ctrl(probe_seq.pos));
-                if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
-                    let result = (probe_seq.pos + bit) & self.bucket_mask;
-
-                    // In tables smaller than the group width, trailing control
-                    // bytes outside the range of the table are filled with
-                    // EMPTY entries. These will unfortunately trigger a
-                    // match, but once masked may point to a full bucket that
-                    // is already occupied. We detect this situation here and
-                    // perform a second scan starting at the begining of the
-                    // table. This second scan is guaranteed to find an empty
-                    // slot (due to the load factor) before hitting the trailing
-                    // control bytes (containing EMPTY).
-                    if unlikely(is_full(*self.ctrl(result))) {
-                        debug_assert!(self.bucket_mask < Group::WIDTH);
-                        debug_assert_ne!(probe_seq.pos, 0);
-                        return Group::load_aligned(self.ctrl(0))
-                            .match_empty_or_deleted()
-                            .lowest_set_bit_nonzero();
-                    }
+                let index = self.find_insert_slot_in_group(&group, &probe_seq);

-                    return result;
+                if likely(index.is_some()) {
+                    return index.unwrap();
                 }
             }
             probe_seq.move_next(self.bucket_mask);
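
For illustration, here is a minimal standalone sketch (not part of the commit) of the logic that find_insert_slot_in_group now encapsulates. It replaces hashbrown's SIMD Group type, ProbeSeq, and control-byte internals with a plain byte slice and ordinary loops; the EMPTY value, the is_full helper, and the layout assumption (bucket_mask + 1 real control bytes followed by group_width trailing bytes) are simplifications for this sketch only, not the crate's actual internals.

// A model of the slot-finding logic: scan a "group" of control bytes starting
// at `pos` for an EMPTY or DELETED byte, and fall back to a second scan from
// the start of the table when the masked index lands on a full bucket (which
// can only happen when the table is smaller than the group width).

const EMPTY: u8 = 0xFF; // simplified stand-in for an EMPTY control byte

// A control byte marks a full bucket when its high bit is clear.
fn is_full(ctrl: u8) -> bool {
    ctrl & 0x80 == 0
}

// `ctrl` is assumed to hold `bucket_mask + 1` real control bytes followed by
// `group_width` trailing bytes, so reading a whole group at `pos` stays in bounds.
fn find_insert_slot_in_group(
    ctrl: &[u8],
    pos: usize,
    bucket_mask: usize,
    group_width: usize,
) -> Option<usize> {
    // Counterpart of group.match_empty_or_deleted().lowest_set_bit().
    let bit = (0..group_width).find(|&i| !is_full(ctrl[pos + i]))?;
    let mut index = (pos + bit) & bucket_mask;

    // A trailing EMPTY byte can trigger a match that, once masked, points at a
    // bucket that is already occupied; rescan from the start of the table.
    if is_full(ctrl[index]) {
        debug_assert!(bucket_mask < group_width);
        index = (0..group_width)
            .find(|&i| !is_full(ctrl[i]))
            .expect("the load factor guarantees a free slot");
    }
    Some(index)
}

fn main() {
    // A 4-bucket table (bucket_mask = 3) with a 16-wide group: buckets 0, 2
    // and 3 are full, bucket 1 and all trailing bytes are EMPTY.
    let mut ctrl = vec![EMPTY; 4 + 16];
    ctrl[0] = 0x01;
    ctrl[2] = 0x02;
    ctrl[3] = 0x03;

    // Probing from pos = 2: the first match is a trailing EMPTY byte at
    // offset 2, which masks back to the already-full bucket 0, so the second
    // scan returns bucket 1 instead.
    assert_eq!(find_insert_slot_in_group(&ctrl, 2, 3, 16), Some(1));
}

The main function exercises exactly the corner case the code comment in the diff describes: in a table smaller than the group width, a trailing EMPTY byte matches, the masked index points at a bucket that is already occupied, and the second scan starting at the beginning of the table finds the genuinely free slot.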

0 commit comments
