@@ -1775,7 +1775,31 @@ impl<T, A: Allocator> Vec<T, A> {
             return;
         }

-        /* INVARIANT: vec.len() > read >= write > write-1 >= 0 */
+        // Check if we ever want to remove anything.
+        // This allows us to use `copy_nonoverlapping` in the next cycle
+        // and avoids any memory writes if we don't need to remove anything.
+        let mut possible_remove_idx = 1;
+        let start = self.as_mut_ptr();
+        while possible_remove_idx < len {
+            let found_duplicate = unsafe {
+                // SAFETY: possible_remove_idx is always in range [1..len).
+                // Note that we start iteration from 1, so we never overflow.
+                let prev = start.add(possible_remove_idx.wrapping_sub(1));
+                let current = start.add(possible_remove_idx);
+                // We explicitly say in the docs that references are reversed.
+                same_bucket(&mut *current, &mut *prev)
+            };
+            if found_duplicate {
+                break;
+            }
+            possible_remove_idx += 1;
+        }
+        // No need to remove anything.
+        if possible_remove_idx >= len {
+            return;
+        }
+
+        /* INVARIANT: vec.len() > read > write > write-1 >= 0 */
         struct FillGapOnDrop<'a, T, A: core::alloc::Allocator> {
             /* Offset of the element we want to check if it is duplicate */
             read: usize,
@@ -1821,31 +1845,39 @@ impl<T, A: Allocator> Vec<T, A> {
             }
         }

-        let mut gap = FillGapOnDrop { read: 1, write: 1, vec: self };
-        let ptr = gap.vec.as_mut_ptr();
-
         /* Drop items while going through Vec, it should be more efficient than
          * doing slice partition_dedup + truncate */

+        // Construct the gap first and only then drop the item, to avoid memory corruption if `T::drop` panics.
+        let mut gap =
+            FillGapOnDrop { read: possible_remove_idx + 1, write: possible_remove_idx, vec: self };
+        unsafe {
+            // SAFETY: we checked above that possible_remove_idx < len.
+            // If the drop panics, `gap` will remove this item without dropping it again.
+            ptr::drop_in_place(start.add(possible_remove_idx));
+        }
+
         /* SAFETY: Because of the invariant, read_ptr, prev_ptr and write_ptr
          * are always in-bounds and read_ptr never aliases prev_ptr */
         unsafe {
             while gap.read < len {
-                let read_ptr = ptr.add(gap.read);
-                let prev_ptr = ptr.add(gap.write.wrapping_sub(1));
+                let read_ptr = start.add(gap.read);
+                let prev_ptr = start.add(gap.write.wrapping_sub(1));

-                if same_bucket(&mut *read_ptr, &mut *prev_ptr) {
+                // We explicitly say in the docs that references are reversed.
+                let found_duplicate = same_bucket(&mut *read_ptr, &mut *prev_ptr);
+                if found_duplicate {
                     // Increase `gap.read` now since the drop may panic.
                     gap.read += 1;
                     /* We have found duplicate, drop it in-place */
                     ptr::drop_in_place(read_ptr);
                 } else {
-                    let write_ptr = ptr.add(gap.write);
+                    let write_ptr = start.add(gap.write);

-                    /* Because `read_ptr` can be equal to `write_ptr`, we either
-                     * have to use `copy` or conditional `copy_nonoverlapping`.
-                     * Looks like the first option is faster. */
-                    ptr::copy(read_ptr, write_ptr, 1);
+                    /* read_ptr cannot be equal to write_ptr, because we are
+                     * guaranteed to have skipped at least one element before
+                     * the loop starts. */
+                    ptr::copy_nonoverlapping(read_ptr, write_ptr, 1);

                     /* We have filled that place, so go further */
                     gap.write += 1;
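For reference, the user-visible behavior this loop implements is the one documented on `Vec::dedup_by`: the closure receives the elements in reverse order, `same_bucket(current, previous)`, which is what the "references are reversed" comments above refer to. This usage example is adapted from the standard library docs:

```rust
fn main() {
    let mut vec = vec!["foo", "bar", "Bar", "baz", "bar"];

    // `a` is the later element (the removal candidate), `b` the earlier one.
    vec.dedup_by(|a, b| a.eq_ignore_ascii_case(b));

    // Only *consecutive* duplicates are removed; the trailing "bar" stays.
    assert_eq!(vec, ["foo", "bar", "baz", "bar"]);
}
```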
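The control flow of the patch can also be shown without raw pointers. The sketch below is a hypothetical safe-Rust rendering of the same two-phase idea, not the real implementation: `dedup_by_sketch` is an assumed helper name, the closure takes `&T` rather than the real `&mut T`, and `swap` plus `truncate` stand in for `copy_nonoverlapping`, `drop_in_place`, and the `FillGapOnDrop` panic guard.

```rust
/// Two-phase dedup sketch: phase 1 scans for the first duplicate without
/// performing any writes; phase 2 compacts the remaining elements.
fn dedup_by_sketch<T>(v: &mut Vec<T>, mut same_bucket: impl FnMut(&T, &T) -> bool) {
    let len = v.len();
    if len <= 1 {
        return;
    }

    // Phase 1: find the first removable element. If there is none, return
    // without touching memory at all (the fast path added by this patch).
    let mut write = 1;
    while write < len && !same_bucket(&v[write], &v[write - 1]) {
        write += 1;
    }
    if write == len {
        return;
    }

    // Phase 2: `v[write]` is a duplicate, so every element kept from here on
    // moves into a strictly earlier slot; source and destination can never
    // alias, which is why the patch can use `copy_nonoverlapping`.
    for read in (write + 1)..len {
        if !same_bucket(&v[read], &v[write - 1]) {
            v.swap(read, write);
            write += 1;
        }
    }
    // The duplicates have been swapped into the tail; drop them all at once.
    v.truncate(write);
}

fn main() {
    let mut v = vec![1, 1, 2, 2, 3];
    dedup_by_sketch(&mut v, |a, b| a == b);
    assert_eq!(v, [1, 2, 3]);
}
```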