@@ -1775,7 +1775,32 @@ impl<T, A: Allocator> Vec<T, A> {
17751775 return ;
17761776 }
17771777
1778- /* INVARIANT: vec.len() > read >= write > write-1 >= 0 */
1778+ // Check if we ever want to remove anything.
1779+ // This allows using `copy_nonoverlapping` in the next cycle,
1780+ // and avoids any memory writes if we don't need to remove anything.
1781+ let mut first_duplicate_idx: usize = 1 ;
1782+ let start = self . as_mut_ptr ( ) ;
1783+ while first_duplicate_idx != len {
1784+ let found_duplicate = unsafe {
1785+ // SAFETY: `first_duplicate_idx` is always in the range [1..len).
1786+ // Note that we start iteration from 1 so we never overflow.
1787+ let prev = start. add ( first_duplicate_idx. wrapping_sub ( 1 ) ) ;
1788+ let current = start. add ( first_duplicate_idx) ;
1789+ // We explicitly say in docs that references are reversed.
1790+ same_bucket ( & mut * current, & mut * prev)
1791+ } ;
1792+ if found_duplicate {
1793+ break ;
1794+ }
1795+ first_duplicate_idx += 1 ;
1796+ }
1797+ // Don't need to remove anything.
1798+ // We cannot get bigger than len.
1799+ if first_duplicate_idx == len {
1800+ return ;
1801+ }
1802+
1803+ /* INVARIANT: vec.len() > read > write > write-1 >= 0 */
17791804 struct FillGapOnDrop < ' a , T , A : core:: alloc:: Allocator > {
17801805 /* Offset of the element we want to check if it is duplicate */
17811806 read : usize ,
@@ -1821,31 +1846,39 @@ impl<T, A: Allocator> Vec<T, A> {
18211846 }
18221847 }
18231848
1824- let mut gap = FillGapOnDrop { read : 1 , write : 1 , vec : self } ;
1825- let ptr = gap. vec . as_mut_ptr ( ) ;
1826-
18271849 /* Drop items while going through Vec, it should be more efficient than
18281850 * doing slice partition_dedup + truncate */
18291851
1852+ // Construct gap first and then drop item to avoid memory corruption if `T::drop` panics.
1853+ let mut gap =
1854+ FillGapOnDrop { read : first_duplicate_idx + 1 , write : first_duplicate_idx, vec : self } ;
1855+ unsafe {
1856+ // SAFETY: we checked above that `first_duplicate_idx` is in bounds.
1857+ // If the drop panics, `gap` will remove this item without dropping it again.
1858+ ptr:: drop_in_place ( start. add ( first_duplicate_idx) ) ;
1859+ }
1860+
18301861 /* SAFETY: Because of the invariant, read_ptr, prev_ptr and write_ptr
18311862 * are always in-bounds and read_ptr never aliases prev_ptr */
18321863 unsafe {
18331864 while gap. read < len {
1834- let read_ptr = ptr . add ( gap. read ) ;
1835- let prev_ptr = ptr . add ( gap. write . wrapping_sub ( 1 ) ) ;
1865+ let read_ptr = start . add ( gap. read ) ;
1866+ let prev_ptr = start . add ( gap. write . wrapping_sub ( 1 ) ) ;
18361867
1837- if same_bucket ( & mut * read_ptr, & mut * prev_ptr) {
1868+ // We explicitly say in docs that references are reversed.
1869+ let found_duplicate = same_bucket ( & mut * read_ptr, & mut * prev_ptr) ;
1870+ if found_duplicate {
18381871 // Increase `gap.read` now since the drop may panic.
18391872 gap. read += 1 ;
18401873 /* We have found duplicate, drop it in-place */
18411874 ptr:: drop_in_place ( read_ptr) ;
18421875 } else {
1843- let write_ptr = ptr . add ( gap. write ) ;
1876+ let write_ptr = start . add ( gap. write ) ;
18441877
1845- /* Because ` read_ptr` can be equal to ` write_ptr`, we either
1846- * have to use `copy` or conditional `copy_nonoverlapping` .
1847- * Looks like the first option is faster. * /
1848- ptr:: copy ( read_ptr, write_ptr, 1 ) ;
1878+ /* read_ptr cannot be equal to write_ptr because at this point
1879+ * we are guaranteed to have skipped at least one element
1880+ * (before the loop starts). */
1881+ ptr:: copy_nonoverlapping ( read_ptr, write_ptr, 1 ) ;
18491882
18501883 /* We have filled that place, so go further */
18511884 gap. write += 1 ;
0 commit comments