@@ -1451,6 +1451,347 @@ impl<T> AtomicPtr<T> {
            }
        }
        Err(prev)

    /// Offsets the pointer's address by adding `val` (in units of `T`),
    /// returning the previous pointer.
    ///
    /// This is equivalent to using [`wrapping_add`] to atomically perform the
    /// equivalent of `ptr = ptr.wrapping_add(val);`.
    ///
    /// This method operates in units of `T`, which means that it cannot be used
    /// to offset the pointer by an amount which is not a multiple of
    /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
    /// work with a deliberately misaligned pointer. In such cases, you may use
    /// the [`fetch_byte_add`](Self::fetch_byte_add) method instead.
    ///
    /// `fetch_ptr_add` takes an [`Ordering`] argument which describes the
    /// memory ordering of this operation. All ordering modes are possible. Note
    /// that using [`Acquire`] makes the store part of this operation
    /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note**: This method is only available on platforms that support atomic
    /// operations on [`AtomicPtr`].
    ///
    /// [`wrapping_add`]: pointer::wrapping_add
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
    /// assert_eq!(atom.fetch_ptr_add(1, Ordering::Relaxed).addr(), 0);
    /// // Note: units of `size_of::<i64>()`.
    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 8);
    /// ```
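    ///
    /// A further illustrative sketch (hypothetical `slots` buffer; the pointers
    /// are only compared, never dereferenced): atomically handing out
    /// successive slots of a shared buffer.
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let mut slots = [0i64; 4];
    /// let cursor = AtomicPtr::new(slots.as_mut_ptr());
    ///
    /// // Each call atomically reserves the next `i64` slot.
    /// let first = cursor.fetch_ptr_add(1, Ordering::Relaxed);
    /// let second = cursor.fetch_ptr_add(1, Ordering::Relaxed);
    /// assert!(core::ptr::eq(first, &slots[0]));
    /// assert!(core::ptr::eq(second, &slots[1]));
    /// ```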
    #[inline]
    #[cfg(target_has_atomic = "ptr")]
    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
    pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T {
        self.fetch_byte_add(val.wrapping_mul(core::mem::size_of::<T>()), order)
    }

    /// Offsets the pointer's address by subtracting `val` (in units of `T`),
    /// returning the previous pointer.
    ///
    /// This is equivalent to using [`wrapping_sub`] to atomically perform the
    /// equivalent of `ptr = ptr.wrapping_sub(val);`.
    ///
    /// This method operates in units of `T`, which means that it cannot be used
    /// to offset the pointer by an amount which is not a multiple of
    /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
    /// work with a deliberately misaligned pointer. In such cases, you may use
    /// the [`fetch_byte_sub`](Self::fetch_byte_sub) method instead.
    ///
    /// `fetch_ptr_sub` takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation. All ordering modes are possible. Note that
    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
    /// and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note**: This method is only available on platforms that support atomic
    /// operations on [`AtomicPtr`].
    ///
    /// [`wrapping_sub`]: pointer::wrapping_sub
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let array = [1i32, 2i32];
    /// let atom = AtomicPtr::new(array.as_ptr().wrapping_add(1) as *mut _);
    ///
    /// assert!(core::ptr::eq(
    ///     atom.fetch_ptr_sub(1, Ordering::Relaxed),
    ///     &array[1],
    /// ));
    /// assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &array[0]));
    /// ```
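    ///
    /// A further illustrative sketch (hypothetical `buf`; the pointer is only
    /// compared, never dereferenced): stepping back from a one-past-the-end
    /// pointer.
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let buf = [0u32; 4];
    /// // Start at the one-past-the-end pointer.
    /// let top = AtomicPtr::new(buf.as_ptr().wrapping_add(buf.len()) as *mut u32);
    ///
    /// top.fetch_ptr_sub(1, Ordering::Relaxed);
    /// assert!(core::ptr::eq(top.load(Ordering::Relaxed), &buf[3]));
    /// ```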
    #[inline]
    #[cfg(target_has_atomic = "ptr")]
    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
    pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T {
        self.fetch_byte_sub(val.wrapping_mul(core::mem::size_of::<T>()), order)
    }

    /// Offsets the pointer's address by adding `val` *bytes*, returning the
    /// previous pointer.
    ///
    /// This is equivalent to using [`wrapping_add`] and [`cast`] to atomically
    /// perform `ptr = ptr.cast::<u8>().wrapping_add(val).cast::<T>()`.
    ///
    /// `fetch_byte_add` takes an [`Ordering`] argument which describes the
    /// memory ordering of this operation. All ordering modes are possible. Note
    /// that using [`Acquire`] makes the store part of this operation
    /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note**: This method is only available on platforms that support atomic
    /// operations on [`AtomicPtr`].
    ///
    /// [`wrapping_add`]: pointer::wrapping_add
    /// [`cast`]: pointer::cast
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
    /// assert_eq!(atom.fetch_byte_add(1, Ordering::Relaxed).addr(), 0);
    /// // Note: in units of bytes, not `size_of::<i64>()`.
    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 1);
    /// ```
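    ///
    /// A further illustrative sketch: offsetting by 3 bytes, which is not a
    /// multiple of `size_of::<u16>()` and so could not be expressed with
    /// [`fetch_ptr_add`](Self::fetch_ptr_add).
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let atom = AtomicPtr::<u16>::new(core::ptr::null_mut());
    /// atom.fetch_byte_add(3, Ordering::Relaxed);
    /// // The pointer is now deliberately misaligned for `u16`.
    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 3);
    /// ```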
    #[inline]
    #[cfg(target_has_atomic = "ptr")]
    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
    pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
        #[cfg(not(bootstrap))]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_add(self.p.get(), core::ptr::invalid_mut(val), order).cast()
        }
        #[cfg(bootstrap)]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_add(self.p.get().cast::<usize>(), val, order) as *mut T
        }
    }

    /// Offsets the pointer's address by subtracting `val` *bytes*, returning the
    /// previous pointer.
    ///
    /// This is equivalent to using [`wrapping_sub`] and [`cast`] to atomically
    /// perform `ptr = ptr.cast::<u8>().wrapping_sub(val).cast::<T>()`.
    ///
    /// `fetch_byte_sub` takes an [`Ordering`] argument which describes the
    /// memory ordering of this operation. All ordering modes are possible. Note
    /// that using [`Acquire`] makes the store part of this operation
    /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note**: This method is only available on platforms that support atomic
    /// operations on [`AtomicPtr`].
    ///
    /// [`wrapping_sub`]: pointer::wrapping_sub
    /// [`cast`]: pointer::cast
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let atom = AtomicPtr::<i64>::new(core::ptr::invalid_mut(1));
    /// assert_eq!(atom.fetch_byte_sub(1, Ordering::Relaxed).addr(), 1);
    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 0);
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "ptr")]
    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
    pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
        #[cfg(not(bootstrap))]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_sub(self.p.get(), core::ptr::invalid_mut(val), order).cast()
        }
        #[cfg(bootstrap)]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_sub(self.p.get().cast::<usize>(), val, order) as *mut T
        }
    }

    /// Performs a bitwise "or" operation on the address of the current pointer,
    /// and the argument `val`, and stores a pointer with provenance of the
    /// current pointer and the resulting address.
    ///
    /// This is equivalent to using [`map_addr`] to atomically perform
    /// `ptr = ptr.map_addr(|a| a | val)`. This can be used in tagged pointer
    /// schemes to atomically set tag bits.
    ///
    /// **Caveat**: This operation returns the previous value. To compute the
    /// stored value without losing provenance, you may use [`map_addr`]. For
    /// example: `a.fetch_or(val).map_addr(|a| a | val)`.
    ///
    /// `fetch_or` takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation. All ordering modes are possible. Note that
    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
    /// and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note**: This method is only available on platforms that support atomic
    /// operations on [`AtomicPtr`].
    ///
    /// This API and its claimed semantics are part of the Strict Provenance
    /// experiment, see the [module documentation for `ptr`][crate::ptr] for
    /// details.
    ///
    /// [`map_addr`]: pointer::map_addr
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let pointer = &mut 3i64 as *mut i64;
    ///
    /// let atom = AtomicPtr::<i64>::new(pointer);
    /// // Tag the bottom bit of the pointer.
    /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 0);
    /// // Extract and untag.
    /// let tagged = atom.load(Ordering::Relaxed);
    /// assert_eq!(tagged.addr() & 1, 1);
    /// assert_eq!(tagged.map_addr(|p| p & !1), pointer);
    /// ```
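    ///
    /// A further illustrative sketch of the caveat above: recovering the newly
    /// stored (tagged) pointer, with its provenance, from the returned
    /// previous value.
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let pointer = &mut 3i64 as *mut i64;
    /// let atom = AtomicPtr::<i64>::new(pointer);
    ///
    /// // The stored value is the previous value with the tag bit set.
    /// let stored = atom.fetch_or(1, Ordering::Relaxed).map_addr(|a| a | 1);
    /// assert_eq!(stored.addr(), atom.load(Ordering::Relaxed).addr());
    /// ```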
    #[inline]
    #[cfg(target_has_atomic = "ptr")]
    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
    pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
        #[cfg(not(bootstrap))]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_or(self.p.get(), core::ptr::invalid_mut(val), order).cast()
        }
        #[cfg(bootstrap)]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_or(self.p.get().cast::<usize>(), val, order) as *mut T
        }
    }

    /// Performs a bitwise "and" operation on the address of the current
    /// pointer, and the argument `val`, and stores a pointer with provenance of
    /// the current pointer and the resulting address.
    ///
    /// This is equivalent to using [`map_addr`] to atomically perform
    /// `ptr = ptr.map_addr(|a| a & val)`. This can be used in tagged pointer
    /// schemes to atomically unset tag bits.
    ///
    /// **Caveat**: This operation returns the previous value. To compute the
    /// stored value without losing provenance, you may use [`map_addr`]. For
    /// example: `a.fetch_and(val).map_addr(|a| a & val)`.
    ///
    /// `fetch_and` takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation. All ordering modes are possible. Note that
    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
    /// and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note**: This method is only available on platforms that support atomic
    /// operations on [`AtomicPtr`].
    ///
    /// This API and its claimed semantics are part of the Strict Provenance
    /// experiment, see the [module documentation for `ptr`][crate::ptr] for
    /// details.
    ///
    /// [`map_addr`]: pointer::map_addr
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let pointer = &mut 3i64 as *mut i64;
    /// // A tagged pointer
    /// let atom = AtomicPtr::<i64>::new(pointer.map_addr(|a| a | 1));
    /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 1);
    /// // Untag, and extract the previously tagged pointer.
    /// let untagged = atom.fetch_and(!1, Ordering::Relaxed)
    ///     .map_addr(|a| a & !1);
    /// assert_eq!(untagged, pointer);
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "ptr")]
    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
    pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
        #[cfg(not(bootstrap))]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_and(self.p.get(), core::ptr::invalid_mut(val), order).cast()
        }
        #[cfg(bootstrap)]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_and(self.p.get().cast::<usize>(), val, order) as *mut T
        }
    }

    /// Performs a bitwise "xor" operation on the address of the current
    /// pointer, and the argument `val`, and stores a pointer with provenance of
    /// the current pointer and the resulting address.
    ///
    /// This is equivalent to using [`map_addr`] to atomically perform
    /// `ptr = ptr.map_addr(|a| a ^ val)`. This can be used in tagged pointer
    /// schemes to atomically toggle tag bits.
    ///
    /// **Caveat**: This operation returns the previous value. To compute the
    /// stored value without losing provenance, you may use [`map_addr`]. For
    /// example: `a.fetch_xor(val).map_addr(|a| a ^ val)`.
    ///
    /// `fetch_xor` takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation. All ordering modes are possible. Note that
    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
    /// and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note**: This method is only available on platforms that support atomic
    /// operations on [`AtomicPtr`].
    ///
    /// This API and its claimed semantics are part of the Strict Provenance
    /// experiment, see the [module documentation for `ptr`][crate::ptr] for
    /// details.
    ///
    /// [`map_addr`]: pointer::map_addr
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let pointer = &mut 3i64 as *mut i64;
    /// let atom = AtomicPtr::<i64>::new(pointer);
    ///
    /// // Toggle a tag bit on the pointer.
    /// atom.fetch_xor(1, Ordering::Relaxed);
    /// assert_eq!(atom.load(Ordering::Relaxed).addr() & 1, 1);
    /// ```
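    ///
    /// A further illustrative sketch: toggling the same bit a second time
    /// restores the original address.
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let pointer = &mut 3i64 as *mut i64;
    /// let atom = AtomicPtr::<i64>::new(pointer);
    ///
    /// atom.fetch_xor(1, Ordering::Relaxed); // set the tag bit
    /// atom.fetch_xor(1, Ordering::Relaxed); // toggle it back off
    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), pointer.addr());
    /// ```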
    #[inline]
    #[cfg(target_has_atomic = "ptr")]
    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
    pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
        #[cfg(not(bootstrap))]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_xor(self.p.get(), core::ptr::invalid_mut(val), order).cast()
        }
        #[cfg(bootstrap)]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_xor(self.p.get().cast::<usize>(), val, order) as *mut T
        }
    }
}

#[cfg(target_has_atomic_load_store = "8")]