// option. This file may not be copied, modified, or distributed
// except according to those terms.

- /*!
- An attempt to move all intrinsic declarations to a single place,
- as mentioned in #3369
- The intrinsics are defined in librustc/middle/trans/foreign.rs.
+ /*! rustc compiler intrinsics.
+
+ The corresponding definitions are in librustc/middle/trans/foreign.rs.
+
+ # Atomics
+
+ The atomic intrinsics provide common atomic operations on machine
+ words, with multiple possible memory orderings. They obey the same
+ semantics as C++0x. See the LLVM documentation on [[atomics]].
+
+ [atomics]: http://llvm.org/docs/Atomics.html
+
+ A quick refresher on memory ordering:
+
+ * Acquire - a barrier for acquiring a lock. Subsequent reads and writes
+   take place after the barrier.
+ * Release - a barrier for releasing a lock. Preceding reads and writes
+   take place before the barrier.
+ * Sequentially consistent - sequentially consistent operations are
+   guaranteed to happen in order. This is the standard mode for working
+   with atomic types and is equivalent to Java's `volatile`.
+
*/
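
// A minimal usage sketch, assuming the semantics documented above;
// `atomic_incr` is a hypothetical helper, not one of the intrinsics.
unsafe fn atomic_incr(dst: &mut int) -> int {
    loop {
        // Read the current value with sequentially consistent ordering.
        let old = atomic_load(&*dst);
        let new = old + 1;
        // `atomic_cxchg` returns the value it actually found in `*dst`;
        // the swap only took effect if that matches `old`, otherwise retry.
        if atomic_cxchg(dst, old, new) == old {
            return new;
        }
    }
}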

#[abi = "rust-intrinsic"]
pub extern "rust-intrinsic" {
+
+ /// Atomic compare and exchange, sequentially consistent.
pub fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int;
+ /// Atomic compare and exchange, acquire ordering.
pub fn atomic_cxchg_acq(dst: &mut int, old: int, src: int) -> int;
+ /// Atomic compare and exchange, release ordering.
pub fn atomic_cxchg_rel(dst: &mut int, old: int, src: int) -> int;

+ /// Atomic load, sequentially consistent.
#[cfg(not(stage0))]
pub fn atomic_load(src: &int) -> int;
+ /// Atomic load, acquire ordering.
#[cfg(not(stage0))]
pub fn atomic_load_acq(src: &int) -> int;

+ /// Atomic store, sequentially consistent.
#[cfg(not(stage0))]
pub fn atomic_store(dst: &mut int, val: int);
+ /// Atomic store, release ordering.
#[cfg(not(stage0))]
pub fn atomic_store_rel(dst: &mut int, val: int);

+ /// Atomic exchange, sequentially consistent.
pub fn atomic_xchg(dst: &mut int, src: int) -> int;
+ /// Atomic exchange, acquire ordering.
pub fn atomic_xchg_acq(dst: &mut int, src: int) -> int;
+ /// Atomic exchange, release ordering.
pub fn atomic_xchg_rel(dst: &mut int, src: int) -> int;

+ /// Atomic addition, sequentially consistent.
pub fn atomic_xadd(dst: &mut int, src: int) -> int;
+ /// Atomic addition, acquire ordering.
pub fn atomic_xadd_acq(dst: &mut int, src: int) -> int;
+ /// Atomic addition, release ordering.
pub fn atomic_xadd_rel(dst: &mut int, src: int) -> int;

+ /// Atomic subtraction, sequentially consistent.
pub fn atomic_xsub(dst: &mut int, src: int) -> int;
+ /// Atomic subtraction, acquire ordering.
pub fn atomic_xsub_acq(dst: &mut int, src: int) -> int;
+ /// Atomic subtraction, release ordering.
pub fn atomic_xsub_rel(dst: &mut int, src: int) -> int;

+ /// The size of a type in bytes.
+ ///
+ /// This is the exact number of bytes in memory taken up by a
+ /// value of the given type. In other words, a memset of this size
+ /// would *exactly* overwrite a value. When laid out in vectors
+ /// and structures there may be additional padding between
+ /// elements.
pub fn size_of<T>() -> uint;
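
// A minimal sketch of what the numbers mean, assuming a typical 64-bit
// layout (the exact values are a platform assumption, not a guarantee);
// `Pair` and `pair_sizes` are illustrative names, not part of this API.
struct Pair { a: u8, b: u64 }

fn pair_sizes() -> (uint, uint) {
    unsafe {
        // Returns (1, 16) on such a target: `a` occupies one byte, and
        // 7 bytes of padding align `b` to 8 bytes inside the struct.
        (size_of::<u8>(), size_of::<Pair>())
    }
}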

+ /// Move a value to a memory location containing a value.
+ ///
+ /// Drop glue is run on the destination, which must contain a
+ /// valid Rust value.
pub fn move_val<T>(dst: &mut T, src: T);
+
+ /// Move a value to an uninitialized memory location.
+ ///
+ /// Drop glue is not run on the destination.
pub fn move_val_init<T>(dst: &mut T, src: T);
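
// A minimal sketch of the difference, assuming `~str` as the payload;
// `replace_in_place` and `fill_uninitialized` are hypothetical helpers.
unsafe fn replace_in_place(dst: &mut ~str) {
    // `*dst` already holds a valid string, so use `move_val`: the old
    // string is dropped (freed) before the new one is installed.
    move_val(dst, ~"replacement");
}

unsafe fn fill_uninitialized(dst: &mut ~str) {
    // Only correct when `*dst` does NOT hold a valid value yet:
    // `move_val_init` installs the new string without dropping
    // whatever bits were there before.
    move_val_init(dst, ~"fresh");
}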

pub fn min_align_of<T>() -> uint;
pub fn pref_align_of<T>() -> uint;

+ /// Get a static pointer to a type descriptor.
pub fn get_tydesc<T>() -> *();

- /// init is unsafe because it returns a zeroed-out datum,
+ /// Create a value initialized to zero.
+ ///
+ /// `init` is unsafe because it returns a zeroed-out datum,
/// which is unsafe unless T is POD. We don't have a POD
- /// kind yet. (See #4074)
+ /// kind yet. (See #4074).
pub unsafe fn init<T>() -> T;
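
// A minimal sketch, assuming the caller only asks for plain-old-data: an
// all-zero bit pattern is a valid value for these types, so the result can
// safely be used or dropped later. `zeroed_pair` is a hypothetical helper.
unsafe fn zeroed_pair() -> (int, u8) {
    init::<(int, u8)>()
}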

+ /// Create an uninitialized value.
#[cfg(not(stage0))]
pub unsafe fn uninit<T>() -> T;

- /// forget is unsafe because the caller is responsible for
- /// ensuring the argument is deallocated already
+ /// Move a value out of scope without running drop glue.
+ ///
+ /// `forget` is unsafe because the caller is responsible for
+ /// ensuring the argument is deallocated already.
pub unsafe fn forget<T>(_: T) -> ();
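
// A minimal sketch, assuming ownership of the box has already been handed
// to someone else (e.g. foreign code holding a raw pointer); `leak_box` is
// a hypothetical helper, not an intrinsic.
unsafe fn leak_box(x: ~int) {
    // Suppress the destructor: the allocation behind `x` is not freed
    // here, so whoever took over the pointer must free it instead.
    forget(x);
}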

+ /// Returns `true` if a type requires drop glue.
pub fn needs_drop<T>() -> bool;

// XXX: intrinsic uses legacy modes and has reference to TyDesc
@@ -72,9 +129,12 @@ pub extern "rust-intrinsic" {
// XXX: intrinsic uses legacy modes
//fn frame_address(f: &once fn(*u8));

+ /// Get the address of the `__morestack` stack growth function.
pub fn morestack_addr() -> *();

+ /// Equivalent to the `llvm.memmove.p0i8.p0i8.i32` intrinsic.
pub fn memmove32(dst: *mut u8, src: *u8, size: u32);
+ /// Equivalent to the `llvm.memmove.p0i8.p0i8.i64` intrinsic.
pub fn memmove64(dst: *mut u8, src: *u8, size: u64);
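
// A minimal sketch, assuming the caller supplies valid raw pointers and a
// byte count; `move_bytes` is a hypothetical wrapper, and like C's memmove
// the source and destination ranges are allowed to overlap.
unsafe fn move_bytes(dst: *mut u8, src: *u8, count: uint) {
    // Use the 64-bit variant so the full range of `uint` counts is
    // representable on 64-bit targets.
    memmove64(dst, src, count as u64);
}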

pub fn sqrtf32(x: f32) -> f32;