Skip to content

Commit a477805

Browse files
powerboat9 authored and dkm committed
nr2.0: Fix some paths in test files
This is similar to 9faba02, but it applies to execute tests.

gcc/testsuite/ChangeLog:

	* rust/execute/torture/for-loop1.rs: Adjust paths.
	* rust/execute/torture/for-loop2.rs: Likewise.
	* rust/execute/torture/iter1.rs: Likewise.

Signed-off-by: Owen Avery <powerboat9.gamer@gmail.com>
1 parent 8665cca commit a477805

File tree

3 files changed

+57
-57
lines changed

3 files changed

+57
-57
lines changed

gcc/testsuite/rust/execute/torture/for-loop1.rs

Lines changed: 19 additions & 19 deletions
Original file line number | Diff line number | Diff line change
@@ -102,51 +102,51 @@ mod ptr {
102102
#[lang = "const_ptr"]
103103
impl<T> *const T {
104104
pub unsafe fn offset(self, count: isize) -> *const T {
105-
intrinsics::offset(self, count)
105+
crate::intrinsics::offset(self, count)
106106
}
107107
}
108108

109109
#[lang = "mut_ptr"]
110110
impl<T> *mut T {
111111
pub unsafe fn offset(self, count: isize) -> *mut T {
112-
intrinsics::offset(self, count) as *mut T
112+
crate::intrinsics::offset(self, count) as *mut T
113113
}
114114
}
115115

116116
pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
117117
let x = x as *mut u8;
118118
let y = y as *mut u8;
119-
let len = mem::size_of::<T>() * count;
119+
let len = crate::mem::size_of::<T>() * count;
120120
swap_nonoverlapping_bytes(x, y, len)
121121
}
122122

123123
pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
124124
// For types smaller than the block optimization below,
125125
// just swap directly to avoid pessimizing codegen.
126-
if mem::size_of::<T>() < 32 {
126+
if crate::mem::size_of::<T>() < 32 {
127127
let z = read(x);
128-
intrinsics::copy_nonoverlapping(y, x, 1);
128+
crate::intrinsics::copy_nonoverlapping(y, x, 1);
129129
write(y, z);
130130
} else {
131131
swap_nonoverlapping(x, y, 1);
132132
}
133133
}
134134

135135
pub unsafe fn write<T>(dst: *mut T, src: T) {
136-
intrinsics::move_val_init(&mut *dst, src)
136+
crate::intrinsics::move_val_init(&mut *dst, src)
137137
}
138138

139139
pub unsafe fn read<T>(src: *const T) -> T {
140-
let mut tmp: T = mem::uninitialized();
141-
intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
140+
let mut tmp: T = crate::mem::uninitialized();
141+
crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
142142
tmp
143143
}
144144

145145
pub unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
146146
struct Block(u64, u64, u64, u64);
147147
struct UnalignedBlock(u64, u64, u64, u64);
148148

149-
let block_size = mem::size_of::<Block>();
149+
let block_size = crate::mem::size_of::<Block>();
150150

151151
// Loop through x & y, copying them `Block` at a time
152152
// The optimizer should unroll the loop fully for most types
@@ -155,31 +155,31 @@ mod ptr {
155155
while i + block_size <= len {
156156
// Create some uninitialized memory as scratch space
157157
// Declaring `t` here avoids aligning the stack when this loop is unused
158-
let mut t: Block = mem::uninitialized();
158+
let mut t: Block = crate::mem::uninitialized();
159159
let t = &mut t as *mut _ as *mut u8;
160160
let x = x.offset(i as isize);
161161
let y = y.offset(i as isize);
162162

163163
// Swap a block of bytes of x & y, using t as a temporary buffer
164164
// This should be optimized into efficient SIMD operations where available
165-
intrinsics::copy_nonoverlapping(x, t, block_size);
166-
intrinsics::copy_nonoverlapping(y, x, block_size);
167-
intrinsics::copy_nonoverlapping(t, y, block_size);
165+
crate::intrinsics::copy_nonoverlapping(x, t, block_size);
166+
crate::intrinsics::copy_nonoverlapping(y, x, block_size);
167+
crate::intrinsics::copy_nonoverlapping(t, y, block_size);
168168
i += block_size;
169169
}
170170

171171
if i < len {
172172
// Swap any remaining bytes
173-
let mut t: UnalignedBlock = mem::uninitialized();
173+
let mut t: UnalignedBlock = crate::mem::uninitialized();
174174
let rem = len - i;
175175

176176
let t = &mut t as *mut _ as *mut u8;
177177
let x = x.offset(i as isize);
178178
let y = y.offset(i as isize);
179179

180-
intrinsics::copy_nonoverlapping(x, t, rem);
181-
intrinsics::copy_nonoverlapping(y, x, rem);
182-
intrinsics::copy_nonoverlapping(t, y, rem);
180+
crate::intrinsics::copy_nonoverlapping(x, t, rem);
181+
crate::intrinsics::copy_nonoverlapping(y, x, rem);
182+
crate::intrinsics::copy_nonoverlapping(t, y, rem);
183183
}
184184
}
185185
}
@@ -194,7 +194,7 @@ mod mem {
194194

195195
pub fn swap<T>(x: &mut T, y: &mut T) {
196196
unsafe {
197-
ptr::swap_nonoverlapping_one(x, y);
197+
crate::ptr::swap_nonoverlapping_one(x, y);
198198
}
199199
}
200200

@@ -204,7 +204,7 @@ mod mem {
204204
}
205205

206206
pub unsafe fn uninitialized<T>() -> T {
207-
intrinsics::uninit()
207+
crate::intrinsics::uninit()
208208
}
209209
}
210210

gcc/testsuite/rust/execute/torture/for-loop2.rs

Lines changed: 19 additions & 19 deletions
Original file line number | Diff line number | Diff line change
@@ -101,51 +101,51 @@ mod ptr {
101101
#[lang = "const_ptr"]
102102
impl<T> *const T {
103103
pub unsafe fn offset(self, count: isize) -> *const T {
104-
intrinsics::offset(self, count)
104+
crate::intrinsics::offset(self, count)
105105
}
106106
}
107107

108108
#[lang = "mut_ptr"]
109109
impl<T> *mut T {
110110
pub unsafe fn offset(self, count: isize) -> *mut T {
111-
intrinsics::offset(self, count) as *mut T
111+
crate::intrinsics::offset(self, count) as *mut T
112112
}
113113
}
114114

115115
pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
116116
let x = x as *mut u8;
117117
let y = y as *mut u8;
118-
let len = mem::size_of::<T>() * count;
118+
let len = crate::mem::size_of::<T>() * count;
119119
swap_nonoverlapping_bytes(x, y, len)
120120
}
121121

122122
pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
123123
// For types smaller than the block optimization below,
124124
// just swap directly to avoid pessimizing codegen.
125-
if mem::size_of::<T>() < 32 {
125+
if crate::mem::size_of::<T>() < 32 {
126126
let z = read(x);
127-
intrinsics::copy_nonoverlapping(y, x, 1);
127+
crate::intrinsics::copy_nonoverlapping(y, x, 1);
128128
write(y, z);
129129
} else {
130130
swap_nonoverlapping(x, y, 1);
131131
}
132132
}
133133

134134
pub unsafe fn write<T>(dst: *mut T, src: T) {
135-
intrinsics::move_val_init(&mut *dst, src)
135+
crate::intrinsics::move_val_init(&mut *dst, src)
136136
}
137137

138138
pub unsafe fn read<T>(src: *const T) -> T {
139-
let mut tmp: T = mem::uninitialized();
140-
intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
139+
let mut tmp: T = crate::mem::uninitialized();
140+
crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
141141
tmp
142142
}
143143

144144
pub unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
145145
struct Block(u64, u64, u64, u64);
146146
struct UnalignedBlock(u64, u64, u64, u64);
147147

148-
let block_size = mem::size_of::<Block>();
148+
let block_size = crate::mem::size_of::<Block>();
149149

150150
// Loop through x & y, copying them `Block` at a time
151151
// The optimizer should unroll the loop fully for most types
@@ -154,31 +154,31 @@ mod ptr {
154154
while i + block_size <= len {
155155
// Create some uninitialized memory as scratch space
156156
// Declaring `t` here avoids aligning the stack when this loop is unused
157-
let mut t: Block = mem::uninitialized();
157+
let mut t: Block = crate::mem::uninitialized();
158158
let t = &mut t as *mut _ as *mut u8;
159159
let x = x.offset(i as isize);
160160
let y = y.offset(i as isize);
161161

162162
// Swap a block of bytes of x & y, using t as a temporary buffer
163163
// This should be optimized into efficient SIMD operations where available
164-
intrinsics::copy_nonoverlapping(x, t, block_size);
165-
intrinsics::copy_nonoverlapping(y, x, block_size);
166-
intrinsics::copy_nonoverlapping(t, y, block_size);
164+
crate::intrinsics::copy_nonoverlapping(x, t, block_size);
165+
crate::intrinsics::copy_nonoverlapping(y, x, block_size);
166+
crate::intrinsics::copy_nonoverlapping(t, y, block_size);
167167
i += block_size;
168168
}
169169

170170
if i < len {
171171
// Swap any remaining bytes
172-
let mut t: UnalignedBlock = mem::uninitialized();
172+
let mut t: UnalignedBlock = crate::mem::uninitialized();
173173
let rem = len - i;
174174

175175
let t = &mut t as *mut _ as *mut u8;
176176
let x = x.offset(i as isize);
177177
let y = y.offset(i as isize);
178178

179-
intrinsics::copy_nonoverlapping(x, t, rem);
180-
intrinsics::copy_nonoverlapping(y, x, rem);
181-
intrinsics::copy_nonoverlapping(t, y, rem);
179+
crate::intrinsics::copy_nonoverlapping(x, t, rem);
180+
crate::intrinsics::copy_nonoverlapping(y, x, rem);
181+
crate::intrinsics::copy_nonoverlapping(t, y, rem);
182182
}
183183
}
184184
}
@@ -193,7 +193,7 @@ mod mem {
193193

194194
pub fn swap<T>(x: &mut T, y: &mut T) {
195195
unsafe {
196-
ptr::swap_nonoverlapping_one(x, y);
196+
crate::ptr::swap_nonoverlapping_one(x, y);
197197
}
198198
}
199199

@@ -203,7 +203,7 @@ mod mem {
203203
}
204204

205205
pub unsafe fn uninitialized<T>() -> T {
206-
intrinsics::uninit()
206+
crate::intrinsics::uninit()
207207
}
208208
}
209209

gcc/testsuite/rust/execute/torture/iter1.rs

Lines changed: 19 additions & 19 deletions
Original file line number | Diff line number | Diff line change
@@ -99,51 +99,51 @@ mod ptr {
9999
#[lang = "const_ptr"]
100100
impl<T> *const T {
101101
pub unsafe fn offset(self, count: isize) -> *const T {
102-
intrinsics::offset(self, count)
102+
crate::intrinsics::offset(self, count)
103103
}
104104
}
105105

106106
#[lang = "mut_ptr"]
107107
impl<T> *mut T {
108108
pub unsafe fn offset(self, count: isize) -> *mut T {
109-
intrinsics::offset(self, count) as *mut T
109+
crate::intrinsics::offset(self, count) as *mut T
110110
}
111111
}
112112

113113
pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
114114
let x = x as *mut u8;
115115
let y = y as *mut u8;
116-
let len = mem::size_of::<T>() * count;
116+
let len = crate::mem::size_of::<T>() * count;
117117
swap_nonoverlapping_bytes(x, y, len)
118118
}
119119

120120
pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
121121
// For types smaller than the block optimization below,
122122
// just swap directly to avoid pessimizing codegen.
123-
if mem::size_of::<T>() < 32 {
123+
if crate::mem::size_of::<T>() < 32 {
124124
let z = read(x);
125-
intrinsics::copy_nonoverlapping(y, x, 1);
125+
crate::intrinsics::copy_nonoverlapping(y, x, 1);
126126
write(y, z);
127127
} else {
128128
swap_nonoverlapping(x, y, 1);
129129
}
130130
}
131131

132132
pub unsafe fn write<T>(dst: *mut T, src: T) {
133-
intrinsics::move_val_init(&mut *dst, src)
133+
crate::intrinsics::move_val_init(&mut *dst, src)
134134
}
135135

136136
pub unsafe fn read<T>(src: *const T) -> T {
137-
let mut tmp: T = mem::uninitialized();
138-
intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
137+
let mut tmp: T = crate::mem::uninitialized();
138+
crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
139139
tmp
140140
}
141141

142142
unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
143143
struct Block(u64, u64, u64, u64);
144144
struct UnalignedBlock(u64, u64, u64, u64);
145145

146-
let block_size = mem::size_of::<Block>();
146+
let block_size = crate::mem::size_of::<Block>();
147147

148148
// Loop through x & y, copying them `Block` at a time
149149
// The optimizer should unroll the loop fully for most types
@@ -152,31 +152,31 @@ mod ptr {
152152
while i + block_size <= len {
153153
// Create some uninitialized memory as scratch space
154154
// Declaring `t` here avoids aligning the stack when this loop is unused
155-
let mut t: Block = mem::uninitialized();
155+
let mut t: Block = crate::mem::uninitialized();
156156
let t = &mut t as *mut _ as *mut u8;
157157
let x = x.offset(i as isize);
158158
let y = y.offset(i as isize);
159159

160160
// Swap a block of bytes of x & y, using t as a temporary buffer
161161
// This should be optimized into efficient SIMD operations where available
162-
intrinsics::copy_nonoverlapping(x, t, block_size);
163-
intrinsics::copy_nonoverlapping(y, x, block_size);
164-
intrinsics::copy_nonoverlapping(t, y, block_size);
162+
crate::intrinsics::copy_nonoverlapping(x, t, block_size);
163+
crate::intrinsics::copy_nonoverlapping(y, x, block_size);
164+
crate::intrinsics::copy_nonoverlapping(t, y, block_size);
165165
i += block_size;
166166
}
167167

168168
if i < len {
169169
// Swap any remaining bytes
170-
let mut t: UnalignedBlock = mem::uninitialized();
170+
let mut t: UnalignedBlock = crate::mem::uninitialized();
171171
let rem = len - i;
172172

173173
let t = &mut t as *mut _ as *mut u8;
174174
let x = x.offset(i as isize);
175175
let y = y.offset(i as isize);
176176

177-
intrinsics::copy_nonoverlapping(x, t, rem);
178-
intrinsics::copy_nonoverlapping(y, x, rem);
179-
intrinsics::copy_nonoverlapping(t, y, rem);
177+
crate::intrinsics::copy_nonoverlapping(x, t, rem);
178+
crate::intrinsics::copy_nonoverlapping(y, x, rem);
179+
crate::intrinsics::copy_nonoverlapping(t, y, rem);
180180
}
181181
}
182182
}
@@ -191,7 +191,7 @@ mod mem {
191191

192192
pub fn swap<T>(x: &mut T, y: &mut T) {
193193
unsafe {
194-
ptr::swap_nonoverlapping_one(x, y);
194+
crate::ptr::swap_nonoverlapping_one(x, y);
195195
}
196196
}
197197

@@ -201,7 +201,7 @@ mod mem {
201201
}
202202

203203
pub unsafe fn uninitialized<T>() -> T {
204-
intrinsics::uninit()
204+
crate::intrinsics::uninit()
205205
}
206206
}
207207

0 commit comments

Comments (0)